304 files changed, 3578 insertions, 1742 deletions
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy index 6434f0d..6cd6dae 100644 --- a/Documentation/ABI/testing/ima_policy +++ b/Documentation/ABI/testing/ima_policy @@ -20,7 +20,7 @@ Description: lsm: [[subj_user=] [subj_role=] [subj_type=] [obj_user=] [obj_role=] [obj_type=]] - base: func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION] + base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK] mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC] fsmagic:= hex value uid:= decimal value @@ -40,11 +40,11 @@ Description: measure func=BPRM_CHECK measure func=FILE_MMAP mask=MAY_EXEC - measure func=INODE_PERM mask=MAY_READ uid=0 + measure func=FILE_CHECK mask=MAY_READ uid=0 The default policy measures all executables in bprm_check, all files mmapped executable in file_mmap, and all files - open for read by root in inode_permission. + open for read by root in do_filp_open. Examples of LSM specific definitions: @@ -54,8 +54,8 @@ Description: dont_measure obj_type=var_log_t dont_measure obj_type=auditd_log_t - measure subj_user=system_u func=INODE_PERM mask=MAY_READ - measure subj_role=system_r func=INODE_PERM mask=MAY_READ + measure subj_user=system_u func=FILE_CHECK mask=MAY_READ + measure subj_role=system_r func=FILE_CHECK mask=MAY_READ Smack: - measure subj_user=_ func=INODE_PERM mask=MAY_READ + measure subj_user=_ func=FILE_CHECK mask=MAY_READ diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt index aed082f..737988f 100644 --- a/Documentation/cpu-freq/governors.txt +++ b/Documentation/cpu-freq/governors.txt @@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT. up_threshold: defines what the average CPU usage between the samplings of 'sampling_rate' needs to be for the kernel to make a decision on whether it should increase the frequency. For example when it is set -to its default value of '80' it means that between the checking -intervals the CPU needs to be on average more than 80% in use to then +to its default value of '95' it means that between the checking +intervals the CPU needs to be on average more than 95% in use to then decide that the CPU frequency needs to be increased. ignore_nice_load: this parameter takes a value of '0' or '1'. 
When diff --git a/MAINTAINERS b/MAINTAINERS index 03f38c1..412eff6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3411,8 +3411,10 @@ S: Maintained F: drivers/scsi/sym53c8xx_2/ LTP (Linux Test Project) -M: Subrata Modak <subrata@linux.vnet.ibm.com> -M: Mike Frysinger <vapier@gentoo.org> +M: Rishikesh K Rajak <risrajak@linux.vnet.ibm.com> +M: Garrett Cooper <yanegomi@gmail.com> +M: Mike Frysinger <vapier@gentoo.org> +M: Subrata Modak <subrata@linux.vnet.ibm.com> L: ltp-list@lists.sourceforge.net (subscribers-only) W: http://ltp.sourceforge.net/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git @@ -3836,6 +3838,7 @@ NETWORKING DRIVERS L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git S: Odd Fixes F: drivers/net/ F: include/linux/if_* @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 33 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc8 NAME = Man-Eating Seals of Antiquity # *DOCUMENTATION* diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 4c33ca8..184a6bd 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -702,6 +702,7 @@ config ARCH_OMAP select ARCH_HAS_CPUFREQ select GENERIC_TIME select GENERIC_CLOCKEVENTS + select ARCH_HAS_HOLES_MEMORYMODEL help Support for TI's OMAP platform (OMAP1 and OMAP2). diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 9e75825..356d702 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -94,7 +94,7 @@ CFLAGS_ABI +=-funwind-tables endif ifeq ($(CONFIG_THUMB2_KERNEL),y) -AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=thumb,-Wa$(comma)-mauto-it) +AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it) AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W) CFLAGS_THUMB2 :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN) AFLAGS_THUMB2 :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb diff --git a/arch/arm/mach-mx25/clock.c b/arch/arm/mach-mx25/clock.c index 6e838b8..6acc88b 100644 --- a/arch/arm/mach-mx25/clock.c +++ b/arch/arm/mach-mx25/clock.c @@ -119,6 +119,11 @@ static unsigned long get_rate_nfc(struct clk *clk) return get_rate_per(8); } +static unsigned long get_rate_gpt(struct clk *clk) +{ + return get_rate_per(5); +} + static unsigned long get_rate_otg(struct clk *clk) { return 48000000; /* FIXME */ @@ -144,7 +149,7 @@ static void clk_cgcr_disable(struct clk *clk) __raw_writel(reg, clk->enable_reg); } -#define DEFINE_CLOCK(name, i, er, es, gr, sr) \ +#define DEFINE_CLOCK(name, i, er, es, gr, sr, s) \ static struct clk name = { \ .id = i, \ .enable_reg = CRM_BASE + er, \ @@ -153,27 +158,30 @@ static void clk_cgcr_disable(struct clk *clk) .set_rate = sr, \ .enable = clk_cgcr_enable, \ .disable = clk_cgcr_disable, \ + .secondary = s, \ } -DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_ipg, NULL); -DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL); -DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL); -DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL); -DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL); -DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL); -DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL); -DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL); -DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL); -DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL); -DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, 
NULL); -DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL); -DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL); -DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL); -DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL); -DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL); -DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL); -DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL); -DEFINE_CLOCK(fec_clk, 0, CCM_CGCR0, 23, get_rate_ipg, NULL); +DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_gpt, NULL, NULL); +DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL); +DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL, NULL); +DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL, NULL); +DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL, NULL); +DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL, NULL, NULL); +DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk); +DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL, &uart_per_clk); +DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk); +DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk); +DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk); +DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL, NULL); +DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL); +DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL); +DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL, NULL); +DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL, NULL); +DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL, NULL); +DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL); +DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL); +DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL, NULL); +DEFINE_CLOCK(fec_clk, 0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk); #define _REGISTER_CLOCK(d, n, c) \ { \ @@ -208,13 +216,21 @@ static struct clk_lookup lookups[] = { _REGISTER_CLOCK("fec.0", NULL, fec_clk) }; -int __init mx25_clocks_init(unsigned long fref) +int __init mx25_clocks_init(void) { int i; for (i = 0; i < ARRAY_SIZE(lookups); i++) clkdev_add(&lookups[i]); + /* Turn off all clocks except the ones we need to survive, namely: + * EMI, GPIO1-3 (CCM_CGCR1[18:16]), GPT1, IOMUXC (CCM_CGCR1[27]), IIM, + * SCC + */ + __raw_writel((1 << 19), CRM_BASE + CCM_CGCR0); + __raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1); + __raw_writel((1 << 5), CRM_BASE + CCM_CGCR2); + mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); return 0; diff --git a/arch/arm/mach-mx25/mx25pdk.c b/arch/arm/mach-mx25/mx25pdk.c index 921bc99..6f06089 100644 --- a/arch/arm/mach-mx25/mx25pdk.c +++ b/arch/arm/mach-mx25/mx25pdk.c @@ -91,7 +91,7 @@ static void __init mx25pdk_init(void) static void __init mx25pdk_timer_init(void) { - mx25_clocks_init(26000000); + mx25_clocks_init(); } static struct sys_timer mx25pdk_timer = { diff --git a/arch/arm/mach-mx3/mx31ads.c b/arch/arm/mach-mx3/mx31ads.c index 3e7bafa..938c549 100644 --- a/arch/arm/mach-mx3/mx31ads.c +++ b/arch/arm/mach-mx3/mx31ads.c @@ -173,6 +173,7 @@ static void expio_unmask_irq(u32 irq) } static struct irq_chip expio_irq_chip = { + .name = "EXPIO(CPLD)", .ack = expio_ack_irq, .mask = expio_mask_irq, .unmask = expio_unmask_irq, @@ -302,6 +303,7 @@ static struct regulator_init_data ldo1_data = { .min_uV = 2800000, .max_uV = 2800000, 
.valid_modes_mask = REGULATOR_MODE_NORMAL, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, .apply_uV = 1, }, }; @@ -322,6 +324,7 @@ static struct regulator_init_data ldo2_data = { .min_uV = 3300000, .max_uV = 3300000, .valid_modes_mask = REGULATOR_MODE_NORMAL, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, .apply_uV = 1, }, .num_consumer_supplies = ARRAY_SIZE(ldo2_consumers), @@ -459,6 +462,7 @@ static int mx31_wm8350_init(struct wm8350 *wm8350) static struct wm8350_platform_data __initdata mx31_wm8350_pdata = { .init = mx31_wm8350_init, + .irq_base = MXC_BOARD_IRQ_START + MXC_MAX_EXP_IO_LINES, }; #endif diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 3f1334f..7027cdc 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -505,7 +505,7 @@ static void __init gpmc_mem_init(void) void __init gpmc_init(void) { u32 l; - char *ck; + char *ck = NULL; if (cpu_is_omap24xx()) { ck = "core_l3_ck"; @@ -521,6 +521,9 @@ void __init gpmc_init(void) l = OMAP44XX_GPMC_BASE; } + if (WARN_ON(!ck)) + return; + gpmc_l3_clk = clk_get(NULL, ck); if (IS_ERR(gpmc_l3_clk)) { printk(KERN_ERR "Could not get GPMC clock %s\n", ck); diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c index 2705402..26aeef5 100644 --- a/arch/arm/mach-omap2/irq.c +++ b/arch/arm/mach-omap2/irq.c @@ -194,7 +194,7 @@ void __init omap_init_irq(void) int i; for (i = 0; i < ARRAY_SIZE(irq_banks); i++) { - unsigned long base; + unsigned long base = 0; struct omap_irq_bank *bank = irq_banks + i; if (cpu_is_omap24xx()) @@ -202,6 +202,8 @@ void __init omap_init_irq(void) else if (cpu_is_omap34xx()) base = OMAP34XX_IC_BASE; + BUG_ON(!base); + /* Static mapping, never released */ bank->base_reg = ioremap(base, SZ_4K); if (!bank->base_reg) { diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c index 0c3c72d..8afe9dd 100644 --- a/arch/arm/mach-omap2/mmc-twl4030.c +++ b/arch/arm/mach-omap2/mmc-twl4030.c @@ -408,6 +408,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) { struct twl4030_hsmmc_info *c; int nr_hsmmc = ARRAY_SIZE(hsmmc_data); + int i; if (cpu_is_omap2430()) { control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE; @@ -434,7 +435,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) mmc = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL); if (!mmc) { pr_err("Cannot allocate memory for mmc device!\n"); - return; + goto done; } if (c->name) @@ -532,6 +533,10 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers) continue; c->dev = mmc->dev; } + +done: + for (i = 0; i < nr_hsmmc; i++) + kfree(hsmmc_data[i]); } #endif diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index 3f59bd1..5fedc50 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -486,7 +486,7 @@ int __init omap_mux_init_signal(char *muxname, int val) static inline void omap_mux_decode(struct seq_file *s, u16 val) { char *flags[OMAP_MUX_MAX_NR_FLAGS]; - char mode[14]; + char mode[sizeof("OMAP_MUX_MODE") + 1]; int i = -1; sprintf(mode, "OMAP_MUX_MODE%d", val & 0x7); @@ -553,6 +553,7 @@ static int omap_mux_dbg_board_show(struct seq_file *s, void *unused) if (!m0_name) continue; + /* REVISIT: Needs to be updated if mode0 names get longer */ for (i = 0; i < OMAP_MUX_DEFNAME_LEN; i++) { if (m0_name[i] == '\0') { m0_def[i] = m0_name[i]; @@ -968,6 +969,13 @@ static void __init omap_mux_init_list(struct omap_mux *superset) } #endif +#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) + if 
(!superset->muxnames || !superset->muxnames[0]) { + superset++; + continue; + } +#endif + entry = omap_mux_list_add(superset); if (!entry) { printk(KERN_ERR "mux: Could not add entry\n"); diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c index 68e0a59..07aa7b3 100644 --- a/arch/arm/mach-omap2/mux34xx.c +++ b/arch/arm/mach-omap2/mux34xx.c @@ -649,6 +649,53 @@ static struct omap_mux __initdata omap3_muxmodes[] = { _OMAP3_MUXENTRY(UART3_TX_IRTX, 166, "uart3_tx_irtx", NULL, NULL, NULL, "gpio_166", NULL, NULL, "safe_mode"), + + /* Only on 3630, see omap36xx_cbp_subset for the signals */ + _OMAP3_MUXENTRY(GPMC_A11, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_MBUSFLAG, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_MREAD, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_MWRITE, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_SBUSFLAG, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_SREAD, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_SWRITE, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(GPMC_A11, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_MCAD28, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_MCAD29, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_MCAD32, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_MCAD33, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_MCAD34, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_MCAD35, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), + _OMAP3_MUXENTRY(SAD2D_MCAD36, 0, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL), { .reg_offset = OMAP_MUX_TERMINATOR }, }; diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 8c964be..e10a02d 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -36,7 +36,13 @@ #define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV 0x52 #define UART_OMAP_WER 0x17 /* Wake-up enable register */ -#define DEFAULT_TIMEOUT (5 * HZ) +/* + * NOTE: By default the serial timeout is disabled as it causes lost characters + * over the serial ports. This means that the UART clocks will stay on until + * disabled via sysfs. This also causes that any deeper omap sleep states are + * blocked. 
+ */ +#define DEFAULT_TIMEOUT 0 struct omap_uart_state { int num; @@ -422,7 +428,8 @@ static void omap_uart_idle_init(struct omap_uart_state *uart) uart->timeout = DEFAULT_TIMEOUT; setup_timer(&uart->timer, omap_uart_idle_timer, (unsigned long) uart); - mod_timer(&uart->timer, jiffies + uart->timeout); + if (uart->timeout) + mod_timer(&uart->timer, jiffies + uart->timeout); omap_uart_smart_idle_enable(uart, 0); if (cpu_is_omap34xx()) { diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c index a21a4b3..d94857e 100644 --- a/arch/arm/mach-realview/realview_pbx.c +++ b/arch/arm/mach-realview/realview_pbx.c @@ -334,8 +334,8 @@ static void realview_pbx_reset(char mode) * in the system FPGA */ __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl); - __raw_writel(0x0000, reset_ctrl); - __raw_writel(0x0004, reset_ctrl); + __raw_writel(0x00F0, reset_ctrl); + __raw_writel(0x00F4, reset_ctrl); } static void __init realview_pbx_init(void) diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 3f9cd3d..795dc61 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -41,7 +41,7 @@ ENTRY(cpu_arm7_dcache_clean_area) ENTRY(cpu_arm7_data_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR - ldr r8, [r0] @ read arm instruction + ldr r8, [r2] @ read arm instruction tst r8, #1 << 20 @ L = 0 -> write? orreq r1, r1, #1 << 11 @ yes. and r7, r8, #15 << 24 diff --git a/arch/arm/plat-mxc/audmux-v2.c b/arch/arm/plat-mxc/audmux-v2.c index 6f21096..b06954a 100644 --- a/arch/arm/plat-mxc/audmux-v2.c +++ b/arch/arm/plat-mxc/audmux-v2.c @@ -23,6 +23,7 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/clk.h> +#include <linux/debugfs.h> #include <mach/audmux.h> #include <mach/hardware.h> @@ -32,6 +33,140 @@ static void __iomem *audmux_base; #define MXC_AUDMUX_V2_PTCR(x) ((x) * 8) #define MXC_AUDMUX_V2_PDCR(x) ((x) * 8 + 4) +#ifdef CONFIG_DEBUG_FS +static struct dentry *audmux_debugfs_root; + +static int audmux_open_file(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +/* There is an annoying discontinuity in the SSI numbering with regard + * to the Linux number of the devices */ +static const char *audmux_port_string(int port) +{ + switch (port) { + case MX31_AUDMUX_PORT1_SSI0: + return "imx-ssi.0"; + case MX31_AUDMUX_PORT2_SSI1: + return "imx-ssi.1"; + case MX31_AUDMUX_PORT3_SSI_PINS_3: + return "SSI3"; + case MX31_AUDMUX_PORT4_SSI_PINS_4: + return "SSI4"; + case MX31_AUDMUX_PORT5_SSI_PINS_5: + return "SSI5"; + case MX31_AUDMUX_PORT6_SSI_PINS_6: + return "SSI6"; + default: + return "UNKNOWN"; + } +} + +static ssize_t audmux_read_file(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + ssize_t ret; + char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + int port = (int)file->private_data; + u32 pdcr, ptcr; + + if (!buf) + return -ENOMEM; + + if (audmux_clk) + clk_enable(audmux_clk); + + ptcr = readl(audmux_base + MXC_AUDMUX_V2_PTCR(port)); + pdcr = readl(audmux_base + MXC_AUDMUX_V2_PDCR(port)); + + if (audmux_clk) + clk_disable(audmux_clk); + + ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", + pdcr, ptcr); + + if (ptcr & MXC_AUDMUX_V2_PTCR_TFSDIR) + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "TxFS output from %s, ", + audmux_port_string((ptcr >> 27) & 0x7)); + else + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "TxFS input, "); + + if (ptcr & MXC_AUDMUX_V2_PTCR_TCLKDIR) + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "TxClk output 
from %s", + audmux_port_string((ptcr >> 22) & 0x7)); + else + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "TxClk input"); + + ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); + + if (ptcr & MXC_AUDMUX_V2_PTCR_SYN) { + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "Port is symmetric"); + } else { + if (ptcr & MXC_AUDMUX_V2_PTCR_RFSDIR) + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "RxFS output from %s, ", + audmux_port_string((ptcr >> 17) & 0x7)); + else + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "RxFS input, "); + + if (ptcr & MXC_AUDMUX_V2_PTCR_RCLKDIR) + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "RxClk output from %s", + audmux_port_string((ptcr >> 12) & 0x7)); + else + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "RxClk input"); + } + + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "\nData received from %s\n", + audmux_port_string((pdcr >> 13) & 0x7)); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + + kfree(buf); + + return ret; +} + +static const struct file_operations audmux_debugfs_fops = { + .open = audmux_open_file, + .read = audmux_read_file, +}; + +static void audmux_debugfs_init(void) +{ + int i; + char buf[20]; + + audmux_debugfs_root = debugfs_create_dir("audmux", NULL); + if (!audmux_debugfs_root) { + pr_warning("Failed to create AUDMUX debugfs root\n"); + return; + } + + for (i = 1; i < 8; i++) { + snprintf(buf, sizeof(buf), "ssi%d", i); + if (!debugfs_create_file(buf, 0444, audmux_debugfs_root, + (void *)i, &audmux_debugfs_fops)) + pr_warning("Failed to create AUDMUX port %d debugfs file\n", + i); + } +} +#else +static inline void audmux_debugfs_init(void) +{ +} +#endif + int mxc_audmux_v2_configure_port(unsigned int port, unsigned int ptcr, unsigned int pdcr) { @@ -68,6 +203,8 @@ static int mxc_audmux_v2_init(void) if (cpu_is_mx31() || cpu_is_mx35()) audmux_base = IO_ADDRESS(AUDMUX_BASE_ADDR); + audmux_debugfs_init(); + return 0; } diff --git a/arch/arm/plat-mxc/include/mach/board-mx31lite.h b/arch/arm/plat-mxc/include/mach/board-mx31lite.h index 0184b63..2b2da03 100644 --- a/arch/arm/plat-mxc/include/mach/board-mx31lite.h +++ b/arch/arm/plat-mxc/include/mach/board-mx31lite.h @@ -25,7 +25,7 @@ #ifndef __ASSEMBLY__ -enum mx31lilly_boards { +enum mx31lite_boards { MX31LITE_NOBOARD = 0, MX31LITE_DB = 1, }; diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h index 286cb9b..4bf1068 100644 --- a/arch/arm/plat-mxc/include/mach/common.h +++ b/arch/arm/plat-mxc/include/mach/common.h @@ -32,7 +32,7 @@ extern void mxc91231_init_irq(void); extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int); extern int mx1_clocks_init(unsigned long fref); extern int mx21_clocks_init(unsigned long lref, unsigned long fref); -extern int mx25_clocks_init(unsigned long fref); +extern int mx25_clocks_init(void); extern int mx27_clocks_init(unsigned long fref); extern int mx31_clocks_init(unsigned long fref); extern int mx35_clocks_init(void); diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx35.h b/arch/arm/plat-mxc/include/mach/iomux-mx35.h index 00b0ac1..c88d407 100644 --- a/arch/arm/plat-mxc/include/mach/iomux-mx35.h +++ b/arch/arm/plat-mxc/include/mach/iomux-mx35.h @@ -671,7 +671,7 @@ #define MX35_PAD_LD8__SDMA_SDMA_DEBUG_PC_8 IOMUX_PAD(0x634, 0x1d0, 6, 0x0, 0, NO_PAD_CTRL) #define MX35_PAD_LD9__IPU_DISPB_DAT_9 IOMUX_PAD(0x638, 0x1d4, 0, 0x0, 0, NO_PAD_CTRL) -#define MX35_PAD_LD9__GPIO2_9 IOMUX_PAD(0x638, 0x1d4, 5, 0x8e4 0, NO_PAD_CTRL) +#define MX35_PAD_LD9__GPIO2_9 IOMUX_PAD(0x638, 0x1d4, 5, 0x8e4, 
0, NO_PAD_CTRL) #define MX35_PAD_LD9__SDMA_SDMA_DEBUG_PC_9 IOMUX_PAD(0x638, 0x1d4, 6, 0x0, 0, NO_PAD_CTRL) #define MX35_PAD_LD10__IPU_DISPB_DAT_10 IOMUX_PAD(0x63c, 0x1d8, 0, 0x0, 0, NO_PAD_CTRL) diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h index ead9d59..0cb3476 100644 --- a/arch/arm/plat-mxc/include/mach/irqs.h +++ b/arch/arm/plat-mxc/include/mach/irqs.h @@ -37,7 +37,12 @@ * within sensible limits. */ #define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + MXC_GPIO_IRQS) + +#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1 +#define MXC_BOARD_IRQS 80 +#else #define MXC_BOARD_IRQS 16 +#endif #define MXC_IPU_IRQ_START (MXC_BOARD_IRQ_START + MXC_BOARD_IRQS) diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c index d9f8c84..4becbdd 100644 --- a/arch/arm/plat-omap/clock.c +++ b/arch/arm/plat-omap/clock.c @@ -391,7 +391,7 @@ static struct dentry *clk_debugfs_root; static int clk_debugfs_register_one(struct clk *c) { int err; - struct dentry *d, *child; + struct dentry *d, *child, *child_tmp; struct clk *pa = c->parent; char s[255]; char *p = s; @@ -423,7 +423,7 @@ static int clk_debugfs_register_one(struct clk *c) err_out: d = c->dent; - list_for_each_entry(child, &d->d_subdirs, d_u.d_child) + list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child) debugfs_remove(child); debugfs_remove(c->dent); return err; diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index d17620c..d2422c7 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c @@ -750,6 +750,7 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio, } #endif +#ifdef CONFIG_ARCH_OMAP1 /* * This only applies to chips that can't do both rising and falling edge * detection at once. For all other chips, this function is a noop. @@ -760,11 +761,9 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) u32 l = 0; switch (bank->method) { -#ifdef CONFIG_ARCH_OMAP1 case METHOD_MPUIO: reg += OMAP_MPUIO_GPIO_INT_EDGE; break; -#endif #ifdef CONFIG_ARCH_OMAP15XX case METHOD_GPIO_1510: reg += OMAP1510_GPIO_INT_CONTROL; @@ -787,6 +786,7 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) __raw_writel(l, reg); } +#endif static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger) { diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index 1e5648d..2ed7201 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c @@ -89,16 +89,6 @@ #define USE_WAKEUP_LAT 0 #define IGNORE_WAKEUP_LAT 1 -/* XXX this should be moved into a separate file */ -#if defined(CONFIG_ARCH_OMAP2420) -# define OMAP_32KSYNCT_BASE 0x48004000 -#elif defined(CONFIG_ARCH_OMAP2430) -# define OMAP_32KSYNCT_BASE 0x49020000 -#elif defined(CONFIG_ARCH_OMAP3430) -# define OMAP_32KSYNCT_BASE 0x48320000 -#else -# error Unknown OMAP device -#endif /* Private functions */ diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index f60a540..a63c4be 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -197,10 +197,13 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ } /* - * Update the FPSCR with the additional exception flags. + * If any of the status flags are set, update the FPSCR. * Comparison instructions always return at least one of * these flags set. 
*/ + if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) + fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V); + fpscr |= exceptions; fmxr(FPSCR, fpscr); diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index 1aa1ea5..b13d187 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -1325,7 +1325,7 @@ struct platform_device *__init at32_add_device_mci(unsigned int id, struct mci_platform_data *data) { struct platform_device *pdev; - struct mci_dma_slave *slave; + struct mci_dma_data *slave; u32 pioa_mask; u32 piob_mask; @@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) ARRAY_SIZE(atmel_mci0_resource))) goto fail; - slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL); + slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL); + if (!slave) + goto fail; slave->sdata.dma_dev = &dw_dmac0_device.dev; slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT; @@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) if (platform_device_add_data(pdev, data, sizeof(struct mci_platform_data))) - goto fail; + goto fail_free; /* CLK line is common to both slots */ pioa_mask = 1 << 10; @@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) /* Slot is unused */ break; default: - goto fail; + goto fail_free; } select_peripheral(PIOA, pioa_mask, PERIPH_A, 0); @@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) break; default: if (!data->slot[0].bus_width) - goto fail; + goto fail_free; data->slot[1].bus_width = 0; break; @@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) platform_device_add(pdev); return pdev; +fail_free: + kfree(slave); fail: data->dma_slave = NULL; - kfree(slave); platform_device_put(pdev); return NULL; } diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig index bb7c374..6fced1f 100644 --- a/arch/microblaze/configs/mmu_defconfig +++ b/arch/microblaze/configs/mmu_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.31 -# Thu Sep 24 10:28:50 2009 +# Linux kernel version: 2.6.33-rc6 +# Wed Feb 3 10:02:59 2010 # CONFIG_MICROBLAZE=y # CONFIG_SWAP is not set @@ -19,8 +19,12 @@ CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_GPIO=y CONFIG_GENERIC_CSUM=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y # CONFIG_PCI is not set CONFIG_NO_DMA=y +CONFIG_DTC=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" CONFIG_CONSTRUCTORS=y @@ -44,6 +48,7 @@ CONFIG_SYSVIPC_SYSCTL=y # CONFIG_TREE_RCU=y # CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set # CONFIG_RCU_TRACE is not set CONFIG_RCU_FANOUT=32 # CONFIG_RCU_FANOUT_EXACT is not set @@ -64,10 +69,12 @@ CONFIG_INITRAMFS_ROOT_GID=0 CONFIG_RD_GZIP=y # CONFIG_RD_BZIP2 is not set # CONFIG_RD_LZMA is not set +# CONFIG_RD_LZO is not set # CONFIG_INITRAMFS_COMPRESSION_NONE is not set CONFIG_INITRAMFS_COMPRESSION_GZIP=y # CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set # CONFIG_INITRAMFS_COMPRESSION_LZMA is not set +# CONFIG_INITRAMFS_COMPRESSION_LZO is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -90,21 +97,20 @@ CONFIG_EVENTFD=y CONFIG_AIO=y # -# Performance Counters +# Kernel Performance Events And Counters # CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_STRIP_ASM_SYMS is not set 
CONFIG_COMPAT_BRK=y CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set +CONFIG_HAVE_OPROFILE=y # # GCOV-based kernel profiling # -# CONFIG_SLOW_WORK is not set +CONFIG_SLOW_WORK=y # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_BASE_SMALL=1 @@ -123,14 +129,41 @@ CONFIG_LBDAF=y # IO Schedulers # CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y -# CONFIG_DEFAULT_AS is not set # CONFIG_DEFAULT_DEADLINE is not set CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set # CONFIG_FREEZER is not set # @@ -139,11 +172,6 @@ CONFIG_DEFAULT_IOSCHED="cfq" CONFIG_PLATFORM_GENERIC=y CONFIG_OPT_LIB_FUNCTION=y CONFIG_OPT_LIB_ASM=y -CONFIG_ALLOW_EDIT_AUTO=y - -# -# Automatic platform settings from Kconfig.auto -# # # Definitions for MICROBLAZE0 @@ -203,12 +231,11 @@ CONFIG_FLATMEM_MANUAL=y CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_SPLIT_PTLOCK_CPUS=999999 # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=0 CONFIG_VIRT_TO_BUS=y -CONFIG_HAVE_MLOCK=y -CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 # @@ -289,7 +316,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_WIRELESS is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +# CONFIG_LIB80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# # CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -313,6 +346,10 @@ CONFIG_OF_DEVICE=y CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set # CONFIG_BLK_DEV_LOOP is not set + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# # CONFIG_BLK_DEV_NBD is not set CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 @@ -349,7 +386,6 @@ CONFIG_NETDEVICES=y # CONFIG_PHYLIB is not set CONFIG_NET_ETHERNET=y # CONFIG_MII is not set -# CONFIG_ETHOC is not set # CONFIG_DNET is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set @@ -359,12 +395,12 @@ CONFIG_NET_ETHERNET=y # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # 
CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_KS8842 is not set +# CONFIG_KS8851_MLL is not set CONFIG_XILINX_EMACLITE=y CONFIG_NETDEV_1000=y CONFIG_NETDEV_10000=y CONFIG_WLAN=y -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set +# CONFIG_HOSTAP is not set # # Enable WiMAX (Networking options) to see the WiMAX drivers @@ -408,6 +444,7 @@ CONFIG_SERIAL_UARTLITE=y CONFIG_SERIAL_UARTLITE_CONSOLE=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y @@ -433,7 +470,6 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set # CONFIG_WATCHDOG is not set # @@ -526,8 +562,6 @@ CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y -CONFIG_TMPFS=y -# CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set CONFIG_MISC_FILESYSTEMS=y @@ -638,11 +672,13 @@ CONFIG_NLS_DEFAULT="iso8859-1" # # Kernel hacking # +CONFIG_TRACE_IRQFLAGS_SUPPORT=y # CONFIG_PRINTK_TIME is not set CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set # CONFIG_DEBUG_FS is not set # CONFIG_HEADERS_CHECK is not set @@ -662,6 +698,9 @@ CONFIG_DEBUG_SLAB=y # CONFIG_DEBUG_SLAB_LEAK is not set CONFIG_DEBUG_SPINLOCK=y # CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set @@ -680,10 +719,29 @@ CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set # CONFIG_SYSCTL_SYSCALL_CHECK is not set # CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_SAMPLES is not set -# CONFIG_KMEMCHECK is not set CONFIG_EARLY_PRINTK=y # CONFIG_HEART_BEAT is not set CONFIG_DEBUG_BOOTMEM=y @@ -694,7 +752,11 @@ CONFIG_DEBUG_BOOTMEM=y # CONFIG_KEYS is not set # CONFIG_SECURITY is not set # CONFIG_SECURITYFS is not set -# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_SMACK is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" CONFIG_CRYPTO=y # diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig index adb839b..ce2da53 100644 --- a/arch/microblaze/configs/nommu_defconfig +++ b/arch/microblaze/configs/nommu_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't 
edit -# Linux kernel version: 2.6.31 -# Thu Sep 24 10:29:43 2009 +# Linux kernel version: 2.6.33-rc6 +# Wed Feb 3 10:03:21 2010 # CONFIG_MICROBLAZE=y # CONFIG_SWAP is not set @@ -19,8 +19,12 @@ CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_GPIO=y CONFIG_GENERIC_CSUM=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y # CONFIG_PCI is not set CONFIG_NO_DMA=y +CONFIG_DTC=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" CONFIG_CONSTRUCTORS=y @@ -46,6 +50,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y # CONFIG_TREE_RCU=y # CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set # CONFIG_RCU_TRACE is not set CONFIG_RCU_FANOUT=32 # CONFIG_RCU_FANOUT_EXACT is not set @@ -81,16 +86,16 @@ CONFIG_EVENTFD=y CONFIG_AIO=y # -# Performance Counters +# Kernel Performance Events And Counters # CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_STRIP_ASM_SYMS is not set CONFIG_COMPAT_BRK=y CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set +# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set +CONFIG_HAVE_OPROFILE=y # # GCOV-based kernel profiling @@ -116,14 +121,41 @@ CONFIG_LBDAF=y # IO Schedulers # CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y -# CONFIG_DEFAULT_AS is not set # CONFIG_DEFAULT_DEADLINE is not set CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +CONFIG_INLINE_SPIN_UNLOCK=y +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +CONFIG_INLINE_READ_UNLOCK=y +# CONFIG_INLINE_READ_UNLOCK_BH is not set +CONFIG_INLINE_READ_UNLOCK_IRQ=y +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +CONFIG_INLINE_WRITE_UNLOCK=y +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set # CONFIG_FREEZER is not set # @@ -132,7 +164,10 @@ CONFIG_DEFAULT_IOSCHED="cfq" CONFIG_PLATFORM_GENERIC=y # CONFIG_SELFMOD is not set # CONFIG_OPT_LIB_FUNCTION is not set -# CONFIG_ALLOW_EDIT_AUTO is not set + +# +# Definitions for MICROBLAZE0 +# CONFIG_KERNEL_BASE_ADDR=0x90000000 CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5" CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 @@ -190,7 +225,6 @@ CONFIG_SPLIT_PTLOCK_CPUS=4 # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=0 CONFIG_VIRT_TO_BUS=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1 # @@ -274,9 +308,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_AF_RXRPC is not set CONFIG_WIRELESS=y # CONFIG_CFG80211 is not set -CONFIG_CFG80211_DEFAULT_PS_VALUE=0 -CONFIG_WIRELESS_OLD_REGULATORY=y -# CONFIG_WIRELESS_EXT is not set # CONFIG_LIB80211 is not set # @@ -301,9 +332,9 @@ CONFIG_STANDALONE=y # CONFIG_CONNECTOR is 
not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set CONFIG_MTD_CONCAT=y CONFIG_MTD_PARTITIONS=y -# CONFIG_MTD_TESTS is not set # CONFIG_MTD_REDBOOT_PARTS is not set CONFIG_MTD_CMDLINE_PARTS=y # CONFIG_MTD_OF_PARTS is not set @@ -387,6 +418,10 @@ CONFIG_OF_DEVICE=y CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set # CONFIG_BLK_DEV_LOOP is not set + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 @@ -423,7 +458,6 @@ CONFIG_NETDEVICES=y # CONFIG_PHYLIB is not set CONFIG_NET_ETHERNET=y # CONFIG_MII is not set -# CONFIG_ETHOC is not set # CONFIG_DNET is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set @@ -433,12 +467,12 @@ CONFIG_NET_ETHERNET=y # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_KS8842 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_XILINX_EMACLITE is not set CONFIG_NETDEV_1000=y CONFIG_NETDEV_10000=y CONFIG_WLAN=y -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set +# CONFIG_HOSTAP is not set # # Enable WiMAX (Networking options) to see the WiMAX drivers @@ -482,6 +516,7 @@ CONFIG_SERIAL_UARTLITE=y CONFIG_SERIAL_UARTLITE_CONSOLE=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y @@ -508,7 +543,6 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set # CONFIG_WATCHDOG is not set # @@ -616,7 +650,6 @@ CONFIG_INOTIFY_USER=y CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set CONFIG_MISC_FILESYSTEMS=y @@ -672,11 +705,13 @@ CONFIG_MSDOS_PARTITION=y # # Kernel hacking # +CONFIG_TRACE_IRQFLAGS_SUPPORT=y # CONFIG_PRINTK_TIME is not set CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set CONFIG_UNUSED_SYMBOLS=y CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set @@ -695,12 +730,16 @@ CONFIG_DEBUG_OBJECTS=y CONFIG_DEBUG_OBJECTS_SELFTEST=y CONFIG_DEBUG_OBJECTS_FREE=y CONFIG_DEBUG_OBJECTS_TIMERS=y +# CONFIG_DEBUG_OBJECTS_WORK is not set CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_RT_MUTEXES is not set # CONFIG_RT_MUTEX_TESTER is not set # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set @@ -720,8 +759,28 @@ CONFIG_DEBUG_SG=y # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set 
+CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_EARLY_PRINTK=y @@ -734,7 +793,11 @@ CONFIG_EARLY_PRINTK=y # CONFIG_KEYS is not set # CONFIG_SECURITY is not set # CONFIG_SECURITYFS is not set -# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_SMACK is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" CONFIG_CRYPTO=y # diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S index 95b0855..391d619 100644 --- a/arch/microblaze/kernel/entry-nommu.S +++ b/arch/microblaze/kernel/entry-nommu.S @@ -122,7 +122,7 @@ ENTRY(_interrupt) ret_from_intr: lwi r11, r1, PT_MODE - bneid r11, 3f + bneid r11, no_intr_resched lwi r6, r31, TS_THREAD_INFO /* get thread info */ lwi r19, r6, TI_FLAGS /* get flags in thread info */ @@ -133,16 +133,18 @@ ret_from_intr: bralid r15, schedule nop 1: andi r11, r19, _TIF_SIGPENDING - beqid r11, no_intr_reshed + beqid r11, no_intr_resched addk r5, r1, r0 addk r7, r0, r0 bralid r15, do_signal addk r6, r0, r0 -no_intr_reshed: +no_intr_resched: + /* Disable interrupts, we are now committed to the state restore */ + disable_irq + /* save mode indicator */ lwi r11, r1, PT_MODE -3: swi r11, r0, PER_CPU(KM) /* save r31 */ diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 5372b24..bb8c4b9 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p) microblaze_cache_init(); + invalidate_dcache(); enable_dcache(); invalidate_icache(); diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index ed84b4c..84b6503 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.23-rc2 -# Tue Aug 7 13:04:24 2007 +# Linux kernel version: 2.6.33-rc6 +# Wed Feb 3 18:12:31 2010 # CONFIG_MIPS=y @@ -9,20 +9,28 @@ CONFIG_MIPS=y # Machine selection # # CONFIG_MACH_ALCHEMY is not set +# CONFIG_AR7 is not set +# CONFIG_BCM47XX is not set +# CONFIG_BCM63XX is not set # CONFIG_MIPS_COBALT is not set # CONFIG_MACH_DECSTATION is not set # CONFIG_MACH_JAZZ is not set -# CONFIG_LEMOTE_FULONG is not set +# CONFIG_LASAT is not set +# CONFIG_MACH_LOONGSON is not set # CONFIG_MIPS_MALTA is not set # CONFIG_MIPS_SIM is not set -# CONFIG_MARKEINS is not set +# CONFIG_NEC_MARKEINS is not set # CONFIG_MACH_VR41XX is not set +# CONFIG_NXP_STB220 is not set +# CONFIG_NXP_STB225 is not set # CONFIG_PNX8550_JBS is not set # CONFIG_PNX8550_STB810 is not set # CONFIG_PMC_MSP is not set # CONFIG_PMC_YOSEMITE is not set +# CONFIG_POWERTV is not set # CONFIG_SGI_IP22 is not set CONFIG_SGI_IP27=y +# CONFIG_SGI_IP28 is not set # CONFIG_SGI_IP32 is not set # CONFIG_SIBYTE_CRHINE is not set # CONFIG_SIBYTE_CARMEL is not set @@ -33,32 +41,39 @@ CONFIG_SGI_IP27=y # CONFIG_SIBYTE_SENTOSA is not set # CONFIG_SIBYTE_BIGSUR is not set # CONFIG_SNI_RM is not set -# CONFIG_TOSHIBA_JMR3927 is not set -# CONFIG_TOSHIBA_RBTX4927 is not set -# CONFIG_TOSHIBA_RBTX4938 is not set +# CONFIG_MACH_TX39XX is not set +# CONFIG_MACH_TX49XX is not set +# 
CONFIG_MIKROTIK_RB532 is not set # CONFIG_WR_PPMC is not set +# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set +# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set +# CONFIG_ALCHEMY_GPIO_INDIRECT is not set CONFIG_SGI_SN_M_MODE=y # CONFIG_SGI_SN_N_MODE is not set # CONFIG_MAPPED_KERNEL is not set # CONFIG_REPLICATE_KTEXT is not set # CONFIG_REPLICATE_EXHANDLERS is not set +CONFIG_LOONGSON_UART_BASE=y CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_ARCH_HAS_ILOG2_U32 is not set # CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_SUPPORTS_OPROFILE=y CONFIG_GENERIC_FIND_NEXT_BIT=y CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_GENERIC_TIME=y -CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_ARC=y CONFIG_DMA_COHERENT=y -CONFIG_EARLY_PRINTK=y CONFIG_SYS_HAS_EARLY_PRINTK=y # CONFIG_NO_IOPORT is not set CONFIG_CPU_BIG_ENDIAN=y # CONFIG_CPU_LITTLE_ENDIAN is not set CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y +CONFIG_DEFAULT_SGI_PARTITION=y CONFIG_MIPS_L1_CACHE_SHIFT=7 CONFIG_ARC64=y CONFIG_BOOT_ELF64=y @@ -66,7 +81,8 @@ CONFIG_BOOT_ELF64=y # # CPU selection # -# CONFIG_CPU_LOONGSON2 is not set +# CONFIG_CPU_LOONGSON2E is not set +# CONFIG_CPU_LOONGSON2F is not set # CONFIG_CPU_MIPS32_R1 is not set # CONFIG_CPU_MIPS32_R2 is not set # CONFIG_CPU_MIPS64_R1 is not set @@ -79,6 +95,7 @@ CONFIG_BOOT_ELF64=y # CONFIG_CPU_TX49XX is not set # CONFIG_CPU_R5000 is not set # CONFIG_CPU_R5432 is not set +# CONFIG_CPU_R5500 is not set # CONFIG_CPU_R6000 is not set # CONFIG_CPU_NEVADA is not set # CONFIG_CPU_R8000 is not set @@ -86,6 +103,7 @@ CONFIG_CPU_R10000=y # CONFIG_CPU_RM7000 is not set # CONFIG_CPU_RM9000 is not set # CONFIG_CPU_SB1 is not set +# CONFIG_CPU_CAVIUM_OCTEON is not set CONFIG_SYS_HAS_CPU_R10000=y CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y @@ -99,6 +117,7 @@ CONFIG_64BIT=y CONFIG_PAGE_SIZE_4KB=y # CONFIG_PAGE_SIZE_8KB is not set # CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_32KB is not set # CONFIG_PAGE_SIZE_64KB is not set CONFIG_CPU_HAS_PREFETCH=y CONFIG_MIPS_MT_DISABLED=y @@ -110,6 +129,7 @@ CONFIG_GENERIC_IRQ_PROBE=y CONFIG_IRQ_PER_CPU=y CONFIG_CPU_SUPPORTS_HIGHMEM=y CONFIG_ARCH_DISCONTIGMEM_ENABLE=y +CONFIG_ARCH_POPULATES_NODE_MAP=y CONFIG_NUMA=y CONFIG_SYS_SUPPORTS_NUMA=y CONFIG_NODES_SHIFT=6 @@ -120,16 +140,22 @@ CONFIG_DISCONTIGMEM_MANUAL=y CONFIG_DISCONTIGMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_NEED_MULTIPLE_NODES=y -# CONFIG_SPARSEMEM_STATIC is not set +CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_MIGRATION=y -CONFIG_RESOURCES_64BIT=y +CONFIG_PHYS_ADDR_T_64BIT=y CONFIG_ZONE_DMA_FLAG=0 CONFIG_VIRT_TO_BUS=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_SMP=y CONFIG_SYS_SUPPORTS_SMP=y CONFIG_NR_CPUS_DEFAULT_64=y CONFIG_NR_CPUS=64 +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y # CONFIG_HZ_48 is not set # CONFIG_HZ_100 is not set # CONFIG_HZ_128 is not set @@ -142,13 +168,13 @@ CONFIG_HZ=1000 CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set -CONFIG_PREEMPT_BKL=y # CONFIG_MIPS_INSANE_LARGE is not set # CONFIG_KEXEC is not set CONFIG_SECCOMP=y CONFIG_LOCKDEP_SUPPORT=y CONFIG_STACKTRACE_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -162,20 +188,41 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y 
+CONFIG_POSIX_MQUEUE_SYSCTL=y # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set -# CONFIG_USER_NS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=64 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 +# CONFIG_GROUP_SCHED is not set CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +# CONFIG_CGROUP_FREEZER is not set +# CONFIG_CGROUP_DEVICE is not set CONFIG_CPUSETS=y -CONFIG_SYSFS_DEPRECATED=y +CONFIG_PROC_PID_CPUSET=y +# CONFIG_CGROUP_CPUACCT is not set +# CONFIG_RESOURCE_COUNTERS is not set +# CONFIG_SYSFS_DEPRECATED_V2 is not set CONFIG_RELAY=y +# CONFIG_NAMESPACES is not set # CONFIG_BLK_DEV_INITRD is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y CONFIG_EMBEDDED=y CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y @@ -184,44 +231,92 @@ CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y +# CONFIG_PCSPKR_PLATFORM is not set CONFIG_BASE_FULL=y CONFIG_FUTEX=y -CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y +CONFIG_PCI_QUIRKS=y +CONFIG_COMPAT_BRK=y CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_SYSCALL_WRAPPERS=y +CONFIG_USE_GENERIC_SMP_HELPERS=y + +# +# GCOV-based kernel profiling +# +CONFIG_SLOW_WORK=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_KMOD=y CONFIG_STOP_MACHINE=y CONFIG_BLOCK=y -# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_CGROUP is not set +CONFIG_BLOCK_COMPAT=y # # IO Schedulers # CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y -CONFIG_DEFAULT_AS=y +# CONFIG_CFQ_GROUP_IOSCHED is not set # CONFIG_DEFAULT_DEADLINE is not set -# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="anticipatory" +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +CONFIG_INLINE_SPIN_UNLOCK=y +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +CONFIG_INLINE_READ_UNLOCK=y +# CONFIG_INLINE_READ_UNLOCK_BH is not set +CONFIG_INLINE_READ_UNLOCK_IRQ=y +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is 
not set +CONFIG_INLINE_WRITE_UNLOCK=y +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +CONFIG_MUTEX_SPIN_ON_OWNER=y +# CONFIG_FREEZER is not set # # Bus options (PCI, PCMCIA, EISA, ISA, TC) @@ -230,11 +325,10 @@ CONFIG_HW_HAS_PCI=y CONFIG_PCI=y CONFIG_PCI_DOMAINS=y # CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCI_LEGACY is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set CONFIG_MMU=y - -# -# PCCARD (PCMCIA/CardBus) support -# # CONFIG_PCCARD is not set # CONFIG_HOTPLUG_PCI is not set @@ -242,8 +336,9 @@ CONFIG_MMU=y # Executable file formats # CONFIG_BINFMT_ELF=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +# CONFIG_HAVE_AOUT is not set # CONFIG_BINFMT_MISC is not set -# CONFIG_BUILD_ELF64 is not set CONFIG_MIPS32_COMPAT=y CONFIG_COMPAT=y CONFIG_SYSVIPC_COMPAT=y @@ -255,13 +350,10 @@ CONFIG_BINFMT_ELF32=y # Power management options # CONFIG_PM=y -# CONFIG_PM_LEGACY is not set # CONFIG_PM_DEBUG is not set - -# -# Networking -# +# CONFIG_PM_RUNTIME is not set CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y # # Networking options @@ -273,6 +365,8 @@ CONFIG_XFRM=y CONFIG_XFRM_USER=m # CONFIG_XFRM_SUB_POLICY is not set CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m CONFIG_NET_KEY=y CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y @@ -292,19 +386,40 @@ CONFIG_IP_PNP=y # CONFIG_INET_ESP is not set # CONFIG_INET_IPCOMP is not set # CONFIG_INET_XFRM_TUNNEL is not set -# CONFIG_INET_TUNNEL is not set +CONFIG_INET_TUNNEL=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_LRO=y CONFIG_INET_DIAG=y CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" CONFIG_TCP_MD5SIG=y -# CONFIG_IPV6 is not set -# CONFIG_INET6_XFRM_TUNNEL is not set -# CONFIG_INET6_TUNNEL is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_PIMSM_V2=y CONFIG_NETWORK_SECMARK=y # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set @@ -314,9 +429,11 @@ CONFIG_IP_SCTP=m # CONFIG_SCTP_HMAC_NONE is not set # CONFIG_SCTP_HMAC_SHA1 is not set CONFIG_SCTP_HMAC_MD5=y +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set # CONFIG_DECNET is not set # CONFIG_LLC2 is not set @@ -326,12 +443,9 @@ CONFIG_SCTP_HMAC_MD5=y # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set - -# -# QoS and/or fair queueing -# +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set CONFIG_NET_SCHED=y -CONFIG_NET_SCH_FIFO=y # # Queueing/Scheduling @@ -340,7 +454,7 @@ CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_RR=m +CONFIG_NET_SCH_MULTIQ=y CONFIG_NET_SCH_RED=m CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_TEQL=m @@ -348,6 +462,7 @@ CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_DSMARK=m CONFIG_NET_SCH_NETEM=m +# 
CONFIG_NET_SCH_DRR is not set CONFIG_NET_SCH_INGRESS=m # @@ -364,41 +479,63 @@ CONFIG_NET_CLS_U32=m CONFIG_CLS_U32_MARK=y CONFIG_NET_CLS_RSVP=m CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y # CONFIG_NET_EMATCH is not set CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=y CONFIG_NET_ACT_GACT=m CONFIG_GACT_PROB=y CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_NAT=m CONFIG_NET_ACT_PEDIT=m # CONFIG_NET_ACT_SIMP is not set -CONFIG_NET_CLS_POLICE=y +CONFIG_NET_ACT_SKBEDIT=m # CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set - -# -# Wireless -# -CONFIG_CFG80211=m +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_SPY=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_WIRELESS_OLD_REGULATORY is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +CONFIG_LIB80211=m +CONFIG_LIB80211_CRYPT_WEP=m +CONFIG_LIB80211_CRYPT_CCMP=m +CONFIG_LIB80211_CRYPT_TKIP=m +# CONFIG_LIB80211_DEBUG is not set CONFIG_MAC80211=m -# CONFIG_MAC80211_DEBUG is not set -CONFIG_IEEE80211=m -# CONFIG_IEEE80211_DEBUG is not set -CONFIG_IEEE80211_CRYPT_WEP=m -CONFIG_IEEE80211_CRYPT_CCMP=m -CONFIG_IEEE80211_CRYPT_TKIP=m -CONFIG_IEEE80211_SOFTMAC=m -# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set +CONFIG_MAC80211_RC_PID=y +CONFIG_MAC80211_RC_MINSTREL=y +# CONFIG_MAC80211_RC_DEFAULT_PID is not set +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUG_MENU is not set +# CONFIG_WIMAX is not set CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y # CONFIG_NET_9P is not set # @@ -408,9 +545,13 @@ CONFIG_RFKILL=m # # Generic Driver Options # +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set CONFIG_STANDALONE=y CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" # CONFIG_SYS_HYPERVISOR is not set CONFIG_CONNECTOR=m # CONFIG_MTD is not set @@ -423,14 +564,19 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=m +# CONFIG_BLK_DEV_DRBD is not set # CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_OSD=m # CONFIG_BLK_DEV_SX8 is not set # CONFIG_BLK_DEV_RAM is not set CONFIG_CDROM_PKTCDVD=m CONFIG_CDROM_PKTCDVD_BUFFERS=8 # CONFIG_CDROM_PKTCDVD_WCACHE is not set CONFIG_ATA_OVER_ETH=m +# CONFIG_BLK_DEV_HD is not set # CONFIG_MISC_DEVICES is not set +CONFIG_EEPROM_93CX6=m +CONFIG_HAVE_IDE=y # CONFIG_IDE is not set # @@ -453,10 +599,6 @@ CONFIG_BLK_DEV_SR=m CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m - -# -# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs -# # CONFIG_SCSI_MULTI_LUN is not set CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y @@ -471,11 +613,18 @@ CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_ISCSI_ATTRS=m CONFIG_SCSI_SAS_ATTRS=m CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_HOST_SMP=y # CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +# CONFIG_SCSI_SRP_ATTRS is not set CONFIG_SCSI_LOWLEVEL=y # CONFIG_ISCSI_TCP is not set +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_BE2ISCSI=m # CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m # CONFIG_SCSI_3W_9XXX is not set +CONFIG_SCSI_3W_SAS=m # CONFIG_SCSI_ACARD is not set # CONFIG_SCSI_AACRAID is not set # CONFIG_SCSI_AIC7XXX is not set @@ -483,11 +632,21 @@ CONFIG_SCSI_LOWLEVEL=y # CONFIG_SCSI_AIC79XX is not set CONFIG_SCSI_AIC94XX=m # CONFIG_AIC94XX_DEBUG is not set +CONFIG_SCSI_MVSAS=m +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_DPT_I2O=m +# CONFIG_SCSI_ADVANSYS is not set # CONFIG_SCSI_ARCMSR is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set # CONFIG_MEGARAID_SAS is not set +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS_LOGGING is not set # CONFIG_SCSI_HPTIOP is not set +CONFIG_LIBFC=m +# CONFIG_LIBFCOE is not set +# CONFIG_FCOE is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_FUTURE_DOMAIN is not set # CONFIG_SCSI_IPS is not set @@ -502,16 +661,31 @@ CONFIG_SCSI_QLOGIC_1280=y # CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_DC390T is not set # CONFIG_SCSI_DEBUG is not set +CONFIG_SCSI_PMCRAID=m +# CONFIG_SCSI_PM8001 is not set # CONFIG_SCSI_SRP is not set +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_DH=m +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_HP_SW=m +CONFIG_SCSI_DH_EMC=m +CONFIG_SCSI_DH_ALUA=m +CONFIG_SCSI_OSD_INITIATOR=m +CONFIG_SCSI_OSD_ULD=m +CONFIG_SCSI_OSD_DPRINT_SENSE=1 +# CONFIG_SCSI_OSD_DEBUG is not set # CONFIG_ATA is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y CONFIG_MD_LINEAR=m CONFIG_MD_RAID0=y CONFIG_MD_RAID1=y CONFIG_MD_RAID10=m CONFIG_MD_RAID456=y -CONFIG_MD_RAID5_RESHAPE=y +# CONFIG_MULTICORE_RAID456 is not set +CONFIG_MD_RAID6_PQ=y +# CONFIG_ASYNC_RAID6_TEST is not set CONFIG_MD_MULTIPATH=m CONFIG_MD_FAULTY=m CONFIG_BLK_DEV_DM=m @@ -519,36 +693,39 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_EMC=m -CONFIG_DM_MULTIPATH_RDAC=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m # CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +# CONFIG_FUSION is not set # -# Fusion MPT device support +# IEEE 1394 (FireWire) support # -# CONFIG_FUSION is not set -# CONFIG_FUSION_SPI is not set -# CONFIG_FUSION_FC is not set -# CONFIG_FUSION_SAS is not set # -# IEEE 1394 (FireWire) support +# You can enable one or both FireWire driver stacks. +# + +# +# The newer stack is recommended. 
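
The MD options enabled above (CONFIG_MD_RAID0/1/10/456 together with CONFIG_MD_RAID6_PQ) all build their redundancy on parity; for RAID4/5 that parity is a plain XOR across the data blocks of a stripe. A minimal userspace sketch of the idea follows (it is not the kernel's optimized xor_blocks() or RAID6 recovery code, and the block/stripe sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define NDATA 3     /* data blocks per stripe (illustrative) */
#define BLK   16    /* block size in bytes (illustrative)    */

/* parity = d0 ^ d1 ^ ... ^ d(NDATA-1), byte by byte */
static void xor_parity(unsigned char parity[BLK],
                       unsigned char data[NDATA][BLK])
{
    memset(parity, 0, BLK);
    for (int i = 0; i < NDATA; i++)
        for (int j = 0; j < BLK; j++)
            parity[j] ^= data[i][j];
}

int main(void)
{
    unsigned char data[NDATA][BLK] = {
        "block-zero....", "block-one.....", "block-two.....",
    };
    unsigned char parity[BLK], rebuilt[BLK];

    xor_parity(parity, data);

    /* "lose" block 1, then rebuild it from parity and the survivors */
    memcpy(rebuilt, parity, BLK);
    for (int j = 0; j < BLK; j++)
        rebuilt[j] ^= data[0][j] ^ data[2][j];

    printf("rebuilt block 1 intact: %s\n",
           memcmp(rebuilt, data[1], BLK) == 0 ? "yes" : "no");
    return 0;
}
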
# # CONFIG_FIREWIRE is not set # CONFIG_IEEE1394 is not set # CONFIG_I2O is not set CONFIG_NETDEVICES=y -CONFIG_NETDEVICES_MULTIQUEUE=y CONFIG_IFB=m # CONFIG_DUMMY is not set # CONFIG_BONDING is not set CONFIG_MACVLAN=m # CONFIG_EQUALIZER is not set # CONFIG_TUN is not set +CONFIG_VETH=m # CONFIG_ARCNET is not set -CONFIG_PHYLIB=m +CONFIG_PHYLIB=y # # MII PHY device drivers @@ -562,23 +739,51 @@ CONFIG_VITESSE_PHY=m CONFIG_SMSC_PHY=m # CONFIG_BROADCOM_PHY is not set CONFIG_ICPLUS_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_STE10XP=m +CONFIG_LSI_ET1011C_PHY=m # CONFIG_FIXED_PHY is not set +CONFIG_MDIO_BITBANG=m CONFIG_NET_ETHERNET=y CONFIG_MII=y CONFIG_AX88796=m +CONFIG_AX88796_93CX6=y CONFIG_SGI_IOC3_ETH=y # CONFIG_HAPPYMEAL is not set # CONFIG_SUNGEM is not set # CONFIG_CASSINI is not set # CONFIG_NET_VENDOR_3COM is not set +CONFIG_SMC91X=m # CONFIG_DM9000 is not set +CONFIG_ETHOC=m +CONFIG_SMSC911X=m +CONFIG_DNET=m # CONFIG_NET_TULIP is not set # CONFIG_HP100 is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_NET_PCI is not set +CONFIG_B44=m +CONFIG_B44_PCI_AUTOSELECT=y +CONFIG_B44_PCICORE_AUTOSELECT=y +CONFIG_B44_PCI=y +CONFIG_KS8842=m +CONFIG_KS8851_MLL=m +CONFIG_ATL2=m CONFIG_NETDEV_1000=y # CONFIG_ACENIC is not set # CONFIG_DL2K is not set # CONFIG_E1000 is not set +CONFIG_E1000E=m +CONFIG_IP1000=m +CONFIG_IGB=m +CONFIG_IGBVF=m # CONFIG_NS83820 is not set # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set @@ -588,24 +793,75 @@ CONFIG_NETDEV_1000=y # CONFIG_SKY2 is not set CONFIG_VIA_VELOCITY=m # CONFIG_TIGON3 is not set -# CONFIG_BNX2 is not set +CONFIG_BNX2=m +CONFIG_CNIC=m CONFIG_QLA3XXX=m # CONFIG_ATL1 is not set +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_JME=m CONFIG_NETDEV_10000=y +CONFIG_MDIO=m # CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3_DEPENDS=y CONFIG_CHELSIO_T3=m +CONFIG_ENIC=m +CONFIG_IXGBE=m # CONFIG_IXGB is not set # CONFIG_S2IO is not set +CONFIG_VXGE=m +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set # CONFIG_MYRI10GE is not set CONFIG_NETXEN_NIC=m -# CONFIG_MLX4_CORE is not set +CONFIG_NIU=m +CONFIG_MLX4_EN=m +CONFIG_MLX4_CORE=m +# CONFIG_MLX4_DEBUG is not set +CONFIG_TEHUTI=m +CONFIG_BNX2X=m +CONFIG_QLGE=m +CONFIG_SFC=m +CONFIG_BE2NET=m # CONFIG_TR is not set - -# -# Wireless LAN -# -# CONFIG_WLAN_PRE80211 is not set -CONFIG_WLAN_80211=y +CONFIG_WLAN=y +CONFIG_LIBERTAS_THINFIRM=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m +CONFIG_PRISM54=m +CONFIG_RTL8180=m +CONFIG_ADM8211=m +# CONFIG_MAC80211_HWSIM is not set +CONFIG_MWL8K=m +CONFIG_ATH_COMMON=m +# CONFIG_ATH_DEBUG is not set +CONFIG_ATH5K=m +# CONFIG_ATH5K_DEBUG is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K=m +CONFIG_B43=m +CONFIG_B43_PCI_AUTOSELECT=y +CONFIG_B43_PCICORE_AUTOSELECT=y +CONFIG_B43_PHY_LP=y +CONFIG_B43_LEDS=y +CONFIG_B43_HWRNG=y +# CONFIG_B43_DEBUG is not set +CONFIG_B43LEGACY=m +CONFIG_B43LEGACY_PCI_AUTOSELECT=y +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y +CONFIG_B43LEGACY_LEDS=y +CONFIG_B43LEGACY_HWRNG=y +# CONFIG_B43LEGACY_DEBUG is not set +CONFIG_B43LEGACY_DMA=y +CONFIG_B43LEGACY_PIO=y +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y +# CONFIG_B43LEGACY_DMA_MODE is not set +# CONFIG_B43LEGACY_PIO_MODE is not set +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m 
+CONFIG_HOSTAP_PCI=m CONFIG_IPW2100=m CONFIG_IPW2100_MONITOR=y CONFIG_IPW2100_DEBUG=y @@ -615,38 +871,57 @@ CONFIG_IPW2200_RADIOTAP=y CONFIG_IPW2200_PROMISCUOUS=y CONFIG_IPW2200_QOS=y CONFIG_IPW2200_DEBUG=y +CONFIG_LIBIPW=m +# CONFIG_LIBIPW_DEBUG is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT=y +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLAGN=m +CONFIG_IWL4965=y +CONFIG_IWL5000=y +CONFIG_IWL3945=m +CONFIG_IWL3945_SPECTRUM_MEASUREMENT=y CONFIG_LIBERTAS=m # CONFIG_LIBERTAS_DEBUG is not set CONFIG_HERMES=m +# CONFIG_HERMES_CACHE_FW_ON_INIT is not set CONFIG_PLX_HERMES=m CONFIG_TMD_HERMES=m CONFIG_NORTEL_HERMES=m CONFIG_PCI_HERMES=m -CONFIG_ATMEL=m -CONFIG_PCI_ATMEL=m -CONFIG_PRISM54=m -CONFIG_HOSTAP=m -CONFIG_HOSTAP_FIRMWARE=y -CONFIG_HOSTAP_FIRMWARE_NVRAM=y -CONFIG_HOSTAP_PLX=m -CONFIG_HOSTAP_PCI=m -CONFIG_BCM43XX=m -CONFIG_BCM43XX_DEBUG=y -CONFIG_BCM43XX_DMA=y -CONFIG_BCM43XX_PIO=y -CONFIG_BCM43XX_DMA_AND_PIO_MODE=y -# CONFIG_BCM43XX_DMA_MODE is not set -# CONFIG_BCM43XX_PIO_MODE is not set +CONFIG_P54_COMMON=m +CONFIG_P54_PCI=m +CONFIG_P54_LEDS=y +CONFIG_RT2X00=m +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT61PCI=m +CONFIG_RT2800PCI_PCI=m +CONFIG_RT2800PCI=m +CONFIG_RT2800_LIB=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_HT=y +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WL12XX=m +CONFIG_WL1251=m + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# # CONFIG_WAN is not set # CONFIG_FDDI is not set # CONFIG_HIPPI is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set # CONFIG_NET_FC is not set -# CONFIG_SHAPER is not set # CONFIG_NETCONSOLE is not set # CONFIG_NETPOLL is not set # CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_VMXNET3 is not set # CONFIG_ISDN is not set # CONFIG_PHONE is not set @@ -664,13 +939,16 @@ CONFIG_SERIO_SERPORT=y # CONFIG_SERIO_PCIPS2 is not set CONFIG_SERIO_LIBPS2=m CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m # CONFIG_GAMEPORT is not set # # Character devices # # CONFIG_VT is not set +CONFIG_DEVKMEM=y # CONFIG_SERIAL_NONSTANDARD is not set +CONFIG_NOZOMI=m # # Serial drivers @@ -693,95 +971,258 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_JSM is not set CONFIG_UNIX98_PTYS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 # CONFIG_IPMI_HANDLER is not set -# CONFIG_WATCHDOG is not set CONFIG_HW_RANDOM=m -# CONFIG_RTC is not set +CONFIG_HW_RANDOM_TIMERIOMEM=m # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set -# CONFIG_DRM is not set # CONFIG_RAW_DRIVER is not set # CONFIG_TCG_TPM is not set CONFIG_DEVPORT=y -# CONFIG_I2C is not set +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +CONFIG_I2C_ALI1535=m +CONFIG_I2C_ALI1563=m +CONFIG_I2C_ALI15X3=m +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS5595=m +CONFIG_I2C_SIS630=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m # -# SPI support +# I2C system bus drivers (mostly embedded / system-on-chip) # +CONFIG_I2C_OCORES=m +CONFIG_I2C_SIMTEC=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_PARPORT_LIGHT=m +CONFIG_I2C_TAOS_EVM=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_STUB=m + +# +# 
Miscellaneous I2C Chip support +# +CONFIG_SENSORS_TSL2550=m +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set # CONFIG_SPI is not set -# CONFIG_SPI_MASTER is not set + +# +# PPS support +# +CONFIG_PPS=m +# CONFIG_PPS_DEBUG is not set # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set +CONFIG_THERMAL=m +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y # -# Multifunction device drivers -# -# CONFIG_MFD_SM501 is not set +# Sonics Silicon Backplane +# +CONFIG_SSB=m +CONFIG_SSB_SPROM=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_B43_PCI_BRIDGE=y +# CONFIG_SSB_SILENT is not set +# CONFIG_SSB_DEBUG is not set +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +# CONFIG_SSB_DRIVER_MIPS is not set # -# Multimedia devices +# Multifunction device drivers # -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_DAB is not set +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_WM8400 is not set +CONFIG_MFD_WM8350=m +CONFIG_MFD_WM8350_I2C=m +CONFIG_MFD_PCF50633=m +CONFIG_PCF50633_ADC=m +CONFIG_PCF50633_GPIO=m +CONFIG_AB3100_CORE=m +CONFIG_AB3100_OTP=m +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support # -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set - -# -# Display device support -# -# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_VGA_ARB is not set +# CONFIG_DRM is not set # CONFIG_VGASTATE is not set # CONFIG_VIDEO_OUTPUT_CONTROL is not set # CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set # -# Sound +# Display device support # +# CONFIG_DISPLAY_SUPPORT is not set # CONFIG_SOUND is not set CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y CONFIG_USB_ARCH_HAS_EHCI=y # CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# Enable Host or Gadget support to see Inventra options # # -# USB Gadget Support +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +# CONFIG_UWB is not set # CONFIG_MMC is not set -# CONFIG_NEW_LEDS is not set -# CONFIG_INFINIBAND is not set -# CONFIG_RTC_CLASS is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=m + +# +# LED drivers +# +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_PCA955X=m +CONFIG_LEDS_WM8350=m +CONFIG_LEDS_BD2802=m + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m # -# DMA Engine support +# iptables trigger is under Netfilter config (LED target) # -# CONFIG_DMA_ENGINE is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set # -# DMA Clients +# RTC interfaces # +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set # -# DMA Devices +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# 
CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# + # +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=y +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_WM8350 is not set +# CONFIG_RTC_DRV_PCF50633 is not set +CONFIG_RTC_DRV_AB3100=m # -# Userspace I/O +# on-CPU RTC drivers # +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set CONFIG_UIO=y # CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +CONFIG_UIO_SMX=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m + +# +# TI VLYNQ +# +# CONFIG_STAGING is not set # # File systems @@ -792,36 +1233,58 @@ CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y # CONFIG_EXT2_FS_XIP is not set CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_XATTR=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -# CONFIG_EXT4DEV_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_XATTR=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set CONFIG_JBD=y -CONFIG_JBD_DEBUG=y +CONFIG_JBD2=y CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set CONFIG_FS_POSIX_ACL=y CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y -CONFIG_XFS_SECURITY=y CONFIG_XFS_POSIX_ACL=y # CONFIG_XFS_RT is not set +# CONFIG_XFS_DEBUG is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_ROMFS_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y # CONFIG_QUOTA is not set +CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QUOTACTL=y -CONFIG_DNOTIFY=y CONFIG_AUTOFS_FS=m # CONFIG_AUTOFS4_FS is not set CONFIG_FUSE_FS=m +CONFIG_CUSE=m CONFIG_GENERIC_ACL=y # +# Caches +# +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# # CD-ROM/DVD Filesystems # # CONFIG_ISO9660_FS is not set @@ -840,16 +1303,13 @@ CONFIG_GENERIC_ACL=y CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y # CONFIG_HUGETLB_PAGE is not set -CONFIG_RAMFS=y CONFIG_CONFIGFS_FS=m - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_ECRYPT_FS is not set @@ -859,28 +1319,32 @@ CONFIG_CONFIGFS_FS=m # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set # CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +CONFIG_OMFS_FS=m # CONFIG_HPFS_FS is not 
set # CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set - -# -# Network File Systems -# +CONFIG_EXOFS_FS=m +# CONFIG_EXOFS_DEBUG is not set +CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y # CONFIG_NFS_V3_ACL is not set # CONFIG_NFS_V4 is not set -# CONFIG_NFS_DIRECTIO is not set -# CONFIG_NFSD is not set # CONFIG_ROOT_NFS is not set +# CONFIG_NFSD is not set CONFIG_LOCKD=y CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=m CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y -# CONFIG_SUNRPC_BIND34 is not set CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -910,35 +1374,37 @@ CONFIG_SGI_PARTITION=y # CONFIG_KARMA_PARTITION is not set # CONFIG_EFI_PARTITION is not set # CONFIG_SYSV68_PARTITION is not set - -# -# Native Language Support -# # CONFIG_NLS is not set - -# -# Distributed Lock Manager -# CONFIG_DLM=m # CONFIG_DLM_DEBUG is not set # -# Profiling support -# -# CONFIG_PROFILING is not set - -# # Kernel hacking # CONFIG_TRACE_IRQFLAGS_SUPPORT=y # CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set # CONFIG_DEBUG_FS is not set # CONFIG_HEADERS_CHECK is not set # CONFIG_DEBUG_KERNEL is not set -CONFIG_CROSSCOMPILE=y +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_EARLY_PRINTK=y # CONFIG_CMDLINE_BOOL is not set # @@ -947,65 +1413,140 @@ CONFIG_CROSSCOMPILE=y CONFIG_KEYS=y CONFIG_KEYS_DEBUG_PROC_KEYS=y # CONFIG_SECURITY is not set -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m +CONFIG_SECURITYFS=y +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_SMACK is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y +CONFIG_ASYNC_RAID6_RECOV=y CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ABLKCIPHER=m +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_SEQIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CTR=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m + +# +# Hash modes +# CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=m +CONFIG_CRYPTO_GHASH=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m 
+CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA1=m CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_GF128MUL=m -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# CONFIG_CRYPTO_AES=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_CRC32C=m -CONFIG_CRYPTO_CAMELLIA=m -# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_ZLIB=m +CONFIG_CRYPTO_LZO=m + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_HIFN_795X=m +# CONFIG_CRYPTO_DEV_HIFN_795X_RNG is not set +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m -# CONFIG_CRC16 is not set -# CONFIG_CRC_ITU_T is not set +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=m +CONFIG_CRC_ITU_T=m CONFIG_CRC32=y -# CONFIG_CRC7 is not set +CONFIG_CRC7=m CONFIG_LIBCRC32C=m CONFIG_ZLIB_INFLATE=m CONFIG_ZLIB_DEFLATE=m -CONFIG_PLIST=y +CONFIG_LZO_COMPRESS=m +CONFIG_LZO_DECOMPRESS=m CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 9c187a6..758ad42 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -287,9 +287,9 @@ static inline int __cpu_has_fpu(void) static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) { #ifdef __NEED_VMBITS_PROBE - write_c0_entryhi(0x3ffffffffffff000ULL); + write_c0_entryhi(0x3fffffffffffe000ULL); back_to_back_c0_hazard(); - c->vmbits = fls64(read_c0_entryhi() & 0x3ffffffffffff000ULL); + c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL); #endif } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 338dfe8..31b204b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1501,6 +1501,7 @@ void __cpuinit per_cpu_trap_init(void) cp0_perfcount_irq = -1; } else { cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; + cp0_compare_irq_shift = cp0_compare_irq; cp0_perfcount_irq = -1; } diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index 94e05e5..e06f1af 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c @@ -174,7 +174,7 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma, * Probe Octeon's caches * */ -static void __devinit probe_octeon(void) +static void __cpuinit probe_octeon(void) { unsigned long icache_size; unsigned long dcache_size; @@ -235,7 +235,7 @@ static void __devinit probe_octeon(void) * Setup the Octeon cache flush routines * */ -void __devinit octeon_cache_init(void) +void __cpuinit octeon_cache_init(void) { extern unsigned long ebase; extern char except_vec2_octeon; diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 102b2df..e716caf 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ 
-155,7 +155,7 @@ static inline void setup_protection_map(void) protection_map[15] = PAGE_SHARED; } -void __devinit cpu_cache_init(void) +void __cpuinit cpu_cache_init(void) { if (cpu_has_3k_cache) { extern void __weak r3k_cache_init(void); diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c index 46f0069..31e2583 100644 --- a/arch/mips/sni/rm200.c +++ b/arch/mips/sni/rm200.c @@ -404,7 +404,7 @@ void __init sni_rm200_i8259_irqs(void) if (!rm200_pic_master) return; rm200_pic_slave = ioremap_nocache(0x160000a0, 4); - if (!rm200_pic_master) { + if (!rm200_pic_slave) { iounmap(rm200_pic_master); return; } diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index fb37ac5..35c827e 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -468,7 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - tracehook_signal_handler(sig, info, ka, regs, 0); + tracehook_signal_handler(sig, info, ka, regs, + test_thread_flag(TIF_SINGLESTEP) || + test_thread_flag(TIF_BLOCKSTEP)); return 1; } diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 282d930..1ec0657 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, if (huge) { #ifdef CONFIG_HUGETLB_PAGE psize = get_slice_psize(mm, addr); + /* Mask the address for the correct page size */ + addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1); #else BUG(); psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ #endif - } else + } else { psize = pte_pagesize_index(mm, addr, pte); + /* Mask the address for the standard page size. If we + * have a 64k page kernel, but the hardware does not + * support 64k pages, this might be different from the + * hardware page size encoded in the slice table. 
*/ + addr &= PAGE_MASK; + } - /* Mask the address for the correct page size */ - addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1); /* Build full vaddr */ if (!is_kernel_addr(addr)) { diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 1ee66db..f5f7919 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr) { struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); - BUG_ON(os_cppr->index != 0); + /* + * we only really want to set the priority when there's + * just one cppr value on the stack + */ + WARN_ON(os_cppr->index != 0); - os_cppr->stack[os_cppr->index] = cppr; + os_cppr->stack[0] = cppr; if (firmware_has_feature(FW_FEATURE_LPAR)) lpar_cppr_info(cppr); @@ -821,8 +825,14 @@ void xics_setup_cpu(void) void xics_teardown_cpu(void) { + struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); int cpu = smp_processor_id(); + /* + * we have to reset the cppr index to 0 because we're + * not going to return from the IPI + */ + os_cppr->index = 0; xics_set_cpu_priority(0); /* Clear any pending IPI request */ diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index f2ef4b6..c25dfac 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -293,12 +293,12 @@ struct _lowcore __u64 clock_comparator; /* 0x02d0 */ __u32 machine_flags; /* 0x02d8 */ __u32 ftrace_func; /* 0x02dc */ - __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */ + __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */ /* Interrupt response block */ __u8 irb[64]; /* 0x0300 */ - __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ + __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */ /* * 0xe00 contains the address of the IPL Parameter Information diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 3f7e2a2..f6a389c 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store) mov #1, r5 call_handle_tlbmiss: - setup_frame_reg mov.l 1f, r0 mov r5, r8 mov.l @r0, r6 @@ -365,6 +364,8 @@ handle_exception: mov.l @k2, k2 ! read out vector and keep in k2 handle_exception_special: + setup_frame_reg + ! Setup return address and jump to exception handler mov.l 7f, r9 ! fetch return address stc r2_bank, r0 ! k2 (vector) diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 88d28ec..e511680 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame) mempool_free(frame, dwarf_frame_pool); } +extern void ret_from_irq(void); + /** * dwarf_unwind_stack - unwind the stack * @@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, addr = frame->cfa + reg->addr; frame->return_addr = __raw_readl(addr); + /* + * Ah, the joys of unwinding through interrupts. + * + * Interrupts are tricky - the DWARF info needs to be _really_ + * accurate and unfortunately I'm seeing a lot of bogus DWARF + * info. For example, I've seen interrupts occur in epilogues + * just after the frame pointer (r14) had been restored. The + * problem was that the DWARF info claimed that the CFA could be + * reached by using the value of the frame pointer before it was + * restored. + * + * So until the compiler can be trusted to produce reliable + * DWARF info when it really matters, let's stop unwinding once + * we've calculated the function that was interrupted. 
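
The powerpc hpte_need_flush() hunk just above rounds the faulting address down to the start of its page, either with the per-size mask or with PAGE_MASK, and the sparc signal-frame changes further below use the same idiom to keep the stack 16-byte aligned (sp &= ~15UL). A minimal userspace sketch of that align-down-by-mask idiom, with illustrative shift values rather than the kernel's mmu_psize_defs[] entries:

#include <stdio.h>

/* Round an address down to a 2^shift boundary, the same
 * "addr &= ~((1UL << shift) - 1)" pattern used in the hunk above
 * (PAGE_MASK is this mask with shift == PAGE_SHIFT). */
static unsigned long align_down(unsigned long addr, unsigned int shift)
{
    return addr & ~((1UL << shift) - 1);
}

int main(void)
{
    unsigned long addr = 0x12345678UL;

    printf("4K  page start: 0x%lx\n", align_down(addr, 12)); /* 0x12345000 */
    printf("64K page start: 0x%lx\n", align_down(addr, 16)); /* 0x12340000 */
    printf("16B aligned sp: 0x%lx\n", addr & ~15UL);          /* 0x12345670 */
    return 0;
}
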
+ */ + if (prev && prev->pc == (unsigned long)ret_from_irq) + frame->return_addr = 0; + return frame; bail: diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index f0abd58..2b15ae6 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -70,8 +70,14 @@ ret_from_exception: CFI_STARTPROC simple CFI_DEF_CFA r14, 0 CFI_REL_OFFSET 17, 64 - CFI_REL_OFFSET 15, 0 + CFI_REL_OFFSET 15, 60 CFI_REL_OFFSET 14, 56 + CFI_REL_OFFSET 13, 52 + CFI_REL_OFFSET 12, 48 + CFI_REL_OFFSET 11, 44 + CFI_REL_OFFSET 10, 40 + CFI_REL_OFFSET 9, 36 + CFI_REL_OFFSET 8, 32 preempt_stop() ENTRY(ret_from_irq) ! diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 2830b41..c49865b 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, * Set some valid stack frames to give to the child. */ childstack = (struct sparc_stackf __user *) - (sp & ~0x7UL); + (sp & ~0xfUL); parentstack = (struct sparc_stackf __user *) regs->u_regs[UREG_FP]; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index c3f1cce..cb70476 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -398,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) } else __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); - /* Now 8-byte align the stack as this is mandatory in the - * Sparc ABI due to how register windows work. This hides - * the restriction from thread libraries etc. -DaveM + /* Now align the stack as this is mandatory in the Sparc ABI + * due to how register windows work. This hides the + * restriction from thread libraries etc. */ - csp &= ~7UL; + csp &= ~15UL; distance = fp - psp; rval = (csp - distance); diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index ba5b09a..ea22cd3 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -120,8 +120,8 @@ struct rt_signal_frame32 { }; /* Align macros */ -#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7))) -#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7))) +#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15))) +#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15))) int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) { @@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns sp = current->sas_ss_sp + current->sas_ss_size; } + sp -= framesize; + /* Always align the stack frame. This handles two cases. First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. */ - sp &= ~7UL; + sp &= ~15UL; - return (void __user *)(sp - framesize); + return (void __user *) sp; } static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 7ce1a10..9882df9 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re sp = current->sas_ss_sp + current->sas_ss_size; } + sp -= framesize; + /* Always align the stack frame. This handles two cases. 
First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. */ - sp &= ~7UL; + sp &= ~15UL; - return (void __user *)(sp - framesize); + return (void __user *) sp; } static inline int diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 647afbd..9fa48c3 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -353,7 +353,7 @@ segv: /* Checks if the fp is valid */ static int invalid_frame_pointer(void __user *fp, int fplen) { - if (((unsigned long) fp) & 7) + if (((unsigned long) fp) & 15) return 1; return 0; } @@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs * sp = current->sas_ss_sp + current->sas_ss_size; } + sp -= framesize; + /* Always align the stack frame. This handles two cases. First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. */ - sp &= ~7UL; + sp &= ~15UL; - return (void __user *)(sp - framesize); + return (void __user *) sp; } static inline void diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index ecb544e..e04740f 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -11,9 +11,9 @@ #include <linux/irqflags.h> /* entries in ARCH_DLINFO: */ -#ifdef CONFIG_IA32_EMULATION +#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) # define AT_VECTOR_SIZE_ARCH 2 -#else +#else /* else it's non-compat x86-64 */ # define AT_VECTOR_SIZE_ARCH 1 #endif diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 036d28a..0acbcdf 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1185,9 +1185,6 @@ static void __init acpi_process_madt(void) if (!error) { acpi_lapic = 1; -#ifdef CONFIG_X86_BIGSMP - generic_bigsmp_probe(); -#endif /* * Parse MADT IO-APIC entries */ @@ -1197,8 +1194,6 @@ static void __init acpi_process_madt(void) acpi_ioapic = 1; smp_found_config = 1; - if (apic->setup_apic_routing) - apic->setup_apic_routing(); } } if (error == -EINVAL) { diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 3987e44..dfca210 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1641,9 +1641,7 @@ int __init APIC_init_uniprocessor(void) #endif enable_IR_x2apic(); -#ifdef CONFIG_X86_64 default_setup_apic_routing(); -#endif verify_local_APIC(); connect_bsp_APIC(); @@ -1891,21 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version) if (apicid > max_physical_apicid) max_physical_apicid = apicid; -#ifdef CONFIG_X86_32 - if (num_processors > 8) { - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_INTEL: - if (!APIC_XAPIC(version)) { - def_to_bigsmp = 0; - break; - } - /* If P4 and above fall through */ - case X86_VENDOR_AMD: - def_to_bigsmp = 1; - } - } -#endif - #if defined(CONFIG_SMP) || defined(CONFIG_X86_64) early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 1a6559f..99d2fe0 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -52,7 +52,32 @@ static int __init print_ipi_mode(void) } late_initcall(print_ipi_mode); -void default_setup_apic_routing(void) 
+void __init default_setup_apic_routing(void) +{ + int version = apic_version[boot_cpu_physical_apicid]; + + if (num_possible_cpus() > 8) { + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + if (!APIC_XAPIC(version)) { + def_to_bigsmp = 0; + break; + } + /* If P4 and above fall through */ + case X86_VENDOR_AMD: + def_to_bigsmp = 1; + } + } + +#ifdef CONFIG_X86_BIGSMP + generic_bigsmp_probe(); +#endif + + if (apic->setup_apic_routing) + apic->setup_apic_routing(); +} + +static void setup_apic_flat_routing(void) { #ifdef CONFIG_X86_IO_APIC printk(KERN_INFO @@ -103,7 +128,7 @@ struct apic apic_default = { .init_apic_ldr = default_init_apic_ldr, .ioapic_phys_id_map = default_ioapic_phys_id_map, - .setup_apic_routing = default_setup_apic_routing, + .setup_apic_routing = setup_apic_flat_routing, .multi_timer_check = NULL, .apicid_to_node = default_apicid_to_node, .cpu_to_logical_apicid = default_cpu_to_logical_apicid, diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 450fe20..83e9be4 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -67,7 +67,7 @@ void __init default_setup_apic_routing(void) } #endif - if (apic == &apic_flat && num_processors > 8) + if (apic == &apic_flat && num_possible_cpus() > 8) apic = &apic_physflat; printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index f125e5c..6e44519 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) kfree(data->powernow_table); kfree(data); + per_cpu(powernow_data, pol->cpu) = NULL; return 0; } @@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu) int err; if (!data) - return -EINVAL; + return 0; smp_call_function_single(cpu, query_values_on_cpu, &err, true); if (err) diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 40b54ce..a2c1edd 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) x86_init.mpparse.mpc_record(1); } -#ifdef CONFIG_X86_BIGSMP - generic_bigsmp_probe(); -#endif - - if (apic->setup_apic_routing) - apic->setup_apic_routing(); - if (!num_processors) printk(KERN_ERR "MPTABLE: no processors registered!\n"); return num_processors; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 678d0b8..b4e870c 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1083,9 +1083,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) set_cpu_sibling_map(0); enable_IR_x2apic(); -#ifdef CONFIG_X86_64 default_setup_apic_routing(); -#endif if (smp_sanity_check(max_cpus) < 0) { printk(KERN_INFO "SMP disabled\n"); diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 296aba4..15578f1 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this, return -EOPNOTSUPP; addr &= KVM_PIT_CHANNEL_MASK; + if (addr == 3) + return 0; + s = &pit_state->channels[addr]; mutex_lock(&pit_state->lock); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1ddcad4..a1e1bc9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { static int version; struct pvclock_wall_clock 
wc; - struct timespec now, sys, boot; + struct timespec boot; if (!wall_clock) return; @@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) * wall clock specified here. guest system time equals host * system time for us, thus we must fill in host boot time here. */ - now = current_kernel_time(); - ktime_get_ts(&sys); - boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys)); + getboottime(&boot); wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; @@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) local_irq_save(flags); kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); ktime_get_ts(&ts); + monotonic_to_bootbased(&ts); local_irq_restore(flags); /* With all the info we got, fill in the values */ diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 71da1bc..738e659 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep) #else /* * With get_user_pages_fast, we walk down the pagetables without taking - * any locks. For this we would like to load the pointers atoimcally, + * any locks. For this we would like to load the pointers atomically, * but that is not possible (without expensive cmpxchg8b) on PAE. What * we do have is the guarantee that a pte will only either go from not * present to present, or present to not present or both -- it will not diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 17b768d0..023f4e6 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4; */ #define CFQ_MIN_TT (2) -/* - * Allow merged cfqqs to perform this amount of seeky I/O before - * deciding to break the queues up again. - */ -#define CFQQ_COOP_TOUT (HZ) - #define CFQ_SLICE_SCALE (5) #define CFQ_HW_QUEUE_MIN (5) #define CFQ_SERVICE_SHIFT 12 +#define CFQQ_SEEK_THR 8 * 1024 +#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR) + #define RQ_CIC(rq) \ ((struct cfq_io_context *) (rq)->elevator_private) #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2) @@ -137,7 +134,6 @@ struct cfq_queue { u64 seek_total; sector_t seek_mean; sector_t last_request_pos; - unsigned long seeky_start; pid_t pid; @@ -314,6 +310,7 @@ enum cfqq_state_flags { CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ CFQ_CFQQ_FLAG_sync, /* synchronous queue */ CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ + CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */ CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ }; @@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed); CFQ_CFQQ_FNS(slice_new); CFQ_CFQQ_FNS(sync); CFQ_CFQQ_FNS(coop); +CFQ_CFQQ_FNS(split_coop); CFQ_CFQQ_FNS(deep); CFQ_CFQQ_FNS(wait_busy); #undef CFQ_CFQQ_FNS @@ -1566,6 +1564,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_clear_cfqq_wait_busy(cfqq); /* + * If this cfqq is shared between multiple processes, check to + * make sure that those processes are still issuing I/Os within + * the mean seek distance. If not, it may be time to break the + * queues apart again. 
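
The cfq-iosched changes around here drop the old time-based seeky_start heuristic in favour of comparing a queue's mean seek distance against an 8 KiB threshold (CFQQ_SEEK_THR / CFQQ_SEEKY above); when a shared queue turns seeky it gets marked for splitting, as the lines below do. A small userspace model of that rounded running mean plus threshold check (the constants and the rounding mirror the hunk, but the sample aging here is only illustrative, not the scheduler's bookkeeping):

#include <stdio.h>

#define SEEK_THR    (8 * 1024)  /* bytes; mirrors CFQQ_SEEK_THR */
#define MAX_SAMPLES 8           /* illustrative window size     */

/* Rounded running mean of seek distances: keep a bounded sample
 * count and a running total, and call the stream "seeky" once the
 * mean crosses the threshold. */
struct seek_stats {
    unsigned long samples;
    unsigned long long total;
    unsigned long long mean;
};

static void seek_update(struct seek_stats *s, unsigned long long dist)
{
    if (s->samples < MAX_SAMPLES)
        s->samples++;
    else
        s->total -= s->mean;    /* age out one mean-sized sample */
    s->total += dist;
    s->mean = (s->total + s->samples / 2) / s->samples;  /* rounded */
}

static int seeky(const struct seek_stats *s)
{
    return s->mean > SEEK_THR;
}

int main(void)
{
    struct seek_stats s = { 0, 0, 0 };
    unsigned long long dists[] = { 4096, 4096, 65536, 131072, 262144 };

    for (unsigned i = 0; i < sizeof(dists) / sizeof(dists[0]); i++) {
        seek_update(&s, dists[i]);
        printf("dist=%-7llu mean=%-7llu seeky=%d\n",
               dists[i], s.mean, seeky(&s));
    }
    return 0;
}
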
+ */ + if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq)) + cfq_mark_cfqq_split_coop(cfqq); + + /* * store what was left of this slice, if the queue idled/timed out */ if (timed_out && !cfq_cfqq_slice_new(cfqq)) { @@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, return cfqd->last_position - blk_rq_pos(rq); } -#define CFQQ_SEEK_THR 8 * 1024 -#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR) - static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct request *rq, bool for_preempt) { @@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, total = cfqq->seek_total + (cfqq->seek_samples/2); do_div(total, cfqq->seek_samples); cfqq->seek_mean = (sector_t)total; - - /* - * If this cfqq is shared between multiple processes, check to - * make sure that those processes are still issuing I/Os within - * the mean seek distance. If not, it may be time to break the - * queues apart again. - */ - if (cfq_cfqq_coop(cfqq)) { - if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start) - cfqq->seeky_start = jiffies; - else if (!CFQQ_SEEKY(cfqq)) - cfqq->seeky_start = 0; - } } /* @@ -3453,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic, return cic_to_cfqq(cic, 1); } -static int should_split_cfqq(struct cfq_queue *cfqq) -{ - if (cfqq->seeky_start && - time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT)) - return 1; - return 0; -} - /* * Returns NULL if a new cfqq should be allocated, or the old cfqq if this * was the last process referring to said cfqq. @@ -3469,9 +3452,9 @@ static struct cfq_queue * split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq) { if (cfqq_process_refs(cfqq) == 1) { - cfqq->seeky_start = 0; cfqq->pid = current->pid; cfq_clear_cfqq_coop(cfqq); + cfq_clear_cfqq_split_coop(cfqq); return cfqq; } @@ -3510,7 +3493,7 @@ new_queue: /* * If the queue was seeky for too long, break it apart. */ - if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) { + if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) { cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); cfqq = split_cfqq(cic, cfqq); if (!cfqq) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index b8bea10..b343903 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) }, .driver_data = "F.23", /* cutoff BIOS version */ }, + /* + * Acer eMachines G725 has the same problem. BIOS + * V1.03 is known to be broken. V3.04 is known to + * work. Inbetween, there are V1.06, V2.06 and V3.03 + * that we don't have much idea about. For now, + * blacklist anything older than V3.04. + */ + { + .ident = "G725", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), + DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), + }, + .driver_data = "V3.04", /* cutoff BIOS version */ + }, { } /* terminate list */ }; const struct dmi_system_id *dmi = dmi_first_match(sysids); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index f4ea5a8..d096fbc 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * write indication (used for PIO/DMA setup), result TF is * copied back and we don't whine too much about its failure. 
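
The ata_scsi_pass_thru() fix that this comment introduces, where a plain assignment of tf->flags becomes '|=', is the classic assign-versus-OR flags slip: assignment silently discards bits that earlier setup code already placed in the flags word. A tiny sketch of the difference, using made-up flag names rather than the real ATA_TFLAG_* values:

#include <stdio.h>

/* Made-up flag bits standing in for the ATA_TFLAG_* values. */
#define FLAG_LBA48   0x01   /* imagine earlier setup code set this */
#define FLAG_ISADDR  0x02
#define FLAG_DEVICE  0x04

int main(void)
{
    unsigned int assigned = FLAG_LBA48;   /* bit set by earlier code */
    unsigned int ored     = FLAG_LBA48;

    /* buggy pattern: '=' discards FLAG_LBA48 */
    assigned = FLAG_ISADDR | FLAG_DEVICE;

    /* fixed pattern from the hunk below: '|=' preserves it */
    ored |= FLAG_ISADDR | FLAG_DEVICE;

    printf("with '=' : 0x%x (LBA48 lost)\n", assigned);  /* 0x6 */
    printf("with '|=': 0x%x (LBA48 kept)\n", ored);      /* 0x7 */
    return 0;
}
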
*/ - tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; if (scmd->sc_data_direction == DMA_TO_DEVICE) tf->flags |= ATA_TFLAG_WRITE; diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 741065c..730ef3c 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); } + if (!do_write) + flush_dcache_page(page); + qc->curbytes += qc->sect_size; qc->cursg_ofs += qc->sect_size; diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 873e594..9291614 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v) if (*pos > h->highest_lun) return 0; + if (drv == NULL) /* it's possible for h->drv[] to have holes. */ + return 0; + if (drv->heads == 0) return 0; diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index f36defa..57d965b 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, exit: sdio_release_host(card->func); + kfree(tmpbuf); return ret; } diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 34cf04e..fd50ead 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -767,16 +767,19 @@ int __init agp_amd64_init(void) static int __init agp_amd64_mod_init(void) { +#ifndef MODULE if (gart_iommu_aperture) return agp_bridges_found ? 0 : -ENODEV; - +#endif return agp_amd64_init(); } static void __exit agp_amd64_cleanup(void) { +#ifndef MODULE if (gart_iommu_aperture) return; +#endif if (aperture_resource) release_resource(aperture_resource); pci_unregister_driver(&agp_amd64_pci_driver); diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index ecba494..f584407 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -39,12 +39,12 @@ struct tpm_inf_dev { int iotype; - void __iomem *mem_base; /* MMIO ioremap'd addr */ - unsigned long map_base; /* phys MMIO base */ - unsigned long map_size; /* MMIO region size */ - unsigned int index_off; /* index register offset */ + void __iomem *mem_base; /* MMIO ioremap'd addr */ + unsigned long map_base; /* phys MMIO base */ + unsigned long map_size; /* MMIO region size */ + unsigned int index_off; /* index register offset */ - unsigned int data_regs; /* Data registers */ + unsigned int data_regs; /* Data registers */ unsigned int data_size; unsigned int config_port; /* IO Port config index reg */ @@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = { .miscdev = {.fops = &inf_ops,}, }; -static const struct pnp_device_id tpm_pnp_tbl[] = { +static const struct pnp_device_id tpm_inf_pnp_tbl[] = { /* Infineon TPMs */ {"IFX0101", 0}, {"IFX0102", 0}, {"", 0} }; -MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); +MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl); static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) @@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { - tpm_dev.iotype = TPM_INF_IO_PORT; + tpm_dev.iotype = TPM_INF_IO_PORT; tpm_dev.config_port = pnp_port_start(dev, 0); tpm_dev.config_size = pnp_port_len(dev, 0); @@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, goto 
err_last; } } else if (pnp_mem_valid(dev, 0) && - !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { + !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { - tpm_dev.iotype = TPM_INF_IO_MEM; + tpm_dev.iotype = TPM_INF_IO_MEM; tpm_dev.map_base = pnp_mem_start(dev, 0); tpm_dev.map_size = pnp_mem_len(dev, 0); @@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, "product id 0x%02x%02x" "%s\n", tpm_dev.iotype == TPM_INF_IO_PORT ? - tpm_dev.config_port : - tpm_dev.map_base + tpm_dev.index_off, + tpm_dev.config_port : + tpm_dev.map_base + tpm_dev.index_off, tpm_dev.iotype == TPM_INF_IO_PORT ? - tpm_dev.data_regs : - tpm_dev.map_base + tpm_dev.data_regs, + tpm_dev.data_regs : + tpm_dev.map_base + tpm_dev.data_regs, version[0], version[1], vendorid[0], vendorid[1], productid[0], productid[1], chipname); @@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev) iounmap(tpm_dev.mem_base); release_mem_region(tpm_dev.map_base, tpm_dev.map_size); } + tpm_dev_vendor_release(chip); tpm_remove_hardware(chip->dev); } } +static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state) +{ + struct tpm_chip *chip = pnp_get_drvdata(dev); + int rc; + if (chip) { + u8 savestate[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 10, /* blob length (in bytes) */ + 0, 0, 0, 152 /* TPM_ORD_SaveState */ + }; + dev_info(&dev->dev, "saving TPM state\n"); + rc = tpm_inf_send(chip, savestate, sizeof(savestate)); + if (rc < 0) { + dev_err(&dev->dev, "error while saving TPM state\n"); + return rc; + } + } + return 0; +} + +static int tpm_inf_pnp_resume(struct pnp_dev *dev) +{ + /* Re-configure TPM after suspending */ + tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR); + tpm_config_out(IOLIMH, TPM_INF_ADDR); + tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA); + tpm_config_out(IOLIML, TPM_INF_ADDR); + tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA); + /* activate register */ + tpm_config_out(TPM_DAR, TPM_INF_ADDR); + tpm_config_out(0x01, TPM_INF_DATA); + tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR); + /* disable RESET, LP and IRQC */ + tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); + return tpm_pm_resume(&dev->dev); +} + static struct pnp_driver tpm_inf_pnp_driver = { .name = "tpm_inf_pnp", - .driver = { - .owner = THIS_MODULE, - .suspend = tpm_pm_suspend, - .resume = tpm_pm_resume, - }, - .id_table = tpm_pnp_tbl, + .id_table = tpm_inf_pnp_tbl, .probe = tpm_inf_pnp_probe, - .remove = __devexit_p(tpm_inf_pnp_remove), + .suspend = tpm_inf_pnp_suspend, + .resume = tpm_inf_pnp_resume, + .remove = __devexit_p(tpm_inf_pnp_remove) }; static int __init init_inf(void) @@ -638,5 +673,5 @@ module_exit(cleanup_inf); MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>"); MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); -MODULE_VERSION("1.9"); +MODULE_VERSION("1.9.2"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index c6f3b48..dcb9083 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on) pid = task_pid(current); type = PIDTYPE_PID; } - retval = __f_setown(filp, pid, type, 0); + get_pid(pid); spin_unlock_irqrestore(&tty->ctrl_lock, flags); + retval = __f_setown(filp, pid, type, 0); + put_pid(pid); if (retval) goto out; } else { diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 4b34ade..bd444dc 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ 
b/drivers/cpufreq/cpufreq_ondemand.c @@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential); + if (freq_next < policy->min) + freq_next = policy->min; + if (!dbs_tuners_ins.powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index b5f2ee0..64a9372 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data) cohd_fin->pending_irqs--; cohc->completed = cohd_fin->desc.cookie; - BUG_ON(cohc->nbr_active_done && cohd_fin == NULL); - if (cohc->nbr_active_done == 0) return; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 6f51a0a..e7a3230 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device) chan->dev->chan = NULL; mutex_unlock(&dma_list_mutex); device_unregister(&chan->dev->device); + free_percpu(chan->local); } } EXPORT_SYMBOL(dma_async_device_unregister); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 8b90516..948d563 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -467,7 +467,7 @@ err_srcs: if (iterations > 0) while (!kthread_should_stop()) { - DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit); + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); interruptible_sleep_on(&wait_dmatest_exit); } diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 5f7a500..5cc37af 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo) if (is_ioat_active(status) || is_ioat_idle(status)) ioat_suspend(chan); while (is_ioat_active(status) || is_ioat_idle(status)) { - if (end && time_after(jiffies, end)) { + if (tmo && time_after(jiffies, end)) { err = -ETIMEDOUT; break; } diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 9a5bc1a..e80bae1 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) * @buffer_n: buffer number to update. * 0 or 1 are the only valid values. * @phyaddr: buffer physical address. - * @return: Returns 0 on success or negative error code on failure. This - * function will fail if the buffer is set to ready. */ /* Called under spin_lock(_irqsave)(&ichan->lock) */ -static int ipu_update_channel_buffer(struct idmac_channel *ichan, - int buffer_n, dma_addr_t phyaddr) +static void ipu_update_channel_buffer(struct idmac_channel *ichan, + int buffer_n, dma_addr_t phyaddr) { enum ipu_channel channel = ichan->dma_chan.chan_id; uint32_t reg; @@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan, } spin_unlock_irqrestore(&ipu_data.lock, flags); - - return 0; } /* Called under spin_lock_irqsave(&ichan->lock) */ @@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan, { unsigned int chan_id = ichan->dma_chan.chan_id; struct device *dev = &ichan->dma_chan.dev->device; - int ret; if (async_tx_test_ack(&desc->txd)) return -EINTR; @@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan, * could make it conditional on status >= IPU_CHANNEL_ENABLED, but * doing it again shouldn't hurt either. 
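
The ondemand governor hunk above adds a lower clamp so the computed freq_next can never drop below policy->min. A minimal sketch of that load-proportional target plus clamping, with illustrative frequencies and tunables rather than the governor's real policy limits and formula:

#include <stdio.h>

/* Illustrative values only; the real limits come from the cpufreq
 * policy and the up_threshold/down_differential tunables. */
#define FREQ_MIN       800000UL    /* kHz */
#define FREQ_MAX      2400000UL    /* kHz */
#define UP_THRESHOLD        95UL   /* % load that jumps to max freq   */
#define DOWN_DIFFERENTIAL   10UL   /* hysteresis taken off the target */

static unsigned long next_freq(unsigned long cur, unsigned long load)
{
    unsigned long freq;

    if (load > UP_THRESHOLD)
        return FREQ_MAX;           /* busy: go straight to max */

    /* Scale the current frequency by load against the
     * (threshold - differential) band, then clamp into
     * [FREQ_MIN, FREQ_MAX]; the clamp to the minimum is the
     * behaviour the patch above adds. */
    freq = cur * load / (UP_THRESHOLD - DOWN_DIFFERENTIAL);
    if (freq < FREQ_MIN)
        freq = FREQ_MIN;
    if (freq > FREQ_MAX)
        freq = FREQ_MAX;
    return freq;
}

int main(void)
{
    unsigned long loads[] = { 2, 30, 60, 96 };

    for (unsigned i = 0; i < sizeof(loads) / sizeof(loads[0]); i++)
        printf("load %3lu%% -> %lu kHz\n",
               loads[i], next_freq(1600000UL, loads[i]));
    return 0;
}
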
*/ - ret = ipu_update_channel_buffer(ichan, buf_idx, - sg_dma_address(sg)); - - if (ret < 0) { - dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n", - sg, chan_id, buf_idx); - return ret; - } + ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg)); ipu_select_buffer(chan_id, buf_idx); dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", @@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) if (likely(sgnew) && ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { - callback = desc->txd.callback; - callback_param = desc->txd.callback_param; + callback = descnew->txd.callback; + callback_param = descnew->txd.callback_param; spin_unlock(&ichan->lock); - callback(callback_param); + if (callback) + callback(callback_param); spin_lock(&ichan->lock); } diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 000dc67..3391e67 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) * the memory system completely. A command line option allows to force-enable * hardware ECC later in amd64_enable_ecc_error_reporting(). */ -static const char *ecc_warning = - "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n" - " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n" - " Also, use of the override can cause unknown side effects.\n"; +static const char *ecc_msg = + "ECC disabled in the BIOS or no ECC capability, module will not load.\n" + " Either enable ECC checking or force module loading by setting " + "'ecc_enable_override'.\n" + " (Note that use of the override may cause unknown side effects.)\n"; static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) { @@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); if (!ecc_enabled) - amd64_printk(KERN_WARNING, "This node reports that Memory ECC " + amd64_printk(KERN_NOTICE, "This node reports that Memory ECC " "is currently disabled, set F3x%x[22] (%s).\n", K8_NBCFG, pci_name(pvt->misc_f3_ctl)); else @@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); if (!nb_mce_en) - amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR " + amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR " "0x%08x[4] on node %d to enable.\n", MSR_IA32_MCG_CTL, pvt->mc_node_id); if (!ecc_enabled || !nb_mce_en) { if (!ecc_enable_override) { - amd64_printk(KERN_WARNING, "%s", ecc_warning); + amd64_printk(KERN_NOTICE, "%s", ecc_msg); return -ENODEV; } ecc_enable_override = 0; diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index cf27402..ecd5928 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) end <<= (24 - PAGE_SHIFT); end |= (1 << (24 - PAGE_SHIFT)) - 1; - csrow->first_page = start >> PAGE_SHIFT; - csrow->last_page = end >> PAGE_SHIFT; + csrow->first_page = start; + csrow->last_page = end; csrow->nr_pages = end + 1 - start; csrow->grain = 8; csrow->mtype = mtype; @@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op, mpc85xx_init_csrows(mci); -#ifdef CONFIG_EDAC_DEBUG - edac_mc_register_mcidev_debug((struct attribute **)debug_attr); -#endif - /* store the original error disable bits */ orig_ddr_err_disable = in_be32(pdata->mc_vbase + 
MPC85XX_MC_ERR_DISABLE); diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c index a1fce68..17be051 100644 --- a/drivers/gpu/drm/ati_pcigart.c +++ b/drivers/gpu/drm/ati_pcigart.c @@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { DRM_ERROR("fail to set dma mask to 0x%Lx\n", - gart_info->table_mask); + (unsigned long long)gart_info->table_mask); ret = 1; goto done; } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index e660ac0..2307f98 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, if (cmdbuf->num_cliprects) { cliprects = kcalloc(cmdbuf->num_cliprects, sizeof(struct drm_clip_rect), GFP_KERNEL); - if (cliprects == NULL) + if (cliprects == NULL) { + ret = -ENOMEM; goto fail_batch_free; + } ret = copy_from_user(cliprects, cmdbuf->cliprects, cmdbuf->num_cliprects * diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 46d8896..79beffc 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = { const static struct intel_device_info intel_pineview_info = { .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, - .has_pipe_cxsr = 1, + .need_gfx_hws = 1, .has_hotplug = 1, }; @@ -174,78 +174,100 @@ const static struct pci_device_id pciidlist[] = { MODULE_DEVICE_TABLE(pci, pciidlist); #endif -static int i915_suspend(struct drm_device *dev, pm_message_t state) +static int i915_drm_freeze(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!dev || !dev_priv) { - DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv); - DRM_ERROR("DRM not initialized, aborting suspend.\n"); - return -ENODEV; - } - - if (state.event == PM_EVENT_PRETHAW) - return 0; - pci_save_state(dev->pdev); /* If KMS is active, we do the leavevt stuff here */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { - if (i915_gem_idle(dev)) + int error = i915_gem_idle(dev); + if (error) { dev_err(&dev->pdev->dev, - "GEM idle failed, resume may fail\n"); + "GEM idle failed, resume might fail\n"); + return error; + } drm_irq_uninstall(dev); } i915_save_state(dev); + return 0; +} + +static void i915_drm_suspend(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + intel_opregion_free(dev, 1); + /* Modeset on resume, not lid events */ + dev_priv->modeset_on_lid = 0; +} + +static int i915_suspend(struct drm_device *dev, pm_message_t state) +{ + int error; + + if (!dev || !dev->dev_private) { + DRM_ERROR("dev: %p\n", dev); + DRM_ERROR("DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } + + if (state.event == PM_EVENT_PRETHAW) + return 0; + + error = i915_drm_freeze(dev); + if (error) + return error; + + i915_drm_suspend(dev); + if (state.event == PM_EVENT_SUSPEND) { /* Shut down the device */ pci_disable_device(dev->pdev); pci_set_power_state(dev->pdev, PCI_D3hot); } - /* Modeset on resume, not lid events */ - dev_priv->modeset_on_lid = 0; - return 0; } -static int i915_resume(struct drm_device *dev) +static int i915_drm_thaw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; - - if (pci_enable_device(dev->pdev)) - return -1; - pci_set_master(dev->pdev); - - i915_restore_state(dev); - - intel_opregion_init(dev, 
1); + int error = 0; /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); dev_priv->mm.suspended = 0; - ret = i915_gem_init_ringbuffer(dev); - if (ret != 0) - ret = -1; + error = i915_gem_init_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); drm_irq_install(dev); - } - if (drm_core_check_feature(dev, DRIVER_MODESET)) { + /* Resume the modeset for every activated CRTC */ drm_helper_resume_force_mode(dev); } dev_priv->modeset_on_lid = 0; - return ret; + return error; +} + +static int i915_resume(struct drm_device *dev) +{ + if (pci_enable_device(dev->pdev)) + return -EIO; + + pci_set_master(dev->pdev); + + i915_restore_state(dev); + + intel_opregion_init(dev, 1); + + return i915_drm_thaw(dev); } /** @@ -386,57 +408,69 @@ i915_pci_remove(struct pci_dev *pdev) drm_put_dev(dev); } -static int -i915_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int i915_pm_suspend(struct device *dev) { - struct drm_device *dev = pci_get_drvdata(pdev); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + int error; - return i915_suspend(dev, state); -} + if (!drm_dev || !drm_dev->dev_private) { + dev_err(dev, "DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } -static int -i915_pci_resume(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); + error = i915_drm_freeze(drm_dev); + if (error) + return error; - return i915_resume(dev); -} + i915_drm_suspend(drm_dev); -static int -i915_pm_suspend(struct device *dev) -{ - return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND); -} + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); -static int -i915_pm_resume(struct device *dev) -{ - return i915_pci_resume(to_pci_dev(dev)); + return 0; } -static int -i915_pm_freeze(struct device *dev) +static int i915_pm_resume(struct device *dev) { - return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_resume(drm_dev); } -static int -i915_pm_thaw(struct device *dev) +static int i915_pm_freeze(struct device *dev) { - /* thaw during hibernate, do nothing! 
*/ - return 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + if (!drm_dev || !drm_dev->dev_private) { + dev_err(dev, "DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } + + return i915_drm_freeze(drm_dev); } -static int -i915_pm_poweroff(struct device *dev) +static int i915_pm_thaw(struct device *dev) { - return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_drm_thaw(drm_dev); } -static int -i915_pm_restore(struct device *dev) +static int i915_pm_poweroff(struct device *dev) { - return i915_pci_resume(to_pci_dev(dev)); + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + int error; + + error = i915_drm_freeze(drm_dev); + if (!error) + i915_drm_suspend(drm_dev); + + return error; } const struct dev_pm_ops i915_pm_ops = { @@ -445,7 +479,7 @@ const struct dev_pm_ops i915_pm_ops = { .freeze = i915_pm_freeze, .thaw = i915_pm_thaw, .poweroff = i915_pm_poweroff, - .restore = i915_pm_restore, + .restore = i915_pm_resume, }; static struct vm_operations_struct i915_gem_vm_ops = { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index aaf934d..b99b6a8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -493,6 +493,15 @@ typedef struct drm_i915_private { struct list_head flushing_list; /** + * List of objects currently pending a GPU write flush. + * + * All elements on this list will belong to either the + * active_list or flushing_list, last_rendering_seqno can + * be used to differentiate between the two elements. + */ + struct list_head gpu_write_list; + + /** * LRU list of objects which are not in the ringbuffer and * are ready to unbind, but are still in the GTT. 
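The i915 rework above splits the old monolithic suspend/resume entry points into freeze/thaw helpers plus thin wrappers that add the PCI power transitions, so hibernation's freeze and thaw callbacks can reuse the same quiesce/restart code without touching power state. The standalone sketch below shows only that composition; the fake_dev structure and all function names are invented stand-ins, not the driver API. The practical payoff is that an error from the freeze step can now be propagated instead of ignored, which the reworked i915_drm_freeze() also does.

/*
 * Standalone sketch of the freeze/suspend and thaw/resume split.
 * Everything here is hypothetical; only the structure mirrors the
 * reorganisation in the hunk above.
 */
#include <stdio.h>

struct fake_dev {
        int hw_powered;
        int state_saved;
};

static int dev_freeze(struct fake_dev *d)      /* quiesce + save state */
{
        d->state_saved = 1;
        return 0;
}

static int dev_thaw(struct fake_dev *d)        /* restore state + restart */
{
        return d->state_saved ? 0 : -1;
}

static void dev_powerdown(struct fake_dev *d) { d->hw_powered = 0; }
static void dev_powerup(struct fake_dev *d)   { d->hw_powered = 1; }

static int dev_suspend(struct fake_dev *d)     /* suspend = freeze + power off */
{
        int err = dev_freeze(d);

        if (err)
                return err;                    /* propagate, don't ignore */
        dev_powerdown(d);
        return 0;
}

static int dev_resume(struct fake_dev *d)      /* resume = power on + thaw */
{
        dev_powerup(d);
        return dev_thaw(d);
}

int main(void)
{
        struct fake_dev d = { .hw_powered = 1, .state_saved = 0 };

        printf("suspend: %d, resume: %d\n", dev_suspend(&d), dev_resume(&d));
        return 0;
}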
* @@ -592,6 +601,8 @@ struct drm_i915_gem_object { /** This object's place on the active/flushing/inactive lists */ struct list_head list; + /** This object's place on GPU write list */ + struct list_head gpu_write_list; /** This object's place on the fenced object LRU */ struct list_head fence_list; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dda787a..ec8a0d7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) else list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + BUG_ON(!list_empty(&obj_priv->gpu_write_list)); + obj_priv->last_rendering_seqno = 0; if (obj_priv->active) { obj_priv->active = 0; @@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, struct drm_i915_gem_object *obj_priv, *next; list_for_each_entry_safe(obj_priv, next, - &dev_priv->mm.flushing_list, list) { + &dev_priv->mm.gpu_write_list, + gpu_write_list) { struct drm_gem_object *obj = obj_priv->obj; if ((obj->write_domain & flush_domains) == @@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, uint32_t old_write_domain = obj->write_domain; obj->write_domain = 0; + list_del_init(&obj_priv->gpu_write_list); i915_gem_object_move_to_active(obj, seqno); trace_i915_gem_object_change_domain(obj, @@ -2084,8 +2088,8 @@ static int i915_gem_evict_everything(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t seqno; int ret; + uint32_t seqno; bool lists_empty; spin_lock(&dev_priv->mm.active_list_lock); @@ -2107,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev) if (ret) return ret; + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); + ret = i915_gem_evict_from_inactive_list(dev); if (ret) return ret; @@ -2701,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) old_write_domain = obj->write_domain; i915_gem_flush(dev, 0, obj->write_domain); seqno = i915_add_request(dev, NULL, obj->write_domain); - obj->write_domain = 0; + BUG_ON(obj->write_domain); i915_gem_object_move_to_active(obj, seqno); trace_i915_gem_object_change_domain(obj, @@ -3564,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, uint32_t reloc_count = 0, i; int ret = 0; + if (relocs == NULL) + return 0; + for (i = 0; i < buffer_count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; int unwritten; @@ -3653,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_relocation_entry *relocs; + struct drm_i915_gem_relocation_entry *relocs = NULL; int ret = 0, ret2, i, pinned = 0; uint64_t exec_offset; uint32_t seqno, flush_domains, reloc_index; @@ -3679,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (args->num_cliprects != 0) { cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), GFP_KERNEL); - if (cliprects == NULL) + if (cliprects == NULL) { + ret = -ENOMEM; goto pre_mutex_err; + } ret = copy_from_user(cliprects, (struct drm_clip_rect __user *) @@ -3722,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (object_list[i] == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", exec_list[i].handle, i); + /* prevent error path from reading uninitialized data */ + args->buffer_count = i + 1; ret = -EBADF; goto 
err; } @@ -3730,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (obj_priv->in_execbuffer) { DRM_ERROR("Object %p appears more than once in object list\n", object_list[i]); + /* prevent error path from reading uninitialized data */ + args->buffer_count = i + 1; ret = -EBADF; goto err; } @@ -3843,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_flush(dev, dev->invalidate_domains, dev->flush_domains); - if (dev->flush_domains) + if (dev->flush_domains & I915_GEM_GPU_DOMAINS) (void)i915_add_request(dev, file_priv, dev->flush_domains); } for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; + struct drm_i915_gem_object *obj_priv = obj->driver_private; uint32_t old_write_domain = obj->write_domain; obj->write_domain = obj->pending_write_domain; + if (obj->write_domain) + list_move_tail(&obj_priv->gpu_write_list, + &dev_priv->mm.gpu_write_list); + else + list_del_init(&obj_priv->gpu_write_list); + trace_i915_gem_object_change_domain(obj, obj->read_domains, old_write_domain); @@ -3926,6 +3948,7 @@ err: mutex_unlock(&dev->struct_mutex); +pre_mutex_err: /* Copy the updated relocations out regardless of current error * state. Failure to update the relocs would mean that the next * time userland calls execbuf, it would do so with presumed offset @@ -3940,7 +3963,6 @@ err: ret = ret2; } -pre_mutex_err: drm_free_large(object_list); kfree(cliprects); @@ -4363,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) obj_priv->obj = obj; obj_priv->fence_reg = I915_FENCE_REG_NONE; INIT_LIST_HEAD(&obj_priv->list); + INIT_LIST_HEAD(&obj_priv->gpu_write_list); INIT_LIST_HEAD(&obj_priv->fence_list); obj_priv->madv = I915_MADV_WILLNEED; @@ -4814,6 +4837,7 @@ i915_gem_load(struct drm_device *dev) spin_lock_init(&dev_priv->mm.active_list_lock); INIT_LIST_HEAD(&dev_priv->mm.active_list); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); + INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); INIT_LIST_HEAD(&dev_priv->mm.request_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 89a071a..a17d6bdf 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) if (de_iir & DE_GSE) ironlake_opregion_gse_intr(dev); + if (de_iir & DE_PLANEA_FLIP_DONE) { + intel_prepare_page_flip(dev, 0); + intel_finish_page_flip(dev, 0); + } + + if (de_iir & DE_PLANEB_FLIP_DONE) { + intel_prepare_page_flip(dev, 1); + intel_finish_page_flip(dev, 1); + } + + if (de_iir & DE_PIPEA_VBLANK) + drm_handle_vblank(dev, 0); + + if (de_iir & DE_PIPEB_VBLANK) + drm_handle_vblank(dev, 1); + /* check event from PCH */ if ((de_iir & DE_PCH_EVENT) && (pch_iir & SDE_HOTPLUG_MASK)) { @@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) if (!(pipeconf & PIPEACONF_ENABLE)) return -EINVAL; - if (IS_IRONLAKE(dev)) - return 0; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (IS_I965G(dev)) + if (IS_IRONLAKE(dev)) + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); + else if (IS_I965G(dev)) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); else @@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - if (IS_IRONLAKE(dev)) - return; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, - PIPE_VBLANK_INTERRUPT_ENABLE | - PIPE_START_VBLANK_INTERRUPT_ENABLE); + if (IS_IRONLAKE(dev)) + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); + else + i915_disable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } @@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; /* enable kind of interrupts always enabled */ - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; u32 render_mask = GT_USER_INTERRUPT; u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; dev_priv->irq_mask_reg = ~display_mask; - dev_priv->de_irq_enable_reg = display_mask; + dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; /* should always can generate irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 847006c..ab1bd2d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -338,6 +338,7 @@ #define FBC_CTL_PERIODIC (1<<30) #define FBC_CTL_INTERVAL_SHIFT (16) #define FBC_CTL_UNCOMPRESSIBLE (1<<14) +#define FBC_C3_IDLE (1<<13) #define FBC_CTL_STRIDE_SHIFT (5) #define FBC_CTL_FENCENO (1<<0) #define FBC_COMMAND 0x0320c diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index ddefc87..79dd402 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) adpa = I915_READ(PCH_ADPA); adpa &= ~ADPA_CRT_HOTPLUG_MASK; + /* disable HPD first */ + I915_WRITE(PCH_ADPA, adpa); + (void)I915_READ(PCH_ADPA); adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | ADPA_CRT_HOTPLUG_WARMUP_10MS | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 45da78e..b27202d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -240,33 +240,86 @@ struct intel_limit { #define IRONLAKE_DOT_MAX 350000 #define IRONLAKE_VCO_MIN 1760000 #define IRONLAKE_VCO_MAX 3510000 -#define IRONLAKE_N_MIN 1 -#define IRONLAKE_N_MAX 6 -#define IRONLAKE_M_MIN 79 -#define IRONLAKE_M_MAX 127 #define IRONLAKE_M1_MIN 12 #define IRONLAKE_M1_MAX 22 #define IRONLAKE_M2_MIN 5 #define IRONLAKE_M2_MAX 9 -#define IRONLAKE_P_SDVO_DAC_MIN 5 -#define IRONLAKE_P_SDVO_DAC_MAX 80 -#define IRONLAKE_P_LVDS_MIN 28 -#define IRONLAKE_P_LVDS_MAX 112 -#define IRONLAKE_P1_MIN 1 -#define IRONLAKE_P1_MAX 8 -#define IRONLAKE_P2_SDVO_DAC_SLOW 10 -#define IRONLAKE_P2_SDVO_DAC_FAST 5 -#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */ -#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ -#define IRONLAKE_P_DISPLAY_PORT_MIN 10 -#define 
IRONLAKE_P_DISPLAY_PORT_MAX 20 -#define IRONLAKE_P2_DISPLAY_PORT_FAST 10 -#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10 -#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0 -#define IRONLAKE_P1_DISPLAY_PORT_MIN 1 -#define IRONLAKE_P1_DISPLAY_PORT_MAX 2 +/* We have parameter ranges for different type of outputs. */ + +/* DAC & HDMI Refclk 120Mhz */ +#define IRONLAKE_DAC_N_MIN 1 +#define IRONLAKE_DAC_N_MAX 5 +#define IRONLAKE_DAC_M_MIN 79 +#define IRONLAKE_DAC_M_MAX 127 +#define IRONLAKE_DAC_P_MIN 5 +#define IRONLAKE_DAC_P_MAX 80 +#define IRONLAKE_DAC_P1_MIN 1 +#define IRONLAKE_DAC_P1_MAX 8 +#define IRONLAKE_DAC_P2_SLOW 10 +#define IRONLAKE_DAC_P2_FAST 5 + +/* LVDS single-channel 120Mhz refclk */ +#define IRONLAKE_LVDS_S_N_MIN 1 +#define IRONLAKE_LVDS_S_N_MAX 3 +#define IRONLAKE_LVDS_S_M_MIN 79 +#define IRONLAKE_LVDS_S_M_MAX 118 +#define IRONLAKE_LVDS_S_P_MIN 28 +#define IRONLAKE_LVDS_S_P_MAX 112 +#define IRONLAKE_LVDS_S_P1_MIN 2 +#define IRONLAKE_LVDS_S_P1_MAX 8 +#define IRONLAKE_LVDS_S_P2_SLOW 14 +#define IRONLAKE_LVDS_S_P2_FAST 14 + +/* LVDS dual-channel 120Mhz refclk */ +#define IRONLAKE_LVDS_D_N_MIN 1 +#define IRONLAKE_LVDS_D_N_MAX 3 +#define IRONLAKE_LVDS_D_M_MIN 79 +#define IRONLAKE_LVDS_D_M_MAX 127 +#define IRONLAKE_LVDS_D_P_MIN 14 +#define IRONLAKE_LVDS_D_P_MAX 56 +#define IRONLAKE_LVDS_D_P1_MIN 2 +#define IRONLAKE_LVDS_D_P1_MAX 8 +#define IRONLAKE_LVDS_D_P2_SLOW 7 +#define IRONLAKE_LVDS_D_P2_FAST 7 + +/* LVDS single-channel 100Mhz refclk */ +#define IRONLAKE_LVDS_S_SSC_N_MIN 1 +#define IRONLAKE_LVDS_S_SSC_N_MAX 2 +#define IRONLAKE_LVDS_S_SSC_M_MIN 79 +#define IRONLAKE_LVDS_S_SSC_M_MAX 126 +#define IRONLAKE_LVDS_S_SSC_P_MIN 28 +#define IRONLAKE_LVDS_S_SSC_P_MAX 112 +#define IRONLAKE_LVDS_S_SSC_P1_MIN 2 +#define IRONLAKE_LVDS_S_SSC_P1_MAX 8 +#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14 +#define IRONLAKE_LVDS_S_SSC_P2_FAST 14 + +/* LVDS dual-channel 100Mhz refclk */ +#define IRONLAKE_LVDS_D_SSC_N_MIN 1 +#define IRONLAKE_LVDS_D_SSC_N_MAX 3 +#define IRONLAKE_LVDS_D_SSC_M_MIN 79 +#define IRONLAKE_LVDS_D_SSC_M_MAX 126 +#define IRONLAKE_LVDS_D_SSC_P_MIN 14 +#define IRONLAKE_LVDS_D_SSC_P_MAX 42 +#define IRONLAKE_LVDS_D_SSC_P1_MIN 2 +#define IRONLAKE_LVDS_D_SSC_P1_MAX 6 +#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7 +#define IRONLAKE_LVDS_D_SSC_P2_FAST 7 + +/* DisplayPort */ +#define IRONLAKE_DP_N_MIN 1 +#define IRONLAKE_DP_N_MAX 2 +#define IRONLAKE_DP_M_MIN 81 +#define IRONLAKE_DP_M_MAX 90 +#define IRONLAKE_DP_P_MIN 10 +#define IRONLAKE_DP_P_MAX 20 +#define IRONLAKE_DP_P2_FAST 10 +#define IRONLAKE_DP_P2_SLOW 10 +#define IRONLAKE_DP_P2_LIMIT 0 +#define IRONLAKE_DP_P1_MIN 1 +#define IRONLAKE_DP_P1_MAX 2 static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, @@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = { .find_pll = intel_find_best_PLL, }; -static const intel_limit_t intel_limits_ironlake_sdvo = { +static const intel_limit_t intel_limits_ironlake_dac = { .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, - .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, - .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, + .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX }, + .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX }, .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, - .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX }, - .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, + 
.p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX }, + .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX }, .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, - .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, - .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, + .p2_slow = IRONLAKE_DAC_P2_SLOW, + .p2_fast = IRONLAKE_DAC_P2_FAST }, .find_pll = intel_g4x_find_best_PLL, }; -static const intel_limit_t intel_limits_ironlake_lvds = { +static const intel_limit_t intel_limits_ironlake_single_lvds = { .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, - .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, - .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, + .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX }, + .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX }, .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, - .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX }, - .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, + .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX }, + .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX }, .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, - .p2_slow = IRONLAKE_P2_LVDS_SLOW, - .p2_fast = IRONLAKE_P2_LVDS_FAST }, + .p2_slow = IRONLAKE_LVDS_S_P2_SLOW, + .p2_fast = IRONLAKE_LVDS_S_P2_FAST }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_dual_lvds = { + .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, + .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, + .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX }, + .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX }, + .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, + .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, + .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX }, + .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX }, + .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, + .p2_slow = IRONLAKE_LVDS_D_P2_SLOW, + .p2_fast = IRONLAKE_LVDS_D_P2_FAST }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { + .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, + .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, + .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX }, + .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX }, + .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, + .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, + .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX }, + .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX }, + .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, + .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW, + .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { + .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, + .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, + .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX }, + .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX }, + .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, + .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, + 
.p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX }, + .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX }, + .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, + .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW, + .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST }, .find_pll = intel_g4x_find_best_PLL, }; @@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = { .max = IRONLAKE_DOT_MAX }, .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX}, - .n = { .min = IRONLAKE_N_MIN, - .max = IRONLAKE_N_MAX }, - .m = { .min = IRONLAKE_M_MIN, - .max = IRONLAKE_M_MAX }, + .n = { .min = IRONLAKE_DP_N_MIN, + .max = IRONLAKE_DP_N_MAX }, + .m = { .min = IRONLAKE_DP_M_MIN, + .max = IRONLAKE_DP_M_MAX }, .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, - .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, - .max = IRONLAKE_P_DISPLAY_PORT_MAX }, - .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, - .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, - .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, - .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, - .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, + .p = { .min = IRONLAKE_DP_P_MIN, + .max = IRONLAKE_DP_P_MAX }, + .p1 = { .min = IRONLAKE_DP_P1_MIN, + .max = IRONLAKE_DP_P1_MAX}, + .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT, + .p2_slow = IRONLAKE_DP_P2_SLOW, + .p2_fast = IRONLAKE_DP_P2_FAST }, .find_pll = intel_find_pll_ironlake_dp, }; static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) { + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; const intel_limit_t *limit; - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits_ironlake_lvds; - else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + int refclk = 120; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) + refclk = 100; + + if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == + LVDS_CLKB_POWER_UP) { + /* LVDS dual channel */ + if (refclk == 100) + limit = &intel_limits_ironlake_dual_lvds_100m; + else + limit = &intel_limits_ironlake_dual_lvds; + } else { + if (refclk == 100) + limit = &intel_limits_ironlake_single_lvds_100m; + else + limit = &intel_limits_ironlake_single_lvds; + } + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || HAS_eDP) limit = &intel_limits_ironlake_display_port; else - limit = &intel_limits_ironlake_sdvo; + limit = &intel_limits_ironlake_dac; return limit; } @@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) /* enable it... 
*/ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev)) + fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; if (obj_priv->tiling_mode != I915_TILING_NONE) @@ -1638,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); + drm_vblank_off(dev, pipe); /* Disable display plane */ temp = I915_READ(dspcntr_reg); if ((temp & DISPLAY_PLANE_ENABLE) != 0) { @@ -2519,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, sr_entries = roundup(sr_entries / cacheline_size, 1); DRM_DEBUG("self-refresh entries: %d\n", sr_entries); I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + } else { + /* Turn off self refresh if both pipes are enabled */ + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); } DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", @@ -2562,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, srwm = 1; srwm &= 0x3f; I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + } else { + /* Turn off self refresh if both pipes are enabled */ + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", @@ -2630,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, if (srwm < 0) srwm = 1; I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); + } else { + /* Turn off self refresh if both pipes are enabled */ + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", @@ -3949,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) struct intel_unpin_work { struct work_struct work; struct drm_device *dev; - struct drm_gem_object *obj; + struct drm_gem_object *old_fb_obj; + struct drm_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; int pending; }; @@ -3960,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work) container_of(__work, struct intel_unpin_work, work); mutex_lock(&work->dev->struct_mutex); - i915_gem_object_unpin(work->obj); - drm_gem_object_unreference(work->obj); + i915_gem_object_unpin(work->old_fb_obj); + drm_gem_object_unreference(work->pending_flip_obj); + drm_gem_object_unreference(work->old_fb_obj); mutex_unlock(&work->dev->struct_mutex); kfree(work); } @@ -3984,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_lock_irqsave(&dev->event_lock, flags); work = intel_crtc->unpin_work; if (work == NULL || !work->pending) { + if (work && !work->pending) { + obj_priv = work->pending_flip_obj->driver_private; + DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", + obj_priv, + atomic_read(&obj_priv->pending_flip)); + } spin_unlock_irqrestore(&dev->event_lock, flags); return; } @@ -4004,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev->event_lock, flags); - obj_priv = work->obj->driver_private; - if (atomic_dec_and_test(&obj_priv->pending_flip)) + obj_priv = work->pending_flip_obj->driver_private; + + /* Initial scanout buffer will have a 0 pending flip count */ + if ((atomic_read(&obj_priv->pending_flip) == 0) || + atomic_dec_and_test(&obj_priv->pending_flip)) DRM_WAKEUP(&dev_priv->pending_flip_queue); schedule_work(&work->work); } @@ -4018,8 +4161,11 @@ void 
intel_prepare_page_flip(struct drm_device *dev, int plane) unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work) + if (intel_crtc->unpin_work) { intel_crtc->unpin_work->pending = 1; + } else { + DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); + } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -4035,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; unsigned long flags; - int ret; + int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; + int ret, pipesrc; RING_LOCALS; work = kzalloc(sizeof *work, GFP_KERNEL); @@ -4047,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->dev = crtc->dev; intel_fb = to_intel_framebuffer(crtc->fb); - work->obj = intel_fb->obj; + work->old_fb_obj = intel_fb->obj; INIT_WORK(&work->work, intel_unpin_work_fn); /* We borrow the event spin lock for protecting unpin_work */ spin_lock_irqsave(&dev->event_lock, flags); if (intel_crtc->unpin_work) { + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&dev->event_lock, flags); kfree(work); mutex_unlock(&dev->struct_mutex); @@ -4066,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ret = intel_pin_and_fence_fb_obj(dev, obj); if (ret != 0) { + DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", + obj->driver_private); kfree(work); + intel_crtc->unpin_work = NULL; mutex_unlock(&dev->struct_mutex); return ret; } - /* Reference the old fb object for the scheduled work. */ - drm_gem_object_reference(work->obj); + /* Reference the objects for the scheduled work. */ + drm_gem_object_reference(work->old_fb_obj); + drm_gem_object_reference(obj); crtc->fb = fb; i915_gem_object_flush_write_domain(obj); drm_vblank_get(dev, intel_crtc->pipe); obj_priv = obj->driver_private; atomic_inc(&obj_priv->pending_flip); + work->pending_flip_obj = obj; BEGIN_LP_RING(4); OUT_RING(MI_DISPLAY_FLIP | @@ -4086,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, OUT_RING(fb->pitch); if (IS_I965G(dev)) { OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); - OUT_RING((fb->width << 16) | fb->height); + pipesrc = I915_READ(pipesrc_reg); + OUT_RING(pipesrc & 0x0fff0fff); } else { OUT_RING(obj_priv->gtt_offset); OUT_RING(MI_NOOP); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 371d753..aaabbcb 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(fbo, PAGE_SIZE); + ret = i915_gem_object_pin(fbo, 64*1024); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index aa74e59..b1d0acb 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = { { .ident = "Samsung SX20S", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), + DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), DMI_MATCH(DMI_BOARD_NAME, "SX20S"), }, }, @@ -623,6 +623,13 @@ static const struct dmi_system_id bad_lid_status[] = { }, }, { + .ident = "Aspire 1810T", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), + }, + }, + { .ident = "PC-81005", 
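The page-flip changes above keep references on both the outgoing and the incoming framebuffer object while a flip is queued, and only wake waiters once the incoming object's pending-flip count drops to zero, treating an initial scanout buffer (count already zero) as having nothing to wait for. A rough standalone sketch of that bookkeeping using C11 atomics; buf, queue_flip and finish_flip are invented placeholders, not the driver API.

/*
 * Standalone sketch of pending-flip bookkeeping.  Types and names
 * are hypothetical; only the counting scheme mirrors the hunk above.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
        atomic_int refcount;
        atomic_int pending_flip;
};

static void buf_get(struct buf *b) { atomic_fetch_add(&b->refcount, 1); }
static void buf_put(struct buf *b) { atomic_fetch_sub(&b->refcount, 1); }

static void queue_flip(struct buf *old_fb, struct buf *new_fb)
{
        buf_get(old_fb);                       /* both dropped by completion work */
        buf_get(new_fb);
        atomic_fetch_add(&new_fb->pending_flip, 1);
}

/* Returns true when waiters on this buffer may proceed. */
static bool finish_flip(struct buf *new_fb)
{
        if (atomic_load(&new_fb->pending_flip) == 0)
                return true;                   /* initial scanout: never flipped */
        return atomic_fetch_sub(&new_fb->pending_flip, 1) == 1;
}

int main(void)
{
        struct buf a, b;

        atomic_init(&a.refcount, 1);
        atomic_init(&a.pending_flip, 0);
        atomic_init(&b.refcount, 1);
        atomic_init(&b.pending_flip, 0);

        queue_flip(&a, &b);
        printf("wake: %d\n", finish_flip(&b)); /* 1 */
        buf_put(&a);
        buf_put(&b);
        return 0;
}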
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), @@ -643,7 +650,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect { enum drm_connector_status status = connector_status_connected; - if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) + if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) status = connector_status_disconnected; return status; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index eaacfd0..82678d3 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) connector->connector_type = DRM_MODE_CONNECTOR_VGA; intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); + } else if (flags & SDVO_OUTPUT_CVBS0) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; + encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; + connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; + sdvo_priv->is_tv = true; + intel_output->needs_tv_clock = true; + intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; } else if (flags & SDVO_OUTPUT_LVDS0) { sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 1cf4882..48227e7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev) { int result; - if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, + if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE, &result)) return -ENODEV; NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); - if (result & 0x1) { /* Stamina mode - disable the external GPU */ + if (result) { /* Ensure that the external GPU is enabled */ + nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); + nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, + NULL); + } else { /* Stamina mode - disable the external GPU */ nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, NULL); nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, NULL); - } else { /* Ensure that the external GPU is enabled */ - nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); - nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, - NULL); } return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index d7f8d8b..2cd0fad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) struct drm_nouveau_private *dev_priv = bios->dev->dev_private; - if (dev_priv->card_type >= NV_50) + if (dev_priv->card_type >= NV_40) return 1; /* @@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, */ struct drm_nouveau_private *dev_priv = dev->dev_private; - struct init_exec iexec = {true, false}; struct nvbios *bios = &dev_priv->VBIOS; uint8_t *table = &bios->data[bios->display.script_table_ptr]; uint8_t *otable = NULL; @@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } } - bios->display.output = dcbent; - if (pxclk == 0) { script = ROM16(otable[6]); if (!script) { @@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } NV_TRACE(dev, 
"0x%04X: parsing output script 0\n", script); - parse_init_table(bios, script, &iexec); + nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk == -1) { script = ROM16(otable[8]); @@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); - parse_init_table(bios, script, &iexec); + nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk == -2) { if (table[4] >= 12) @@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); - parse_init_table(bios, script, &iexec); + nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk > 0) { script = ROM16(otable[table[4] + i*6 + 2]); @@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); - parse_init_table(bios, script, &iexec); + nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk < 0) { script = ROM16(otable[table[4] + i*6 + 4]); @@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, } NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); - parse_init_table(bios, script, &iexec); + nouveau_bios_run_init_table(dev, script, dcbent); } return 0; @@ -5864,10 +5861,13 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvbios *bios = &dev_priv->VBIOS; struct init_exec iexec = { true, false }; + unsigned long flags; + spin_lock_irqsave(&bios->lock, flags); bios->display.output = dcbent; parse_init_table(bios, table, &iexec); bios->display.output = NULL; + spin_unlock_irqrestore(&bios->lock, flags); } static bool NVInitVBIOS(struct drm_device *dev) @@ -5876,6 +5876,7 @@ static bool NVInitVBIOS(struct drm_device *dev) struct nvbios *bios = &dev_priv->VBIOS; memset(bios, 0, sizeof(struct nvbios)); + spin_lock_init(&bios->lock); bios->dev = dev; if (!NVShadowVBIOS(dev, bios->data)) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 058e98c..68446fd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -205,6 +205,8 @@ struct nvbios { struct drm_device *dev; struct nouveau_bios_info pub; + spinlock_t lock; + uint8_t data[NV_PROM_SIZE]; unsigned int length; bool execute; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index db0ed4c..028719f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev, /* * Some of the tile_flags have a periodic structure of N*4096 bytes, - * align to to that as well as the page size. Overallocate memory to - * avoid corruption of other buffer objects. + * align to to that as well as the page size. Align the size to the + * appropriate boundaries. This does imply that sizes are rounded up + * 3-7 pages, so be aware of this and do not waste memory by allocating + * many small buffers. 
*/ if (dev_priv->card_type == NV_50) { uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; @@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev, case 0x2800: case 0x4800: case 0x7a00: - *size = roundup(*size, block_size); if (is_power_of_2(block_size)) { - *size += 3 * block_size; for (i = 1; i < 10; i++) { *align = 12 * i * block_size; if (!(*align % 65536)) break; } } else { - *size += 6 * block_size; for (i = 1; i < 10; i++) { *align = 8 * i * block_size; if (!(*align % 65536)) break; } } + *size = roundup(*size, *align); break; default: break; diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 343d718..2281f99 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan) /* Ensure the channel is no longer active on the GPU */ pfifo->reassign(dev, false); - if (pgraph->channel(dev) == chan) { - pgraph->fifo_access(dev, false); + pgraph->fifo_access(dev, false); + if (pgraph->channel(dev) == chan) pgraph->unload_context(dev); - pgraph->fifo_access(dev, true); - } pgraph->destroy_context(chan); + pgraph->fifo_access(dev, true); if (pfifo->channel_id(dev) == chan->id) { pfifo->disable(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7e6d673..d2f6335 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector) { struct nouveau_connector *nv_connector = nouveau_connector(drm_connector); - struct drm_device *dev = nv_connector->base.dev; - - NV_DEBUG_KMS(dev, "\n"); + struct drm_device *dev; if (!nv_connector) return; + dev = nv_connector->base.dev; + NV_DEBUG_KMS(dev, "\n"); + kfree(nv_connector->edid); drm_sysfs_connector_remove(drm_connector); drm_connector_cleanup(drm_connector); diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index dd49372..f954ad9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, break; } - if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { - ret = -EREMOTEIO; - goto out; - } - if (cmd & 1) { + if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { + ret = -EREMOTEIO; + goto out; + } + for (i = 0; i < 4; i++) { data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 343ab7f..da3b93b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -56,7 +56,7 @@ int nouveau_vram_pushbuf; module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); -int nouveau_vram_notify; +int nouveau_vram_notify = 1; module_param_named(vram_notify, nouveau_vram_notify, int, 0400); MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); @@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); int nouveau_ignorelid = 0; module_param_named(ignorelid, nouveau_ignorelid, int, 0400); +MODULE_PARM_DESC(noagp, "Disable all acceleration"); +int nouveau_noaccel = 0; +module_param_named(noaccel, nouveau_noaccel, int, 0400); + +MODULE_PARM_DESC(noagp, "Disable fbcon acceleration"); 
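The nouveau_bo_fixup_align() change above stops over-allocating and instead derives an alignment that is simultaneously a multiple of the tile block size (scaled by 12 or 8 depending on whether the block size is a power of two) and of 64 KiB, then rounds the object size up to that alignment. A small standalone sketch of that calculation; the 12/8 factors and the 65536 constant come from the hunk, while pick_align() and the example block size are invented for illustration. Rounding the size up to the alignment is what makes objects land a few pages larger than requested, which is the caveat the updated comment warns about.

/*
 * Standalone sketch: find an alignment that is a multiple of both
 * the (scaled) tile block size and 64 KiB, then round the size up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, a)   ((((x) + (a) - 1) / (a)) * (a))

static bool is_power_of_2(uint32_t v) { return v && !(v & (v - 1)); }

static uint32_t pick_align(uint32_t block_size)
{
        uint32_t step = is_power_of_2(block_size) ? 12 : 8;
        uint32_t align = 0;
        int i;

        for (i = 1; i < 10; i++) {
                align = step * i * block_size;
                if (align % 65536 == 0)        /* also 64 KiB aligned */
                        break;
        }
        return align;
}

int main(void)
{
        uint32_t block = 16384;                /* example tile block size */
        uint32_t align = pick_align(block);
        uint32_t size  = ROUNDUP(300000u, align);

        printf("align=%u size=%u\n", align, size);
        return 0;
}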
+int nouveau_nofbaccel = 0; +module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); + MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 6b96904..5445cef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -678,6 +678,8 @@ extern int nouveau_reg_debug; extern char *nouveau_vbios; extern int nouveau_ctxfw; extern int nouveau_ignorelid; +extern int nouveau_nofbaccel; +extern int nouveau_noaccel; /* nouveau_state.c */ extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 0b05c86..ea879a2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = { .fb_setcmap = drm_fb_helper_setcmap, }; +static struct fb_ops nv04_fbcon_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_setcolreg = drm_fb_helper_setcolreg, + .fb_fillrect = nv04_fbcon_fillrect, + .fb_copyarea = nv04_fbcon_copyarea, + .fb_imageblit = nv04_fbcon_imageblit, + .fb_sync = nouveau_fbcon_sync, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, +}; + +static struct fb_ops nv50_fbcon_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_setcolreg = drm_fb_helper_setcolreg, + .fb_fillrect = nv50_fbcon_fillrect, + .fb_copyarea = nv50_fbcon_copyarea, + .fb_imageblit = nv50_fbcon_imageblit, + .fb_sync = nouveau_fbcon_sync, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, +}; + static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { @@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, dev_priv->fbdev_info = info; strcpy(info->fix.id, "nouveaufb"); - info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | - FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; + if (nouveau_nofbaccel) + info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED; + else + info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | + FBINFO_HWACCEL_FILLRECT | + FBINFO_HWACCEL_IMAGEBLIT; info->fbops = &nouveau_fbcon_ops; info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - dev_priv->vm_vram_base; @@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, par->nouveau_fb = nouveau_fb; par->dev = dev; - if (dev_priv->channel) { + if (dev_priv->channel && !nouveau_nofbaccel) { switch (dev_priv->card_type) { case NV_50: nv50_fbcon_accel_init(info); + info->fbops = &nv50_fbcon_ops; break; default: nv04_fbcon_accel_init(info); + info->fbops = &nv04_fbcon_ops; break; }; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index 462e0b8..f9c34e1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb); void nouveau_fbcon_restore(void); void nouveau_fbcon_zfill(struct drm_device *dev); +void nv04_fbcon_copyarea(struct fb_info *info, 
const struct fb_copyarea *region); +void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); int nv04_fbcon_accel_init(struct fb_info *info); +void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); int nv50_fbcon_accel_init(struct fb_info *info); void nouveau_fbcon_gpu_lockup(struct fb_info *info); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 6ac804b..70cc308 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, } if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { + spin_lock(&nvbo->bo.lock); ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); + spin_unlock(&nvbo->bo.lock); } else { ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); if (ret == 0) diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c index 419f4c2..c7ebec6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_grctx.c +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c @@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev) } pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); - if (!pgraph->ctxprog) { - NV_ERROR(dev, "OOM copying ctxprog\n"); + if (!pgraph->ctxvals) { + NV_ERROR(dev, "OOM copying ctxvals\n"); release_firmware(fw); nouveau_grctx_fini(dev); return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 3b9bad6..447f9f6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev) get + 4); } + if (status & NV_PFIFO_INTR_SEMAPHORE) { + uint32_t sem; + + status &= ~NV_PFIFO_INTR_SEMAPHORE; + nv_wr32(dev, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_SEMAPHORE); + + sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); + nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); + + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); + } + if (status) { NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", status, chid); @@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) static void nv50_pgraph_irq_handler(struct drm_device *dev) { - uint32_t status, nsource; + uint32_t status; - status = nv_rd32(dev, NV03_PGRAPH_INTR); - nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { + uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); - if (status & 0x00000001) { - nouveau_pgraph_intr_notify(dev, nsource); - status &= ~0x00000001; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); - } + if (status & 0x00000001) { + nouveau_pgraph_intr_notify(dev, nsource); + status &= ~0x00000001; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); + } - if (status & 0x00000010) { - nouveau_pgraph_intr_error(dev, nsource | - NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); + if (status & 0x00000010) { + nouveau_pgraph_intr_error(dev, nsource | + NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); - status &= ~0x00000010; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); - } + status &= ~0x00000010; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); + } - if (status & 0x00001000) { - nv_wr32(dev, 0x400500, 0x00000000); - nv_wr32(dev, NV03_PGRAPH_INTR, 
NV_PGRAPH_INTR_CONTEXT_SWITCH); - nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, - NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); - nv_wr32(dev, 0x400500, 0x00010001); + if (status & 0x00001000) { + nv_wr32(dev, 0x400500, 0x00000000); + nv_wr32(dev, NV03_PGRAPH_INTR, + NV_PGRAPH_INTR_CONTEXT_SWITCH); + nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, + NV40_PGRAPH_INTR_EN) & + ~NV_PGRAPH_INTR_CONTEXT_SWITCH); + nv_wr32(dev, 0x400500, 0x00010001); - nv50_graph_context_switch(dev); + nv50_graph_context_switch(dev); - status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; - } + status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + } - if (status & 0x00100000) { - nouveau_pgraph_intr_error(dev, nsource | - NV03_PGRAPH_NSOURCE_DATA_ERROR); + if (status & 0x00100000) { + nouveau_pgraph_intr_error(dev, nsource | + NV03_PGRAPH_NSOURCE_DATA_ERROR); - status &= ~0x00100000; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); - } + status &= ~0x00100000; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); + } - if (status & 0x00200000) { - int r; - - nouveau_pgraph_intr_error(dev, nsource | - NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); - - NV_ERROR(dev, "magic set 1:\n"); - for (r = 0x408900; r <= 0x408910; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); - nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); - for (r = 0x408e08; r <= 0x408e24; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); - nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); - - NV_ERROR(dev, "magic set 2:\n"); - for (r = 0x409900; r <= 0x409910; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); - nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); - for (r = 0x409e08; r <= 0x409e24; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); - nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); - - status &= ~0x00200000; - nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); - } + if (status & 0x00200000) { + int r; + + nouveau_pgraph_intr_error(dev, nsource | + NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); + + NV_ERROR(dev, "magic set 1:\n"); + for (r = 0x408900; r <= 0x408910; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + nv_wr32(dev, 0x408900, + nv_rd32(dev, 0x408904) | 0xc0000000); + for (r = 0x408e08; r <= 0x408e24; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + nv_wr32(dev, 0x408e08, + nv_rd32(dev, 0x408e08) | 0xc0000000); + + NV_ERROR(dev, "magic set 2:\n"); + for (r = 0x409900; r <= 0x409910; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + nv_wr32(dev, 0x409900, + nv_rd32(dev, 0x409904) | 0xc0000000); + for (r = 0x409e08; r <= 0x409e24; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + nv_wr32(dev, 0x409e08, + nv_rd32(dev, 0x409e08) | 0xc0000000); + + status &= ~0x00200000; + nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); + } - if (status) { - NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); - nv_wr32(dev, NV03_PGRAPH_INTR, status); - } + if (status) { + NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", + status); + nv_wr32(dev, NV03_PGRAPH_INTR, status); + } - { - const int isb = (1 << 16) | (1 << 0); + { + const int isb = (1 << 16) | (1 << 0); - if ((nv_rd32(dev, 0x400500) & isb) != isb) - nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); - nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); + if ((nv_rd32(dev, 0x400500) & isb) != isb) + nv_wr32(dev, 0x400500, + nv_rd32(dev, 
0x400500) | isb); + } } nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); + nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); } static void diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 6c66a34..d99dc08 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct nouveau_bo *ntfy = NULL; + uint32_t flags; int ret; - ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? - TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, + if (nouveau_vram_notify) + flags = TTM_PL_FLAG_VRAM; + else + flags = TTM_PL_FLAG_TT; + + ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0x0000, false, true, &ntfy); if (ret) return ret; - ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); + ret = nouveau_bo_pin(ntfy, flags); if (ret) goto out_err; @@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, target = NV_DMA_TARGET_PCI; } else { target = NV_DMA_TARGET_AGP; + if (dev_priv->card_type >= NV_50) + offset += dev_priv->vm_gart_base; } } else { NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 6c2cf817..e7c100b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -885,11 +885,12 @@ int nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, struct nouveau_gpuobj **gpuobj_ret) { - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct drm_nouveau_private *dev_priv; struct nouveau_gpuobj *gpuobj; if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) return -EINVAL; + dev_priv = chan->dev->dev_private; gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); if (!gpuobj) diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 251f1b3..aa9b310 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -99,6 +99,7 @@ * the card will hang early on in the X init process. 
*/ # define NV_PMC_ENABLE_UNK13 (1<<13) +#define NV40_PMC_GRAPH_UNITS 0x00001540 #define NV40_PMC_BACKLIGHT 0x000015f0 # define NV40_PMC_BACKLIGHT_MASK 0x001f0000 #define NV40_PMC_1700 0x00001700 diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 4c7f1e4..ed15905 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -54,11 +54,12 @@ static void nouveau_sgdma_clear(struct ttm_backend *be) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; - struct drm_device *dev = nvbe->dev; - - NV_DEBUG(nvbe->dev, "\n"); + struct drm_device *dev; if (nvbe && nvbe->pages) { + dev = nvbe->dev; + NV_DEBUG(dev, "\n"); + if (nvbe->bound) be->func->unbind(be); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index f2d0187..a4851af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) static unsigned int nouveau_vga_set_decode(void *priv, bool state) { + struct drm_device *dev = priv; + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->chipset >= 0x40) + nv_wr32(dev, 0x88054, state); + else + nv_wr32(dev, 0x1854, state); + if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; @@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_timer; - /* PGRAPH */ - ret = engine->graph.init(dev); - if (ret) - goto out_fb; + if (nouveau_noaccel) + engine->graph.accel_blocked = true; + else { + /* PGRAPH */ + ret = engine->graph.init(dev); + if (ret) + goto out_fb; - /* PFIFO */ - ret = engine->fifo.init(dev); - if (ret) - goto out_graph; + /* PFIFO */ + ret = engine->fifo.init(dev); + if (ret) + goto out_graph; + } /* this call irq_preinstall, register irq handler and * call irq_postinstall @@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev) out_irq: drm_irq_uninstall(dev); out_fifo: - engine->fifo.takedown(dev); + if (!nouveau_noaccel) + engine->fifo.takedown(dev); out_graph: - engine->graph.takedown(dev); + if (!nouveau_noaccel) + engine->graph.takedown(dev); out_fb: engine->fb.takedown(dev); out_timer: @@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev) dev_priv->channel = NULL; } - engine->fifo.takedown(dev); - engine->graph.takedown(dev); + if (!nouveau_noaccel) { + engine->fifo.takedown(dev); + engine->graph.takedown(dev); + } engine->fb.takedown(dev); engine->timer.takedown(dev); engine->mc.takedown(dev); @@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, case NOUVEAU_GETPARAM_VM_VRAM_BASE: getparam->value = dev_priv->vm_vram_base; break; + case NOUVEAU_GETPARAM_GRAPH_UNITS: + /* NV40 and NV50 versions are quite different, but register + * address is the same. User is supposed to know the card + * family anyway... 
*/ + if (dev_priv->chipset >= 0x40) { + getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); + break; + } + /* FALLTHRU */ default: NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index d910873..fd01caa 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -27,7 +27,7 @@ #include "nouveau_dma.h" #include "nouveau_fbcon.h" -static void +void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nouveau_fbcon_par *par = info->par; @@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) FIRE_RING(chan); } -static void +void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbcon_par *par = info->par; @@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) FIRE_RING(chan); } -static void +void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbcon_par *par = info->par; @@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info) FIRE_RING(chan); - info->fbops->fb_fillrect = nv04_fbcon_fillrect; - info->fbops->fb_copyarea = nv04_fbcon_copyarea; - info->fbops->fb_imageblit = nv04_fbcon_imageblit; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 40b7360..d1a651e 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) static void nv50_crtc_destroy(struct drm_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - - NV_DEBUG_KMS(dev, "\n"); + struct drm_device *dev; + struct nouveau_crtc *nv_crtc; if (!crtc) return; + dev = crtc->dev; + nv_crtc = nouveau_crtc(crtc); + + NV_DEBUG_KMS(dev, "\n"); + drm_crtc_cleanup(&nv_crtc->base); nv50_cursor_fini(nv_crtc); diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index e4f279e..0f57cdf 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -3,7 +3,7 @@ #include "nouveau_dma.h" #include "nouveau_fbcon.h" -static void +void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbcon_par *par = info->par; @@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) FIRE_RING(chan); } -static void +void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nouveau_fbcon_par *par = info->par; @@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) FIRE_RING(chan); } -static void +void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbcon_par *par = info->par; @@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base); - info->fbops->fb_fillrect = nv50_fbcon_fillrect; - info->fbops->fb_copyarea = nv50_fbcon_copyarea; - info->fbops->fb_imageblit = nv50_fbcon_imageblit; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 32b244b..204a79f 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -317,17 +317,20 @@ void nv50_fifo_destroy_context(struct nouveau_channel 
*chan) { struct drm_device *dev = chan->dev; + struct nouveau_gpuobj_ref *ramfc = chan->ramfc; NV_DEBUG(dev, "ch%d\n", chan->id); - nouveau_gpuobj_ref_del(dev, &chan->ramfc); - nouveau_gpuobj_ref_del(dev, &chan->cache); - + /* This will ensure the channel is seen as disabled. */ + chan->ramfc = NULL; nv50_fifo_channel_disable(dev, chan->id, false); /* Dummy channel, also used on ch 127 */ if (chan->id == 0) nv50_fifo_channel_disable(dev, 127, false); + + nouveau_gpuobj_ref_del(dev, &ramfc); + nouveau_gpuobj_ref_del(dev, &chan->cache); } int diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 20319e5..6d50480 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev) uint32_t inst; int i; + /* Be sure we're not in the middle of a context switch or bad things + * will happen, such as unloading the wrong pgraph context. + */ + if (!nv_wait(0x400300, 0x00000001, 0x00000000)) + NV_ERROR(dev, "Ctxprog is still running\n"); + inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) return NULL; @@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan) int nv50_graph_unload_context(struct drm_device *dev) { - uint32_t inst, fifo = nv_rd32(dev, 0x400500); + uint32_t inst; inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) @@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev) inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; nouveau_wait_for_idle(dev); - nv_wr32(dev, 0x400500, fifo & ~1); nv_wr32(dev, 0x400784, inst); nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); nouveau_wait_for_idle(dev); - nv_wr32(dev, 0x400500, fifo); nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); return 0; diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index ecf1936..c2fff54 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c @@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) struct nouveau_encoder *nvenc = nouveau_encoder(enc); if (nvenc == nv_encoder || + nvenc->disconnect != nv50_sor_disconnect || nvenc->dcb->or != nv_encoder->dcb->or) continue; diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index 5982321..1c02d23 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig @@ -1,10 +1,14 @@ config DRM_RADEON_KMS - bool "Enable modesetting on radeon by default" + bool "Enable modesetting on radeon by default - NEW DRIVER" depends on DRM_RADEON help - Choose this option if you want kernel modesetting enabled by default, - and you have a new enough userspace to support this. Running old - userspaces with this enabled will cause pain. + Choose this option if you want kernel modesetting enabled by default. + + This is a completely new driver. It's only part of the existing drm + for compatibility reasons. It requires an entirely different graphics + stack above it and works very differently from the old drm stack. + i.e. don't enable this unless you know what you are doing it may + cause issues or bugs compared to the previous userspace driver stack. 
When kernel modesetting is enabled the IOCTL of radeon/drm driver are considered as invalid and an error message is printed diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 7106011..b32eeea 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); unsigned char *base; + int retry_count = 0; memset(&args, 0, sizeof(args)); base = (unsigned char *)rdev->mode_info.atom_context->scratch; +retry: memcpy(base, req_bytes, num_bytes); args.lpAuxRequest = 0; @@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - if (args.ucReplyStatus) { - DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", + if (args.ucReplyStatus && !args.ucDataOutLen) { + if (args.ucReplyStatus == 0x20 && retry_count < 10) + goto retry; + DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], - chan->rec.i2c_id, args.ucReplyStatus); + chan->rec.i2c_id, args.ucReplyStatus, retry_count); return false; } diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 11c9a3f..c0d4650 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) return RREG32(RADEON_CRTC2_CRNT_FRAME); } +/* Who ever call radeon_fence_emit should call ring_lock and ask + * for enough space (today caller are ib schedule and buffer move) */ void r100_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { - /* Who ever call radeon_fence_emit should call ring_lock and ask - * for enough space (today caller are ib schedule and buffer move) */ + /* We have to make sure that caches are flushed before + * CPU might read something from VRAM. 
*/ + radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); + radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); /* Wait until IDLE & CLEAN */ radeon_ring_write(rdev, PACKET0(0x1720, 0)); radeon_ring_write(rdev, (1 << 16) | (1 << 17)); @@ -3369,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev) void r100_fini(struct radeon_device *rdev) { - r100_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - r100_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); + radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); - radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 0051d11..43b55a0 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev) /* DDR for all card after R300 & IGP */ rdev->mc.vram_is_ddr = true; + tmp = RREG32(RADEON_MEM_CNTL); - if (tmp & R300_MEM_NUM_CHANNELS_MASK) { - rdev->mc.vram_width = 128; - } else { - rdev->mc.vram_width = 64; + tmp &= R300_MEM_NUM_CHANNELS_MASK; + switch (tmp) { + case 0: rdev->mc.vram_width = 64; break; + case 1: rdev->mc.vram_width = 128; break; + case 2: rdev->mc.vram_width = 256; break; + default: rdev->mc.vram_width = 128; break; } r100_vram_init_sizes(rdev); @@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev) void r300_fini(struct radeon_device *rdev) { - r300_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - r300_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); + radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); - radeon_irq_kms_fini(rdev); + radeon_agp_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 4526faa..d937324 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - r420_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); + radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); radeon_agp_fini(rdev); - radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 9a18907..ddf5731 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); + 
radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); - radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 1b6d000..2ffcf5a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size) rdev->cp.align_mask = 16 - 1; } +void r600_cp_fini(struct radeon_device *rdev) +{ + r600_cp_stop(rdev); + radeon_ring_fini(rdev); +} + /* * GPU scratch registers helpers function. @@ -1861,6 +1867,12 @@ int r600_startup(struct radeon_device *rdev) return r; } r600_gpu_init(rdev); + r = r600_blit_init(rdev); + if (r) { + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); + } /* pin copy shader into vram */ if (rdev->r600_blit.shader_obj) { r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); @@ -1938,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev) DRM_ERROR("radeon: failled testing IB (%d).\n", r); return r; } + + r = r600_audio_init(rdev); + if (r) { + DRM_ERROR("radeon: audio resume failed\n"); + return r; + } + return r; } @@ -1945,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev) { int r; + r600_audio_fini(rdev); /* FIXME: we should wait for ring to be empty */ r600_cp_stop(rdev); rdev->cp.ready = false; @@ -2045,19 +2065,15 @@ int r600_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } rdev->accel_working = true; r = r600_startup(rdev); if (r) { - r600_suspend(rdev); + dev_err(rdev->dev, "disabling GPU acceleration\n"); + r600_cp_fini(rdev); r600_wb_fini(rdev); - radeon_ring_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); r600_pcie_gart_fini(rdev); rdev->accel_working = false; } @@ -2083,20 +2099,17 @@ int r600_init(struct radeon_device *rdev) void r600_fini(struct radeon_device *rdev) { - /* Suspend operations */ - r600_suspend(rdev); - r600_audio_fini(rdev); r600_blit_fini(rdev); + r600_cp_fini(rdev); + r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); - radeon_ring_fini(rdev); - r600_wb_fini(rdev); r600_pcie_gart_fini(rdev); + radeon_agp_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); - radeon_agp_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); @@ -2900,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) return 0; #endif } + +/** + * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl + * rdev: radeon device structure + * bo: buffer object struct which userspace is waiting for idle + * + * Some R6XX/R7XX doesn't seems to take into account HDP flush performed + * through ring buffer, this leads to corruption in rendering, see + * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we + * directly perform HDP flush by writing register through MMIO. 
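[Illustrative aside, not part of the patch: the r600_ioctl_wait_idle addition documented in the comment above is wired up as an optional per-ASIC hook — a NULL-able function pointer added to the asic ops table (see the .ioctl_wait_idle = NULL / r600_ioctl_wait_idle entries and the radeon_gem.c guard further down). The sketch below shows that pattern in standalone C; fake_rdev, fake_asic_ops, r600_style_wait_idle and wait_idle_ioctl are simplified stand-ins, not the real radeon structures.]

    /*
     * Sketch of the "optional per-ASIC hook" shape used by this patch:
     * a NULL pointer means "no chip-specific work on wait-idle".
     */
    #include <stdio.h>

    struct fake_rdev;                      /* stand-in for struct radeon_device */

    struct fake_asic_ops {
            void (*ioctl_wait_idle)(struct fake_rdev *rdev);  /* may be NULL */
    };

    struct fake_rdev {
            const struct fake_asic_ops *asic;
    };

    static void r600_style_wait_idle(struct fake_rdev *rdev)
    {
            /* the real hook is a single MMIO write forcing the HDP flush */
            printf("MMIO flush for %p\n", (void *)rdev);
    }

    static void wait_idle_ioctl(struct fake_rdev *rdev)
    {
            /* generic ioctl path: call the hook only if the ASIC provides one */
            if (rdev->asic->ioctl_wait_idle)
                    rdev->asic->ioctl_wait_idle(rdev);
    }

    int main(void)
    {
            static const struct fake_asic_ops pre_r600 = { .ioctl_wait_idle = NULL };
            static const struct fake_asic_ops r600ish  = { .ioctl_wait_idle = r600_style_wait_idle };
            struct fake_rdev a = { &pre_r600 }, b = { &r600ish };

            wait_idle_ioctl(&a);   /* no-op */
            wait_idle_ioctl(&b);   /* performs the flush */
            return 0;
    }
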
+ */ +void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) +{ + WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); +} diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 99e2c38..0dcb690 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -35,7 +35,7 @@ */ static int r600_audio_chipset_supported(struct radeon_device *rdev) { - return rdev->family >= CHIP_R600 + return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) || rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740; @@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev) if (!r600_audio_chipset_supported(rdev)) return; - WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); - del_timer(&rdev->audio_timer); + WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2d5f2bf..f57480b 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -661,6 +661,13 @@ struct radeon_asic { void (*hpd_fini)(struct radeon_device *rdev); bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); + /* ioctl hw specific callback. Some hw might want to perform special + * operation on specific ioctl. For instance on wait idle some hw + * might want to perform and HDP flush through MMIO as it seems that + * some R6XX/R7XX hw doesn't take HDP flush into account if programmed + * through ring. + */ + void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); }; /* @@ -1143,6 +1150,7 @@ extern bool r600_card_posted(struct radeon_device *rdev); extern void r600_cp_stop(struct radeon_device *rdev); extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); extern int r600_cp_resume(struct radeon_device *rdev); +extern void r600_cp_fini(struct radeon_device *rdev); extern int r600_count_pipe_bits(uint32_t val); extern int r600_gart_clear_page(struct radeon_device *rdev, int i); extern int r600_mc_wait_for_idle(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index f2fbd2e..05ee1ae 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; /* @@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = { .hpd_fini = &r100_hpd_fini, .hpd_sense = &r100_hpd_sense, .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ 
-421,6 +427,7 @@ static struct radeon_asic rv515_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; @@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = { .hpd_fini = &rs600_hpd_fini, .hpd_sense = &rs600_hpd_sense, .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, }; /* @@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); void r600_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd); +extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); static struct radeon_asic r600_asic = { .init = &r600_init, @@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = { .hpd_fini = &r600_hpd_fini, .hpd_sense = &r600_hpd_sense, .hpd_set_polarity = &r600_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, }; /* @@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = { .hpd_fini = &r600_hpd_fini, .hpd_sense = &r600_hpd_sense, .hpd_set_polarity = &r600_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, }; #endif diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index fa82ca7..2dcda61 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -287,6 +287,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, *connector_type = DRM_MODE_CONNECTOR_DVID; } + /* XFX Pine Group device rv730 reports no VGA DDC lines + * even though they are wired up to record 0x93 + */ + if ((dev->pdev->device == 0x9498) && + (dev->pdev->subsystem_vendor == 0x1682) && + (dev->pdev->subsystem_device == 0x2452)) { + struct radeon_device *rdev = dev->dev_private; + *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); + } return true; } diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 4ddfd4b..7932dc4 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, if (r) { goto out_cleanup; } - start_jiffies = jiffies; - for (i = 0; i < n; i++) { - r = radeon_fence_create(rdev, &fence); - if (r) { - goto out_cleanup; + + /* r100 doesn't have dma engine so skip the test */ + if (rdev->asic->copy_dma) { + + start_jiffies = jiffies; + for (i = 0; i < n; i++) { + r = radeon_fence_create(rdev, &fence); + if (r) { + goto out_cleanup; + } + + r = radeon_copy_dma(rdev, saddr, daddr, + size / RADEON_GPU_PAGE_SIZE, fence); + + if (r) { + goto out_cleanup; + } + r = radeon_fence_wait(fence, false); + if (r) { + goto out_cleanup; + } + radeon_fence_unref(&fence); } - r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); - if (r) { - goto out_cleanup; + end_jiffies = jiffies; + time = end_jiffies - start_jiffies; + time = jiffies_to_msecs(time); + if (time > 0) { + i = ((n * size) >> 10) / time; + printk(KERN_INFO "radeon: dma %u bo moves of %ukb from" + " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n", + n, size >> 10, + sdomain, ddomain, time, + i, i * 1000, (i * 1000) / 1024); } - r = radeon_fence_wait(fence, false); - if (r) { - goto out_cleanup; - } - radeon_fence_unref(&fence); - } - end_jiffies = jiffies; - time = end_jiffies - start_jiffies; - time = jiffies_to_msecs(time); - if (time > 0) { - i = ((n * size) >> 10) / time; - printk(KERN_INFO "radeon: dma %u bo moves of 
%ukb from %d to %d" - " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10, - sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024); } + start_jiffies = jiffies; for (i = 0; i < n; i++) { r = radeon_fence_create(rdev, &fence); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 579c892..e7b1944 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder lvds->native_mode.vdisplay); lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); - if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) - lvds->panel_vcc_delay = 2000; + lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000); lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 5526641..2381885 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; struct drm_encoder_helper_funcs *encoder_funcs; - bool dret; + bool dret = false; enum drm_connector_status ret = connector_status_disconnected; encoder = radeon_best_single_encoder(connector); if (!encoder) ret = connector_status_disconnected; - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); - dret = radeon_ddc_probe(radeon_connector); - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); + if (radeon_connector->ddc_bus) { + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); + dret = radeon_ddc_probe(radeon_connector); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); + } if (dret) { if (radeon_connector->edid) { kfree(radeon_connector->edid); @@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect struct drm_mode_object *obj; int i; enum drm_connector_status ret = connector_status_disconnected; - bool dret; + bool dret = false; - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); - dret = radeon_ddc_probe(radeon_connector); - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); + if (radeon_connector->ddc_bus) { + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); + dret = radeon_ddc_probe(radeon_connector); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); + } if (dret) { if (radeon_connector->edid) { kfree(radeon_connector->edid); @@ -1343,7 +1347,7 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_connector->dac_load_detect = false; drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, - 1); + radeon_connector->dac_load_detect); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6a92f99..7e17a36 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev) DRM_INFO(" %s\n", connector_names[connector->connector_type]); if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); - if (radeon_connector->ddc_bus) + if (radeon_connector->ddc_bus) { 
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", radeon_connector->ddc_bus->rec.mask_clk_reg, radeon_connector->ddc_bus->rec.mask_data_reg, @@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev) radeon_connector->ddc_bus->rec.en_data_reg, radeon_connector->ddc_bus->rec.y_clk_reg, radeon_connector->ddc_bus->rec.y_data_reg); + } else { + if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || + connector->connector_type == DRM_MODE_CONNECTOR_DVII || + connector->connector_type == DRM_MODE_CONNECTOR_DVID || + connector->connector_type == DRM_MODE_CONNECTOR_DVIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) + DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n"); + } DRM_INFO(" Encoders:\n"); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 3ba213d..d71e346 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev, if (ret) goto out_unref; - memset_io(fbptr, 0xff, aligned_size); + memset_io(fbptr, 0x0, aligned_size); strcpy(info->fix.id, "radeondrmfb"); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 0e1325e..db8e9a3 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } robj = gobj->driver_private; r = radeon_bo_wait(robj, NULL, false); + /* callback hw specific functions if any */ + if (robj->rdev->asic->ioctl_wait_idle) + robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(gobj); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 9f54189..287fceb 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return 0; } +int rs400_mc_wait_for_idle(struct radeon_device *rdev) +{ + unsigned i; + uint32_t tmp; + + for (i = 0; i < rdev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(0x0150); + if (tmp & (1 << 2)) { + return 0; + } + DRM_UDELAY(1); + } + return -1; +} + void rs400_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs400 ? */ r100_hdp_reset(rdev); /* FIXME: is this correct ? */ r420_pipes_init(rdev); - if (r300_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait MC idle while " - "programming pipes. Bad things might happen.\n"); + if (rs400_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "rs400: Failed to wait MC idle while " + "programming pipes. Bad things might happen. 
%08x\n", RREG32(0x150)); } } @@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev) r100_mc_stop(rdev, &save); /* Wait for mc idle */ - if (r300_mc_wait_for_idle(rdev)) - dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); + if (rs400_mc_wait_for_idle(rdev)) + dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n"); WREG32(R_000148_MC_FB_LOCATION, S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); @@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev) void rs400_fini(struct radeon_device *rdev) { - rs400_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rs400_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d525575..c381856 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev) void rs600_fini(struct radeon_device *rdev) { - rs600_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rs600_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index cd31da9..06e2771 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev) void rs690_fini(struct radeon_device *rdev) { - rs690_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rs690_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 6275671..0e1e6b8 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev) void rv515_fini(struct radeon_device *rdev) { - rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev) if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rv515_suspend(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); + radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); - radeon_irq_kms_fini(rdev); rdev->accel_working = false; } return 0; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index afd9e82..5943d56 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -887,6 +887,12 @@ static int rv770_startup(struct radeon_device *rdev) return r; } rv770_gpu_init(rdev); + r = r600_blit_init(rdev); + if (r) { + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); + } /* pin copy shader into vram */ if 
(rdev->r600_blit.shader_obj) { r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); @@ -1055,19 +1061,15 @@ int rv770_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) return r; - r = r600_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } rdev->accel_working = true; r = rv770_startup(rdev); if (r) { - rv770_suspend(rdev); + dev_err(rdev->dev, "disabling GPU acceleration\n"); + r600_cp_fini(rdev); r600_wb_fini(rdev); - radeon_ring_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); rdev->accel_working = false; } @@ -1089,13 +1091,11 @@ int rv770_init(struct radeon_device *rdev) void rv770_fini(struct radeon_device *rdev) { - rv770_suspend(rdev); - r600_blit_fini(rdev); + r600_cp_fini(rdev); + r600_wb_fini(rdev); r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); - radeon_ring_fini(rdev); - r600_wb_fini(rdev); rv770_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 135be96..356dc93 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -39,10 +39,10 @@ #include "ttm/ttm_execbuf_util.h" #include "ttm/ttm_module.h" -#define VMWGFX_DRIVER_DATE "20090724" -#define VMWGFX_DRIVER_MAJOR 0 -#define VMWGFX_DRIVER_MINOR 1 -#define VMWGFX_DRIVER_PATCHLEVEL 2 +#define VMWGFX_DRIVER_DATE "20100209" +#define VMWGFX_DRIVER_MAJOR 1 +#define VMWGFX_DRIVER_MINOR 0 +#define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 @@ -113,6 +113,7 @@ struct vmw_fifo_state { unsigned long static_buffer_size; bool using_bounce_buffer; uint32_t capabilities; + struct mutex fifo_mutex; struct rw_semaphore rwsem; }; @@ -213,7 +214,7 @@ struct vmw_private { * Fencing and IRQs. 
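[Illustrative aside, not part of the patch: the vmwgfx hunks below convert fence_seq from a plain uint32_t guarded by the FIFO rwsem into an atomic counter read with atomic_read() and advanced with atomic_add_return(), with a do/while loop that never hands out sequence 0. The standalone sketch below shows that allocation loop using C11 atomics as a stand-in for the kernel's atomic_t; fence_seq and next_fence_sequence are made-up names for illustration only.]

    /*
     * Sketch of the never-zero fence sequence allocation used by the
     * vmwgfx change: fetch-add returns the old value, +1 gives the new
     * sequence (like atomic_add_return), and 0 is skipped on wrap.
     */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint fence_seq;   /* zero-initialized, like atomic_set(.., 0) */

    static uint32_t next_fence_sequence(void)
    {
            uint32_t seq;

            do {
                    seq = atomic_fetch_add(&fence_seq, 1) + 1;
            } while (seq == 0);     /* 0 is reserved, matching the patch's do/while */

            return seq;
    }

    int main(void)
    {
            printf("%u %u %u\n", next_fence_sequence(),
                   next_fence_sequence(), next_fence_sequence());
            return 0;
    }
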
*/ - uint32_t fence_seq; + atomic_t fence_seq; wait_queue_head_t fence_queue; wait_queue_head_t fifo_queue; atomic_t fence_queue_waiters; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 4157547..39d43a0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) fifo->reserved_size = 0; fifo->using_bounce_buffer = false; + mutex_init(&fifo->fifo_mutex); init_rwsem(&fifo->rwsem); /* @@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) (unsigned int) min, (unsigned int) fifo->capabilities); - dev_priv->fence_seq = dev_priv->last_read_sequence; + atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); return vmw_fifo_send_fence(dev_priv, &dummy); @@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; int ret; - down_write(&fifo_state->rwsem); + mutex_lock(&fifo_state->fifo_mutex); max = ioread32(fifo_mem + SVGA_FIFO_MAX); min = ioread32(fifo_mem + SVGA_FIFO_MIN); next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); @@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) } out_err: fifo_state->reserved_size = 0; - up_write(&fifo_state->rwsem); + mutex_unlock(&fifo_state->fifo_mutex); return NULL; } @@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) } + down_write(&fifo_state->rwsem); if (fifo_state->using_bounce_buffer || reserveable) { next_cmd += bytes; if (next_cmd >= max) @@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) if (reserveable) iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); mb(); - vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); up_write(&fifo_state->rwsem); + vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); + mutex_unlock(&fifo_state->fifo_mutex); } int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) @@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) fm = vmw_fifo_reserve(dev_priv, bytes); if (unlikely(fm == NULL)) { - down_write(&fifo_state->rwsem); - *sequence = dev_priv->fence_seq; - up_write(&fifo_state->rwsem); + *sequence = atomic_read(&dev_priv->fence_seq); ret = -ENOMEM; (void)vmw_fallback_wait(dev_priv, false, true, *sequence, false, 3*HZ); @@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) } do { - *sequence = dev_priv->fence_seq++; + *sequence = atomic_add_return(1, &dev_priv->fence_seq); } while (*sequence == 0); if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 778851f..1c7a316 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, case DRM_VMW_PARAM_FIFO_OFFSET: param->value = dev_priv->mmio_start; break; + case DRM_VMW_PARAM_HW_CAPS: + param->value = dev_priv->capabilities; + break; + case DRM_VMW_PARAM_FIFO_CAPS: + param->value = dev_priv->fifo.capabilities; + break; default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", param->param); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 
d40086f..4d7cb53 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv, return true; /** - * Below is to signal stale fences that have wrapped. - * First, block fence submission. - */ - - down_read(&fifo_state->rwsem); - - /** * Then check if the sequence is higher than what we've actually * emitted. Then the fence is stale and signaled. */ - ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); - up_read(&fifo_state->rwsem); + ret = ((atomic_read(&dev_priv->fence_seq) - sequence) + > VMW_FENCE_WRAP); return ret; } @@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, if (fifo_idle) down_read(&fifo_state->rwsem); - signal_seq = dev_priv->fence_seq; + signal_seq = atomic_read(&dev_priv->fence_seq); ret = 0; for (;;) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index eeba6d1..31f9afe 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv) drm_mode_config_init(dev); dev->mode_config.funcs = &vmw_kms_funcs; - dev->mode_config.min_width = 640; - dev->mode_config.min_height = 480; - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; + dev->mode_config.min_width = 1; + dev->mode_config.min_height = 1; + dev->mode_config.max_width = dev_priv->fb_max_width; + dev->mode_config.max_height = dev_priv->fb_max_height; ret = vmw_kms_init_legacy_display_system(dev_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c7efbd47..f8fbbc6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -35,11 +35,6 @@ #define VMW_RES_SURFACE ttm_driver_type1 #define VMW_RES_STREAM ttm_driver_type2 -/* XXX: This isn't a real hardware flag, but just a hack for kernel to - * know about primary surfaces. Find a better way to accomplish this. 
- */ -#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9) - struct vmw_user_context { struct ttm_base_object base; struct vmw_resource res; @@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, srf->flags = req->flags; srf->format = req->format; + srf->scanout = req->scanout; memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); srf->num_sizes = 0; for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) @@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) goto out_err1; - if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) { - /* we should not send this flag down to hardware since - * its not a official one - */ - srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT; - srf->scanout = true; - } else { - srf->scanout = false; - } - if (srf->scanout && srf->num_sizes == 1 && srf->sizes[0].width == 64 && diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 1ac0c93..24b56dc 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, remaining -= 7; pr_devel("client 0x%p called 'target'\n", priv); /* if target is default */ - if (!strncmp(buf, "default", 7)) + if (!strncmp(kbuf, "default", 7)) pdev = pci_dev_get(vga_default_device()); else { if (!vga_pci_str_to_vars(curr_pos, remaining, diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index a31e77c..b8156b4 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c @@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; * * Some, but not all, of these voltages have low/high limits. */ -#define ADT7462_VOLT_COUNT 12 +#define ADT7462_VOLT_COUNT 13 #define ADT7462_VENDOR 0x41 #define ADT7462_DEVICE 0x62 diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index cadcbd9..72ff2c4 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev) static int __init lm78_isa_found(unsigned short address) { int val, save, found = 0; - - /* We have to request the region in two parts because some - boards declare base+4 to base+7 as a PNP device */ - if (!request_region(address, 4, "lm78")) { - pr_debug("lm78: Failed to request low part of region\n"); - return 0; - } - if (!request_region(address + 4, 4, "lm78")) { - pr_debug("lm78: Failed to request high part of region\n"); - release_region(address, 4); - return 0; + int port; + + /* Some boards declare base+0 to base+7 as a PNP device, some base+4 + * to base+7 and some base+5 to base+6. So we better request each port + * individually for the probing phase. */ + for (port = address; port < address + LM78_EXTENT; port++) { + if (!request_region(port, 1, "lm78")) { + pr_debug("lm78: Failed to request port 0x%x\n", port); + goto release; + } } #define REALLY_SLOW_IO @@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address) val & 0x80 ? 
"LM79" : "LM78", (int)address); release: - release_region(address + 4, 4); - release_region(address, 4); + for (port--; port >= address; port--) + release_region(port, 1); return found; } diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 05f9225..32d4ade 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -1793,17 +1793,17 @@ static int __init w83781d_isa_found(unsigned short address) { int val, save, found = 0; - - /* We have to request the region in two parts because some - boards declare base+4 to base+7 as a PNP device */ - if (!request_region(address, 4, "w83781d")) { - pr_debug("w83781d: Failed to request low part of region\n"); - return 0; - } - if (!request_region(address + 4, 4, "w83781d")) { - pr_debug("w83781d: Failed to request high part of region\n"); - release_region(address, 4); - return 0; + int port; + + /* Some boards declare base+0 to base+7 as a PNP device, some base+4 + * to base+7 and some base+5 to base+6. So we better request each port + * individually for the probing phase. */ + for (port = address; port < address + W83781D_EXTENT; port++) { + if (!request_region(port, 1, "w83781d")) { + pr_debug("w83781d: Failed to request port 0x%x\n", + port); + goto release; + } } #define REALLY_SLOW_IO @@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address) val == 0x30 ? "W83782D" : "W83781D", (int)address); release: - release_region(address + 4, 4); - release_region(address, 4); + for (port--; port >= address; port--) + release_region(port, 1); return found; } diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index b1c050f..e29b6d5 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> +#include <linux/types.h> /* include interfaces to usb layer */ #include <linux/usb.h> @@ -31,8 +32,8 @@ #define CMD_I2C_IO_END (1<<1) /* i2c bit delay, default is 10us -> 100kHz */ -static int delay = 10; -module_param(delay, int, 0); +static unsigned short delay = 10; +module_param(delay, ushort, 0); MODULE_PARM_DESC(delay, "bit delay in microseconds, " "e.g. 
10 for 100kHz (default is 100kHz)"); @@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) static u32 usb_func(struct i2c_adapter *adapter) { - u32 func; + __le32 func; /* get functionality from adapter */ if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != @@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter) return 0; } - return func; + return le32_to_cpu(func); } /* This is the actual algorithm we define */ @@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface, "i2c-tiny-usb at bus %03d device %03d", dev->usb_dev->bus->busnum, dev->usb_dev->devnum); - if (usb_write(&dev->adapter, CMD_SET_DELAY, - cpu_to_le16(delay), 0, NULL, 0) != 0) { + if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) { dev_err(&dev->adapter.dev, "failure setting delay to %dus\n", delay); retval = -EIO; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index cc9b594..875e34e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) if (ret) goto err1; - if (cma_loopback_addr(addr)) { - ret = cma_bind_loopback(id_priv); - } else if (!cma_zero_addr(addr)) { + if (!cma_any_addr(addr)) { ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); if (ret) goto err1; diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 9774bdf..d8c0c8d 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -1141,7 +1141,14 @@ static void psmouse_cleanup(struct serio *serio) psmouse_deactivate(parent); } - psmouse_deactivate(psmouse); + psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); + + /* + * Disable stream mode so cleanup routine can proceed undisturbed. 
+ */ + if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE)) + printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n", + psmouse->ps2dev.serio->phys); if (psmouse->cleanup) psmouse->cleanup(psmouse); diff --git a/drivers/md/md.c b/drivers/md/md.c index dd3dfe4..a20a71e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws) { mddev_t *mddev = container_of(ws, mddev_t, del_work); - if (mddev->private == &md_redundancy_group) { + if (mddev->private) { sysfs_remove_group(&mddev->kobj, &md_redundancy_group); + if (mddev->private != (void*)1) + sysfs_remove_group(&mddev->kobj, mddev->private); if (mddev->sysfs_action) sysfs_put(mddev->sysfs_action); mddev->sysfs_action = NULL; @@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev) sysfs_notify_dirent(rdev->sysfs_state); } - md_probe(mddev->unit, NULL, NULL); disk = mddev->gendisk; - if (!disk) - return -ENOMEM; spin_lock(&pers_lock); pers = find_pers(mddev->level, mddev->clevel); @@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) mddev->queue->unplug_fn = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; module_put(mddev->pers->owner); - if (mddev->pers->sync_request) - mddev->private = &md_redundancy_group; + if (mddev->pers->sync_request && mddev->private == NULL) + mddev->private = (void*)1; mddev->pers = NULL; /* tell userspace to handle 'inactive' */ sysfs_notify_dirent(mddev->sysfs_state); @@ -4578,9 +4577,6 @@ out: } mddev->bitmap_info.offset = 0; - /* make sure all md_delayed_delete calls have finished */ - flush_scheduled_work(); - export_array(mddev); mddev->array_sectors = 0; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e84204e..ceb24af 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev) mddev->thread = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ - sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); free_conf(conf); - mddev->private = NULL; + mddev->private = &raid5_attrs_group; return 0; } @@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev) !test_bit(Faulty, &rdev->flags)) { if (raid5_add_disk(mddev, rdev) == 0) { char nm[20]; - if (rdev->raid_disk >= conf->previous_raid_disks) + if (rdev->raid_disk >= conf->previous_raid_disks) { set_bit(In_sync, &rdev->flags); - else + added_devices++; + } else rdev->recovery_offset = 0; - added_devices++; sprintf(nm, "rd%d", rdev->raid_disk); if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) @@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev) break; } + /* When a reshape changes the number of devices, ->degraded + * is measured against the large of the pre and post number of + * devices.*/ if (mddev->delta_disks > 0) { spin_lock_irqsave(&conf->device_lock, flags); - mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) + mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) - added_devices; spin_unlock_irqrestore(&conf->device_lock, flags); } diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c index becbaad..5ed7526 100644 --- a/drivers/media/common/saa7146_video.c +++ b/drivers/media/common/saa7146_video.c @@ -1333,9 +1333,9 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) DEB_CAP(("vbuf:%p\n",vb)); - release_all_pagetables(dev, buf); - saa7146_dma_free(dev,q,buf); + + 
release_all_pagetables(dev, buf); } static struct videobuf_queue_ops video_qops = { diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c index c37790a..9ddc579 100644 --- a/drivers/media/dvb/dvb-core/dmxdev.c +++ b/drivers/media/dvb/dvb-core/dmxdev.c @@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file) dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); dmxdevfilter->type = DMXDEV_TYPE_NONE; dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); - INIT_LIST_HEAD(&dmxdevfilter->feed.ts); init_timer(&dmxdevfilter->timer); dvbdev->users++; @@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, dmxdevfilter->type = DMXDEV_TYPE_PES; memcpy(&dmxdevfilter->params, params, sizeof(struct dmx_pes_filter_params)); + INIT_LIST_HEAD(&dmxdevfilter->feed.ts); dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c index b78cfb7..67f189b 100644 --- a/drivers/media/dvb/dvb-core/dvb_demux.c +++ b/drivers/media/dvb/dvb-core/dvb_demux.c @@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) }; }; - if (dvb_demux_tscheck) { - if (!demux->cnt_storage) - demux->cnt_storage = vmalloc(MAX_PID + 1); - - if (!demux->cnt_storage) { - printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n"); - dvb_demux_tscheck = 0; - goto no_dvb_demux_tscheck; - } - + if (demux->cnt_storage) { /* check pkt counter */ if (pid < MAX_PID) { if (buf[1] & 0x80) @@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) }; /* end check */ }; -no_dvb_demux_tscheck: list_for_each_entry(feed, &demux->feed_list, list_head) { if ((feed->pid != pid) && (feed->pid != 0x2000)) @@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); if (!dvbdemux->feed) { vfree(dvbdemux->filter); + dvbdemux->filter = NULL; return -ENOMEM; } for (i = 0; i < dvbdemux->filternum; i++) { @@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dvbdemux->feed[i].index = i; } + if (dvb_demux_tscheck) { + dvbdemux->cnt_storage = vmalloc(MAX_PID + 1); + + if (!dvbdemux->cnt_storage) + printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. 
Disabling it\n"); + } + INIT_LIST_HEAD(&dvbdemux->frontend_list); for (i = 0; i < DMX_TS_PES_OTHER; i++) { diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c index 7dfecfc..ee5bff0 100644 --- a/drivers/media/video/saa7134/saa7134-empress.c +++ b/drivers/media/video/saa7134/saa7134-empress.c @@ -93,9 +93,9 @@ static int ts_open(struct file *file) dprintk("open dev=%s\n", video_device_node_name(vdev)); err = -EBUSY; if (!mutex_trylock(&dev->empress_tsq.vb_lock)) - goto done; + return err; if (atomic_read(&dev->empress_users)) - goto done_up; + goto done; /* Unmute audio */ saa_writeb(SAA7134_AUDIO_MUTE_CTRL, @@ -105,10 +105,8 @@ static int ts_open(struct file *file) file->private_data = dev; err = 0; -done_up: - mutex_unlock(&dev->empress_tsq.vb_lock); done: - unlock_kernel(); + mutex_unlock(&dev->empress_tsq.vb_lock); return err; } diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 5775275..81279b3 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " "Command not in the active list! (sc=%p)\n", ioc->name, SCpnt)); - retval = 0; + retval = SUCCESS; goto out; } diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index b9f1e84..e7f8027 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test, } mrq->cmd->arg = dev_addr; + if (!mmc_card_blockaddr(test->card)) + mrq->cmd->arg <<= 9; + mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; if (blocks == 1) @@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write) } for (i = 0;i < BUFFER_SIZE / 512;i++) { - ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); + ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); if (ret) return ret; } @@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test) memset(test->buffer, 0, 512); for (i = 0;i < BUFFER_SIZE / 512;i++) { - ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); + ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); if (ret) return ret; } @@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test, for (i = 0;i < sectors;i++) { ret = mmc_test_buffer_transfer(test, test->buffer + i * 512, - dev_addr + i * 512, 512, 0); + dev_addr + i, 512, 0); if (ret) return ret; } diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 62d9c9c..1dd4403 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c @@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev) size = (res->end - res->start) + 1; ax->mem2 = request_mem_region(res->start, size, pdev->name); - if (ax->mem == NULL) { + if (ax->mem2 == NULL) { dev_err(&pdev->dev, "cannot reserve registers\n"); ret = -ENXIO; goto exit_mem1; diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index bdbd147..318a018 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, struct sge_fl *fl, int len, int complete) { struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; + struct port_info *pi = netdev_priv(qs->netdev); struct sk_buff *skb = NULL; struct cpl_rx_pkt *cpl; struct skb_frag_struct *rx_frag; @@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct 
sge_qset *qs, if (!nr_frags) { offset = 2 + sizeof(struct cpl_rx_pkt); - qs->lro_va = sd->pg_chunk.va + 2; - } - len -= offset; + cpl = qs->lro_va = sd->pg_chunk.va + 2; - prefetch(qs->lro_va); + if ((pi->rx_offload & T3_RX_CSUM) && + cpl->csum_valid && cpl->csum == htons(0xffff)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; + } else + skb->ip_summed = CHECKSUM_NONE; + } else + cpl = qs->lro_va; + + len -= offset; rx_frag += nr_frags; rx_frag->page = sd->pg_chunk.page; @@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, return; skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); - skb->ip_summed = CHECKSUM_UNNECESSARY; - cpl = qs->lro_va; if (unlikely(cpl->vlan_valid)) { - struct net_device *dev = qs->netdev; - struct port_info *pi = netdev_priv(dev); struct vlan_group *grp = pi->vlan_grp; if (likely(grp != NULL)) { diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 997124d..c881347 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; if (tx_queue > IGB_N0_QUEUE) msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; + if (!adapter->msix_entries && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); q_vector->eims_value = msixbm; break; @@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - struct e1000_hw *hw = &adapter->hw; int err = 0; if (adapter->msix_entries) { @@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter) igb_setup_all_tx_resources(adapter); igb_setup_all_rx_resources(adapter); } else { - switch (hw->mac.type) { - case e1000_82575: - wr32(E1000_MSIXBM(0), - (E1000_EICR_RX_QUEUE0 | - E1000_EICR_TX_QUEUE0 | - E1000_EIMS_OTHER)); - break; - case e1000_82580: - case e1000_82576: - wr32(E1000_IVAR0, E1000_IVAR_VALID); - break; - default: - break; - } + igb_assign_vector(adapter->q_vector[0], 0); } if (adapter->flags & IGB_FLAG_HAS_MSI) { @@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter) } if (adapter->msix_entries) igb_configure_msix(adapter); + else + igb_assign_vector(adapter->q_vector[0], 0); /* Clear any pending interrupts. 
*/ rd32(E1000_ICR); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b5f64ad..7b7c848 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -5179,7 +5179,7 @@ dma_error: ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); } - return count; + return 0; } static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, @@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) struct ixgbe_adapter *adapter = netdev_priv(dev); int txq = smp_processor_id(); - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + while (unlikely(txq >= dev->real_num_tx_queues)) + txq -= dev->real_num_tx_queues; return txq; + } #ifdef IXGBE_FCOE if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 9f9d608..24279e6 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work) netif_wake_queue(adapter->netdev); clear_bit(__NX_RESETTING, &adapter->state); - + return; } else { clear_bit(__NX_RESETTING, &adapter->state); if (!netxen_nic_reset_context(adapter)) { @@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work) netxen_nic_down(adapter, netdev); + rtnl_lock(); netxen_nic_detach(adapter); + rtnl_unlock(); status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index d760650..67249c3 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) { struct sky2_tx_le *le = sky2->tx_le + *slot; - struct tx_ring_info *re = sky2->tx_ring + *slot; *slot = RING_NEXT(*slot, sky2->tx_ring_size); - re->flags = 0; - re->skb = NULL; le->ctrl = 0; return le; } @@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb) return count; } -static void sky2_tx_unmap(struct pci_dev *pdev, - const struct tx_ring_info *re) +static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) { if (re->flags & TX_MAP_SINGLE) pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), @@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev, pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), pci_unmap_len(re, maplen), PCI_DMA_TODEVICE); + re->flags = 0; } /* @@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; + re->skb = NULL; dev_kfree_skb_any(skb); sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c746943..d58b940 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); +/* + * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS + * ver. 1.33 20070103) don't set the correct ISA PCI region header info. + * BAR0 should be 8 bytes; instead, it may be set to something like 8k + * (which conflicts w/ BAR1's memory range). 
+ */ +static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) +{ + if (pci_resource_len(dev, 0) != 8) { + struct resource *res = &dev->resource[0]; + res->end = res->start + 8 - 1; + dev_info(&dev->dev, "CS5536 ISA bridge bug detected " + "(incorrect header); workaround applied.\n"); + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); + static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsigned size, int nr, const char *name) { diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index fa39e75..6ea3cb5 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) dev_err(&dev->dev, "Do not pass platform_data through " "wm97xx_bat_set_pdata!\n"); return -EINVAL; - } else - pdata = wmdata->batt_pdata; + } + + if (!wmdata) { + dev_err(&dev->dev, "No platform data supplied\n"); + return -EINVAL; + } + + pdata = wmdata->batt_pdata; if (dev->id != -1) return -EINVAL; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 686ef27..b60a4c9 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -661,7 +661,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state) static void print_constraints(struct regulator_dev *rdev) { struct regulation_constraints *constraints = rdev->constraints; - char buf[80]; + char buf[80] = ""; int count = 0; int ret; diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index 76d08c2..4f33a0f 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -183,7 +183,7 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev, if (vol_map[val] >= min_vol) break; - if (vol_map[val] > max_vol) + if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol) return -EINVAL; return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), @@ -272,7 +272,7 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev, if (vol_map[val] >= min_vol) break; - if (vol_map[val] > max_vol) + if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol) return -EINVAL; ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck), diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 999fe80..62b654a 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) qdio_siga_sync_q(q); get_buf_state(q, q->first_to_check, &state, 0); - if (state == SLSB_P_INPUT_PRIMED) + if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) /* more work coming */ return 0; @@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, qdio_handle_activate_check(cdev, intparm, cstat, dstat); break; + case QDIO_IRQ_STATE_STOPPED: + break; default: WARN_ON(1); } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 0f7b493..271399f 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -671,12 +671,11 @@ static void zfcp_fc_ct_els_job_handler(void *data) { struct fc_bsg_job *job = data; struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; - int status = zfcp_ct_els->status; - int reply_status; + struct fc_bsg_reply *jr = job->reply; - reply_status = status ? 
FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; - job->reply->reply_data.ctels_reply.status = reply_status; - job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; + jr->reply_payload_rcv_len = job->reply_payload.payload_len; + jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + jr->result = zfcp_ct_els->status ? -EIO : 0; job->job_done(job); } diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f61fb8d..8bc6f53 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -453,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); -extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index ffd0efd..6fc63b9 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1917,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; struct scsi_qla_host *vha; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1927,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) ha = rsp->hw; reg = &ha->iobase->isp24; - spin_lock_irq(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); - vha = qla25xx_get_host(rsp); + vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); if (!ha->flags.disable_msix_handshake) { WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(®->hccr); } - spin_unlock_irq(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } @@ -1946,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1958,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) /* Clear the interrupt, if enabled, for this response queue */ if (rsp->options & ~BIT_6) { reg = &ha->iobase->isp24; - spin_lock_irq(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(®->hccr); - spin_unlock_irq(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); } queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); @@ -1979,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id) uint32_t stat; uint32_t hccr; uint16_t mb[4]; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1990,7 +1993,7 @@ qla24xx_msix_default(int irq, void *dev_id) reg = &ha->iobase->isp24; status = 0; - spin_lock_irq(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); do { stat = RD_REG_DWORD(®->host_status); @@ -2039,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id) } WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); } while (0); - spin_unlock_irq(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { @@ -2277,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp) msix->rsp = rsp; return ret; } - -struct scsi_qla_host * -qla25xx_get_host(struct rsp_que *rsp) -{ - srb_t *sp; - struct 
qla_hw_data *ha = rsp->hw; - struct scsi_qla_host *vha = NULL; - struct sts_entry_24xx *pkt; - struct req_que *req; - uint16_t que; - uint32_t handle; - - pkt = (struct sts_entry_24xx *) rsp->ring_ptr; - que = MSW(pkt->handle); - handle = (uint32_t) LSW(pkt->handle); - req = ha->req_q_map[que]; - if (handle < MAX_OUTSTANDING_COMMANDS) { - sp = req->outstanding_cmds[handle]; - if (sp) - return sp->fcport->vha; - else - goto base_que; - } -base_que: - vha = pci_get_drvdata(ha->pdev); - return vha; -} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index b901aa2..ff17dee 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -636,13 +636,15 @@ failed: static void qla_do_work(struct work_struct *work) { + unsigned long flags; struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); struct scsi_qla_host *vha; + struct qla_hw_data *ha = rsp->hw; - spin_lock_irq(&rsp->hw->hardware_lock); - vha = qla25xx_get_host(rsp); + spin_lock_irqsave(&rsp->hw->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); - spin_unlock_irq(&rsp->hw->hardware_lock); + spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags); } /* create response queue */ diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 0ceec12..bee558ae 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -35,7 +35,9 @@ #include <linux/usb.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/mm.h> #include <linux/irq.h> +#include <asm/cacheflush.h> #include "../core/hcd.h" #include "r8a66597.h" @@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb, enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); } +static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb, + int status) +__releases(r8a66597->lock) +__acquires(r8a66597->lock) +{ + if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { + void *ptr; + + for (ptr = urb->transfer_buffer; + ptr < urb->transfer_buffer + urb->transfer_buffer_length; + ptr += PAGE_SIZE) + flush_dcache_page(virt_to_page(ptr)); + } + + usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); + spin_unlock(&r8a66597->lock); + usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status); + spin_lock(&r8a66597->lock); +} + /* this function must be called with interrupt disabled */ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) { @@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) list_del(&td->queue); kfree(td); - if (urb) { - usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), - urb); + if (urb) + r8a66597_urb_done(r8a66597, urb, -ENODEV); - spin_unlock(&r8a66597->lock); - usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, - -ENODEV); - spin_lock(&r8a66597->lock); - } break; } } @@ -1006,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port, /* this function must be called with interrupt disabled */ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, u16 syssts) +__releases(r8a66597->lock) +__acquires(r8a66597->lock) { if (syssts == SE0) { r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); @@ -1023,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); } + spin_unlock(&r8a66597->lock); usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); 
+ spin_lock(&r8a66597->lock); } /* this function must be called with interrupt disabled */ @@ -1283,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock) if (usb_pipeisoc(urb->pipe)) urb->start_frame = r8a66597_get_frame(hcd); - usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); - spin_unlock(&r8a66597->lock); - usb_hcd_giveback_urb(hcd, urb, status); - spin_lock(&r8a66597->lock); + r8a66597_urb_done(r8a66597, urb, status); } if (restart) { diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index cf62b05..7d6c213 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -84,7 +84,7 @@ static const match_table_t tokens = { static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) { - char *options; + char *options, *tmp_options; substring_t args[MAX_OPT_ARGS]; char *p; int option = 0; @@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) if (!opts) return 0; - options = kstrdup(opts, GFP_KERNEL); - if (!options) + tmp_options = kstrdup(opts, GFP_KERNEL); + if (!tmp_options) { + ret = -ENOMEM; goto fail_option_alloc; + } + options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) break; case Opt_cache: s = match_strdup(&args[0]); - if (!s) - goto fail_option_alloc; + if (!s) { + ret = -ENOMEM; + P9_DPRINTK(P9_DEBUG_ERROR, + "problem allocating copy of cache arg\n"); + goto free_and_return; + } if (strcmp(s, "loose") == 0) v9ses->cache = CACHE_LOOSE; @@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) case Opt_access: s = match_strdup(&args[0]); - if (!s) - goto fail_option_alloc; + if (!s) { + ret = -ENOMEM; + P9_DPRINTK(P9_DEBUG_ERROR, + "problem allocating copy of access arg\n"); + goto free_and_return; + } v9ses->flags &= ~V9FS_ACCESS_MASK; if (strcmp(s, "user") == 0) @@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) continue; } } - kfree(options); - return ret; +free_and_return: + kfree(tmp_options); fail_option_alloc: - P9_DPRINTK(P9_DEBUG_ERROR, - "failed to allocate copy of option argument\n"); - return -ENOMEM; + return ret; } /** diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index 3a7560e..ed83583 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *); int v9fs_uflags2omode(int uflags, int extended); ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64); +void v9fs_blank_wstat(struct p9_wstat *wstat); diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 3902bf4..74a0461 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data, return total; } +static int v9fs_file_fsync(struct file *filp, struct dentry *dentry, + int datasync) +{ + struct p9_fid *fid; + struct p9_wstat wstat; + int retval; + + P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp, + dentry, datasync); + + fid = filp->private_data; + v9fs_blank_wstat(&wstat); + + retval = p9_client_wstat(fid, &wstat); + return retval; +} + static const struct file_operations v9fs_cached_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, @@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = { .release = v9fs_dir_release, .lock = v9fs_file_lock, .mmap = generic_file_readonly_mmap, + .fsync = v9fs_file_fsync, }; const struct file_operations 
v9fs_file_operations = { @@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = { .release = v9fs_dir_release, .lock = v9fs_file_lock, .mmap = generic_file_readonly_mmap, + .fsync = v9fs_file_fsync, }; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 9d03d1e..a407fa3 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended) * */ -static void +void v9fs_blank_wstat(struct p9_wstat *wstat) { wstat->type = ~0; diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 33baf27..34ddda8 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent) brelse(bh); unacquire_priv_sbp: + kfree(befs_sb->mount_opts.iocharset); kfree(sb->s_fs_info); unacquire_none: diff --git a/fs/block_dev.c b/fs/block_dev.c index 73d6a73..d11d028 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev) if (!sb) goto out; if (sb->s_flags & MS_RDONLY) { - deactivate_locked_super(sb); + sb->s_frozen = SB_FREEZE_TRANS; + up_write(&sb->s_umount); mutex_unlock(&bdev->bd_fsfreeze_mutex); return sb; } @@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) BUG_ON(sb->s_bdev != bdev); down_write(&sb->s_umount); if (sb->s_flags & MS_RDONLY) - goto out_deactivate; + goto out_unfrozen; if (sb->s_op->unfreeze_fs) { error = sb->s_op->unfreeze_fs(sb); @@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) } } +out_unfrozen: sb->s_frozen = SB_UNFROZEN; smp_wmb(); wake_up(&sb->s_wait_unfrozen); -out_deactivate: if (sb) deactivate_locked_super(sb); out_unlock: diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 87b2554..2b59201 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!(sb->s_flags & MS_RDONLY)) { ret = btrfs_recover_relocation(tree_root); - BUG_ON(ret); + if (ret < 0) { + printk(KERN_WARNING + "btrfs: failed to recover relocation\n"); + err = -EINVAL; + goto fail_trans_kthread; + } } location.objectid = BTRFS_FS_TREE_OBJECTID; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 432a2da..559f724 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5402,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, int ret; while (level >= 0) { - if (path->slots[level] >= - btrfs_header_nritems(path->nodes[level])) - break; - ret = walk_down_proc(trans, root, path, wc, lookup_info); if (ret > 0) break; @@ -5413,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, if (level == 0) break; + if (path->slots[level] >= + btrfs_header_nritems(path->nodes[level])) + break; + ret = do_walk_down(trans, root, path, wc, &lookup_info); if (ret > 0) { path->slots[level]++; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 96577e8..b177ed3 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, spin_unlock(&tree->buffer_lock); goto free_eb; } - spin_unlock(&tree->buffer_lock); - /* add one reference for the tree */ atomic_inc(&eb->refs); + spin_unlock(&tree->buffer_lock); return eb; free_eb: diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c020335..9d08096 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1133,7 +1133,7 @@ int btrfs_sync_file(struct file 
*file, struct dentry *dentry, int datasync) } mutex_lock(&dentry->d_inode->i_mutex); out: - return ret > 0 ? EIO : ret; + return ret > 0 ? -EIO : ret; } static const struct vm_operations_struct btrfs_file_vm_ops = { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8cd1099..4deb280 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1681,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, * before we start the transaction. It limits the amount of btree * reads required while inside the transaction. */ -static noinline void reada_csum(struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_ordered_extent *ordered_extent) -{ - struct btrfs_ordered_sum *sum; - u64 bytenr; - - sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum, - list); - bytenr = sum->sums[0].bytenr; - - /* - * we don't care about the results, the point of this search is - * just to get the btree leaves into ram - */ - btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0); -} - /* as ordered data IO finishes, this gets called so we can finish * an ordered extent if the range of bytes in the file it covers are * fully written. @@ -1709,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered_extent = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct btrfs_path *path; int compressed = 0; int ret; @@ -1717,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) if (!ret) return 0; - /* - * before we join the transaction, try to do some of our IO. - * This will limit the amount of IO that we have to do with - * the transaction running. We're unlikely to need to do any - * IO if the file extents are new, the disk_i_size checks - * covers the most common case. - */ - if (start < BTRFS_I(inode)->disk_i_size) { - path = btrfs_alloc_path(); - if (path) { - ret = btrfs_lookup_file_extent(NULL, root, path, - inode->i_ino, - start, 0); - ordered_extent = btrfs_lookup_ordered_extent(inode, - start); - if (!list_empty(&ordered_extent->list)) { - btrfs_release_path(root, path); - reada_csum(root, path, ordered_extent); - } - btrfs_free_path(path); - } - } - - if (!ordered_extent) - ordered_extent = btrfs_lookup_ordered_extent(inode, start); + ordered_extent = btrfs_lookup_ordered_extent(inode, start); BUG_ON(!ordered_extent); + if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); @@ -5841,7 +5799,9 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end, inode->i_ctime = CURRENT_TIME; BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; if (!(mode & FALLOC_FL_KEEP_SIZE) && - cur_offset > inode->i_size) { + (actual_len > inode->i_size) && + (cur_offset > inode->i_size)) { + if (cur_offset > actual_len) i_size = actual_len; else diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ed3e4a2..ab7ab53 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3764,7 +3764,8 @@ out: BTRFS_DATA_RELOC_TREE_OBJECTID); if (IS_ERR(fs_root)) err = PTR_ERR(fs_root); - btrfs_orphan_cleanup(fs_root); + else + btrfs_orphan_cleanup(fs_root); } return err; } diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 7b2600b..49503d2 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -1,3 +1,7 @@ +Version 1.62 +------------ +Add sockopt=TCP_NODELAY mount option. 
+ Version 1.61 ------------ Fix append problem to Samba servers (files opened with O_APPEND could diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index ac2b24c..78c1b86 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.61" +#define CIFS_VERSION "1.62" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 4b35f7e..ed751bb 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -149,6 +149,7 @@ struct TCP_Server_Info { bool svlocal:1; /* local server or remote */ bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf sizes */ + bool tcp_nodelay; atomic_t inFlight; /* number of requests on the wire to server */ #ifdef CONFIG_CIFS_STATS2 atomic_t inSend; /* requests trying to send */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 3bbcaa7..2e9e09c 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -98,7 +98,7 @@ struct smb_vol { bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ unsigned int rsize; unsigned int wsize; - unsigned int sockopt; + bool sockopt_tcp_nodelay:1; unsigned short int port; char *prepath; }; @@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname, simple_strtoul(value, &value, 0); } } else if (strnicmp(data, "sockopt", 5) == 0) { - if (value && *value) { - vol->sockopt = - simple_strtoul(value, &value, 0); + if (!value || !*value) { + cERROR(1, ("no socket option specified")); + continue; + } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) { + vol->sockopt_tcp_nodelay = 1; } } else if (strnicmp(data, "netbiosname", 4) == 0) { if (!value || !*value || (*value == ' ')) { @@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) tcp_ses->noblocksnd = volume_info->noblocksnd; tcp_ses->noautotune = volume_info->noautotune; + tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; atomic_set(&tcp_ses->inFlight, 0); init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); @@ -1764,6 +1767,7 @@ static int ipv4_connect(struct TCP_Server_Info *server) { int rc = 0; + int val; bool connected = false; __be16 orig_port = 0; struct socket *socket = server->ssocket; @@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server) socket->sk->sk_rcvbuf = 140 * 1024; } + if (server->tcp_nodelay) { + val = 1; + rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, + (char *)&val, sizeof(val)); + if (rc) + cFYI(1, ("set TCP_NODELAY socket option error %d", rc)); + } + cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", socket->sk->sk_sndbuf, socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo)); @@ -1916,6 +1928,7 @@ static int ipv6_connect(struct TCP_Server_Info *server) { int rc = 0; + int val; bool connected = false; __be16 orig_port = 0; struct socket *socket = server->ssocket; @@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server) */ socket->sk->sk_rcvtimeo = 7 * HZ; socket->sk->sk_sndtimeo = 5 * HZ; + + if (server->tcp_nodelay) { + val = 1; + rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, + (char *)&val, sizeof(val)); + if (rc) + cFYI(1, ("set TCP_NODELAY socket option error %d", rc)); + } + server->ssocket = socket; return rc; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index cf18ee7..e3fda97 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct 
dentry *direntry, struct iattr *attrs) CIFS_MOUNT_MAP_SPECIAL_CHR); } - if (!rc) + if (!rc) { rc = inode_setattr(inode, attrs); + + /* force revalidate when any of these times are set since some + of the fs types (eg ext3, fat) do not have fine enough + time granularity to match protocol, and we do not have a + a way (yet) to query the server fs's time granularity (and + whether it rounds times down). + */ + if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME))) + cifsInode->time = 0; + } out: kfree(args); kfree(full_path); diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index f84062f..c343b14 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, cFYI(1, ("For %s", name->name)); + if (parent->d_op && parent->d_op->d_hash) + parent->d_op->d_hash(parent, name); + else + name->hash = full_name_hash(name->name, name->len); + dentry = d_lookup(parent, name); if (dentry) { /* FIXME: check for inode number changes? */ @@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, min(len, max_len), nlt, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + pqst->len -= nls_nullsize(nlt); } else { pqst->name = filename; pqst->len = len; } - pqst->hash = full_name_hash(pqst->name, pqst->len); -/* cFYI(1, ("filldir on %s",pqst->name)); */ return rc; } diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 7085a62..aaa9c1c 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, /* null user mount */ *bcc_ptr = 0; *(bcc_ptr+1) = 0; - } else { /* 300 should be long enough for any conceivable user name */ + } else { bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, - 300, nls_cp); + MAX_USERNAME_SIZE, nls_cp); } bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* account for null termination */ @@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, /* copy user */ if (ses->userName == NULL) { /* BB what about null user mounts - check that we do this BB */ - } else { /* 300 should be long enough for any conceivable user name */ - strncpy(bcc_ptr, ses->userName, 300); + } else { + strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE); } - /* BB improve check for overflow */ - bcc_ptr += strnlen(ses->userName, 300); + bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE); *bcc_ptr = 0; bcc_ptr++; /* account for null termination */ diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index c5c45de..0ca9ec4 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -301,6 +301,12 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, u32 data; void __user *dxferp; int err; + int interface_id; + + if (get_user(interface_id, &sgio32->interface_id)) + return -EFAULT; + if (interface_id != 'S') + return sys_ioctl(fd, cmd, (unsigned long)sgio32); if (get_user(iovec_count, &sgio32->iovec_count)) return -EFAULT; @@ -936,6 +942,7 @@ COMPATIBLE_IOCTL(TCSETSF) COMPATIBLE_IOCTL(TIOCLINUX) COMPATIBLE_IOCTL(TIOCSBRK) COMPATIBLE_IOCTL(TIOCCBRK) +COMPATIBLE_IOCTL(TIOCGSID) COMPATIBLE_IOCTL(TIOCGICOUNT) /* Little t */ COMPATIBLE_IOCTL(TIOCGETD) @@ -1038,6 +1045,8 @@ COMPATIBLE_IOCTL(FIOQSIZE) #ifdef CONFIG_BLOCK /* loop */ IGNORE_IOCTL(LOOP_CLR_FD) +/* md calls this on random blockdevs */ +IGNORE_IOCTL(RAID_VERSION) /* SG stuff */ COMPATIBLE_IOCTL(SG_SET_TIMEOUT) COMPATIBLE_IOCTL(SG_GET_TIMEOUT) @@ -571,6 +571,9 @@ int setup_arg_pages(struct linux_binprm *bprm, struct vm_area_struct 
*prev = NULL; unsigned long vm_flags; unsigned long stack_base; + unsigned long stack_size; + unsigned long stack_expand; + unsigned long rlim_stack; #ifdef CONFIG_STACK_GROWSUP /* Limit stack size to 1GB */ @@ -627,10 +630,24 @@ int setup_arg_pages(struct linux_binprm *bprm, goto out_unlock; } + stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE; + stack_size = vma->vm_end - vma->vm_start; + /* + * Align this down to a page boundary as expand_stack + * will align it up. + */ + rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK; + rlim_stack = min(rlim_stack, stack_size); #ifdef CONFIG_STACK_GROWSUP - stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE; + if (stack_size + stack_expand > rlim_stack) + stack_base = vma->vm_start + rlim_stack; + else + stack_base = vma->vm_end + stack_expand; #else - stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE; + if (stack_size + stack_expand > rlim_stack) + stack_base = vma->vm_end - rlim_stack; + else + stack_base = vma->vm_start - stack_expand; #endif ret = expand_stack(vma, stack_base); if (ret) @@ -199,9 +199,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg) static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, int force) { - unsigned long flags; - - write_lock_irqsave(&filp->f_owner.lock, flags); + write_lock_irq(&filp->f_owner.lock); if (force || !filp->f_owner.pid) { put_pid(filp->f_owner.pid); filp->f_owner.pid = get_pid(pid); @@ -213,7 +211,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, filp->f_owner.euid = cred->euid; } } - write_unlock_irqrestore(&filp->f_owner.lock, flags); + write_unlock_irq(&filp->f_owner.lock); } int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, diff --git a/fs/file_table.c b/fs/file_table.c index 69652c5..b98404b 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -253,6 +253,7 @@ void __fput(struct file *file) if (file->f_op && file->f_op->release) file->f_op->release(inode, file); security_file_free(file); + ima_file_free(file); if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) cdev_put(inode->i_cdev); fops_put(file->f_op); diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 6d47379..583e823 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -541,7 +541,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock, *ptr++ = cpu_to_be64(bn++); break; } - } while (state != ALLOC_DATA); + } while ((state != ALLOC_DATA) || !dblock); ip->i_height = height; gfs2_add_inode_blocks(&ip->i_inode, alloced); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index f455a03..f426633 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -769,6 +769,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, if (!gl) return -ENOMEM; + atomic_inc(&sdp->sd_glock_disposal); gl->gl_flags = 0; gl->gl_name = name; atomic_set(&gl->gl_ref, 1); @@ -1538,6 +1539,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) up_write(&gfs2_umount_flush_sem); msleep(10); } + flush_workqueue(glock_workqueue); + wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0); + gfs2_dump_lockstate(sdp); } void gfs2_glock_finish_truncate(struct gfs2_inode *ip) diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 13f0bd2..c0262fa 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -123,7 +123,7 @@ struct lm_lockops { int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname); void (*lm_unmount) (struct gfs2_sbd *sdp); void (*lm_withdraw) (struct gfs2_sbd *sdp); - void (*lm_put_lock) (struct kmem_cache *cachep, void 
*gl); + void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl); unsigned int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state, unsigned int flags); void (*lm_cancel) (struct gfs2_glock *gl); diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 4792200..bc0ad15 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -544,6 +544,8 @@ struct gfs2_sbd { struct gfs2_holder sd_live_gh; struct gfs2_glock *sd_rename_gl; struct gfs2_glock *sd_trans_gl; + wait_queue_head_t sd_glock_wait; + atomic_t sd_glock_disposal; /* Inode Stuff */ diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 46df988..0e5e0e7 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -21,6 +21,7 @@ static void gdlm_ast(void *arg) { struct gfs2_glock *gl = arg; unsigned ret = gl->gl_state; + struct gfs2_sbd *sdp = gl->gl_sbd; BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED); @@ -30,6 +31,8 @@ static void gdlm_ast(void *arg) switch (gl->gl_lksb.sb_status) { case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */ kmem_cache_free(gfs2_glock_cachep, gl); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + wake_up(&sdp->sd_glock_wait); return; case -DLM_ECANCEL: /* Cancel while getting lock */ ret |= LM_OUT_CANCELED; @@ -164,14 +167,16 @@ static unsigned int gdlm_lock(struct gfs2_glock *gl, return LM_OUT_ASYNC; } -static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr) +static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl) { - struct gfs2_glock *gl = ptr; - struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; + struct gfs2_sbd *sdp = gl->gl_sbd; + struct lm_lockstruct *ls = &sdp->sd_lockstruct; int error; if (gl->gl_lksb.sb_lkid == 0) { kmem_cache_free(cachep, gl); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + wake_up(&sdp->sd_glock_wait); return; } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index edfee24..a86ed63 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -82,6 +82,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) gfs2_tune_init(&sdp->sd_tune); + init_waitqueue_head(&sdp->sd_glock_wait); + atomic_set(&sdp->sd_glock_disposal, 0); spin_lock_init(&sdp->sd_statfs_spin); spin_lock_init(&sdp->sd_rindex_spin); @@ -723,7 +725,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) goto fail; } - error = -EINVAL; + error = -EUSERS; if (!gfs2_jindex_size(sdp)) { fs_err(sdp, "no journals!\n"); goto fail_jindex; @@ -983,9 +985,17 @@ static const match_table_t nolock_tokens = { { Opt_err, NULL }, }; +static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl) +{ + struct gfs2_sbd *sdp = gl->gl_sbd; + kmem_cache_free(cachep, gl); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + wake_up(&sdp->sd_glock_wait); +} + static const struct lm_lockops nolock_ops = { .lm_proto_name = "lock_nolock", - .lm_put_lock = kmem_cache_free, + .lm_put_lock = nolock_put_lock, .lm_tokens = &nolock_tokens, }; diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index c282ad4..b9dd3da 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -21,6 +21,7 @@ #include <linux/gfs2_ondisk.h> #include <linux/crc32.h> #include <linux/time.h> +#include <linux/wait.h> #include "gfs2.h" #include "incore.h" @@ -1736,8 +1736,7 @@ do_last: if (nd.root.mnt) path_put(&nd.root); if (!IS_ERR(filp)) { - error = ima_path_check(&filp->f_path, filp->f_mode & - (MAY_READ | MAY_WRITE | MAY_EXEC)); + error = ima_file_check(filp, acc_mode); if (error) { fput(filp); filp = ERR_PTR(error); @@ -1797,8 +1796,7 @@ ok: } filp = nameidata_to_filp(&nd); if 
(!IS_ERR(filp)) { - error = ima_path_check(&filp->f_path, filp->f_mode & - (MAY_READ | MAY_WRITE | MAY_EXEC)); + error = ima_file_check(filp, acc_mode); if (error) { fput(filp); filp = ERR_PTR(error); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 6b89132..63f2071 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -486,6 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp) { dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); + if (gfp & __GFP_WAIT) + nfs_wb_page(page->mapping->host, page); /* If PagePrivate() is set, then the page is not freeable */ if (PagePrivate(page)) return 0; diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index fa58800..237874f 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -354,12 +354,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode) */ int nfs_fscache_release_page(struct page *page, gfp_t gfp) { - struct nfs_inode *nfsi = NFS_I(page->mapping->host); - struct fscache_cookie *cookie = nfsi->fscache; - - BUG_ON(!cookie); - if (PageFsCache(page)) { + struct nfs_inode *nfsi = NFS_I(page->mapping->host); + struct fscache_cookie *cookie = nfsi->fscache; + + BUG_ON(!cookie); dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", cookie, page, nfsi); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index faa0918..f141bde 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1261,8 +1261,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (fattr->valid & NFS_ATTR_FATTR_MODE) { if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) { + umode_t newmode = inode->i_mode & S_IFMT; + newmode |= fattr->mode & S_IALLUGO; + inode->i_mode = newmode; invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; - inode->i_mode = fattr->mode; } } else if (server->caps & NFS_CAP_MODE) invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 0adefc4..59047f8 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -120,7 +120,7 @@ static struct { { .status = MNT3ERR_INVAL, .errno = -EINVAL, }, { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, }, { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, }, - { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, }, + { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, }, }; struct mountres { diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 5e078b2..7bc2da8 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -699,7 +699,7 @@ static struct { { NFSERR_BAD_COOKIE, -EBADCOOKIE }, { NFSERR_NOTSUPP, -ENOTSUPP }, { NFSERR_TOOSMALL, -ETOOSMALL }, - { NFSERR_SERVERFAULT, -ESERVERFAULT }, + { NFSERR_SERVERFAULT, -EREMOTEIO }, { NFSERR_BADTYPE, -EBADTYPE }, { NFSERR_JUKEBOX, -EJUKEBOX }, { -1, -EIO } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 865265b..0c6fda3 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -146,6 +146,7 @@ enum { NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */ NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */ + NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */ }; struct nfs4_state { @@ -277,6 +278,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); extern void nfs4_schedule_state_recovery(struct nfs_client *); extern void nfs4_schedule_state_manager(struct nfs_client *); extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); +extern int nfs4_state_mark_reclaim_reboot(struct nfs_client 
*clp, struct nfs4_state *state); extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 198d51d..375f0fa 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -249,19 +249,15 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, if (state == NULL) break; nfs4_state_mark_reclaim_nograce(clp, state); - case -NFS4ERR_STALE_CLIENTID: + goto do_state_recovery; case -NFS4ERR_STALE_STATEID: - case -NFS4ERR_EXPIRED: - nfs4_schedule_state_recovery(clp); - ret = nfs4_wait_clnt_recover(clp); - if (ret == 0) - exception->retry = 1; -#if !defined(CONFIG_NFS_V4_1) - break; -#else /* !defined(CONFIG_NFS_V4_1) */ - if (!nfs4_has_session(server->nfs_client)) + if (state == NULL) break; - /* FALLTHROUGH */ + nfs4_state_mark_reclaim_reboot(clp, state); + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_EXPIRED: + goto do_state_recovery; +#if defined(CONFIG_NFS_V4_1) case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: @@ -274,7 +270,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, nfs4_schedule_state_recovery(clp); exception->retry = 1; break; -#endif /* !defined(CONFIG_NFS_V4_1) */ +#endif /* defined(CONFIG_NFS_V4_1) */ case -NFS4ERR_FILE_OPEN: if (exception->timeout > HZ) { /* We have retried a decent amount, time to @@ -293,6 +289,12 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, } /* We failed to handle the error */ return nfs4_map_errors(ret); +do_state_recovery: + nfs4_schedule_state_recovery(clp); + ret = nfs4_wait_clnt_recover(clp); + if (ret == 0) + exception->retry = 1; + return ret; } @@ -1658,6 +1660,8 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in status = PTR_ERR(state); if (IS_ERR(state)) goto err_opendata_put; + if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) + set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); nfs4_opendata_put(opendata); nfs4_put_state_owner(sp); *res = state; @@ -3422,15 +3426,14 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, if (state == NULL) break; nfs4_state_mark_reclaim_nograce(clp, state); - case -NFS4ERR_STALE_CLIENTID: + goto do_state_recovery; case -NFS4ERR_STALE_STATEID: + if (state == NULL) + break; + nfs4_state_mark_reclaim_reboot(clp, state); + case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_EXPIRED: - rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); - nfs4_schedule_state_recovery(clp); - if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) - rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); - task->tk_status = 0; - return -EAGAIN; + goto do_state_recovery; #if defined(CONFIG_NFS_V4_1) case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: @@ -3458,6 +3461,13 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, } task->tk_status = nfs4_map_errors(task->tk_status); return 0; +do_state_recovery: + rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); + nfs4_schedule_state_recovery(clp); + if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) + rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); + task->tk_status = 0; + return -EAGAIN; } static int @@ -4088,6 +4098,28 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = { .rpc_release = nfs4_lock_release, }; +static void 
nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) +{ + struct nfs_client *clp = server->nfs_client; + struct nfs4_state *state = lsp->ls_state; + + switch (error) { + case -NFS4ERR_ADMIN_REVOKED: + case -NFS4ERR_BAD_STATEID: + case -NFS4ERR_EXPIRED: + if (new_lock_owner != 0 || + (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) + nfs4_state_mark_reclaim_nograce(clp, state); + lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; + break; + case -NFS4ERR_STALE_STATEID: + if (new_lock_owner != 0 || + (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) + nfs4_state_mark_reclaim_reboot(clp, state); + lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; + }; +} + static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) { struct nfs4_lockdata *data; @@ -4126,6 +4158,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f ret = nfs4_wait_for_completion_rpc_task(task); if (ret == 0) { ret = data->rpc_status; + if (ret) + nfs4_handle_setlk_error(data->server, data->lsp, + data->arg.new_lock_owner, ret); } else data->cancelled = 1; rpc_put_task(task); @@ -4181,8 +4216,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock { struct nfs_inode *nfsi = NFS_I(state->inode); unsigned char fl_flags = request->fl_flags; - int status; + int status = -ENOLCK; + if ((fl_flags & FL_POSIX) && + !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) + goto out; /* Is this a delegated open? */ status = nfs4_set_lock_state(state, request); if (status != 0) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 6d263ed..c1e2733 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -901,7 +901,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp) nfs4_schedule_state_manager(clp); } -static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) +int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) { set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e437fd6..5cd5184 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4631,7 +4631,7 @@ static int decode_sequence(struct xdr_stream *xdr, * If the server returns different values for sessionID, slotID or * sequence number, the server is looney tunes. */ - status = -ESERVERFAULT; + status = -EREMOTEIO; if (memcmp(id.data, res->sr_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { @@ -5774,7 +5774,7 @@ static struct { { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, { NFS4ERR_NOTSUPP, -ENOTSUPP }, { NFS4ERR_TOOSMALL, -ETOOSMALL }, - { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, + { NFS4ERR_SERVERFAULT, -EREMOTEIO }, { NFS4ERR_BADTYPE, -EBADTYPE }, { NFS4ERR_LOCKED, -EAGAIN }, { NFS4ERR_SYMLINK, -ELOOP }, @@ -5801,7 +5801,7 @@ nfs4_stat_to_errno(int stat) } if (stat <= 10000 || stat > 10100) { /* The server is looney tunes. */ - return -ESERVERFAULT; + return -EREMOTEIO; } /* If we cannot translate the error, the recovery routines should * handle it. diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index e297593..a12c45b 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -176,6 +176,12 @@ void nfs_release_request(struct nfs_page *req) kref_put(&req->wb_kref, nfs_free_request); } +static int nfs_wait_bit_uninterruptible(void *word) +{ + io_schedule(); + return 0; +} + /** * nfs_wait_on_request - Wait for a request to complete. * @req: request to wait upon. 
@@ -186,14 +192,9 @@ void nfs_release_request(struct nfs_page *req) int nfs_wait_on_request(struct nfs_page *req) { - int ret = 0; - - if (!test_bit(PG_BUSY, &req->wb_flags)) - goto out; - ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY, - nfs_wait_bit_killable, TASK_KILLABLE); -out: - return ret; + return wait_on_bit(&req->wb_flags, PG_BUSY, + nfs_wait_bit_uninterruptible, + TASK_UNINTERRUPTIBLE); } /** diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ce907ef..f1afee4 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -243,6 +243,7 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *); static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *); static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt); +static void nfs_put_super(struct super_block *); static void nfs_kill_super(struct super_block *); static int nfs_remount(struct super_block *sb, int *flags, char *raw_data); @@ -266,6 +267,7 @@ static const struct super_operations nfs_sops = { .alloc_inode = nfs_alloc_inode, .destroy_inode = nfs_destroy_inode, .write_inode = nfs_write_inode, + .put_super = nfs_put_super, .statfs = nfs_statfs, .clear_inode = nfs_clear_inode, .umount_begin = nfs_umount_begin, @@ -335,6 +337,7 @@ static const struct super_operations nfs4_sops = { .alloc_inode = nfs_alloc_inode, .destroy_inode = nfs_destroy_inode, .write_inode = nfs_write_inode, + .put_super = nfs_put_super, .statfs = nfs_statfs, .clear_inode = nfs4_clear_inode, .umount_begin = nfs_umount_begin, @@ -2258,6 +2261,17 @@ error_splat_super: } /* + * Ensure that we unregister the bdi before kill_anon_super + * releases the device name + */ +static void nfs_put_super(struct super_block *s) +{ + struct nfs_server *server = NFS_SB(s); + + bdi_unregister(&server->backing_dev_info); +} + +/* * Destroy an NFS2/3 superblock */ static void nfs_kill_super(struct super_block *s) @@ -2265,7 +2279,6 @@ static void nfs_kill_super(struct super_block *s) struct nfs_server *server = NFS_SB(s); kill_anon_super(s); - bdi_unregister(&server->backing_dev_info); nfs_fscache_release_super_cookie(s); nfs_free_server(server); } diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c index 70e1fbb..ad4d2e7 100644 --- a/fs/nfs/sysctl.c +++ b/fs/nfs/sysctl.c @@ -15,8 +15,10 @@ #include "callback.h" +#ifdef CONFIG_NFS_V4 static const int nfs_set_port_min = 0; static const int nfs_set_port_max = 65535; +#endif static struct ctl_table_header *nfs_callback_sysctl_table; static ctl_table nfs_cb_sysctls[] = { diff --git a/fs/nfs/write.c b/fs/nfs/write.c index d171696..d63d964 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1233,7 +1233,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) -void nfs_commitdata_release(void *data) +static void nfs_commitdata_release(void *data) { struct nfs_write_data *wdata = data; @@ -1541,6 +1541,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) break; } ret = nfs_wait_on_request(req); + nfs_release_request(req); if (ret < 0) goto out; } @@ -1597,8 +1598,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage, struct nfs_page *req; int ret; - if (PageFsCache(page)) - nfs_fscache_release_page(page, GFP_KERNEL); + nfs_fscache_release_page(page, GFP_KERNEL); req = nfs_find_and_lock_request(page); ret = PTR_ERR(req); diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index c487810..a0c4016 100644 --- 
a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path) static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp) { - struct svc_export *exp; u32 fsidv[2]; mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); - exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); - /* - * We shouldn't have accepting an nfsv4 request at all if we - * don't have a pseudoexport!: - */ - if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT) - exp = ERR_PTR(-ESERVERFAULT); - return exp; + return rqst_exp_find(rqstp, FSID_NUM, fsidv); } /* diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index c194793..97d79ef 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -752,6 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, flags, current_cred()); if (IS_ERR(*filp)) host_err = PTR_ERR(*filp); + host_err = ima_file_check(*filp, access); out_nfserr: err = nfserrno(host_err); out: @@ -2127,7 +2128,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, */ path.mnt = exp->ex_path.mnt; path.dentry = dentry; - err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC)); nfsd_out: return err? nfserrno(err) : 0; } diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 17584c5..105b508 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2829,7 +2829,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) || sci->sc_seq_request != sci->sc_seq_done); spin_unlock(&sci->sc_state_lock); - if (flag || nilfs_segctor_confirm(sci)) + if (flag || !nilfs_segctor_confirm(sci)) nilfs_segctor_write_out(sci); WARN_ON(!list_empty(&sci->sc_copied_buffers)); diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 3dae4a1..7e9df11 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -599,7 +599,7 @@ bail: return ret; } -/* +/* * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're * particularly interested in the aio/dio case. Like the core uses * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from @@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw, ret = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, - nr_segs, + nr_segs, ocfs2_direct_IO_get_blocks, ocfs2_dio_end_io); diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index d43d34a..21c808f 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, } ocfs2_metadata_cache_io_unlock(ci); - mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", + mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", (unsigned long long)block, nr, ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes", flags); diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index eda5b8b..5c98900 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type); unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; -/* Only sets a new threshold if there are no active regions. +/* Only sets a new threshold if there are no active regions. 
* * No locking or otherwise interesting code is required for reading * o2hb_dead_threshold as it can't change once regions are active and @@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work) mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " "milliseconds\n", reg->hr_dev_name, - jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); + jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); o2quo_disk_timeout(); } @@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg, "seq %llu last %llu changed %u equal %u\n", slot->ds_node_num, (long long)slot->ds_last_generation, le32_to_cpu(hb_block->hb_cksum), - (unsigned long long)le64_to_cpu(hb_block->hb_seq), + (unsigned long long)le64_to_cpu(hb_block->hb_seq), (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, slot->ds_equal_samples); diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 334f231..d8d0c65 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, } if (was_valid && !valid) { - printk(KERN_INFO "o2net: no longer connected to " + printk(KERN_NOTICE "o2net: no longer connected to " SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); o2net_complete_nodes_nsw(nn); } @@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, if (!was_valid && valid) { o2quo_conn_up(o2net_num_from_nn(nn)); cancel_delayed_work(&nn->nn_connect_expired); - printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n", + printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", o2nm_this_node() > sc->sc_node->nd_num ? "connected to" : "accepted connection from", SC_NODEF_ARGS(sc)); @@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc, cond_resched(); continue; } - mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT + mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); o2net_ensure_shutdown(nn, sc, 0); break; @@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data) do_gettimeofday(&now); - printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " + printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), o2net_idle_timeout() / 1000, o2net_idle_timeout() % 1000); mlog(ML_NOTICE, "here are some times that might help debug the " "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", - sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, + sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, now.tv_sec, (long) now.tv_usec, sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, sc->sc_tv_advance_start.tv_sec, diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index 8d58cfe..96fa7eb 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -32,10 +32,10 @@ * on their number */ #define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS) -/* +/* * This version number represents quite a lot, unfortunately. It not * only represents the raw network message protocol on the wire but also - * locking semantics of the file system using the protocol. It should + * locking semantics of the file system using the protocol. It should * be somewhere else, I'm sure, but right now it isn't. * * With version 11, we separate out the filesystem locking portion. 
The diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h index b5786a7..3cfa114 100644 --- a/fs/ocfs2/dlm/dlmapi.h +++ b/fs/ocfs2/dlm/dlmapi.h @@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err); mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \ } while (0) -#define DLM_LKSB_UNUSED1 0x01 +#define DLM_LKSB_UNUSED1 0x01 #define DLM_LKSB_PUT_LVB 0x02 #define DLM_LKSB_GET_LVB 0x04 #define DLM_LKSB_UNUSED2 0x08 diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index 01cf8cc..dccc439 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c @@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) dlm_lock_put(lock); /* free up the reserved bast that we are cancelling. * guaranteed that this will not be the last reserved - * ast because *both* an ast and a bast were reserved + * ast because *both* an ast and a bast were reserved * to get to this point. the res->spinlock will not be * taken here */ dlm_lockres_release_ast(dlm, res); diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index ca96bce..f283bce 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, /* instead of logging the same network error over * and over, sleep here and wait for the heartbeat * to notice the node is dead. times out after 5s. */ - dlm_wait_for_node_death(dlm, res->owner, + dlm_wait_for_node_death(dlm, res->owner, DLM_NODE_DEATH_WAIT_MAX); ret = DLM_RECOVERING; mlog(0, "node %u died so returning DLM_RECOVERING " diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 42b0bad..0cd24cf 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) assert_spin_locked(&res->spinlock); stringify_lockname(res->lockname.name, res->lockname.len, - buf, sizeof(buf) - 1); + buf, sizeof(buf)); printk("lockres: %s, owner=%u, state=%u\n", buf, res->owner, res->state); printk(" last used: %lu, refcnt: %u, on purge list: %s\n", diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 0334000..988c905 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, } /* Once the dlm ctxt is marked as leaving then we don't want - * to be put in someone's domain map. + * to be put in someone's domain map. * Also, explicitly disallow joining at certain troublesome * times (ie. during recovery). */ if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 437698e..73333777 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, } dlm_revert_pending_lock(res, lock); dlm_lock_put(lock); - } else if (dlm_is_recovery_lock(res->lockname.name, + } else if (dlm_is_recovery_lock(res->lockname.name, res->lockname.len)) { /* special case for the $RECOVERY lock. 
* there will never be an AST delivered to put diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 03ccf9a..a659606 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) struct dlm_master_list_entry *mle; assert_spin_locked(&dlm->spinlock); - + list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { if (node_up) dlm_mle_node_up(dlm, mle, NULL, idx); @@ -833,7 +833,7 @@ lookup: __dlm_insert_mle(dlm, mle); /* still holding the dlm spinlock, check the recovery map - * to see if there are any nodes that still need to be + * to see if there are any nodes that still need to be * considered. these will not appear in the mle nodemap * but they might own this lockres. wait on them. */ bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); @@ -883,7 +883,7 @@ redo_request: msleep(500); } continue; - } + } dlm_kick_recovery_thread(dlm); msleep(1000); @@ -939,8 +939,8 @@ wait: res->lockname.name, blocked); if (++tries > 20) { mlog(ML_ERROR, "%s:%.*s: spinning on " - "dlm_wait_for_lock_mastery, blocked=%d\n", - dlm->name, res->lockname.len, + "dlm_wait_for_lock_mastery, blocked=%d\n", + dlm->name, res->lockname.len, res->lockname.name, blocked); dlm_print_one_lock_resource(res); dlm_print_one_mle(mle); @@ -1029,7 +1029,7 @@ recheck: ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); b = (mle->type == DLM_MLE_BLOCK); if ((*blocked && !b) || (!*blocked && b)) { - mlog(0, "%s:%.*s: status change: old=%d new=%d\n", + mlog(0, "%s:%.*s: status change: old=%d new=%d\n", dlm->name, res->lockname.len, res->lockname.name, *blocked, b); *blocked = b; @@ -1602,7 +1602,7 @@ send_response: } mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", dlm->node_num, res->lockname.len, res->lockname.name); - ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, + ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, DLM_ASSERT_MASTER_MLE_CLEANUP); if (ret < 0) { mlog(ML_ERROR, "failed to dispatch assert master work\n"); @@ -1701,7 +1701,7 @@ again: if (r & DLM_ASSERT_RESPONSE_REASSERT) { mlog(0, "%.*s: node %u create mles on other " - "nodes and requests a re-assert\n", + "nodes and requests a re-assert\n", namelen, lockname, to); reassert = 1; } @@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); goto done; - } + } } } spin_unlock(&dlm->master_lock); @@ -1883,7 +1883,7 @@ ok: int extra_ref = 0; int nn = -1; int rr, err = 0; - + spin_lock(&mle->spinlock); if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) extra_ref = 1; @@ -1891,7 +1891,7 @@ ok: /* MASTER mle: if any bits set in the response map * then the calling node needs to re-assert to clear * up nodes that this node contacted */ - while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, + while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, nn+1)) < O2NM_MAX_NODES) { if (nn != dlm->node_num && nn != assert->node_idx) master_request = 1; @@ -2002,7 +2002,7 @@ kill: __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); spin_unlock(&dlm->spinlock); - *ret_data = (void *)res; + *ret_data = (void *)res; dlm_put(dlm); return -EINVAL; } @@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, item->u.am.request_from = request_from; item->u.am.flags = flags; - if (ignore_higher) - mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, + if 
(ignore_higher) + mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, res->lockname.name); - + spin_lock(&dlm->work_lock); list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); @@ -2133,7 +2133,7 @@ put: * think that $RECOVERY is currently mastered by a dead node. If so, * we wait a short time to allow that node to get notified by its own * heartbeat stack, then check again. All $RECOVERY lock resources - * mastered by dead nodes are purged when the hearbeat callback is + * mastered by dead nodes are purged when the hearbeat callback is * fired, so we can know for sure that it is safe to continue once * the node returns a live node or no node. */ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, @@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, ret = -EAGAIN; } spin_unlock(&dlm->spinlock); - mlog(0, "%s: reco lock master is %u\n", dlm->name, + mlog(0, "%s: reco lock master is %u\n", dlm->name, master); break; } @@ -2602,7 +2602,7 @@ fail: mlog(0, "%s:%.*s: timed out during migration\n", dlm->name, res->lockname.len, res->lockname.name); - /* avoid hang during shutdown when migrating lockres + /* avoid hang during shutdown when migrating lockres * to a node which also goes down */ if (dlm_is_node_dead(dlm, target)) { mlog(0, "%s:%.*s: expected migration " @@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); spin_unlock(&res->spinlock); - /* target has died, so make the caller break out of the + /* target has died, so make the caller break out of the * wait_event, but caller must recheck the domain_map */ spin_lock(&dlm->spinlock); if (!test_bit(mig_target, dlm->domain_map)) diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 2f9e4e1..344bcf9 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, if (lock->ml.node == dead_node) { mlog(0, "AHA! 
there was " "a $RECOVERY lock for dead " - "node %u (%s)!\n", + "node %u (%s)!\n", dead_node, dlm->name); list_del_init(&lock->list); dlm_lock_put(lock); @@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, mres->master = master; } +static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock, + struct dlm_migratable_lockres *mres, + int queue) +{ + if (!lock->lksb) + return; + + /* Ignore lvb in all locks in the blocked list */ + if (queue == DLM_BLOCKED_LIST) + return; + + /* Only consider lvbs in locks with granted EX or PR lock levels */ + if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE) + return; + + if (dlm_lvb_is_empty(mres->lvb)) { + memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); + return; + } + + /* Ensure the lvb copied for migration matches in other valid locks */ + if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN)) + return; + + mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, " + "node=%u\n", + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + lock->lockres->lockname.len, lock->lockres->lockname.name, + lock->ml.node); + dlm_print_one_lock_resource(lock->lockres); + BUG(); +} /* returns 1 if this lock fills the network structure, * 0 otherwise */ @@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock, ml->list = queue; if (lock->lksb) { ml->flags = lock->lksb->flags; - /* send our current lvb */ - if (ml->type == LKM_EXMODE || - ml->type == LKM_PRMODE) { - /* if it is already set, this had better be a PR - * and it has to match */ - if (!dlm_lvb_is_empty(mres->lvb) && - (ml->type == LKM_EXMODE || - memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { - mlog(ML_ERROR, "mismatched lvbs!\n"); - dlm_print_one_lock_resource(lock->lockres); - BUG(); - } - memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); - } + dlm_prepare_lvb_for_migration(lock, mres, queue); } ml->node = lock->ml.node; mres->num_locks++; @@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, struct dlm_lock *lock = NULL; u8 from = O2NM_MAX_NODES; unsigned int added = 0; + __be64 c; mlog(0, "running %d locks for this lockres\n", mres->num_locks); for (i=0; i<mres->num_locks; i++) { @@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, /* lock is always created locally first, and * destroyed locally last. 
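The dlm_prepare_lvb_for_migration() hunk above encodes which locks may contribute a lock value block when a resource is migrated: locks on the blocked list never do, only granted EX or PR modes can hold a valid LVB, the first valid copy seeds the migration record, and any further valid copy must match it exactly. A compile-only sketch of those rules with simplified stand-in types (LVB_LEN and the mode/list names below are illustrative, not the kernel's):

#include <string.h>
#include <assert.h>

#define LVB_LEN 64

enum lock_mode { MODE_NL, MODE_PR, MODE_EX };
enum lock_list { GRANTED_LIST, CONVERTING_LIST, BLOCKED_LIST };

/* A lock's LVB is only meaningful if the lock is not merely blocked and
 * holds a mode that can read or write the value block (PR or EX). */
static int lvb_worth_migrating(enum lock_mode mode, enum lock_list list)
{
	if (list == BLOCKED_LIST)
		return 0;
	return mode == MODE_PR || mode == MODE_EX;
}

/* The first valid copy seeds the migration record; later copies must
 * agree, otherwise the cluster's view of the value block is corrupt. */
static void merge_lvb(char mres_lvb[LVB_LEN], const char lock_lvb[LVB_LEN])
{
	static const char empty[LVB_LEN];

	if (!memcmp(mres_lvb, empty, LVB_LEN)) {
		memcpy(mres_lvb, lock_lvb, LVB_LEN);
		return;
	}
	assert(!memcmp(mres_lvb, lock_lvb, LVB_LEN));
}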
it must be on the list */ if (!lock) { - __be64 c = ml->cookie; - mlog(ML_ERROR, "could not find local lock " - "with cookie %u:%llu!\n", + c = ml->cookie; + mlog(ML_ERROR, "Could not find local lock " + "with cookie %u:%llu, node %u, " + "list %u, flags 0x%x, type %d, " + "conv %d, highest blocked %d\n", dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c))); + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + ml->node, ml->list, ml->flags, ml->type, + ml->convert_type, ml->highest_blocked); + __dlm_print_one_lock_resource(res); + BUG(); + } + + if (lock->ml.node != ml->node) { + c = lock->ml.cookie; + mlog(ML_ERROR, "Mismatched node# in lock " + "cookie %u:%llu, name %.*s, node %u\n", + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + res->lockname.len, res->lockname.name, + lock->ml.node); + c = ml->cookie; + mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " + "node %u, list %u, flags 0x%x, type %d, " + "conv %d, highest blocked %d\n", + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + ml->node, ml->list, ml->flags, ml->type, + ml->convert_type, ml->highest_blocked); __dlm_print_one_lock_resource(res); BUG(); } - BUG_ON(lock->ml.node != ml->node); if (tmpq != queue) { - mlog(0, "lock was on %u instead of %u for %.*s\n", - j, ml->list, res->lockname.len, res->lockname.name); + c = ml->cookie; + mlog(0, "Lock cookie %u:%llu was on list %u " + "instead of list %u for %.*s\n", + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + j, ml->list, res->lockname.len, + res->lockname.name); + __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); continue; } @@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, * the lvb. 
*/ memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); } else { - /* otherwise, the node is sending its + /* otherwise, the node is sending its * most recent valid lvb info */ BUG_ON(ml->type != LKM_EXMODE && ml->type != LKM_PRMODE); @@ -1886,7 +1936,7 @@ skip_lvb: spin_lock(&res->spinlock); list_for_each_entry(lock, queue, list) { if (lock->ml.cookie == ml->cookie) { - __be64 c = lock->ml.cookie; + c = lock->ml.cookie; mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " "exists on this lockres!\n", dlm->name, res->lockname.len, res->lockname.name, @@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, assert_spin_locked(&res->spinlock); if (res->owner == dlm->node_num) - /* if this node owned the lockres, and if the dead node + /* if this node owned the lockres, and if the dead node * had an EX when he died, blank out the lvb */ search_node = dead_node; else { @@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, /* this node is the lockres master: * 1) remove any stale locks for the dead node - * 2) if the dead node had an EX when he died, blank out the lvb + * 2) if the dead node had an EX when he died, blank out the lvb */ assert_spin_locked(&dlm->spinlock); assert_spin_locked(&res->spinlock); @@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, mlog(0, "%s:%.*s: freed %u locks for dead node %u, " "dropping ref from lockres\n", dlm->name, res->lockname.len, res->lockname.name, freed, dead_node); - BUG_ON(!test_bit(dead_node, res->refmap)); + if(!test_bit(dead_node, res->refmap)) { + mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " + "but ref was not set\n", dlm->name, + res->lockname.len, res->lockname.name, freed, dead_node); + __dlm_print_one_lock_resource(res); + } dlm_lockres_clear_refmap_bit(dead_node, res); } else if (test_bit(dead_node, res->refmap)) { mlog(0, "%s:%.*s: dead node %u had a ref, but had " @@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) } spin_unlock(&res->spinlock); continue; - } + } spin_lock(&res->spinlock); /* zero the lvb if necessary */ dlm_revalidate_lvb(dlm, res, dead_node); @@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) * this function on each node racing to become the recovery * master will not stop attempting this until either: * a) this node gets the EX (and becomes the recovery master), - * or b) dlm->reco.new_master gets set to some nodenum + * or b) dlm->reco.new_master gets set to some nodenum * != O2NM_INVALID_NODE_NUM (another node will do the reco). * so each time a recovery master is needed, the entire cluster * will sync at this point. if the new master dies, that will @@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); -again: +again: memset(&lksb, 0, sizeof(lksb)); ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, @@ -2437,8 +2492,8 @@ again: if (ret == DLM_NORMAL) { mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", dlm->name, dlm->node_num); - - /* got the EX lock. check to see if another node + + /* got the EX lock. 
check to see if another node * just became the reco master */ if (dlm_reco_master_ready(dlm)) { mlog(0, "%s: got reco EX lock, but %u will " @@ -2451,12 +2506,12 @@ again: /* see if recovery was already finished elsewhere */ spin_lock(&dlm->spinlock); if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { - status = -EINVAL; + status = -EINVAL; mlog(0, "%s: got reco EX lock, but " "node got recovered already\n", dlm->name); if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { mlog(ML_ERROR, "%s: new master is %u " - "but no dead node!\n", + "but no dead node!\n", dlm->name, dlm->reco.new_master); BUG(); } @@ -2468,7 +2523,7 @@ again: * set the master and send the messages to begin recovery */ if (!status) { mlog(0, "%s: dead=%u, this=%u, sending " - "begin_reco now\n", dlm->name, + "begin_reco now\n", dlm->name, dlm->reco.dead_node, dlm->node_num); status = dlm_send_begin_reco_message(dlm, dlm->reco.dead_node); @@ -2501,7 +2556,7 @@ again: mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", dlm->name, dlm->node_num); /* another node is master. wait on - * reco.new_master != O2NM_INVALID_NODE_NUM + * reco.new_master != O2NM_INVALID_NODE_NUM * for at most one second */ wait_event_timeout(dlm->dlm_reco_thread_wq, dlm_reco_master_ready(dlm), @@ -2589,7 +2644,13 @@ retry: "begin reco msg (%d)\n", dlm->name, nodenum, ret); ret = 0; } - if (ret == -EAGAIN) { + + /* + * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, + * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. + * We are handling both for compatibility reasons. + */ + if (ret == -EAGAIN || ret == EAGAIN) { mlog(0, "%s: trying to start recovery of node " "%u, but node %u is waiting for last recovery " "to complete, backoff for a bit\n", dlm->name, @@ -2599,7 +2660,7 @@ retry: } if (ret < 0) { struct dlm_lock_resource *res; - /* this is now a serious problem, possibly ENOMEM + /* this is now a serious problem, possibly ENOMEM * in the network stack. 
must retry */ mlog_errno(ret); mlog(ML_ERROR, "begin reco of dlm %s to node %u " @@ -2612,7 +2673,7 @@ retry: } else { mlog(ML_ERROR, "recovery lock not found\n"); } - /* sleep for a bit in hopes that we can avoid + /* sleep for a bit in hopes that we can avoid * another ENOMEM */ msleep(100); goto retry; @@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, } if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { mlog(ML_NOTICE, "%s: dead_node previously set to %u, " - "node %u changing it to %u\n", dlm->name, + "node %u changing it to %u\n", dlm->name, dlm->reco.dead_node, br->node_idx, br->dead_node); } dlm_set_reco_master(dlm, br->node_idx); @@ -2730,8 +2791,8 @@ stage2: if (ret < 0) { mlog_errno(ret); if (dlm_is_host_down(ret)) { - /* this has no effect on this recovery - * session, so set the status to zero to + /* this has no effect on this recovery + * session, so set the status to zero to * finish out the last recovery */ mlog(ML_ERROR, "node %u went down after this " "node finished recovery.\n", nodenum); @@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, mlog(0, "%s: node %u finalizing recovery stage%d of " "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); - + spin_lock(&dlm->spinlock); if (dlm->reco.new_master != fr->node_idx) { diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index 00f53b2..49e29ec 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, actions &= ~(DLM_UNLOCK_REMOVE_LOCK| DLM_UNLOCK_REGRANT_LOCK| DLM_UNLOCK_CLEAR_CONVERT_TYPE); - } else if (status == DLM_RECOVERING || - status == DLM_MIGRATING || + } else if (status == DLM_RECOVERING || + status == DLM_MIGRATING || status == DLM_FORWARD) { /* must clear the actions because this unlock * is about to be retried. cannot free or do @@ -661,14 +661,14 @@ retry: if (call_ast) { mlog(0, "calling unlockast(%p, %d)\n", data, status); if (is_master) { - /* it is possible that there is one last bast + /* it is possible that there is one last bast * pending. make sure it is flushed, then * call the unlockast. * not an issue if this is a mastered remotely, * since this lock has been removed from the * lockres queues and cannot be found. */ dlm_kick_thread(dlm, NULL); - wait_event(dlm->ast_wq, + wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock)); } (*unlockast)(data, status); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index c5e4a49..e044019 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); lockres->l_level = lockres->l_requested; + + /* + * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing + * the OCFS2_LOCK_BUSY flag to prevent the dc thread from + * downconverting the lock before the upconvert has fully completed. 
+ */ + lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); + lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); mlog_exit_void(); @@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, assert_spin_locked(&lockres->l_lock); - lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); - if (level > lockres->l_blocking) { /* only schedule a downconvert if we haven't already scheduled * one that goes low enough to satisfy the level we're @@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, lockres->l_blocking = level; } + if (needs_downconvert) + lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); + mlog_exit(needs_downconvert); return needs_downconvert; } @@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, mlog_entry_void(); spin_lock_irqsave(&lockres->l_lock, flags); lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); + lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); if (convert) lockres->l_action = OCFS2_AST_INVALID; else @@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, again: wait = 0; + spin_lock_irqsave(&lockres->l_lock, flags); + if (catch_signals && signal_pending(current)) { ret = -ERESTARTSYS; - goto out; + goto unlock; } - spin_lock_irqsave(&lockres->l_lock, flags); - mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING, "Cluster lock called on freeing lockres %s! flags " "0x%lx\n", lockres->l_name, lockres->l_flags); @@ -1346,6 +1356,25 @@ again: goto unlock; } + if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) { + /* + * We've upconverted. If the lock now has a level we can + * work with, we take it. If, however, the lock is not at the + * required level, we go thru the full cycle. One way this could + * happen is if a process requesting an upconvert to PR is + * closely followed by another requesting upconvert to an EX. + * If the process requesting EX lands here, we want it to + * continue attempting to upconvert and let the process + * requesting PR take the lock. + * If multiple processes request upconvert to PR, the first one + * here will take the lock. The others will have to go thru the + * OCFS2_LOCK_BLOCKED check to ensure that there is no pending + * downconvert request. + */ + if (level <= lockres->l_level) + goto update_holders; + } + if (lockres->l_flags & OCFS2_LOCK_BLOCKED && !ocfs2_may_continue_on_blocked_lock(lockres, level)) { /* is the lock is currently blocked on behalf of @@ -1416,11 +1445,14 @@ again: goto again; } +update_holders: /* Ok, if we get here then we're good to go. */ ocfs2_inc_holders(lockres, level); ret = 0; unlock: + lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); + spin_unlock_irqrestore(&lockres->l_lock, flags); out: /* @@ -3155,7 +3187,7 @@ out: /* Mark the lockres as being dropped. It will no longer be * queued if blocking, but we still may have to wait on it * being dequeued from the downconvert thread before we can consider - * it safe to drop. + * it safe to drop. * * You can *not* attempt to call cluster_lock on this lockres anymore. 
*/ void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) @@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, unsigned long flags; int blocking; int new_level; + int level; int ret = 0; int set_lvb = 0; unsigned int gen; @@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, spin_lock_irqsave(&lockres->l_lock, flags); - BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); - recheck: + /* + * Is it still blocking? If not, we have no more work to do. + */ + if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) { + BUG_ON(lockres->l_blocking != DLM_LOCK_NL); + spin_unlock_irqrestore(&lockres->l_lock, flags); + ret = 0; + goto leave; + } + if (lockres->l_flags & OCFS2_LOCK_BUSY) { /* XXX * This is a *big* race. The OCFS2_LOCK_PENDING flag @@ -3401,6 +3442,31 @@ recheck: goto leave; } + /* + * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is + * set when the ast is received for an upconvert just before the + * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast + * on the heels of the ast, we want to delay the downconvert just + * enough to allow the up requestor to do its task. Because this + * lock is in the blocked queue, the lock will be downconverted + * as soon as the requestor is done with the lock. + */ + if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) + goto leave_requeue; + + /* + * How can we block and yet be at NL? We were trying to upconvert + * from NL and got canceled. The code comes back here, and now + * we notice and clear BLOCKING. + */ + if (lockres->l_level == DLM_LOCK_NL) { + BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders); + lockres->l_blocking = DLM_LOCK_NL; + lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED); + spin_unlock_irqrestore(&lockres->l_lock, flags); + goto leave; + } + /* if we're blocking an exclusive and we have *any* holders, * then requeue. */ if ((lockres->l_blocking == DLM_LOCK_EX) @@ -3438,6 +3504,7 @@ recheck: * may sleep, so we save off a copy of what we're blocking as * it may change while we're not holding the spin lock. */ blocking = lockres->l_blocking; + level = lockres->l_level; spin_unlock_irqrestore(&lockres->l_lock, flags); ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking); @@ -3446,7 +3513,7 @@ recheck: goto leave; spin_lock_irqsave(&lockres->l_lock, flags); - if (blocking != lockres->l_blocking) { + if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) { /* If this changed underneath us, then we can't drop * it just yet. 
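Taken together, the dlmglue hunks above implement one ordering rule: when the AST for an upconvert arrives, OCFS2_LOCK_UPCONVERT_FINISHING is set before OCFS2_LOCK_BUSY is cleared, the downconvert thread requeues instead of downconverting while that flag is visible, and the requestor clears the flag once it has taken (or abandoned) the lock at the new level. A compile-only sketch of that ordering with plain bit flags standing in for the lockres flags word (in the kernel all of this runs under lockres->l_lock):

/* Illustrative flag values; the kernel's are defined in ocfs2.h. */
#define LOCK_BUSY                 0x01UL
#define LOCK_BLOCKED              0x02UL
#define LOCK_UPCONVERT_FINISHING  0x04UL

/* AST handler: mark the upconvert as finishing *before* dropping BUSY,
 * so a downconvert check running in between still backs off. */
static void convert_ast(unsigned long *flags)
{
	*flags |= LOCK_UPCONVERT_FINISHING;
	*flags &= ~LOCK_BUSY;
}

/* Downconvert thread: requeue while an upconvert is being finished. */
static int downconvert_should_requeue(unsigned long flags)
{
	return (flags & LOCK_UPCONVERT_FINISHING) != 0;
}

/* Lock requestor: once the upconverted level has been consumed (or the
 * attempt abandoned), allow the pending downconvert to proceed. */
static void upconvert_consumed(unsigned long *flags)
{
	*flags &= ~LOCK_UPCONVERT_FINISHING;
}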
*/ goto recheck; diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 15713cb..19ad145 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", (unsigned long long)blkno, generation); } - + *max_len = len; bail: diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index d35a27f..5328529 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi, emi->ei_clusters += ins->ei_clusters; return 1; } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys && - (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys && + (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos && ins->ei_flags == emi->ei_flags) { emi->ei_phys = ins->ei_phys; emi->ei_cpos = ins->ei_cpos; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 06ccf6a..558ce03 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode, int ret; offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ - /* ugh. in prepare/commit_write, if from==to==start of block, we + /* ugh. in prepare/commit_write, if from==to==start of block, we ** skip the prepare. make sure we never send an offset for the start ** of a block */ @@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry, struct inode *inode = dentry->d_inode; loff_t saved_pos, end; - /* + /* * We start with a read level meta lock and only jump to an ex * if we need to make modifications here. */ @@ -2013,8 +2013,8 @@ out_dio: /* buffered aio wouldn't have proper lock coverage today */ BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); - if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) || - (file->f_flags & O_DIRECT && has_refcount)) { + if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || + ((file->f_flags & O_DIRECT) && has_refcount)) { ret = filemap_fdatawrite_range(file->f_mapping, pos, pos + count - 1); if (ret < 0) @@ -2033,7 +2033,7 @@ out_dio: pos + count - 1); } - /* + /* * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io * function pointer which is called when o_direct io completes so that * it can unlock our rw lock. (it's the clustered equivalent of @@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, goto bail; } - /* + /* * buffered reads protect themselves in ->readpage(). O_DIRECT reads * need locks to protect pending reads from racing with truncate. */ @@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, * We're fine letting folks race truncates and extending * writes with read across the cluster, just like they can * locally. Hence no rw_lock during read. - * + * * Take and drop the meta data lock to update inode fields * like i_size. This allows the checks down below - * generic_file_aio_read() a chance of actually working. + * generic_file_aio_read() a chance of actually working. 
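The extent_map.c hunk above fixes the front-merge test: an inserted extent may be merged in front of a cached one only when it is contiguous both physically and logically, and the old code compared the inserted extent's logical end against the cached extent's physical start. A one-function restatement of the corrected check (field names simplified):

struct extent { unsigned int cpos, phys, clusters, flags; };

/* "ins" can be glued onto the front of "cached" only if it ends exactly
 * where "cached" begins, in both the physical and the logical space,
 * and the two records carry the same flags. */
static int can_merge_in_front(const struct extent *cached,
			      const struct extent *ins)
{
	return ins->phys + ins->clusters == cached->phys &&
	       ins->cpos + ins->clusters == cached->cpos &&
	       ins->flags == cached->flags;
}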
*/ ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); if (ret < 0) { @@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, bail: if (have_alloc_sem) up_read(&inode->i_alloc_sem); - if (rw_level != -1) + if (rw_level != -1) ocfs2_rw_unlock(inode, rw_level); mlog_exit(ret); diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 0297fb8..88459bd 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode, if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) { status = ocfs2_try_open_lock(inode, 0); if (status) { - make_bad_inode(inode); + make_bad_inode(inode); return status; } } @@ -684,7 +684,7 @@ bail: return status; } -/* +/* * Serialize with orphan dir recovery. If the process doing * recovery on this orphan dir does an iget() with the dir * i_mutex held, we'll deadlock here. Instead we detect this diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 31fbb06..7d9d9c1 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -7,6 +7,7 @@ #include <linux/fs.h> #include <linux/mount.h> +#include <linux/compat.h> #define MLOG_MASK_PREFIX ML_INODE #include <cluster/masklog.h> @@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) #ifdef CONFIG_COMPAT long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) { + bool preserve; + struct reflink_arguments args; + struct inode *inode = file->f_path.dentry->d_inode; + switch (cmd) { case OCFS2_IOC32_GETFLAGS: cmd = OCFS2_IOC_GETFLAGS; @@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) case OCFS2_IOC_GROUP_EXTEND: case OCFS2_IOC_GROUP_ADD: case OCFS2_IOC_GROUP_ADD64: - case OCFS2_IOC_REFLINK: break; + case OCFS2_IOC_REFLINK: + if (copy_from_user(&args, (struct reflink_arguments *)arg, + sizeof(args))) + return -EFAULT; + preserve = (args.preserve != 0); + + return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path), + compat_ptr(args.new_path), preserve); default: return -ENOIOCTLCMD; } diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index bf34c49..9336c60 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, status = -ENOENT; mlog_errno(status); return status; - } + } mutex_lock(&orphan_dir_inode->i_mutex); status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 9362eea..740f448 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -136,6 +136,10 @@ enum ocfs2_unlock_action { #define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a call to dlm_lock. Only exists with BUSY set. 
*/ +#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread + * from downconverting + * before the upconvert + * has completed */ struct ocfs2_lock_res_ops; diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 1a1a679..7638a38 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize) return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink); } -static inline int ocfs2_max_inline_data(int blocksize) +static inline int ocfs2_max_inline_data_with_xattr(int blocksize, + struct ocfs2_dinode *di) { - return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data); + if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL)) + return blocksize - + offsetof(struct ocfs2_dinode, id2.i_data.id_data) - + di->i_xattr_inline_size; + else + return blocksize - + offsetof(struct ocfs2_dinode, id2.i_data.id_data); } static inline int ocfs2_extent_recs_per_inode(int blocksize) diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 74db2be..8ae65c9 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, while (offset < end) { page_index = offset >> PAGE_CACHE_SHIFT; - map_end = (page_index + 1) << PAGE_CACHE_SHIFT; + map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; if (map_end > end) map_end = end; @@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, page = grab_cache_page(mapping, page_index); - /* This page can't be dirtied before we CoW it out. */ - BUG_ON(PageDirty(page)); + /* + * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page + * can't be dirtied before we CoW it out. + */ + if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) + BUG_ON(PageDirty(page)); if (!PageUptodate(page)) { ret = block_read_full_page(page, ocfs2_get_block); @@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb, while (offset < end) { page_index = offset >> PAGE_CACHE_SHIFT; - map_end = (page_index + 1) << PAGE_CACHE_SHIFT; + map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; if (map_end > end) map_end = end; diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index e49c410..3038c92 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c @@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) u32 dlm_key; struct dlm_ctxt *dlm; struct o2dlm_private *priv; - struct dlm_protocol_version dlm_version; + struct dlm_protocol_version fs_version; BUG_ON(conn == NULL); BUG_ON(o2cb_stack.sp_proto == NULL); @@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) /* used by the dlm code to make message headers unique, each * node in this domain must agree on this. 
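The two refcounttree.c hunks above cast page_index to loff_t before shifting; without the cast the shift is performed in the index's narrower type, and on 32-bit systems file offsets at or beyond 4GB wrap. A small standalone program showing the difference, assuming the common 4K page size (shift of 12):

#include <stdio.h>

int main(void)
{
	unsigned int page_index = 0x100000;	/* models a 32-bit page index: page 1M == offset 4GB */
	int shift = 12;				/* 4K pages */

	/* Shift done in 32-bit arithmetic: the result wraps before it is
	 * widened, yielding 0x1000 instead of 0x100001000. */
	long long wrong = (page_index + 1) << shift;

	/* Widen first, then shift: the full 33-bit offset survives. */
	long long right = ((long long)page_index + 1) << shift;

	printf("wrong=%lld right=%lld\n", wrong, right);
	return 0;
}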
*/ dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen); - dlm_version.pv_major = conn->cc_version.pv_major; - dlm_version.pv_minor = conn->cc_version.pv_minor; + fs_version.pv_major = conn->cc_version.pv_major; + fs_version.pv_minor = conn->cc_version.pv_minor; - dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version); + dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version); if (IS_ERR(dlm)) { rc = PTR_ERR(dlm); mlog_errno(rc); goto out_free; } - conn->cc_version.pv_major = dlm_version.pv_major; - conn->cc_version.pv_minor = dlm_version.pv_minor; + conn->cc_version.pv_major = fs_version.pv_major; + conn->cc_version.pv_minor = fs_version.pv_minor; conn->cc_lockspace = dlm; dlm_register_eviction_cb(dlm, &priv->op_eviction_cb); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 2606991..755cd49 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) "file system, but write access is " "unavailable.\n"); else - mlog_errno(status); + mlog_errno(status); goto read_super_error; } diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 49b133c..32499d21 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry, } memcpy(link, target, len); - nd_set_link(nd, link); bail: + nd_set_link(nd, status ? ERR_PTR(status) : link); brelse(bh); mlog_exit(status); - return status ? ERR_PTR(status) : link; + return NULL; } static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) { - char *link = cookie; - - kfree(link); + char *link = nd_get_link(nd); + if (!IS_ERR(link)) + kfree(link); } const struct inode_operations ocfs2_symlink_inode_operations = { diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index c613693..a0a120e 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c @@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci, } /* Warning: even if it returns true, this does *not* guarantee that - * the block is stored in our inode metadata cache. - * + * the block is stored in our inode metadata cache. + * * This can be called under lock_buffer() */ int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci, diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index 1e67c44..f745948 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h @@ -77,6 +77,7 @@ struct drm_nouveau_gpuobj_free { #define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 #define NOUVEAU_GETPARAM_CHIPSET_ID 11 #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 +#define NOUVEAU_GETPARAM_GRAPH_UNITS 13 struct drm_nouveau_getparam { uint64_t param; uint64_t value; diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h index 2be7e12..c7645f4 100644 --- a/include/drm/vmwgfx_drm.h +++ b/include/drm/vmwgfx_drm.h @@ -68,7 +68,8 @@ #define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 #define DRM_VMW_PARAM_3D 2 #define DRM_VMW_PARAM_FIFO_OFFSET 3 - +#define DRM_VMW_PARAM_HW_CAPS 4 +#define DRM_VMW_PARAM_FIFO_CAPS 5 /** * struct drm_vmw_getparam_arg @@ -181,6 +182,8 @@ struct drm_vmw_context_arg { * The size of the array should equal the total number of mipmap levels. * @shareable: Boolean whether other clients (as identified by file descriptors) * may reference this surface. + * @scanout: Boolean whether the surface is intended to be used as a + * scanout. * * Input data to the DRM_VMW_CREATE_SURFACE Ioctl. * Output data from the DRM_VMW_REF_SURFACE Ioctl. 
@@ -192,7 +195,7 @@ struct drm_vmw_surface_create_req { uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; uint64_t size_addr; int32_t shareable; - uint32_t pad64; + int32_t scanout; }; /** @@ -295,17 +298,28 @@ union drm_vmw_surface_reference_arg { * * @commands: User-space address of a command buffer cast to an uint64_t. * @command-size: Size in bytes of the command buffer. + * @throttle-us: Sleep until software is less than @throttle_us + * microseconds ahead of hardware. The driver may round this value + * to the nearest kernel tick. * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an * uint64_t. + * @version: Allows expanding the execbuf ioctl parameters without breaking + * backwards compatibility, since user-space will always tell the kernel + * which version it uses. + * @flags: Execbuf flags. None currently. * * Argument to the DRM_VMW_EXECBUF Ioctl. */ +#define DRM_VMW_EXECBUF_VERSION 0 + struct drm_vmw_execbuf_arg { uint64_t commands; uint32_t command_size; - uint32_t pad64; + uint32_t throttle_us; uint64_t fence_rep; + uint32_t version; + uint32_t flags; }; /** diff --git a/include/linux/ata.h b/include/linux/ata.h index 38a6948..20f3156 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id) return id[ATA_ID_SECTOR_SIZE] & (1 << 13); } -static inline u8 ata_id_logical_per_physical_sectors(const u16 *id) +static inline u16 ata_id_logical_per_physical_sectors(const u16 *id) { - return id[ATA_ID_SECTOR_SIZE] & 0xf; + return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf); } static inline int ata_id_has_lba48(const u16 *id) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5be3dab..188fcae 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -15,6 +15,7 @@ # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? 
({ __acquire(x); 1; }) : 0) +# define __percpu __attribute__((noderef, address_space(3))) extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); #else @@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __acquire(x) (void)0 # define __release(x) (void)0 # define __cond_lock(x,c) (c) +# define __percpu #endif #ifdef __KERNEL__ diff --git a/include/linux/ima.h b/include/linux/ima.h index 99dc6d5..975837e 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -17,7 +17,7 @@ struct linux_binprm; extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_inode_alloc(struct inode *inode); extern void ima_inode_free(struct inode *inode); -extern int ima_path_check(struct path *path, int mask); +extern int ima_file_check(struct file *file, int mask); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); extern void ima_counts_get(struct file *file); @@ -38,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode) return; } -static inline int ima_path_check(struct path *path, int mask) +static inline int ima_file_check(struct file *file, int mask) { return 0; } diff --git a/include/linux/sched.h b/include/linux/sched.h index abdfacc..78efe7c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -310,6 +310,7 @@ extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_DETECT_SOFTLOCKUP extern void softlockup_tick(void); extern void touch_softlockup_watchdog(void); +extern void touch_softlockup_watchdog_sync(void); extern void touch_all_softlockup_watchdogs(void); extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, void __user *buffer, @@ -323,6 +324,9 @@ static inline void softlockup_tick(void) static inline void touch_softlockup_watchdog(void) { } +static inline void touch_softlockup_watchdog_sync(void) +{ +} static inline void touch_all_softlockup_watchdogs(void) { } diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index ba1ba0c..63d4498 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -11,6 +11,8 @@ struct nf_conntrack_ecache; struct netns_ct { atomic_t count; unsigned int expect_count; + unsigned int htable_size; + struct kmem_cache *nf_conntrack_cachep; struct hlist_nulls_head *hash; struct hlist_head *expect_hash; struct hlist_nulls_head unconfirmed; @@ -28,5 +30,6 @@ struct netns_ct { #endif int hash_vmalloc; int expect_vmalloc; + char *slabname; }; #endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 2eb3814..9a4b8b7 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -40,6 +40,7 @@ struct netns_ipv4 { struct xt_table *iptable_security; struct xt_table *nat_table; struct hlist_head *nat_bysource; + unsigned int nat_htable_size; int nat_vmalloced; #endif diff --git a/init/main.c b/init/main.c index dac44a9..4cb47a1 100644 --- a/init/main.c +++ b/init/main.c @@ -657,9 +657,9 @@ asmlinkage void __init start_kernel(void) proc_caches_init(); buffer_init(); key_init(); + radix_tree_init(); security_init(); vfs_caches_init(totalram_pages); - radix_tree_init(); signals_init(); /* rootfs populating might need page-writeback */ page_writeback_init(); diff --git a/kernel/futex.c b/kernel/futex.c index d9b3a22..e7a35f10 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -530,8 +530,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, return -EINVAL; 
WARN_ON(!atomic_read(&pi_state->refcount)); - WARN_ON(pid && pi_state->owner && - pi_state->owner->pid != pid); + + /* + * When pi_state->owner is NULL then the owner died + * and another waiter is on the fly. pi_state->owner + * is fixed up by the task which acquires + * pi_state->rt_mutex. + * + * We do not check for pid == 0 which can happen when + * the owner died and robust_list_exit() cleared the + * TID. + */ + if (pid && pi_state->owner) { + /* + * Bail out if user space manipulated the + * futex value. + */ + if (pid != task_pid_vnr(pi_state->owner)) + return -EINVAL; + } atomic_inc(&pi_state->refcount); *ps = pi_state; @@ -758,6 +775,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) if (!pi_state) return -EINVAL; + /* + * If current does not own the pi_state then the futex is + * inconsistent and user space fiddled with the futex value. + */ + if (pi_state->owner != current) + return -EINVAL; + raw_spin_lock(&pi_state->pi_mutex.wait_lock); new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); @@ -1971,7 +1995,7 @@ retry_private: /* Unqueue and drop the lock */ unqueue_me_pi(&q); - goto out; + goto out_put_key; out_unlock_put_key: queue_unlock(&q, hb); diff --git a/kernel/kgdb.c b/kernel/kgdb.c index c7ade62..761fdd2 100644 --- a/kernel/kgdb.c +++ b/kernel/kgdb.c @@ -599,7 +599,7 @@ static void kgdb_wait(struct pt_regs *regs) /* Signal the primary CPU that we are done: */ atomic_set(&cpu_in_kgdb[cpu], 0); - touch_softlockup_watchdog(); + touch_softlockup_watchdog_sync(); clocksource_touch_watchdog(); local_irq_restore(flags); } @@ -1453,7 +1453,7 @@ acquirelock: (kgdb_info[cpu].task && kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { atomic_set(&kgdb_active, -1); - touch_softlockup_watchdog(); + touch_softlockup_watchdog_sync(); clocksource_touch_watchdog(); local_irq_restore(flags); @@ -1553,7 +1553,7 @@ kgdb_restore: } /* Free kgdb_active */ atomic_set(&kgdb_active, -1); - touch_softlockup_watchdog(); + touch_softlockup_watchdog_sync(); clocksource_touch_watchdog(); local_irq_restore(flags); diff --git a/kernel/softlockup.c b/kernel/softlockup.c index d225790..0d4c789 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -25,6 +25,7 @@ static DEFINE_SPINLOCK(print_lock); static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */ static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */ static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); +static DEFINE_PER_CPU(bool, softlock_touch_sync); static int __read_mostly did_panic; int __read_mostly softlockup_thresh = 60; @@ -79,6 +80,12 @@ void touch_softlockup_watchdog(void) } EXPORT_SYMBOL(touch_softlockup_watchdog); +void touch_softlockup_watchdog_sync(void) +{ + __raw_get_cpu_var(softlock_touch_sync) = true; + __raw_get_cpu_var(softlockup_touch_ts) = 0; +} + void touch_all_softlockup_watchdogs(void) { int cpu; @@ -118,6 +125,14 @@ void softlockup_tick(void) } if (touch_ts == 0) { + if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) { + /* + * If the time stamp was touched atomically + * make sure the scheduler tick is up to date. 
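The two futex.c hunks above harden the kernel against a futex word that user space has corrupted: a waiter's view of the TID stored in the futex is only rejected when a nonzero TID disagrees with a live pi_state owner (owner == NULL means the owner died and is being fixed up, TID == 0 means robust-list exit already cleared it), and an unlock is rejected outright unless the caller really owns the PI state. A standalone sketch of those two checks, with a plain struct and PIDs standing in for the kernel's task pointers:

#include <errno.h>
#include <stddef.h>

struct pi_owner { int pid; };	/* stand-in for pi_state->owner */

/* Waiter side: only a definite mismatch is treated as corruption. */
static int check_waiter_view(int futex_tid, const struct pi_owner *owner)
{
	if (futex_tid && owner && futex_tid != owner->pid)
		return -EINVAL;
	return 0;
}

/* Unlock side: only the task that owns the PI state may release it;
 * anything else means the futex value was manipulated from user space. */
static int check_unlocker(const struct pi_owner *owner, int current_pid)
{
	if (!owner || owner->pid != current_pid)
		return -EINVAL;
	return 0;
}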
+ */ + per_cpu(softlock_touch_sync, this_cpu) = false; + sched_clock_tick(); + } __touch_softlockup_watchdog(); return; } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 7faaa32..e2ab064 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -880,6 +880,7 @@ void getboottime(struct timespec *ts) set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); } +EXPORT_SYMBOL_GPL(getboottime); /** * monotonic_to_bootbased - Convert the monotonic time to boot based. @@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts) { *ts = timespec_add_safe(*ts, total_sleep_time); } +EXPORT_SYMBOL_GPL(monotonic_to_bootbased); unsigned long get_seconds(void) { @@ -140,7 +140,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) id = *starting_id; restart: p = idp->top; - l = p->layer; + l = idp->layers; + pa[l--] = NULL; while (1) { /* * We run around this while until we reach the leaf node... @@ -154,8 +155,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) oid = id; id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; - /* did id go over the limit? */ - if (id >= (1 << (idp->layers * IDR_BITS))) { + /* if already at the top layer, we need to grow */ + if (!(p = pa[l])) { *starting_id = id; return IDR_NEED_TO_GROW; } diff --git a/mm/migrate.c b/mm/migrate.c index efddbf0..9a0db5b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -912,6 +912,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task, goto out_pm; err = -ENODEV; + if (node < 0 || node >= MAX_NUMNODES) + goto out_pm; + if (!node_state(node, N_HIGH_MEMORY)) goto out_pm; diff --git a/net/9p/client.c b/net/9p/client.c index 8af95b2..09d4f1e2 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...); static int parse_opts(char *opts, struct p9_client *clnt) { - char *options; + char *options, *tmp_options; char *p; substring_t args[MAX_OPT_ARGS]; int option; @@ -81,12 +81,13 @@ static int parse_opts(char *opts, struct p9_client *clnt) if (!opts) return 0; - options = kstrdup(opts, GFP_KERNEL); - if (!options) { + tmp_options = kstrdup(opts, GFP_KERNEL); + if (!tmp_options) { P9_DPRINTK(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } + options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -108,6 +109,13 @@ static int parse_opts(char *opts, struct p9_client *clnt) break; case Opt_trans: clnt->trans_mod = v9fs_get_trans_by_name(&args[0]); + if(clnt->trans_mod == NULL) { + P9_DPRINTK(P9_DEBUG_ERROR, + "Could not find request transport: %s\n", + (char *) &args[0]); + ret = -EINVAL; + goto free_and_return; + } break; case Opt_legacy: clnt->dotu = 0; @@ -117,7 +125,8 @@ static int parse_opts(char *opts, struct p9_client *clnt) } } - kfree(options); +free_and_return: + kfree(tmp_options); return ret; } @@ -667,18 +676,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) clnt->trans = NULL; spin_lock_init(&clnt->lock); INIT_LIST_HEAD(&clnt->fidlist); - clnt->fidpool = p9_idpool_create(); - if (IS_ERR(clnt->fidpool)) { - err = PTR_ERR(clnt->fidpool); - clnt->fidpool = NULL; - goto error; - } p9_tag_init(clnt); err = parse_opts(options, clnt); if (err < 0) - goto error; + goto free_client; if (!clnt->trans_mod) clnt->trans_mod = v9fs_get_default_trans(); @@ -687,7 +690,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) err = 
-EPROTONOSUPPORT; P9_DPRINTK(P9_DEBUG_ERROR, "No transport defined or default transport\n"); - goto error; + goto free_client; + } + + clnt->fidpool = p9_idpool_create(); + if (IS_ERR(clnt->fidpool)) { + err = PTR_ERR(clnt->fidpool); + clnt->fidpool = NULL; + goto put_trans; } P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n", @@ -695,19 +705,25 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) err = clnt->trans_mod->create(clnt, dev_name, options); if (err) - goto error; + goto destroy_fidpool; if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize) clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ; err = p9_client_version(clnt); if (err) - goto error; + goto close_trans; return clnt; -error: - p9_client_destroy(clnt); +close_trans: + clnt->trans_mod->close(clnt); +destroy_fidpool: + p9_idpool_destroy(clnt->fidpool); +put_trans: + v9fs_put_trans(clnt->trans_mod); +free_client: + kfree(clnt); return ERR_PTR(err); } EXPORT_SYMBOL(p9_client_create); @@ -1214,10 +1230,11 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional) { int ret; + /* NOTE: size shouldn't include its own length */ /* size[2] type[2] dev[4] qid[13] */ /* mode[4] atime[4] mtime[4] length[8]*/ /* name[s] uid[s] gid[s] muid[s] */ - ret = 2+2+4+13+4+4+4+8+2+2+2+2; + ret = 2+4+13+4+4+4+8+2+2+2+2; if (wst->name) ret += strlen(wst->name); @@ -1258,7 +1275,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst) wst->name, wst->uid, wst->gid, wst->muid, wst->extension, wst->n_uid, wst->n_gid, wst->n_muid); - req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst); + req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst); if (IS_ERR(req)) { err = PTR_ERR(req); goto error; diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index be1cb90..31d0b05 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) char *p; substring_t args[MAX_OPT_ARGS]; int option; - char *options; + char *options, *tmp_options; int ret; opts->port = P9_PORT; @@ -724,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) if (!params) return 0; - options = kstrdup(params, GFP_KERNEL); - if (!options) { + tmp_options = kstrdup(params, GFP_KERNEL); + if (!tmp_options) { P9_DPRINTK(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } + options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -760,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) continue; } } - kfree(options); + + kfree(tmp_options); return 0; } diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 65cb29d..2c95a89 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts) char *p; substring_t args[MAX_OPT_ARGS]; int option; - char *options; + char *options, *tmp_options; int ret; opts->port = P9_PORT; @@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts) if (!params) return 0; - options = kstrdup(params, GFP_KERNEL); - if (!options) { + tmp_options = kstrdup(params, GFP_KERNEL); + if (!tmp_options) { P9_DPRINTK(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } + options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts) } /* RQ must be at least as large as the SQ */ 
opts->rq_depth = max(opts->rq_depth, opts->sq_depth); - kfree(options); + kfree(tmp_options); return 0; } diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index ea1e3da..cb50f4a 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -102,7 +102,8 @@ static void p9_virtio_close(struct p9_client *client) struct virtio_chan *chan = client->trans; mutex_lock(&virtio_9p_lock); - chan->inuse = false; + if (chan) + chan->inuse = false; mutex_unlock(&virtio_9p_lock); } @@ -311,6 +312,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args) } client->trans = (void *)chan; + client->status = Connected; chan->client = client; return 0; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index b7c4224..b10e3cd 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 if (acl->state == BT_CONNECTED && (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { + acl->power_save = 1; + hci_conn_enter_active_mode(acl); + if (lmp_esco_capable(hdev)) hci_setup_sync(sco, acl->handle); else diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 28517ba..592da5c 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1699,6 +1699,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu break; case 0x1c: /* SCO interval rejected */ + case 0x1a: /* Unsupported Remote Feature */ case 0x1f: /* Unspecified error */ if (conn->out && conn->attempt < 2) { conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 6cf526d..fc6ec1e 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -703,29 +703,9 @@ static void hidp_close(struct hid_device *hid) static int hidp_parse(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; - struct hidp_connadd_req *req = session->req; - unsigned char *buf; - int ret; - - buf = kmalloc(req->rd_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, req->rd_data, req->rd_size)) { - kfree(buf); - return -EFAULT; - } - - ret = hid_parse_report(session->hid, buf, req->rd_size); - - kfree(buf); - - if (ret) - return ret; - - session->req = NULL; - return 0; + return hid_parse_report(session->hid, session->rd_data, + session->rd_size); } static int hidp_start(struct hid_device *hid) @@ -770,12 +750,24 @@ static int hidp_setup_hid(struct hidp_session *session, bdaddr_t src, dst; int err; + session->rd_data = kzalloc(req->rd_size, GFP_KERNEL); + if (!session->rd_data) + return -ENOMEM; + + if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) { + err = -EFAULT; + goto fault; + } + session->rd_size = req->rd_size; + hid = hid_allocate_device(); - if (IS_ERR(hid)) - return PTR_ERR(hid); + if (IS_ERR(hid)) { + err = PTR_ERR(hid); + goto fault; + } session->hid = hid; - session->req = req; + hid->driver_data = session; baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); @@ -806,6 +798,10 @@ failed: hid_destroy_device(hid); session->hid = NULL; +fault: + kfree(session->rd_data); + session->rd_data = NULL; + return err; } @@ -900,6 +896,9 @@ unlink: session->hid = NULL; } + kfree(session->rd_data); + session->rd_data = NULL; + purge: skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h index faf3d74..a4e215d 100644 --- 
a/net/bluetooth/hidp/hidp.h +++ b/net/bluetooth/hidp/hidp.h @@ -154,7 +154,9 @@ struct hidp_session { struct sk_buff_head ctrl_transmit; struct sk_buff_head intr_transmit; - struct hidp_connadd_req *req; + /* Report descriptor */ + __u8 *rd_data; + uint rd_size; }; static inline void hidp_schedule(struct hidp_session *session) diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index fc5ee32..89f4a59 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -252,7 +252,6 @@ static void rfcomm_session_timeout(unsigned long arg) BT_DBG("session %p state %ld", s, s->state); set_bit(RFCOMM_TIMED_OUT, &s->flags); - rfcomm_session_put(s); rfcomm_schedule(RFCOMM_SCHED_TIMEO); } @@ -1151,7 +1150,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) break; case BT_DISCONN: - rfcomm_session_put(s); + /* When socket is closed and we are not RFCOMM + * initiator rfcomm_process_rx already calls + * rfcomm_session_put() */ + if (s->sock->sk->sk_state != BT_CLOSED) + rfcomm_session_put(s); break; } } @@ -1920,6 +1923,7 @@ static inline void rfcomm_process_sessions(void) if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { s->state = BT_DISCONN; rfcomm_send_disc(s, 0); + rfcomm_session_put(s); continue; } diff --git a/net/core/dst.c b/net/core/dst.c index 57bc4d5..cb1b348 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -17,6 +17,7 @@ #include <linux/string.h> #include <linux/types.h> #include <net/net_namespace.h> +#include <linux/sched.h> #include <net/dst.h> @@ -79,6 +80,7 @@ loop: while ((dst = next) != NULL) { next = dst->next; prefetch(&next->next); + cond_resched(); if (likely(atomic_read(&dst->__refcnt))) { last->next = dst; last = dst; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index de0c2c7..2e692af 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3524,6 +3524,7 @@ static int pktgen_thread_worker(void *arg) wait_event_interruptible_timeout(t->queue, t->control != 0, HZ/10); + try_to_freeze(); continue; } diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c index 57dfb9c..ff16e9d 100644 --- a/net/dccp/ccid.c +++ b/net/dccp/ccid.c @@ -83,7 +83,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f va_list args; va_start(args, fmt); - vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); + vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args); va_end(args); slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 269958b..6df6f8a 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -19,7 +19,9 @@ #include <linux/list.h> #include <linux/module.h> -#define CCID_MAX 255 +/* maximum value for a CCID (RFC 4340, 19.5) */ +#define CCID_MAX 255 +#define CCID_SLAB_NAME_LENGTH 32 struct tcp_info; @@ -49,8 +51,8 @@ struct ccid_operations { const char *ccid_name; struct kmem_cache *ccid_hc_rx_slab, *ccid_hc_tx_slab; - char ccid_hc_rx_slab_name[32]; - char ccid_hc_tx_slab_name[32]; + char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH]; + char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH]; __u32 ccid_hc_rx_obj_size, ccid_hc_tx_obj_size; /* Interface Routines */ diff --git a/net/dccp/probe.c b/net/dccp/probe.c index bace1d8..f5b3464 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c @@ -161,8 +161,8 @@ static __init int dccpprobe_init(void) if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) goto err0; - ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0), - "dccp"); + 
try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0, + "dccp"); if (ret) goto err1; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 0663276..90203e1 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) if (t && !IS_ERR(t)) { struct arpt_getinfo info; const struct xt_table_info *private = t->private; - #ifdef CONFIG_COMPAT + struct xt_table_info tmp; + if (compat) { - struct xt_table_info tmp; ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(NFPROTO_ARP); private = &tmp; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 572330a..3ce53cf 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) if (t && !IS_ERR(t)) { struct ipt_getinfo info; const struct xt_table_info *private = t->private; - #ifdef CONFIG_COMPAT + struct xt_table_info tmp; + if (compat) { - struct xt_table_info tmp; ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET); private = &tmp; diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index d171b12..d1ea38a 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = { }, { .procname = "ip_conntrack_buckets", - .data = &nf_conntrack_htable_size, + .data = &init_net.ct.htable_size, .maxlen = sizeof(unsigned int), .mode = 0444, .proc_handler = proc_dointvec, diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 8668a3d..2fb7b76 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) struct hlist_nulls_node *n; for (st->bucket = 0; - st->bucket < nf_conntrack_htable_size; + st->bucket < net->ct.htable_size; st->bucket++) { n = rcu_dereference(net->ct.hash[st->bucket].first); if (!is_a_nulls(n)) @@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, head = rcu_dereference(head->next); while (is_a_nulls(head)) { if (likely(get_nulls_value(head) == st->bucket)) { - if (++st->bucket >= nf_conntrack_htable_size) + if (++st->bucket >= net->ct.htable_size) return NULL; } head = rcu_dereference(net->ct.hash[st->bucket].first); diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index fe1a644..26066a2 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock); static struct nf_conntrack_l3proto *l3proto __read_mostly; -/* Calculated at init based on memory size */ -static unsigned int nf_nat_htable_size __read_mostly; - #define MAX_IP_NAT_PROTO 256 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] __read_mostly; @@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put); /* We keep an extra hash for each conntrack, for fast searching. 
*/ static inline unsigned int -hash_by_src(const struct nf_conntrack_tuple *tuple) +hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) { unsigned int hash; @@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple) hash = jhash_3words((__force u32)tuple->src.u3.ip, (__force u32)tuple->src.u.all, tuple->dst.protonum, 0); - return ((u64)hash * nf_nat_htable_size) >> 32; + return ((u64)hash * net->ipv4.nat_htable_size) >> 32; } /* Is this tuple already taken? (not by us) */ @@ -147,7 +144,7 @@ find_appropriate_src(struct net *net, struct nf_conntrack_tuple *result, const struct nf_nat_range *range) { - unsigned int h = hash_by_src(tuple); + unsigned int h = hash_by_src(net, tuple); const struct nf_conn_nat *nat; const struct nf_conn *ct; const struct hlist_node *n; @@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct, if (have_to_hash) { unsigned int srchash; - srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); spin_lock_bh(&nf_nat_lock); /* nf_conntrack_alter_reply might re-allocate exntension aera */ nat = nfct_nat(ct); @@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, static int __net_init nf_nat_net_init(struct net *net) { - net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, - &net->ipv4.nat_vmalloced, 0); + /* Leave them the same for the moment. */ + net->ipv4.nat_htable_size = net->ct.htable_size; + net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, + &net->ipv4.nat_vmalloced, 0); if (!net->ipv4.nat_bysource) return -ENOMEM; return 0; @@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net) nf_ct_iterate_cleanup(net, &clean_nat, NULL); synchronize_rcu(); nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, - nf_nat_htable_size); + net->ipv4.nat_htable_size); } static struct pernet_operations nf_nat_net_ops = { @@ -724,9 +723,6 @@ static int __init nf_nat_init(void) return ret; } - /* Leave them the same for the moment. 
*/ - nf_nat_htable_size = nf_conntrack_htable_size; - ret = register_pernet_subsys(&nf_nat_net_ops); if (ret < 0) goto cleanup_extend; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 480d7f8..8a7e0f5 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) if (t && !IS_ERR(t)) { struct ip6t_getinfo info; const struct xt_table_info *private = t->private; - #ifdef CONFIG_COMPAT + struct xt_table_info tmp; + if (compat) { - struct xt_table_info tmp; ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET6); private = &tmp; diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 156020d..6b3602d 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -698,15 +698,18 @@ dev_irnet_ioctl( /* Query PPP channel and unit number */ case PPPIOCGCHAN: + lock_kernel(); if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) err = 0; + unlock_kernel(); break; case PPPIOCGUNIT: lock_kernel(); if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) - err = 0; + err = 0; + unlock_kernel(); break; /* All these ioctls can be passed both directly and from ppp_generic, diff --git a/net/key/af_key.c b/net/key/af_key.c index 76fa6fe..539f43b 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3794,9 +3794,9 @@ static struct pernet_operations pfkey_net_ops = { static void __exit ipsec_pfkey_exit(void) { - unregister_pernet_subsys(&pfkey_net_ops); xfrm_unregister_km(&pfkeyv2_mgr); sock_unregister(PF_KEY); + unregister_pernet_subsys(&pfkey_net_ops); proto_unregister(&key_proto); } @@ -3807,21 +3807,22 @@ static int __init ipsec_pfkey_init(void) if (err != 0) goto out; - err = sock_register(&pfkey_family_ops); + err = register_pernet_subsys(&pfkey_net_ops); if (err != 0) goto out_unregister_key_proto; + err = sock_register(&pfkey_family_ops); + if (err != 0) + goto out_unregister_pernet; err = xfrm_register_km(&pfkeyv2_mgr); if (err != 0) goto out_sock_unregister; - err = register_pernet_subsys(&pfkey_net_ops); - if (err != 0) - goto out_xfrm_unregister_km; out: return err; -out_xfrm_unregister_km: - xfrm_unregister_km(&pfkeyv2_mgr); + out_sock_unregister: sock_unregister(PF_KEY); +out_unregister_pernet: + unregister_pernet_subsys(&pfkey_net_ops); out_unregister_key_proto: proto_unregister(&key_proto); goto out; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0e98c32..4d79e3c 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -30,6 +30,7 @@ #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/mm.h> +#include <linux/nsproxy.h> #include <linux/rculist_nulls.h> #include <net/netfilter/nf_conntrack.h> @@ -63,8 +64,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max); struct nf_conn nf_conntrack_untracked __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_untracked); -static struct kmem_cache *nf_conntrack_cachep __read_mostly; - static int nf_conntrack_hash_rnd_initted; static unsigned int nf_conntrack_hash_rnd; @@ -86,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, return ((u64)h * size) >> 32; } -static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) +static inline u_int32_t hash_conntrack(const struct net *net, + const struct nf_conntrack_tuple *tuple) { - return __hash_conntrack(tuple, nf_conntrack_htable_size, 
+ return __hash_conntrack(tuple, net->ct.htable_size, nf_conntrack_hash_rnd); } @@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - unsigned int hash = hash_conntrack(tuple); + unsigned int hash = hash_conntrack(net, tuple); /* Disable BHs the entire time since we normally need to disable them * at least once for the stats anyway. @@ -366,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct, void nf_conntrack_hash_insert(struct nf_conn *ct) { + struct net *net = nf_ct_net(ct); unsigned int hash, repl_hash; - hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); + hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); __nf_conntrack_hash_insert(ct, hash, repl_hash); } @@ -397,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb) if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) return NF_ACCEPT; - hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); + hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); /* We're not in hash table, and we refuse to set up related connections for unconfirmed conns. But packet copies and @@ -468,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, struct net *net = nf_ct_net(ignored_conntrack); struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - unsigned int hash = hash_conntrack(tuple); + unsigned int hash = hash_conntrack(net, tuple); /* Disable BHs the entire time since we need to disable them at * least once for the stats anyway. @@ -503,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) int dropped = 0; rcu_read_lock(); - for (i = 0; i < nf_conntrack_htable_size; i++) { + for (i = 0; i < net->ct.htable_size; i++) { hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { tmp = nf_ct_tuplehash_to_ctrack(h); @@ -523,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) if (cnt >= NF_CT_EVICTION_RANGE) break; - hash = (hash + 1) % nf_conntrack_htable_size; + hash = (hash + 1) % net->ct.htable_size; } rcu_read_unlock(); @@ -557,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, if (nf_conntrack_max && unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { - unsigned int hash = hash_conntrack(orig); + unsigned int hash = hash_conntrack(net, orig); if (!early_drop(net, hash)) { atomic_dec(&net->ct.count); if (net_ratelimit()) @@ -572,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, * Do not use kmem_cache_zalloc(), as this cache uses * SLAB_DESTROY_BY_RCU. 
*/ - ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); + ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); if (ct == NULL) { pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); atomic_dec(&net->ct.count); @@ -611,7 +612,7 @@ void nf_conntrack_free(struct nf_conn *ct) nf_ct_ext_destroy(ct); atomic_dec(&net->ct.count); nf_ct_ext_free(ct); - kmem_cache_free(nf_conntrack_cachep, ct); + kmem_cache_free(net->ct.nf_conntrack_cachep, ct); } EXPORT_SYMBOL_GPL(nf_conntrack_free); @@ -1014,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), struct hlist_nulls_node *n; spin_lock_bh(&nf_conntrack_lock); - for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { + for (; *bucket < net->ct.htable_size; (*bucket)++) { hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (iter(ct, data)) @@ -1113,9 +1114,12 @@ static void nf_ct_release_dying_list(struct net *net) static void nf_conntrack_cleanup_init_net(void) { + /* wait until all references to nf_conntrack_untracked are dropped */ + while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) + schedule(); + nf_conntrack_helper_fini(); nf_conntrack_proto_fini(); - kmem_cache_destroy(nf_conntrack_cachep); } static void nf_conntrack_cleanup_net(struct net *net) @@ -1127,15 +1131,14 @@ static void nf_conntrack_cleanup_net(struct net *net) schedule(); goto i_see_dead_people; } - /* wait until all references to nf_conntrack_untracked are dropped */ - while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) - schedule(); nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, - nf_conntrack_htable_size); + net->ct.htable_size); nf_conntrack_ecache_fini(net); nf_conntrack_acct_fini(net); nf_conntrack_expect_fini(net); + kmem_cache_destroy(net->ct.nf_conntrack_cachep); + kfree(net->ct.slabname); free_percpu(net->ct.stat); } @@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) { int i, bucket, vmalloced, old_vmalloced; unsigned int hashsize, old_size; - int rnd; struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; + if (current->nsproxy->net_ns != &init_net) + return -EOPNOTSUPP; + /* On boot, we can set this without any fancy locking. */ if (!nf_conntrack_htable_size) return param_set_uint(val, kp); @@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) if (!hash) return -ENOMEM; - /* We have to rehahs for the new table anyway, so we also can - * use a newrandom seed */ - get_random_bytes(&rnd, sizeof(rnd)); - /* Lookups in the old hash might happen in parallel, which means we * might get false negatives during connection lookup. New connections * created because of a false negative won't make it into the hash * though since that required taking the lock. 
*/ spin_lock_bh(&nf_conntrack_lock); - for (i = 0; i < nf_conntrack_htable_size; i++) { + for (i = 0; i < init_net.ct.htable_size; i++) { while (!hlist_nulls_empty(&init_net.ct.hash[i])) { h = hlist_nulls_entry(init_net.ct.hash[i].first, struct nf_conntrack_tuple_hash, hnnode); hlist_nulls_del_rcu(&h->hnnode); - bucket = __hash_conntrack(&h->tuple, hashsize, rnd); + bucket = __hash_conntrack(&h->tuple, hashsize, + nf_conntrack_hash_rnd); hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } - old_size = nf_conntrack_htable_size; + old_size = init_net.ct.htable_size; old_vmalloced = init_net.ct.hash_vmalloc; old_hash = init_net.ct.hash; - nf_conntrack_htable_size = hashsize; + init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; init_net.ct.hash_vmalloc = vmalloced; init_net.ct.hash = hash; - nf_conntrack_hash_rnd = rnd; spin_unlock_bh(&nf_conntrack_lock); nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); @@ -1271,15 +1272,6 @@ static int nf_conntrack_init_init_net(void) NF_CONNTRACK_VERSION, nf_conntrack_htable_size, nf_conntrack_max); - nf_conntrack_cachep = kmem_cache_create("nf_conntrack", - sizeof(struct nf_conn), - 0, SLAB_DESTROY_BY_RCU, NULL); - if (!nf_conntrack_cachep) { - printk(KERN_ERR "Unable to create nf_conn slab cache\n"); - ret = -ENOMEM; - goto err_cache; - } - ret = nf_conntrack_proto_init(); if (ret < 0) goto err_proto; @@ -1288,13 +1280,19 @@ static int nf_conntrack_init_init_net(void) if (ret < 0) goto err_helper; + /* Set up fake conntrack: to never be deleted, not in any hashes */ +#ifdef CONFIG_NET_NS + nf_conntrack_untracked.ct_net = &init_net; +#endif + atomic_set(&nf_conntrack_untracked.ct_general.use, 1); + /* - and look it like as a confirmed connection */ + set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); + return 0; err_helper: nf_conntrack_proto_fini(); err_proto: - kmem_cache_destroy(nf_conntrack_cachep); -err_cache: return ret; } @@ -1316,7 +1314,24 @@ static int nf_conntrack_init_net(struct net *net) ret = -ENOMEM; goto err_stat; } - net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, + + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); + if (!net->ct.slabname) { + ret = -ENOMEM; + goto err_slabname; + } + + net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, + sizeof(struct nf_conn), 0, + SLAB_DESTROY_BY_RCU, NULL); + if (!net->ct.nf_conntrack_cachep) { + printk(KERN_ERR "Unable to create nf_conn slab cache\n"); + ret = -ENOMEM; + goto err_cache; + } + + net->ct.htable_size = nf_conntrack_htable_size; + net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, &net->ct.hash_vmalloc, 1); if (!net->ct.hash) { ret = -ENOMEM; @@ -1333,15 +1348,6 @@ static int nf_conntrack_init_net(struct net *net) if (ret < 0) goto err_ecache; - /* Set up fake conntrack: - - to never be deleted, not in any hashes */ -#ifdef CONFIG_NET_NS - nf_conntrack_untracked.ct_net = &init_net; -#endif - atomic_set(&nf_conntrack_untracked.ct_general.use, 1); - /* - and look it like as a confirmed connection */ - set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); - return 0; err_ecache: @@ -1350,8 +1356,12 @@ err_acct: nf_conntrack_expect_fini(net); err_expect: nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, - nf_conntrack_htable_size); + net->ct.htable_size); err_hash: + kmem_cache_destroy(net->ct.nf_conntrack_cachep); +err_cache: + kfree(net->ct.slabname); +err_slabname: free_percpu(net->ct.stat); err_stat: return ret; diff --git a/net/netfilter/nf_conntrack_expect.c 
b/net/netfilter/nf_conntrack_expect.c index fdf5d2a..2f25ff6 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net) #endif /* CONFIG_PROC_FS */ } -module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); +module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); int nf_conntrack_expect_init(struct net *net) { @@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net) if (net_eq(net, &init_net)) { if (!nf_ct_expect_hsize) { - nf_ct_expect_hsize = nf_conntrack_htable_size / 256; + nf_ct_expect_hsize = net->ct.htable_size / 256; if (!nf_ct_expect_hsize) nf_ct_expect_hsize = 1; } diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 65c2a7b..4b1a56b 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, /* Get rid of expecteds, set helpers to NULL. */ hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) unhelp(h, me); - for (i = 0; i < nf_conntrack_htable_size; i++) { + for (i = 0; i < net->ct.htable_size; i++) { hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) unhelp(h, me); } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 42f21c0..0ffe689 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); last = (struct nf_conn *)cb->args[1]; - for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { + for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) { restart: hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], hnnode) { diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 028aba6..e310f15 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) struct hlist_nulls_node *n; for (st->bucket = 0; - st->bucket < nf_conntrack_htable_size; + st->bucket < net->ct.htable_size; st->bucket++) { n = rcu_dereference(net->ct.hash[st->bucket].first); if (!is_a_nulls(n)) @@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, head = rcu_dereference(head->next); while (is_a_nulls(head)) { if (likely(get_nulls_value(head) == st->bucket)) { - if (++st->bucket >= nf_conntrack_htable_size) + if (++st->bucket >= net->ct.htable_size) return NULL; } head = rcu_dereference(net->ct.hash[st->bucket].first); @@ -355,7 +355,7 @@ static ctl_table nf_ct_sysctl_table[] = { }, { .procname = "nf_conntrack_buckets", - .data = &nf_conntrack_htable_size, + .data = &init_net.ct.htable_size, .maxlen = sizeof(unsigned int), .mode = 0444, .proc_handler = proc_dointvec, @@ -421,6 +421,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) goto out_kmemdup; table[1].data = &net->ct.count; + table[2].data = &net->ct.htable_size; table[3].data = &net->ct.sysctl_checksum; table[4].data = &net->ct.sysctl_log_invalid; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index a4957bf..4c5972b 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -455,9 +455,14 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, if 
(nl_table[protocol].registered && try_module_get(nl_table[protocol].module)) module = nl_table[protocol].module; + else + err = -EPROTONOSUPPORT; cb_mutex = nl_table[protocol].cb_mutex; netlink_unlock_table(); + if (err < 0) + goto out; + err = __netlink_create(net, sock, cb_mutex, protocol); if (err < 0) goto out_module; diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 929218a4..21f9c76 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -433,7 +433,7 @@ config NET_ACT_POLICE module. To compile this code as a module, choose M here: the - module will be called police. + module will be called act_police. config NET_ACT_GACT tristate "Generic actions" @@ -443,7 +443,7 @@ config NET_ACT_GACT accepting packets. To compile this code as a module, choose M here: the - module will be called gact. + module will be called act_gact. config GACT_PROB bool "Probability support" @@ -459,7 +459,7 @@ config NET_ACT_MIRRED other devices. To compile this code as a module, choose M here: the - module will be called mirred. + module will be called act_mirred. config NET_ACT_IPT tristate "IPtables targets" @@ -469,7 +469,7 @@ config NET_ACT_IPT classification. To compile this code as a module, choose M here: the - module will be called ipt. + module will be called act_ipt. config NET_ACT_NAT tristate "Stateless NAT" @@ -479,7 +479,7 @@ config NET_ACT_NAT netfilter for NAT unless you know what you are doing. To compile this code as a module, choose M here: the - module will be called nat. + module will be called act_nat. config NET_ACT_PEDIT tristate "Packet Editing" @@ -488,7 +488,7 @@ config NET_ACT_PEDIT Say Y here if you want to mangle the content of packets. To compile this code as a module, choose M here: the - module will be called pedit. + module will be called act_pedit. config NET_ACT_SIMP tristate "Simple Example (Debug)" @@ -502,7 +502,7 @@ config NET_ACT_SIMP If unsure, say N. To compile this code as a module, choose M here: the - module will be called simple. + module will be called act_simple. config NET_ACT_SKBEDIT tristate "SKB Editing" @@ -513,7 +513,7 @@ config NET_ACT_SKBEDIT If unsure, say N. To compile this code as a module, choose M here: the - module will be called skbedit. + module will be called act_skbedit. config NET_CLS_IND bool "Incoming device classification" diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index c41afe6..47fb65d 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -65,7 +65,6 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode, const char *cause, int result, int info); /* Internal IMA function definitions */ -void ima_iintcache_init(void); int ima_init(void); void ima_cleanup(void); int ima_fs_init(void); @@ -131,7 +130,7 @@ void iint_free(struct kref *kref); void iint_rcu_free(struct rcu_head *rcu); /* IMA policy related functions */ -enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK }; +enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK }; int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask); void ima_init_policy(void); diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index 3cd58b6..2a5e0bc 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c @@ -95,12 +95,12 @@ err_out: * ima_must_measure - measure decision based on policy. 
* @inode: pointer to inode to measure * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE) - * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP) + * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP) * * The policy is defined in terms of keypairs: * subj=, obj=, type=, func=, mask=, fsmagic= * subj,obj, and type: are LSM specific. - * func: PATH_CHECK | BPRM_CHECK | FILE_MMAP + * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP * mask: contains the permission mask * fsmagic: hex value * diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c index fa592ff..0d83edc 100644 --- a/security/integrity/ima/ima_iint.c +++ b/security/integrity/ima/ima_iint.c @@ -52,9 +52,6 @@ int ima_inode_alloc(struct inode *inode) struct ima_iint_cache *iint = NULL; int rc = 0; - if (!ima_initialized) - return 0; - iint = kmem_cache_alloc(iint_cache, GFP_NOFS); if (!iint) return -ENOMEM; @@ -118,8 +115,6 @@ void ima_inode_free(struct inode *inode) { struct ima_iint_cache *iint; - if (!ima_initialized) - return; spin_lock(&ima_iint_lock); iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode); spin_unlock(&ima_iint_lock); @@ -141,9 +136,11 @@ static void init_once(void *foo) kref_set(&iint->refcount, 1); } -void __init ima_iintcache_init(void) +static int __init ima_iintcache_init(void) { iint_cache = kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, SLAB_PANIC, init_once); + return 0; } +security_initcall(ima_iintcache_init); diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index a89f44d..294b005 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -14,7 +14,7 @@ * * File: ima_main.c * implements the IMA hooks: ima_bprm_check, ima_file_mmap, - * and ima_path_check. + * and ima_file_check. */ #include <linux/module.h> #include <linux/file.h> @@ -84,6 +84,36 @@ out: return found; } +/* ima_read_write_check - reflect possible reading/writing errors in the PCR. + * + * When opening a file for read, if the file is already open for write, + * the file could change, resulting in a file measurement error. + * + * Opening a file for write, if the file is already open for read, results + * in a time of measure, time of use (ToMToU) error. + * + * In either case invalidate the PCR. + */ +enum iint_pcr_error { TOMTOU, OPEN_WRITERS }; +static void ima_read_write_check(enum iint_pcr_error error, + struct ima_iint_cache *iint, + struct inode *inode, + const unsigned char *filename) +{ + switch (error) { + case TOMTOU: + if (iint->readcount > 0) + ima_add_violation(inode, filename, "invalid_pcr", + "ToMToU"); + break; + case OPEN_WRITERS: + if (iint->writecount > 0) + ima_add_violation(inode, filename, "invalid_pcr", + "open_writers"); + break; + } +} + /* * Update the counts given an fmode_t */ @@ -99,6 +129,47 @@ static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode) } /* + * ima_counts_get - increment file counts + * + * Maintain read/write counters for all files, but only + * invalidate the PCR for measured files: + * - Opening a file for write when already open for read, + * results in a time of measure, time of use (ToMToU) error. + * - Opening a file for read when already open for write, + * could result in a file measurement error. 
+ * + */ +void ima_counts_get(struct file *file) +{ + struct dentry *dentry = file->f_path.dentry; + struct inode *inode = dentry->d_inode; + fmode_t mode = file->f_mode; + struct ima_iint_cache *iint; + int rc; + + if (!ima_initialized || !S_ISREG(inode->i_mode)) + return; + iint = ima_iint_find_get(inode); + if (!iint) + return; + mutex_lock(&iint->mutex); + rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); + if (rc < 0) + goto out; + + if (mode & FMODE_WRITE) { + ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name); + goto out; + } + ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name); +out: + ima_inc_counts(iint, file->f_mode); + mutex_unlock(&iint->mutex); + + kref_put(&iint->refcount, iint_free); +} + +/* * Decrement ima counts */ static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode, @@ -153,123 +224,6 @@ void ima_file_free(struct file *file) kref_put(&iint->refcount, iint_free); } -/* ima_read_write_check - reflect possible reading/writing errors in the PCR. - * - * When opening a file for read, if the file is already open for write, - * the file could change, resulting in a file measurement error. - * - * Opening a file for write, if the file is already open for read, results - * in a time of measure, time of use (ToMToU) error. - * - * In either case invalidate the PCR. - */ -enum iint_pcr_error { TOMTOU, OPEN_WRITERS }; -static void ima_read_write_check(enum iint_pcr_error error, - struct ima_iint_cache *iint, - struct inode *inode, - const unsigned char *filename) -{ - switch (error) { - case TOMTOU: - if (iint->readcount > 0) - ima_add_violation(inode, filename, "invalid_pcr", - "ToMToU"); - break; - case OPEN_WRITERS: - if (iint->writecount > 0) - ima_add_violation(inode, filename, "invalid_pcr", - "open_writers"); - break; - } -} - -static int get_path_measurement(struct ima_iint_cache *iint, struct file *file, - const unsigned char *filename) -{ - int rc = 0; - - ima_inc_counts(iint, file->f_mode); - - rc = ima_collect_measurement(iint, file); - if (!rc) - ima_store_measurement(iint, file, filename); - return rc; -} - -/** - * ima_path_check - based on policy, collect/store measurement. - * @path: contains a pointer to the path to be measured - * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE - * - * Measure the file being open for readonly, based on the - * ima_must_measure() policy decision. - * - * Keep read/write counters for all files, but only - * invalidate the PCR for measured files: - * - Opening a file for write when already open for read, - * results in a time of measure, time of use (ToMToU) error. - * - Opening a file for read when already open for write, - * could result in a file measurement error. - * - * Always return 0 and audit dentry_open failures. - * (Return code will be based upon measurement appraisal.) 
- */ -int ima_path_check(struct path *path, int mask) -{ - struct inode *inode = path->dentry->d_inode; - struct ima_iint_cache *iint; - struct file *file = NULL; - int rc; - - if (!ima_initialized || !S_ISREG(inode->i_mode)) - return 0; - iint = ima_iint_find_get(inode); - if (!iint) - return 0; - - mutex_lock(&iint->mutex); - - rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK); - if (rc < 0) - goto out; - - if ((mask & MAY_WRITE) || (mask == 0)) - ima_read_write_check(TOMTOU, iint, inode, - path->dentry->d_name.name); - - if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ) - goto out; - - ima_read_write_check(OPEN_WRITERS, iint, inode, - path->dentry->d_name.name); - if (!(iint->flags & IMA_MEASURED)) { - struct dentry *dentry = dget(path->dentry); - struct vfsmount *mnt = mntget(path->mnt); - - file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE, - current_cred()); - if (IS_ERR(file)) { - int audit_info = 0; - - integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, - dentry->d_name.name, - "add_measurement", - "dentry_open failed", - 1, audit_info); - file = NULL; - goto out; - } - rc = get_path_measurement(iint, file, dentry->d_name.name); - } -out: - mutex_unlock(&iint->mutex); - if (file) - fput(file); - kref_put(&iint->refcount, iint_free); - return 0; -} -EXPORT_SYMBOL_GPL(ima_path_check); - static int process_measurement(struct file *file, const unsigned char *filename, int mask, int function) { @@ -297,33 +251,6 @@ out: return rc; } -/* - * ima_counts_get - increment file counts - * - * - for IPC shm and shmat file. - * - for nfsd exported files. - * - * Increment the counts for these files to prevent unnecessary - * imbalance messages. - */ -void ima_counts_get(struct file *file) -{ - struct inode *inode = file->f_dentry->d_inode; - struct ima_iint_cache *iint; - - if (!ima_initialized || !S_ISREG(inode->i_mode)) - return; - iint = ima_iint_find_get(inode); - if (!iint) - return; - mutex_lock(&iint->mutex); - ima_inc_counts(iint, file->f_mode); - mutex_unlock(&iint->mutex); - - kref_put(&iint->refcount, iint_free); -} -EXPORT_SYMBOL_GPL(ima_counts_get); - /** * ima_file_mmap - based on policy, collect/store measurement. * @file: pointer to the file to be measured (May be NULL) @@ -369,11 +296,31 @@ int ima_bprm_check(struct linux_binprm *bprm) return 0; } +/** + * ima_path_check - based on policy, collect/store measurement. + * @file: pointer to the file to be measured + * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE + * + * Measure files based on the ima_must_measure() policy decision. + * + * Always return 0 and audit dentry_open failures. + * (Return code will be based upon measurement appraisal.) 
+ */ +int ima_file_check(struct file *file, int mask) +{ + int rc; + + rc = process_measurement(file, file->f_dentry->d_name.name, + mask & (MAY_READ | MAY_WRITE | MAY_EXEC), + FILE_CHECK); + return 0; +} +EXPORT_SYMBOL_GPL(ima_file_check); + static int __init init_ima(void) { int error; - ima_iintcache_init(); error = ima_init(); ima_initialized = 1; return error; diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index e127839..4759d0f 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -67,7 +67,7 @@ static struct ima_measure_rule_entry default_rules[] = { .flags = IMA_FUNC | IMA_MASK}, {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC, .flags = IMA_FUNC | IMA_MASK}, - {.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0, + {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0, .flags = IMA_FUNC | IMA_MASK | IMA_UID}, }; @@ -282,8 +282,11 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry) break; case Opt_func: audit_log_format(ab, "func=%s ", args[0].from); - if (strcmp(args[0].from, "PATH_CHECK") == 0) - entry->func = PATH_CHECK; + if (strcmp(args[0].from, "FILE_CHECK") == 0) + entry->func = FILE_CHECK; + /* PATH_CHECK is for backwards compat */ + else if (strcmp(args[0].from, "PATH_CHECK") == 0) + entry->func = FILE_CHECK; else if (strcmp(args[0].from, "FILE_MMAP") == 0) entry->func = FILE_MMAP; else if (strcmp(args[0].from, "BPRM_CHECK") == 0) diff --git a/security/security.c b/security/security.c index 24e060b..122b748 100644 --- a/security/security.c +++ b/security/security.c @@ -666,8 +666,6 @@ int security_file_alloc(struct file *file) void security_file_free(struct file *file) { security_ops->file_free_security(file); - if (file->f_dentry) - ima_file_free(file); } int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c index cb65bd0..459c1f6 100644 --- a/sound/pci/ctxfi/ctatc.c +++ b/sound/pci/ctxfi/ctatc.c @@ -166,18 +166,7 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm) static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index) { - struct ct_vm *vm; - void *kvirt_addr; - unsigned long phys_addr; - - vm = atc->vm; - kvirt_addr = vm->get_ptp_virt(vm, index); - if (kvirt_addr == NULL) - phys_addr = (~0UL); - else - phys_addr = virt_to_phys(kvirt_addr); - - return phys_addr; + return atc->vm->get_ptp_phys(atc->vm, index); } static unsigned int convert_format(snd_pcm_format_t snd_format) @@ -1669,7 +1658,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci, } /* Set up device virtual memory management object */ - err = ct_vm_create(&atc->vm); + err = ct_vm_create(&atc->vm, pci); if (err < 0) goto error1; diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c index 6b78752..65da6e4 100644 --- a/sound/pci/ctxfi/ctvmem.c +++ b/sound/pci/ctxfi/ctvmem.c @@ -138,7 +138,7 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size) return NULL; } - ptp = vm->ptp[0]; + ptp = (unsigned long *)vm->ptp[0].area; pte_start = (block->addr >> CT_PAGE_SHIFT); pages = block->size >> CT_PAGE_SHIFT; for (i = 0; i < pages; i++) { @@ -158,25 +158,25 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block) } /* * - * return the host (kmalloced) addr of the @index-th device - * page talbe page on success, or NULL on failure. - * The first returned NULL indicates the termination. 
+ * return the host physical addr of the @index-th device + * page table page on success, or ~0UL on failure. + * The first returned ~0UL indicates the termination. * */ -static void * -ct_get_ptp_virt(struct ct_vm *vm, int index) +static dma_addr_t +ct_get_ptp_phys(struct ct_vm *vm, int index) { - void *addr; + dma_addr_t addr; - addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index]; + addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr; return addr; } -int ct_vm_create(struct ct_vm **rvm) +int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci) { struct ct_vm *vm; struct ct_vm_block *block; - int i; + int i, err = 0; *rvm = NULL; @@ -188,23 +188,21 @@ int ct_vm_create(struct ct_vm **rvm) /* Allocate page table pages */ for (i = 0; i < CT_PTP_NUM; i++) { - vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!vm->ptp[i]) + err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, + snd_dma_pci_data(pci), + PAGE_SIZE, &vm->ptp[i]); + if (err < 0) break; } - if (!i) { + if (err < 0) { /* no page table pages are allocated */ - kfree(vm); + ct_vm_destroy(vm); return -ENOMEM; } vm->size = CT_ADDRS_PER_PAGE * i; - /* Initialise remaining ptps */ - for (; i < CT_PTP_NUM; i++) - vm->ptp[i] = NULL; - vm->map = ct_vm_map; vm->unmap = ct_vm_unmap; - vm->get_ptp_virt = ct_get_ptp_virt; + vm->get_ptp_phys = ct_get_ptp_phys; INIT_LIST_HEAD(&vm->unused); INIT_LIST_HEAD(&vm->used); block = kzalloc(sizeof(*block), GFP_KERNEL); @@ -242,7 +240,7 @@ void ct_vm_destroy(struct ct_vm *vm) /* free allocated page table pages */ for (i = 0; i < CT_PTP_NUM; i++) - kfree(vm->ptp[i]); + snd_dma_free_pages(&vm->ptp[i]); vm->size = 0; diff --git a/sound/pci/ctxfi/ctvmem.h b/sound/pci/ctxfi/ctvmem.h index 01e4fd0..b23adfc 100644 --- a/sound/pci/ctxfi/ctvmem.h +++ b/sound/pci/ctxfi/ctvmem.h @@ -22,6 +22,8 @@ #include <linux/mutex.h> #include <linux/list.h> +#include <linux/pci.h> +#include <sound/memalloc.h> /* The chip can handle the page table of 4k pages * (emu20k1 can handle even 8k pages, but we don't use it right now) @@ -41,7 +43,7 @@ struct snd_pcm_substream; /* Virtual memory management object for card device */ struct ct_vm { - void *ptp[CT_PTP_NUM]; /* Device page table pages */ + struct snd_dma_buffer ptp[CT_PTP_NUM]; /* Device page table pages */ unsigned int size; /* Available addr space in bytes */ struct list_head unused; /* List of unused blocks */ struct list_head used; /* List of used blocks */ @@ -52,10 +54,10 @@ struct ct_vm { int size); /* Unmap device logical addr area. 
*/ void (*unmap)(struct ct_vm *, struct ct_vm_block *block); - void *(*get_ptp_virt)(struct ct_vm *vm, int index); + dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index); }; -int ct_vm_create(struct ct_vm **rvm); +int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci); void ct_vm_destroy(struct ct_vm *vm); #endif /* CTVMEM_H */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 565de38..3600e9c 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -426,6 +426,7 @@ struct azx { /* flags */ int position_fix; + int poll_count; unsigned int running :1; unsigned int initialized :1; unsigned int single_cmd :1; @@ -506,7 +507,7 @@ static char *driver_short_names[] __devinitdata = { #define get_azx_dev(substream) (substream->runtime->private_data) static int azx_acquire_irq(struct azx *chip, int do_disconnect); - +static int azx_send_cmd(struct hda_bus *bus, unsigned int val); /* * Interface for HD codec */ @@ -664,11 +665,12 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, { struct azx *chip = bus->private_data; unsigned long timeout; + int do_poll = 0; again: timeout = jiffies + msecs_to_jiffies(1000); for (;;) { - if (chip->polling_mode) { + if (chip->polling_mode || do_poll) { spin_lock_irq(&chip->reg_lock); azx_update_rirb(chip); spin_unlock_irq(&chip->reg_lock); @@ -676,6 +678,9 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, if (!chip->rirb.cmds[addr]) { smp_rmb(); bus->rirb_error = 0; + + if (!do_poll) + chip->poll_count = 0; return chip->rirb.res[addr]; /* the last value */ } if (time_after(jiffies, timeout)) @@ -688,6 +693,16 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, } } + if (!chip->polling_mode && chip->poll_count < 2) { + snd_printdd(SFX "azx_get_response timeout, " + "polling the codec once: last cmd=0x%08x\n", + chip->last_cmd[addr]); + do_poll = 1; + chip->poll_count++; + goto again; + } + + if (!chip->polling_mode) { snd_printk(KERN_WARNING SFX "azx_get_response timeout, " "switching to polling mode: last cmd=0x%08x\n", @@ -1878,6 +1893,9 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev) if (!bdl_pos_adj[chip->dev_index]) return 1; /* no delayed ack */ + if (WARN_ONCE(!azx_dev->period_bytes, + "hda-intel: zero azx_dev->period_bytes")) + return 0; /* this shouldn't happen! */ if (pos % azx_dev->period_bytes > azx_dev->period_bytes / 2) return 0; /* NG - it's below the period boundary */ return 1; /* OK, it's fine */ @@ -2043,7 +2061,7 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect) { if (request_irq(chip->pci->irq, azx_interrupt, chip->msi ? 
0 : IRQF_SHARED, - "HDA Intel", chip)) { + "hda_intel", chip)) { printk(KERN_ERR "hda-intel: unable to grab IRQ %d, " "disabling device\n", chip->pci->irq); if (do_disconnect) diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c index 765d7bd..9e66f6d 100644 --- a/sound/pci/ice1712/aureon.c +++ b/sound/pci/ice1712/aureon.c @@ -703,11 +703,13 @@ static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned sho { unsigned char nvol; - if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) + if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) { nvol = 0; - else + } else { nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) / WM_VOL_MAX; + nvol += 0x1b; + } wm_put(ice, index, nvol); wm_put_nocache(ice, index, 0x180 | nvol); @@ -778,7 +780,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_ for (ch = 0; ch < 2; ch++) { unsigned int vol = ucontrol->value.integer.value[ch]; if (vol > WM_VOL_MAX) - continue; + vol = WM_VOL_MAX; vol |= spec->master[ch] & WM_VOL_MUTE; if (vol != spec->master[ch]) { int dac; @@ -834,8 +836,8 @@ static int wm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value * for (i = 0; i < voices; i++) { unsigned int vol = ucontrol->value.integer.value[i]; if (vol > WM_VOL_MAX) - continue; - vol |= spec->vol[ofs+i]; + vol = WM_VOL_MAX; + vol |= spec->vol[ofs+i] & WM_VOL_MUTE; if (vol != spec->vol[ofs+i]) { spec->vol[ofs+i] = vol; idx = WM_DAC_ATTEN + ofs + i; diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c index 71b2c16..68980c1 100644 --- a/sound/soc/omap/omap3pandora.c +++ b/sound/soc/omap/omap3pandora.c @@ -145,6 +145,7 @@ static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = { }; static const struct snd_soc_dapm_route omap3pandora_out_map[] = { + {"PCM DAC", NULL, "APLL Enable"}, {"Headphone Amplifier", NULL, "PCM DAC"}, {"Line Out", NULL, "PCM DAC"}, {"Headphone Jack", NULL, "Headphone Amplifier"}, |
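
Note on the option-parsing hunks repeated in net/9p/client.c, net/9p/trans_fd.c and net/9p/trans_rdma.c above: strsep() advances the cursor it is handed and leaves it NULL at the end of the string, so freeing that cursor after the loop frees the wrong pointer. The fixes keep the original kstrdup() return value in tmp_options and free that instead. The following is a minimal userspace sketch of the same pattern, not the kernel code itself; strdup() and printf() stand in for kstrdup() and P9_DPRINTK(), and the option string in main() is only illustrative.

#define _DEFAULT_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_opts(const char *opts)
{
	char *options, *tmp_options;
	char *p;

	if (!opts)
		return 0;

	/* Work on a private copy; the caller's string is left untouched. */
	tmp_options = strdup(opts);
	if (!tmp_options)
		return -ENOMEM;
	options = tmp_options;

	/*
	 * strsep() advances 'options' and sets it to NULL once the string
	 * is exhausted, so it can no longer be used to free the buffer.
	 */
	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		printf("option: %s\n", p);
	}

	free(tmp_options);	/* the original strdup() pointer, not 'options' */
	return 0;
}

int main(void)
{
	return parse_opts("trans=fd,rfdno=4,wfdno=5") ? 1 : 0;
}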