author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-14 03:54:50 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-14 03:54:50 +0200
commit	dfe2c6dcc8ca2cdc662d7c0473e9811b72ef3370 (patch)
tree	9ed639a08c16322cdf136d576f42df5b97cd1549 /arch
parent	a45d572841a24db02a62cf05e1157c35fdd3705b (diff)
parent	64e455079e1bd7787cc47be30b7f601ce682a5f6 (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge second patch-bomb from Andrew Morton:
 - a few hotfixes
 - drivers/dma updates
 - MAINTAINERS updates
 - Quite a lot of lib/ updates
 - checkpatch updates
 - binfmt updates
 - autofs4
 - drivers/rtc/
 - various small tweaks to less used filesystems
 - ipc/ updates
 - kernel/watchdog.c changes

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (135 commits)
  mm: softdirty: enable write notifications on VMAs after VM_SOFTDIRTY cleared
  kernel/param: consolidate __{start,stop}___param[] in <linux/moduleparam.h>
  ia64: remove duplicate declarations of __per_cpu_start[] and __per_cpu_end[]
  frv: remove unused declarations of __start___ex_table and __stop___ex_table
  kvm: ensure hard lockup detection is disabled by default
  kernel/watchdog.c: control hard lockup detection default
  staging: rtl8192u: use %*pEn to escape buffer
  staging: rtl8192e: use %*pEn to escape buffer
  staging: wlan-ng: use %*pEhp to print SN
  lib80211: remove unused print_ssid()
  wireless: hostap: proc: print properly escaped SSID
  wireless: ipw2x00: print SSID via %*pE
  wireless: libertas: print esaped string via %*pE
  lib/vsprintf: add %*pE[achnops] format specifier
  lib / string_helpers: introduce string_escape_mem()
  lib / string_helpers: refactoring the test suite
  lib / string_helpers: move documentation to c-file
  include/linux: remove strict_strto* definitions
  arch/x86/mm/numa.c: fix boot failure when all nodes are hotpluggable
  fs: check bh blocknr earlier when searching lru
  ...
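Several commits in this series lean on the new %*pE[achnops] vsprintf extension, which prints a raw buffer as an escaped string with the buffer length passed as the field width. A minimal sketch of the intended usage (the function and variable names are illustrative, not taken from the patches):

	#include <linux/printk.h>
	#include <linux/types.h>

	/* Sketch: log a possibly non-printable SSID as an escaped string.
	 * The '*' field width consumes the length argument and %pE escapes
	 * the buffer; trailing flags (a, c, h, n, o, p, s) pick the style.
	 */
	static void log_ssid(const u8 *ssid, int ssid_len)
	{
		pr_info("SSID: %*pE\n", ssid_len, ssid);
	}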
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/boot/dts/exynos3250.dtsi	2
-rw-r--r--	arch/arm/mach-pxa/lpd270.c	12
-rw-r--r--	arch/frv/mm/extable.c	2
-rw-r--r--	arch/ia64/include/asm/sections.h	2
-rw-r--r--	arch/x86/kernel/cpu/common.c	4
-rw-r--r--	arch/x86/kernel/crash.c	10
-rw-r--r--	arch/x86/kernel/kexec-bzimage64.c	15
-rw-r--r--	arch/x86/kernel/kvm.c	8
-rw-r--r--	arch/x86/mm/ioremap.c	20
-rw-r--r--	arch/x86/mm/numa.c	89
-rw-r--r--	arch/x86/purgatory/Makefile	3
11 files changed, 96 insertions, 71 deletions
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 1d52de6..429a6c6 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -164,7 +164,7 @@
};
rtc: rtc@10070000 {
- compatible = "samsung,s3c6410-rtc";
+ compatible = "samsung,exynos3250-rtc";
reg = <0x10070000 0x100>;
interrupts = <0 73 0>, <0 74 0>;
status = "disabled";
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 9f6ec167..ad777b3 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -416,17 +416,17 @@ static struct pxafb_mach_info *lpd270_lcd_to_use;
static int __init lpd270_set_lcd(char *str)
{
- if (!strnicmp(str, "lq057q3dc02", 11)) {
+ if (!strncasecmp(str, "lq057q3dc02", 11)) {
lpd270_lcd_to_use = &sharp_lq057q3dc02;
- } else if (!strnicmp(str, "lq121s1dg31", 11)) {
+ } else if (!strncasecmp(str, "lq121s1dg31", 11)) {
lpd270_lcd_to_use = &sharp_lq121s1dg31;
- } else if (!strnicmp(str, "lq036q1da01", 11)) {
+ } else if (!strncasecmp(str, "lq036q1da01", 11)) {
lpd270_lcd_to_use = &sharp_lq036q1da01;
- } else if (!strnicmp(str, "lq64d343", 8)) {
+ } else if (!strncasecmp(str, "lq64d343", 8)) {
lpd270_lcd_to_use = &sharp_lq64d343;
- } else if (!strnicmp(str, "lq10d368", 8)) {
+ } else if (!strncasecmp(str, "lq10d368", 8)) {
lpd270_lcd_to_use = &sharp_lq10d368;
- } else if (!strnicmp(str, "lq035q7db02-20", 14)) {
+ } else if (!strncasecmp(str, "lq035q7db02-20", 14)) {
lpd270_lcd_to_use = &sharp_lq035q7db02_20;
} else {
printk(KERN_INFO "lpd270: unknown lcd panel [%s]\n", str);
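The lpd270 hunks above only swap the old kernel-private strnicmp() for the standard strncasecmp() (declared in <linux/string.h>); the comparison semantics are unchanged. A minimal sketch of the call, with an illustrative helper name:

	#include <linux/string.h>
	#include <linux/types.h>

	/* Sketch: match the first 11 characters case-insensitively, so
	 * "LQ057Q3DC02" and "lq057q3dc02" both select the same panel.
	 */
	static bool is_lq057q3dc02(const char *str)
	{
		return !strncasecmp(str, "lq057q3dc02", 11);
	}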
diff --git a/arch/frv/mm/extable.c b/arch/frv/mm/extable.c
index 6aea124..2fb9b3a 100644
--- a/arch/frv/mm/extable.c
+++ b/arch/frv/mm/extable.c
@@ -6,8 +6,6 @@
#include <linux/spinlock.h>
#include <asm/uaccess.h>
-extern const struct exception_table_entry __attribute__((aligned(8))) __start___ex_table[];
-extern const struct exception_table_entry __attribute__((aligned(8))) __stop___ex_table[];
extern const void __memset_end, __memset_user_error_lr, __memset_user_error_handler;
extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler;
extern spinlock_t modlist_lock;
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
index 1a873b3..2ab2003 100644
--- a/arch/ia64/include/asm/sections.h
+++ b/arch/ia64/include/asm/sections.h
@@ -10,7 +10,7 @@
#include <linux/uaccess.h>
#include <asm-generic/sections.h>
-extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
+extern char __phys_per_cpu_start[];
#ifdef CONFIG_SMP
extern char __cpu0_per_cpu[];
#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 700f958..3eff36f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -964,6 +964,7 @@ static void vgetcpu_set_mode(void)
vgetcpu_mode = VGETCPU_LSL;
}
+#ifdef CONFIG_IA32_EMULATION
/* May not be __init: called during resume */
static void syscall32_cpu_init(void)
{
@@ -975,7 +976,8 @@ static void syscall32_cpu_init(void)
wrmsrl(MSR_CSTAR, ia32_cstar_target);
}
-#endif
+#endif /* CONFIG_IA32_EMULATION */
+#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a618fcd..f5ab56d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -237,7 +237,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
ced->max_nr_ranges++;
/* If crashk_low_res is not 0, another range split possible */
- if (crashk_low_res.end != 0)
+ if (crashk_low_res.end)
ced->max_nr_ranges++;
}
@@ -335,9 +335,11 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
if (ret)
return ret;
- ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
- if (ret)
- return ret;
+ if (crashk_low_res.end) {
+ ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+ if (ret)
+ return ret;
+ }
/* Exclude GART region */
if (ced->gart_end) {
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 9642b9b..ca05f86 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -26,6 +26,7 @@
#include <asm/setup.h>
#include <asm/crash.h>
#include <asm/efi.h>
+#include <asm/kexec-bzimage64.h>
#define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */
@@ -267,7 +268,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
return ret;
}
-int bzImage64_probe(const char *buf, unsigned long len)
+static int bzImage64_probe(const char *buf, unsigned long len)
{
int ret = -ENOEXEC;
struct setup_header *header;
@@ -325,10 +326,10 @@ int bzImage64_probe(const char *buf, unsigned long len)
return ret;
}
-void *bzImage64_load(struct kimage *image, char *kernel,
- unsigned long kernel_len, char *initrd,
- unsigned long initrd_len, char *cmdline,
- unsigned long cmdline_len)
+static void *bzImage64_load(struct kimage *image, char *kernel,
+ unsigned long kernel_len, char *initrd,
+ unsigned long initrd_len, char *cmdline,
+ unsigned long cmdline_len)
{
struct setup_header *header;
@@ -514,7 +515,7 @@ out_free_params:
}
/* This cleanup function is called after various segments have been loaded */
-int bzImage64_cleanup(void *loader_data)
+static int bzImage64_cleanup(void *loader_data)
{
struct bzimage64_data *ldata = loader_data;
@@ -528,7 +529,7 @@ int bzImage64_cleanup(void *loader_data)
}
#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
-int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
+static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
{
bool trusted;
int ret;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 3dd8e2c..95c3cb1 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
+#include <linux/nmi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
@@ -499,6 +500,13 @@ void __init kvm_guest_init(void)
#else
kvm_guest_cpu_init();
#endif
+
+ /*
+ * Hard lockup detection is enabled by default. Disable it, as guests
+ * can get false positives too easily, for example if the host is
+ * overcommitted.
+ */
+ watchdog_enable_hardlockup_detector(false);
}
static noinline uint32_t __kvm_cpuid_base(void)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index baff1da..af78e50 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -86,6 +86,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
pgprot_t prot;
int retval;
void __iomem *ret_addr;
+ int ram_region;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
@@ -108,12 +109,23 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
- pfn = phys_addr >> PAGE_SHIFT;
- last_pfn = last_addr >> PAGE_SHIFT;
- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
- __ioremap_check_ram) == 1)
+ /* First check if whole region can be identified as RAM or not */
+ ram_region = region_is_ram(phys_addr, size);
+ if (ram_region > 0) {
+ WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
+ (unsigned long int)phys_addr,
+ (unsigned long int)last_addr);
return NULL;
+ }
+ /* If could not be identified(-1), check page by page */
+ if (ram_region < 0) {
+ pfn = phys_addr >> PAGE_SHIFT;
+ last_pfn = last_addr >> PAGE_SHIFT;
+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ __ioremap_check_ram) == 1)
+ return NULL;
+ }
/*
* Mappings have to be page-aligned
*/
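The ioremap change above first asks region_is_ram() about the whole range and only falls back to the page-by-page walk when that answer is ambiguous. A sketch of the resulting decision, under the return-value convention visible in the hunk (positive: entirely RAM, negative: could not be identified, zero: not RAM); this uses kernel-internal helpers and is not buildable on its own:

	/* Sketch: true when the physical range must not be ioremapped
	 * because it maps normal RAM.
	 */
	static bool range_is_normal_ram(resource_size_t phys_addr,
					unsigned long size)
	{
		unsigned long pfn = phys_addr >> PAGE_SHIFT;
		unsigned long last_pfn = (phys_addr + size - 1) >> PAGE_SHIFT;
		int ram = region_is_ram(phys_addr, size);

		if (ram > 0)	/* whole range identified as RAM */
			return true;
		if (ram < 0)	/* unidentified: check page by page */
			return walk_system_ram_range(pfn, last_pfn - pfn + 1,
					NULL, __ioremap_check_ram) == 1;
		return false;	/* positively identified as not RAM */
	}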
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index d221374..1a88370 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -463,6 +463,42 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
return true;
}
+static void __init numa_clear_kernel_node_hotplug(void)
+{
+ int i, nid;
+ nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
+ unsigned long start, end;
+ struct memblock_region *r;
+
+ /*
+ * At this time, all memory regions reserved by memblock are
+ * used by the kernel. Set the nid in memblock.reserved will
+ * mark out all the nodes the kernel resides in.
+ */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = &numa_meminfo.blk[i];
+
+ memblock_set_node(mb->start, mb->end - mb->start,
+ &memblock.reserved, mb->nid);
+ }
+
+ /* Mark all kernel nodes. */
+ for_each_memblock(reserved, r)
+ node_set(r->nid, numa_kernel_nodes);
+
+ /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ nid = numa_meminfo.blk[i].nid;
+ if (!node_isset(nid, numa_kernel_nodes))
+ continue;
+
+ start = numa_meminfo.blk[i].start;
+ end = numa_meminfo.blk[i].end;
+
+ memblock_clear_hotplug(start, end - start);
+ }
+}
+
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
unsigned long uninitialized_var(pfn_align);
@@ -481,6 +517,15 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
}
/*
+ * At very early time, the kernel have to use some memory such as
+ * loading the kernel image. We cannot prevent this anyway. So any
+ * node the kernel resides in should be un-hotpluggable.
+ *
+ * And when we come here, alloc node data won't fail.
+ */
+ numa_clear_kernel_node_hotplug();
+
+ /*
* If sections array is gonna be used for pfn -> nid mapping, check
* whether its granularity is fine enough.
*/
@@ -548,41 +593,6 @@ static void __init numa_init_array(void)
}
}
-static void __init numa_clear_kernel_node_hotplug(void)
-{
- int i, nid;
- nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
- unsigned long start, end;
- struct memblock_region *r;
-
- /*
- * At this time, all memory regions reserved by memblock are
- * used by the kernel. Set the nid in memblock.reserved will
- * mark out all the nodes the kernel resides in.
- */
- for (i = 0; i < numa_meminfo.nr_blks; i++) {
- struct numa_memblk *mb = &numa_meminfo.blk[i];
- memblock_set_node(mb->start, mb->end - mb->start,
- &memblock.reserved, mb->nid);
- }
-
- /* Mark all kernel nodes. */
- for_each_memblock(reserved, r)
- node_set(r->nid, numa_kernel_nodes);
-
- /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
- for (i = 0; i < numa_meminfo.nr_blks; i++) {
- nid = numa_meminfo.blk[i].nid;
- if (!node_isset(nid, numa_kernel_nodes))
- continue;
-
- start = numa_meminfo.blk[i].start;
- end = numa_meminfo.blk[i].end;
-
- memblock_clear_hotplug(start, end - start);
- }
-}
-
static int __init numa_init(int (*init_func)(void))
{
int i;
@@ -637,15 +647,6 @@ static int __init numa_init(int (*init_func)(void))
}
numa_init_array();
- /*
- * At very early time, the kernel have to use some memory such as
- * loading the kernel image. We cannot prevent this anyway. So any
- * node the kernel resides in should be un-hotpluggable.
- *
- * And when we come here, numa_init() won't fail.
- */
- numa_clear_kernel_node_hotplug();
-
return 0;
}
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 899dd24..f52e033 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -18,8 +18,9 @@ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
targets += kexec-purgatory.c
+CMD_BIN2C = $(objtree)/scripts/basic/bin2c
quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = cat $(obj)/purgatory.ro | $(objtree)/scripts/basic/bin2c kexec_purgatory > $(obj)/kexec-purgatory.c
+ cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
$(call if_changed,bin2c)