path: root/arch/s390/numa
author     Heiko Carstens <heiko.carstens@de.ibm.com>   2016-12-03 09:50:21 +0100
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2016-12-07 07:23:25 +0100
commit     8c9105802235c28b03359d779cbd0557b7b66e70 (patch)
tree       f713b6e57cf9f2dc6af152e17a654b787eb167e7 /arch/s390/numa
parent     30fc4ca2a8ab508d160a917b89b7e1c27f893354 (diff)
s390/numa: establish cpu to node mapping early
Initialize the cpu topology and therefore also the cpu to node mapping
much earlier. Fixes this warning and subsequent crashes when using the
fake numa emulation mode on s390:

WARNING: CPU: 0 PID: 1 at include/linux/cpumask.h:121 select_task_rq+0xe6/0x1a8
CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.6.0-rc6-00001-ge9d867a67fd0-dirty #28
task: 00000001dd270008 ti: 00000001eccb4000 task.ti: 00000001eccb4000
Krnl PSW : 0404c00180000000 0000000000176c56 (select_task_rq+0xe6/0x1a8)
           R:0 T:1 IO:0 EX:0 Key:0 M:1 W:0 P:0 AS:3 CC:0 PM:0 RI:0 EA:3
Call Trace:
([<0000000000176c30>] select_task_rq+0xc0/0x1a8)
([<0000000000177d64>] try_to_wake_up+0x2e4/0x478)
([<000000000015d46c>] create_worker+0x174/0x1c0)
([<0000000000161a98>] alloc_unbound_pwq+0x360/0x438)
([<0000000000162550>] apply_wqattrs_prepare+0x200/0x2a0)
([<000000000016266a>] apply_workqueue_attrs_locked+0x7a/0xb0)
([<0000000000162af0>] apply_workqueue_attrs+0x50/0x78)
([<000000000016441c>] __alloc_workqueue_key+0x304/0x520)
([<0000000000ee3706>] default_bdi_init+0x3e/0x70)
([<0000000000100270>] do_one_initcall+0x140/0x1d8)
([<0000000000ec9da8>] kernel_init_freeable+0x220/0x2d8)
([<0000000000984a7a>] kernel_init+0x2a/0x150)
([<00000000009913fa>] kernel_thread_starter+0x6/0xc)
([<00000000009913f4>] kernel_thread_starter+0x0/0xc)

Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
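The pattern behind the fix: because topology setup now runs before the slab allocator is initialized, the NUMA emulation code can no longer call kzalloc() unconditionally and instead falls back to memblock, marking the callers __ref. A minimal sketch of that pattern follows, assuming the <linux/bootmem.h> and <linux/slab.h> interfaces of this kernel generation; struct my_state and my_state_alloc() are hypothetical names used only for illustration, not part of the patch:

#include <linux/bootmem.h>      /* memblock_virt_alloc() */
#include <linux/slab.h>         /* kzalloc(), slab_is_available() */
#include <linux/init.h>         /* __ref */

struct my_state {
        int node_id;
};

/* __ref: deliberately calls early (memblock/init) code from regular code. */
static struct my_state * __ref my_state_alloc(void)
{
        if (slab_is_available())
                /* Slab is up: use the regular allocator. */
                return kzalloc(sizeof(struct my_state), GFP_KERNEL);
        /* Early boot: memblock_virt_alloc() panics on failure, so no NULL check. */
        return memblock_virt_alloc(sizeof(struct my_state), 8 /* alignment, as in the patch */);
}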
Diffstat (limited to 'arch/s390/numa')
-rw-r--r--  arch/s390/numa/mode_emu.c   9
-rw-r--r--  arch/s390/numa/toptree.c   16
2 files changed, 16 insertions(+), 9 deletions(-)
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 2ed27e8..02b840d 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
+#include <linux/bootmem.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
@@ -307,13 +308,11 @@ fail:
/*
* Allocate and initialize core to node mapping
*/
-static void create_core_to_node_map(void)
+static void __ref create_core_to_node_map(void)
{
int i;
- emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
- if (emu_cores == NULL)
- panic("Could not allocate cores to node memory");
+ emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
@@ -354,7 +353,7 @@ static struct toptree *toptree_from_topology(void)
phys = toptree_new(TOPTREE_ID_PHYS, 1);
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &cpus_with_topology) {
top = &cpu_topology[cpu];
node = toptree_get_child(phys, 0);
drawer = toptree_get_child(node, top->drawer_id);
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 902d350..26f622b 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/list_sort.h>
@@ -25,10 +26,14 @@
* RETURNS:
* Pointer to the new tree node or NULL on error
*/
-struct toptree *toptree_alloc(int level, int id)
+struct toptree __ref *toptree_alloc(int level, int id)
{
- struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
+ struct toptree *res;
+ if (slab_is_available())
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ else
+ res = memblock_virt_alloc(sizeof(*res), 8);
if (!res)
return res;
@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
* cleanly using toptree_remove. Possible children are freed
* recursively. In the end @cand itself is freed.
*/
-void toptree_free(struct toptree *cand)
+void __ref toptree_free(struct toptree *cand)
{
struct toptree *child, *tmp;
@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
toptree_remove(cand);
toptree_for_each_child_safe(child, tmp, cand)
toptree_free(child);
- kfree(cand);
+ if (slab_is_available())
+ kfree(cand);
+ else
+ memblock_free_early((unsigned long)cand, sizeof(*cand));
}
/**
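The free path in toptree_free() above follows the same split, and the caller needs the same __ref annotation because the memblock free path is init-only code. A companion sketch continuing the hypothetical my_state example from earlier (again an illustration of the pattern, not code from the patch):

static void __ref my_state_free(struct my_state *s)
{
        if (slab_is_available())
                kfree(s);
        else
                /*
                 * Hand the block back to memblock by address and size,
                 * mirroring what toptree_free() does in the patch.
                 */
                memblock_free_early((unsigned long)s, sizeof(*s));
}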