From e7986739a76cde5079da08809d8bbc6878387ae0 Mon Sep 17 00:00:00 2001
From: Mike Travis <travis@sgi.com>
Date: Tue, 16 Dec 2008 17:33:52 -0800
Subject: x86 smp: modify send_IPI_mask interface to accept cpumask_t pointers

Impact: cleanup, change parameter passing

  * Change genapic interfaces to accept cpumask_t pointers where
    possible.

  * Modify external callers to use cpumask_t pointers in function calls.

  * Create new send_IPI_mask_allbutself which is the same as the
    send_IPI_mask functions but removes smp_processor_id() from list.
    This removes another common need for a temporary cpumask_t variable.

  * Functions that used a temp cpumask_t variable for:

        cpumask_t allbutme = cpu_online_map;

        cpu_clear(smp_processor_id(), allbutme);
        if (!cpus_empty(allbutme))
                ...

    become:

        if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu)))
                ...

  * Other minor code optimizations (like using cpus_clear instead of
    CPU_MASK_NONE, etc.)

Applies to linux-2.6.tip/master.

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/kernel/genx2apic_cluster.c | 60 ++++++++++++++++++++++++-------------
 1 file changed, 40 insertions(+), 20 deletions(-)

(limited to 'arch/x86/kernel/genx2apic_cluster.c')

diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index f6a2c8e..f5fa9a9 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-static cpumask_t x2apic_target_cpus(void)
+static const cpumask_t *x2apic_target_cpus(void)
 {
-        return cpumask_of_cpu(0);
+        return &cpumask_of_cpu(0);
 }
 
 /*
  * for now each logical cpu is in its own vector allocation domain.
  */
-static cpumask_t x2apic_vector_allocation_domain(int cpu)
+static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-        cpumask_t domain = CPU_MASK_NONE;
-        cpu_set(cpu, domain);
-        return domain;
+        cpus_clear(*retmask);
+        cpu_set(cpu, *retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -56,32 +55,52 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
  * at once. We have 16 cpu's in a cluster. This will minimize IPI register
  * writes.
  */
-static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
+static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
 {
         unsigned long flags;
         unsigned long query_cpu;
 
         local_irq_save(flags);
-        for_each_cpu_mask(query_cpu, mask) {
-                __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-                                       vector, APIC_DEST_LOGICAL);
-        }
+        for_each_cpu_mask_nr(query_cpu, *mask)
+                __x2apic_send_IPI_dest(
+                        per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                        vector, APIC_DEST_LOGICAL);
         local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_allbutself(int vector)
+static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
 {
-        cpumask_t mask = cpu_online_map;
+        unsigned long flags;
+        unsigned long query_cpu;
+        unsigned long this_cpu = smp_processor_id();
 
-        cpu_clear(smp_processor_id(), mask);
+        local_irq_save(flags);
+        for_each_cpu_mask_nr(query_cpu, *mask)
+                if (query_cpu != this_cpu)
+                        __x2apic_send_IPI_dest(
+                                per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                                vector, APIC_DEST_LOGICAL);
+        local_irq_restore(flags);
+}
 
-        if (!cpus_empty(mask))
-                x2apic_send_IPI_mask(mask, vector);
+static void x2apic_send_IPI_allbutself(int vector)
+{
+        unsigned long flags;
+        unsigned long query_cpu;
+        unsigned long this_cpu = smp_processor_id();
+
+        local_irq_save(flags);
+        for_each_online_cpu(query_cpu)
+                if (query_cpu != this_cpu)
+                        __x2apic_send_IPI_dest(
+                                per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                                vector, APIC_DEST_LOGICAL);
+        local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_all(int vector)
 {
-        x2apic_send_IPI_mask(cpu_online_map, vector);
+        x2apic_send_IPI_mask(&cpu_online_map, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -89,7 +108,7 @@ static int x2apic_apic_id_registered(void)
         return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
         int cpu;
 
@@ -97,8 +116,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
          * We're using fixed IRQ delivery, can only return one phys APIC ID.
          * May as well be the first.
          */
-        cpu = first_cpu(cpumask);
-        if ((unsigned)cpu < NR_CPUS)
+        cpu = first_cpu(*cpumask);
+        if ((unsigned)cpu < nr_cpu_ids)
                 return per_cpu(x86_cpu_to_logical_apicid, cpu);
         else
                 return BAD_APICID;
@@ -150,6 +169,7 @@ struct genapic apic_x2apic_cluster = {
         .send_IPI_all = x2apic_send_IPI_all,
         .send_IPI_allbutself = x2apic_send_IPI_allbutself,
         .send_IPI_mask = x2apic_send_IPI_mask,
+        .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
         .send_IPI_self = x2apic_send_IPI_self,
         .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
         .phys_pkg_id = phys_pkg_id,
-- cgit v1.1


From 95d313cf1c1ecedc8bec5727b09bdacbf67dfc45 Mon Sep 17 00:00:00 2001
From: Mike Travis <travis@sgi.com>
Date: Tue, 16 Dec 2008 17:33:54 -0800
Subject: x86: Add cpu_mask_to_apicid_and

Impact: new API

Add a helper function that takes two cpumask's, and's them and then
returns the apicid of the result. This removes a need in io_apic.c
that uses a temporary cpumask to hold (mask & cfg->domain).
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/x86/kernel/genx2apic_cluster.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'arch/x86/kernel/genx2apic_cluster.c')

diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index f5fa9a9..fd8047f 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -123,6 +123,21 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
         return BAD_APICID;
 }
 
+static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                                  const cpumask_t *andmask)
+{
+        int cpu;
+
+        /*
+         * We're using fixed IRQ delivery, can only return one phys APIC ID.
+         * May as well be the first.
+         */
+        while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
+                if (cpu_isset(cpu, *andmask))
+                        return per_cpu(x86_cpu_to_apicid, cpu);
+        return BAD_APICID;
+}
+
 static unsigned int get_apic_id(unsigned long x)
 {
         unsigned int id;
@@ -172,6 +187,7 @@ struct genapic apic_x2apic_cluster = {
         .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
         .send_IPI_self = x2apic_send_IPI_self,
         .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
+        .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
         .phys_pkg_id = phys_pkg_id,
         .get_apic_id = get_apic_id,
         .set_apic_id = set_apic_id,
-- cgit v1.1


From 6eeb7c5a99434596c5953a95baa17d2f085664e3 Mon Sep 17 00:00:00 2001
From: Mike Travis <travis@sgi.com>
Date: Tue, 16 Dec 2008 17:33:55 -0800
Subject: x86: update add-cpu_mask_to_apicid_and to use struct cpumask*

Impact: use updated APIs

Various API updates for x86:add-cpu_mask_to_apicid_and

(Note: separate because previous patch has been "backported" to 2.6.27.)

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
---
 arch/x86/kernel/genx2apic_cluster.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/x86/kernel/genx2apic_cluster.c')

diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index fd8047f..e7d16f5 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -123,8 +123,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
         return BAD_APICID;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                                  const cpumask_t *andmask)
+static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                  const struct cpumask *andmask)
 {
         int cpu;
 
@@ -132,9 +132,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
          * We're using fixed IRQ delivery, can only return one phys APIC ID.
          * May as well be the first.
          */
-        while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
-                if (cpu_isset(cpu, *andmask))
-                        return per_cpu(x86_cpu_to_apicid, cpu);
+        cpu = cpumask_any_and(cpumask, andmask);
+        if (cpu < nr_cpu_ids)
+                return per_cpu(x86_cpu_to_apicid, cpu);
         return BAD_APICID;
 }
 
-- cgit v1.1


From bcda016eddd7a8b374bb371473c821a91ff1d8cc Mon Sep 17 00:00:00 2001
From: Mike Travis <travis@sgi.com>
Date: Tue, 16 Dec 2008 17:33:59 -0800
Subject: x86: cosmetic changes apic-related files.

This patch simply changes cpumask_t to struct cpumask and similar
trivial modernizations.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
---
 arch/x86/kernel/genx2apic_cluster.c | 25 +++++++++++++-------------
 1 file changed, 13 insertions(+), 12 deletions(-)

(limited to 'arch/x86/kernel/genx2apic_cluster.c')

diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index e7d16f5..4716a0c 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,18 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-static const cpumask_t *x2apic_target_cpus(void)
+static const struct cpumask *x2apic_target_cpus(void)
 {
-        return &cpumask_of_cpu(0);
+        return cpumask_of(0);
 }
 
 /*
  * for now each logical cpu is in its own vector allocation domain.
  */
-static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-        cpus_clear(*retmask);
-        cpu_set(cpu, *retmask);
+        cpumask_clear(retmask);
+        cpumask_set_cpu(cpu, retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -55,27 +55,28 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
  * at once. We have 16 cpu's in a cluster. This will minimize IPI register
  * writes.
  */
-static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
         unsigned long flags;
         unsigned long query_cpu;
 
         local_irq_save(flags);
-        for_each_cpu_mask_nr(query_cpu, *mask)
+        for_each_cpu(query_cpu, mask)
                 __x2apic_send_IPI_dest(
                         per_cpu(x86_cpu_to_logical_apicid, query_cpu),
                         vector, APIC_DEST_LOGICAL);
         local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+                                            int vector)
 {
         unsigned long flags;
         unsigned long query_cpu;
         unsigned long this_cpu = smp_processor_id();
 
         local_irq_save(flags);
-        for_each_cpu_mask_nr(query_cpu, *mask)
+        for_each_cpu(query_cpu, mask)
                 if (query_cpu != this_cpu)
                         __x2apic_send_IPI_dest(
                                 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
@@ -100,7 +101,7 @@ static void x2apic_send_IPI_allbutself(int vector)
 
 static void x2apic_send_IPI_all(int vector)
 {
-        x2apic_send_IPI_mask(&cpu_online_map, vector);
+        x2apic_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -108,7 +109,7 @@ static int x2apic_apic_id_registered(void)
         return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
         int cpu;
 
@@ -116,7 +117,7 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
          * We're using fixed IRQ delivery, can only return one phys APIC ID.
          * May as well be the first.
         */
-        cpu = first_cpu(*cpumask);
+        cpu = cpumask_first(cpumask);
         if ((unsigned)cpu < nr_cpu_ids)
                 return per_cpu(x86_cpu_to_logical_apicid, cpu);
         else
-- cgit v1.1


From a775a38b1353161a6d7af86b667d6523c12c1a37 Mon Sep 17 00:00:00 2001
From: Mike Travis <travis@sgi.com>
Date: Wed, 17 Dec 2008 15:21:39 -0800
Subject: x86: fix cpu_mask_to_apicid_and to include cpu_online_mask

Impact: fix potential APIC crash

In determining the destination apicid, there are usually three cpumasks
that are considered: the incoming cpumask arg, cfg->domain and the
cpu_online_mask.
Since we are just introducing the cpu_mask_to_apicid_and function, make
sure it includes the cpu_online_mask in its evaluation. [Added with this
patch.]

There are two io_apic.c functions that did not previously use the
cpu_online_mask: setup_IO_APIC_irq and msi_compose_msg. Both of these
simply used cpu_mask_to_apicid(cfg->domain & TARGET_CPUS), and all but
one arch (NUMAQ[*]) returns only online cpus in the TARGET_CPUS mask,
so the behavior is identical for all cases.

[*: NUMAQ bug?]

Note that alloc_cpumask_var is only used for the 32-bit cases where it's
highly likely that the cpumask set size will be small and therefore
CPUMASK_OFFSTACK=n. But if that's not the case, failing the allocate
will cause the same return value as the default.

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/kernel/genx2apic_cluster.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arch/x86/kernel/genx2apic_cluster.c')

diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index 4716a0c..d451c9b 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -133,7 +133,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
          * We're using fixed IRQ delivery, can only return one phys APIC ID.
          * May as well be the first.
          */
-        cpu = cpumask_any_and(cpumask, andmask);
+        for_each_cpu_and(cpu, cpumask, andmask)
+                if (cpumask_test_cpu(cpu, cpu_online_mask))
+                        break;
         if (cpu < nr_cpu_ids)
                 return per_cpu(x86_cpu_to_apicid, cpu);
         return BAD_APICID;
-- cgit v1.1


From 7d87d5365556b1c6e8c00abcc632c3ad1fdc58b8 Mon Sep 17 00:00:00 2001
From: Suresh Siddha <suresh.b.siddha@intel.com>
Date: Mon, 22 Dec 2008 17:33:28 -0800
Subject: x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and()

These commits:

commit 95d313cf1c1ecedc8bec5727b09bdacbf67dfc45
Author: Mike Travis <travis@sgi.com>
Date:   Tue Dec 16 17:33:54 2008 -0800

    x86: Add cpu_mask_to_apicid_and

and

commit 6eeb7c5a99434596c5953a95baa17d2f085664e3
Author: Mike Travis <travis@sgi.com>
Date:   Tue Dec 16 17:33:55 2008 -0800

    x86: update add-cpu_mask_to_apicid_and to use struct cpumask*

broke interrupt delivery on x2apic platforms. As x2apic cluster mode
uses logical delivery mode, we need to use logical apicid instead of
physical apicid in x2apic_cpu_mask_to_apicid_and()

Impact: fixes the broken interrupt delivery issue on generic x2apic
platforms.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Mike Travis <travis@sgi.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/kernel/genx2apic_cluster.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/x86/kernel/genx2apic_cluster.c')

diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index d451c9b..6ce497c 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -114,7 +114,7 @@ static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
         int cpu;
 
         /*
-         * We're using fixed IRQ delivery, can only return one phys APIC ID.
+         * We're using fixed IRQ delivery, can only return one logical APIC ID.
          * May as well be the first.
          */
         cpu = cpumask_first(cpumask);
@@ -130,14 +130,14 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
         int cpu;
 
         /*
-         * We're using fixed IRQ delivery, can only return one phys APIC ID.
+         * We're using fixed IRQ delivery, can only return one logical APIC ID.
          * May as well be the first.
          */
         for_each_cpu_and(cpu, cpumask, andmask)
                 if (cpumask_test_cpu(cpu, cpu_online_mask))
                         break;
         if (cpu < nr_cpu_ids)
-                return per_cpu(x86_cpu_to_apicid, cpu);
+                return per_cpu(x86_cpu_to_logical_apicid, cpu);
         return BAD_APICID;
 }
-- cgit v1.1
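
The first patch above argues that the old "copy the online map, clear the current
CPU, test for empty" pattern is equivalent to a single cpus_equal() test against
cpumask_of_cpu(). The standalone sketch below is illustrative only: the plain
unsigned long bitmaps and the helper names old_should_send/new_should_send are
stand-ins, not kernel APIs. It exhaustively checks that the two forms agree
whenever the current CPU is in the online map, which is the case the IPI paths
care about.

#include <assert.h>
#include <stdio.h>

#define NR_CPUS 8

/* Old pattern: allbutme = online_map; clear(me); send only if allbutme is nonempty. */
static int old_should_send(unsigned long online_map, int me)
{
        unsigned long allbutme = online_map & ~(1UL << me);

        return allbutme != 0;
}

/* New pattern: send unless the online map is exactly { me }. */
static int new_should_send(unsigned long online_map, int me)
{
        return online_map != (1UL << me);
}

int main(void)
{
        /* Compare both forms for every online map that contains 'me'. */
        for (unsigned long online_map = 0; online_map < (1UL << NR_CPUS); online_map++)
                for (int me = 0; me < NR_CPUS; me++)
                        if (online_map & (1UL << me))
                                assert(old_should_send(online_map, me) ==
                                       new_should_send(online_map, me));
        printf("old and new allbutself checks agree on all cases\n");
        return 0;
}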
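
The last three patches together leave x2apic_cpu_mask_to_apicid_and() picking the
first CPU that is in the requested mask, in the andmask (typically cfg->domain),
and online, and returning that CPU's logical APIC ID. The userspace sketch below
models only that selection logic; NR_CPUS, cpu_to_logical_apicid[] and the sample
mask values are made-up stand-ins for per_cpu(x86_cpu_to_logical_apicid, cpu) and
the real cpumasks, chosen just to make the example runnable.

#include <stdio.h>

#define NR_CPUS     8
#define BAD_APICID  0xFFFFFFFFu

/* Stand-in for per_cpu(x86_cpu_to_logical_apicid, cpu). */
static const unsigned int cpu_to_logical_apicid[NR_CPUS] = {
        0x01, 0x02, 0x04, 0x08, 0x11, 0x12, 0x14, 0x18
};

static unsigned int mask_to_apicid_and(unsigned long cpumask,
                                       unsigned long andmask,
                                       unsigned long online_mask)
{
        /* Mirrors for_each_cpu_and() plus the cpumask_test_cpu(cpu, cpu_online_mask) filter. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if ((cpumask & andmask & online_mask) & (1UL << cpu))
                        return cpu_to_logical_apicid[cpu];
        return BAD_APICID;      /* no online CPU in the intersection */
}

int main(void)
{
        unsigned long online = 0x3FUL;          /* CPUs 0-5 online */
        unsigned long irq_domain = 0x30UL;      /* cfg->domain-style mask: CPUs 4-5 */
        unsigned long requested = 0xF0UL;       /* requested affinity: CPUs 4-7 */

        printf("dest apicid: 0x%x\n",
               mask_to_apicid_and(requested, irq_domain, online));     /* 0x11 (CPU 4) */
        printf("no match:    0x%x\n",
               mask_to_apicid_and(0x40UL, irq_domain, online));        /* BAD_APICID */
        return 0;
}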