From 821fe94727e72d63e1abd1bc0b044c72dfad9fb6 Mon Sep 17 00:00:00 2001
From: Dave Jones
Date: Sat, 25 Jun 2005 14:54:29 -0700
Subject: [PATCH] gcc4 compile fix for recent ia64 xpc changes

Gcc4 doesn't like volatile casts as lvalues.  Make the structure members
volatile instead.

Signed-off-by: Dave Jones
Cc: "Luck, Tony"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/sn/kernel/xpc.h           | 12 ++++++------
 arch/ia64/sn/kernel/xpc_channel.c   | 12 ++++++------
 arch/ia64/sn/kernel/xpc_partition.c |  2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

(limited to 'arch/ia64')

diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
index 1a0aed8..d0ee635 100644
--- a/arch/ia64/sn/kernel/xpc.h
+++ b/arch/ia64/sn/kernel/xpc.h
@@ -87,7 +87,7 @@ struct xpc_rsvd_page {
 	u8 partid;		/* partition ID from SAL */
 	u8 version;
 	u8 pad[6];		/* pad to u64 align */
-	u64 vars_pa;
+	volatile u64 vars_pa;
 	u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
 	u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
 };
@@ -138,7 +138,7 @@ struct xpc_vars {
  * occupies half a cacheline.
  */
 struct xpc_vars_part {
-	u64 magic;
+	volatile u64 magic;
 
 	u64 openclose_args_pa;	/* physical address of open and close args */
 	u64 GPs_pa;		/* physical address of Get/Put values */
@@ -185,8 +185,8 @@ struct xpc_vars_part {
  * Define a Get/Put value pair (pointers) used with a message queue.
  */
 struct xpc_gp {
-	s64 get;	/* Get value */
-	s64 put;	/* Put value */
+	volatile s64 get;	/* Get value */
+	volatile s64 put;	/* Put value */
 };
 
 #define XPC_GP_SIZE \
@@ -231,7 +231,7 @@ struct xpc_openclose_args {
  */
 struct xpc_notify {
 	struct semaphore sema;		/* notify semaphore */
-	u8 type;			/* type of notification */
+	volatile u8 type;		/* type of notification */
 
 	/* the following two fields are only used if type == XPC_N_CALL */
 	xpc_notify_func func;		/* user's notify function */
@@ -439,7 +439,7 @@ struct xpc_partition {
 
 	/* XPC infrastructure referencing and teardown control */
 
-	u8 setup_state;			/* infrastructure setup state */
+	volatile u8 setup_state;	/* infrastructure setup state */
 	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
 	atomic_t references;		/* #of references to infrastructure */
 
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 0bf6fbc..6d02dac 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -209,7 +209,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	 * With the setting of the partition setup_state to XPC_P_SETUP, we're
 	 * declaring that this partition is ready to go.
 	 */
-	(volatile u8) part->setup_state = XPC_P_SETUP;
+	part->setup_state = XPC_P_SETUP;
 
 	/*
@@ -227,7 +227,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	xpc_vars_part[partid].IPI_phys_cpuid =
 					cpu_physical_id(smp_processor_id());
 	xpc_vars_part[partid].nchannels = part->nchannels;
-	(volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
+	xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
 
 	return xpcSuccess;
 }
@@ -355,7 +355,7 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 
 		/* let the other side know that we've pulled their variables */
-		(volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
+		xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
 	}
 
 	if (pulled_entry->magic == XPC_VP_MAGIC1) {
@@ -1183,7 +1183,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 		 */
 		xpc_clear_local_msgqueue_flags(ch);
 
-		(volatile s64) ch->w_remote_GP.get = ch->remote_GP.get;
+		ch->w_remote_GP.get = ch->remote_GP.get;
 
 		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
 			"channel=%d\n", ch->w_remote_GP.get, ch->partid,
@@ -1211,7 +1211,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 		 */
 		xpc_clear_remote_msgqueue_flags(ch);
 
-		(volatile s64) ch->w_remote_GP.put = ch->remote_GP.put;
+		ch->w_remote_GP.put = ch->remote_GP.put;
 
 		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
 			"channel=%d\n", ch->w_remote_GP.put, ch->partid,
@@ -1875,7 +1875,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
 		notify = &ch->notify_queue[msg_number % ch->local_nentries];
 		notify->func = func;
 		notify->key = key;
-		(volatile u8) notify->type = notify_type;
+		notify->type = notify_type;
 
 		// >>> is a mb() needed here?
 
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index cd7ed73..578265e 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -253,7 +253,7 @@ xpc_rsvd_page_init(void)
 	 * This signifies to the remote partition that our reserved
 	 * page is initialized.
 	 */
-	(volatile u64) rp->vars_pa = __pa(xpc_vars);
+	rp->vars_pa = __pa(xpc_vars);
 
 	return rp;
 }
--
cgit v1.1
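
[ Example: a minimal, self-contained illustration of the failure fixed
  above.  Assigning through a cast ("cast-as-lvalue") was a GCC
  extension that gcc 4.0 removed, so the old form stopped compiling;
  declaring the member volatile expresses the same intent in standard C.
  The type and function names below are invented for the example, not
  taken from the XPC headers.

	typedef unsigned char u8;

	struct example {
		volatile u8 state;	/* volatile member, as in the fix */
	};

	static struct example ex;

	void set_state(u8 v)
	{
		/*
		 * gcc 3.x accepted this; gcc4 rejects a cast as an lvalue:
		 *	(volatile u8) ex.state = v;
		 */
		ex.state = v;		/* plain store to a volatile member */
	}
]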
From f370513640492641b4046bfd9a6e4714f6ae530d Mon Sep 17 00:00:00 2001
From: Zwane Mwaikambo
Date: Sat, 25 Jun 2005 14:54:50 -0700
Subject: [PATCH] i386 CPU hotplug

(The i386 CPU hotplug patch provides infrastructure for some work which
Pavel is doing as well as for ACPI S3 (suspend-to-RAM) work which Li
Shaohua is doing)

The following provides i386 architecture support for safely
unregistering and registering processors during runtime, updated for the
current -mm tree.  In order to avoid dumping cpu hotplug code into
kernel/irq/* I dropped the cpu_online check in do_IRQ() by modifying
fixup_irqs().  The difference is that on cpu offline, fixup_irqs() is
called before we clear the cpu from cpu_online_map, followed by a long
delay, in order to ensure that we never have any queued external
interrupts on the APICs.  There are additional changes to s390 and ppc64
to account for this change.

1) Add CONFIG_HOTPLUG_CPU
2) Disable local APIC timer on dead cpus.
3) Disable preempt around irq balancing to prevent CPUs going down.
4) Print irq stats for all possible cpus.
5) Debugging check for interrupts on offline cpus.
6) Hacky fixup_irqs() to redirect irqs when cpus go off/online.
7) play_dead() for offline cpus to spin inside.
8) Handle offline cpus set in flush_tlb_others().
9) Grab lock earlier in smp_call_function() to prevent CPUs going down.
10) Implement __cpu_disable() and __cpu_die().
11) Enable local interrupts in cpu_enable() after fixup_irqs().
12) Don't fiddle with NMI on dead cpu, but leave intact on other cpus.
13) Program IRQ affinity whilst cpu is still in cpu_online_map on offline.

Signed-off-by: Zwane Mwaikambo
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/kernel/smpboot.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/ia64')

diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 3865f08..a888ddc 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -688,6 +688,7 @@ int __cpu_disable(void)
 		return -EBUSY;
 
 	remove_siblinginfo(cpu);
+	cpu_clear(cpu, cpu_online_map);
 	fixup_irqs();
 	local_flush_tlb_all();
 	cpu_clear(cpu, cpu_callin_map);
--
cgit v1.1
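
[ Example: a sketch of the ia64 __cpu_disable() sequence after the hunk
  above.  Only the lines shown in the hunk are from the patch; the
  boot-cpu guard and the return value are assumptions made to keep the
  sketch self-contained.  The point is the ordering: the cpu leaves
  cpu_online_map before fixup_irqs() runs, so interrupts get redirected
  away from a cpu that is no longer considered online.

	int __cpu_disable(void)
	{
		int cpu = smp_processor_id();

		if (cpu == 0)	/* assumed: the boot cpu cannot be removed */
			return -EBUSY;

		remove_siblinginfo(cpu);
		cpu_clear(cpu, cpu_online_map);	/* leave the online map first */
		fixup_irqs();			/* then migrate irqs off this cpu */
		local_flush_tlb_all();
		cpu_clear(cpu, cpu_callin_map);
		return 0;
	}
]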
From a9fa06c26f7b7914c8cdf4d309b74df3151cc227 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Sat, 25 Jun 2005 14:55:05 -0700
Subject: [PATCH] set cpu_state for CPU hotplug (ia64)

A dead CPU notifies an online CPU that it is dead via the cpu_state
variable.  After switching to physical cpu hotplug, we forgot to set the
variable.  This patch fixes it.  Currently only __cpu_die uses it; we
changed the other locations as well, for consistency, in case anything
else comes to use it.

Signed-off-by: Shaohua Li
Acked-by: Ashok Raj
Cc: "Luck, Tony"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/kernel/smpboot.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/ia64')

diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index a888ddc..623b0a5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -346,6 +346,7 @@ smp_callin (void)
 	lock_ipi_calllock();
 	cpu_set(cpuid, cpu_online_map);
 	unlock_ipi_calllock();
+	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 
 	smp_setup_percpu_timer();
 
@@ -611,6 +612,7 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callin_map);
+	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 }
 
 /*
@@ -775,6 +777,7 @@ __cpu_up (unsigned int cpu)
 	if (cpu_isset(cpu, cpu_callin_map))
 		return -EINVAL;
 
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 	/* Processor goes to start_secondary(), sets online flag */
 	ret = do_boot_cpu(sapicid, cpu);
 	if (ret < 0)
--
cgit v1.1
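
[ Example: a sketch of the consumer side of cpu_state.  The commit
  message says only __cpu_die reads it; the polling loop below is a
  plausible shape for that reader, with illustrative loop bounds and
  message text, not code taken from the patch.

	void __cpu_die(unsigned int cpu)
	{
		int i;

		/* wait for the dying cpu to mark itself CPU_DEAD */
		for (i = 0; i < 100; i++) {
			if (per_cpu(cpu_state, cpu) == CPU_DEAD)
				return;
			msleep(100);
		}
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	}
]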
From 687f1661d302bc70ce906594a6d3f615ef075a50 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Sat, 25 Jun 2005 14:57:21 -0700
Subject: [PATCH] sched: sched tuning

Do some basic initial tuning.

Signed-off-by: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/kernel/domain.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/ia64')

diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c
index fe532c9..afbde79 100644
--- a/arch/ia64/kernel/domain.c
+++ b/arch/ia64/kernel/domain.c
@@ -14,7 +14,7 @@
 #include 
 #include 
 
-#define SD_NODES_PER_DOMAIN 6
+#define SD_NODES_PER_DOMAIN 16
 
 #ifdef CONFIG_NUMA
 /**
--
cgit v1.1

From 7f1867a5b3dc3034cbea403b229d65eed4a7f62e Mon Sep 17 00:00:00 2001
From: Dinakar Guniguntala
Date: Sat, 25 Jun 2005 14:57:36 -0700
Subject: [PATCH] Dynamic sched domains: ia64 changes

ia64 changes similar to kernel/sched.c.

Signed-off-by: Dinakar Guniguntala
Acked-by: Paul Jackson
Acked-by: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/kernel/domain.c | 76 ++++++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 31 deletions(-)

(limited to 'arch/ia64')

diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c
index afbde79..d65e87b 100644
--- a/arch/ia64/kernel/domain.c
+++ b/arch/ia64/kernel/domain.c
@@ -27,7 +27,7 @@
  *
  * Should use nodemask_t.
  */
-static int __devinit find_next_best_node(int node, unsigned long *used_nodes)
+static int find_next_best_node(int node, unsigned long *used_nodes)
 {
 	int i, n, val, min_val, best_node = 0;
 
@@ -66,7 +66,7 @@ static int __devinit find_next_best_node(int node, unsigned long *used_nodes)
  * should be one that prevents unnecessary balancing, but also spreads tasks
  * out optimally.
  */
-static cpumask_t __devinit sched_domain_node_span(int node)
+static cpumask_t sched_domain_node_span(int node)
 {
 	int i;
 	cpumask_t span, nodemask;
@@ -96,7 +96,7 @@ static cpumask_t __devinit sched_domain_node_span(int node)
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static struct sched_group sched_group_cpus[NR_CPUS];
-static int __devinit cpu_to_cpu_group(int cpu)
+static int cpu_to_cpu_group(int cpu)
 {
 	return cpu;
 }
@@ -104,7 +104,7 @@ static int __devinit cpu_to_cpu_group(int cpu)
 
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static struct sched_group sched_group_phys[NR_CPUS];
-static int __devinit cpu_to_phys_group(int cpu)
+static int cpu_to_phys_group(int cpu)
 {
 #ifdef CONFIG_SCHED_SMT
 	return first_cpu(cpu_sibling_map[cpu]);
@@ -125,44 +125,36 @@ static struct sched_group *sched_group_nodes[MAX_NUMNODES];
 
 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
 static struct sched_group sched_group_allnodes[MAX_NUMNODES];
-static int __devinit cpu_to_allnodes_group(int cpu)
+static int cpu_to_allnodes_group(int cpu)
 {
 	return cpu_to_node(cpu);
 }
 #endif
 
 /*
- * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus
  */
-void __devinit arch_init_sched_domains(void)
+void build_sched_domains(const cpumask_t *cpu_map)
 {
 	int i;
-	cpumask_t cpu_default_map;
 
 	/*
-	 * Setup mask for cpus without special case scheduling requirements.
-	 * For now this just excludes isolated cpus, but could be used to
-	 * exclude other special cases in the future.
+	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	cpus_complement(cpu_default_map, cpu_isolated_map);
-	cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
-
-	/*
-	 * Set up domains. Isolated domains just stay on the dummy domain.
-	 */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		int group;
 		struct sched_domain *sd = NULL, *p;
 		cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 
 #ifdef CONFIG_NUMA
 		if (num_online_cpus()
 				> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
 			sd = &per_cpu(allnodes_domains, i);
 			*sd = SD_ALLNODES_INIT;
-			sd->span = cpu_default_map;
+			sd->span = *cpu_map;
 			group = cpu_to_allnodes_group(i);
 			sd->groups = &sched_group_allnodes[group];
 			p = sd;
@@ -173,7 +165,7 @@ void __devinit arch_init_sched_domains(void)
 		*sd = SD_NODE_INIT;
 		sd->span = sched_domain_node_span(cpu_to_node(i));
 		sd->parent = p;
-		cpus_and(sd->span, sd->span, cpu_default_map);
+		cpus_and(sd->span, sd->span, *cpu_map);
 #endif
 
 		p = sd;
@@ -190,7 +182,7 @@ void __devinit arch_init_sched_domains(void)
 		group = cpu_to_cpu_group(i);
 		*sd = SD_SIBLING_INIT;
 		sd->span = cpu_sibling_map[i];
-		cpus_and(sd->span, sd->span, cpu_default_map);
+		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		sd->groups = &sched_group_cpus[group];
 #endif
@@ -198,9 +190,9 @@ void __devinit arch_init_sched_domains(void)
 	}
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		cpumask_t this_sibling_map = cpu_sibling_map[i];
-		cpus_and(this_sibling_map, this_sibling_map, cpu_default_map);
+		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
 		if (i != first_cpu(this_sibling_map))
 			continue;
 
@@ -213,7 +205,7 @@ void __devinit arch_init_sched_domains(void)
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		cpumask_t nodemask = node_to_cpumask(i);
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 		if (cpus_empty(nodemask))
 			continue;
 
@@ -222,7 +214,7 @@ void __devinit arch_init_sched_domains(void)
 	}
 
 #ifdef CONFIG_NUMA
-	init_sched_build_groups(sched_group_allnodes, cpu_default_map,
+	init_sched_build_groups(sched_group_allnodes, *cpu_map,
 				&cpu_to_allnodes_group);
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
@@ -233,12 +225,12 @@ void __devinit arch_init_sched_domains(void)
 		cpumask_t covered = CPU_MASK_NONE;
 		int j;
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 		if (cpus_empty(nodemask))
 			continue;
 
 		domainspan = sched_domain_node_span(i);
-		cpus_and(domainspan, domainspan, cpu_default_map);
+		cpus_and(domainspan, domainspan, *cpu_map);
 
 		sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
 		sched_group_nodes[i] = sg;
@@ -266,7 +258,7 @@ void __devinit arch_init_sched_domains(void)
 			int n = (i + j) % MAX_NUMNODES;
 
 			cpus_complement(notcovered, covered);
-			cpus_and(tmp, notcovered, cpu_default_map);
+			cpus_and(tmp, notcovered, *cpu_map);
 			cpus_and(tmp, tmp, domainspan);
 			if (cpus_empty(tmp))
 				break;
@@ -293,7 +285,7 @@ void __devinit arch_init_sched_domains(void)
 #endif
 
 	/* Calculate CPU power for physical packages and nodes */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		int power;
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
@@ -359,13 +351,35 @@ next_sg:
 		cpu_attach_domain(sd, i);
 	}
 }
+/*
+ * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
+ */
+void arch_init_sched_domains(const cpumask_t *cpu_map)
+{
+	cpumask_t cpu_default_map;
+
+	/*
+	 * Setup mask for cpus without special case scheduling requirements.
+	 * For now this just excludes isolated cpus, but could be used to
+	 * exclude other special cases in the future.
+	 */
+	cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
+
+	build_sched_domains(&cpu_default_map);
+}
 
-void __devinit arch_destroy_sched_domains(void)
+void arch_destroy_sched_domains(const cpumask_t *cpu_map)
 {
 #ifdef CONFIG_NUMA
 	int i;
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
+		cpumask_t nodemask = node_to_cpumask(i);
 		struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+		cpus_and(nodemask, nodemask, *cpu_map);
+		if (cpus_empty(nodemask))
+			continue;
+
 		if (sg == NULL)
 			continue;
 		sg = sg->next;
--
cgit v1.1
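
[ Example: how the new split is meant to be driven.  build_sched_domains()
  now works on an arbitrary cpu map, and arch_init_sched_domains() is a
  thin wrapper that only masks out isolated cpus.  The helper below is
  hypothetical (the real caller lives in kernel/sched.c and is not shown
  in this log); it illustrates the dynamic use the patch enables: tear
  down the domains covering one cpu map, then rebuild over two disjoint
  partitions.

	void repartition_sched_domains(const cpumask_t *old_map,
				       const cpumask_t *part1,
				       const cpumask_t *part2)
	{
		arch_destroy_sched_domains(old_map);	/* free old per-node groups */
		build_sched_domains(part1);		/* domains for the first partition */
		build_sched_domains(part2);		/* ...and the disjoint second one */
	}
]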