author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-25 15:50:20 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-25 15:50:20 -0700
commit     e4903fb59590f86190280a549420f6cb85bd7f7e (patch)
tree       48a299a5e61d7645811f804d1a5bfeb408bfb202 /arch
parent     6a28a05f9b1b4db920e390ac89968ed6d2e4b8ec (diff)
parent     cb2e0912f714b116812ef5834b5ba80d894ac967 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] Nail two more simple section mismatch errors
[IA64] fix section mismatch warnings
[IA64] rename partial_page
  [IA64] Ensure that machvec setup takes place before serial console
[IA64] vector-domain - fix vector_table
[IA64] vector-domain - handle assign_irq_vector(AUTO_ASSIGN)
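Editor's note on the two vector-domain entries above: the diff below makes vector_table[] indexed by the absolute vector number (size IA64_NUM_VECTORS) instead of by the device-vector offset, and makes assign_irq_vector(AUTO_ASSIGN) use the freshly allocated vector as the irq number rather than bailing out on irq < 0. The sketch below is a minimal standalone model of just that allocation logic, not kernel code: cpumask_t is reduced to a single unsigned long, the vector constants are illustrative, and locking and the per-CPU vector_irq tables are omitted.

```c
/* Toy model of the vector-domain allocation after the fix -- NOT kernel code. */
#include <stdio.h>

#define IA64_NUM_VECTORS          256
#define IA64_FIRST_DEVICE_VECTOR  0x30
#define IA64_NUM_DEVICE_VECTORS   (IA64_NUM_VECTORS - IA64_FIRST_DEVICE_VECTOR)
#define AUTO_ASSIGN               -1

typedef unsigned long cpumask_t;           /* bit n set = CPU n is in the mask */

/* indexed by absolute vector number, as in the fixed vector_table[] */
static cpumask_t vector_table[IA64_NUM_VECTORS];

/* find a device vector whose already-claimed domain does not overlap @domain */
static int find_unassigned_vector(cpumask_t domain)
{
	int pos, vector;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		if (vector_table[vector] & domain)
			continue;
		return vector;
	}
	return -1;
}

/* AUTO_ASSIGN now means "use the allocated vector number as the irq number" */
static int assign_irq_vector(int irq, cpumask_t domain)
{
	int vector = find_unassigned_vector(domain);

	if (vector < 0)
		return vector;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	vector_table[vector] |= domain;     /* claim the vector in this domain */
	return irq;
}

int main(void)
{
	cpumask_t cpu0 = 1UL << 0;

	printf("first irq:  %d\n", assign_irq_vector(AUTO_ASSIGN, cpu0));  /* 48 */
	printf("second irq: %d\n", assign_irq_vector(AUTO_ASSIGN, cpu0));  /* 49 */
	return 0;
}
```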
Diffstat (limited to 'arch')
-rw-r--r-- | arch/ia64/ia32/ia32_support.c | 8
-rw-r--r-- | arch/ia64/ia32/ia32priv.h | 12
-rw-r--r-- | arch/ia64/ia32/sys_ia32.c | 81
-rw-r--r-- | arch/ia64/kernel/head.S | 4
-rw-r--r-- | arch/ia64/kernel/irq_ia64.c | 26
-rw-r--r-- | arch/ia64/kernel/machvec.c | 27
-rw-r--r-- | arch/ia64/kernel/process.c | 7
-rw-r--r-- | arch/ia64/kernel/setup.c | 11
-rw-r--r-- | arch/ia64/kernel/smp.c | 2
-rw-r--r-- | arch/ia64/kernel/smpboot.c | 6
-rw-r--r-- | arch/ia64/kernel/vmlinux.lds.S | 2
-rw-r--r-- | arch/ia64/pci/pci.c | 2 |
12 files changed, 103 insertions, 85 deletions
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index e13a1a1..d1d50cd 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,11 +249,11 @@ ia32_init (void)
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern struct kmem_cache *partial_page_cachep;
+		extern struct kmem_cache *ia64_partial_page_cachep;
 
-		partial_page_cachep = kmem_cache_create("partial_page_cache",
-					sizeof(struct partial_page),
-					0, SLAB_PANIC, NULL);
+		ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
+					sizeof(struct ia64_partial_page),
+					0, SLAB_PANIC, NULL);
 	}
 #endif
 	return 0;
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index cfa0bc0..466bbcb 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -25,8 +25,8 @@
  * partially mapped pages provide precise accounting of which 4k sub pages
  * are mapped and which ones are not, thereby improving IA-32 compatibility.
  */
-struct partial_page {
-	struct partial_page	*next;	/* linked list, sorted by address */
+struct ia64_partial_page {
+	struct ia64_partial_page *next;	/* linked list, sorted by address */
 	struct rb_node		pp_rb;
 	/* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
 	 * should suffice.*/
@@ -34,17 +34,17 @@ struct partial_page {
 	unsigned int		base;
 };
 
-struct partial_page_list {
-	struct partial_page	*pp_head; /* list head, points to the lowest
+struct ia64_partial_page_list {
+	struct ia64_partial_page *pp_head; /* list head, points to the lowest
 					   * addressed partial page */
 	struct rb_root		ppl_rb;
-	struct partial_page	*pp_hint; /* pp_hint->next is the last
+	struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
 					   * accessed partial page */
 	atomic_t		pp_count; /* reference count */
 };
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-struct partial_page_list* ia32_init_pp_list (void);
+struct ia64_partial_page_list* ia32_init_pp_list (void);
 #else
 # define ia32_init_pp_list() 0
 #endif
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 0afb4fe..af10462 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -253,17 +253,17 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 	return ret;
 }
 
-/* SLAB cache for partial_page structures */
-struct kmem_cache *partial_page_cachep;
+/* SLAB cache for ia64_partial_page structures */
+struct kmem_cache *ia64_partial_page_cachep;
 
 /*
- * init partial_page_list.
+ * init ia64_partial_page_list.
  * return 0 means kmalloc fail.
  */
-struct partial_page_list*
+struct ia64_partial_page_list*
 ia32_init_pp_list(void)
 {
-	struct partial_page_list *p;
+	struct ia64_partial_page_list *p;
 
 	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
 		return p;
@@ -280,12 +280,12 @@ ia32_init_pp_list(void)
  * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
  * be used by later __ia32_insert_pp().
  */
-static struct partial_page *
-__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
-	struct partial_page **pprev, struct rb_node ***rb_link,
+static struct ia64_partial_page *
+__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
+	struct ia64_partial_page **pprev, struct rb_node ***rb_link,
 	struct rb_node **rb_parent)
 {
-	struct partial_page *pp;
+	struct ia64_partial_page *pp;
 	struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
 	pp = ppl->pp_hint;
@@ -297,7 +297,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 
 	while (*__rb_link) {
 		__rb_parent = *__rb_link;
-		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+		pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
 
 		if (pp->base == start) {
 			ppl->pp_hint = pp;
@@ -314,7 +314,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 	*rb_parent = __rb_parent;
 	*pprev = NULL;
 	if (rb_prev)
-		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+		*pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
 
 	return NULL;
 }
@@ -322,9 +322,9 @@
  * insert @pp into @ppl.
  */
 static void
-__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev, struct rb_node **rb_link,
-	struct rb_node *rb_parent)
+__ia32_insert_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev,
+	struct rb_node **rb_link, struct rb_node *rb_parent)
 {
 	/* link list */
 	if (prev) {
@@ -334,7 +334,7 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
 		ppl->pp_head = pp;
 		if (rb_parent)
 			pp->next = rb_entry(rb_parent,
-				struct partial_page, pp_rb);
+				struct ia64_partial_page, pp_rb);
 		else
 			pp->next = NULL;
 	}
@@ -350,8 +350,8 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
  * delete @pp from partial page list @ppl.
  */
 static void
-__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev)
+__ia32_delete_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev)
 {
 	if (prev) {
 		prev->next = pp->next;
@@ -363,15 +363,15 @@ __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
 			ppl->pp_hint = pp->next;
 	}
 	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
-	kmem_cache_free(partial_page_cachep, pp);
+	kmem_cache_free(ia64_partial_page_cachep, pp);
 }
 
-static struct partial_page *
-__pp_prev(struct partial_page *pp)
+static struct ia64_partial_page *
+__pp_prev(struct ia64_partial_page *pp)
 {
 	struct rb_node *prev = rb_prev(&pp->pp_rb);
 	if (prev)
-		return rb_entry(prev, struct partial_page, pp_rb);
+		return rb_entry(prev, struct ia64_partial_page, pp_rb);
 	else
 		return NULL;
 }
@@ -383,7 +383,7 @@ __pp_prev(struct partial_page *pp)
 static void
 __ia32_delete_pp_range(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	if (start >= end)
@@ -401,7 +401,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 	}
 
 	while (pp && pp->base < end) {
-		struct partial_page *tmp = pp->next;
+		struct ia64_partial_page *tmp = pp->next;
 		__ia32_delete_pp(current->thread.ppl, pp, prev);
 		pp = tmp;
 	}
@@ -414,7 +414,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 static int
 __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 
@@ -450,8 +450,8 @@ __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 		return 0;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -504,7 +504,7 @@ ia32_set_pp(unsigned int start, unsigned int end, int flags)
 static int
 __ia32_unset_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 	struct vm_area_struct *vma;
@@ -532,8 +532,8 @@ __ia32_unset_pp(unsigned int start, unsigned int end)
 		return -ENOMEM;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -605,7 +605,7 @@ ia32_unset_pp(unsigned int *startp, unsigned int *endp)
 static int
 __ia32_compare_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, size;
 	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */
@@ -682,13 +682,13 @@ ia32_compare_pp(unsigned int *startp, unsigned int *endp)
 }
 
 static void
-__ia32_drop_pp_list(struct partial_page_list *ppl)
+__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp = ppl->pp_head;
+	struct ia64_partial_page *pp = ppl->pp_head;
 
 	while (pp) {
-		struct partial_page *next = pp->next;
-		kmem_cache_free(partial_page_cachep, pp);
+		struct ia64_partial_page *next = pp->next;
+		kmem_cache_free(ia64_partial_page_cachep, pp);
 		pp = next;
 	}
 
@@ -696,9 +696,9 @@ __ia32_drop_pp_list(struct partial_page_list *ppl)
 }
 
 void
-ia32_drop_partial_page_list(struct task_struct *task)
+ia32_drop_ia64_partial_page_list(struct task_struct *task)
 {
-	struct partial_page_list* ppl = task->thread.ppl;
+	struct ia64_partial_page_list* ppl = task->thread.ppl;
 
 	if (ppl && atomic_dec_and_test(&ppl->pp_count))
 		__ia32_drop_pp_list(ppl);
@@ -708,9 +708,9 @@ ia32_drop_partial_page_list(struct task_struct *task)
  * Copy current->thread.ppl to ppl (already initialized).
  */
 static int
-__ia32_copy_pp_list(struct partial_page_list *ppl)
+__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp, *tmp, *prev;
+	struct ia64_partial_page *pp, *tmp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	ppl->pp_head = NULL;
@@ -721,7 +721,7 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 	prev = NULL;
 
 	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
-		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+		tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 		if (!tmp)
 			return -ENOMEM;
 		*tmp = *pp;
@@ -734,7 +734,8 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 }
 
 int
-ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+ia32_copy_ia64_partial_page_list(struct task_struct *p,
+				unsigned long clone_flags)
 {
 	int retval = 0;
 
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 44d540e..4e5e275 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -178,7 +178,7 @@ swapper_pg_dir:
 halt_msg:
 	stringz "Halting kernel\n"
 
-	.text
+	.section .text.head,"ax"
 
 	.global start_ap
 
@@ -392,6 +392,8 @@ self:	hint @pause
 	br.sptk.many self		// endless loop
 END(_start)
 
+	.text
+
 GLOBAL_ENTRY(ia64_save_debug_regs)
 	alloc r16=ar.pfs,1,0,0,0
 	mov r20=ar.lc			// preserve ar.lc
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 91797c1..fcb7733 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -85,8 +85,8 @@ DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
 	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
 };
 
-static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
-	[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
+static cpumask_t vector_table[IA64_NUM_VECTORS] = {
+	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
 };
 
 static int irq_status[NR_IRQS] = {
@@ -123,17 +123,18 @@ static inline int find_unassigned_irq(void)
 static inline int find_unassigned_vector(cpumask_t domain)
 {
 	cpumask_t mask;
-	int pos;
+	int pos, vector;
 
 	cpus_and(mask, domain, cpu_online_map);
 	if (cpus_empty(mask))
 		return -EINVAL;
 
 	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
-		cpus_and(mask, domain, vector_table[pos]);
+		vector = IA64_FIRST_DEVICE_VECTOR + pos;
+		cpus_and(mask, domain, vector_table[vector]);
 		if (!cpus_empty(mask))
 			continue;
-		return IA64_FIRST_DEVICE_VECTOR + pos;
+		return vector;
 	}
 	return -ENOSPC;
 }
@@ -141,7 +142,7 @@ static inline int find_unassigned_vector(cpumask_t domain)
 static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 {
 	cpumask_t mask;
-	int cpu, pos;
+	int cpu;
 	struct irq_cfg *cfg = &irq_cfg[irq];
 
 	cpus_and(mask, domain, cpu_online_map);
@@ -156,8 +157,7 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 	cfg->vector = vector;
 	cfg->domain = domain;
 	irq_status[irq] = IRQ_USED;
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	cpus_or(vector_table[pos], vector_table[pos], domain);
+	cpus_or(vector_table[vector], vector_table[vector], domain);
 	return 0;
 }
 
@@ -174,7 +174,7 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)
 
 static void __clear_irq_vector(int irq)
 {
-	int vector, cpu, pos;
+	int vector, cpu;
 	cpumask_t mask;
 	cpumask_t domain;
 	struct irq_cfg *cfg = &irq_cfg[irq];
@@ -189,8 +189,7 @@ static void __clear_irq_vector(int irq)
 	cfg->vector = IRQ_VECTOR_UNASSIGNED;
 	cfg->domain = CPU_MASK_NONE;
 	irq_status[irq] = IRQ_UNUSED;
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	cpus_andnot(vector_table[pos], vector_table[pos], domain);
+	cpus_andnot(vector_table[vector], vector_table[vector], domain);
 }
 
 static void clear_irq_vector(int irq)
@@ -212,9 +211,6 @@ assign_irq_vector (int irq)
 	vector = -ENOSPC;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	if (irq < 0) {
-		goto out;
-	}
 	for_each_online_cpu(cpu) {
 		domain = vector_allocation_domain(cpu);
 		vector = find_unassigned_vector(domain);
@@ -223,6 +219,8 @@ assign_irq_vector (int irq)
 	}
 	if (vector < 0)
 		goto out;
+	if (irq == AUTO_ASSIGN)
+		irq = vector;
 	BUG_ON(__bind_irq_vector(irq, vector, domain));
  out:
 	spin_unlock_irqrestore(&vector_lock, flags);
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 13df337..7ccb228 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -13,14 +13,6 @@
 struct ia64_machine_vector ia64_mv;
 EXPORT_SYMBOL(ia64_mv);
 
-static __initdata const char *mvec_name;
-static __init int setup_mvec(char *s)
-{
-	mvec_name = s;
-	return 0;
-}
-early_param("machvec", setup_mvec);
-
 static struct ia64_machine_vector * __init
 lookup_machvec (const char *name)
 {
@@ -41,7 +33,7 @@ machvec_init (const char *name)
 	struct ia64_machine_vector *mv;
 
 	if (!name)
-		name = mvec_name ? mvec_name : acpi_get_sysname();
+		name = acpi_get_sysname();
 	mv = lookup_machvec(name);
 	if (!mv)
 		panic("generic kernel failed to find machine vector for"
@@ -51,6 +43,23 @@ machvec_init (const char *name)
 	printk(KERN_INFO "booting generic kernel on platform %s\n", name);
 }
 
+void __init
+machvec_init_from_cmdline(const char *cmdline)
+{
+	char str[64];
+	const char *start;
+	char *end;
+
+	if (! (start = strstr(cmdline, "machvec=")) )
+		return machvec_init(NULL);
+
+	strlcpy(str, start + strlen("machvec="), sizeof(str));
+	if ( (end = strchr(str, ' ')) )
+		*end = '\0';
+
+	return machvec_init(str);
+}
+
 #endif /* CONFIG_IA64_GENERIC */
 
 void
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index fa40cba..4158906 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -499,7 +499,8 @@ copy_thread (int nr, unsigned long clone_flags,
 
 		/* Copy partially mapped page list */
 		if (!retval)
-			retval = ia32_copy_partial_page_list(p, clone_flags);
+			retval = ia32_copy_ia64_partial_page_list(p,
+								clone_flags);
 	}
 #endif
 
@@ -728,7 +729,7 @@ flush_thread (void)
 	ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
 	if (IS_IA32_PROCESS(task_pt_regs(current))) {
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
 	}
@@ -754,7 +755,7 @@ exit_thread (void)
 		pfm_release_debug_registers(current);
 #endif
 	if (IS_IA32_PROCESS(task_pt_regs(current)))
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 }
 
 unsigned long
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index cf06fe79..7cecd29 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -491,12 +491,17 @@ setup_arch (char **cmdline_p)
 	efi_init();
 	io_port_init();
 
-	parse_early_param();
-
 #ifdef CONFIG_IA64_GENERIC
-	machvec_init(NULL);
+	/* machvec needs to be parsed from the command line
+	 * before parse_early_param() is called to ensure
+	 * that ia64_mv is initialised before any command line
+	 * settings may cause console setup to occur
+	 */
+	machvec_init_from_cmdline(*cmdline_p);
 #endif
 
+	parse_early_param();
+
 	if (early_console_setup(*cmdline_p) == 0)
 		mark_bsp_online();
 
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 9f72838..0982882 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -468,7 +468,7 @@ smp_send_stop (void)
 	send_IPI_allbutself(IPI_CPU_STOP);
 }
 
-int __init
+int
 setup_profiling_timer (unsigned int multiplier)
 {
 	return -EINVAL;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 9f5c90b..62209dc 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -487,7 +487,7 @@ struct create_idle {
 	int cpu;
 };
 
-void
+void __cpuinit
 do_fork_idle(struct work_struct *work)
 {
 	struct create_idle *c_idle =
@@ -497,7 +497,7 @@ do_fork_idle(struct work_struct *work)
 	complete(&c_idle->done);
 }
 
-static int __devinit
+static int __cpuinit
 do_boot_cpu (int sapicid, int cpu)
 {
 	int timeout;
@@ -808,7 +808,7 @@ set_cpu_sibling_map(int cpu)
 		}
 	}
 
-int __devinit
+int __cpuinit
 __cpu_up (unsigned int cpu)
 {
 	int ret;
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 860f251..83e8067 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -50,6 +50,8 @@ SECTIONS
 	  KPROBES_TEXT
 	  *(.gnu.linkonce.t*)
 	}
+	.text.head : AT(ADDR(.text.head) - LOAD_OFFSET)
+	  { *(.text.head) }
 	.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
 	  { *(.text2) }
 #ifdef CONFIG_SMP
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 07d0e92..488e48a 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -581,7 +581,7 @@ pcibios_align_resource (void *data, struct resource *res,
 /*
  * PCI BIOS setup, always defaults to SAL interface
  */
-char * __init
+char * __devinit
 pcibios_setup (char *str)
 {
 	return str;
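A closing note on the machvec ordering change in setup.c: machvec_init_from_cmdline() has to scan the raw command line itself because it now runs before parse_early_param(). The sketch below re-implements that scan as a plain userspace program for illustration only — machvec_init() is a stub and the kernel's strlcpy() is replaced with snprintf(); the control flow otherwise mirrors the hunk above.

```c
/* Userspace illustration of the "machvec=" command-line scan -- not kernel code. */
#include <stdio.h>
#include <string.h>

/* stub: the real machvec_init() looks up and installs the machine vector */
static void machvec_init(const char *name)
{
	printf("machvec_init(%s)\n", name ? name : "(default via ACPI)");
}

static void machvec_init_from_cmdline(const char *cmdline)
{
	char str[64];
	const char *start;
	char *end;

	start = strstr(cmdline, "machvec=");
	if (!start) {
		machvec_init(NULL);	/* no override: fall back to the default */
		return;
	}

	/* copy the value (snprintf stands in for the kernel's strlcpy) and
	 * cut it off at the next space, if any */
	snprintf(str, sizeof(str), "%s", start + strlen("machvec="));
	end = strchr(str, ' ');
	if (end)
		*end = '\0';

	machvec_init(str);
}

int main(void)
{
	machvec_init_from_cmdline("console=ttyS0 machvec=dig root=/dev/sda1");
	machvec_init_from_cmdline("console=ttyS0 root=/dev/sda1");
	return 0;
}
```

Run as-is, the first call prints machvec_init(dig) and the second falls back to the default path, which is exactly the behaviour setup_arch() now relies on before the console is set up.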