Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/Kconfig                 |  19
-rw-r--r--	arch/ia64/kernel/Makefile         |   3
-rw-r--r--	arch/ia64/kernel/acpi-ext.c       | 143
-rw-r--r--	arch/ia64/kernel/entry.S          |   4
-rw-r--r--	arch/ia64/kernel/kprobes.c        |  10
-rw-r--r--	arch/ia64/kernel/mca.c            |  43
-rw-r--r--	arch/ia64/kernel/mca_asm.S        |  10
-rw-r--r--	arch/ia64/kernel/module.c         |   2
-rw-r--r--	arch/ia64/mm/discontig.c          |  66
-rw-r--r--	arch/ia64/mm/fault.c              |   3
-rw-r--r--	arch/ia64/sn/kernel/xpc_channel.c |   2
11 files changed, 192 insertions, 113 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index edffe25..9f40eef 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -260,15 +260,6 @@ config NR_CPUS
 	  than 64 will cause the use of a CPU mask array, causing a small
 	  performance hit.
 
-config IA64_NR_NODES
-	int "Maximum number of NODEs (256-1024)" if (IA64_SGI_SN2 || IA64_GENERIC)
-	range 256 1024
-	depends on IA64_SGI_SN2 || IA64_GENERIC
-	default "256"
-	help
-	  This option specifies the maximum number of nodes in your SSI system.
-	  If in doubt, use the default.
-
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
 	depends on SMP && EXPERIMENTAL
@@ -352,6 +343,16 @@ config NUMA
 	  Access).  This option is for configuring high-end multiprocessor
 	  server systems.  If in doubt, say N.
 
+config NODES_SHIFT
+	int "Max num nodes shift(3-10)"
+	range 3 10
+	default "8"
+	depends on NEED_MULTIPLE_NODES
+	help
+	  This option specifies the maximum number of nodes in your SSI system.
+	  MAX_NUMNODES will be 2^(This value).
+	  If in doubt, use the default.
+
 # VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
 # VIRTUAL_MEM_MAP has been retained for historical reasons.
 config VIRTUAL_MEM_MAP
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 59e871d..09a0dbc 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y	:= head.o init_task.o vmlinux.lds
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o	\
 	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o		\
 	 salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
-	 unwind.o mca.o mca_asm.o topology.o dmi_scan.o
+	 unwind.o mca.o mca_asm.o topology.o
 
 obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
 obj-$(CONFIG_IA64_GENERIC)	+= acpi-ext.o
@@ -30,7 +30,6 @@ obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)	+= uncached.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
-dmi_scan-y			+= ../../i386/kernel/dmi_scan.o
 
 # The gate DSO image is built using a special linker script.
 targets += gate.so gate-syms.o
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 4a5574f..fff8292 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -1,105 +1,104 @@
 /*
- * arch/ia64/kernel/acpi-ext.c
+ * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
+ *	Alex Williamson <alex.williamson@hp.com>
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
- * Copyright (C) 2003 Hewlett-Packard
- * Copyright (C) Alex Williamson
- * Copyright (C) Bjorn Helgaas
- *
- * Vendor specific extensions to ACPI.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  */
 
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/acpi.h>
-#include <linux/efi.h>
 
 #include <asm/acpi-ext.h>
 
-struct acpi_vendor_descriptor {
-	u8 guid_id;
-	efi_guid_t guid;
-};
+/*
+ * Device CSRs that do not appear in PCI config space should be described
+ * via ACPI.  This would normally be done with Address Space Descriptors
+ * marked as "consumer-only," but old versions of Windows and Linux ignore
+ * the producer/consumer flag, so HP invented a vendor-defined resource to
+ * describe the location and size of CSR space.
+ */
 
-struct acpi_vendor_info {
-	struct acpi_vendor_descriptor *descriptor;
-	u8 *data;
-	u32 length;
+struct acpi_vendor_uuid hp_ccsr_uuid = {
+	.subtype = 2,
+	.data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
+	    0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
 };
 
-acpi_status
-acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
+static acpi_status hp_ccsr_locate(acpi_handle obj, u64 *base, u64 *length)
 {
-	struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
-	struct acpi_resource_vendor *vendor;
-	struct acpi_vendor_descriptor *descriptor;
-	u32 byte_length;
-
-	if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
-		return AE_OK;
-
-	vendor = (struct acpi_resource_vendor *)&resource->data;
-	descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
-	if (vendor->byte_length <= sizeof(*info->descriptor) ||
-	    descriptor->guid_id != info->descriptor->guid_id ||
-	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
-		return AE_OK;
-
-	byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
-	info->data = acpi_os_allocate(byte_length);
-	if (!info->data)
-		return AE_NO_MEMORY;
-
-	memcpy(info->data,
-	       vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
-	       byte_length);
-	info->length = byte_length;
-	return AE_CTRL_TERMINATE;
-}
+	acpi_status status;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_resource *resource;
+	struct acpi_resource_vendor_typed *vendor;
 
-acpi_status
-acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
-			  u8 ** data, u32 * byte_length)
-{
-	struct acpi_vendor_info info;
+	status = acpi_get_vendor_resource(obj, METHOD_NAME__CRS, &hp_ccsr_uuid,
+		&buffer);
 
-	info.descriptor = id;
-	info.data = NULL;
+	resource = buffer.pointer;
+	vendor = &resource->data.vendor_typed;
 
-	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match,
-			    &info);
-	if (!info.data)
-		return AE_NOT_FOUND;
+	if (ACPI_FAILURE(status) || vendor->byte_length < 16) {
+		status = AE_NOT_FOUND;
+		goto exit;
+	}
 
-	*data = info.data;
-	*byte_length = info.length;
-	return AE_OK;
+	memcpy(base, vendor->byte_data, sizeof(*base));
+	memcpy(length, vendor->byte_data + 8, sizeof(*length));
+
+  exit:
+	acpi_os_free(buffer.pointer);
+	return status;
 }
 
-struct acpi_vendor_descriptor hp_ccsr_descriptor = {
-	.guid_id = 2,
-	.guid =
-	    EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01,
-		     0x37, 0x0e, 0xad)
+struct csr_space {
+	u64	base;
+	u64	length;
 };
 
-acpi_status hp_acpi_csr_space(acpi_handle obj, u64 * csr_base, u64 * csr_length)
+static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
 {
+	struct csr_space *space = data;
+	struct acpi_resource_address64 addr;
 	acpi_status status;
-	u8 *data;
-	u32 length;
 
-	status =
-	    acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
+	status = acpi_resource_to_address64(resource, &addr);
+	if (ACPI_SUCCESS(status) &&
+	    addr.resource_type == ACPI_MEMORY_RANGE &&
+	    addr.address_length &&
+	    addr.producer_consumer == ACPI_CONSUMER) {
+		space->base = addr.minimum;
+		space->length = addr.address_length;
+		return AE_CTRL_TERMINATE;
+	}
+	return AE_OK;		/* keep looking */
+}
 
-	if (ACPI_FAILURE(status) || length != 16)
-		return AE_NOT_FOUND;
+static acpi_status hp_crs_locate(acpi_handle obj, u64 *base, u64 *length)
+{
+	struct csr_space space = { 0, 0 };
 
-	memcpy(csr_base, data, sizeof(*csr_base));
-	memcpy(csr_length, data + 8, sizeof(*csr_length));
-	acpi_os_free(data);
+	acpi_walk_resources(obj, METHOD_NAME__CRS, find_csr_space, &space);
+	if (!space.length)
+		return AE_NOT_FOUND;
 
+	*base = space.base;
+	*length = space.length;
 	return AE_OK;
 }
 
+acpi_status hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
+{
+	acpi_status status;
+
+	status = hp_ccsr_locate(obj, csr_base, csr_length);
+	if (ACPI_SUCCESS(status))
+		return status;
+
+	return hp_crs_locate(obj, csr_base, csr_length);
+}
 EXPORT_SYMBOL(hp_acpi_csr_space);
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 750e8e7..e307988 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1606,5 +1606,9 @@ sys_call_table:
 	data8 sys_ni_syscall			// 1295 reserved for ppoll
 	data8 sys_unshare
 	data8 sys_splice
+	data8 sys_set_robust_list
+	data8 sys_get_robust_list
+	data8 sys_sync_file_range		// 1300
+	data8 sys_tee
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 789881c..f9039f8 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -251,7 +251,7 @@ static void __kprobes prepare_break_inst(uint template, uint slot,
 	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
 }
 
-static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
+static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
 		unsigned long *kprobe_inst, uint *major_opcode)
 {
 	unsigned long kprobe_inst_p0, kprobe_inst_p1;
@@ -278,7 +278,7 @@ static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
 }
 
 /* Returns non-zero if the addr is in the Interrupt Vector Table */
-static inline int in_ivt_functions(unsigned long addr)
+static int __kprobes in_ivt_functions(unsigned long addr)
 {
 	return (addr >= (unsigned long)__start_ivt_text
 		&& addr < (unsigned long)__end_ivt_text);
@@ -308,19 +308,19 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
 	return 0;
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 }
 
-static inline void set_current_kprobe(struct kprobe *p,
+static void __kprobes set_current_kprobe(struct kprobe *p,
 			struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8963171..6a08806 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -581,10 +581,12 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
 {
 	unsigned long flags;
 	int cpu = smp_processor_id();
+	struct ia64_mca_notify_die nd =
+		{ .sos = NULL, .monarch_cpu = &monarch_cpu };
 
 	/* Mask all interrupts */
 	local_irq_save(flags);
-	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -594,7 +596,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
 	 */
 	ia64_sal_mc_rendez();
 
-	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -602,7 +604,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
 	while (monarch_cpu != -1)
 	       cpu_relax();	/* spin until monarch leaves */
 
-	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -961,7 +963,7 @@ no_mod:
  */
 
 static void
-ia64_wait_for_slaves(int monarch)
+ia64_wait_for_slaves(int monarch, const char *type)
 {
 	int c, wait = 0, missing = 0;
 	for_each_online_cpu(c) {
@@ -987,7 +989,7 @@ ia64_wait_for_slaves(int monarch)
 	}
 	if (!missing)
 		goto all_in;
-	printk(KERN_INFO "OS MCA slave did not rendezvous on cpu");
+	printk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
 	for_each_online_cpu(c) {
 		if (c == monarch)
 			continue;
@@ -998,7 +1000,7 @@ ia64_wait_for_slaves(int monarch)
 	return;
 
 all_in:
-	printk(KERN_INFO "All OS MCA slaves have reached rendezvous\n");
+	printk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
 	return;
 }
 
@@ -1023,6 +1025,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		&sos->proc_state_param;
 	int recover, cpu = smp_processor_id();
 	task_t *previous_current;
+	struct ia64_mca_notify_die nd =
+		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
 	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
 	console_loglevel = 15;	/* make sure printks make it to console */
@@ -1031,10 +1035,10 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 	monarch_cpu = cpu;
-	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
-	ia64_wait_for_slaves(cpu);
+	ia64_wait_for_slaves(cpu, "MCA");
 
 	/* Wakeup all the processors which are spinning in the rendezvous loop.
 	 * They will leave SAL, then spin in the OS with interrupts disabled
@@ -1043,7 +1047,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * spinning in SAL does not work.
 	 */
 	ia64_mca_wakeup_all();
-	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1064,7 +1068,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
 		sos->os_status = IA64_MCA_CORRECTED;
 	}
-	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, 0, 0, recover)
+	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1351,10 +1355,14 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	static atomic_t monarchs;
 	task_t *previous_current;
 	int cpu = smp_processor_id();
+	struct ia64_mca_notify_die nd =
+		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
 	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
 	console_loglevel = 15;	/* make sure printks make it to console */
 
+	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+
 	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
 		sos->proc_state_param, cpu, sos->monarch);
 	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
 
@@ -1390,15 +1398,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
 		while (monarch_cpu == -1)
 		       cpu_relax();	/* spin until monarch enters */
-		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, 0, 0, 0)
+		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
 			ia64_mca_spin(__FUNCTION__);
-		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, 0, 0, 0)
+		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
 			ia64_mca_spin(__FUNCTION__);
 		while (monarch_cpu != -1)
 		       cpu_relax();	/* spin until monarch leaves */
-		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, 0, 0, 0)
+		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
 			ia64_mca_spin(__FUNCTION__);
 		printk("Slave on cpu %d returning to normal service.\n", cpu);
@@ -1409,7 +1417,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	}
 
 	monarch_cpu = cpu;
-	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1421,15 +1429,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	printk("Delaying for 5 seconds...\n");
 	udelay(5*1000000);
-	ia64_wait_for_slaves(cpu);
+	ia64_wait_for_slaves(cpu, "INIT");
 
 	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
 	 * to default_monarch_init_process() above and just print all the
 	 * tasks.
 	 */
-	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
-	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
 	printk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
@@ -1631,6 +1639,7 @@ ia64_mca_init(void)
 			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
 				"%ld to %ld milliseconds\n", timeout, isrv.v0);
 			timeout = isrv.v0;
+			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
 			continue;
 		}
 		printk(KERN_ERR "Failed to register rendezvous interrupt "
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 60a464b..6dff024 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -827,7 +827,7 @@ ia64_state_restore:
 	ld8 r9=[temp2],16	// sal_gp
 	;;
 	ld8 r22=[temp1],16	// pal_min_state, virtual
-	ld8 r21=[temp2],16	// prev_IA64_KR_CURRENT
+	ld8 r13=[temp2],16	// prev_IA64_KR_CURRENT
 	;;
 	ld8 r16=[temp1],16	// prev_IA64_KR_CURRENT_STACK
 	ld8 r20=[temp2],16	// prev_task
@@ -848,7 +848,7 @@ ia64_state_restore:
 	mov cr.iim=temp3
 	mov cr.iha=temp4
 	dep r22=0,r22,62,1	// pal_min_state, physical, uncached
-	mov IA64_KR(CURRENT)=r21
+	mov IA64_KR(CURRENT)=r13
 	ld8 r8=[temp1]		// os_status
 	ld8 r10=[temp2]		// context
 
@@ -856,7 +856,7 @@ ia64_state_restore:
 	 * avoid any dependencies on the algorithm in ia64_switch_to(), just
 	 * purge any existing CURRENT_STACK mapping and insert the new one.
 	 *
-	 * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
+	 * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
 	 * prev_IA64_KR_CURRENT, these values may have been changed by the C
 	 * code.  Do not use r8, r9, r10, r22, they contain values ready for
 	 * the return to SAL.
@@ -873,7 +873,7 @@ ia64_state_restore:
 	;;
 	srlz.d
 
-	extr.u r19=r21,61,3			// r21 = prev_IA64_KR_CURRENT
+	extr.u r19=r13,61,3			// r13 = prev_IA64_KR_CURRENT
 	shl r20=r16,IA64_GRANULE_SHIFT		// r16 = prev_IA64_KR_CURRENT_STACK
 	movl r21=PAGE_KERNEL			// page properties
 	;;
@@ -883,7 +883,7 @@ ia64_state_restore:
 (p6)	br.spnt 1f				// the dreaded cpu 0 idle task in region 5:(
 	;;
 	mov cr.itir=r18
-	mov cr.ifa=r21
+	mov cr.ifa=r13
 	mov r20=IA64_TR_CURRENT_STACK
 	;;
 	itr.d dtr[r20]=r21
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 7a2f0a7..3a30cfc 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -947,7 +947,7 @@ void
 percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
 {
 	unsigned int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		memcpy(pcpudst + __per_cpu_offset[i], src, size);
 	}
 }
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index ec9eeb8..b6bcc9f 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -519,6 +519,68 @@ void __cpuinit *per_cpu_init(void)
 }
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
+{
+	unsigned long end_address, hole_next_pfn;
+	unsigned long stop_address;
+
+	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+	end_address = PAGE_ALIGN(end_address);
+
+	stop_address = (unsigned long) &vmem_map[
+		pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+	do {
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pgd = pgd_offset_k(end_address);
+		if (pgd_none(*pgd)) {
+			end_address += PGDIR_SIZE;
+			continue;
+		}
+
+		pud = pud_offset(pgd, end_address);
+		if (pud_none(*pud)) {
+			end_address += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, end_address);
+		if (pmd_none(*pmd)) {
+			end_address += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+		if (pte_none(*pte)) {
+			end_address += PAGE_SIZE;
+			pte++;
+			if ((end_address < stop_address) &&
+			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+				goto retry_pte;
+			continue;
+		}
+		/* Found next valid vmem_map page */
+		break;
+	} while (end_address < stop_address);
+
+	end_address = min(end_address, stop_address);
+	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+	hole_next_pfn = end_address / sizeof(struct page);
+	return hole_next_pfn - pgdat->node_start_pfn;
+}
+#else
+static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
+{
+	return i + 1;
+}
+#endif
+
 /**
  * show_mem - give short summary of memory stats
  *
@@ -547,8 +609,10 @@ void show_mem(void)
 			struct page *page;
 			if (pfn_valid(pgdat->node_start_pfn + i))
 				page = pfn_to_page(pgdat->node_start_pfn + i);
-			else
+			else {
+				i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
 				continue;
+			}
 			if (PageReserved(page))
 				reserved++;
 			else if (PageSwapCache(page))
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index af7eb08..d98ec49 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -60,6 +60,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct siginfo si;
 	unsigned long mask;
 
+	/* mmap_sem is performance critical.... */
+	prefetchw(&mm->mmap_sem);
+
 	/*
 	 * If we're in an interrupt or have no user context, we must not take the fault..
 	 */
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index d0abddd..8255a9b 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -1831,7 +1831,7 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
 {
 	struct xpc_partition *part = &xpc_partitions[partid];
 	enum xpc_retval ret = xpcUnknownReason;
-	struct xpc_msg *msg;
+	struct xpc_msg *msg = NULL;
 
 
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);