author      Matthew Wilcox <matthew@wil.cx>       2009-05-22 13:49:49 -0700
committer   Tony Luck <tony.luck@intel.com>       2009-06-17 09:33:49 -0700
commit      e088a4ad7fa53c3dc3c29f930025f41ccf01953e (patch)
tree        07b012952bbbaccfe4ef3bb44b1ea0a3a3bb3868 /arch/ia64/kernel
parent      e56e2dcd381d9ec35379328f332221581eda4787 (diff)
[IA64] Convert ia64 to use int-ll64.h
It is generally agreed that it would be beneficial for u64 to be an
unsigned long long on all architectures. ia64 (in common with several
other 64-bit architectures) currently uses unsigned long. Migrating
piecemeal is too painful; this giant patch fixes all compilation warnings
and errors that come as a result of switching to use int-ll64.h.
Note that userspace will still see __u64 defined as unsigned long. This
is important as it affects C++ name mangling.
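The <asm/types.h> change that implements this kernel/userspace split lives outside
arch/ia64/kernel, so it does not appear in the diffstat below. As a rough, hypothetical
sketch (not taken from this patch) of the effect described above:

    #ifdef __KERNEL__
    # include <asm-generic/int-ll64.h>  /* kernel view: u64/__u64 is unsigned long long */
    #else
    # include <asm-generic/int-l64.h>   /* exported view: __u64 stays unsigned long, so
                                           the mangled names of C++ interfaces that take
                                           __u64 parameters do not change */
    #endif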
[Updated by Tony Luck to change efi.h:efi_freemem_callback_t to use
u64 for start/end rather than unsigned long]
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
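Because u64 becomes unsigned long long, any printk/dev_info that prints a u64 with
%lx or %lu now trips -Wformat even though the width is unchanged; that is what most of
the format-string hunks below address. A minimal userspace sketch of the same effect
(illustrative only; the typedef names are invented for this example):

    #include <stdio.h>

    typedef unsigned long      u64_l64;   /* int-l64.h style: the old ia64 u64 */
    typedef unsigned long long u64_ll64;  /* int-ll64.h style: the new ia64 u64 */

    int main(void)
    {
            u64_l64  a = ~0UL;    /* 64 bits wide on ia64 */
            u64_ll64 b = ~0ULL;   /* also 64 bits wide, but a different C type */

            printf("%lx\n", a);   /* matches unsigned long */
            printf("%llx\n", b);  /* matches unsigned long long */
            /* printf("%lx\n", b); would now draw a -Wformat warning, which is
             * why the hunks below switch %lx/%lu to %llx/%llu for u64 values. */
            return 0;
    }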
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--   arch/ia64/kernel/efi.c        10
-rw-r--r--   arch/ia64/kernel/mca.c        18
-rw-r--r--   arch/ia64/kernel/module.c     14
-rw-r--r--   arch/ia64/kernel/palinfo.c    68
-rw-r--r--   arch/ia64/kernel/pci-dma.c     2
-rw-r--r--   arch/ia64/kernel/perfmon.c     6
-rw-r--r--   arch/ia64/kernel/setup.c      32
-rw-r--r--   arch/ia64/kernel/smp.c         2
-rw-r--r--   arch/ia64/kernel/smpboot.c     2
-rw-r--r--   arch/ia64/kernel/time.c        2
-rw-r--r--   arch/ia64/kernel/topology.c    4
-rw-r--r--   arch/ia64/kernel/uncached.c    3
12 files changed, 83 insertions, 80 deletions
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 7ef80e8..c745d0a 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -46,7 +46,7 @@ extern efi_status_t efi_call_phys (void *, ...);
 struct efi efi;
 EXPORT_SYMBOL(efi);
 static efi_runtime_services_t *runtime;
-static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
+static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
 
 #define efi_call_virt(f, args...)	(*(f))(args)
 
@@ -356,7 +356,7 @@ efi_get_pal_addr (void)
 		if (++pal_code_count > 1) {
 			printk(KERN_ERR "Too many EFI Pal Code memory ranges, "
-			       "dropped @ %lx\n", md->phys_addr);
+			       "dropped @ %llx\n", md->phys_addr);
 			continue;
 		}
 		/*
@@ -490,10 +490,10 @@ efi_init (void)
 		}
 	}
 	if (min_addr != 0UL)
-		printk(KERN_INFO "Ignoring memory below %luMB\n",
+		printk(KERN_INFO "Ignoring memory below %lluMB\n",
 		       min_addr >> 20);
 	if (max_addr != ~0UL)
-		printk(KERN_INFO "Ignoring memory above %luMB\n",
+		printk(KERN_INFO "Ignoring memory above %lluMB\n",
 		       max_addr >> 20);
 
 	efi.systab = __va(ia64_boot_param->efi_systab);
@@ -1066,7 +1066,7 @@ find_memmap_space (void)
  * parts exist, and are WB.
  */
 unsigned long
-efi_memmap_init(unsigned long *s, unsigned long *e)
+efi_memmap_init(u64 *s, u64 *e)
 {
 	struct kern_memdesc *k, *prev = NULL;
 	u64	contig_low=0, contig_high=0;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 1cce4ce..c259b94 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -850,7 +850,7 @@ EXPORT_SYMBOL(ia64_unreg_MCA_extension);
 
 static inline void
-copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
+copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
 {
 	u64 fslot, tslot, nat;
 	*tr = *fr;
@@ -914,9 +914,9 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	struct switch_stack *old_sw;
 	unsigned size = sizeof(struct pt_regs) +
 			sizeof(struct switch_stack) + 16;
-	u64 *old_bspstore, *old_bsp;
-	u64 *new_bspstore, *new_bsp;
-	u64 old_unat, old_rnat, new_rnat, nat;
+	unsigned long *old_bspstore, *old_bsp;
+	unsigned long *new_bspstore, *new_bsp;
+	unsigned long old_unat, old_rnat, new_rnat, nat;
 	u64 slots, loadrs = regs->loadrs;
 	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
 	u64 ar_bspstore = regs->ar_bspstore;
@@ -968,10 +968,10 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	 * loadrs for the new stack and save it in the new pt_regs, where
 	 * ia64_old_stack() can get it.
 	 */
-	old_bspstore = (u64 *)ar_bspstore;
-	old_bsp = (u64 *)ar_bsp;
+	old_bspstore = (unsigned long *)ar_bspstore;
+	old_bsp = (unsigned long *)ar_bsp;
 	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
-	new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
+	new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
 	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
 	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
@@ -1918,9 +1918,9 @@ ia64_mca_init(void)
 	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
 	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
 	int i;
-	s64 rc;
+	long rc;
 	struct ia64_sal_retval isrv;
-	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
+	unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
 	static struct notifier_block default_init_monarch_nb = {
 		.notifier_call = default_monarch_init_process,
 		.priority = 0/* we need to notified last */
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index da3b0cf..1481b0a 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -171,7 +171,8 @@ apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
 		return 0;
 	}
 	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
-		printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (int64_t) val);
+		printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
+			mod->name, (long) val);
 		return 0;
 	}
 	ia64_patch_imm60((u64) insn, val);
@@ -182,7 +183,8 @@ static int
 apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
 {
 	if (val + (1 << 21) >= (1 << 22)) {
-		printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (int64_t)val);
+		printk(KERN_ERR "%s: value %li out of IMM22 range\n",
+			mod->name, (long)val);
 		return 0;
 	}
 	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
@@ -196,7 +198,8 @@ static int
 apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
 {
 	if (val + (1 << 20) >= (1 << 21)) {
-		printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (int64_t)val);
+		printk(KERN_ERR "%s: value %li out of IMM21b range\n",
+			mod->name, (long)val);
 		return 0;
 	}
 	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
@@ -701,8 +704,9 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 	      case RV_PCREL2:
 		if (r_type == R_IA64_PCREL21BI) {
 			if (!is_internal(mod, val)) {
-				printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
-				       __func__, reloc_name[r_type], val);
+				printk(KERN_ERR "%s: %s reloc against "
+					"non-local symbol (%lx)\n", __func__,
+					reloc_name[r_type], (unsigned long)val);
 				return -ENOEXEC;
 			}
 			format = RF_INSN21B;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index a4f19c7..fdf6f9d 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -218,10 +218,10 @@ static int
 cache_info(char *page)
 {
 	char *p = page;
-	u64 i, levels, unique_caches;
+	unsigned long i, levels, unique_caches;
 	pal_cache_config_info_t cci;
 	int j, k;
-	s64 status;
+	long status;
 
 	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
 		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
@@ -303,7 +303,7 @@ vm_info(char *page)
 	ia64_ptce_info_t ptce;
 	const char *sep;
 	int i, j;
-	s64 status;
+	long status;
 
 	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
 		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
@@ -431,9 +431,9 @@ register_info(char *page)
 	char *p = page;
 	u64 reg_info[2];
 	u64 info;
-	u64 phys_stacked;
+	unsigned long phys_stacked;
 	pal_hints_u_t hints;
-	u64 iregs, dregs;
+	unsigned long iregs, dregs;
 	char *info_type[]={
 		"Implemented AR(s)",
 		"AR(s) with read side-effects",
@@ -530,8 +530,8 @@ static char **proc_features[]={
 	NULL, NULL, NULL, NULL,
 };
 
-static char *
-feature_set_info(char *page, u64 avail, u64 status, u64 control, u64 set)
+static char * feature_set_info(char *page, u64 avail, u64 status, u64 control,
+			       unsigned long set)
 {
 	char *p = page;
 	char **vf, **v;
@@ -714,7 +714,7 @@ frequency_info(char *page)
 {
 	char *p = page;
 	struct pal_freq_ratio proc, itc, bus;
-	u64 base;
+	unsigned long base;
 
 	if (ia64_pal_freq_base(&base) == -1)
 		p += sprintf(p, "Output clock : not implemented\n");
@@ -736,43 +736,43 @@ static int
 tr_info(char *page)
 {
 	char *p = page;
-	s64 status;
+	long status;
 	pal_tr_valid_u_t tr_valid;
 	u64 tr_buffer[4];
 	pal_vm_info_1_u_t vm_info_1;
 	pal_vm_info_2_u_t vm_info_2;
-	u64 i, j;
-	u64 max[3], pgm;
+	unsigned long i, j;
+	unsigned long max[3], pgm;
 	struct ifa_reg {
-		u64 valid:1;
-		u64 ig:11;
-		u64 vpn:52;
+		unsigned long valid:1;
+		unsigned long ig:11;
+		unsigned long vpn:52;
 	} *ifa_reg;
 	struct itir_reg {
-		u64 rv1:2;
-		u64 ps:6;
-		u64 key:24;
-		u64 rv2:32;
+		unsigned long rv1:2;
+		unsigned long ps:6;
+		unsigned long key:24;
+		unsigned long rv2:32;
 	} *itir_reg;
 	struct gr_reg {
-		u64 p:1;
-		u64 rv1:1;
-		u64 ma:3;
-		u64 a:1;
-		u64 d:1;
-		u64 pl:2;
-		u64 ar:3;
-		u64 ppn:38;
-		u64 rv2:2;
-		u64 ed:1;
-		u64 ig:11;
+		unsigned long p:1;
+		unsigned long rv1:1;
+		unsigned long ma:3;
+		unsigned long a:1;
+		unsigned long d:1;
+		unsigned long pl:2;
+		unsigned long ar:3;
+		unsigned long ppn:38;
+		unsigned long rv2:2;
+		unsigned long ed:1;
+		unsigned long ig:11;
 	} *gr_reg;
 	struct rid_reg {
-		u64 ig1:1;
-		u64 rv1:1;
-		u64 ig2:6;
-		u64 rid:24;
-		u64 rv2:32;
+		unsigned long ig1:1;
+		unsigned long rv1:1;
+		unsigned long ig2:6;
+		unsigned long rid:24;
+		unsigned long rv2:32;
 	} *rid_reg;
 
 	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index eb98738..1376da4 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -91,7 +91,7 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 	   type. Normally this doesn't make any difference, but gives
 	   more gentle handling of IOMMU overflow. */
 	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
-		dev_info(dev, "Force SAC with mask %lx\n", mask);
+		dev_info(dev, "Force SAC with mask %llx\n", mask);
 		return 0;
 	}
 
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 8a06dc48..89ad0bb 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -312,7 +312,7 @@ typedef struct pfm_context {
 	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
 	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */
 
-	u64			ctx_saved_psr_up;	/* only contains psr.up value */
+	unsigned long		ctx_saved_psr_up;	/* only contains psr.up value */
 
 	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
 	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
@@ -5213,8 +5213,8 @@ pfm_end_notify_user(pfm_context_t *ctx)
 /*
  * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
-static void
-pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
+static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
+				unsigned long pmc0, struct pt_regs *regs)
 {
 	pfm_ovfl_arg_t *ovfl_arg;
 	unsigned long mask;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 714066a..1b23ec12 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -151,9 +151,9 @@ int num_rsvd_regions __initdata;
  * This routine does not assume the incoming segments are sorted.
  */
 int __init
-filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
+filter_rsvd_memory (u64 start, u64 end, void *arg)
 {
-	unsigned long range_start, range_end, prev_start;
+	u64 range_start, range_end, prev_start;
 	void (*func)(unsigned long, unsigned long, int);
 	int i;
 
@@ -191,7 +191,7 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
  * are not filtered out.
 */
 int __init
-filter_memory(unsigned long start, unsigned long end, void *arg)
+filter_memory(u64 start, u64 end, void *arg)
 {
 	void (*func)(unsigned long, unsigned long, int);
 
@@ -397,7 +397,7 @@ find_initrd (void)
 		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
 		initrd_end   = initrd_start+ia64_boot_param->initrd_size;
 
-		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
+		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
 		       initrd_start, ia64_boot_param->initrd_size);
 	}
 #endif
@@ -505,9 +505,9 @@ static int __init parse_elfcorehdr(char *arg)
 }
 early_param("elfcorehdr", parse_elfcorehdr);
 
-int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
+int __init reserve_elfcorehdr(u64 *start, u64 *end)
 {
-	unsigned long length;
+	u64 length;
 
 	/* We get the address using the kernel command line,
 	 * but the size is extracted from the EFI tables.
@@ -588,7 +588,7 @@ setup_arch (char **cmdline_p)
 	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
 #else
 	{
-		u64 num_phys_stacked;
+		unsigned long num_phys_stacked;
 
 		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
 			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
@@ -872,9 +872,9 @@ static void __cpuinit
 get_cache_info(void)
 {
 	unsigned long line_size, max = 1;
-	u64 l, levels, unique_caches;
-	pal_cache_config_info_t cci;
-	s64 status;
+	unsigned long l, levels, unique_caches;
+	pal_cache_config_info_t cci;
+	long status;
 
 	status = ia64_pal_cache_summary(&levels, &unique_caches);
 	if (status != 0) {
@@ -892,9 +892,9 @@ get_cache_info(void)
 		/* cache_type (data_or_unified)=2 */
 		status = ia64_pal_cache_config_info(l, 2, &cci);
 		if (status != 0) {
-			printk(KERN_ERR
-			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
-			       __func__, l, status);
+			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
+				"(l=%lu, 2) failed (status=%ld)\n",
+				__func__, l, status);
 			max = SMP_CACHE_BYTES;
 			/* The safest setup for "flush_icache_range()" */
 			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
@@ -914,10 +914,10 @@ get_cache_info(void)
 		/* cache_type (instruction)=1*/
 		status = ia64_pal_cache_config_info(l, 1, &cci);
 		if (status != 0) {
-			printk(KERN_ERR
-			       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
+			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
+				"(l=%lu, 1) failed (status=%ld)\n",
 				__func__, l, status);
-			/* The safest setup for "flush_icache_range()" */
+			/* The safest setup for flush_icache_range() */
 			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
 		}
 	}
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 5230eaa..f0c521b 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cachelin
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.
 */
-static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
+static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation);
 
 extern void cpu_halt (void);
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 2a70af4..de100aa 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -865,7 +865,7 @@ init_smp_config(void)
 void __devinit
 identify_siblings(struct cpuinfo_ia64 *c)
 {
-	s64 status;
+	long status;
 	u16 pltid;
 	pal_logical_to_physical_t info;
 
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 604c1a3..4990495 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -385,7 +385,7 @@ ia64_init_itm (void)
 
 static cycle_t itc_get_cycles(struct clocksource *cs)
 {
-	u64 lcycle, now, ret;
+	unsigned long lcycle, now, ret;
 
 	if (!itc_jitter_data.itc_jitter)
 		return get_cycles();
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index a8d61a3..bc80dff 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -306,10 +306,10 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
 
 static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
 {
-	u64 i, levels, unique_caches;
+	unsigned long i, levels, unique_caches;
 	pal_cache_config_info_t cci;
 	int j;
-	s64 status;
+	long status;
 	struct cache_info *this_cache;
 	int num_cache_leaves = 0;
 
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 8eff8c1..e6ac3c3 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -249,8 +249,7 @@ EXPORT_SYMBOL(uncached_free_page);
  * Called at boot time to build a map of pages that can be used for
  * memory special operations.
  */
-static int __init uncached_build_memmap(unsigned long uc_start,
-	unsigned long uc_end, void *arg)
+static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
 {
 	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
 	struct gen_pool *pool = uncached_pools[nid].pool;