author     Heiko Carstens <heiko.carstens@de.ibm.com>    2013-01-30 09:49:40 +0100
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2013-02-14 15:55:10 +0100
commit     1aae0560d160ee6ebef927a35e4f405306a079df
tree       70809629dccba1bf31e067dfcd3a20fe58b6d24e /arch
parent     58fece7827a7cc40e02bc68a7db8229166984893
s390/time: rename tod clock access functions
Fix a name clash with some common code device drivers by adding "tod"
to all TOD clock access function names.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
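
The renames are one-to-one; the full mapping, taken from the
arch/s390/include/asm/timex.h hunk below, is:

    set_clock()            -> set_tod_clock()
    store_clock()          -> store_tod_clock()
    get_clock()            -> get_tod_clock()
    get_clock_ext()        -> get_tod_clock_ext()
    get_clock_xt()         -> get_tod_clock_xt()
    get_clock_monotonic()  -> get_tod_clock_monotonic()

For callers the change is purely mechanical, e.g. "now = get_clock();"
becomes "now = get_tod_clock();" (the variable name here is illustrative,
not taken from the patch).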
Diffstat (limited to 'arch')
-rw-r--r--   arch/s390/appldata/appldata_mem.c      |  2
-rw-r--r--   arch/s390/appldata/appldata_net_sum.c  |  2
-rw-r--r--   arch/s390/appldata/appldata_os.c       |  2
-rw-r--r--   arch/s390/hypfs/hypfs_vm.c             |  2
-rw-r--r--   arch/s390/include/asm/timex.h          | 18
-rw-r--r--   arch/s390/kernel/debug.c               |  2
-rw-r--r--   arch/s390/kernel/early.c               |  6
-rw-r--r--   arch/s390/kernel/nmi.c                 |  2
-rw-r--r--   arch/s390/kernel/smp.c                 | 10
-rw-r--r--   arch/s390/kernel/time.c                | 26
-rw-r--r--   arch/s390/kernel/vtime.c               |  2
-rw-r--r--   arch/s390/kvm/interrupt.c              |  6
-rw-r--r--   arch/s390/lib/delay.c                  | 16
13 files changed, 48 insertions(+), 48 deletions(-)
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 02d9a1c..7ef60b5 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -108,7 +108,7 @@ static void appldata_get_mem_data(void *data)
 	mem_data->totalswap = P2K(val.totalswap);
 	mem_data->freeswap = P2K(val.freeswap);
-	mem_data->timestamp = get_clock();
+	mem_data->timestamp = get_tod_clock();
 	mem_data->sync_count_2++;
 }
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 1370e35..2d224b9 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -111,7 +111,7 @@ static void appldata_get_net_sum_data(void *data)
 	net_data->tx_dropped = tx_dropped;
 	net_data->collisions = collisions;
-	net_data->timestamp = get_clock();
+	net_data->timestamp = get_tod_clock();
 	net_data->sync_count_2++;
 }
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 87521ba..de8e2b3 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -156,7 +156,7 @@ static void appldata_get_os_data(void *data)
 		}
 		ops.size = new_size;
 	}
-	os_data->timestamp = get_clock();
+	os_data->timestamp = get_tod_clock();
 	os_data->sync_count_2++;
 }
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 4f6afaa..f364dcf 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -245,7 +245,7 @@ static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
 	d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
 	if (IS_ERR(d2fc))
 		return PTR_ERR(d2fc);
-	get_clock_ext(d2fc->hdr.tod_ext);
+	get_tod_clock_ext(d2fc->hdr.tod_ext);
 	d2fc->hdr.len = count * sizeof(struct diag2fc_data);
 	d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
 	d2fc->hdr.count = count;
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 4c060bb..8ad8af9 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -15,7 +15,7 @@
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
 /* Inline functions for clock register access. */
-static inline int set_clock(__u64 time)
+static inline int set_tod_clock(__u64 time)
 {
 	int cc;
@@ -27,7 +27,7 @@ static inline int set_clock(__u64 time)
 	return cc;
 }
-static inline int store_clock(__u64 *time)
+static inline int store_tod_clock(__u64 *time)
 {
 	int cc;
@@ -71,7 +71,7 @@ static inline void local_tick_enable(unsigned long long comp)
 typedef unsigned long long cycles_t;
-static inline unsigned long long get_clock(void)
+static inline unsigned long long get_tod_clock(void)
 {
 	unsigned long long clk;
@@ -83,21 +83,21 @@
 	return clk;
 }
-static inline void get_clock_ext(char *clk)
+static inline void get_tod_clock_ext(char *clk)
 {
 	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
-static inline unsigned long long get_clock_xt(void)
+static inline unsigned long long get_tod_clock_xt(void)
 {
 	unsigned char clk[16];
-	get_clock_ext(clk);
+	get_tod_clock_ext(clk);
 	return *((unsigned long long *)&clk[1]);
 }
 static inline cycles_t get_cycles(void)
 {
-	return (cycles_t) get_clock() >> 2;
+	return (cycles_t) get_tod_clock() >> 2;
 }
 int get_sync_clock(unsigned long long *clock);
@@ -123,9 +123,9 @@ extern u64 sched_clock_base_cc;
  * function, otherwise the returned value is not guaranteed to
  * be monotonic.
  */
-static inline unsigned long long get_clock_monotonic(void)
+static inline unsigned long long get_tod_clock_monotonic(void)
 {
-	return get_clock_xt() - sched_clock_base_cc;
+	return get_tod_clock_xt() - sched_clock_base_cc;
 }
 /**
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 4e8215e..09a94cd 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -867,7 +867,7 @@ static inline void debug_finish_entry(debug_info_t * id, debug_entry_t* active,
 			  int level, int exception)
 {
-	active->id.stck = get_clock();
+	active->id.stck = get_tod_clock();
 	active->id.fields.cpuid = smp_processor_id();
 	active->caller = __builtin_return_address(0);
 	active->id.fields.exception = exception;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 1f0eee9..1ee98e5 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -47,10 +47,10 @@ static void __init reset_tod_clock(void)
 {
 	u64 time;
-	if (store_clock(&time) == 0)
+	if (store_tod_clock(&time) == 0)
 		return;
 	/* TOD clock not running. Set the clock to Unix Epoch. */
-	if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
+	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
 		disabled_wait(0);
 	sched_clock_base_cc = TOD_UNIX_EPOCH;
@@ -173,7 +173,7 @@ static noinline __init void create_kernel_nss(void)
 	}
 	/* re-initialize cputime accounting. */
-	sched_clock_base_cc = get_clock();
+	sched_clock_base_cc = get_tod_clock();
 	S390_lowcore.last_update_clock = sched_clock_base_cc;
 	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
 	S390_lowcore.user_timer = 0;
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 7918fbe..504175e 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -293,7 +293,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 			 * retry this instruction.
 			 */
 			spin_lock(&ipd_lock);
-			tmp = get_clock();
+			tmp = get_tod_clock();
 			if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
 				ipd_count++;
 			else
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7433a2f..549c9d1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -365,16 +365,16 @@ void smp_emergency_stop(cpumask_t *cpumask)
 	u64 end;
 	int cpu;
-	end = get_clock() + (1000000UL << 12);
+	end = get_tod_clock() + (1000000UL << 12);
 	for_each_cpu(cpu, cpumask) {
 		struct pcpu *pcpu = pcpu_devices + cpu;
 		set_bit(ec_stop_cpu, &pcpu->ec_mask);
 		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, 0, NULL) == SIGP_CC_BUSY &&
-		       get_clock() < end)
+		       get_tod_clock() < end)
 			cpu_relax();
 	}
-	while (get_clock() < end) {
+	while (get_tod_clock() < end) {
 		for_each_cpu(cpu, cpumask)
 			if (pcpu_stopped(pcpu_devices + cpu))
 				cpumask_clear_cpu(cpu, cpumask);
@@ -694,7 +694,7 @@ static void __init smp_detect_cpus(void)
  */
 static void __cpuinit smp_start_secondary(void *cpuvoid)
 {
-	S390_lowcore.last_update_clock = get_clock();
+	S390_lowcore.last_update_clock = get_tod_clock();
 	S390_lowcore.restart_stack = (unsigned long) restart_stack;
 	S390_lowcore.restart_fn = (unsigned long) do_restart;
 	S390_lowcore.restart_data = 0;
@@ -947,7 +947,7 @@ static ssize_t show_idle_time(struct device *dev,
 	unsigned int sequence;
 	do {
-		now = get_clock();
+		now = get_tod_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_time = ACCESS_ONCE(idle->idle_time);
 		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0aa98db..876546b 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
  */
 unsigned long long notrace __kprobes sched_clock(void)
 {
-	return tod_to_ns(get_clock_monotonic());
+	return tod_to_ns(get_tod_clock_monotonic());
 }
 /*
@@ -194,7 +194,7 @@ static void stp_reset(void);
 void read_persistent_clock(struct timespec *ts)
 {
-	tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
+	tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
 }
 void read_boot_clock(struct timespec *ts)
@@ -204,7 +204,7 @@
 static cycle_t read_tod_clock(struct clocksource *cs)
 {
-	return get_clock();
+	return get_tod_clock();
 }
 static struct clocksource clocksource_tod = {
@@ -342,7 +342,7 @@ int get_sync_clock(unsigned long long *clock)
 	sw_ptr = &get_cpu_var(clock_sync_word);
 	sw0 = atomic_read(sw_ptr);
-	*clock = get_clock();
+	*clock = get_tod_clock();
 	sw1 = atomic_read(sw_ptr);
 	put_cpu_var(clock_sync_word);
 	if (sw0 == sw1 && (sw0 & 0x80000000U))
@@ -486,7 +486,7 @@ static void etr_reset(void)
 		.p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, .es = 0, .sl = 0 };
 	if (etr_setr(&etr_eacr) == 0) {
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 		set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
 		if (etr_port0_online && etr_port1_online)
 			set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
@@ -768,8 +768,8 @@ static int etr_sync_clock(void *data)
 	__ctl_set_bit(14, 21);
 	__ctl_set_bit(0, 29);
 	clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
-	old_clock = get_clock();
-	if (set_clock(clock) == 0) {
+	old_clock = get_tod_clock();
+	if (set_tod_clock(clock) == 0) {
 		__udelay(1);	/* Wait for the clock to start. */
 		__ctl_clear_bit(0, 29);
 		__ctl_clear_bit(14, 21);
@@ -845,7 +845,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
 		 * assume that this can have caused an stepping
 		 * port switch.
 		 */
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 		eacr.p0 = etr_port0_online;
 		if (!eacr.p0)
 			eacr.e0 = 0;
@@ -858,7 +858,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
 		 * assume that this can have caused an stepping
 		 * port switch.
 		 */
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 		eacr.p1 = etr_port1_online;
 		if (!eacr.p1)
 			eacr.e1 = 0;
@@ -974,7 +974,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
 	etr_eacr = eacr;
 	etr_setr(&etr_eacr);
 	if (dp_changed)
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 }
 /*
@@ -1012,7 +1012,7 @@ static void etr_work_fn(struct work_struct *work)
 	/* Store aib to get the current ETR status word. */
 	BUG_ON(etr_stetr(&aib) != 0);
 	etr_port0.esw = etr_port1.esw = aib.esw;	/* Copy status word. */
-	now = get_clock();
+	now = get_tod_clock();
 	/*
 	 * Update the port information if the last stepping port change
@@ -1537,10 +1537,10 @@ static int stp_sync_clock(void *data)
 	if (stp_info.todoff[0] || stp_info.todoff[1] || stp_info.todoff[2] || stp_info.todoff[3] || stp_info.tmd != 2) {
-		old_clock = get_clock();
+		old_clock = get_tod_clock();
 		rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
 		if (rc == 0) {
-			delta = adjust_time(old_clock, get_clock(), 0);
+			delta = adjust_time(old_clock, get_tod_clock(), 0);
 			fixup_clock_comparator(delta);
 			rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e84b8b6..8911a16 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu)
 	unsigned int sequence;
 	do {
-		now = get_clock();
+		now = get_tod_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
 		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 82c481d..87418b5 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -362,7 +362,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	}
 	if ((!rc) && (vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch)) {
+		get_tod_clock() + vcpu->arch.sie_block->epoch)) {
 		if ((!psw_extint_disabled(vcpu)) && (vcpu->arch.sie_block->gcr[0] & 0x800ul))
 			rc = 1;
@@ -402,7 +402,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		goto no_timer;
 	}
-	now = get_clock() + vcpu->arch.sie_block->epoch;
+	now = get_tod_clock() + vcpu->arch.sie_block->epoch;
 	if (vcpu->arch.sie_block->ckc < now) {
 		__unset_cpu_idle(vcpu);
 		return 0;
@@ -492,7 +492,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	}
 	if ((vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch))
+		get_tod_clock() + vcpu->arch.sie_block->epoch))
 		__try_deliver_ckc_interrupt(vcpu);
 	if (atomic_read(&fi->active)) {
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 42d0cf8..c61b9fa 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -32,7 +32,7 @@ static void __udelay_disabled(unsigned long long usecs)
 	unsigned long cr0, cr6, new;
 	u64 clock_saved, end;
-	end = get_clock() + (usecs << 12);
+	end = get_tod_clock() + (usecs << 12);
 	clock_saved = local_tick_disable();
 	__ctl_store(cr0, 0, 0);
 	__ctl_store(cr6, 6, 6);
@@ -45,7 +45,7 @@ static void __udelay_disabled(unsigned long long usecs)
 		set_clock_comparator(end);
 		vtime_stop_cpu();
 		local_irq_disable();
-	} while (get_clock() < end);
+	} while (get_tod_clock() < end);
 	lockdep_on();
 	__ctl_load(cr0, 0, 0);
 	__ctl_load(cr6, 6, 6);
@@ -56,7 +56,7 @@ static void __udelay_enabled(unsigned long long usecs)
 {
 	u64 clock_saved, end;
-	end = get_clock() + (usecs << 12);
+	end = get_tod_clock() + (usecs << 12);
 	do {
 		clock_saved = 0;
 		if (end < S390_lowcore.clock_comparator) {
@@ -67,7 +67,7 @@ static void __udelay_enabled(unsigned long long usecs)
 		local_irq_disable();
 		if (clock_saved)
 			local_tick_enable(clock_saved);
-	} while (get_clock() < end);
+	} while (get_tod_clock() < end);
 }
 /*
@@ -111,8 +111,8 @@ void udelay_simple(unsigned long long usecs)
 {
 	u64 end;
-	end = get_clock() + (usecs << 12);
-	while (get_clock() < end)
+	end = get_tod_clock() + (usecs << 12);
+	while (get_tod_clock() < end)
 		cpu_relax();
 }
@@ -122,10 +122,10 @@ void __ndelay(unsigned long long nsecs)
 	nsecs <<= 9;
 	do_div(nsecs, 125);
-	end = get_clock() + nsecs;
+	end = get_tod_clock() + nsecs;
 	if (nsecs & ~0xfffUL)
 		__udelay(nsecs >> 12);
-	while (get_clock() < end)
+	while (get_tod_clock() < end)
 		barrier();
 }
 EXPORT_SYMBOL(__ndelay);
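
A note on the recurring "<< 12" / ">> 12" arithmetic in the delay.c, smp.c
and nmi.c hunks above: the z/Architecture TOD clock increments bit 51 once
per microsecond, so one microsecond corresponds to 1 << 12 TOD clock units.
A minimal standalone sketch of that conversion (ordinary user-space C, not
kernel code; the sample value is made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t usecs = 1000000;               /* one second, in microseconds */
            uint64_t tod_units = usecs << 12;       /* microseconds -> TOD clock units */
            uint64_t back = tod_units >> 12;        /* TOD clock units -> microseconds */

            printf("%llu us == %llu TOD units (round trip %llu us)\n",
                   (unsigned long long)usecs,
                   (unsigned long long)tod_units,
                   (unsigned long long)back);
            return 0;
    }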