Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--   arch/s390/kernel/asm-offsets.c      2
-rw-r--r--   arch/s390/kernel/compat_linux.c    18
-rw-r--r--   arch/s390/kernel/compat_wrapper.S  18
-rw-r--r--   arch/s390/kernel/early.c           19
-rw-r--r--   arch/s390/kernel/entry.S           13
-rw-r--r--   arch/s390/kernel/entry64.S         13
-rw-r--r--   arch/s390/kernel/head.S            18
-rw-r--r--   arch/s390/kernel/ipl.c              1
-rw-r--r--   arch/s390/kernel/nmi.c              5
-rw-r--r--   arch/s390/kernel/setup.c           12
-rw-r--r--   arch/s390/kernel/smp.c              4
-rw-r--r--   arch/s390/kernel/syscalls.S         2
-rw-r--r--   arch/s390/kernel/time.c            79
-rw-r--r--   arch/s390/kernel/vmlinux.lds.S      4
-rw-r--r--   arch/s390/kernel/vtime.c           83
15 files changed, 207 insertions, 84 deletions
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 67a6001..fa9905c 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -27,6 +27,8 @@ int main(void)
 	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
 	DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
 	DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
+	DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
+	DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer));
 	BLANK();
 	DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
 	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 6cc87d8..002c70d 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -702,20 +702,12 @@ asmlinkage long sys32_fstatat64(unsigned int dfd, char __user *filename,
 			struct stat64_emu31 __user* statbuf, int flag)
 {
 	struct kstat stat;
-	int error = -EINVAL;
-
-	if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
-		goto out;
-
-	if (flag & AT_SYMLINK_NOFOLLOW)
-		error = vfs_lstat_fd(dfd, filename, &stat);
-	else
-		error = vfs_stat_fd(dfd, filename, &stat);
+	int error;
 
-	if (!error)
-		error = cp_stat64(statbuf, &stat);
-out:
-	return error;
+	error = vfs_fstatat(dfd, filename, &stat, flag);
+	if (error)
+		return error;
+	return cp_stat64(statbuf, &stat);
 }
 
 /*
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 87cf5a7..fb38af6 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1805,3 +1805,21 @@ compat_sys_keyctl_wrapper:
 	llgfr	%r5,%r5			# u32
 	llgfr	%r6,%r6			# u32
 	jg	compat_sys_keyctl	# branch to system call
+
+	.globl	compat_sys_preadv_wrapper
+compat_sys_preadv_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgtr	%r3,%r3			# compat_iovec *
+	llgfr	%r4,%r4			# unsigned long
+	llgfr	%r5,%r5			# u32
+	llgfr	%r6,%r6			# u32
+	jg	compat_sys_preadv	# branch to system call
+
+	.globl	compat_sys_pwritev_wrapper
+compat_sys_pwritev_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgtr	%r3,%r3			# compat_iovec *
+	llgfr	%r4,%r4			# unsigned long
+	llgfr	%r5,%r5			# u32
+	llgfr	%r6,%r6			# u32
+	jg	compat_sys_pwritev	# branch to system call
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 4d221c8..cf09948 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -34,8 +34,25 @@
 
 char kernel_nss_name[NSS_NAME_SIZE + 1];
 
+static unsigned long machine_flags;
+
 static void __init setup_boot_command_line(void);
 
+/*
+ * Get the TOD clock running.
+ */
+static void __init reset_tod_clock(void)
+{
+	u64 time;
+
+	if (store_clock(&time) == 0)
+		return;
+	/* TOD clock not running. Set the clock to Unix Epoch. */
+	if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
+		disabled_wait(0);
+
+	sched_clock_base_cc = TOD_UNIX_EPOCH;
+}
+
 #ifdef CONFIG_SHARED_KERNEL
 int __init savesys_ipl_nss(char *cmd, const int cmdlen);
@@ -370,6 +387,7 @@ static void __init setup_boot_command_line(void)
  */
 void __init startup_init(void)
 {
+	reset_tod_clock();
 	ipl_save_parameters();
 	rescue_initrd();
 	clear_bss_section();
@@ -391,5 +409,6 @@ void __init startup_init(void)
 	setup_hpage();
 	sclp_facilities_detect();
 	detect_memory_layout(memory_chunk);
+	S390_lowcore.machine_flags = machine_flags;
 	lockdep_on();
 }
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1268aa2..f3e2759 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -837,16 +837,29 @@ mcck_return:
 	__CPUINIT
 	.globl restart_int_handler
 restart_int_handler:
+	basr	%r1,0
+restart_base:
+	spt	restart_vtime-restart_base(%r1)
+	stck	__LC_LAST_UPDATE_CLOCK
+	mvc	__LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
+	mvc	__LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
 	l	%r15,__LC_SAVE_AREA+60	# load ksp
 	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
 	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
 	lm	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
+	l	%r1,__LC_THREAD_INFO
+	mvc	__LC_USER_TIMER(8),__TI_user_timer(%r1)
+	mvc	__LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
+	xc	__LC_STEAL_TIMER(8),__LC_STEAL_TIMER
 	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
 	basr	%r14,0
 	l	%r14,restart_addr-.(%r14)
 	br	%r14			# branch to start_secondary
 restart_addr:
 	.long	start_secondary
+	.align	8
+restart_vtime:
+	.long	0x7fffffff,0xffffffff
 	.previous
 #else
 /*
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index c6fbde1..84a1058 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -831,14 +831,27 @@ mcck_return:
 	__CPUINIT
 	.globl restart_int_handler
 restart_int_handler:
+	basr	%r1,0
+restart_base:
+	spt	restart_vtime-restart_base(%r1)
+	stck	__LC_LAST_UPDATE_CLOCK
+	mvc	__LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
+	mvc	__LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
 	lg	%r15,__LC_SAVE_AREA+120	# load ksp
 	lghi	%r10,__LC_CREGS_SAVE_AREA
 	lctlg	%c0,%c15,0(%r10)	# get new ctl regs
 	lghi	%r10,__LC_AREGS_SAVE_AREA
 	lam	%a0,%a15,0(%r10)
 	lmg	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
+	lg	%r1,__LC_THREAD_INFO
+	mvc	__LC_USER_TIMER(8),__TI_user_timer(%r1)
+	mvc	__LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
+	xc	__LC_STEAL_TIMER(8),__LC_STEAL_TIMER
 	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
 	jg	start_secondary
+	.align	8
+restart_vtime:
+	.long	0x7fffffff,0xffffffff
 	.previous
 #else
 /*
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 1046c2c..22596d7 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -23,6 +23,7 @@
  *
  */
 
+#include <linux/init.h>
 #include <asm/setup.h>
 #include <asm/lowcore.h>
 #include <asm/asm-offsets.h>
@@ -35,7 +36,7 @@
 #define ARCH_OFFSET	0
 #endif
 
-.section ".text.head","ax"
+__HEAD
 #ifndef CONFIG_IPL
 	.org   0
 	.long  0x00080000,0x80000000+startup	# Just a restart PSW
@@ -471,7 +472,12 @@
 startup:basr	%r13,0			# get base
 .LPG0:	xc	0x200(256),0x200	# partially clear lowcore
 	xc	0x300(256),0x300
-
+	l	%r1,5f-.LPG0(%r13)
+	stck	0(%r1)
+	spt	6f-.LPG0(%r13)
+	mvc	__LC_LAST_UPDATE_CLOCK(8),0(%r1)
+	mvc	__LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
+	mvc	__LC_EXIT_TIMER(8),5f-.LPG0(%r13)
 #ifndef CONFIG_MARCH_G5
 	# check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10}
 	stidp	__LC_CPUID		# store cpuid
@@ -496,9 +502,13 @@ startup:basr	%r13,0			# get base
 	brct	%r0,0b
 #endif
 
-	l	%r13,0f-.LPG0(%r13)
+	l	%r13,4f-.LPG0(%r13)
 	b	0(%r13)
-0:	.long	startup_continue
+	.align	4
+4:	.long	startup_continue
+5:	.long	sched_clock_base_cc
+	.align	8
+6:	.long	0x7fffffff,0xffffffff
 
 #
 # params at 10400 (setup.h)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 6f3711a..b8bf4b1 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -23,6 +23,7 @@
 #include <asm/ebcdic.h>
 #include <asm/reset.h>
 #include <asm/sclp.h>
+#include <asm/sigp.h>
 #include <asm/checksum.h>
 
 #define IPL_PARM_BLOCK_VERSION 0
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 4bfdc42..28cf196 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -10,6 +10,7 @@
 
 #include <linux/init.h>
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/time.h>
 #include <linux/module.h>
 #include <asm/lowcore.h>
@@ -253,7 +254,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	struct mci *mci;
 	int umode;
 
-	lockdep_off();
+	nmi_enter();
 	s390_idle_check();
 
 	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
@@ -363,7 +364,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 		mcck->warning = 1;
 		set_thread_flag(TIF_MCCK_PENDING);
 	}
-	lockdep_on();
+	nmi_exit();
 }
 
 static int __init machine_check_init(void)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 06201b9..7402b6a 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -82,9 +82,6 @@
 EXPORT_SYMBOL(console_devno);
 unsigned int console_irq = -1;
 EXPORT_SYMBOL(console_irq);
 
-unsigned long machine_flags;
-EXPORT_SYMBOL(machine_flags);
-
 unsigned long elf_hwcap = 0;
 char elf_platform[ELF_PLATFORM_SIZE];
@@ -426,6 +423,7 @@ setup_lowcore(void)
 		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
 	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
 	lc->thread_info = (unsigned long) &init_thread_union;
+	lc->machine_flags = S390_lowcore.machine_flags;
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE) {
 		lc->extended_save_area_addr = (__u32)
@@ -436,6 +434,14 @@ setup_lowcore(void)
 #else
 	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
 #endif
+	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
+	lc->async_enter_timer = S390_lowcore.async_enter_timer;
+	lc->exit_timer = S390_lowcore.exit_timer;
+	lc->user_timer = S390_lowcore.user_timer;
+	lc->system_timer = S390_lowcore.system_timer;
+	lc->steal_timer = S390_lowcore.steal_timer;
+	lc->last_update_timer = S390_lowcore.last_update_timer;
+	lc->last_update_clock = S390_lowcore.last_update_clock;
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 006ed50..a985a3b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -571,6 +571,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	cpu_lowcore->current_task = (unsigned long) idle;
 	cpu_lowcore->cpu_nr = cpu;
 	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
+	cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
 	eieio();
 
 	while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -590,7 +591,8 @@ static int __init setup_possible_cpus(char *s)
 	int pcpus, cpu;
 
 	pcpus = simple_strtoul(s, NULL, 0);
-	for (cpu = 0; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
+	init_cpu_possible(cpumask_of(0));
+	for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
 		set_cpu_possible(cpu, true);
 	return 0;
 }
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index fe5b25a..2c7739f 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -336,3 +336,5 @@ SYSCALL(sys_inotify_init1,sys_inotify_init1,sys_inotify_init1_wrapper)
 SYSCALL(sys_pipe2,sys_pipe2,sys_pipe2_wrapper) /* 325 */
 SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper)
 SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
+SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
+SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index f72d410..ef596d0 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -52,9 +52,6 @@
 #define USECS_PER_JIFFY     ((unsigned long) 1000000/HZ)
 #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
 
-/* The value of the TOD clock for 1.1.1970. */
-#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
-
 /*
  * Create a small time difference between the timer interrupts
  * on the different cpus to avoid lock contention.
@@ -63,9 +60,10 @@
 #define TICK_SIZE tick
 
+u64 sched_clock_base_cc = -1;	/* Force to data section. */
+
 static ext_int_info_t ext_int_info_cc;
 static ext_int_info_t ext_int_etr_cc;
-static u64 sched_clock_base_cc;
 
 static DEFINE_PER_CPU(struct clock_event_device, comparators);
 
 /*
@@ -195,25 +193,15 @@ static void timing_alert_interrupt(__u16 code)
 static void etr_reset(void);
 static void stp_reset(void);
 
-/*
- * Get the TOD clock running.
- */
-static u64 __init reset_tod_clock(void)
+unsigned long read_persistent_clock(void)
 {
-	u64 time;
-
-	etr_reset();
-	stp_reset();
-	if (store_clock(&time) == 0)
-		return time;
-	/* TOD clock not running. Set the clock to Unix Epoch. */
-	if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
-		panic("TOD clock not operational.");
+	struct timespec ts;
 
-	return TOD_UNIX_EPOCH;
+	tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, &ts);
+	return ts.tv_sec;
 }
 
-static cycle_t read_tod_clock(void)
+static cycle_t read_tod_clock(struct clocksource *cs)
 {
 	return get_clock();
 }
@@ -265,12 +253,13 @@ void update_vsyscall_tz(void)
  */
 void __init time_init(void)
 {
-	sched_clock_base_cc = reset_tod_clock();
+	struct timespec ts;
+	unsigned long flags;
+	cycle_t now;
 
-	/* set xtime */
-	tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &xtime);
-	set_normalized_timespec(&wall_to_monotonic,
-				-xtime.tv_sec, -xtime.tv_nsec);
+	/* Reset time synchronization interfaces. */
+	etr_reset();
+	stp_reset();
 
 	/* request the clock comparator external interrupt */
 	if (register_early_external_interrupt(0x1004,
@@ -278,17 +267,38 @@ void __init time_init(void)
 					      &ext_int_info_cc) != 0)
 		panic("Couldn't request external interrupt 0x1004");
 
-	if (clocksource_register(&clocksource_tod) != 0)
-		panic("Could not register TOD clock source");
-
 	/* request the timing alert external interrupt */
 	if (register_early_external_interrupt(0x1406,
 					      timing_alert_interrupt,
 					      &ext_int_etr_cc) != 0)
 		panic("Couldn't request external interrupt 0x1406");
 
+	if (clocksource_register(&clocksource_tod) != 0)
+		panic("Could not register TOD clock source");
+
+	/*
+	 * The TOD clock is an accurate clock. The xtime should be
+	 * initialized in a way that the difference between TOD and
+	 * xtime is reasonably small. Too bad that timekeeping_init
+	 * sets xtime.tv_nsec to zero. In addition the clock source
+	 * change from the jiffies clock source to the TOD clock
+	 * source add another error of up to 1/HZ second. The same
+	 * function sets wall_to_monotonic to a value that is too
+	 * small for /proc/uptime to be accurate.
+	 * Reset xtime and wall_to_monotonic to sane values.
+	 */
+	write_seqlock_irqsave(&xtime_lock, flags);
+	now = get_clock();
+	tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
+	clocksource_tod.cycle_last = now;
+	clocksource_tod.raw_time = xtime;
+	tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
+	set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
+	write_sequnlock_irqrestore(&xtime_lock, flags);
+
 	/* Enable TOD clock interrupts on the boot cpu. */
 	init_cpu_timer();
 
+	/* Enable cpu timer interrupts on the boot cpu. */
 	vtime_init();
 }
@@ -1423,6 +1433,7 @@ static void *stp_page;
 static void stp_work_fn(struct work_struct *work);
 static DEFINE_MUTEX(stp_work_mutex);
 static DECLARE_WORK(stp_work, stp_work_fn);
+static struct timer_list stp_timer;
 
 static int __init early_parse_stp(char *p)
 {
@@ -1454,10 +1465,16 @@ static void __init stp_reset(void)
 	}
 }
 
+static void stp_timeout(unsigned long dummy)
+{
+	queue_work(time_sync_wq, &stp_work);
+}
+
 static int __init stp_init(void)
 {
 	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
 		return 0;
+	setup_timer(&stp_timer, stp_timeout, 0UL);
 	time_init_wq();
 	if (!stp_online)
 		return 0;
@@ -1565,6 +1582,7 @@ static void stp_work_fn(struct work_struct *work)
 
 	if (!stp_online) {
 		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
+		del_timer_sync(&stp_timer);
 		goto out_unlock;
 	}
 
@@ -1586,6 +1604,13 @@ static void stp_work_fn(struct work_struct *work)
 	stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
 	put_online_cpus();
 
+	if (!check_sync_clock())
+		/*
+		 * There is a usable clock but the synchonization failed.
+		 * Retry after a second.
+		 */
+		mod_timer(&stp_timer, jiffies + HZ);
+
 out_unlock:
 	mutex_unlock(&stp_work_mutex);
 }
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 7a2063e..89399b8 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -29,8 +29,8 @@ SECTIONS
 	. = 0x00000000;
 	.text : {
 	_text = .;		/* Text and read-only data */
-		*(.text.head)
-	TEXT_TEXT
+		HEAD_TEXT
+		TEXT_TEXT
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index ecf0304..c87f59b 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -134,6 +134,8 @@ void vtime_start_cpu(void)
 	/* Account time spent with enabled wait psw loaded as idle time. */
 	idle_time = S390_lowcore.int_clock - idle->idle_enter;
 	account_idle_time(idle_time);
+	S390_lowcore.steal_timer +=
+		idle->idle_enter - S390_lowcore.last_update_clock;
 	S390_lowcore.last_update_clock = S390_lowcore.int_clock;
 
 	/* Account system time spent going idle. */
@@ -238,6 +240,22 @@ void vtime_stop_cpu(void)
 	}
 }
 
+cputime64_t s390_get_idle_time(int cpu)
+{
+	struct s390_idle_data *idle;
+	unsigned long long now, idle_time, idle_enter;
+
+	idle = &per_cpu(s390_idle, cpu);
+	spin_lock(&idle->lock);
+	now = get_clock();
+	idle_time = 0;
+	idle_enter = idle->idle_enter;
+	if (idle_enter != 0ULL && idle_enter < now)
+		idle_time = now - idle_enter;
+	spin_unlock(&idle->lock);
+	return idle_time;
+}
+
 /*
  * Sorted add to a list. List is linear searched until first bigger
  * element is found.
@@ -425,17 +443,7 @@ void add_virt_timer_periodic(void *new)
 }
 EXPORT_SYMBOL(add_virt_timer_periodic);
 
-/*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on, e.g. by smp_call_function_single()
- *
- * The original mod_timer adds the timer if it is not pending. For
- * compatibility we do the same. The timer will be added on the current
- * CPU as a oneshot timer.
- *
- * returns whether it has modified a pending timer (1) or not (0)
- */
-int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
+int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
 {
 	struct vtimer_queue *vq;
 	unsigned long flags;
@@ -444,39 +452,35 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
 	BUG_ON(!timer->function);
 	BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
 
-	/*
-	 * This is a common optimization triggered by the
-	 * networking code - if the timer is re-modified
-	 * to be the same thing then just return:
-	 */
 	if (timer->expires == expires && vtimer_pending(timer))
 		return 1;
 
 	cpu = get_cpu();
 	vq = &per_cpu(virt_cpu_timer, cpu);
 
-	/* check if we run on the right CPU */
-	BUG_ON(timer->cpu != cpu);
-
 	/* disable interrupts before test if timer is pending */
 	spin_lock_irqsave(&vq->lock, flags);
 
 	/* if timer isn't pending add it on the current CPU */
 	if (!vtimer_pending(timer)) {
 		spin_unlock_irqrestore(&vq->lock, flags);
-		/* we do not activate an interval timer with mod_virt_timer */
-		timer->interval = 0;
+
+		if (periodic)
+			timer->interval = expires;
+		else
+			timer->interval = 0;
 		timer->expires = expires;
 		timer->cpu = cpu;
 		internal_add_vtimer(timer);
 		return 0;
 	}
 
+	/* check if we run on the right CPU */
+	BUG_ON(timer->cpu != cpu);
+
 	list_del_init(&timer->entry);
 	timer->expires = expires;
-
-	/* also change the interval if we have an interval timer */
-	if (timer->interval)
+	if (periodic)
 		timer->interval = expires;
 
 	/* the timer can't expire anymore so we can release the lock */
@@ -484,9 +488,32 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
 	internal_add_vtimer(timer);
 	return 1;
 }
+
+/*
+ * If we change a pending timer the function must be called on the CPU
+ * where the timer is running on.
+ *
+ * returns whether it has modified a pending timer (1) or not (0)
+ */
+int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
+{
+	return __mod_vtimer(timer, expires, 0);
+}
 EXPORT_SYMBOL(mod_virt_timer);
 
 /*
+ * If we change a pending timer the function must be called on the CPU
+ * where the timer is running on.
+ *
+ * returns whether it has modified a pending timer (1) or not (0)
+ */
+int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
+{
+	return __mod_vtimer(timer, expires, 1);
+}
+EXPORT_SYMBOL(mod_virt_timer_periodic);
+
+/*
  * delete a virtual timer
  *
  * returns whether the deleted timer was pending (1) or not (0)
@@ -516,16 +543,8 @@ EXPORT_SYMBOL(del_virt_timer);
  */
 void init_cpu_vtimer(void)
 {
-	struct thread_info *ti = current_thread_info();
 	struct vtimer_queue *vq;
 
-	S390_lowcore.user_timer = ti->user_timer;
-	S390_lowcore.system_timer = ti->system_timer;
-
-	/* kick the virtual timer */
-	asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
-	asm volatile ("STPT %0" : "=m" (S390_lowcore.last_update_timer));
-
 	/* initialize per cpu vtimer structure */
 	vq = &__get_cpu_var(virt_cpu_timer);
 	INIT_LIST_HEAD(&vq->list);