Diffstat (limited to 'kernel/timer.c')
-rw-r--r--  kernel/timer.c  38
1 files changed, 23 insertions, 15 deletions
diff --git a/kernel/timer.c b/kernel/timer.c
index 6b812c0..67eaf0f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -81,9 +81,10 @@ struct tvec_t_base_s {
} ____cacheline_aligned_in_smp;
typedef struct tvec_t_base_s tvec_base_t;
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
+
tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
static inline void set_running_timer(tvec_base_t *base,
struct timer_list *timer)
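
The hunk above moves the DEFINE_PER_CPU() definition below boot_tvec_bases so that every CPU's tvec_bases pointer can be seeded with the address of the static boot object at compile time. A minimal userspace sketch of that idea, using invented names (NCPUS, boot_base, cpu_base) rather than the real kernel identifiers:

#include <stdio.h>

#define NCPUS 4                        /* illustrative CPU count */

struct base { int dummy; };

/* Static storage for the boot CPU, usable before any allocator runs. */
static struct base boot_base;

/*
 * Stand-in for the per-CPU pointers: every slot starts out pointing at
 * the static boot object, mirroring the "= { &boot_tvec_bases }"
 * initialiser added above.
 */
static struct base *cpu_base[NCPUS] = {
	[0 ... NCPUS - 1] = &boot_base
};

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%d base = %p\n", cpu, (void *)cpu_base[cpu]);
	return 0;
}
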
@@ -1224,28 +1225,36 @@ static int __devinit init_timers_cpu(int cpu)
{
int j;
tvec_base_t *base;
+ static char __devinitdata tvec_base_done[NR_CPUS];
- base = per_cpu(tvec_bases, cpu);
- if (!base) {
+ if (!tvec_base_done[cpu]) {
static char boot_done;
- /*
- * Cannot do allocation in init_timers as that runs before the
- * allocator initializes (and would waste memory if there are
- * more possible CPUs than will ever be installed/brought up).
- */
if (boot_done) {
+ /*
+ * The APs use this path later in boot
+ */
base = kmalloc_node(sizeof(*base), GFP_KERNEL,
cpu_to_node(cpu));
if (!base)
return -ENOMEM;
memset(base, 0, sizeof(*base));
+ per_cpu(tvec_bases, cpu) = base;
} else {
- base = &boot_tvec_bases;
+ /*
+ * This is for the boot CPU - we use compile-time
+ * static initialisation because per-cpu memory isn't
+ * ready yet and because the memory allocators are not
+ * initialised either.
+ */
boot_done = 1;
+ base = &boot_tvec_bases;
}
- per_cpu(tvec_bases, cpu) = base;
+ tvec_base_done[cpu] = 1;
+ } else {
+ base = per_cpu(tvec_bases, cpu);
}
+
spin_lock_init(&base->lock);
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
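
The reworked init_timers_cpu() above chooses a base for each CPU exactly once: the boot CPU keeps the static boot_tvec_bases (the allocators are not up that early), secondary CPUs get a kmalloc_node() base on their first bring-up, and a CPU that is later re-onlined reuses the pointer already recorded for it, while the lock and list initialisation still runs on every call. A compilable userspace sketch of that control flow, with invented names (setup_cpu_base, base_done, NCPUS) standing in for the kernel ones:

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

struct base { int ticks; };

static struct base boot_base;                     /* compile-time object */
static struct base *cpu_base[NCPUS] = {
	[0 ... NCPUS - 1] = &boot_base            /* seeded like tvec_bases */
};

/* Mirrors the first-time/repeat split in init_timers_cpu() above. */
static int setup_cpu_base(int cpu)
{
	static char base_done[NCPUS];
	static char boot_done;
	struct base *b;

	if (!base_done[cpu]) {
		if (boot_done) {
			/* Secondary CPUs come up after the allocator is ready. */
			b = calloc(1, sizeof(*b));
			if (!b)
				return -1;
			cpu_base[cpu] = b;
		} else {
			/* Boot CPU: no allocator yet, keep the static object. */
			boot_done = 1;
			b = &boot_base;
		}
		base_done[cpu] = 1;
	} else {
		/* CPU came back online: reuse the base chosen the first time. */
		b = cpu_base[cpu];
	}

	b->ticks = 0;   /* stands in for spin_lock_init() and the list setup */
	return 0;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		if (setup_cpu_base(cpu))
			return 1;
	printf("cpu0 uses static base: %s\n",
	       cpu_base[0] == &boot_base ? "yes" : "no");
	return 0;
}
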
@@ -1305,7 +1314,7 @@ static void __devinit migrate_timers(int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int __devinit timer_cpu_notify(struct notifier_block *self,
+static int timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1325,7 +1334,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __devinitdata timers_nb = {
+static struct notifier_block timers_nb = {
.notifier_call = timer_cpu_notify,
};
@@ -1455,7 +1464,7 @@ static void time_interpolator_update(long delta_nsec)
*/
if (jiffies % INTERPOLATOR_ADJUST == 0)
{
- if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
+ if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
time_interpolator->nsec_per_cyc--;
if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
time_interpolator->nsec_per_cyc++;
@@ -1479,8 +1488,7 @@ register_time_interpolator(struct time_interpolator *ti)
unsigned long flags;
/* Sanity check */
- if (ti->frequency == 0 || ti->mask == 0)
- BUG();
+ BUG_ON(ti->frequency == 0 || ti->mask == 0);
ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
spin_lock(&time_interpolator_lock);
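
The final hunk leaves the nsec_per_cyc scaling untouched: nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency is a fixed-point factor that lets a cycle delta be converted to nanoseconds with a multiply and a right shift instead of a division. A standalone illustration with made-up numbers (the 14318180 Hz rate and the shift of 10 are example values only, not taken from this file):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Example values only: a 14.318 MHz source and a 10-bit shift. */
	uint64_t frequency = 14318180;
	unsigned int shift = 10;

	/*
	 * Same scaling as the context line above:
	 * nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency.
	 */
	uint64_t nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency;

	/*
	 * Converting a cycle delta back to nanoseconds then needs only a
	 * multiply and a shift, keeping division out of the hot path.
	 */
	uint64_t cycles = 14318;   /* roughly 1 ms worth of cycles */
	uint64_t nsecs = (cycles * nsec_per_cyc) >> shift;

	printf("nsec_per_cyc = %llu (scaled by 2^%u)\n",
	       (unsigned long long)nsec_per_cyc, shift);
	printf("%llu cycles ~= %llu ns\n",
	       (unsigned long long)cycles, (unsigned long long)nsecs);
	return 0;
}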