author     Paul Mackerras <paulus@samba.org>    2006-03-09 14:32:05 +1100
committer  Paul Mackerras <paulus@samba.org>    2006-03-09 14:32:05 +1100
commit     516450179454de9e689e0a53ed8f34b896e8651c (patch)
tree       78eae2f77de6cd39b18c7393fc5854456fc3fb1f /kernel
parent     6749c5507388f3fc3719f57a54b540ee83f6661a (diff)
parent     0d514f040ac6629311974889d5b96bcf21c6461a (diff)
download   op-kernel-dev-516450179454de9e689e0a53ed8f34b896e8651c.zip
           op-kernel-dev-516450179454de9e689e0a53ed8f34b896e8651c.tar.gz
Merge ../linux-2.6
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c    35
-rw-r--r--  kernel/rcupdate.c   76
-rw-r--r--  kernel/sched.c       3
-rw-r--r--  kernel/sysctl.c     19
-rw-r--r--  kernel/timer.c      22
5 files changed, 134 insertions, 21 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 5ae51f1..14bc9cf 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -505,6 +505,41 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
return rem;
}
+#ifdef CONFIG_NO_IDLE_HZ
+/**
+ * hrtimer_get_next_event - get the time until next expiry event
+ *
+ * Returns the delta to the next expiry event or KTIME_MAX if no timer
+ * is pending.
+ */
+ktime_t hrtimer_get_next_event(void)
+{
+ struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
+ ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) {
+ struct hrtimer *timer;
+
+ spin_lock_irqsave(&base->lock, flags);
+ if (!base->first) {
+ spin_unlock_irqrestore(&base->lock, flags);
+ continue;
+ }
+ timer = rb_entry(base->first, struct hrtimer, node);
+ delta.tv64 = timer->expires.tv64;
+ spin_unlock_irqrestore(&base->lock, flags);
+ delta = ktime_sub(delta, base->get_time());
+ if (delta.tv64 < mindelta.tv64)
+ mindelta.tv64 = delta.tv64;
+ }
+ if (mindelta.tv64 < 0)
+ mindelta.tv64 = 0;
+ return mindelta;
+}
+#endif
+
/**
* hrtimer_init - initialize a timer to the given clock
*
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 0cf8146..8cf15a5 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -67,7 +67,43 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int maxbatch = 10000;
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+#ifdef CONFIG_SMP
+static int rsinterval = 1000;
+#endif
+
+static atomic_t rcu_barrier_cpu_count;
+static struct semaphore rcu_barrier_sema;
+static struct completion rcu_barrier_completion;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ int cpu;
+ cpumask_t cpumask;
+ set_need_resched();
+ if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
+ rdp->last_rs_qlen = rdp->qlen;
+ /*
+ * Don't send IPI to itself. With irqs disabled,
+ * rdp->cpu is the current cpu.
+ */
+ cpumask = rcp->cpumask;
+ cpu_clear(rdp->cpu, cpumask);
+ for_each_cpu_mask(cpu, cpumask)
+ smp_send_reschedule(cpu);
+ }
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ set_need_resched();
+}
+#endif
/**
* call_rcu - Queue an RCU callback for invocation after a grace period.
@@ -92,17 +128,13 @@ void fastcall call_rcu(struct rcu_head *head,
rdp = &__get_cpu_var(rcu_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
-
- if (unlikely(++rdp->count > 10000))
- set_need_resched();
-
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = INT_MAX;
+ force_quiescent_state(rdp, &rcu_ctrlblk);
+ }
local_irq_restore(flags);
}
-static atomic_t rcu_barrier_cpu_count;
-static struct semaphore rcu_barrier_sema;
-static struct completion rcu_barrier_completion;
-
/**
* call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
@@ -131,12 +163,12 @@ void fastcall call_rcu_bh(struct rcu_head *head,
rdp = &__get_cpu_var(rcu_bh_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
- rdp->count++;
-/*
- * Should we directly call rcu_do_batch() here ?
- * if (unlikely(rdp->count > 10000))
- * rcu_do_batch(rdp);
- */
+
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = INT_MAX;
+ force_quiescent_state(rdp, &rcu_bh_ctrlblk);
+ }
+
local_irq_restore(flags);
}
@@ -199,10 +231,12 @@ static void rcu_do_batch(struct rcu_data *rdp)
next = rdp->donelist = list->next;
list->func(list);
list = next;
- rdp->count--;
- if (++count >= maxbatch)
+ rdp->qlen--;
+ if (++count >= rdp->blimit)
break;
}
+ if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+ rdp->blimit = blimit;
if (!rdp->donelist)
rdp->donetail = &rdp->donelist;
else
@@ -473,6 +507,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
rdp->quiescbatch = rcp->completed;
rdp->qs_pending = 0;
rdp->cpu = cpu;
+ rdp->blimit = blimit;
}
static void __devinit rcu_online_cpu(int cpu)
@@ -567,7 +602,12 @@ void synchronize_kernel(void)
synchronize_rcu();
}
-module_param(maxbatch, int, 0);
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+#ifdef CONFIG_SMP
+module_param(rsinterval, int, 0);
+#endif
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL(call_rcu); /* WARNING: GPL-only in April 2006. */
EXPORT_SYMBOL(call_rcu_bh); /* WARNING: GPL-only in April 2006. */
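
The rcupdate.c changes replace the single maxbatch cutoff with three tunables: blimit (callbacks processed per batch), qhimark (queue length at which batching becomes unlimited and other CPUs are prodded toward a quiescent state), and qlowmark (queue length at which normal batching resumes). A userspace sketch of that policy, with the queue reduced to a counter and no real callbacks or IPIs:

/*
 * Userspace sketch of the batching policy introduced above (queue length
 * accounting only, no real RCU): enqueue bumps qlen, crossing qhimark
 * lifts the per-batch limit, and draining back below qlowmark restores it.
 * The numbers mirror the new module parameters.
 */
#include <limits.h>
#include <stdio.h>

static int blimit   = 10;     /* normal callbacks processed per batch */
static int qhimark  = 10000;  /* queue length that triggers "drain hard" */
static int qlowmark = 100;    /* queue length at which throttling resumes */

struct fake_rdp {
        int qlen;    /* callbacks currently queued on this CPU */
        int limit;   /* current per-batch limit (blimit or INT_MAX) */
};

static void enqueue_callback(struct fake_rdp *rdp)
{
        if (++rdp->qlen > qhimark)
                rdp->limit = INT_MAX;   /* stop throttling the batch size */
}

static void do_batch(struct fake_rdp *rdp)
{
        int done = 0;

        while (rdp->qlen > 0 && done < rdp->limit) {
                rdp->qlen--;            /* "invoke" one callback */
                done++;
        }
        if (rdp->limit == INT_MAX && rdp->qlen <= qlowmark)
                rdp->limit = blimit;    /* backlog drained, throttle again */
        printf("processed %d, %d still queued, limit now %d\n",
               done, rdp->qlen, rdp->limit);
}

int main(void)
{
        struct fake_rdp rdp = { .qlen = 0, .limit = blimit };
        int i;

        for (i = 0; i < 10050; i++)
                enqueue_callback(&rdp);
        do_batch(&rdp);         /* drains everything: the limit was lifted */
        do_batch(&rdp);         /* nothing left, limit is back to blimit */
        return 0;
}
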
diff --git a/kernel/sched.c b/kernel/sched.c
index 12d291b..e82c99f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4028,6 +4028,8 @@ static inline void __cond_resched(void)
*/
if (unlikely(preempt_count()))
return;
+ if (unlikely(system_state != SYSTEM_RUNNING))
+ return;
do {
add_preempt_count(PREEMPT_ACTIVE);
schedule();
@@ -4333,6 +4335,7 @@ void __devinit init_idle(task_t *idle, int cpu)
runqueue_t *rq = cpu_rq(cpu);
unsigned long flags;
+ idle->timestamp = sched_clock();
idle->sleep_avg = 0;
idle->array = NULL;
idle->prio = MAX_PRIO;
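
The sched.c hunks add a second early return to __cond_resched() (don't voluntarily schedule while preemption is disabled or before the system has finished booting) and stamp the idle task's timestamp in init_idle(). A toy stand-in for that guard-clause pattern, with simulated state names rather than the scheduler's:

/*
 * Userspace stand-in (not the scheduler): a voluntary reschedule point
 * that bails out when preemption is disabled or the system is still
 * booting. State values and counters here are made up for illustration.
 */
#include <stdio.h>

enum sim_state { SIM_BOOTING, SIM_RUNNING };

static enum sim_state system_state_sim = SIM_BOOTING;
static int preempt_count_sim;   /* non-zero means "may not sleep here" */

static int cond_resched_sim(void)
{
        if (preempt_count_sim)
                return 0;                       /* inside a non-preemptible section */
        if (system_state_sim != SIM_RUNNING)
                return 0;                       /* too early in boot to schedule */
        /* a real implementation would call into the scheduler here */
        return 1;
}

int main(void)
{
        printf("during boot: rescheduled=%d\n", cond_resched_sim());
        system_state_sim = SIM_RUNNING;
        printf("after boot:  rescheduled=%d\n", cond_resched_sim());
        return 0;
}
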
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c05a2b7..32b48e8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -50,6 +50,9 @@
#include <asm/uaccess.h>
#include <asm/processor.h>
+extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
#if defined(CONFIG_SYSCTL)
/* External variables not in a header file. */
@@ -124,6 +127,10 @@ extern int sysctl_hz_timer;
extern int acct_parm[];
#endif
+#ifdef CONFIG_IA64
+extern int no_unaligned_warning;
+#endif
+
static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
ctl_table *, void **);
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
@@ -663,6 +670,16 @@ static ctl_table kern_table[] = {
.data = &acpi_video_flags,
.maxlen = sizeof (unsigned long),
.mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax,
+ },
+#endif
+#ifdef CONFIG_IA64
+ {
+ .ctl_name = KERN_IA64_UNALIGNED,
+ .procname = "ignore-unaligned-usertrap",
+ .data = &no_unaligned_warning,
+ .maxlen = sizeof (int),
+ .mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
@@ -929,7 +946,7 @@ static ctl_table fs_table[] = {
.data = &files_stat,
.maxlen = 3*sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_nr_files,
},
{
.ctl_name = FS_MAXFILE,
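
Both sysctl.c additions are just new rows in a table-driven dispatch: each ctl_table entry binds a name to a data pointer and a proc handler, so adding a knob like "ignore-unaligned-usertrap" is one more row (the file-nr entry additionally swaps in the dedicated proc_nr_files handler). A userspace sketch of that name-to-data-to-handler idea, with invented knob names and a trivial handler:

/*
 * Userspace sketch of a table-driven knob dispatch (not the kernel's
 * ctl_table): look the name up, then let the entry's handler read or
 * write the bound data. Names, data, and the handler are stand-ins.
 */
#include <stdio.h>
#include <string.h>

struct knob {
        const char *name;
        int        *data;
        int        (*handler)(struct knob *k, int write, int newval);
};

static int handle_int(struct knob *k, int write, int newval)
{
        if (write)
                *k->data = newval;
        return *k->data;
}

static int no_unaligned_warning_sim;    /* stand-in for the ia64 flag */
static int nr_files_sim = 1024;         /* stand-in for files_stat data */

static struct knob table[] = {
        { "ignore-unaligned-usertrap", &no_unaligned_warning_sim, handle_int },
        { "file-nr",                   &nr_files_sim,             handle_int },
        { NULL, NULL, NULL },
};

static int knob_rw(const char *name, int write, int newval)
{
        struct knob *k;

        for (k = table; k->name; k++)
                if (strcmp(k->name, name) == 0)
                        return k->handler(k, write, newval);
        return -1;      /* unknown knob */
}

int main(void)
{
        knob_rw("ignore-unaligned-usertrap", 1, 1);
        printf("ignore-unaligned-usertrap = %d\n",
               knob_rw("ignore-unaligned-usertrap", 0, 0));
        printf("file-nr = %d\n", knob_rw("file-nr", 0, 0));
        return 0;
}
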
diff --git a/kernel/timer.c b/kernel/timer.c
index fe3a9a9..bf7c419 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -489,9 +489,21 @@ unsigned long next_timer_interrupt(void)
struct list_head *list;
struct timer_list *nte;
unsigned long expires;
+ unsigned long hr_expires = MAX_JIFFY_OFFSET;
+ ktime_t hr_delta;
tvec_t *varray[4];
int i, j;
+ hr_delta = hrtimer_get_next_event();
+ if (hr_delta.tv64 != KTIME_MAX) {
+ struct timespec tsdelta;
+ tsdelta = ktime_to_timespec(hr_delta);
+ hr_expires = timespec_to_jiffies(&tsdelta);
+ if (hr_expires < 3)
+ return hr_expires + jiffies;
+ }
+ hr_expires += jiffies;
+
base = &__get_cpu_var(tvec_bases);
spin_lock(&base->t_base.lock);
expires = base->timer_jiffies + (LONG_MAX >> 1);
@@ -542,6 +554,10 @@ found:
}
}
spin_unlock(&base->t_base.lock);
+
+ if (time_before(hr_expires, expires))
+ return hr_expires;
+
return expires;
}
#endif
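
With hrtimers in the picture, next_timer_interrupt() converts the nanosecond delta from hrtimer_get_next_event() into jiffies and, if the expiry is fewer than three ticks away, reports it immediately instead of letting the CPU go idle; otherwise it returns whichever of the hrtimer and timer-wheel expiries comes first. A userspace sketch of that conversion and cutoff (the HZ value and helpers are stand-ins):

/*
 * Userspace sketch of the conversion next_timer_interrupt() now performs
 * (the "3 ticks" cutoff mirrors the patch; everything else is a stand-in):
 * turn a nanosecond delta into ticks and treat a very close hrtimer
 * expiry as "do not bother going idle".
 */
#include <stdint.h>
#include <stdio.h>

#define HZ_SIM         250                      /* illustrative tick rate */
#define NSEC_PER_TICK  (1000000000LL / HZ_SIM)
#define MAX_OFFSET_SIM INT64_MAX

/* Round a nanosecond delta up to whole ticks, like timespec_to_jiffies(). */
static int64_t ns_to_ticks(int64_t ns)
{
        return (ns + NSEC_PER_TICK - 1) / NSEC_PER_TICK;
}

static int64_t next_wakeup_tick(int64_t now_ticks, int64_t hr_delta_ns,
                                int64_t soft_timer_tick)
{
        int64_t hr_expires = MAX_OFFSET_SIM;

        if (hr_delta_ns != INT64_MAX) {
                hr_expires = ns_to_ticks(hr_delta_ns);
                if (hr_expires < 3)             /* expiring almost now: wake soon */
                        return now_ticks + hr_expires;
                hr_expires += now_ticks;
        }
        /* Otherwise pick whichever event comes first. */
        return hr_expires < soft_timer_tick ? hr_expires : soft_timer_tick;
}

int main(void)
{
        printf("wake at tick %lld\n",
               (long long)next_wakeup_tick(1000, 50000000LL, 1100));
        return 0;
}
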
@@ -925,6 +941,8 @@ static inline void update_times(void)
void do_timer(struct pt_regs *regs)
{
jiffies_64++;
+ /* prevent loading jiffies before storing new jiffies_64 value. */
+ barrier();
update_times();
softlockup_tick(regs);
}
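
The barrier() added to do_timer() is a compiler barrier: jiffies is a separate 32-bit symbol aliased onto jiffies_64 by the linker, so without the barrier the compiler could legally read the old jiffies value after incrementing jiffies_64. A minimal userspace illustration of where such a barrier sits (here the alias is just a cast, so this only shows the placement, not the real miscompilation risk):

/*
 * Userspace sketch of the ordering concern behind the new barrier():
 * store the 64-bit counter, then force the compiler to re-read any
 * aliased view of it. Variables are stand-ins, not the kernel's jiffies.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_jiffies_64;
/* Low 32 bits, as readers of a 32-bit "jiffies" alias would see them.
 * In the kernel the alias is a distinct linker symbol the compiler
 * cannot see through, which is why the explicit barrier is needed. */
#define fake_jiffies ((uint32_t)fake_jiffies_64)

/* GCC-style compiler barrier, equivalent to the kernel's barrier(). */
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

static uint32_t tick(void)
{
        fake_jiffies_64++;
        /* Make sure the increment is stored before anything below reads it. */
        compiler_barrier();
        return fake_jiffies;    /* must observe the new value */
}

int main(void)
{
        printf("jiffies after tick: %u\n", tick());
        return 0;
}
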
@@ -1351,10 +1369,10 @@ static inline u64 time_interpolator_get_cycles(unsigned int src)
return x();
case TIME_SOURCE_MMIO64 :
- return readq((void __iomem *) time_interpolator->addr);
+ return readq_relaxed((void __iomem *)time_interpolator->addr);
case TIME_SOURCE_MMIO32 :
- return readl((void __iomem *) time_interpolator->addr);
+ return readl_relaxed((void __iomem *)time_interpolator->addr);
default: return get_cycles();
}
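
The readq()/readl() to readq_relaxed()/readl_relaxed() switch drops the ordering guarantee (and its fence on ia64) from the time-interpolator register read, since a clocksource read only needs the value. A userspace analogy using C11 atomics rather than MMIO accessors:

/*
 * Userspace analogy (not the kernel MMIO accessors): an ordered load
 * pays for ordering against other memory accesses, a relaxed load just
 * returns the value. C11 atomics stand in for the MMIO register.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t fake_counter;   /* stand-in for the interpolator register */

static uint64_t read_counter_ordered(void)
{
        /* Like readq(): value plus ordering guarantees (costs a fence). */
        return atomic_load_explicit(&fake_counter, memory_order_seq_cst);
}

static uint64_t read_counter_relaxed(void)
{
        /* Like readq_relaxed(): just the value, no ordering constraint. */
        return atomic_load_explicit(&fake_counter, memory_order_relaxed);
}

int main(void)
{
        atomic_store_explicit(&fake_counter, 123456789ULL, memory_order_relaxed);
        printf("ordered: %llu\n", (unsigned long long)read_counter_ordered());
        printf("relaxed: %llu\n", (unsigned long long)read_counter_relaxed());
        return 0;
}
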