author		Linus Torvalds <torvalds@linux-foundation.org>	2008-05-08 11:31:07 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-08 11:31:07 -0700
commit		c4f51b4662074c6ee26c972126a317c05f65be06 (patch)
tree		ea6b25a3b45d11d649f85626a9879ce7723554da /kernel
parent		f589274533eb94e0217acc1db6240ce6e621bb56 (diff)
parent		46151122e0a2e80e5a6b2889f595e371fe2b600d (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-fixes
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-fixes:
  sched: fix weight calculations
  semaphore: fix
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	11
-rw-r--r--	kernel/semaphore.c	64
2 files changed, 38 insertions, 37 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c863663..e24ecd3 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -662,10 +662,15 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
 		if (sched_feat(NEW_FAIR_SLEEPERS)) {
+			unsigned long thresh = sysctl_sched_latency;
+
+			/*
+			 * convert the sleeper threshold into virtual time
+			 */
 			if (sched_feat(NORMALIZED_SLEEPER))
-				vruntime -= calc_delta_weight(sysctl_sched_latency, se);
-			else
-				vruntime -= sysctl_sched_latency;
+				thresh = calc_delta_fair(thresh, se);
+
+			vruntime -= thresh;
 		}
 
 		/* ensure we never gain time by being placed backwards. */
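The scheduler fix above replaces the NORMALIZED_SLEEPER special case with a single threshold that is scaled into virtual time via calc_delta_fair() when the feature is enabled. A minimal userspace sketch of the effect, assuming calc_delta_fair() reduces to delta * NICE_0_LOAD / weight and assuming a 20 ms sysctl_sched_latency (both assumptions, not taken from this page):

	#include <stdio.h>

	#define NICE_0_LOAD		1024ULL
	#define SCHED_LATENCY_NS	20000000ULL	/* assumed 20 ms default */

	/* simplified stand-in for the kernel's calc_delta_fair() */
	static unsigned long long calc_delta_fair(unsigned long long delta,
						  unsigned long long weight)
	{
		return delta * NICE_0_LOAD / weight;
	}

	/* model of the patched sleeper-credit logic in place_entity() */
	static unsigned long long place_sleeper(unsigned long long vruntime,
						unsigned long long weight,
						int normalized_sleeper)
	{
		unsigned long long thresh = SCHED_LATENCY_NS;

		/* convert the sleeper threshold into virtual time */
		if (normalized_sleeper)
			thresh = calc_delta_fair(thresh, weight);

		return vruntime - thresh;
	}

	int main(void)
	{
		/* a heavier entity (weight 2048) receives a smaller credit */
		printf("nice-0 weight: %llu\n", place_sleeper(100000000ULL, 1024, 1));
		printf("double weight: %llu\n", place_sleeper(100000000ULL, 2048, 1));
		return 0;
	}

A heavier entity gets a smaller sleeper credit in virtual time, since its virtual clock advances more slowly per unit of real time.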
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 5c2942e..5e41217 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -54,10 +54,9 @@ void down(struct semaphore *sem)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
+	if (unlikely(!sem->count))
 		__down(sem);
+	sem->count--;
 	spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(down);
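Once applied, down() reads as follows (reconstructed from the hunk above with comments added; kernel code, not a standalone program):

	void down(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->lock, flags);
		if (unlikely(!sem->count))
			__down(sem);	/* sleep until up() makes count non-zero */
		sem->count--;		/* consume one unit, whether or not we slept */
		spin_unlock_irqrestore(&sem->lock, flags);
	}

The decrement now happens in down() itself, after __down() returns, instead of inside a waiter handoff.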
@@ -77,10 +76,10 @@ int down_interruptible(struct semaphore *sem)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
+	if (unlikely(!sem->count))
 		result = __down_interruptible(sem);
+	if (!result)
+		sem->count--;
 	spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -103,10 +102,10 @@ int down_killable(struct semaphore *sem)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
+	if (unlikely(!sem->count))
 		result = __down_killable(sem);
+	if (!result)
+		sem->count--;
 	spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -157,10 +156,10 @@ int down_timeout(struct semaphore *sem, long jiffies)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
+	if (unlikely(!sem->count))
 		result = __down_timeout(sem, jiffies);
+	if (!result)
+		sem->count--;
 	spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -179,9 +178,8 @@ void up(struct semaphore *sem)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(list_empty(&sem->wait_list)))
-		sem->count++;
-	else
+	sem->count++;
+	if (unlikely(!list_empty(&sem->wait_list)))
 		__up(sem);
 	spin_unlock_irqrestore(&sem->lock, flags);
 }
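up() now always publishes the unit by incrementing the count, instead of handing it directly to the first waiter; the woken task re-checks sem->count under the lock in __down_common() below and may sleep again if a fast-path caller took the unit first. Reconstructed with comments (kernel code, not standalone):

	void up(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->lock, flags);
		sem->count++;				/* publish the unit... */
		if (unlikely(!list_empty(&sem->wait_list)))
			__up(sem);			/* ...then wake the head waiter */
		spin_unlock_irqrestore(&sem->lock, flags);
	}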
@@ -192,7 +190,6 @@ EXPORT_SYMBOL(up);
 struct semaphore_waiter {
 	struct list_head list;
 	struct task_struct *task;
-	int up;
 };
 
 /*
@@ -205,33 +202,34 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 {
 	struct task_struct *task = current;
 	struct semaphore_waiter waiter;
+	int ret = 0;
 
-	list_add_tail(&waiter.list, &sem->wait_list);
 	waiter.task = task;
-	waiter.up = 0;
+	list_add_tail(&waiter.list, &sem->wait_list);
 
 	for (;;) {
-		if (state == TASK_INTERRUPTIBLE && signal_pending(task))
-			goto interrupted;
-		if (state == TASK_KILLABLE && fatal_signal_pending(task))
-			goto interrupted;
-		if (timeout <= 0)
-			goto timed_out;
+		if (state == TASK_INTERRUPTIBLE && signal_pending(task)) {
+			ret = -EINTR;
+			break;
+		}
+		if (state == TASK_KILLABLE && fatal_signal_pending(task)) {
+			ret = -EINTR;
+			break;
+		}
+		if (timeout <= 0) {
+			ret = -ETIME;
+			break;
+		}
 		__set_task_state(task, state);
 		spin_unlock_irq(&sem->lock);
 		timeout = schedule_timeout(timeout);
 		spin_lock_irq(&sem->lock);
-		if (waiter.up)
-			return 0;
+		if (sem->count > 0)
+			break;
 	}
 
- timed_out:
-	list_del(&waiter.list);
-	return -ETIME;
-
- interrupted:
 	list_del(&waiter.list);
-	return -EINTR;
+	return ret;
 }
 
 static noinline void __sched __down(struct semaphore *sem)
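The wait loop is now a classic wake-and-recheck scheme with a single exit path (ret/break replacing the two goto labels), rather than a directed handoff through waiter.up. A minimal userspace sketch of that scheme, with a pthread mutex and condition variable standing in for the spinlock and scheduler (all names here are illustrative, not kernel API; build with cc -pthread):

	#include <pthread.h>
	#include <stdio.h>

	struct sem {
		pthread_mutex_t lock;	/* stands in for the spinlock */
		pthread_cond_t wait;	/* stands in for the wait list */
		unsigned int count;
	};

	static struct sem s = {
		PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_COND_INITIALIZER,
		0
	};

	/* down(): sleep until a unit is published, then consume it ourselves */
	static void sem_down(struct sem *sem)
	{
		pthread_mutex_lock(&sem->lock);
		while (!sem->count)		/* wake-and-recheck loop */
			pthread_cond_wait(&sem->wait, &sem->lock);
		sem->count--;			/* the waiter decrements, as in the patch */
		pthread_mutex_unlock(&sem->lock);
	}

	/* up(): always publish the unit, then wake one waiter */
	static void sem_up(struct sem *sem)
	{
		pthread_mutex_lock(&sem->lock);
		sem->count++;
		pthread_cond_signal(&sem->wait);
		pthread_mutex_unlock(&sem->lock);
	}

	static void *worker(void *arg)
	{
		(void)arg;
		sem_down(&s);
		printf("worker: acquired\n");
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		sem_up(&s);		/* releases the blocked worker */
		pthread_join(t, NULL);
		return 0;
	}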
@@ -258,7 +256,5 @@ static noinline void __sched __up(struct semaphore *sem)
 {
 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
 						struct semaphore_waiter, list);
-	list_del(&waiter->list);
-	waiter->up = 1;
 	wake_up_process(waiter->task);
 }
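With the handoff flag gone, __up() is reduced to a bare wakeup; the woken task dequeues itself in __down_common() after schedule_timeout() returns. Reconstructed with a comment (kernel code, not standalone):

	static noinline void __sched __up(struct semaphore *sem)
	{
		struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
							struct semaphore_waiter, list);
		/* no list_del(), no handoff: the waiter dequeues itself on wakeup */
		wake_up_process(waiter->task);
	}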