author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-04-23 13:58:08 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-04-23 13:58:17 +0200
commit	e1c805309d19c69d4ebeac38724076fa86feacdf (patch)
tree	73886783aac7c489a29f3e5eec56cbc2371a12dd
parent	b1ad171efa089ae26aba750d747d8149a4f860d5 (diff)
[S390] /proc/stat idle field for idle cpus
The cpu idle field in the output of /proc/stat is too small for cpus that have been idle for more than a tick. Add the architecture hook arch_idle_time that allows the not yet accounted idle time of a sleeping cpu to be added without waking that cpu.

The s390 implementation of arch_idle_time uses the already existing s390_idle_data per_cpu variable to find the sleep time of a neighboring idle cpu.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
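For reference only (not part of this patch): the fields on the "cpu" lines of /proc/stat are reported in USER_HZ ticks, so the effect of this change can be observed from user space by reading the fourth field and dividing by sysconf(_SC_CLK_TCK). The following minimal sketch is illustrative; the program and its variable names are not taken from the kernel sources.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* First line of /proc/stat: "cpu  user nice system idle iowait ..." */
	unsigned long long user, nice, sys, idle;
	long hz = sysconf(_SC_CLK_TCK);	/* clock ticks per second (USER_HZ) */
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "cpu %llu %llu %llu %llu", &user, &nice, &sys, &idle) == 4)
		printf("idle: %llu ticks (%.2f seconds)\n",
		       idle, (double)idle / hz);
	fclose(f);
	return 0;
}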
-rw-r--r--	arch/s390/include/asm/cputime.h	4
-rw-r--r--	arch/s390/kernel/vtime.c	16
-rw-r--r--	fs/proc/stat.c	5
3 files changed, 25 insertions, 0 deletions
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 95b0f7d..941384f 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -174,4 +174,8 @@ cputime64_to_clock_t(cputime64_t cputime)
return __div(cputime, 4096000000ULL / USER_HZ);
}
+cputime64_t s390_get_idle_time(int cpu);
+
+#define arch_idle_time(cpu) s390_get_idle_time(cpu)
+
#endif /* _S390_CPUTIME_H */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 38ea92f..c87f59b 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -240,6 +240,22 @@ void vtime_stop_cpu(void)
}
}
+cputime64_t s390_get_idle_time(int cpu)
+{
+ struct s390_idle_data *idle;
+ unsigned long long now, idle_time, idle_enter;
+
+ idle = &per_cpu(s390_idle, cpu);
+ spin_lock(&idle->lock);
+ now = get_clock();
+ idle_time = 0;
+ idle_enter = idle->idle_enter;
+ if (idle_enter != 0ULL && idle_enter < now)
+ idle_time = now - idle_enter;
+ spin_unlock(&idle->lock);
+ return idle_time;
+}
+
/*
* Sorted add to a list. List is linear searched until first bigger
* element is found.
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index f75efa2..81e4eb6 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -18,6 +18,9 @@
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
+#ifndef arch_idle_time
+#define arch_idle_time(cpu) 0
+#endif
static int show_stat(struct seq_file *p, void *v)
{
@@ -40,6 +43,7 @@ static int show_stat(struct seq_file *p, void *v)
nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
system = cputime64_add(system, kstat_cpu(i).cpustat.system);
idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
+ idle = cputime64_add(idle, arch_idle_time(i));
iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
@@ -69,6 +73,7 @@ static int show_stat(struct seq_file *p, void *v)
nice = kstat_cpu(i).cpustat.nice;
system = kstat_cpu(i).cpustat.system;
idle = kstat_cpu(i).cpustat.idle;
+ idle = cputime64_add(idle, arch_idle_time(i));
iowait = kstat_cpu(i).cpustat.iowait;
irq = kstat_cpu(i).cpustat.irq;
softirq = kstat_cpu(i).cpustat.softirq;