author		Adam Buchbinder <adam.buchbinder@gmail.com>	2016-03-04 11:20:04 -0800
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-03-08 15:00:17 +0100
commit		7eb792bf7c253cb63202aae72885f5f7abdd8668 (patch)
tree		7608cf1cff10cac1312d0072c6fe7b8017b383c5 /arch/s390
parent		1e133ab296f3ff8d9e58a5e758291ed39ba72ad7 (diff)
s390: Fix misspellings in comments
Signed-off-by: Adam Buchbinder <adam.buchbinder@gmail.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/perf_event.h	2
-rw-r--r--	arch/s390/include/asm/rwsem.h		2
-rw-r--r--	arch/s390/kernel/perf_cpum_cf.c		2
-rw-r--r--	arch/s390/kernel/perf_event.c		2
-rw-r--r--	arch/s390/kvm/guestdbg.c		4
5 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index f897ec7..1f7ff85 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -21,7 +21,7 @@
#define PMU_F_ERR_LSDA 0x0200
#define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
-/* Perf defintions for PMU event attributes in sysfs */
+/* Perf definitions for PMU event attributes in sysfs */
extern __init const struct attribute_group **cpumf_cf_event_group(void);
extern ssize_t cpumf_events_sysfs_show(struct device *dev,
struct device_attribute *attr,
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 4b43ee7..fead491 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -31,7 +31,7 @@
* This should be totally fair - if anything is waiting, a process that wants a
* lock will go to the back of the queue. When the currently active lock is
* released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consequtive readers at the
+ * that will be woken up; if there's a bunch of consecutive readers at the
* front, then they'll all be woken up, but no other readers will be.
*/
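The rwsem comment in the hunk above describes a concrete wakeup policy: waiters queue FIFO, a writer at the front of the queue is woken alone, and a run of consecutive readers at the front is woken as a batch. For readers unfamiliar with it, here is a minimal standalone C sketch of that policy -- not the kernel's rwsem implementation; struct waiter and wake_front() are names invented for illustration:

/*
 * Sketch of the wakeup policy described above -- illustration only.
 * Waiters sit in a FIFO list; on release, wake either the single
 * writer at the head, or the whole run of readers at the head.
 */
#include <stdbool.h>
#include <stddef.h>

enum waiter_type { WAITER_READER, WAITER_WRITER };

struct waiter {
	enum waiter_type type;
	bool woken;
	struct waiter *next;	/* list head is the oldest waiter */
};

/* Wake waiters per the policy; return the new queue head. */
static struct waiter *wake_front(struct waiter *head)
{
	if (head && head->type == WAITER_WRITER) {
		head->woken = true;	/* a writer is woken alone */
		return head->next;
	}
	while (head && head->type == WAITER_READER) {
		head->woken = true;	/* front readers wake together */
		head = head->next;
	}
	return head;	/* first waiter behind the woken batch, or NULL */
}

A writer queued behind that batch of readers stays blocked, which is what makes the scheme fair: no later arrival overtakes the queue.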
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 929c147..58bf457 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -383,7 +383,7 @@ static int __hw_perf_event_init(struct perf_event *event)
/* Validate the counter that is assigned to this event.
* Because the counter facility can use numerous counters at the
- * same time without constraints, it is not necessary to explicity
+ * same time without constraints, it is not necessary to explicitly
* validate event groups (event->group_leader != event).
*/
err = validate_event(hwc);
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index a1b7086..c3e4099 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -238,7 +238,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
}
-/* Perf defintions for PMU event attributes in sysfs */
+/* Perf definitions for PMU event attributes in sysfs */
ssize_t cpumf_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
{
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index d697312..e8c6843 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -17,7 +17,7 @@
/*
* Extends the address range given by *start and *stop to include the address
* range starting with estart and the length len. Takes care of overflowing
- * intervals and tries to minimize the overall intervall size.
+ * intervals and tries to minimize the overall interval size.
*/
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
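The comment in the hunk above states extend_address_range()'s contract: widen [*start, *stop] so it also covers the len bytes beginning at estart, while coping with intervals that wrap past the top of the address space and keeping the resulting interval minimal. A simplified C sketch of the non-wrapping case only -- illustration, not the kernel's implementation; extend_range_simple is an invented name:

#include <stdint.h>

/*
 * Simplified, non-wrapping sketch of the contract described above:
 * grow [*start, *stop] so it also covers [estart, estart + len - 1].
 * The real helper additionally handles intervals that overflow the
 * address space and chooses the smaller resulting range.
 */
static void extend_range_simple(uint64_t *start, uint64_t *stop,
				uint64_t estart, int len)
{
	uint64_t estop = estart + len - 1;	/* last byte of new range */

	if (estart < *start)
		*start = estart;
	if (estop > *stop)
		*stop = estop;
}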
@@ -72,7 +72,7 @@ static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
return;
/*
- * If the guest is not interrested in branching events, we can savely
+ * If the guest is not interested in branching events, we can safely
* limit them to the PER address range.
*/
if (!(*cr9 & PER_EVENT_BRANCH))