author     Simon Arlott <simon@fire.lp0.eu>   2007-10-20 01:25:36 +0200
committer  Adrian Bunk <bunk@kernel.org>      2007-10-20 01:25:36 +0200
commit     676b1855de0a18100b3c340084eb8ef72bde4fb1 (patch)
tree       cbcbe6dec24a23f97f93ec7753ab74d34a92473a /arch
parent     5b20311eeae7c5e7d9484cd0878ac756a20a78e4 (diff)
download   op-kernel-dev-676b1855de0a18100b3c340084eb8ef72bde4fb1.zip
           op-kernel-dev-676b1855de0a18100b3c340084eb8ef72bde4fb1.tar.gz
spelling fixes: arch/x86_64/
Spelling fixes in arch/x86_64/.
Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/boot/compressed/misc_64.c  | 4
-rw-r--r--  arch/x86/kernel/io_apic_64.c        | 4
-rw-r--r--  arch/x86/kernel/mce_64.c            | 4
-rw-r--r--  arch/x86/kernel/signal_64.c         | 2
-rw-r--r--  arch/x86/kernel/smpboot_64.c        | 2
-rw-r--r--  arch/x86/kernel/traps_64.c          | 2
-rw-r--r--  arch/x86/kernel/vsyscall_64.c       | 2
-rw-r--r--  arch/x86/mm/fault_64.c              | 4
-rw-r--r--  arch/x86/mm/srat_64.c               | 2
9 files changed, 13 insertions, 13 deletions
diff --git a/arch/x86/boot/compressed/misc_64.c b/arch/x86/boot/compressed/misc_64.c
index f932b0e..6ea015a 100644
--- a/arch/x86/boot/compressed/misc_64.c
+++ b/arch/x86/boot/compressed/misc_64.c
@@ -25,7 +25,7 @@
 /*
  * Getting to provable safe in place decompression is hard.
- * Worst case behaviours need to be analized.
+ * Worst case behaviours need to be analyzed.
  * Background information:
  *
  * The file layout is:
@@ -94,7 +94,7 @@
  * Adding 32768 instead of 32767 just makes for round numbers.
  * Adding the decompressor_size is necessary as it musht live after all
  * of the data as well. Last I measured the decompressor is about 14K.
- * 10K of actuall data and 4K of bss.
+ * 10K of actual data and 4K of bss.
  *
  */
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 1c2c7bf..b3c2d26 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1770,7 +1770,7 @@ __setup("no_timer_check", notimercheck);
 /*
  *
- * IRQ's that are handled by the PIC in the MPS IOAPIC case.
+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
  * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ.
  *   Linux doesn't really care, as it's not actually used
  *   for any interrupt handling anyway.
@@ -1921,7 +1921,7 @@ void destroy_irq(unsigned int irq)
 }
 /*
- * MSI mesage composition
+ * MSI message composition
  */
 #ifdef CONFIG_PCI_MSI
 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c
index 66e6b79..82c85bd 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/mce_64.c
@@ -320,7 +320,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 #ifdef CONFIG_X86_MCE_INTEL
 /***
  * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
- * @cpu: The CPU on which the event occured.
+ * @cpu: The CPU on which the event occurred.
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
@@ -688,7 +688,7 @@ static int __init mcheck_disable(char *str)
	return 1;
 }
-/* mce=off disables machine check. Note you can reenable it later
+/* mce=off disables machine check. Note you can re-enable it later
    using sysfs.
    mce=TOLERANCELEVEL (number, see above)
    mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 683802b..ab086b0 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -410,7 +410,7 @@ static void do_signal(struct pt_regs *regs)
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
-		/* Reenable any watchpoints before delivering the
+		/* Re-enable any watchpoints before delivering the
		 * signal to user space. The processor register will
		 * have been cleared if the watchpoint triggered
		 * inside the kernel.
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index e351ac4..d4c33ab 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -350,7 +350,7 @@ void __cpuinit start_secondary(void)
	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
-	 * IPI receipients, and the time when the determination is made
+	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI in genapic_flat.c. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index b4a9b3d..b4c8873 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -201,7 +201,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
 #define MSG(txt) ops->warning(data, txt)
 /*
- * x86-64 can have upto three kernel stacks:
+ * x86-64 can have up to three kernel stacks:
  * process stack
  * interrupt stack
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 585541c..e14cb3f 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -53,7 +53,7 @@
 /*
  * vsyscall_gtod_data contains data that is :
  * - readonly from vsyscalls
- * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
+ * - written by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
  * Try to keep this structure as small as possible to avoid cache line ping pongs
  */
 int __vgetcpu_mode __section_vgetcpu_mode;
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 5149ac1..7c56084 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -378,7 +378,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 again:
	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
-	 * kernel and should generate an OOPS. Unfortunatly, in the case of an
+	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
@@ -386,7 +386,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
-	 * the source reference check when there is a possibilty of a deadlock.
+	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 56089cc..ea85172 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -218,7 +218,7 @@ static inline int save_add_info(void) {return 0;}
 /*
  * Update nodes_add and decide if to include add are in the zone.
  * Both SPARSE and RESERVE need nodes_add infomation.
- * This code supports one contigious hot add area per node.
+ * This code supports one contiguous hot add area per node.
  */
 static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 {