author     Joe Perches <joe@perches.com>             2008-03-23 01:02:43 -0700
committer  Ingo Molnar <mingo@elte.hu>               2008-04-17 17:41:25 +0200
commit     c4fe760efde84e52168a81bf125f25ba2f118b51 (patch)
tree       99a8b2a79d2ba28bc0dd799e485b05f8fdf8cdd9 /include/asm-x86/mmu_context_64.h
parent     55464da94a845e057ffb94a9fc7be1aa86ffcd89 (diff)
include/asm-x86/mmu_context_64.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/mmu_context_64.h')
-rw-r--r--  include/asm-x86/mmu_context_64.h  |  21
1 file changed, 11 insertions, 10 deletions
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h
index ad6dc82..ca44c71 100644
--- a/include/asm-x86/mmu_context_64.h
+++ b/include/asm-x86/mmu_context_64.h
@@ -20,12 +20,12 @@ void destroy_context(struct mm_struct *mm);
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-        if (read_pda(mmu_state) == TLBSTATE_OK)
+        if (read_pda(mmu_state) == TLBSTATE_OK)
                 write_pda(mmu_state, TLBSTATE_LAZY);
 #endif
 }

-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                              struct task_struct *tsk)
 {
         unsigned cpu = smp_processor_id();
@@ -39,7 +39,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 cpu_set(cpu, next->cpu_vm_mask);
                 load_cr3(next->pgd);

-                if (unlikely(next->context.ldt != prev->context.ldt))
+                if (unlikely(next->context.ldt != prev->context.ldt))
                         load_LDT_nolock(&next->context);
         }
 #ifdef CONFIG_SMP
@@ -48,7 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 if (read_pda(active_mm) != next)
                         BUG();
                 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-                        /* We were in lazy tlb mode and leave_mm disabled
+                        /* We were in lazy tlb mode and leave_mm disabled
                          * tlb flush IPI delivery. We must reload CR3
                          * to make sure to use no freed page tables.
                          */
@@ -59,13 +59,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #endif
 }

-#define deactivate_mm(tsk,mm)  do {             \
-        load_gs_index(0);                       \
-        asm volatile("movl %0,%%fs"::"r"(0));   \
-} while(0)
+#define deactivate_mm(tsk, mm)                  \
+do {                                            \
+        load_gs_index(0);                       \
+        asm volatile("movl %0,%%fs"::"r"(0));   \
+} while (0)

-#define activate_mm(prev, next) \
-        switch_mm((prev),(next),NULL)
+#define activate_mm(prev, next)                 \
+        switch_mm((prev), (next), NULL)

 #endif
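
A note on reading the hunks above: in the pairs where the removed and added lines look identical (the read_pda() test, the switch_mm() prototype, the unlikely() test, and the comment opener), the difference is trailing whitespace on the old line, which does not show up in this rendering. The visible changes are a space after each comma in the deactivate_mm()/activate_mm() parameters and arguments, "do {" moved onto its own line, and "while(0)" becoming "while (0)". As a rough illustration of the style being applied (not code from this header; cleanup_stub() and its arguments are made-up placeholders, and warnings of this kind can normally be reproduced by running scripts/checkpatch.pl with its --file mode on the header):

/*
 * Hypothetical macro written in the style this cleanup applies:
 * no trailing whitespace, a space after each comma, "do {" on its
 * own line for a multi-statement macro, and a space in "while (0)".
 */
#define cleanup_stub(a, b)                      \
do {                                            \
        (void)(a);                              \
        (void)(b);                              \
} while (0)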