author | Brent Casavant <bcasavan@sgi.com> | 2006-01-26 15:55:52 -0800
---|---|---
committer | Tony Luck <tony.luck@intel.com> | 2006-01-26 15:55:52 -0800
commit | e08e6c521355cd33e647b2f739885bc3050eead6 (patch) |
tree | 251dd80647bd3a0140f5f31c35c125094c035f9c | /include/asm-ia64/machvec.h
parent | 3ee68c4af3fd7228c1be63254b9f884614f9ebb2 (diff) |
[IA64] hooks to wait for mmio writes to drain when migrating processes
On SN2, MMIO writes which are issued from separate processors are not
guaranteed to arrive in any particular order at the IO hardware. When
performing such writes from the kernel, this is not a problem, as a
kernel thread will not migrate to another CPU during execution, and
mmiowb() calls can guarantee write ordering when control of the IO
resource is allowed to move between threads.
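For the in-kernel case described above, the usual pattern is to issue mmiowb() before releasing the lock that serializes access to the IO resource, so that writes from different CPUs reach the device in lock-acquisition order. Below is a minimal sketch of that pattern; the device structure, lock, and register offset (example_dev, EXAMPLE_DOORBELL_REG) are illustrative assumptions, not code from this patch.

```c
/*
 * Sketch only: order an MMIO write against release of the lock that
 * serializes access to the device, using the mmiowb() pattern.
 */
static void example_ring_doorbell(struct example_dev *dev, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	writel(val, dev->mmio_base + EXAMPLE_DOORBELL_REG);
	mmiowb();	/* make the write visible to the IO fabric before
			 * the next lock holder can issue its own writes */
	spin_unlock_irqrestore(&dev->lock, flags);
}
```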
However, when MMIO writes can be performed from user space (e.g. DRM)
there are no such guarantees or mechanisms, as the process may
context-switch at any time, and may migrate to a different CPU as part
of the switch. For such programs/hardware to operate correctly, it is
required that the MMIO writes from the old CPU be accepted by the IO
hardware before subsequent writes from the new CPU can be issued.
The following patch implements this behavior on SN2 by waiting for a
Shub register to indicate that these writes have been accepted. This
check is placed in the context switch-in path and performs the wait
only when the newly scheduled task has changed CPUs.
Signed-off-by: Prarit Bhargava <prarit@sgi.com>
Signed-off-by: Brent Casavant <bcasavan@sgi.com>
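As a rough illustration of what such a migrate hook has to do on SN2, the sketch below spins until the Shub of the CPU the task last ran on reports that its pending PIO (MMIO) writes have drained. The helper names and the last_cpu field are assumptions for illustration only; the real bookkeeping lives in SN2's per-CPU data.

```c
/*
 * Illustrative sketch, not the exact code in this series:
 * sn_pio_write_status_addr(), sn_pio_write_status_drained() and
 * thread_info->last_cpu stand in for the real SN2 per-CPU state.
 */
void sn_migrate(struct task_struct *task)
{
	int last_cpu = task_thread_info(task)->last_cpu;	/* assumed field */
	volatile unsigned long *status = sn_pio_write_status_addr(last_cpu);
	unsigned long drained = sn_pio_write_status_drained(last_cpu);

	/* Wait for the old CPU's Shub to accept all outstanding writes
	 * before this task may issue MMIO writes from the new CPU. */
	while (*status != drained)
		cpu_relax();
}
```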
Diffstat (limited to 'include/asm-ia64/machvec.h')
-rw-r--r-- | include/asm-ia64/machvec.h | 13 |
1 file changed, 13 insertions, 0 deletions
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index ca5ea99..c3e4ed8 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -20,6 +20,7 @@ struct scatterlist;
 struct page;
 struct mm_struct;
 struct pci_bus;
+struct task_struct;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
@@ -34,6 +35,7 @@ typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
 				       u8 size);
 typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
 					u8 size);
+typedef void ia64_mv_migrate_t(struct task_struct * task);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
@@ -85,6 +87,11 @@ machvec_noop_mm (struct mm_struct *mm)
 {
 }
 
+static inline void
+machvec_noop_task (struct task_struct *task)
+{
+}
+
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
 extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
@@ -146,6 +153,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # define platform_readw_relaxed	ia64_mv.readw_relaxed
 # define platform_readl_relaxed	ia64_mv.readl_relaxed
 # define platform_readq_relaxed	ia64_mv.readq_relaxed
+# define platform_migrate		ia64_mv.migrate
 # endif
 
 /* __attribute__((__aligned__(16))) is required to make size of the
@@ -194,6 +202,7 @@ struct ia64_machine_vector {
 	ia64_mv_readw_relaxed_t *readw_relaxed;
 	ia64_mv_readl_relaxed_t *readl_relaxed;
 	ia64_mv_readq_relaxed_t *readq_relaxed;
+	ia64_mv_migrate_t *migrate;
 } __attribute__((__aligned__(16))); /* align attrib? see above comment */
 
 #define MACHVEC_INIT(name)			\
@@ -238,6 +247,7 @@ struct ia64_machine_vector {
 	platform_readw_relaxed,			\
 	platform_readl_relaxed,			\
 	platform_readq_relaxed,			\
+	platform_migrate,			\
 }
 
 extern struct ia64_machine_vector ia64_mv;
@@ -386,5 +396,8 @@ extern ia64_mv_dma_supported	swiotlb_dma_supported;
 #ifndef platform_readq_relaxed
 # define platform_readq_relaxed	__ia64_readq_relaxed
 #endif
+#ifndef platform_migrate
+# define platform_migrate	machvec_noop_task
+#endif
 
 #endif /* _ASM_IA64_MACHVEC_H */
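With the no-op default in place, a platform only has to override the hook in its own machvec header, and generic code can call it unconditionally. The sketch below shows both sides; the identifiers sn_migrate and example_task_switched_in are illustrative assumptions, not quoted from this series.

```c
/* Platform side (sketch, e.g. in the SN2 machvec header): route the
 * generic hook to the platform handler. */
extern ia64_mv_migrate_t sn_migrate;
#define platform_migrate	sn_migrate

/* Caller side (sketch): the context switch-in path invokes the hook
 * only when the task has moved to a different CPU; platforms that do
 * not override it fall through to machvec_noop_task(). */
static inline void example_task_switched_in(struct task_struct *next, int prev_cpu)
{
	if (prev_cpu != smp_processor_id())
		platform_migrate(next);
}
```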