path: root/arch/x86/kernel/pci-dma.c
author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2010-08-26 13:58:05 -0400
committer H. Peter Anvin <hpa@linux.intel.com>              2010-08-26 15:14:52 -0700
commit    ee1f284f38c8dfcbc7b656915a039dde016de7d3
tree      d89bc3d8bc153ca28e334ed991f0eaf8478b04c3 /arch/x86/kernel/pci-dma.c
parent    4db77ff3237a88ea74f691dd776e92b2f86a8f3f
x86, iommu: Utilize the IOMMU_INIT macros functionality.
We remove all of the sub-platform detection/init routines and instead use the .iommu_table array of structs, calling .early_init for every entry whose .detect returned a positive value. We also stop detecting other IOMMUs once a detected IOMMU was registered with a _FINISH type macro. During the 'pci_iommu_init' stage, we call .late_init for second-stage initialization if it is defined. Currently only SWIOTLB defines it; it is used to de-allocate the SWIOTLB if the other detected IOMMUs have deemed SWIOTLB unnecessary.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
LKML-Reference: <1282845485-8991-11-git-send-email-konrad.wilk@oracle.com>
CC: Fujita Tomonori <fujita.tomonori@lab.ntt.co.jp>
CC: Thomas Gleixner <tglx@linutronix.de>
CC: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
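For reference, the table entries iterated over in this patch have roughly the following shape. This is a sketch inferred from the fields the code below dereferences (.detect, .early_init, .late_init, .flags); the authoritative definition, including the exact type of the .depend field, lives in arch/x86/include/asm/iommu_table.h:

struct iommu_table_entry {
	int	(*detect)(void);	/* returns > 0 if this IOMMU is present */
	int	(*depend)(void);	/* detect routine to run before this one (assumed field) */
	void	(*early_init)(void);	/* called from pci_iommu_alloc(), before allocators are up */
	void	(*late_init)(void);	/* called from pci_iommu_init() for second-stage init */
	int	flags;			/* IOMMU_DETECTED, IOMMU_FINISH_IF_DETECTED */
};

Entries live in a dedicated linker section bounded by __iommu_table[] and __iommu_table_end[] (declared in the hunk below); sort_iommu_table() and check_iommu_entries() order and sanity-check them before the detection loop runs.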
Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
-rw-r--r--  arch/x86/kernel/pci-dma.c  |  46
1 file changed, 21 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1b3beb5..9ea999a 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,9 +11,8 @@
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
-#include <asm/amd_iommu.h>
#include <asm/x86_init.h>
-#include <asm/xen/swiotlb-xen.h>
+#include <asm/iommu_table.h>
static int forbid_dac __read_mostly;
@@ -45,6 +44,8 @@ int iommu_detected __read_mostly = 0;
*/
int iommu_pass_through __read_mostly;
+extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
+
/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
.init_name = "fallback device",
@@ -130,28 +131,24 @@ static void __init dma32_free_bootmem(void)
void __init pci_iommu_alloc(void)
{
+	struct iommu_table_entry *p;
+
/* free the range so iommu could get some range less than 4G */
dma32_free_bootmem();
-	if (pci_xen_swiotlb_detect() || pci_swiotlb_detect_override())
-		goto out;
-
-	pci_swiotlb_detect_4gb();
-
-	gart_iommu_hole_init();
-
-	detect_calgary();
-
-	detect_intel_iommu();
+	sort_iommu_table(__iommu_table, __iommu_table_end);
+	check_iommu_entries(__iommu_table, __iommu_table_end);
-	/* needs to be called after gart_iommu_hole_init */
-	amd_iommu_detect();
-out:
-	pci_xen_swiotlb_init();
-
-	pci_swiotlb_init();
+	for (p = __iommu_table; p < __iommu_table_end; p++) {
+		if (p && p->detect && p->detect() > 0) {
+			p->flags |= IOMMU_DETECTED;
+			if (p->early_init)
+				p->early_init();
+			if (p->flags & IOMMU_FINISH_IF_DETECTED)
+				break;
+		}
+	}
}
-
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag)
{
@@ -294,6 +291,7 @@ EXPORT_SYMBOL(dma_supported);
static int __init pci_iommu_init(void)
{
+	struct iommu_table_entry *p;
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
@@ -301,12 +299,10 @@ static int __init pci_iommu_init(void)
#endif
x86_init.iommu.iommu_init();
-	if (swiotlb || xen_swiotlb) {
-		printk(KERN_INFO "PCI-DMA: "
-			"Using software bounce buffering for IO (SWIOTLB)\n");
-		swiotlb_print_info();
-	} else
-		swiotlb_free();
+	for (p = __iommu_table; p < __iommu_table_end; p++) {
+		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
+			p->late_init();
+	}
return 0;
}
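For completeness, a usage sketch of the registration side (not part of this hunk): an IOMMU back end drops an entry into the .iommu_table section with the IOMMU_INIT family of macros from asm/iommu_table.h. Modelled on the SWIOTLB case, with the late-init hook name assumed for illustration:

/* The _FINISH variant sets IOMMU_FINISH_IF_DETECTED, so pci_iommu_alloc()
 * stops probing further entries once this .detect routine returns > 0.
 */
IOMMU_INIT_FINISH(pci_swiotlb_detect_override,	/* .detect */
		  pci_xen_swiotlb_detect,	/* .depend: detected before this entry */
		  pci_swiotlb_init,		/* .early_init */
		  pci_swiotlb_late_init);	/* .late_init (name assumed) */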