Diffstat (limited to 'drivers/iommu')
 -rw-r--r--  drivers/iommu/Kconfig                |   1
 -rw-r--r--  drivers/iommu/amd_iommu.c            |   4
 -rw-r--r--  drivers/iommu/dmar.c                 | 135
 -rw-r--r--  drivers/iommu/fsl_pamu_domain.c      |   6
 -rw-r--r--  drivers/iommu/intel-iommu.c          | 216
 -rw-r--r--  drivers/iommu/intel_irq_remapping.c  | 105
 -rw-r--r--  drivers/iommu/irq_remapping.c        |   6
 -rw-r--r--  drivers/iommu/of_iommu.c             |   1
 -rw-r--r--  drivers/iommu/shmobile-iommu.c       |   3
 -rw-r--r--  drivers/iommu/shmobile-ipmmu.c       |  10
 -rw-r--r--  drivers/iommu/shmobile-ipmmu.h       |   2
 11 files changed, 232 insertions, 257 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 3e7fdbb..79bbc21 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -207,6 +207,7 @@ config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI"
default n
depends on ARM
+ depends on SH_MOBILE || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
select SHMOBILE_IPMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 72531f0..faf0da4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -248,8 +248,8 @@ static bool check_device(struct device *dev)
if (!dev || !dev->dma_mask)
return false;
- /* No device or no PCI device */
- if (dev->bus != &pci_bus_type)
+ /* No PCI device */
+ if (!dev_is_pci(dev))
return false;
devid = get_device_id(dev);
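
The dev_is_pci() conversion here (and the matching ones in fsl_pamu_domain.c and intel-iommu.c later in this diff) relies on a helper from include/linux/pci.h that is, roughly, a one-line wrapper around the exact comparison it replaces:

#define dev_is_pci(d) ((d)->bus == &pci_bus_type)

The behavior is unchanged; the helper simply names the intent.
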
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8b452c9..1581565 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -52,6 +52,9 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
+static int alloc_iommu(struct dmar_drhd_unit *drhd);
+static void free_iommu(struct intel_iommu *iommu);
+
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
@@ -100,7 +103,6 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
if (!pdev) {
pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
segment, scope->bus, path->device, path->function);
- *dev = NULL;
return 0;
}
if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
@@ -151,7 +153,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
ret = dmar_parse_one_dev_scope(scope,
&(*devices)[index], segment);
if (ret) {
- kfree(*devices);
+ dmar_free_dev_scope(devices, cnt);
return ret;
}
index ++;
@@ -162,6 +164,17 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
return 0;
}
+void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
+{
+ if (*devices && *cnt) {
+ while (--*cnt >= 0)
+ pci_dev_put((*devices)[*cnt]);
+ kfree(*devices);
+ *devices = NULL;
+ *cnt = 0;
+ }
+}
+
/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
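
The new dmar_free_dev_scope() balances the device references taken while the scope table was parsed. A minimal sketch of the refcount pairing it relies on (the loop body is illustrative):

#include <linux/pci.h>

static void scope_refcount_sketch(void)
{
	struct pci_dev *pdev = NULL;

	/* pci_get_device() returns pdev with an elevated refcount and
	 * internally drops the reference held on the previous iterator. */
	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		/* a caller that stores pdev beyond the loop must take
		 * its own reference with pci_dev_get() and release it
		 * later via pci_dev_put(); the while (--*cnt >= 0)
		 * loop above performs exactly that release. */
	}
}
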
@@ -193,25 +206,28 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return 0;
}
+static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
+{
+ if (dmaru->devices && dmaru->devices_cnt)
+ dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
+ if (dmaru->iommu)
+ free_iommu(dmaru->iommu);
+ kfree(dmaru);
+}
+
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
struct acpi_dmar_hardware_unit *drhd;
- int ret = 0;
drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
if (dmaru->include_all)
return 0;
- ret = dmar_parse_dev_scope((void *)(drhd + 1),
- ((void *)drhd) + drhd->header.length,
- &dmaru->devices_cnt, &dmaru->devices,
- drhd->segment);
- if (ret) {
- list_del(&dmaru->list);
- kfree(dmaru);
- }
- return ret;
+ return dmar_parse_dev_scope((void *)(drhd + 1),
+ ((void *)drhd) + drhd->header.length,
+ &dmaru->devices_cnt, &dmaru->devices,
+ drhd->segment);
}
#ifdef CONFIG_ACPI_NUMA
@@ -423,7 +439,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
int __init dmar_dev_scope_init(void)
{
static int dmar_dev_scope_initialized;
- struct dmar_drhd_unit *drhd, *drhd_n;
+ struct dmar_drhd_unit *drhd;
int ret = -ENODEV;
if (dmar_dev_scope_initialized)
@@ -432,7 +448,7 @@ int __init dmar_dev_scope_init(void)
if (list_empty(&dmar_drhd_units))
goto fail;
- list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
+ list_for_each_entry(drhd, &dmar_drhd_units, list) {
ret = dmar_parse_dev(drhd);
if (ret)
goto fail;
@@ -456,24 +472,23 @@ int __init dmar_table_init(void)
static int dmar_table_initialized;
int ret;
- if (dmar_table_initialized)
- return 0;
-
- dmar_table_initialized = 1;
-
- ret = parse_dmar_table();
- if (ret) {
- if (ret != -ENODEV)
- pr_info("parse DMAR table failure.\n");
- return ret;
- }
+ if (dmar_table_initialized == 0) {
+ ret = parse_dmar_table();
+ if (ret < 0) {
+ if (ret != -ENODEV)
+ pr_info("parse DMAR table failure.\n");
+ } else if (list_empty(&dmar_drhd_units)) {
+ pr_info("No DMAR devices found\n");
+ ret = -ENODEV;
+ }
- if (list_empty(&dmar_drhd_units)) {
- pr_info("No DMAR devices found\n");
- return -ENODEV;
+ if (ret < 0)
+ dmar_table_initialized = ret;
+ else
+ dmar_table_initialized = 1;
}
- return 0;
+ return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
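
dmar_table_init() now memoizes the first parse result in dmar_table_initialized (1 on success, a negative errno on failure), so repeated callers get a consistent answer without reparsing the ACPI table. The pattern, reduced to a sketch with a hypothetical do_parse():

#include <linux/errno.h>

static int do_parse(void) { return -ENODEV; }	/* hypothetical parser */
static int state;	/* 0 = not tried, <0 = cached errno, 1 = ok */

static int table_init_sketch(void)
{
	if (state == 0) {
		int ret = do_parse();

		state = (ret < 0) ? ret : 1;
	}
	return state < 0 ? state : 0;
}
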
@@ -488,7 +503,7 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION));
}
-int __init check_zero_address(void)
+static int __init check_zero_address(void)
{
struct acpi_table_dmar *dmar;
struct acpi_dmar_header *entry_header;
@@ -546,14 +561,6 @@ int __init detect_intel_iommu(void)
if (ret)
ret = check_zero_address();
{
- struct acpi_table_dmar *dmar;
-
- dmar = (struct acpi_table_dmar *) dmar_tbl;
-
- if (ret && irq_remapping_enabled && cpu_has_x2apic &&
- dmar->flags & 0x1)
- pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
@@ -565,7 +572,7 @@ int __init detect_intel_iommu(void)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
}
- early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
+ early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
return ret ? 1 : -ENODEV;
@@ -647,7 +654,7 @@ out:
return err;
}
-int alloc_iommu(struct dmar_drhd_unit *drhd)
+static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
@@ -721,12 +728,19 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
return err;
}
-void free_iommu(struct intel_iommu *iommu)
+static void free_iommu(struct intel_iommu *iommu)
{
- if (!iommu)
- return;
+ if (iommu->irq) {
+ free_irq(iommu->irq, iommu);
+ irq_set_handler_data(iommu->irq, NULL);
+ destroy_irq(iommu->irq);
+ }
- free_dmar_iommu(iommu);
+ if (iommu->qi) {
+ free_page((unsigned long)iommu->qi->desc);
+ kfree(iommu->qi->desc_status);
+ kfree(iommu->qi);
+ }
if (iommu->reg)
unmap_iommu(iommu);
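
free_iommu() now tears resources down in the reverse of their setup order: the fault interrupt first (so no handler can touch the queued-invalidation state), then the QI descriptor page and status array, and the register mapping last. The general idiom, with an illustrative struct:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>

struct res_sketch {
	int		irq;
	void		*buf;
	void __iomem	*regs;
};

static void res_teardown(struct res_sketch *r)
{
	if (r->irq)
		free_irq(r->irq, r);	/* silence consumers first */
	kfree(r->buf);			/* then free what they used */
	if (r->regs)
		iounmap(r->regs);	/* unmap registers last */
}
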
@@ -1050,7 +1064,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
if (!desc_page) {
kfree(qi);
- iommu->qi = 0;
+ iommu->qi = NULL;
return -ENOMEM;
}
@@ -1060,7 +1074,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
if (!qi->desc_status) {
free_page((unsigned long) qi->desc);
kfree(qi);
- iommu->qi = 0;
+ iommu->qi = NULL;
return -ENOMEM;
}
@@ -1111,9 +1125,7 @@ static const char *irq_remap_fault_reasons[] =
"Blocked an interrupt request due to source-id verification failure",
};
-#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
if (fault_reason >= 0x20 && (fault_reason - 0x20 <
ARRAY_SIZE(irq_remap_fault_reasons))) {
@@ -1303,15 +1315,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
int __init enable_drhd_fault_handling(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
/*
* Enable fault control interrupt.
*/
- for_each_drhd_unit(drhd) {
- int ret;
- struct intel_iommu *iommu = drhd->iommu;
+ for_each_iommu(iommu, drhd) {
u32 fault_status;
- ret = dmar_set_interrupt(iommu);
+ int ret = dmar_set_interrupt(iommu);
if (ret) {
pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
@@ -1366,4 +1377,22 @@ int __init dmar_ir_support(void)
return 0;
return dmar->flags & 0x1;
}
+
+static int __init dmar_free_unused_resources(void)
+{
+ struct dmar_drhd_unit *dmaru, *dmaru_n;
+
+ /* DMAR units are in use */
+ if (irq_remapping_enabled || intel_iommu_enabled)
+ return 0;
+
+ list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
+ list_del(&dmaru->list);
+ dmar_free_drhd(dmaru);
+ }
+
+ return 0;
+}
+
+late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
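
dmar_free_unused_resources() must use the _safe iterator because each pass unlinks and frees the current node; plain list_for_each_entry() would advance through freed memory. The idiom in isolation:

#include <linux/list.h>
#include <linux/slab.h>

struct item_sketch {
	struct list_head list;
};

static void free_all_sketch(struct list_head *head)
{
	struct item_sketch *it, *next;

	/* the second cursor caches the next node before the body
	 * frees the current one, keeping the traversal valid */
	list_for_each_entry_safe(it, next, head, list) {
		list_del(&it->list);
		kfree(it);
	}
}

late_initcall() defers this cleanup to the end of boot, by which point irq_remapping_enabled and intel_iommu_enabled hold their final values.
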
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c857c30..93072ba 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -691,7 +691,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
* Use LIODN of the PCI controller while attaching a
* PCI device.
*/
- if (dev->bus == &pci_bus_type) {
+ if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*
@@ -729,7 +729,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
* Use LIODN of the PCI controller while detaching a
* PCI device.
*/
- if (dev->bus == &pci_bus_type) {
+ if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*
@@ -1056,7 +1056,7 @@ static int fsl_pamu_add_device(struct device *dev)
* For platform devices we allocate a separate group for
* each of the devices.
*/
- if (dev->bus == &pci_bus_type) {
+ if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
/* Don't create device groups for virtual PCI bridges */
if (pdev->subordinate)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 43b9bfe..5ac7efc 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -63,6 +63,7 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define MAX_AGAW_WIDTH 64
+#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
@@ -106,12 +107,12 @@ static inline int agaw_to_level(int agaw)
static inline int agaw_to_width(int agaw)
{
- return 30 + agaw * LEVEL_STRIDE;
+ return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}
static inline int width_to_agaw(int width)
{
- return (width - 30) / LEVEL_STRIDE;
+ return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
static inline unsigned int level_to_offset_bits(int level)
@@ -141,7 +142,7 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
- return 1 << ((lvl - 1) * LEVEL_STRIDE);
+ return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
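
These clamps matter because each AGAW level adds LEVEL_STRIDE (9) bits to a 30-bit base, so agaw 0..4 map to 30, 39, 48, 57, and 66 bits, and the 5-level value overruns the 64-bit architectural maximum. The arithmetic, spelled out:

#define LEVEL_STRIDE		9
#define VTD_PAGE_SHIFT		12
#define MAX_AGAW_WIDTH		64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)	/* 52 */

/*
 * width(agaw) = 30 + 9 * agaw  ->  30, 39, 48, 57, 66 for agaw 0..4.
 * agaw_to_width() clamps the 5-level case to 64, and page-frame shift
 * counts derived from it are capped at MAX_AGAW_PFN_WIDTH (52) so an
 * expression such as 1 << shift never exceeds a 64-bit value's width.
 */
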
@@ -288,26 +289,6 @@ static inline void dma_clear_pte(struct dma_pte *pte)
pte->val = 0;
}
-static inline void dma_set_pte_readable(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_READ;
-}
-
-static inline void dma_set_pte_writable(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_WRITE;
-}
-
-static inline void dma_set_pte_snp(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_SNP;
-}
-
-static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
-{
- pte->val = (pte->val & ~3) | (prot & 3);
-}
-
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
@@ -318,11 +299,6 @@ static inline u64 dma_pte_addr(struct dma_pte *pte)
#endif
}
-static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
-{
- pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
-}
-
static inline bool dma_pte_present(struct dma_pte *pte)
{
return (pte->val & 3) != 0;
@@ -406,7 +382,7 @@ struct device_domain_info {
static void flush_unmaps_timeout(unsigned long data);
-DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
+static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
@@ -652,9 +628,7 @@ static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
struct dmar_drhd_unit *drhd = NULL;
int i;
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
+ for_each_active_drhd_unit(drhd) {
if (segment != drhd->segment)
continue;
@@ -865,7 +839,6 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
- int order;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -890,8 +863,7 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
} while (start_pfn && start_pfn <= last_pfn);
- order = (large_page - 1) * 9;
- return order;
+ return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
@@ -1255,8 +1227,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
unsigned long nlongs;
ndomains = cap_ndoms(iommu->cap);
- pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
- ndomains);
+ pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
+ iommu->seq_id, ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
@@ -1266,13 +1238,17 @@ static int iommu_init_domains(struct intel_iommu *iommu)
*/
iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
if (!iommu->domain_ids) {
- printk(KERN_ERR "Allocating domain id array failed\n");
+ pr_err("IOMMU%d: allocating domain id array failed\n",
+ iommu->seq_id);
return -ENOMEM;
}
iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
GFP_KERNEL);
if (!iommu->domains) {
- printk(KERN_ERR "Allocating domain array failed\n");
+ pr_err("IOMMU%d: allocating domain array failed\n",
+ iommu->seq_id);
+ kfree(iommu->domain_ids);
+ iommu->domain_ids = NULL;
return -ENOMEM;
}
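
The added unwind fixes a leak: when the second allocation failed, iommu->domain_ids was left allocated and dangling for later teardown. The two-stage pattern in isolation, with illustrative parameters:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int two_allocs_sketch(unsigned long **ids, void ***doms, int n)
{
	*ids = kcalloc(BITS_TO_LONGS(n), sizeof(**ids), GFP_KERNEL);
	if (!*ids)
		return -ENOMEM;

	*doms = kcalloc(n, sizeof(**doms), GFP_KERNEL);
	if (!*doms) {
		kfree(*ids);	/* undo the first allocation */
		*ids = NULL;	/* and don't leave it dangling */
		return -ENOMEM;
	}
	return 0;
}
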
@@ -1289,10 +1265,10 @@ static int iommu_init_domains(struct intel_iommu *iommu)
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);
-void free_dmar_iommu(struct intel_iommu *iommu)
+static void free_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
- int i;
+ int i, count;
unsigned long flags;
if ((iommu->domains) && (iommu->domain_ids)) {
@@ -1301,28 +1277,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
clear_bit(i, iommu->domain_ids);
spin_lock_irqsave(&domain->iommu_lock, flags);
- if (--domain->iommu_count == 0) {
+ count = --domain->iommu_count;
+ spin_unlock_irqrestore(&domain->iommu_lock, flags);
+ if (count == 0) {
if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
vm_domain_exit(domain);
else
domain_exit(domain);
}
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
}
}
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- if (iommu->irq) {
- irq_set_handler_data(iommu->irq, NULL);
- /* This will mask the irq */
- free_irq(iommu->irq, iommu);
- destroy_irq(iommu->irq);
- }
-
kfree(iommu->domains);
kfree(iommu->domain_ids);
+ iommu->domains = NULL;
+ iommu->domain_ids = NULL;
g_iommus[iommu->seq_id] = NULL;
@@ -2245,8 +2217,6 @@ static int __init si_domain_init(int hw)
if (!si_domain)
return -EFAULT;
- pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
-
for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret) {
@@ -2261,6 +2231,8 @@ static int __init si_domain_init(int hw)
}
si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+ pr_debug("IOMMU: identity mapping domain is domain %d\n",
+ si_domain->id);
if (hw)
return 0;
@@ -2492,11 +2464,7 @@ static int __init init_dmars(void)
goto error;
}
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
+ for_each_active_iommu(iommu, drhd) {
g_iommus[iommu->seq_id] = iommu;
ret = iommu_init_domains(iommu);
@@ -2520,12 +2488,7 @@ static int __init init_dmars(void)
/*
* Start from the sane iommu hardware state.
*/
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
-
+ for_each_active_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized by us
* (for example, while enabling interrupt-remapping) then
@@ -2545,12 +2508,7 @@ static int __init init_dmars(void)
dmar_disable_qi(iommu);
}
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
-
+ for_each_active_iommu(iommu, drhd) {
if (dmar_enable_qi(iommu)) {
/*
* Queued Invalidate not enabled, use Register Based
@@ -2633,17 +2591,16 @@ static int __init init_dmars(void)
* global invalidate iotlb
* enable translation
*/
- for_each_drhd_unit(drhd) {
+ for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
/*
* we always have to disable PMRs or DMA may fail on
* this device
*/
if (force_on)
- iommu_disable_protect_mem_regions(drhd->iommu);
+ iommu_disable_protect_mem_regions(iommu);
continue;
}
- iommu = drhd->iommu;
iommu_flush_write_buffer(iommu);
@@ -2665,12 +2622,9 @@ static int __init init_dmars(void)
return 0;
error:
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
- iommu = drhd->iommu;
- free_iommu(iommu);
- }
+ for_each_active_iommu(iommu, drhd)
+ free_dmar_iommu(iommu);
+ kfree(deferred_flush);
kfree(g_iommus);
return ret;
}
@@ -2758,7 +2712,7 @@ static int iommu_no_mapping(struct device *dev)
struct pci_dev *pdev;
int found;
- if (unlikely(dev->bus != &pci_bus_type))
+ if (unlikely(!dev_is_pci(dev)))
return 1;
pdev = to_pci_dev(dev);
@@ -3318,9 +3272,9 @@ static void __init init_no_remapping_devices(void)
}
}
- for_each_drhd_unit(drhd) {
+ for_each_active_drhd_unit(drhd) {
int i;
- if (drhd->ignored || drhd->include_all)
+ if (drhd->include_all)
continue;
for (i = 0; i < drhd->devices_cnt; i++)
@@ -3514,18 +3468,12 @@ static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
struct acpi_dmar_reserved_memory *rmrr;
- int ret;
rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
- ret = dmar_parse_dev_scope((void *)(rmrr + 1),
- ((void *)rmrr) + rmrr->header.length,
- &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
- if (ret || (rmrru->devices_cnt == 0)) {
- list_del(&rmrru->list);
- kfree(rmrru);
- }
- return ret;
+ return dmar_parse_dev_scope((void *)(rmrr + 1),
+ ((void *)rmrr) + rmrr->header.length,
+ &rmrru->devices_cnt, &rmrru->devices,
+ rmrr->segment);
}
static LIST_HEAD(dmar_atsr_units);
@@ -3550,23 +3498,39 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
- int rc;
struct acpi_dmar_atsr *atsr;
if (atsru->include_all)
return 0;
atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- rc = dmar_parse_dev_scope((void *)(atsr + 1),
- (void *)atsr + atsr->header.length,
- &atsru->devices_cnt, &atsru->devices,
- atsr->segment);
- if (rc || !atsru->devices_cnt) {
- list_del(&atsru->list);
- kfree(atsru);
+ return dmar_parse_dev_scope((void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ &atsru->devices_cnt, &atsru->devices,
+ atsr->segment);
+}
+
+static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
+{
+ dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
+ kfree(atsru);
+}
+
+static void intel_iommu_free_dmars(void)
+{
+ struct dmar_rmrr_unit *rmrru, *rmrr_n;
+ struct dmar_atsr_unit *atsru, *atsr_n;
+
+ list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
+ list_del(&rmrru->list);
+ dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+ kfree(rmrru);
}
- return rc;
+ list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
+ list_del(&atsru->list);
+ intel_iommu_free_atsr(atsru);
+ }
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3610,17 +3574,17 @@ found:
int __init dmar_parse_rmrr_atsr_dev(void)
{
- struct dmar_rmrr_unit *rmrr, *rmrr_n;
- struct dmar_atsr_unit *atsr, *atsr_n;
+ struct dmar_rmrr_unit *rmrr;
+ struct dmar_atsr_unit *atsr;
int ret = 0;
- list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
+ list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}
- list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+ list_for_each_entry(atsr, &dmar_atsr_units, list) {
ret = atsr_parse_dev(atsr);
if (ret)
return ret;
@@ -3667,8 +3631,9 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
- int ret = 0;
+ int ret = -ENODEV;
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3676,36 +3641,29 @@ int __init intel_iommu_init(void)
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
- return -ENODEV;
+ goto out_free_dmar;
}
/*
* Disable translation if already enabled prior to OS handover.
*/
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu;
-
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
+ for_each_active_iommu(iommu, drhd)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- }
if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
- return -ENODEV;
+ goto out_free_dmar;
}
if (no_iommu || dmar_disabled)
- return -ENODEV;
+ goto out_free_dmar;
if (iommu_init_mempool()) {
if (force_on)
panic("tboot: Failed to initialize iommu memory\n");
- return -ENODEV;
+ goto out_free_dmar;
}
if (list_empty(&dmar_rmrr_units))
@@ -3717,7 +3675,7 @@ int __init intel_iommu_init(void)
if (dmar_init_reserved_ranges()) {
if (force_on)
panic("tboot: Failed to reserve iommu ranges\n");
- return -ENODEV;
+ goto out_free_mempool;
}
init_no_remapping_devices();
@@ -3727,9 +3685,7 @@ int __init intel_iommu_init(void)
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
printk(KERN_ERR "IOMMU: dmar init failed\n");
- put_iova_domain(&reserved_iova_list);
- iommu_exit_mempool();
- return ret;
+ goto out_free_reserved_range;
}
printk(KERN_INFO
"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
@@ -3749,6 +3705,14 @@ int __init intel_iommu_init(void)
intel_iommu_enabled = 1;
return 0;
+
+out_free_reserved_range:
+ put_iova_domain(&reserved_iova_list);
+out_free_mempool:
+ iommu_exit_mempool();
+out_free_dmar:
+ intel_iommu_free_dmars();
+ return ret;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
@@ -3877,7 +3841,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
}
/* domain id for virtual machine, it won't be set in context */
-static unsigned long vm_domid;
+static atomic_t vm_domid = ATOMIC_INIT(0);
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
@@ -3887,7 +3851,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
if (!domain)
return NULL;
- domain->id = vm_domid++;
+ domain->id = atomic_inc_return(&vm_domid);
domain->nid = -1;
memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
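
A plain vm_domid++ is a read-modify-write race: two CPUs creating VM domains at once can read the same value and hand out duplicate IDs. atomic_inc_return() makes the increment and the fetch one indivisible operation:

#include <linux/atomic.h>

static atomic_t vm_domid_sketch = ATOMIC_INIT(0);

/* each caller receives a unique, increasing ID with no lock held */
static int alloc_vm_domain_id(void)
{
	return atomic_inc_return(&vm_domid_sketch);
}
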
@@ -3934,11 +3898,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
unsigned long i;
unsigned long ndomains;
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
- iommu = drhd->iommu;
-
+ for_each_active_iommu(iommu, drhd) {
ndomains = cap_ndoms(iommu->cap);
for_each_set_bit(i, iommu->domain_ids, ndomains) {
if (iommu->domains[i] == domain) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index bab10b1..b30b423 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -40,13 +40,15 @@ static int ir_ioapic_num, ir_hpet_num;
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
+static int __init parse_ioapics_under_ir(void);
+
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
struct irq_cfg *cfg = irq_get_chip_data(irq);
return cfg ? &cfg->irq_2_iommu : NULL;
}
-int get_irte(int irq, struct irte *entry)
+static int get_irte(int irq, struct irte *entry)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
unsigned long flags;
@@ -69,19 +71,13 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct irq_cfg *cfg = irq_get_chip_data(irq);
- u16 index, start_index;
unsigned int mask = 0;
unsigned long flags;
- int i;
+ int index;
if (!count || !irq_iommu)
return -1;
- /*
- * start the IRTE search from index 0.
- */
- index = start_index = 0;
-
if (count > 1) {
count = __roundup_pow_of_two(count);
mask = ilog2(count);
@@ -96,32 +92,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
}
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
- do {
- for (i = index; i < index + count; i++)
- if (table->base[i].present)
- break;
- /* empty index found */
- if (i == index + count)
- break;
-
- index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
- if (index == start_index) {
- raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- printk(KERN_ERR "can't allocate an IRTE\n");
- return -1;
- }
- } while (1);
-
- for (i = index; i < index + count; i++)
- table->base[i].present = 1;
-
- cfg->remapped = 1;
- irq_iommu->iommu = iommu;
- irq_iommu->irte_index = index;
- irq_iommu->sub_handle = 0;
- irq_iommu->irte_mask = mask;
-
+ index = bitmap_find_free_region(table->bitmap,
+ INTR_REMAP_TABLE_ENTRIES, mask);
+ if (index < 0) {
+ pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
+ } else {
+ cfg->remapped = 1;
+ irq_iommu->iommu = iommu;
+ irq_iommu->irte_index = index;
+ irq_iommu->sub_handle = 0;
+ irq_iommu->irte_mask = mask;
+ }
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
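
bitmap_find_free_region() replaces the open-coded linear scan: it locates and claims a free, naturally aligned run of 2^order bits in a single call, returning the start index or a negative errno. A usage sketch (NR_ENTRIES stands in for INTR_REMAP_TABLE_ENTRIES):

#include <linux/bitmap.h>

#define NR_ENTRIES	65536

static int irte_alloc_sketch(unsigned long *bitmap, int order)
{
	/* the region is aligned to its own size, matching how the
	 * hardware indexes sub-handles within a multi-IRTE block */
	int index = bitmap_find_free_region(bitmap, NR_ENTRIES, order);

	if (index < 0)
		return index;	/* table exhausted */
	return index;		/* owns index..index + (1 << order) - 1 */
}

The release side pairs with the same order, which is what the new bitmap_release_region() call in clear_entries() below does.
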
@@ -254,6 +235,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
set_64bit(&entry->low, 0);
set_64bit(&entry->high, 0);
}
+ bitmap_release_region(iommu->ir_table->bitmap, index,
+ irq_iommu->irte_mask);
return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
@@ -336,7 +319,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
return -1;
}
- set_irte_sid(irte, 1, 0, sid);
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
return 0;
}
@@ -453,6 +436,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
struct ir_table *ir_table;
struct page *pages;
+ unsigned long *bitmap;
ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
GFP_ATOMIC);
@@ -464,13 +448,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
INTR_REMAP_PAGE_ORDER);
if (!pages) {
- printk(KERN_ERR "failed to allocate pages of order %d\n",
- INTR_REMAP_PAGE_ORDER);
+ pr_err("IR%d: failed to allocate pages of order %d\n",
+ iommu->seq_id, INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table);
return -ENOMEM;
}
+ bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
+ sizeof(long), GFP_ATOMIC);
+ if (bitmap == NULL) {
+ pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
+ __free_pages(pages, INTR_REMAP_PAGE_ORDER);
+ kfree(ir_table);
+ return -ENOMEM;
+ }
+
ir_table->base = page_address(pages);
+ ir_table->bitmap = bitmap;
iommu_set_irq_remapping(iommu, mode);
return 0;
@@ -521,6 +515,7 @@ static int __init dmar_x2apic_optout(void)
static int __init intel_irq_remapping_supported(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
if (disable_irq_remap)
return 0;
@@ -539,12 +534,9 @@ static int __init intel_irq_remapping_supported(void)
if (!dmar_ir_support())
return 0;
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd)
if (!ecap_ir_support(iommu->ecap))
return 0;
- }
return 1;
}
@@ -552,6 +544,7 @@ static int __init intel_irq_remapping_supported(void)
static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
bool x2apic_present;
int setup = 0;
int eim = 0;
@@ -564,6 +557,8 @@ static int __init intel_enable_irq_remapping(void)
}
if (x2apic_present) {
+ pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+
eim = !dmar_x2apic_optout();
if (!eim)
printk(KERN_WARNING
@@ -572,9 +567,7 @@ static int __init intel_enable_irq_remapping(void)
"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
}
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized,
* shouldn't disable it.
@@ -599,9 +592,7 @@ static int __init intel_enable_irq_remapping(void)
/*
* check for the Interrupt-remapping support
*/
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
@@ -615,10 +606,8 @@ static int __init intel_enable_irq_remapping(void)
/*
* Enable queued invalidation for all the DRHD's.
*/
- for_each_drhd_unit(drhd) {
- int ret;
- struct intel_iommu *iommu = drhd->iommu;
- ret = dmar_enable_qi(iommu);
+ for_each_iommu(iommu, drhd) {
+ int ret = dmar_enable_qi(iommu);
if (ret) {
printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
@@ -631,9 +620,7 @@ static int __init intel_enable_irq_remapping(void)
/*
* Setup Interrupt-remapping for all the DRHD's now.
*/
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
@@ -774,22 +761,20 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
 * Finds the association between IOAPICs and their Interrupt-remapping
* hardware unit.
*/
-int __init parse_ioapics_under_ir(void)
+static int __init parse_ioapics_under_ir(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
int ir_supported = 0;
int ioapic_idx;
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd)
if (ecap_ir_support(iommu->ecap)) {
if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
return -1;
ir_supported = 1;
}
- }
if (!ir_supported)
return 0;
@@ -807,7 +792,7 @@ int __init parse_ioapics_under_ir(void)
return 1;
}
-int __init ir_dev_scope_init(void)
+static int __init ir_dev_scope_init(void)
{
if (!irq_remapping_enabled)
return 0;
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 39f81ae..228632c9 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -150,7 +150,7 @@ static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
return do_setup_msix_irqs(dev, nvec);
}
-void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
{
/*
* Intr-remapping uses pin number as the virtual vector
@@ -295,8 +295,8 @@ int setup_ioapic_remapped_entry(int irq,
vector, attr);
}
-int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
+static int set_remapped_irq_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
if (!config_enabled(CONFIG_SMP) || !remap_ops ||
!remap_ops->set_affinity)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index ee249bc..e550ccb 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -20,6 +20,7 @@
#include <linux/export.h>
#include <linux/limits.h>
#include <linux/of.h>
+#include <linux/of_iommu.h>
/**
* of_get_dma_window - Parse *dma-window property and returns 0 if found.
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index d572863..7a3b928 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -380,14 +380,13 @@ int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
kmem_cache_destroy(l1cache);
return -ENOMEM;
}
- archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
+ archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
if (!archdata) {
kmem_cache_destroy(l1cache);
kmem_cache_destroy(l2cache);
return -ENOMEM;
}
spin_lock_init(&archdata->attach_lock);
- archdata->attached = NULL;
archdata->ipmmu = ipmmu;
ipmmu_archdata = archdata;
bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
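
The kmalloc()-to-kzalloc() switch is what allows dropping the explicit archdata->attached = NULL: kzalloc() returns zero-filled memory, so every field not assigned afterwards is already zero. Sketched with an illustrative struct:

#include <linux/slab.h>
#include <linux/string.h>

struct archdata_sketch {
	void *attached;	/* illustrative field */
};

static struct archdata_sketch *alloc_sketch(void)
{
	/* zeroed on return; the kmalloc() variant would need a
	 * following memset(p, 0, sizeof(*p)) for the same effect */
	return kzalloc(sizeof(struct archdata_sketch), GFP_KERNEL);
}
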
diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
index 8321f89..e3bc2e1 100644
--- a/drivers/iommu/shmobile-ipmmu.c
+++ b/drivers/iommu/shmobile-ipmmu.c
@@ -35,12 +35,12 @@ void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
if (!ipmmu)
return;
- mutex_lock(&ipmmu->flush_lock);
+ spin_lock(&ipmmu->flush_lock);
if (ipmmu->tlb_enabled)
ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
else
ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
- mutex_unlock(&ipmmu->flush_lock);
+ spin_unlock(&ipmmu->flush_lock);
}
void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
@@ -49,7 +49,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
if (!ipmmu)
return;
- mutex_lock(&ipmmu->flush_lock);
+ spin_lock(&ipmmu->flush_lock);
switch (size) {
default:
ipmmu->tlb_enabled = 0;
@@ -85,7 +85,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
}
ipmmu_reg_write(ipmmu, IMTTBR, phys);
ipmmu_reg_write(ipmmu, IMASID, asid);
- mutex_unlock(&ipmmu->flush_lock);
+ spin_unlock(&ipmmu->flush_lock);
}
static int ipmmu_probe(struct platform_device *pdev)
@@ -104,7 +104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot allocate device data\n");
return -ENOMEM;
}
- mutex_init(&ipmmu->flush_lock);
+ spin_lock_init(&ipmmu->flush_lock);
ipmmu->dev = &pdev->dev;
ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
diff --git a/drivers/iommu/shmobile-ipmmu.h b/drivers/iommu/shmobile-ipmmu.h
index 4d53684..9524743 100644
--- a/drivers/iommu/shmobile-ipmmu.h
+++ b/drivers/iommu/shmobile-ipmmu.h
@@ -14,7 +14,7 @@ struct shmobile_ipmmu {
struct device *dev;
void __iomem *ipmmu_base;
int tlb_enabled;
- struct mutex flush_lock;
+ spinlock_t flush_lock;
const char * const *dev_names;
unsigned int num_dev_names;
};
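
The mutex-to-spinlock conversion across these shmobile files is a context fix: the TLB maintenance paths can be entered from atomic context, where a mutex, which may sleep, is illegal. A sketch of the constraint, assuming an atomic caller:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(flush_lock_sketch);

static void tlb_flush_sketch(void)
{
	/* spinlocks busy-wait instead of sleeping, so this is safe
	 * under another spinlock or with interrupts disabled, where
	 * mutex_lock() would trigger "scheduling while atomic" */
	spin_lock(&flush_lock_sketch);
	/* program the IMCTR1 flush bits here */
	spin_unlock(&flush_lock_sketch);
}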