Diffstat (limited to 'drivers')
-rw-r--r--   drivers/iommu/amd_iommu.c            | 145
-rw-r--r--   drivers/iommu/amd_iommu_init.c       | 154
-rw-r--r--   drivers/iommu/amd_iommu_types.h      |   2
-rw-r--r--   drivers/iommu/dmar.c                 |  23
-rw-r--r--   drivers/iommu/exynos-iommu.c         |   2
-rw-r--r--   drivers/iommu/intel-iommu.c          |  24
-rw-r--r--   drivers/iommu/intel_irq_remapping.c  |  10
-rw-r--r--   drivers/iommu/iommu.c                |  37
-rw-r--r--   drivers/iommu/irq_remapping.c        |   6
-rw-r--r--   drivers/iommu/irq_remapping.h        |   2
-rw-r--r--   drivers/iommu/msm_iommu.c            |   2
-rw-r--r--   drivers/iommu/omap-iommu.c           |   2
-rw-r--r--   drivers/iommu/pci.h                  |  29
-rw-r--r--   drivers/iommu/shmobile-iommu.c       |   2
-rw-r--r--   drivers/iommu/tegra-gart.c           |   5
-rw-r--r--   drivers/iommu/tegra-smmu.c           |   5
16 files changed, 328 insertions(+), 122 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8301837..21d02b0 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -46,6 +46,7 @@
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
+#include "pci.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -263,12 +264,6 @@ static bool check_device(struct device *dev)
return true;
}
-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
- pci_dev_put(*from);
- *from = to;
-}
-
static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
{
while (!bus->self) {
@@ -701,9 +696,6 @@ retry:
static void iommu_poll_events(struct amd_iommu *iommu)
{
u32 head, tail;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
@@ -714,8 +706,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
}
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
}
static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
@@ -740,17 +730,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
- unsigned long flags;
u32 head, tail;
if (iommu->ppr_log == NULL)
return;
- /* enable ppr interrupts again */
- writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
- spin_lock_irqsave(&iommu->lock, flags);
-
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
@@ -786,34 +770,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
- /*
- * Release iommu->lock because ppr-handling might need to
- * re-acquire it
- */
- spin_unlock_irqrestore(&iommu->lock, flags);
-
/* Handle PPR entry */
iommu_handle_ppr_entry(iommu, entry);
- spin_lock_irqsave(&iommu->lock, flags);
-
/* Refresh ring-buffer information */
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
}
-
- spin_unlock_irqrestore(&iommu->lock, flags);
}
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = (struct amd_iommu *) data;
+ u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- for_each_iommu(iommu) {
- iommu_poll_events(iommu);
- iommu_poll_ppr_log(iommu);
- }
+ while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
+ /* Enable EVT and PPR interrupts again */
+ writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+ iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (status & MMIO_STATUS_EVT_INT_MASK) {
+ pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
+ iommu_poll_events(iommu);
+ }
+
+ if (status & MMIO_STATUS_PPR_INT_MASK) {
+ pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
+ iommu_poll_ppr_log(iommu);
+ }
+
+ /*
+ * Hardware bug: ERBT1312
+ * When re-enabling interrupt (by writing 1
+ * to clear the bit), the hardware might also try to set
+ * the interrupt bit in the event status register.
+ * In this scenario, the bit will be set, and disable
+ * subsequent interrupts.
+ *
+ * Workaround: The IOMMU driver should read back the
+ * status register and check if the interrupt bits are cleared.
+ * If not, driver will need to go through the interrupt handler
+ * again and re-clear the bits
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ }
return IRQ_HANDLED;
}
@@ -2839,24 +2839,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
}
/*
- * This is a special map_sg function which is used if we should map a
- * device which is not handled by an AMD IOMMU in the system.
- */
-static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
- int nelems, int dir)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sglist, s, nelems, i) {
- s->dma_address = (dma_addr_t)sg_phys(s);
- s->dma_length = s->length;
- }
-
- return nelems;
-}
-
-/*
* The exported map_sg function for dma_ops (handles scatter-gather
* lists).
*/
@@ -2875,9 +2857,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
INC_STATS_COUNTER(cnt_map_sg);
domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL)
- return map_sg_no_iommu(dev, sglist, nelems, dir);
- else if (IS_ERR(domain))
+ if (IS_ERR(domain))
return 0;
dma_mask = *dev->dma_mask;
@@ -3410,7 +3390,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
- unsigned long iova)
+ dma_addr_t iova)
{
struct protection_domain *domain = dom->priv;
unsigned long offset_mask;
@@ -3947,6 +3927,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
if (!table)
goto out;
+ /* Initialize table spin-lock */
+ spin_lock_init(&table->lock);
+
if (ioapic)
/* Keep the first 32 indexes free for IOAPIC interrupts */
table->min_index = 32;
@@ -4007,7 +3990,7 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
c = 0;
if (c == count) {
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
for (; c != 0; --c)
table->table[index - c + 1] = IRTE_ALLOCATED;
@@ -4015,9 +3998,9 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
index -= count - 1;
cfg->remapped = 1;
- irte_info = &cfg->irq_2_iommu;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index;
+ irte_info = &cfg->irq_2_irte;
+ irte_info->devid = devid;
+ irte_info->index = index;
goto out;
}
@@ -4098,7 +4081,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
struct io_apic_irq_attr *attr)
{
struct irq_remap_table *table;
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
union irte irte;
int ioapic_id;
@@ -4110,7 +4093,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
if (!cfg)
return -EINVAL;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
ioapic_id = mpc_ioapic_id(attr->ioapic);
devid = get_ioapic_devid(ioapic_id);
@@ -4125,8 +4108,8 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
/* Setup IRQ remapping info */
cfg->remapped = 1;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index;
+ irte_info->devid = devid;
+ irte_info->index = index;
/* Setup IRTE for IOMMU */
irte.val = 0;
@@ -4160,7 +4143,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
static int set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
unsigned int dest, irq;
struct irq_cfg *cfg;
union irte irte;
@@ -4171,12 +4154,12 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
cfg = data->chip_data;
irq = data->irq;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
if (!cpumask_intersects(mask, cpu_online_mask))
return -EINVAL;
- if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
+ if (get_irte(irte_info->devid, irte_info->index, &irte))
return -EBUSY;
if (assign_irq_vector(irq, cfg, mask))
@@ -4192,7 +4175,7 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
irte.fields.vector = cfg->vector;
irte.fields.destination = dest;
- modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+ modify_irte(irte_info->devid, irte_info->index, irte);
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
@@ -4204,16 +4187,16 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
static int free_irq(int irq)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
cfg = irq_get_chip_data(irq);
if (!cfg)
return -EINVAL;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
- free_irte(irte_info->sub_handle, irte_info->irte_index);
+ free_irte(irte_info->devid, irte_info->index);
return 0;
}
@@ -4222,7 +4205,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
struct msi_msg *msg, u8 hpet_id)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
union irte irte;
@@ -4230,7 +4213,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
if (!cfg)
return;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
irte.val = 0;
irte.fields.vector = cfg->vector;
@@ -4239,11 +4222,11 @@ static void compose_msi_msg(struct pci_dev *pdev,
irte.fields.dm = apic->irq_dest_mode;
irte.fields.valid = 1;
- modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+ modify_irte(irte_info->devid, irte_info->index, irte);
msg->address_hi = MSI_ADDR_BASE_HI;
msg->address_lo = MSI_ADDR_BASE_LO;
- msg->data = irte_info->irte_index;
+ msg->data = irte_info->index;
}
static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
@@ -4268,7 +4251,7 @@ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
int index, int offset)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
u16 devid;
@@ -4283,18 +4266,18 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
return 0;
devid = get_device_id(&pdev->dev);
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
cfg->remapped = 1;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index + offset;
+ irte_info->devid = devid;
+ irte_info->index = index + offset;
return 0;
}
static int setup_hpet_msi(unsigned int irq, unsigned int id)
{
- struct irq_2_iommu *irte_info;
+ struct irq_2_irte *irte_info;
struct irq_cfg *cfg;
int index, devid;
@@ -4302,7 +4285,7 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
if (!cfg)
return -EINVAL;
- irte_info = &cfg->irq_2_iommu;
+ irte_info = &cfg->irq_2_irte;
devid = get_hpet_devid(id);
if (devid < 0)
return devid;
@@ -4312,8 +4295,8 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
return index;
cfg->remapped = 1;
- irte_info->sub_handle = devid;
- irte_info->irte_index = index;
+ irte_info->devid = devid;
+ irte_info->index = index;
return 0;
}
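Note: the irq_2_iommu -> irq_2_irte rename above assumes a small per-IRQ container with explicit devid/index fields hanging off struct irq_cfg. The real definition lives in the x86 interrupt headers, so the field comments below are an assumption, not part of this diff; this is just the shape the AMD code now expects:

	struct irq_2_irte {
		u16 devid;	/* requestor id that owns the remap table */
		u16 index;	/* slot within that remap table */
	};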
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 2f46881..bf51abb 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -213,6 +213,14 @@ enum iommu_init_state {
IOMMU_INIT_ERROR,
};
+/* Early ioapic and hpet maps from kernel command line */
+#define EARLY_MAP_SIZE 4
+static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
+static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
+static int __initdata early_ioapic_map_size;
+static int __initdata early_hpet_map_size;
+static bool __initdata cmdline_maps;
+
static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
@@ -703,31 +711,66 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
set_iommu_for_device(iommu, devid);
}
-static int add_special_device(u8 type, u8 id, u16 devid)
+static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
{
struct devid_map *entry;
struct list_head *list;
- if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
+ if (type == IVHD_SPECIAL_IOAPIC)
+ list = &ioapic_map;
+ else if (type == IVHD_SPECIAL_HPET)
+ list = &hpet_map;
+ else
return -EINVAL;
+ list_for_each_entry(entry, list, list) {
+ if (!(entry->id == id && entry->cmd_line))
+ continue;
+
+ pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
+ type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
+
+ return 0;
+ }
+
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- entry->id = id;
- entry->devid = devid;
-
- if (type == IVHD_SPECIAL_IOAPIC)
- list = &ioapic_map;
- else
- list = &hpet_map;
+ entry->id = id;
+ entry->devid = devid;
+ entry->cmd_line = cmd_line;
list_add_tail(&entry->list, list);
return 0;
}
+static int __init add_early_maps(void)
+{
+ int i, ret;
+
+ for (i = 0; i < early_ioapic_map_size; ++i) {
+ ret = add_special_device(IVHD_SPECIAL_IOAPIC,
+ early_ioapic_map[i].id,
+ early_ioapic_map[i].devid,
+ early_ioapic_map[i].cmd_line);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < early_hpet_map_size; ++i) {
+ ret = add_special_device(IVHD_SPECIAL_HPET,
+ early_hpet_map[i].id,
+ early_hpet_map[i].devid,
+ early_hpet_map[i].cmd_line);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* Reads the device exclusion range from ACPI and initializes the IOMMU with
* it
@@ -764,6 +807,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
u32 dev_i, ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
+ int ret;
+
+
+ ret = add_early_maps();
+ if (ret)
+ return ret;
/*
* First save the recommended feature enable bits from ACPI
@@ -929,7 +978,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
PCI_FUNC(devid));
set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
- ret = add_special_device(type, handle, devid);
+ ret = add_special_device(type, handle, devid, false);
if (ret)
return ret;
break;
@@ -1275,7 +1324,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
amd_iommu_int_handler,
amd_iommu_int_thread,
0, "AMD-Vi",
- iommu->dev);
+ iommu);
if (r) {
pci_disable_msi(iommu->dev);
@@ -1638,18 +1687,28 @@ static void __init free_on_init_error(void)
static bool __init check_ioapic_information(void)
{
+ const char *fw_bug = FW_BUG;
bool ret, has_sb_ioapic;
int idx;
has_sb_ioapic = false;
ret = false;
+ /*
+ * If we have map overrides on the kernel command line the
+ * messages in this function might not describe firmware bugs
+ * anymore - so be careful
+ */
+ if (cmdline_maps)
+ fw_bug = "";
+
for (idx = 0; idx < nr_ioapics; idx++) {
int devid, id = mpc_ioapic_id(idx);
devid = get_ioapic_devid(id);
if (devid < 0) {
- pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
+ pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
+ fw_bug, id);
ret = false;
} else if (devid == IOAPIC_SB_DEVID) {
has_sb_ioapic = true;
@@ -1666,11 +1725,11 @@ static bool __init check_ioapic_information(void)
* when the BIOS is buggy and provides us the wrong
* device id for the IOAPIC in the system.
*/
- pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
+ pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
}
if (!ret)
- pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");
+ pr_err("AMD-Vi: Disabling interrupt remapping\n");
return ret;
}
@@ -1801,6 +1860,7 @@ static int __init early_amd_iommu_init(void)
* Interrupt remapping enabled, create kmem_cache for the
* remapping tables.
*/
+ ret = -ENOMEM;
amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
MAX_IRQS_PER_TABLE * sizeof(u32),
IRQ_TABLE_ALIGNMENT,
@@ -2097,8 +2157,70 @@ static int __init parse_amd_iommu_options(char *str)
return 1;
}
-__setup("amd_iommu_dump", parse_amd_iommu_dump);
-__setup("amd_iommu=", parse_amd_iommu_options);
+static int __init parse_ivrs_ioapic(char *str)
+{
+ unsigned int bus, dev, fn;
+ int ret, id, i;
+ u16 devid;
+
+ ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+ if (ret != 4) {
+ pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
+ return 1;
+ }
+
+ if (early_ioapic_map_size == EARLY_MAP_SIZE) {
+ pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
+ str);
+ return 1;
+ }
+
+ devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+ cmdline_maps = true;
+ i = early_ioapic_map_size++;
+ early_ioapic_map[i].id = id;
+ early_ioapic_map[i].devid = devid;
+ early_ioapic_map[i].cmd_line = true;
+
+ return 1;
+}
+
+static int __init parse_ivrs_hpet(char *str)
+{
+ unsigned int bus, dev, fn;
+ int ret, id, i;
+ u16 devid;
+
+ ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+ if (ret != 4) {
+ pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
+ return 1;
+ }
+
+ if (early_hpet_map_size == EARLY_MAP_SIZE) {
+ pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
+ str);
+ return 1;
+ }
+
+ devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+ cmdline_maps = true;
+ i = early_hpet_map_size++;
+ early_hpet_map[i].id = id;
+ early_hpet_map[i].devid = devid;
+ early_hpet_map[i].cmd_line = true;
+
+ return 1;
+}
+
+__setup("amd_iommu_dump", parse_amd_iommu_dump);
+__setup("amd_iommu=", parse_amd_iommu_options);
+__setup("ivrs_ioapic", parse_ivrs_ioapic);
+__setup("ivrs_hpet", parse_ivrs_hpet);
IOMMU_INIT_FINISH(amd_iommu_detect,
gart_iommu_hole_init,
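Note: the two new parsers accept entries of the form ivrs_ioapic[ID]=BUS:DEV.FN and ivrs_hpet[ID]=BUS:DEV.FN and pack the PCI address into the 16-bit requestor id used by the IOMMU. A worked example of the encoding (the values are illustrative, not taken from the patch):

	/* ivrs_ioapic[32]=00:14.0  ->  bus 0x00, dev 0x14, fn 0x0 */
	u16 devid = ((0x00 & 0xff) << 8) | ((0x14 & 0x1f) << 3) | (0x0 & 0x7);
	/* devid == 0x00a0, i.e. requestor id 00:14.0 */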
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index ec36cf6..0285a21 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -100,6 +100,7 @@
#define PASID_MASK 0x000fffff
/* MMIO status bits */
+#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
@@ -589,6 +590,7 @@ struct devid_map {
struct list_head list;
u8 id;
u16 devid;
+ bool cmd_line;
};
/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index b8008f6..a7967ce 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -646,7 +646,7 @@ out:
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
- u32 ver;
+ u32 ver, sts;
static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
@@ -696,6 +696,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
(unsigned long long)iommu->cap,
(unsigned long long)iommu->ecap);
+ /* Reflect status in gcmd */
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
+ if (sts & DMA_GSTS_IRES)
+ iommu->gcmd |= DMA_GCMD_IRE;
+ if (sts & DMA_GSTS_TES)
+ iommu->gcmd |= DMA_GCMD_TE;
+ if (sts & DMA_GSTS_QIES)
+ iommu->gcmd |= DMA_GCMD_QIE;
+
raw_spin_lock_init(&iommu->register_lock);
drhd->iommu = iommu;
@@ -1205,7 +1214,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
/* TBD: ignore advanced fault log currently */
if (!(fault_status & DMA_FSTS_PPF))
- goto clear_rest;
+ goto unlock_exit;
fault_index = dma_fsts_fault_record_index(fault_status);
reg = cap_fault_reg_offset(iommu->cap);
@@ -1246,11 +1255,10 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
fault_index = 0;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
}
-clear_rest:
- /* clear all the other faults */
- fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- writel(fault_status, iommu->reg + DMAR_FSTS_REG);
+ writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
+
+unlock_exit:
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
return IRQ_HANDLED;
}
@@ -1298,6 +1306,7 @@ int __init enable_drhd_fault_handling(void)
for_each_drhd_unit(drhd) {
int ret;
struct intel_iommu *iommu = drhd->iommu;
+ u32 fault_status;
ret = dmar_set_interrupt(iommu);
if (ret) {
@@ -1310,6 +1319,8 @@ int __init enable_drhd_fault_handling(void)
* Clear any previous faults.
*/
dmar_fault(iommu->irq, iommu);
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+ writel(fault_status, iommu->reg + DMAR_FSTS_REG);
}
return 0;
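Note: mirroring the GSTS bits into iommu->gcmd matters because later command-register updates rewrite the cached value wholesale; if gcmd does not reflect what firmware or a previous kernel left enabled, the next write can silently turn an active feature off. A rough illustration of the pattern the driver relies on (not a quote from this patch):

	/* Assumed pattern: every GCMD update writes back the full cached set */
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
	/* Without the sts -> gcmd sync above, TE/IRE bits already enabled
	 * at OS handover would be dropped by this write. */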
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 238a3ca..3f32d64 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1027,7 +1027,7 @@ done:
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+ dma_addr_t iova)
{
struct exynos_iommu_domain *priv = domain->priv;
unsigned long *entry;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0099667..b4f0e28 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -47,6 +47,7 @@
#include <asm/iommu.h>
#include "irq_remapping.h"
+#include "pci.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -3665,6 +3666,7 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
int ret = 0;
+ struct dmar_drhd_unit *drhd;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3675,6 +3677,20 @@ int __init intel_iommu_init(void)
return -ENODEV;
}
+ /*
+ * Disable translation if already enabled prior to OS handover.
+ */
+ for_each_drhd_unit(drhd) {
+ struct intel_iommu *iommu;
+
+ if (drhd->ignored)
+ continue;
+
+ iommu = drhd->iommu;
+ if (iommu->gcmd & DMA_GCMD_TE)
+ iommu_disable_translation(iommu);
+ }
+
if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
@@ -4111,7 +4127,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+ dma_addr_t iova)
{
struct dmar_domain *dmar_domain = domain->priv;
struct dma_pte *pte;
@@ -4137,12 +4153,6 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
- pci_dev_put(*from);
- *from = to;
-}
-
#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
static int intel_iommu_add_device(struct device *dev)
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f3b8f23..5b19b2d 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -524,6 +524,16 @@ static int __init intel_irq_remapping_supported(void)
if (disable_irq_remap)
return 0;
+ if (irq_remap_broken) {
+ WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+ "This system BIOS has enabled interrupt remapping\n"
+ "on a chipset that contains an erratum making that\n"
+ "feature unstable. To maintain system stability\n"
+ "interrupt remapping is being disabled. Please\n"
+ "contact your BIOS vendor for an update\n");
+ disable_irq_remap = 1;
+ return 0;
+ }
if (!dmar_ir_support())
return 0;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b972d43..d8f98b1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -204,6 +204,35 @@ again:
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
+struct iommu_group *iommu_group_get_by_id(int id)
+{
+ struct kobject *group_kobj;
+ struct iommu_group *group;
+ const char *name;
+
+ if (!iommu_group_kset)
+ return NULL;
+
+ name = kasprintf(GFP_KERNEL, "%d", id);
+ if (!name)
+ return NULL;
+
+ group_kobj = kset_find_obj(iommu_group_kset, name);
+ kfree(name);
+
+ if (!group_kobj)
+ return NULL;
+
+ group = container_of(group_kobj, struct iommu_group, kobj);
+ BUG_ON(group->id != id);
+
+ kobject_get(group->devices_kobj);
+ kobject_put(&group->kobj);
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
+
/**
* iommu_group_get_iommudata - retrieve iommu_data registered for a group
* @group: the group
@@ -706,8 +735,7 @@ void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_detach_group);
-phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
if (unlikely(domain->ops->iova_to_phys == NULL))
return 0;
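Note: the unsigned long -> dma_addr_t switch here (and in every driver callback in this series) matters on 32-bit kernels where dma_addr_t is 64-bit: an IOVA above 4GB would previously have been truncated at the API boundary. A hedged example of the case this fixes, assuming a valid domain pointer:

	/* Illustrative only: with a 64-bit dma_addr_t on a 32-bit kernel,
	 * this IOVA no longer loses its upper bits on the way down. */
	phys_addr_t phys = iommu_iova_to_phys(domain, (dma_addr_t)0x1ffff0000ULL);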
@@ -854,12 +882,13 @@ EXPORT_SYMBOL_GPL(iommu_unmap);
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t paddr, u64 size)
+ phys_addr_t paddr, u64 size, int prot)
{
if (unlikely(domain->ops->domain_window_enable == NULL))
return -ENODEV;
- return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size);
+ return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
+ prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
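Note: iommu_group_get_by_id() returns the group with an elevated reference (it takes devices_kobj and drops the lookup reference internally), so callers are expected to balance it with iommu_group_put(). A minimal usage sketch under that assumption, with error handling trimmed:

	struct iommu_group *group = iommu_group_get_by_id(group_id);
	if (group) {
		/* ... attach the group to a domain, inspect its devices, ... */
		iommu_group_put(group);	/* releases the reference taken above */
	}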
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 7c11ff3..dcfea4e 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -18,6 +18,7 @@
int irq_remapping_enabled;
int disable_irq_remap;
+int irq_remap_broken;
int disable_sourceid_checking;
int no_x2apic_optout;
@@ -210,6 +211,11 @@ void __init setup_irq_remapping_ops(void)
#endif
}
+void set_irq_remapping_broken(void)
+{
+ irq_remap_broken = 1;
+}
+
int irq_remapping_supported(void)
{
if (disable_irq_remap)
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index ecb6376..90c4dae 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -32,6 +32,7 @@ struct pci_dev;
struct msi_msg;
extern int disable_irq_remap;
+extern int irq_remap_broken;
extern int disable_sourceid_checking;
extern int no_x2apic_optout;
extern int irq_remapping_enabled;
@@ -89,6 +90,7 @@ extern struct irq_remap_ops amd_iommu_irq_ops;
#define irq_remapping_enabled 0
#define disable_irq_remap 1
+#define irq_remap_broken 0
#endif /* CONFIG_IRQ_REMAP */
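Note: set_irq_remapping_broken() gives platform/quirk code a hook to flag chipsets whose interrupt-remapping erratum makes the feature unusable; intel_irq_remapping_supported() then refuses to enable it and taints with TAINT_FIRMWARE_WORKAROUND. A hedged sketch of how an early platform quirk might use the hook; the quirk function and its detection helper are assumptions, not part of this patch:

	static void __init example_chipset_quirk(int num, int slot, int func)
	{
		/* Hypothetical detection of an affected chipset revision */
		if (chipset_has_ir_erratum(num, slot, func))
			set_irq_remapping_broken();
	}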
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6a8870a..8ab4f41 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -554,7 +554,7 @@ fail:
}
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long va)
+ dma_addr_t va)
{
struct msm_priv *priv;
struct msm_iommu_drvdata *iommu_drvdata;
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6ac02fa..e02e5d7 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1219,7 +1219,7 @@ static void omap_iommu_domain_destroy(struct iommu_domain *domain)
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long da)
+ dma_addr_t da)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
diff --git a/drivers/iommu/pci.h b/drivers/iommu/pci.h
new file mode 100644
index 0000000..352d80a
--- /dev/null
+++ b/drivers/iommu/pci.h
@@ -0,0 +1,29 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef __IOMMU_PCI_H
+#define __IOMMU_PCI_H
+
+/* Helper function for swapping pci device reference */
+static inline void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
+{
+ pci_dev_put(*from);
+ *from = to;
+}
+
+#endif /* __IOMMU_PCI_H */
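Note: swap_pci_ref() is a small refcount helper for walking up a PCI hierarchy: it drops the reference currently held in *from and replaces it with an already-referenced device. A minimal sketch of the intended walk pattern, assuming a reference on pdev is already held:

	/* Walk toward the root bus, always holding exactly one reference. */
	while (!pci_is_root_bus(pdev->bus)) {
		struct pci_dev *parent = pci_dev_get(pdev->bus->self);

		swap_pci_ref(&pdev, parent);	/* puts old pdev, keeps parent */
	}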
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index b6e8b57..d572863 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -296,7 +296,7 @@ done:
}
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+ dma_addr_t iova)
{
struct shmobile_iommu_domain *sh_domain = domain->priv;
uint32_t l1entry = 0, l2entry = 0;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 8643757..108c0e9 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -279,7 +279,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
}
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+ dma_addr_t iova)
{
struct gart_device *gart = domain->priv;
unsigned long pte;
@@ -295,7 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
pa = (pte & GART_PAGE_MASK);
if (!pfn_valid(__phys_to_pfn(pa))) {
- dev_err(gart->dev, "No entry for %08lx:%08x\n", iova, pa);
+ dev_err(gart->dev, "No entry for %08llx:%08x\n",
+ (unsigned long long)iova, pa);
gart_dump_table(gart);
return -EINVAL;
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index b34e5fd..f6f120e 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -757,7 +757,7 @@ static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
}
static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
+ dma_addr_t iova)
{
struct smmu_as *as = domain->priv;
unsigned long *pte;
@@ -772,7 +772,8 @@ static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
pfn = *pte & SMMU_PFN_MASK;
WARN_ON(!pfn_valid(pfn));
dev_dbg(as->smmu->dev,
- "iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);
+ "iova:%08llx pfn:%08lx asid:%d\n", (unsigned long long)iova,
+ pfn, as->asid);
spin_unlock_irqrestore(&as->lock, flags);
return PFN_PHYS(pfn);