Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c          |   8
-rw-r--r--  arch/x86/mm/init_64.c        |   2
-rw-r--r--  arch/x86/mm/iomap_32.c       |  11
-rw-r--r--  arch/x86/mm/ioremap.c        |  19
-rw-r--r--  arch/x86/mm/kmmio.c          | 164
-rw-r--r--  arch/x86/mm/numa_64.c        |   2
-rw-r--r--  arch/x86/mm/pageattr.c       |  39
-rw-r--r--  arch/x86/mm/pat.c            |  85
-rw-r--r--  arch/x86/mm/testmmiotrace.c  |  70
9 files changed, 257 insertions, 143 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 90dfae5..c76ef1d 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -603,8 +603,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
si_code = SEGV_MAPERR;
- if (notify_page_fault(regs))
- return;
if (unlikely(kmmio_fault(regs, address)))
return;
@@ -634,6 +632,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (spurious_fault(address, error_code))
return;
+ /* kprobes don't want to hook the spurious faults. */
+ if (notify_page_fault(regs))
+ return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
* fault we could otherwise deadlock.
@@ -641,6 +642,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
goto bad_area_nosemaphore;
}
+ /* kprobes don't want to hook the spurious faults. */
+ if (notify_page_fault(regs))
+ return;
/*
* It's safe to allow irq's after cr2 has been saved and the
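The fault.c hunks above only reorder checks: the kprobes notification now runs after the spurious-fault filter, so kprobes never sees faults the kernel resolves as spurious. A minimal userspace sketch of that ordering, with stub predicates standing in for the real notify_page_fault()/spurious_fault()/kmmio_fault() calls (all names and return values here are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real checks; behaviour is made up. */
static bool kmmio_fault(unsigned long addr)    { (void)addr; return false; }
static bool spurious_fault(unsigned long addr) { return addr == 0x1000; }
static bool notify_kprobes(unsigned long addr)
{
	printf("kprobes notified for %#lx\n", addr);
	return false;
}

/* Sketch of the reordered kernel-mode fast path: spurious faults are
 * filtered out before kprobes ever sees the event. */
static void handle_kernel_fault(unsigned long addr)
{
	if (kmmio_fault(addr))
		return;
	if (spurious_fault(addr))
		return;                /* kprobes never hooked for this one */
	if (notify_kprobes(addr))
		return;
	printf("fall through to normal fault handling for %#lx\n", addr);
}

int main(void)
{
	handle_kernel_fault(0x1000);   /* spurious: no kprobes callout  */
	handle_kernel_fault(0x2000);   /* non-spurious: kprobes notified */
	return 0;
}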
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e6d36b4..b135225 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -714,6 +714,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
pos = start_pfn << PAGE_SHIFT;
end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
<< (PMD_SHIFT - PAGE_SHIFT);
+ if (end_pfn > (end >> PAGE_SHIFT))
+ end_pfn = end >> PAGE_SHIFT;
if (start_pfn < end_pfn) {
nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
pos = end_pfn << PAGE_SHIFT;
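The init_64.c change guards against end_pfn overshooting the requested range when pos is rounded up to the next 2 MB (PMD) boundary. A small standalone sketch of that arithmetic, using the usual x86-64 constants (PAGE_SHIFT 12, PMD_SHIFT 21) and made-up sample addresses purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PMD_SIZE   (1UL << PMD_SHIFT)

int main(void)
{
	unsigned long pos = 0x00280000;  /* example start at 2.5 MB      */
	unsigned long end = 0x00300000;  /* mapping ends mid-PMD at 3 MB */

	/* Round pos up to the next PMD boundary, expressed as a pfn. */
	unsigned long end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
				<< (PMD_SHIFT - PAGE_SHIFT);

	printf("rounded end_pfn = 0x%lx (covers up to 0x%lx)\n",
	       end_pfn, end_pfn << PAGE_SHIFT);   /* overshoots to 4 MB */

	/* The added clamp keeps the range from spilling past 'end'. */
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;

	printf("clamped end_pfn = 0x%lx (covers up to 0x%lx)\n",
	       end_pfn, end_pfn << PAGE_SHIFT);   /* stops at 3 MB */
	return 0;
}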
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index ca53224..04102d4 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -20,6 +20,17 @@
#include <asm/pat.h>
#include <linux/module.h>
+int is_io_mapping_possible(resource_size_t base, unsigned long size)
+{
+#ifndef CONFIG_X86_PAE
+ /* There is no way to map greater than 1 << 32 address without PAE */
+ if (base + size > 0x100000000ULL)
+ return 0;
+#endif
+ return 1;
+}
+EXPORT_SYMBOL_GPL(is_io_mapping_possible);
+
/* Map 'pfn' using fixed map 'type' and protections 'prot'
*/
void *
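The new is_io_mapping_possible() simply rejects resources that end above the 4 GB mark when the kernel is built without PAE, since 32-bit page table entries cannot address them. A hedged userspace sketch of the same check (the NO_PAE macro and the sample BAR values are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define NO_PAE 1  /* pretend CONFIG_X86_PAE is not set */

/* Mirror of the check: without PAE, nothing past 1 << 32 is mappable. */
static int io_mapping_possible(uint64_t base, unsigned long size)
{
#if NO_PAE
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}

int main(void)
{
	/* A region that straddles the 4 GB boundary fails without PAE. */
	printf("%d\n", io_mapping_possible(0xFFFF0000ULL, 0x20000)); /* 0 */
	printf("%d\n", io_mapping_possible(0xE0000000ULL, 0x10000)); /* 1 */
	return 0;
}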
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index af750ab..f45d5e2 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -134,25 +134,6 @@ int page_is_ram(unsigned long pagenr)
return 0;
}
-int pagerange_is_ram(unsigned long start, unsigned long end)
-{
- int ram_page = 0, not_rampage = 0;
- unsigned long page_nr;
-
- for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
- ++page_nr) {
- if (page_is_ram(page_nr))
- ram_page = 1;
- else
- not_rampage = 1;
-
- if (ram_page == not_rampage)
- return -1;
- }
-
- return ram_page;
-}
-
/*
* Fix up the linear direct mapping of the kernel to avoid cache attribute
* conflicts.
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 93d82038..6a518dd 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -32,11 +32,14 @@ struct kmmio_fault_page {
struct list_head list;
struct kmmio_fault_page *release_next;
unsigned long page; /* location of the fault page */
+ bool old_presence; /* page presence prior to arming */
+ bool armed;
/*
* Number of times this page has been registered as a part
* of a probe. If zero, page is disarmed and this may be freed.
- * Used only by writers (RCU).
+ * Used only by writers (RCU) and post_kmmio_handler().
+ * Protected by kmmio_lock, when linked into kmmio_page_table.
*/
int count;
};
@@ -105,57 +108,85 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
return NULL;
}
-static void set_page_present(unsigned long addr, bool present,
- unsigned int *pglevel)
+static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
+{
+ pmdval_t v = pmd_val(*pmd);
+ *old = !!(v & _PAGE_PRESENT);
+ v &= ~_PAGE_PRESENT;
+ if (present)
+ v |= _PAGE_PRESENT;
+ set_pmd(pmd, __pmd(v));
+}
+
+static void set_pte_presence(pte_t *pte, bool present, bool *old)
+{
+ pteval_t v = pte_val(*pte);
+ *old = !!(v & _PAGE_PRESENT);
+ v &= ~_PAGE_PRESENT;
+ if (present)
+ v |= _PAGE_PRESENT;
+ set_pte_atomic(pte, __pte(v));
+}
+
+static int set_page_presence(unsigned long addr, bool present, bool *old)
{
- pteval_t pteval;
- pmdval_t pmdval;
unsigned int level;
- pmd_t *pmd;
pte_t *pte = lookup_address(addr, &level);
if (!pte) {
pr_err("kmmio: no pte for page 0x%08lx\n", addr);
- return;
+ return -1;
}
- if (pglevel)
- *pglevel = level;
-
switch (level) {
case PG_LEVEL_2M:
- pmd = (pmd_t *)pte;
- pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
- if (present)
- pmdval |= _PAGE_PRESENT;
- set_pmd(pmd, __pmd(pmdval));
+ set_pmd_presence((pmd_t *)pte, present, old);
break;
-
case PG_LEVEL_4K:
- pteval = pte_val(*pte) & ~_PAGE_PRESENT;
- if (present)
- pteval |= _PAGE_PRESENT;
- set_pte_atomic(pte, __pte(pteval));
+ set_pte_presence(pte, present, old);
break;
-
default:
pr_err("kmmio: unexpected page level 0x%x.\n", level);
- return;
+ return -1;
}
__flush_tlb_one(addr);
+ return 0;
}
-/** Mark the given page as not present. Access to it will trigger a fault. */
-static void arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/*
+ * Mark the given page as not present. Access to it will trigger a fault.
+ *
+ * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
+ * protection is ignored here. RCU read lock is assumed held, so the struct
+ * will not disappear unexpectedly. Furthermore, the caller must guarantee,
+ * that double arming the same virtual address (page) cannot occur.
+ *
+ * Double disarming on the other hand is allowed, and may occur when a fault
+ * and mmiotrace shutdown happen simultaneously.
+ */
+static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
- set_page_present(page & PAGE_MASK, false, pglevel);
+ int ret;
+ WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
+ if (f->armed) {
+ pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
+ f->page, f->count, f->old_presence);
+ }
+ ret = set_page_presence(f->page, false, &f->old_presence);
+ WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
+ f->armed = true;
+ return ret;
}
-/** Mark the given page as present. */
-static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/** Restore the given page to saved presence state. */
+static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
- set_page_present(page & PAGE_MASK, true, pglevel);
+ bool tmp;
+ int ret = set_page_presence(f->page, f->old_presence, &tmp);
+ WARN_ONCE(ret < 0,
+ KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+ f->armed = false;
}
/*
@@ -202,28 +233,32 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
ctx = &get_cpu_var(kmmio_ctx);
if (ctx->active) {
- disarm_kmmio_fault_page(faultpage->page, NULL);
if (addr == ctx->addr) {
/*
- * On SMP we sometimes get recursive probe hits on the
- * same address. Context is already saved, fall out.
+ * A second fault on the same page means some other
+ * condition needs handling by do_page_fault(), the
+ * page really not being present is the most common.
*/
- pr_debug("kmmio: duplicate probe hit on CPU %d, for "
- "address 0x%08lx.\n",
- smp_processor_id(), addr);
- ret = 1;
- goto no_kmmio_ctx;
- }
- /*
- * Prevent overwriting already in-flight context.
- * This should not happen, let's hope disarming at least
- * prevents a panic.
- */
- pr_emerg("kmmio: recursive probe hit on CPU %d, "
+ pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
+ addr, smp_processor_id());
+
+ if (!faultpage->old_presence)
+ pr_info("kmmio: unexpected secondary hit for "
+ "address 0x%08lx on CPU %d.\n", addr,
+ smp_processor_id());
+ } else {
+ /*
+ * Prevent overwriting already in-flight context.
+ * This should not happen, let's hope disarming at
+ * least prevents a panic.
+ */
+ pr_emerg("kmmio: recursive probe hit on CPU %d, "
"for address 0x%08lx. Ignoring.\n",
smp_processor_id(), addr);
- pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
- ctx->addr);
+ pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
+ ctx->addr);
+ disarm_kmmio_fault_page(faultpage);
+ }
goto no_kmmio_ctx;
}
ctx->active++;
@@ -244,7 +279,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
regs->flags &= ~X86_EFLAGS_IF;
/* Now we set present bit in PTE and single step. */
- disarm_kmmio_fault_page(ctx->fpage->page, NULL);
+ disarm_kmmio_fault_page(ctx->fpage);
/*
* If another cpu accesses the same page while we are stepping,
@@ -275,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
if (!ctx->active) {
- pr_debug("kmmio: spurious debug trap on CPU %d.\n",
+ pr_warning("kmmio: spurious debug trap on CPU %d.\n",
smp_processor_id());
goto out;
}
@@ -283,7 +318,11 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
if (ctx->probe && ctx->probe->post_handler)
ctx->probe->post_handler(ctx->probe, condition, regs);
- arm_kmmio_fault_page(ctx->fpage->page, NULL);
+ /* Prevent racing against release_kmmio_fault_page(). */
+ spin_lock(&kmmio_lock);
+ if (ctx->fpage->count)
+ arm_kmmio_fault_page(ctx->fpage);
+ spin_unlock(&kmmio_lock);
regs->flags &= ~X86_EFLAGS_TF;
regs->flags |= ctx->saved_flags;
@@ -315,20 +354,24 @@ static int add_kmmio_fault_page(unsigned long page)
f = get_kmmio_fault_page(page);
if (f) {
if (!f->count)
- arm_kmmio_fault_page(f->page, NULL);
+ arm_kmmio_fault_page(f);
f->count++;
return 0;
}
- f = kmalloc(sizeof(*f), GFP_ATOMIC);
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
return -1;
f->count = 1;
f->page = page;
- list_add_rcu(&f->list, kmmio_page_list(f->page));
- arm_kmmio_fault_page(f->page, NULL);
+ if (arm_kmmio_fault_page(f)) {
+ kfree(f);
+ return -1;
+ }
+
+ list_add_rcu(&f->list, kmmio_page_list(f->page));
return 0;
}
@@ -347,7 +390,7 @@ static void release_kmmio_fault_page(unsigned long page,
f->count--;
BUG_ON(f->count < 0);
if (!f->count) {
- disarm_kmmio_fault_page(f->page, NULL);
+ disarm_kmmio_fault_page(f);
f->release_next = *release_list;
*release_list = f;
}
@@ -408,23 +451,24 @@ static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
static void remove_kmmio_fault_pages(struct rcu_head *head)
{
- struct kmmio_delayed_release *dr = container_of(
- head,
- struct kmmio_delayed_release,
- rcu);
+ struct kmmio_delayed_release *dr =
+ container_of(head, struct kmmio_delayed_release, rcu);
struct kmmio_fault_page *p = dr->release_list;
struct kmmio_fault_page **prevp = &dr->release_list;
unsigned long flags;
+
spin_lock_irqsave(&kmmio_lock, flags);
while (p) {
- if (!p->count)
+ if (!p->count) {
list_del_rcu(&p->list);
- else
+ prevp = &p->release_next;
+ } else {
*prevp = p->release_next;
- prevp = &p->release_next;
+ }
p = p->release_next;
}
spin_unlock_irqrestore(&kmmio_lock, flags);
+
/* This is the real RCU destroy call. */
call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}
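The kmmio rework above boils down to toggling the present bit of the pte/pmd while remembering what it was, so disarming restores the original state instead of unconditionally forcing the page present. A standalone sketch of that bit manipulation on a plain 64-bit value (the PRESENT_BIT constant and pteval type are stand-ins for illustration; the real code goes through set_pte_atomic()/set_pmd() and flushes the TLB afterwards):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRESENT_BIT 0x1ULL   /* stand-in for _PAGE_PRESENT */

typedef uint64_t pteval;

/* Set or clear the present bit, saving the previous state in *old. */
static void set_presence(pteval *pte, bool present, bool *old)
{
	pteval v = *pte;

	*old = !!(v & PRESENT_BIT);
	v &= ~PRESENT_BIT;
	if (present)
		v |= PRESENT_BIT;
	*pte = v;
}

int main(void)
{
	pteval pte = 0x1000 | PRESENT_BIT;   /* fake mapping, present */
	bool old, tmp;

	set_presence(&pte, false, &old);     /* "arm": hide the page    */
	printf("armed:    pte=%#llx old_presence=%d\n",
	       (unsigned long long)pte, old);

	set_presence(&pte, old, &tmp);       /* "disarm": restore state */
	printf("disarmed: pte=%#llx\n", (unsigned long long)pte);
	return 0;
}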
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 71a14f8..f3516da 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -145,7 +145,7 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
return shift;
}
-int early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
{
return phys_to_nid(pfn << PAGE_SHIFT);
}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 84ba748..7233bd7 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -508,18 +508,24 @@ static int split_large_page(pte_t *kpte, unsigned long address)
#endif
/*
- * Install the new, split up pagetable. Important details here:
+ * Install the new, split up pagetable.
*
- * On Intel the NX bit of all levels must be cleared to make a
- * page executable. See section 4.13.2 of Intel 64 and IA-32
- * Architectures Software Developer's Manual).
+ * We use the standard kernel pagetable protections for the new
+ * pagetable protections, the actual ptes set above control the
+ * primary protection behavior:
+ */
+ __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
+
+ /*
+ * Intel Atom errata AAH41 workaround.
*
- * Mark the entry present. The current mapping might be
- * set to not present, which we preserved above.
+ * The real fix should be in hw or in a microcode update, but
+ * we also probabilistically try to reduce the window of having
+ * a large TLB mixed with 4K TLBs while instruction fetches are
+ * going on.
*/
- ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
- pgprot_val(ref_prot) |= _PAGE_PRESENT;
- __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+ __flush_tlb_all();
+
base = NULL;
out_unlock:
@@ -575,7 +581,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
address = cpa->vaddr[cpa->curpage];
else
address = *cpa->vaddr;
-
repeat:
kpte = lookup_address(address, &level);
if (!kpte)
@@ -812,6 +817,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
vm_unmap_aliases();
+ /*
+ * If we're called with lazy mmu updates enabled, the
+ * in-memory pte state may be stale. Flush pending updates to
+ * bring them up to date.
+ */
+ arch_flush_lazy_mmu_mode();
+
cpa.vaddr = addr;
cpa.numpages = numpages;
cpa.mask_set = mask_set;
@@ -854,6 +866,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
} else
cpa_flush_all(cache);
+ /*
+ * If we've been called with lazy mmu updates enabled, then
+ * make sure that everything gets flushed out before we
+ * return.
+ */
+ arch_flush_lazy_mmu_mode();
+
out:
return ret;
}
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7b61036..e0ab173 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -11,6 +11,7 @@
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>
@@ -211,6 +212,33 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
static struct memtype *cached_entry;
static u64 cached_start;
+static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
+{
+ int ram_page = 0, not_rampage = 0;
+ unsigned long page_nr;
+
+ for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
+ ++page_nr) {
+ /*
+ * For legacy reasons, physical address range in the legacy ISA
+ * region is tracked as non-RAM. This will allow users of
+ * /dev/mem to map portions of legacy ISA region, even when
+ * some of those portions are listed(or not even listed) with
+ * different e820 types(RAM/reserved/..)
+ */
+ if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
+ page_is_ram(page_nr))
+ ram_page = 1;
+ else
+ not_rampage = 1;
+
+ if (ram_page == not_rampage)
+ return -1;
+ }
+
+ return ram_page;
+}
+
/*
* For RAM pages, mark the pages as non WB memory type using
* PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
@@ -336,20 +364,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
if (new_type)
*new_type = actual_type;
- /*
- * For legacy reasons, some parts of the physical address range in the
- * legacy 1MB region is treated as non-RAM (even when listed as RAM in
- * the e820 tables). So we will track the memory attributes of this
- * legacy 1MB region using the linear memtype_list always.
- */
- if (end >= ISA_END_ADDRESS) {
- is_range_ram = pagerange_is_ram(start, end);
- if (is_range_ram == 1)
- return reserve_ram_pages_type(start, end, req_type,
- new_type);
- else if (is_range_ram < 0)
- return -EINVAL;
- }
+ is_range_ram = pat_pagerange_is_ram(start, end);
+ if (is_range_ram == 1)
+ return reserve_ram_pages_type(start, end, req_type,
+ new_type);
+ else if (is_range_ram < 0)
+ return -EINVAL;
new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
if (!new)
@@ -446,19 +466,11 @@ int free_memtype(u64 start, u64 end)
if (is_ISA_range(start, end - 1))
return 0;
- /*
- * For legacy reasons, some parts of the physical address range in the
- * legacy 1MB region is treated as non-RAM (even when listed as RAM in
- * the e820 tables). So we will track the memory attributes of this
- * legacy 1MB region using the linear memtype_list always.
- */
- if (end >= ISA_END_ADDRESS) {
- is_range_ram = pagerange_is_ram(start, end);
- if (is_range_ram == 1)
- return free_ram_pages_type(start, end);
- else if (is_range_ram < 0)
- return -EINVAL;
- }
+ is_range_ram = pat_pagerange_is_ram(start, end);
+ if (is_range_ram == 1)
+ return free_ram_pages_type(start, end);
+ else if (is_range_ram < 0)
+ return -EINVAL;
spin_lock(&memtype_lock);
list_for_each_entry(entry, &memtype_list, nd) {
@@ -626,17 +638,13 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
unsigned long flags;
unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
- is_ram = pagerange_is_ram(paddr, paddr + size);
+ is_ram = pat_pagerange_is_ram(paddr, paddr + size);
- if (is_ram != 0) {
- /*
- * For mapping RAM pages, drivers need to call
- * set_memory_[uc|wc|wb] directly, for reserve and free, before
- * setting up the PTE.
- */
- WARN_ON_ONCE(1);
- return 0;
- }
+ /*
+ * reserve_pfn_range() doesn't support RAM pages.
+ */
+ if (is_ram != 0)
+ return -EINVAL;
ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
if (ret)
@@ -693,7 +701,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)
{
int is_ram;
- is_ram = pagerange_is_ram(paddr, paddr + size);
+ is_ram = pat_pagerange_is_ram(paddr, paddr + size);
if (is_ram == 0)
free_memtype(paddr, paddr + size);
}
@@ -861,6 +869,7 @@ pgprot_t pgprot_writecombine(pgprot_t prot)
else
return pgprot_noncached(prot);
}
+EXPORT_SYMBOL_GPL(pgprot_writecombine);
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
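pat_pagerange_is_ram() classifies a physical range as all-RAM (1), all non-RAM (0), or mixed (-1), with everything below ISA_END_ADDRESS forced into the non-RAM bucket. A self-contained sketch of that classification with a mocked page_is_ram() (the fake memory layout below is invented for illustration):

#include <stdio.h>

#define PAGE_SHIFT       12
#define ISA_END_ADDRESS  0x100000UL              /* 1 MB */

/* Mock: pretend everything from 1 MB to 256 MB is RAM. */
static int page_is_ram(unsigned long page_nr)
{
	unsigned long addr = page_nr << PAGE_SHIFT;
	return addr >= 0x100000UL && addr < 0x10000000UL;
}

/* 1 = all RAM, 0 = no RAM, -1 = mixed; the ISA region counts as non-RAM. */
static int pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = start >> PAGE_SHIFT; page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;           /* both kinds seen: mixed */
	}
	return ram_page;
}

int main(void)
{
	printf("%d\n", pagerange_is_ram(0x200000, 0x400000));   /*  1 */
	printf("%d\n", pagerange_is_ram(0x0,      0x100000));   /*  0 */
	printf("%d\n", pagerange_is_ram(0xF0000,  0x200000));   /* -1 */
	return 0;
}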
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index ab50a8d..427fd1b 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -1,5 +1,5 @@
/*
- * Written by Pekka Paalanen, 2008 <pq@iki.fi>
+ * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
*/
#include <linux/module.h>
#include <linux/io.h>
@@ -9,35 +9,74 @@
static unsigned long mmio_address;
module_param(mmio_address, ulong, 0);
-MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB.");
+MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
+ "(or 8 MB if read_far is non-zero).");
+
+static unsigned long read_far = 0x400100;
+module_param(read_far, ulong, 0);
+MODULE_PARM_DESC(read_far, " Offset of a 32-bit read within 8 MB "
+ "(default: 0x400100).");
+
+static unsigned v16(unsigned i)
+{
+ return i * 12 + 7;
+}
+
+static unsigned v32(unsigned i)
+{
+ return i * 212371 + 13;
+}
static void do_write_test(void __iomem *p)
{
unsigned int i;
+ pr_info(MODULE_NAME ": write test.\n");
mmiotrace_printk("Write test.\n");
+
for (i = 0; i < 256; i++)
iowrite8(i, p + i);
+
for (i = 1024; i < (5 * 1024); i += 2)
- iowrite16(i * 12 + 7, p + i);
+ iowrite16(v16(i), p + i);
+
for (i = (5 * 1024); i < (16 * 1024); i += 4)
- iowrite32(i * 212371 + 13, p + i);
+ iowrite32(v32(i), p + i);
}
static void do_read_test(void __iomem *p)
{
unsigned int i;
+ unsigned errs[3] = { 0 };
+ pr_info(MODULE_NAME ": read test.\n");
mmiotrace_printk("Read test.\n");
+
for (i = 0; i < 256; i++)
- ioread8(p + i);
+ if (ioread8(p + i) != i)
+ ++errs[0];
+
for (i = 1024; i < (5 * 1024); i += 2)
- ioread16(p + i);
+ if (ioread16(p + i) != v16(i))
+ ++errs[1];
+
for (i = (5 * 1024); i < (16 * 1024); i += 4)
- ioread32(p + i);
+ if (ioread32(p + i) != v32(i))
+ ++errs[2];
+
+ mmiotrace_printk("Read errors: 8-bit %d, 16-bit %d, 32-bit %d.\n",
+ errs[0], errs[1], errs[2]);
}
-static void do_test(void)
+static void do_read_far_test(void __iomem *p)
{
- void __iomem *p = ioremap_nocache(mmio_address, 0x4000);
+ pr_info(MODULE_NAME ": read far test.\n");
+ mmiotrace_printk("Read far test.\n");
+
+ ioread32(p + read_far);
+}
+
+static void do_test(unsigned long size)
+{
+ void __iomem *p = ioremap_nocache(mmio_address, size);
if (!p) {
pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
return;
@@ -45,11 +84,15 @@ static void do_test(void)
mmiotrace_printk("ioremap returned %p.\n", p);
do_write_test(p);
do_read_test(p);
+ if (read_far && read_far < size - 4)
+ do_read_far_test(p);
iounmap(p);
}
static int __init init(void)
{
+ unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
+
if (mmio_address == 0) {
pr_err(MODULE_NAME ": you have to use the module argument "
"mmio_address.\n");
@@ -58,10 +101,11 @@ static int __init init(void)
return -ENXIO;
}
- pr_warning(MODULE_NAME ": WARNING: mapping 16 kB @ 0x%08lx "
- "in PCI address space, and writing "
- "rubbish in there.\n", mmio_address);
- do_test();
+ pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI "
+ "address space, and writing 16 kB of rubbish in there.\n",
+ size >> 10, mmio_address);
+ do_test(size);
+ pr_info(MODULE_NAME ": All done.\n");
return 0;
}
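The testmmiotrace changes turn the blind reads into a self-check: the values written through iowrite16()/iowrite32() follow the deterministic v16()/v32() patterns, so the read pass can count mismatches per access width. A userspace sketch of that write-then-verify loop against a plain buffer instead of an ioremap()ed window (the buffer size and offsets mirror the 16 kB test; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned v16(unsigned i) { return i * 12 + 7; }
static unsigned v32(unsigned i) { return i * 212371 + 13; }

int main(void)
{
	unsigned char *p = calloc(1, 16 * 1024);
	unsigned int i, errs[3] = { 0 };
	uint16_t w16;
	uint32_t w32;

	if (!p)
		return 1;

	/* Write pass: same offsets and patterns as the test module. */
	for (i = 0; i < 256; i++)
		p[i] = (unsigned char)i;
	for (i = 1024; i < 5 * 1024; i += 2) {
		w16 = (uint16_t)v16(i);
		memcpy(p + i, &w16, sizeof(w16));
	}
	for (i = 5 * 1024; i < 16 * 1024; i += 4) {
		w32 = (uint32_t)v32(i);
		memcpy(p + i, &w32, sizeof(w32));
	}

	/* Read pass: count mismatches per access width. */
	for (i = 0; i < 256; i++)
		if (p[i] != (unsigned char)i)
			++errs[0];
	for (i = 1024; i < 5 * 1024; i += 2) {
		memcpy(&w16, p + i, sizeof(w16));
		if (w16 != (uint16_t)v16(i))
			++errs[1];
	}
	for (i = 5 * 1024; i < 16 * 1024; i += 4) {
		memcpy(&w32, p + i, sizeof(w32));
		if (w32 != (uint32_t)v32(i))
			++errs[2];
	}

	printf("Read errors: 8-bit %u, 16-bit %u, 32-bit %u.\n",
	       errs[0], errs[1], errs[2]);
	free(p);
	return 0;
}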