Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/iommu.c            | 17 ++++++++++++++---
-rw-r--r--  arch/powerpc/mm/slb.c                  | 10 ++--------
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c  |  1 -
-rw-r--r--  arch/sparc/kernel/time.c               |  9 ++++++++-
-rw-r--r--  arch/sparc64/kernel/ktlb.S             |  6 +++++-
5 files changed, 29 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 2d0c9ef..79a85d6 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -278,6 +278,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
unsigned long flags;
struct scatterlist *s, *outs, *segstart;
int outcount, incount, i;
+ unsigned int align;
unsigned long handle;
BUG_ON(direction == DMA_NONE);
@@ -309,7 +310,12 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
vaddr = (unsigned long) sg_virt(s);
npages = iommu_num_pages(vaddr, slen);
- entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
+ align = 0;
+ if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+ (vaddr & ~PAGE_MASK) == 0)
+ align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+ entry = iommu_range_alloc(tbl, npages, &handle,
+ mask >> IOMMU_PAGE_SHIFT, align);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -572,7 +578,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
{
dma_addr_t dma_handle = DMA_ERROR_CODE;
unsigned long uaddr;
- unsigned int npages;
+ unsigned int npages, align;
BUG_ON(direction == DMA_NONE);
@@ -580,8 +586,13 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
npages = iommu_num_pages(uaddr, size);
if (tbl) {
+ align = 0;
+ if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+ ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+ align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+
dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
- mask >> IOMMU_PAGE_SHIFT, 0);
+ mask >> IOMMU_PAGE_SHIFT, align);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, "
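Note: the two hunks above apply the same rule in iommu_map_sg() and iommu_map_single(): when the IOMMU page size is smaller than the kernel page size, a buffer that starts on a kernel page boundary and spans at least one full kernel page gets a kernel-page-aligned slot in the IOMMU table. A minimal standalone sketch of that alignment rule follows; the helper name iommu_align_order and the constants are illustrative (the real values come from asm/page.h and asm/iommu.h), assuming the 4K-IOMMU-page / 64K-kernel-page split this patch targets:

    /* Sketch only: 64K kernel pages built from 4K IOMMU pages. */
    #define IOMMU_PAGE_SHIFT 12                 /* assumed: 4K IOMMU pages   */
    #define PAGE_SHIFT       16                 /* assumed: 64K kernel pages */
    #define PAGE_SIZE        (1UL << PAGE_SHIFT)
    #define PAGE_MASK        (~(PAGE_SIZE - 1))

    /* Returns the allocation order passed as the new "align"
     * argument: the number of IOMMU pages per kernel page, as a
     * power of two (16 - 12 = 4, i.e. 16 IOMMU pages).
     */
    static unsigned int iommu_align_order(unsigned long vaddr, unsigned long len)
    {
            if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && len >= PAGE_SIZE &&
                (vaddr & ~PAGE_MASK) == 0)
                    return PAGE_SHIFT - IOMMU_PAGE_SHIFT;
            return 0;
    }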
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a282bc2..50d7372 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -82,14 +82,6 @@ static inline void slb_shadow_clear(unsigned long entry)
get_slb_shadow()->save_area[entry].esid = 0;
}
-void slb_shadow_clear_all(void)
-{
- int i;
-
- for (i = 0; i < SLB_NUM_BOLTED; i++)
- slb_shadow_clear(i);
-}
-
static inline void create_shadowed_slbe(unsigned long ea, int ssize,
unsigned long flags,
unsigned long entry)
@@ -300,6 +292,8 @@ void slb_initialize(void)
create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+ slb_shadow_clear(2);
+
/* We don't bolt the stack for the time being - we're in boot,
* so the stack is in the bolted segment. By the time it goes
* elsewhere, we'll call _switch() which will bolt in the new
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 34317aa..9a455d4 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -272,7 +272,6 @@ void vpa_init(int cpu)
*/
addr = __pa(&slb_shadow[cpu]);
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
- slb_shadow_clear_all();
ret = register_slb_shadow(hwcpu, addr);
if (ret)
printk(KERN_ERR
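The slb.c and lpar.c hunks are one change: instead of re-clearing every bolted shadow entry from vpa_init() each time a CPU registers its SLB shadow buffer, slb_initialize() clears only entry 2 once. Entries 0 and 1 are immediately rewritten by create_shadowed_slbe(), so only the stack entry (bolted later by _switch()) needs an explicit clear. A standalone model of why zeroing the esid word alone suffices; struct shadow_model and shadow_clear are hypothetical stand-ins for the real SLB-shadow definitions:

    #include <stdio.h>

    #define SLB_NUM_BOLTED 3   /* linear mapping, vmalloc, stack */

    struct shadow_model {
            struct { unsigned long esid, vsid; } save_area[SLB_NUM_BOLTED];
    };

    /* Mirrors slb_shadow_clear() in the diff: the valid bit lives
     * in the esid word, so zeroing esid alone invalidates the slot.
     */
    static void shadow_clear(struct shadow_model *s, int entry)
    {
            s->save_area[entry].esid = 0;
    }

    int main(void)
    {
            struct shadow_model s = { .save_area = { {1, 1}, {2, 2}, {3, 3} } };
            shadow_clear(&s, 2);            /* only the stack slot */
            printf("entry 2 esid = %lu\n", s.save_area[2].esid);
            return 0;
    }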
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 45cb7c5..00b393c 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -436,7 +436,14 @@ void __init time_init(void)
static inline unsigned long do_gettimeoffset(void)
{
- return (*master_l10_counter >> 10) & 0x1fffff;
+ unsigned long val = *master_l10_counter;
+ unsigned long usec = (val >> 10) & 0x1fffff;
+
+ /* Limit hit? */
+ if (val & 0x80000000)
+ usec += 1000000 / HZ;
+
+ return usec;
}
/* Ok, my cute asm atomicity trick doesn't work anymore.
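The time.c hunk fixes do_gettimeoffset() for the window where the level-10 counter has already hit its limit: bits 30..10 hold microseconds since the last tick, and bit 31 latches once a full tick has elapsed but the timer interrupt has not been serviced yet, so one tick's worth of microseconds must be added. A runnable model of the arithmetic, with a hypothetical helper name offset_usec and HZ = 100 assumed (the usual sparc32 value):

    #include <stdio.h>

    #define HZ 100

    /* Model of the fixed do_gettimeoffset(): microseconds sit in
     * bits 30..10 of the counter; bit 31 is the limit flag.
     */
    static unsigned long offset_usec(unsigned long counter)
    {
            unsigned long usec = (counter >> 10) & 0x1fffff;

            if (counter & 0x80000000UL)     /* limit hit: add one tick */
                    usec += 1000000 / HZ;

            return usec;
    }

    int main(void)
    {
            printf("%lu\n", offset_usec(5000UL << 10));                  /* 5000  */
            printf("%lu\n", offset_usec((5000UL << 10) | 0x80000000UL)); /* 15000 */
            return 0;
    }

Without the limit-bit check, a caller reading the clock between the counter wrap and the tick interrupt would see time jump backwards by up to one full tick.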
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 964527d..cef8def 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -1,6 +1,6 @@
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
*
- * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
* Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -226,6 +226,7 @@ kvmap_dtlb_load:
ba,pt %xcc, sun4v_dtlb_load
mov %g5, %g3
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
sub %g4, %g5, %g5
srlx %g5, 22, %g5
@@ -234,6 +235,7 @@ kvmap_vmemmap:
or %g1, %lo(vmemmap_table), %g1
ba,pt %xcc, kvmap_dtlb_load
ldx [%g1 + %g5], %g5
+#endif
kvmap_dtlb_nonlinear:
/* Catch kernel NULL pointer derefs. */
@@ -242,12 +244,14 @@ kvmap_dtlb_nonlinear:
bleu,pn %xcc, kvmap_dtlb_longpath
nop
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Do not use the TSB for vmemmap. */
mov (VMEMMAP_BASE >> 24), %g5
sllx %g5, 24, %g5
cmp %g4,%g5
bgeu,pn %xcc, kvmap_vmemmap
nop
+#endif
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
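The ktlb.S hunks wrap the vmemmap TLB-miss path in CONFIG_SPARSEMEM_VMEMMAP, so kernels built without sparsemem-vmemmap neither run the range check on every D-TLB miss nor reference a vmemmap_table that does not exist in that configuration. A C rendering of what the guarded assembly computes; the helper names and the VMEMMAP_BASE value are assumptions for illustration, while the >> 22 (4MB slice) indexing and the >> 24 / << 24 range check come straight from the hunks above:

    #include <stdint.h>

    #define VMEMMAP_BASE 0xff00000000000000UL   /* assumed value */

    #ifdef CONFIG_SPARSEMEM_VMEMMAP
    extern uint64_t vmemmap_table[];            /* one 8-byte entry per 4MB slice */

    /* Mirrors kvmap_vmemmap: index the table by 4MB (1 << 22) slice. */
    static uint64_t vmemmap_tte(uint64_t vaddr)
    {
            return vmemmap_table[(vaddr - VMEMMAP_BASE) >> 22];
    }
    #endif

    /* Mirrors the guarded check in kvmap_dtlb_nonlinear:
     * mov (VMEMMAP_BASE >> 24), %g5 ; sllx %g5, 24, %g5 ; cmp/bgeu
     */
    static int is_vmemmap_addr(uint64_t vaddr)
    {
    #ifdef CONFIG_SPARSEMEM_VMEMMAP
            return vaddr >= ((VMEMMAP_BASE >> 24) << 24);
    #else
            return 0;
    #endif
    }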