Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/hp/common/sba_iommu.c | 38
-rw-r--r-- | arch/ia64/include/asm/hw_irq.h  |  6
-rw-r--r-- | arch/ia64/include/asm/mca.h     |  5
-rw-r--r-- | arch/ia64/include/asm/rwsem.h   |  2
-rw-r--r-- | arch/ia64/kernel/irq_ia64.c     |  6
-rw-r--r-- | arch/ia64/kernel/mca.c          | 11
6 files changed, 50 insertions, 18 deletions
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index f332e3f..e14c492 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 			spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
 			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
-			if (unlikely(pide >= (ioc->res_size << 3)))
-				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
-				      ioc->ioc_hpa);
+			if (unlikely(pide >= (ioc->res_size << 3))) {
+				printk(KERN_WARNING "%s: I/O MMU @ %p is"
+				       "out of mapping resources, %u %u %lx\n",
+				       __func__, ioc->ioc_hpa, ioc->res_size,
+				       pages_needed, dma_get_seg_boundary(dev));
+				return -1;
+			}
 #else
-			panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
-			      ioc->ioc_hpa);
+			printk(KERN_WARNING "%s: I/O MMU @ %p is"
+			       "out of mapping resources, %u %u %lx\n",
+			       __func__, ioc->ioc_hpa, ioc->res_size,
+			       pages_needed, dma_get_seg_boundary(dev));
+			return -1;
 #endif
 		}
 	}
@@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 #endif
 	pide = sba_alloc_range(ioc, dev, size);
+	if (pide < 0)
+		return 0;
 
 	iovp = (dma_addr_t) pide << iovp_shift;
@@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	int n_mappings = 0;
 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
+	int idx;
 
 	while (nents > 0) {
 		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 		vcontig_sg->dma_length = vcontig_len;
 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
-		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
-			| dma_offset);
+		idx = sba_alloc_range(ioc, dev, dma_len);
+		if (idx < 0) {
+			dma_sg->dma_length = 0;
+			return -1;
+		}
+		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
+						   | dma_offset);
 		n_mappings++;
 	}
 
 	return n_mappings;
 }
 
-
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs);
 /**
  * sba_map_sg - map Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
@@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
 	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
+	if (coalesced < 0) {
+		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
+		return 0;
+	}
 
 	/*
 	** Program the I/O Pdir
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 91619b3..bf2e374 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -59,7 +59,13 @@ typedef u16 ia64_vector;
 extern int ia64_first_device_vector;
 extern int ia64_last_device_vector;
 
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
+/* Reserve the lower priority vector than device vectors for "move IRQ" IPI */
+#define IA64_IRQ_MOVE_VECTOR		0x30	/* "move IRQ" IPI */
+#define IA64_DEF_FIRST_DEVICE_VECTOR	0x31
+#else
 #define IA64_DEF_FIRST_DEVICE_VECTOR	0x30
+#endif
 #define IA64_DEF_LAST_DEVICE_VECTOR	0xe7
 #define IA64_FIRST_DEVICE_VECTOR	ia64_first_device_vector
 #define IA64_LAST_DEVICE_VECTOR		ia64_last_device_vector
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index c171cdf..43f96ab 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -106,6 +106,11 @@ struct ia64_sal_os_state {
 	unsigned long		os_status;	/* OS status to SAL, enum below */
 	unsigned long		context;	/* 0 if return to same context
 						   1 if return to new context */
+
+	/* I-resources */
+	unsigned long		iip;
+	unsigned long		ipsr;
+	unsigned long		ifs;
 };
 
 enum {
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index fbee74b..e876268 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -47,7 +47,7 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
 #define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
 	  LIST_HEAD_INIT((name).wait_list) }
 
 #define DECLARE_RWSEM(name) \
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 70e4bad..d4093a1 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,7 +260,6 @@ void __setup_vector_irq(int cpu)
 }
 
 #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
-#define IA64_IRQ_MOVE_VECTOR	IA64_DEF_FIRST_DEVICE_VECTOR
 
 static enum vector_domain_type {
 	VECTOR_DOMAIN_NONE,
@@ -659,11 +658,8 @@ init_IRQ (void)
 	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
-	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
-		BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
-		IA64_FIRST_DEVICE_VECTOR++;
+	if (vector_domain_type != VECTOR_DOMAIN_NONE)
 		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
-	}
 #endif
 #endif
 #ifdef CONFIG_PERFMON
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 496ac7a..32f2639 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -888,9 +888,10 @@ ia64_mca_modify_comm(const struct task_struct *previous_current)
 }
 
 static void
-finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms,
+finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
 		unsigned long *nat)
 {
+	const pal_min_state_area_t *ms = sos->pal_min_state;
 	const u64 *bank;
 
 	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
@@ -904,6 +905,10 @@ finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms,
 		regs->cr_iip = ms->pmsa_xip;
 		regs->cr_ipsr = ms->pmsa_xpsr;
 		regs->cr_ifs = ms->pmsa_xfs;
+
+		sos->iip = ms->pmsa_iip;
+		sos->ipsr = ms->pmsa_ipsr;
+		sos->ifs = ms->pmsa_ifs;
 	}
 	regs->pr = ms->pmsa_pr;
 	regs->b0 = ms->pmsa_br0;
@@ -1079,7 +1084,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	memcpy(old_regs, regs, sizeof(*regs));
 	old_regs->loadrs = loadrs;
 	old_unat = old_regs->ar_unat;
-	finish_pt_regs(old_regs, ms, &old_unat);
+	finish_pt_regs(old_regs, sos, &old_unat);
 
 	/* Next stack a struct switch_stack.  mca_asm.S built a partial
 	 * switch_stack, copy it and fill in the blanks using pt_regs and
@@ -1150,7 +1155,7 @@ no_mod:
 	mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
 			smp_processor_id(), type, msg);
 	old_unat = regs->ar_unat;
-	finish_pt_regs(regs, ms, &old_unat);
+	finish_pt_regs(regs, sos, &old_unat);
 	return previous_current;
 }