author | Mark Maule <maule@sgi.com> | 2005-08-03 14:07:00 -0700
---|---|---
committer | Tony Luck <tony.luck@intel.com> | 2005-08-11 15:56:31 -0700
commit | c9221da9f2796f082642c3498edb2c8783ad4774 |
tree | 1603dbace88f4790b6b0138f6e0b43a8958dd656 |
parent | 5b53ed1f2ed6c85e2b1c39d97cc112ea32004609 |
[IA64-SGI] sn pci provider for TIOCE (pci express)
Altix patch to add an SN pci provider for TIOCE, which is SGI's
PCI Express implementation.
Signed-off-by: Mark Maule <maule@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r-- | arch/ia64/sn/kernel/io_init.c | 2
-rw-r--r-- | arch/ia64/sn/pci/Makefile | 2
-rw-r--r-- | arch/ia64/sn/pci/tioce_provider.c | 733
-rw-r--r-- | include/asm-ia64/sn/pcibus_provider_defs.h | 3
-rw-r--r-- | include/asm-ia64/sn/tioce.h | 740
-rw-r--r-- | include/asm-ia64/sn/tioce_provider.h | 66
6 files changed, 1544 insertions(+), 2 deletions(-)
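The hunks below wire the new provider into the SN PCI layer: sn_pci_init() gains a call to tioce_init_provider(), which registers an ops table under PCIIO_ASIC_TYPE_TIOCE. The following is a minimal standalone sketch of that registration pattern only; struct pci_provider_ops and the demo_* names are simplified stand-ins, not the kernel's actual types.

```c
/*
 * Standalone sketch of the per-ASIC provider table pattern used by this
 * patch.  Only PCIIO_ASIC_TYPE_TIOCE and the register-by-index idea come
 * from the patch; everything else is a simplified stand-in.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PCIIO_ASIC_TYPE_TIOCE	5
#define PCIIO_ASIC_MAX_TYPES	6

struct pci_provider_ops {
	uint64_t (*dma_map)(void *pdev, uint64_t paddr, size_t len);
	void	 (*dma_unmap)(void *pdev, uint64_t bus_addr, int dir);
};

static struct pci_provider_ops *providers[PCIIO_ASIC_MAX_TYPES];

static uint64_t demo_dma_map(void *pdev, uint64_t paddr, size_t len)
{
	(void)pdev; (void)len;
	return paddr | (1ULL << 63);	/* pretend D64 direct mapping */
}

static void demo_dma_unmap(void *pdev, uint64_t bus_addr, int dir)
{
	(void)pdev; (void)bus_addr; (void)dir;	/* nothing to release for D64 */
}

static struct pci_provider_ops tioce_like_ops = {
	.dma_map   = demo_dma_map,
	.dma_unmap = demo_dma_unmap,
};

/* Mirrors tioce_init_provider(): hook the ops table in by ASIC type. */
static int demo_init_provider(void)
{
	providers[PCIIO_ASIC_TYPE_TIOCE] = &tioce_like_ops;
	return 0;
}

int main(void)
{
	struct pci_provider_ops *ops;

	demo_init_provider();
	ops = providers[PCIIO_ASIC_TYPE_TIOCE];
	printf("bus addr: 0x%llx\n",
	       (unsigned long long)ops->dma_map(NULL, 0x1000, 4096));
	return 0;
}
```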
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 829ea79..d1fc09b 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -18,6 +18,7 @@ #include <asm/sn/simulator.h> #include <asm/sn/sn_sal.h> #include <asm/sn/tioca_provider.h> +#include <asm/sn/tioce_provider.h> #include "xtalk/hubdev.h" #include "xtalk/xwidgetdev.h" @@ -481,6 +482,7 @@ static int __init sn_pci_init(void) pcibr_init_provider(); tioca_init_provider(); + tioce_init_provider(); /* * This is needed to avoid bounce limit checks in the blk layer diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile index 2f915bc..321576b 100644 --- a/arch/ia64/sn/pci/Makefile +++ b/arch/ia64/sn/pci/Makefile @@ -7,4 +7,4 @@ # # Makefile for the sn pci general routines. -obj-y := pci_dma.o tioca_provider.o pcibr/ +obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/ diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c new file mode 100644 index 0000000..d908136 --- /dev/null +++ b/arch/ia64/sn/pci/tioce_provider.c @@ -0,0 +1,733 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <asm/sn/sn_sal.h> +#include <asm/sn/addrs.h> +#include <asm/sn/pcidev.h> +#include <asm/sn/pcibus_provider_defs.h> +#include <asm/sn/tioce_provider.h> + +/** + * Bus address ranges for the 5 flavors of TIOCE DMA + */ + +#define TIOCE_D64_MIN 0x8000000000000000UL +#define TIOCE_D64_MAX 0xffffffffffffffffUL +#define TIOCE_D64_ADDR(a) ((a) >= TIOCE_D64_MIN) + +#define TIOCE_D32_MIN 0x0000000080000000UL +#define TIOCE_D32_MAX 0x00000000ffffffffUL +#define TIOCE_D32_ADDR(a) ((a) >= TIOCE_D32_MIN && (a) <= TIOCE_D32_MAX) + +#define TIOCE_M32_MIN 0x0000000000000000UL +#define TIOCE_M32_MAX 0x000000007fffffffUL +#define TIOCE_M32_ADDR(a) ((a) >= TIOCE_M32_MIN && (a) <= TIOCE_M32_MAX) + +#define TIOCE_M40_MIN 0x0000004000000000UL +#define TIOCE_M40_MAX 0x0000007fffffffffUL +#define TIOCE_M40_ADDR(a) ((a) >= TIOCE_M40_MIN && (a) <= TIOCE_M40_MAX) + +#define TIOCE_M40S_MIN 0x0000008000000000UL +#define TIOCE_M40S_MAX 0x000000ffffffffffUL +#define TIOCE_M40S_ADDR(a) ((a) >= TIOCE_M40S_MIN && (a) <= TIOCE_M40S_MAX) + +/* + * ATE manipulation macros. + */ + +#define ATE_PAGESHIFT(ps) (__ffs(ps)) +#define ATE_PAGEMASK(ps) ((ps)-1) + +#define ATE_PAGE(x, ps) ((x) >> ATE_PAGESHIFT(ps)) +#define ATE_NPAGES(start, len, pagesize) \ + (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1) + +#define ATE_VALID(ate) ((ate) & (1UL << 63)) +#define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63)) + +/* + * Flavors of ate-based mapping supported by tioce_alloc_map() + */ + +#define TIOCE_ATE_M32 1 +#define TIOCE_ATE_M40 2 +#define TIOCE_ATE_M40S 3 + +#define KB(x) ((x) << 10) +#define MB(x) ((x) << 20) +#define GB(x) ((x) << 30) + +/** + * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode + * @ct_addr: system coretalk address + * + * Map @ct_addr into 64-bit CE bus space. No device context is necessary + * and no CE mapping are consumed. + * + * Bits 53:0 come from the coretalk address. The remaining bits are set as + * follows: + * + * 63 - must be 1 to indicate d64 mode to CE hardware + * 62 - barrier bit ... 
controlled with tioce_dma_barrier() + * 61 - 0 since this is not an MSI transaction + * 60:54 - reserved, MBZ + */ +static uint64_t +tioce_dma_d64(unsigned long ct_addr) +{ + uint64_t bus_addr; + + bus_addr = ct_addr | (1UL << 63); + + return bus_addr; +} + +/** + * pcidev_to_tioce - return misc ce related pointers given a pci_dev + * @pci_dev: pci device context + * @base: ptr to store struct tioce_mmr * for the CE holding this device + * @kernel: ptr to store struct tioce_kernel * for the CE holding this device + * @port: ptr to store the CE port number that this device is on + * + * Return pointers to various CE-related structures for the CE upstream of + * @pci_dev. + */ +static inline void +pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base, + struct tioce_kernel **kernel, int *port) +{ + struct pcidev_info *pcidev_info; + struct tioce_common *ce_common; + struct tioce_kernel *ce_kernel; + + pcidev_info = SN_PCIDEV_INFO(pdev); + ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; + ce_kernel = (struct tioce_kernel *)ce_common->ce_kernel_private; + + if (base) + *base = (struct tioce *)ce_common->ce_pcibus.bs_base; + if (kernel) + *kernel = ce_kernel; + + /* + * we use port as a zero-based value internally, even though the + * documentation is 1-based. + */ + if (port) + *port = + (pdev->bus->number < ce_kernel->ce_port1_secondary) ? 0 : 1; +} + +/** + * tioce_alloc_map - Given a coretalk address, map it to pcie bus address + * space using one of the various ATE-based address modes. + * @ce_kern: tioce context + * @type: map mode to use + * @port: 0-based port that the requesting device is downstream of + * @ct_addr: the coretalk address to map + * @len: number of bytes to map + * + * Given the addressing type, set up various paramaters that define the + * ATE pool to use. Search for a contiguous block of entries to cover the + * length, and if enough resources exist, fill in the ATE's and construct a + * tioce_dmamap struct to track the mapping. + */ +static uint64_t +tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, + uint64_t ct_addr, int len) +{ + int i; + int j; + int first; + int last; + int entries; + int nates; + int pagesize; + uint64_t *ate_shadow; + uint64_t *ate_reg; + uint64_t addr; + struct tioce *ce_mmr; + uint64_t bus_base; + struct tioce_dmamap *map; + + ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base; + + switch (type) { + case TIOCE_ATE_M32: + /* + * The first 64 entries of the ate3240 pool are dedicated to + * super-page (TIOCE_ATE_M40S) mode. + */ + first = 64; + entries = TIOCE_NUM_M3240_ATES - 64; + ate_shadow = ce_kern->ce_ate3240_shadow; + ate_reg = ce_mmr->ce_ure_ate3240; + pagesize = ce_kern->ce_ate3240_pagesize; + bus_base = TIOCE_M32_MIN; + break; + case TIOCE_ATE_M40: + first = 0; + entries = TIOCE_NUM_M40_ATES; + ate_shadow = ce_kern->ce_ate40_shadow; + ate_reg = ce_mmr->ce_ure_ate40; + pagesize = MB(64); + bus_base = TIOCE_M40_MIN; + break; + case TIOCE_ATE_M40S: + /* + * ate3240 entries 0-31 are dedicated to port1 super-page + * mappings. ate3240 entries 32-63 are dedicated to port2. 
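As a side note on the D64 layout documented above (bits 53:0 carry the coretalk address, bit 63 selects D64 mode, bit 62 is the barrier attribute, bit 61 flags MSI), here is a standalone sketch that builds and decodes such an address. The mask names are local to the example; the driver's tioce_dma_d64() itself only sets bit 63 and leaves the barrier bit to tioce_dma_barrier().

```c
/*
 * Sketch of the TIOCE D64 direct-mode bus address layout.  Illustration
 * only; the D64_* names below are not the driver's.
 */
#include <stdio.h>
#include <stdint.h>

#define D64_MODE_BIT	(1ULL << 63)
#define D64_BARRIER_BIT	(1ULL << 62)
#define D64_MSI_BIT	(1ULL << 61)
#define D64_CT_MASK	((1ULL << 54) - 1)	/* bits 53:0 */

static uint64_t make_d64(uint64_t ct_addr, int barrier)
{
	uint64_t bus = (ct_addr & D64_CT_MASK) | D64_MODE_BIT;

	if (barrier)
		bus |= D64_BARRIER_BIT;	/* what tioce_dma_barrier() toggles */
	return bus;
}

int main(void)
{
	uint64_t bus = make_d64(0x0000123456789abcULL, 1);

	printf("bus=0x%016llx ct=0x%llx barrier=%d\n",
	       (unsigned long long)bus,
	       (unsigned long long)(bus & D64_CT_MASK),
	       !!(bus & D64_BARRIER_BIT));
	return 0;
}
```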
+ */ + first = port * 32; + entries = 32; + ate_shadow = ce_kern->ce_ate3240_shadow; + ate_reg = ce_mmr->ce_ure_ate3240; + pagesize = GB(16); + bus_base = TIOCE_M40S_MIN; + break; + default: + return 0; + } + + nates = ATE_NPAGES(ct_addr, len, pagesize); + if (nates > entries) + return 0; + + last = first + entries - nates; + for (i = first; i <= last; i++) { + if (ATE_VALID(ate_shadow[i])) + continue; + + for (j = i; j < i + nates; j++) + if (ATE_VALID(ate_shadow[j])) + break; + + if (j >= i + nates) + break; + } + + if (i > last) + return 0; + + map = kcalloc(1, sizeof(struct tioce_dmamap), GFP_ATOMIC); + if (!map) + return 0; + + addr = ct_addr; + for (j = 0; j < nates; j++) { + uint64_t ate; + + ate = ATE_MAKE(addr, pagesize); + ate_shadow[i + j] = ate; + ate_reg[i + j] = ate; + addr += pagesize; + } + + map->refcnt = 1; + map->nbytes = nates * pagesize; + map->ct_start = ct_addr & ~ATE_PAGEMASK(pagesize); + map->pci_start = bus_base + (i * pagesize); + map->ate_hw = &ate_reg[i]; + map->ate_shadow = &ate_shadow[i]; + map->ate_count = nates; + + list_add(&map->ce_dmamap_list, &ce_kern->ce_dmamap_list); + + return (map->pci_start + (ct_addr - map->ct_start)); +} + +/** + * tioce_dma_d32 - create a DMA mapping using 32-bit direct mode + * @pdev: linux pci_dev representing the function + * @paddr: system physical address + * + * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info. + */ +static uint64_t +tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr) +{ + int dma_ok; + int port; + struct tioce *ce_mmr; + struct tioce_kernel *ce_kern; + uint64_t ct_upper; + uint64_t ct_lower; + dma_addr_t bus_addr; + + ct_upper = ct_addr & ~0x3fffffffUL; + ct_lower = ct_addr & 0x3fffffffUL; + + pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); + + if (ce_kern->ce_port[port].dirmap_refcnt == 0) { + volatile uint64_t tmp; + + ce_kern->ce_port[port].dirmap_shadow = ct_upper; + ce_mmr->ce_ure_dir_map[port] = ct_upper; + tmp = ce_mmr->ce_ure_dir_map[port]; + dma_ok = 1; + } else + dma_ok = (ce_kern->ce_port[port].dirmap_shadow == ct_upper); + + if (dma_ok) { + ce_kern->ce_port[port].dirmap_refcnt++; + bus_addr = TIOCE_D32_MIN + ct_lower; + } else + bus_addr = 0; + + return bus_addr; +} + +/** + * tioce_dma_barrier - swizzle a TIOCE bus address to include or exclude + * the barrier bit. + * @bus_addr: bus address to swizzle + * + * Given a TIOCE bus address, set the appropriate bit to indicate barrier + * attributes. + */ +static uint64_t +tioce_dma_barrier(uint64_t bus_addr, int on) +{ + uint64_t barrier_bit; + + /* barrier not supported in M40/M40S mode */ + if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr)) + return bus_addr; + + if (TIOCE_D64_ADDR(bus_addr)) + barrier_bit = (1UL << 62); + else /* must be m32 or d32 */ + barrier_bit = (1UL << 30); + + return (on) ? (bus_addr | barrier_bit) : (bus_addr & ~barrier_bit); +} + +/** + * tioce_dma_unmap - release CE mapping resources + * @pdev: linux pci_dev representing the function + * @bus_addr: bus address returned by an earlier tioce_dma_map + * @dir: mapping direction (unused) + * + * Locate mapping resources associated with @bus_addr and release them. + * For mappings created using the direct modes there are no resources + * to release. 
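tioce_alloc_map() above boils down to page arithmetic plus a first-fit search for a contiguous run of free ATEs, which it then fills via ATE_MAKE(). A standalone sketch of that bookkeeping follows; it uses division instead of the driver's __ffs()-based shift (equivalent for power-of-two page sizes) and a toy shadow array in place of ce_ate3240_shadow.

```c
/*
 * Sketch of the ATE bookkeeping in tioce_alloc_map(): count the entries a
 * [ct_addr, ct_addr+len) range needs, first-fit a run of free entries,
 * then fill them the way the driver fills ate_shadow[]/ate_reg[].
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ATE_VALID_BIT		(1ULL << 63)
#define ATE_PAGEMASK(ps)	((uint64_t)(ps) - 1)
#define ATE_PAGE(x, ps)		((x) / (uint64_t)(ps))
#define ATE_NPAGES(start, len, ps) \
	(ATE_PAGE((start) + (len) - 1, ps) - ATE_PAGE((start), ps) + 1)
#define ATE_MAKE(addr, ps)	(((addr) & ~ATE_PAGEMASK(ps)) | ATE_VALID_BIT)

/* Return index of the first run of 'n' invalid entries, or -1 if none. */
static int find_free_run(const uint64_t *shadow, int entries, int n)
{
	int run = 0;

	for (int i = 0; i < entries; i++) {
		run = (shadow[i] & ATE_VALID_BIT) ? 0 : run + 1;
		if (run == n)
			return i - n + 1;
	}
	return -1;
}

int main(void)
{
	uint64_t shadow[16] = { 0 };		/* toy shadow array */
	uint64_t ct_addr = 0x40003000, pagesize = 256 * 1024;
	size_t len = 600 * 1024;
	int n = ATE_NPAGES(ct_addr, len, pagesize);
	int first = find_free_run(shadow, 16, n);

	for (int j = 0; j < n; j++)
		shadow[first + j] = ATE_MAKE(ct_addr + j * pagesize, pagesize);

	printf("need %d ATEs starting at index %d, first=0x%016llx\n",
	       n, first, (unsigned long long)shadow[first]);
	return 0;
}
```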
+ */ +void +tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) +{ + int i; + int port; + struct tioce_kernel *ce_kern; + struct tioce *ce_mmr; + unsigned long flags; + + bus_addr = tioce_dma_barrier(bus_addr, 0); + pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); + + /* nothing to do for D64 */ + + if (TIOCE_D64_ADDR(bus_addr)) + return; + + spin_lock_irqsave(&ce_kern->ce_lock, flags); + + if (TIOCE_D32_ADDR(bus_addr)) { + if (--ce_kern->ce_port[port].dirmap_refcnt == 0) { + ce_kern->ce_port[port].dirmap_shadow = 0; + ce_mmr->ce_ure_dir_map[port] = 0; + } + } else { + struct tioce_dmamap *map; + + list_for_each_entry(map, &ce_kern->ce_dmamap_list, + ce_dmamap_list) { + uint64_t last; + + last = map->pci_start + map->nbytes - 1; + if (bus_addr >= map->pci_start && bus_addr <= last) + break; + } + + if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) { + printk(KERN_WARNING + "%s: %s - no map found for bus_addr 0x%lx\n", + __FUNCTION__, pci_name(pdev), bus_addr); + } else if (--map->refcnt == 0) { + for (i = 0; i < map->ate_count; i++) { + map->ate_shadow[i] = 0; + map->ate_hw[i] = 0; + } + + list_del(&map->ce_dmamap_list); + kfree(map); + } + } + + spin_unlock_irqrestore(&ce_kern->ce_lock, flags); +} + +/** + * tioce_do_dma_map - map pages for PCI DMA + * @pdev: linux pci_dev representing the function + * @paddr: host physical address to map + * @byte_count: bytes to map + * + * This is the main wrapper for mapping host physical pages to CE PCI space. + * The mapping mode used is based on the device's dma_mask. + */ +static uint64_t +tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count, + int barrier) +{ + unsigned long flags; + uint64_t ct_addr; + uint64_t mapaddr = 0; + struct tioce_kernel *ce_kern; + struct tioce_dmamap *map; + int port; + uint64_t dma_mask; + + dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask; + + /* cards must be able to address at least 31 bits */ + if (dma_mask < 0x7fffffffUL) + return 0; + + ct_addr = PHYS_TO_TIODMA(paddr); + + /* + * If the device can generate 64 bit addresses, create a D64 map. + * Since this should never fail, bypass the rest of the checks. + */ + if (dma_mask == ~0UL) { + mapaddr = tioce_dma_d64(ct_addr); + goto dma_map_done; + } + + pcidev_to_tioce(pdev, NULL, &ce_kern, &port); + + spin_lock_irqsave(&ce_kern->ce_lock, flags); + + /* + * D64 didn't work ... See if we have an existing map that covers + * this address range. Must account for devices dma_mask here since + * an existing map might have been done in a mode using more pci + * address bits than this device can support. + */ + list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) { + uint64_t last; + + last = map->ct_start + map->nbytes - 1; + if (ct_addr >= map->ct_start && + ct_addr + byte_count - 1 <= last && + map->pci_start <= dma_mask) { + map->refcnt++; + mapaddr = map->pci_start + (ct_addr - map->ct_start); + break; + } + } + + /* + * If we don't have a map yet, and the card can generate 40 + * bit addresses, try the M40/M40S modes. Note these modes do not + * support a barrier bit, so if we need a consistent map these + * won't work. + */ + if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) { + /* + * We have two options for 40-bit mappings: 16GB "super" ATE's + * and 64MB "regular" ATE's. We'll try both if needed for a + * given mapping but which one we try first depends on the + * size. For requests >64MB, prefer to use a super page with + * regular as the fallback. Otherwise, try in the reverse order. 
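The dma_mask checks in tioce_do_dma_map() define a clear preference order among the mapping flavors. The sketch below reduces that policy to a pure decision function reporting only the first mode that would be attempted; the real code additionally reuses existing maps, falls through the remaining modes on allocation failure, and uses the coherent mask when a barrier (consistent) mapping is requested.

```c
/*
 * Sketch of the mode-selection policy in tioce_do_dma_map(), as a pure
 * decision function.  Thresholds come from the driver; everything else is
 * simplified for illustration.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

enum tioce_mode { MODE_NONE, MODE_D64, MODE_M40S, MODE_M40, MODE_D32, MODE_M32 };

#define MB(x) ((uint64_t)(x) << 20)

static enum tioce_mode first_choice(uint64_t dma_mask, size_t byte_count,
				    int barrier)
{
	if (dma_mask < 0x7fffffffULL)
		return MODE_NONE;		/* card can't address 31 bits */
	if (dma_mask == ~0ULL)
		return MODE_D64;		/* direct 64-bit, never fails */
	if (!barrier && dma_mask >= 0xffffffffffULL)	/* 40-bit ATE modes */
		return byte_count > MB(64) ? MODE_M40S : MODE_M40;
	if (dma_mask >= 0xffffffffULL)
		return MODE_D32;		/* 32-bit direct window */
	return MODE_M32;			/* last resort: 32-bit ATEs */
}

int main(void)
{
	printf("%d %d %d\n",
	       first_choice(~0ULL, 4096, 0),		  /* D64  */
	       first_choice(0xffffffffffULL, MB(128), 0), /* M40S */
	       first_choice(0xffffffffULL, 4096, 1));	  /* D32  */
	return 0;
}
```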
+ */ + + if (byte_count > MB(64)) { + mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, + port, ct_addr, byte_count); + if (!mapaddr) + mapaddr = + tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, + ct_addr, byte_count); + } else { + mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, + ct_addr, byte_count); + if (!mapaddr) + mapaddr = + tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, + port, ct_addr, byte_count); + } + } + + /* + * 32-bit direct is the next mode to try + */ + if (!mapaddr && dma_mask >= 0xffffffffUL) + mapaddr = tioce_dma_d32(pdev, ct_addr); + + /* + * Last resort, try 32-bit ATE-based map. + */ + if (!mapaddr) + mapaddr = + tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr, + byte_count); + + spin_unlock_irqrestore(&ce_kern->ce_lock, flags); + +dma_map_done: + if (mapaddr & barrier) + mapaddr = tioce_dma_barrier(mapaddr, 1); + + return mapaddr; +} + +/** + * tioce_dma - standard pci dma map interface + * @pdev: pci device requesting the map + * @paddr: system physical address to map into pci space + * @byte_count: # bytes to map + * + * Simply call tioce_do_dma_map() to create a map with the barrier bit clear + * in the address. + */ +static uint64_t +tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) +{ + return tioce_do_dma_map(pdev, paddr, byte_count, 0); +} + +/** + * tioce_dma_consistent - consistent pci dma map interface + * @pdev: pci device requesting the map + * @paddr: system physical address to map into pci space + * @byte_count: # bytes to map + * + * Simply call tioce_do_dma_map() to create a map with the barrier bit set + * in the address. + */ static uint64_t +tioce_dma_consistent(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) +{ + return tioce_do_dma_map(pdev, paddr, byte_count, 1); +} + +/** + * tioce_error_intr_handler - SGI TIO CE error interrupt handler + * @irq: unused + * @arg: pointer to tioce_common struct for the given CE + * @pt: unused + * + * Handle a CE error interrupt. Simply a wrapper around a SAL call which + * defers processing to the SGI prom. + */ static irqreturn_t +tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt) +{ + struct tioce_common *soft = arg; + struct ia64_sal_retval ret_stuff; + ret_stuff.status = 0; + ret_stuff.v0 = 0; + + SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT, + soft->ce_pcibus.bs_persist_segment, + soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0); + + return IRQ_HANDLED; +} + +/** + * tioce_kern_init - init kernel structures related to a given TIOCE + * @tioce_common: ptr to a cached tioce_common struct that originated in prom + */ static struct tioce_kernel * +tioce_kern_init(struct tioce_common *tioce_common) +{ + int i; + uint32_t tmp; + struct tioce *tioce_mmr; + struct tioce_kernel *tioce_kern; + + tioce_kern = kcalloc(1, sizeof(struct tioce_kernel), GFP_KERNEL); + if (!tioce_kern) { + return NULL; + } + + tioce_kern->ce_common = tioce_common; + spin_lock_init(&tioce_kern->ce_lock); + INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list); + tioce_common->ce_kernel_private = (uint64_t) tioce_kern; + + /* + * Determine the secondary bus number of the port2 logical PPB. + * This is used to decide whether a given pci device resides on + * port1 or port2. Note: We don't have enough plumbing set up + * here to use pci_read_config_xxx() so use the raw_pci_ops vector. 
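tioce_kern_init() reads the secondary bus number of the CE's virtual PPB at devfn(2, 0) through raw_pci_ops (the call follows just below) and caches it as ce_port1_secondary; pcidev_to_tioce() then compares a device's bus number against that value to pick the zero-based port index. A small sketch of that rule, with a hypothetical secondary bus value:

```c
/*
 * Sketch of the port-resolution rule: buses below the cached secondary
 * bus number belong to port 1 (index 0), everything else to port 2.
 */
#include <stdio.h>
#include <stdint.h>

static int ce_port_for_bus(uint8_t devbus, uint8_t port1_secondary)
{
	return (devbus < port1_secondary) ? 0 : 1;	/* 0 == port1 */
}

int main(void)
{
	uint8_t port1_secondary = 0x42;	/* hypothetical PCI_SECONDARY_BUS value */

	printf("bus 0x40 -> port %d, bus 0x42 -> port %d\n",
	       ce_port_for_bus(0x40, port1_secondary) + 1,
	       ce_port_for_bus(0x42, port1_secondary) + 1);
	return 0;
}
```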
+ */ + + raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment, + tioce_common->ce_pcibus.bs_persist_busnum, + PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp); + tioce_kern->ce_port1_secondary = (uint8_t) tmp; + + /* + * Set PMU pagesize to the largest size available, and zero out + * the ate's. + */ + + tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base; + tioce_mmr->ce_ure_page_map &= ~CE_URE_PAGESIZE_MASK; + tioce_mmr->ce_ure_page_map |= CE_URE_256K_PAGESIZE; + tioce_kern->ce_ate3240_pagesize = KB(256); + + for (i = 0; i < TIOCE_NUM_M40_ATES; i++) { + tioce_kern->ce_ate40_shadow[i] = 0; + tioce_mmr->ce_ure_ate40[i] = 0; + } + + for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) { + tioce_kern->ce_ate3240_shadow[i] = 0; + tioce_mmr->ce_ure_ate3240[i] = 0; + } + + return tioce_kern; +} + +/** + * tioce_force_interrupt - implement altix force_interrupt() backend for CE + * @sn_irq_info: sn asic irq that we need an interrupt generated for + * + * Given an sn_irq_info struct, set the proper bit in ce_adm_force_int to + * force a secondary interrupt to be generated. This is to work around an + * asic issue where there is a small window of opportunity for a legacy device + * interrupt to be lost. + */ +static void +tioce_force_interrupt(struct sn_irq_info *sn_irq_info) +{ + struct pcidev_info *pcidev_info; + struct tioce_common *ce_common; + struct tioce *ce_mmr; + uint64_t force_int_val; + + if (!sn_irq_info->irq_bridge) + return; + + if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_TIOCE) + return; + + pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; + if (!pcidev_info) + return; + + ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; + ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base; + + /* + * irq_int_bit is originally set up by prom, and holds the interrupt + * bit shift (not mask) as defined by the bit definitions in the + * ce_adm_int mmr. These shifts are not the same for the + * ce_adm_force_int register, so do an explicit mapping here to make + * things clearer. + */ + + switch (sn_irq_info->irq_int_bit) { + case CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT: + force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT; + break; + case CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT: + force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT; + break; + case CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT: + force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT; + break; + case CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT: + force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT; + break; + case CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT: + force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT; + break; + case CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT: + force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT; + break; + case CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT: + force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT; + break; + case CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT: + force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT; + break; + default: + return; + } + ce_mmr->ce_adm_force_int = force_int_val; +} + +/** + * tioce_bus_fixup - perform final PCI fixup for a TIO CE bus + * @prom_bussoft: Common prom/kernel struct representing the bus + * + * Replicates the tioce_common pointed to by @prom_bussoft in kernel + * space. Allocates and initializes a kernel-only area for a given CE, + * and sets up an irq for handling CE error interrupts. + * + * On successful setup, returns the kernel version of tioce_common back to + * the caller. 
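tioce_force_interrupt() above remaps the ce_adm_int device-interrupt bit shifts (6 through 13, per the defines in tioce.h below) onto the ce_adm_force_int shifts (0 through 7) with an explicit switch. The sketch below expresses the same relationship with a lookup table; it is an illustration of the mapping, not a drop-in replacement.

```c
/*
 * Table-driven sketch of the int-bit to force-int-bit mapping done in
 * tioce_force_interrupt().
 */
#include <stdio.h>
#include <stdint.h>

#define CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT 6	/* first device int bit */
#define CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT 13	/* last device int bit */

/* force_int shift for each device interrupt bit, port1 dev A first */
static const int force_int_shift[] = { 0, 1, 2, 3, 4, 5, 6, 7 };

static uint64_t force_int_val(int irq_int_bit)
{
	if (irq_int_bit < CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT ||
	    irq_int_bit > CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT)
		return 0;	/* not a device interrupt: nothing to force */

	return 1ULL << force_int_shift[irq_int_bit -
				       CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT];
}

int main(void)
{
	/* port1 device A (bit 6) and port2 device D (bit 13) */
	printf("0x%llx 0x%llx\n",
	       (unsigned long long)force_int_val(6),
	       (unsigned long long)force_int_val(13));
	return 0;
}
```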
+ */ +static void * +tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) +{ + struct tioce_common *tioce_common; + + /* + * Allocate kernel bus soft and copy from prom. + */ + + tioce_common = kcalloc(1, sizeof(struct tioce_common), GFP_KERNEL); + if (!tioce_common) + return NULL; + + memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common)); + tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET; + + if (tioce_kern_init(tioce_common) == NULL) { + kfree(tioce_common); + return NULL; + } + + if (request_irq(SGI_PCIASIC_ERROR, + tioce_error_intr_handler, + SA_SHIRQ, "TIOCE error", (void *)tioce_common)) + printk(KERN_WARNING + "%s: Unable to get irq %d. " + "Error interrupts won't be routed for " + "TIOCE bus %04x:%02x\n", + __FUNCTION__, SGI_PCIASIC_ERROR, + tioce_common->ce_pcibus.bs_persist_segment, + tioce_common->ce_pcibus.bs_persist_busnum); + + return tioce_common; +} + +static struct sn_pcibus_provider tioce_pci_interfaces = { + .dma_map = tioce_dma, + .dma_map_consistent = tioce_dma_consistent, + .dma_unmap = tioce_dma_unmap, + .bus_fixup = tioce_bus_fixup, + .force_interrupt = tioce_force_interrupt +}; + +/** + * tioce_init_provider - init SN PCI provider ops for TIO CE + */ +int +tioce_init_provider(void) +{ + sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces; + return 0; +} diff --git a/include/asm-ia64/sn/pcibus_provider_defs.h b/include/asm-ia64/sn/pcibus_provider_defs.h index 5c3ba57..5a92f51 100644 --- a/include/asm-ia64/sn/pcibus_provider_defs.h +++ b/include/asm-ia64/sn/pcibus_provider_defs.h @@ -18,8 +18,9 @@ #define PCIIO_ASIC_TYPE_PIC 2 #define PCIIO_ASIC_TYPE_TIOCP 3 #define PCIIO_ASIC_TYPE_TIOCA 4 +#define PCIIO_ASIC_TYPE_TIOCE 5 -#define PCIIO_ASIC_MAX_TYPES 5 +#define PCIIO_ASIC_MAX_TYPES 6 /* * Common pciio bus provider data. There should be one of these as the diff --git a/include/asm-ia64/sn/tioce.h b/include/asm-ia64/sn/tioce.h new file mode 100644 index 0000000..2287985 --- /dev/null +++ b/include/asm-ia64/sn/tioce.h @@ -0,0 +1,740 @@ +/************************************************************************** + * * + * Unpublished copyright (c) 2005, Silicon Graphics, Inc. * + * THIS IS UNPUBLISHED CONFIDENTIAL AND PROPRIETARY SOURCE CODE OF SGI. * + * * + * The copyright notice above does not evidence any actual or intended * + * publication or disclosure of this source code, which includes * + * information that is confidential and/or proprietary, and is a trade * + * secret, of Silicon Graphics, Inc. ANY REPRODUCTION, MODIFICATION, * + * DISTRIBUTION, PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH * + * USE OF THIS SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF * + * SILICON GRAPHICS, INC. IS STRICTLY PROHIBITED, AND IN VIOLATION OF * + * APPLICABLE LAWS AND INTERNATIONAL TREATIES. THE RECEIPT OR * + * POSSESSION OF THIS SOURCE CODE AND/OR RELATED INFORMATION DOES NOT * + * CONVEY OR IMPLY ANY RIGHTS TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS * + * CONTENTS, OR TO MANUFACTURE, USE, OR SELL ANYTHING THAT IT MAY * + * DESCRIBE, IN WHOLE OR IN PART. 
* + * * + **************************************************************************/ + +#ifndef __ASM_IA64_SN_TIOCE_H__ +#define __ASM_IA64_SN_TIOCE_H__ + +/* CE ASIC part & mfgr information */ +#define TIOCE_PART_NUM 0xCE00 +#define TIOCE_MFGR_NUM 0x36 +#define TIOCE_REV_A 0x1 + +/* CE Virtual PPB Vendor/Device IDs */ +#define CE_VIRT_PPB_VENDOR_ID 0x10a9 +#define CE_VIRT_PPB_DEVICE_ID 0x4002 + +/* CE Host Bridge Vendor/Device IDs */ +#define CE_HOST_BRIDGE_VENDOR_ID 0x10a9 +#define CE_HOST_BRIDGE_DEVICE_ID 0x4003 + + +#define TIOCE_NUM_M40_ATES 4096 +#define TIOCE_NUM_M3240_ATES 2048 +#define TIOCE_NUM_PORTS 2 + +/* + * Register layout for TIOCE. MMR offsets are shown at the far right of the + * structure definition. + */ +typedef volatile struct tioce { + /* + * ADMIN : Administration Registers + */ + uint64_t ce_adm_id; /* 0x000000 */ + uint64_t ce_pad_000008; /* 0x000008 */ + uint64_t ce_adm_dyn_credit_status; /* 0x000010 */ + uint64_t ce_adm_last_credit_status; /* 0x000018 */ + uint64_t ce_adm_credit_limit; /* 0x000020 */ + uint64_t ce_adm_force_credit; /* 0x000028 */ + uint64_t ce_adm_control; /* 0x000030 */ + uint64_t ce_adm_mmr_chn_timeout; /* 0x000038 */ + uint64_t ce_adm_ssp_ure_timeout; /* 0x000040 */ + uint64_t ce_adm_ssp_dre_timeout; /* 0x000048 */ + uint64_t ce_adm_ssp_debug_sel; /* 0x000050 */ + uint64_t ce_adm_int_status; /* 0x000058 */ + uint64_t ce_adm_int_status_alias; /* 0x000060 */ + uint64_t ce_adm_int_mask; /* 0x000068 */ + uint64_t ce_adm_int_pending; /* 0x000070 */ + uint64_t ce_adm_force_int; /* 0x000078 */ + uint64_t ce_adm_ure_ups_buf_barrier_flush; /* 0x000080 */ + uint64_t ce_adm_int_dest[15]; /* 0x000088 -- 0x0000F8 */ + uint64_t ce_adm_error_summary; /* 0x000100 */ + uint64_t ce_adm_error_summary_alias; /* 0x000108 */ + uint64_t ce_adm_error_mask; /* 0x000110 */ + uint64_t ce_adm_first_error; /* 0x000118 */ + uint64_t ce_adm_error_overflow; /* 0x000120 */ + uint64_t ce_adm_error_overflow_alias; /* 0x000128 */ + uint64_t ce_pad_000130[2]; /* 0x000130 -- 0x000138 */ + uint64_t ce_adm_tnum_error; /* 0x000140 */ + uint64_t ce_adm_mmr_err_detail; /* 0x000148 */ + uint64_t ce_adm_msg_sram_perr_detail; /* 0x000150 */ + uint64_t ce_adm_bap_sram_perr_detail; /* 0x000158 */ + uint64_t ce_adm_ce_sram_perr_detail; /* 0x000160 */ + uint64_t ce_adm_ce_credit_oflow_detail; /* 0x000168 */ + uint64_t ce_adm_tx_link_idle_max_timer; /* 0x000170 */ + uint64_t ce_adm_pcie_debug_sel; /* 0x000178 */ + uint64_t ce_pad_000180[16]; /* 0x000180 -- 0x0001F8 */ + + uint64_t ce_adm_pcie_debug_sel_top; /* 0x000200 */ + uint64_t ce_adm_pcie_debug_lat_sel_lo_top; /* 0x000208 */ + uint64_t ce_adm_pcie_debug_lat_sel_hi_top; /* 0x000210 */ + uint64_t ce_adm_pcie_debug_trig_sel_top; /* 0x000218 */ + uint64_t ce_adm_pcie_debug_trig_lat_sel_lo_top; /* 0x000220 */ + uint64_t ce_adm_pcie_debug_trig_lat_sel_hi_top; /* 0x000228 */ + uint64_t ce_adm_pcie_trig_compare_top; /* 0x000230 */ + uint64_t ce_adm_pcie_trig_compare_en_top; /* 0x000238 */ + uint64_t ce_adm_ssp_debug_sel_top; /* 0x000240 */ + uint64_t ce_adm_ssp_debug_lat_sel_lo_top; /* 0x000248 */ + uint64_t ce_adm_ssp_debug_lat_sel_hi_top; /* 0x000250 */ + uint64_t ce_adm_ssp_debug_trig_sel_top; /* 0x000258 */ + uint64_t ce_adm_ssp_debug_trig_lat_sel_lo_top; /* 0x000260 */ + uint64_t ce_adm_ssp_debug_trig_lat_sel_hi_top; /* 0x000268 */ + uint64_t ce_adm_ssp_trig_compare_top; /* 0x000270 */ + uint64_t ce_adm_ssp_trig_compare_en_top; /* 0x000278 */ + uint64_t ce_pad_000280[48]; /* 0x000280 -- 0x0003F8 */ + + uint64_t ce_adm_bap_ctrl; /* 
0x000400 */ + uint64_t ce_pad_000408[127]; /* 0x000408 -- 0x0007F8 */ + + uint64_t ce_msg_buf_data63_0[35]; /* 0x000800 -- 0x000918 */ + uint64_t ce_pad_000920[29]; /* 0x000920 -- 0x0009F8 */ + + uint64_t ce_msg_buf_data127_64[35]; /* 0x000A00 -- 0x000B18 */ + uint64_t ce_pad_000B20[29]; /* 0x000B20 -- 0x000BF8 */ + + uint64_t ce_msg_buf_parity[35]; /* 0x000C00 -- 0x000D18 */ + uint64_t ce_pad_000D20[29]; /* 0x000D20 -- 0x000DF8 */ + + uint64_t ce_pad_000E00[576]; /* 0x000E00 -- 0x001FF8 */ + + /* + * LSI : LSI's PCI Express Link Registers (Link#1 and Link#2) + * Link#1 MMRs at start at 0x002000, Link#2 MMRs at 0x003000 + * NOTE: the comment offsets at far right: let 'z' = {2 or 3} + */ + #define ce_lsi(link_num) ce_lsi[link_num-1] + struct ce_lsi_reg { + uint64_t ce_lsi_lpu_id; /* 0x00z000 */ + uint64_t ce_lsi_rst; /* 0x00z008 */ + uint64_t ce_lsi_dbg_stat; /* 0x00z010 */ + uint64_t ce_lsi_dbg_cfg; /* 0x00z018 */ + uint64_t ce_lsi_ltssm_ctrl; /* 0x00z020 */ + uint64_t ce_lsi_lk_stat; /* 0x00z028 */ + uint64_t ce_pad_00z030[2]; /* 0x00z030 -- 0x00z038 */ + uint64_t ce_lsi_int_and_stat; /* 0x00z040 */ + uint64_t ce_lsi_int_mask; /* 0x00z048 */ + uint64_t ce_pad_00z050[22]; /* 0x00z050 -- 0x00z0F8 */ + uint64_t ce_lsi_lk_perf_cnt_sel; /* 0x00z100 */ + uint64_t ce_pad_00z108; /* 0x00z108 */ + uint64_t ce_lsi_lk_perf_cnt_ctrl; /* 0x00z110 */ + uint64_t ce_pad_00z118; /* 0x00z118 */ + uint64_t ce_lsi_lk_perf_cnt1; /* 0x00z120 */ + uint64_t ce_lsi_lk_perf_cnt1_test; /* 0x00z128 */ + uint64_t ce_lsi_lk_perf_cnt2; /* 0x00z130 */ + uint64_t ce_lsi_lk_perf_cnt2_test; /* 0x00z138 */ + uint64_t ce_pad_00z140[24]; /* 0x00z140 -- 0x00z1F8 */ + uint64_t ce_lsi_lk_lyr_cfg; /* 0x00z200 */ + uint64_t ce_lsi_lk_lyr_status; /* 0x00z208 */ + uint64_t ce_lsi_lk_lyr_int_stat; /* 0x00z210 */ + uint64_t ce_lsi_lk_ly_int_stat_test; /* 0x00z218 */ + uint64_t ce_lsi_lk_ly_int_stat_mask; /* 0x00z220 */ + uint64_t ce_pad_00z228[3]; /* 0x00z228 -- 0x00z238 */ + uint64_t ce_lsi_fc_upd_ctl; /* 0x00z240 */ + uint64_t ce_pad_00z248[3]; /* 0x00z248 -- 0x00z258 */ + uint64_t ce_lsi_flw_ctl_upd_to_timer; /* 0x00z260 */ + uint64_t ce_lsi_flw_ctl_upd_timer0; /* 0x00z268 */ + uint64_t ce_lsi_flw_ctl_upd_timer1; /* 0x00z270 */ + uint64_t ce_pad_00z278[49]; /* 0x00z278 -- 0x00z3F8 */ + uint64_t ce_lsi_freq_nak_lat_thrsh; /* 0x00z400 */ + uint64_t ce_lsi_ack_nak_lat_tmr; /* 0x00z408 */ + uint64_t ce_lsi_rply_tmr_thr; /* 0x00z410 */ + uint64_t ce_lsi_rply_tmr; /* 0x00z418 */ + uint64_t ce_lsi_rply_num_stat; /* 0x00z420 */ + uint64_t ce_lsi_rty_buf_max_addr; /* 0x00z428 */ + uint64_t ce_lsi_rty_fifo_ptr; /* 0x00z430 */ + uint64_t ce_lsi_rty_fifo_rd_wr_ptr; /* 0x00z438 */ + uint64_t ce_lsi_rty_fifo_cred; /* 0x00z440 */ + uint64_t ce_lsi_seq_cnt; /* 0x00z448 */ + uint64_t ce_lsi_ack_sent_seq_num; /* 0x00z450 */ + uint64_t ce_lsi_seq_cnt_fifo_max_addr; /* 0x00z458 */ + uint64_t ce_lsi_seq_cnt_fifo_ptr; /* 0x00z460 */ + uint64_t ce_lsi_seq_cnt_rd_wr_ptr; /* 0x00z468 */ + uint64_t ce_lsi_tx_lk_ts_ctl; /* 0x00z470 */ + uint64_t ce_pad_00z478; /* 0x00z478 */ + uint64_t ce_lsi_mem_addr_ctl; /* 0x00z480 */ + uint64_t ce_lsi_mem_d_ld0; /* 0x00z488 */ + uint64_t ce_lsi_mem_d_ld1; /* 0x00z490 */ + uint64_t ce_lsi_mem_d_ld2; /* 0x00z498 */ + uint64_t ce_lsi_mem_d_ld3; /* 0x00z4A0 */ + uint64_t ce_lsi_mem_d_ld4; /* 0x00z4A8 */ + uint64_t ce_pad_00z4B0[2]; /* 0x00z4B0 -- 0x00z4B8 */ + uint64_t ce_lsi_rty_d_cnt; /* 0x00z4C0 */ + uint64_t ce_lsi_seq_buf_cnt; /* 0x00z4C8 */ + uint64_t ce_lsi_seq_buf_bt_d; /* 0x00z4D0 */ + uint64_t ce_pad_00z4D8; /* 
0x00z4D8 */ + uint64_t ce_lsi_ack_lat_thr; /* 0x00z4E0 */ + uint64_t ce_pad_00z4E8[3]; /* 0x00z4E8 -- 0x00z4F8 */ + uint64_t ce_lsi_nxt_rcv_seq_1_cntr; /* 0x00z500 */ + uint64_t ce_lsi_unsp_dllp_rcvd; /* 0x00z508 */ + uint64_t ce_lsi_rcv_lk_ts_ctl; /* 0x00z510 */ + uint64_t ce_pad_00z518[29]; /* 0x00z518 -- 0x00z5F8 */ + uint64_t ce_lsi_phy_lyr_cfg; /* 0x00z600 */ + uint64_t ce_pad_00z608; /* 0x00z608 */ + uint64_t ce_lsi_phy_lyr_int_stat; /* 0x00z610 */ + uint64_t ce_lsi_phy_lyr_int_stat_test; /* 0x00z618 */ + uint64_t ce_lsi_phy_lyr_int_mask; /* 0x00z620 */ + uint64_t ce_pad_00z628[11]; /* 0x00z628 -- 0x00z678 */ + uint64_t ce_lsi_rcv_phy_cfg; /* 0x00z680 */ + uint64_t ce_lsi_rcv_phy_stat1; /* 0x00z688 */ + uint64_t ce_lsi_rcv_phy_stat2; /* 0x00z690 */ + uint64_t ce_lsi_rcv_phy_stat3; /* 0x00z698 */ + uint64_t ce_lsi_rcv_phy_int_stat; /* 0x00z6A0 */ + uint64_t ce_lsi_rcv_phy_int_stat_test; /* 0x00z6A8 */ + uint64_t ce_lsi_rcv_phy_int_mask; /* 0x00z6B0 */ + uint64_t ce_pad_00z6B8[9]; /* 0x00z6B8 -- 0x00z6F8 */ + uint64_t ce_lsi_tx_phy_cfg; /* 0x00z700 */ + uint64_t ce_lsi_tx_phy_stat; /* 0x00z708 */ + uint64_t ce_lsi_tx_phy_int_stat; /* 0x00z710 */ + uint64_t ce_lsi_tx_phy_int_stat_test; /* 0x00z718 */ + uint64_t ce_lsi_tx_phy_int_mask; /* 0x00z720 */ + uint64_t ce_lsi_tx_phy_stat2; /* 0x00z728 */ + uint64_t ce_pad_00z730[10]; /* 0x00z730 -- 0x00z77F */ + uint64_t ce_lsi_ltssm_cfg1; /* 0x00z780 */ + uint64_t ce_lsi_ltssm_cfg2; /* 0x00z788 */ + uint64_t ce_lsi_ltssm_cfg3; /* 0x00z790 */ + uint64_t ce_lsi_ltssm_cfg4; /* 0x00z798 */ + uint64_t ce_lsi_ltssm_cfg5; /* 0x00z7A0 */ + uint64_t ce_lsi_ltssm_stat1; /* 0x00z7A8 */ + uint64_t ce_lsi_ltssm_stat2; /* 0x00z7B0 */ + uint64_t ce_lsi_ltssm_int_stat; /* 0x00z7B8 */ + uint64_t ce_lsi_ltssm_int_stat_test; /* 0x00z7C0 */ + uint64_t ce_lsi_ltssm_int_mask; /* 0x00z7C8 */ + uint64_t ce_lsi_ltssm_stat_wr_en; /* 0x00z7D0 */ + uint64_t ce_pad_00z7D8[5]; /* 0x00z7D8 -- 0x00z7F8 */ + uint64_t ce_lsi_gb_cfg1; /* 0x00z800 */ + uint64_t ce_lsi_gb_cfg2; /* 0x00z808 */ + uint64_t ce_lsi_gb_cfg3; /* 0x00z810 */ + uint64_t ce_lsi_gb_cfg4; /* 0x00z818 */ + uint64_t ce_lsi_gb_stat; /* 0x00z820 */ + uint64_t ce_lsi_gb_int_stat; /* 0x00z828 */ + uint64_t ce_lsi_gb_int_stat_test; /* 0x00z830 */ + uint64_t ce_lsi_gb_int_mask; /* 0x00z838 */ + uint64_t ce_lsi_gb_pwr_dn1; /* 0x00z840 */ + uint64_t ce_lsi_gb_pwr_dn2; /* 0x00z848 */ + uint64_t ce_pad_00z850[246]; /* 0x00z850 -- 0x00zFF8 */ + } ce_lsi[2]; + + uint64_t ce_pad_004000[10]; /* 0x004000 -- 0x004048 */ + + /* + * CRM: Coretalk Receive Module Registers + */ + uint64_t ce_crm_debug_mux; /* 0x004050 */ + uint64_t ce_pad_004058; /* 0x004058 */ + uint64_t ce_crm_ssp_err_cmd_wrd; /* 0x004060 */ + uint64_t ce_crm_ssp_err_addr; /* 0x004068 */ + uint64_t ce_crm_ssp_err_syn; /* 0x004070 */ + + uint64_t ce_pad_004078[499]; /* 0x004078 -- 0x005008 */ + + /* + * CXM: Coretalk Xmit Module Registers + */ + uint64_t ce_cxm_dyn_credit_status; /* 0x005010 */ + uint64_t ce_cxm_last_credit_status; /* 0x005018 */ + uint64_t ce_cxm_credit_limit; /* 0x005020 */ + uint64_t ce_cxm_force_credit; /* 0x005028 */ + uint64_t ce_cxm_disable_bypass; /* 0x005030 */ + uint64_t ce_pad_005038[3]; /* 0x005038 -- 0x005048 */ + uint64_t ce_cxm_debug_mux; /* 0x005050 */ + + uint64_t ce_pad_005058[501]; /* 0x005058 -- 0x005FF8 */ + + /* + * DTL: Downstream Transaction Layer Regs (Link#1 and Link#2) + * DTL: Link#1 MMRs at start at 0x006000, Link#2 MMRs at 0x008000 + * DTL: the comment offsets at far right: let 'y' = {6 or 8} + * + * UTL: Downstream 
Transaction Layer Regs (Link#1 and Link#2) + * UTL: Link#1 MMRs at start at 0x007000, Link#2 MMRs at 0x009000 + * UTL: the comment offsets at far right: let 'z' = {7 or 9} + */ + #define ce_dtl(link_num) ce_dtl_utl[link_num-1] + #define ce_utl(link_num) ce_dtl_utl[link_num-1] + struct ce_dtl_utl_reg { + /* DTL */ + uint64_t ce_dtl_dtdr_credit_limit; /* 0x00y000 */ + uint64_t ce_dtl_dtdr_credit_force; /* 0x00y008 */ + uint64_t ce_dtl_dyn_credit_status; /* 0x00y010 */ + uint64_t ce_dtl_dtl_last_credit_stat; /* 0x00y018 */ + uint64_t ce_dtl_dtl_ctrl; /* 0x00y020 */ + uint64_t ce_pad_00y028[5]; /* 0x00y028 -- 0x00y048 */ + uint64_t ce_dtl_debug_sel; /* 0x00y050 */ + uint64_t ce_pad_00y058[501]; /* 0x00y058 -- 0x00yFF8 */ + + /* UTL */ + uint64_t ce_utl_utl_ctrl; /* 0x00z000 */ + uint64_t ce_utl_debug_sel; /* 0x00z008 */ + uint64_t ce_pad_00z010[510]; /* 0x00z010 -- 0x00zFF8 */ + } ce_dtl_utl[2]; + + uint64_t ce_pad_00A000[514]; /* 0x00A000 -- 0x00B008 */ + + /* + * URE: Upstream Request Engine + */ + uint64_t ce_ure_dyn_credit_status; /* 0x00B010 */ + uint64_t ce_ure_last_credit_status; /* 0x00B018 */ + uint64_t ce_ure_credit_limit; /* 0x00B020 */ + uint64_t ce_pad_00B028; /* 0x00B028 */ + uint64_t ce_ure_control; /* 0x00B030 */ + uint64_t ce_ure_status; /* 0x00B038 */ + uint64_t ce_pad_00B040[2]; /* 0x00B040 -- 0x00B048 */ + uint64_t ce_ure_debug_sel; /* 0x00B050 */ + uint64_t ce_ure_pcie_debug_sel; /* 0x00B058 */ + uint64_t ce_ure_ssp_err_cmd_wrd; /* 0x00B060 */ + uint64_t ce_ure_ssp_err_addr; /* 0x00B068 */ + uint64_t ce_ure_page_map; /* 0x00B070 */ + uint64_t ce_ure_dir_map[TIOCE_NUM_PORTS]; /* 0x00B078 */ + uint64_t ce_ure_pipe_sel1; /* 0x00B088 */ + uint64_t ce_ure_pipe_mask1; /* 0x00B090 */ + uint64_t ce_ure_pipe_sel2; /* 0x00B098 */ + uint64_t ce_ure_pipe_mask2; /* 0x00B0A0 */ + uint64_t ce_ure_pcie1_credits_sent; /* 0x00B0A8 */ + uint64_t ce_ure_pcie1_credits_used; /* 0x00B0B0 */ + uint64_t ce_ure_pcie1_credit_limit; /* 0x00B0B8 */ + uint64_t ce_ure_pcie2_credits_sent; /* 0x00B0C0 */ + uint64_t ce_ure_pcie2_credits_used; /* 0x00B0C8 */ + uint64_t ce_ure_pcie2_credit_limit; /* 0x00B0D0 */ + uint64_t ce_ure_pcie_force_credit; /* 0x00B0D8 */ + uint64_t ce_ure_rd_tnum_val; /* 0x00B0E0 */ + uint64_t ce_ure_rd_tnum_rsp_rcvd; /* 0x00B0E8 */ + uint64_t ce_ure_rd_tnum_esent_timer; /* 0x00B0F0 */ + uint64_t ce_ure_rd_tnum_error; /* 0x00B0F8 */ + uint64_t ce_ure_rd_tnum_first_cl; /* 0x00B100 */ + uint64_t ce_ure_rd_tnum_link_buf; /* 0x00B108 */ + uint64_t ce_ure_wr_tnum_val; /* 0x00B110 */ + uint64_t ce_ure_sram_err_addr0; /* 0x00B118 */ + uint64_t ce_ure_sram_err_addr1; /* 0x00B120 */ + uint64_t ce_ure_sram_err_addr2; /* 0x00B128 */ + uint64_t ce_ure_sram_rd_addr0; /* 0x00B130 */ + uint64_t ce_ure_sram_rd_addr1; /* 0x00B138 */ + uint64_t ce_ure_sram_rd_addr2; /* 0x00B140 */ + uint64_t ce_ure_sram_wr_addr0; /* 0x00B148 */ + uint64_t ce_ure_sram_wr_addr1; /* 0x00B150 */ + uint64_t ce_ure_sram_wr_addr2; /* 0x00B158 */ + uint64_t ce_ure_buf_flush10; /* 0x00B160 */ + uint64_t ce_ure_buf_flush11; /* 0x00B168 */ + uint64_t ce_ure_buf_flush12; /* 0x00B170 */ + uint64_t ce_ure_buf_flush13; /* 0x00B178 */ + uint64_t ce_ure_buf_flush20; /* 0x00B180 */ + uint64_t ce_ure_buf_flush21; /* 0x00B188 */ + uint64_t ce_ure_buf_flush22; /* 0x00B190 */ + uint64_t ce_ure_buf_flush23; /* 0x00B198 */ + uint64_t ce_ure_pcie_control1; /* 0x00B1A0 */ + uint64_t ce_ure_pcie_control2; /* 0x00B1A8 */ + + uint64_t ce_pad_00B1B0[458]; /* 0x00B1B0 -- 0x00BFF8 */ + + /* Upstream Data Buffer, Port1 */ + struct 
ce_ure_maint_ups_dat1_data { + uint64_t data63_0[512]; /* 0x00C000 -- 0x00CFF8 */ + uint64_t data127_64[512]; /* 0x00D000 -- 0x00DFF8 */ + uint64_t parity[512]; /* 0x00E000 -- 0x00EFF8 */ + } ce_ure_maint_ups_dat1; + + /* Upstream Header Buffer, Port1 */ + struct ce_ure_maint_ups_hdr1_data { + uint64_t data63_0[512]; /* 0x00F000 -- 0x00FFF8 */ + uint64_t data127_64[512]; /* 0x010000 -- 0x010FF8 */ + uint64_t parity[512]; /* 0x011000 -- 0x011FF8 */ + } ce_ure_maint_ups_hdr1; + + /* Upstream Data Buffer, Port2 */ + struct ce_ure_maint_ups_dat2_data { + uint64_t data63_0[512]; /* 0x012000 -- 0x012FF8 */ + uint64_t data127_64[512]; /* 0x013000 -- 0x013FF8 */ + uint64_t parity[512]; /* 0x014000 -- 0x014FF8 */ + } ce_ure_maint_ups_dat2; + + /* Upstream Header Buffer, Port2 */ + struct ce_ure_maint_ups_hdr2_data { + uint64_t data63_0[512]; /* 0x015000 -- 0x015FF8 */ + uint64_t data127_64[512]; /* 0x016000 -- 0x016FF8 */ + uint64_t parity[512]; /* 0x017000 -- 0x017FF8 */ + } ce_ure_maint_ups_hdr2; + + /* Downstream Data Buffer */ + struct ce_ure_maint_dns_dat_data { + uint64_t data63_0[512]; /* 0x018000 -- 0x018FF8 */ + uint64_t data127_64[512]; /* 0x019000 -- 0x019FF8 */ + uint64_t parity[512]; /* 0x01A000 -- 0x01AFF8 */ + } ce_ure_maint_dns_dat; + + /* Downstream Header Buffer */ + struct ce_ure_maint_dns_hdr_data { + uint64_t data31_0[64]; /* 0x01B000 -- 0x01B1F8 */ + uint64_t data95_32[64]; /* 0x01B200 -- 0x01B3F8 */ + uint64_t parity[64]; /* 0x01B400 -- 0x01B5F8 */ + } ce_ure_maint_dns_hdr; + + /* RCI Buffer Data */ + struct ce_ure_maint_rci_data { + uint64_t data41_0[64]; /* 0x01B600 -- 0x01B7F8 */ + uint64_t data69_42[64]; /* 0x01B800 -- 0x01B9F8 */ + } ce_ure_maint_rci; + + /* Response Queue */ + uint64_t ce_ure_maint_rspq[64]; /* 0x01BA00 -- 0x01BBF8 */ + + uint64_t ce_pad_01C000[4224]; /* 0x01BC00 -- 0x023FF8 */ + + /* Admin Build-a-Packet Buffer */ + struct ce_adm_maint_bap_buf_data { + uint64_t data63_0[258]; /* 0x024000 -- 0x024808 */ + uint64_t data127_64[258]; /* 0x024810 -- 0x025018 */ + uint64_t parity[258]; /* 0x025020 -- 0x025828 */ + } ce_adm_maint_bap_buf; + + uint64_t ce_pad_025830[5370]; /* 0x025830 -- 0x02FFF8 */ + + /* URE: 40bit PMU ATE Buffer */ /* 0x030000 -- 0x037FF8 */ + uint64_t ce_ure_ate40[TIOCE_NUM_M40_ATES]; + + /* URE: 32/40bit PMU ATE Buffer */ /* 0x038000 -- 0x03BFF8 */ + uint64_t ce_ure_ate3240[TIOCE_NUM_M3240_ATES]; + + uint64_t ce_pad_03C000[2050]; /* 0x03C000 -- 0x040008 */ + + /* + * DRE: Down Stream Request Engine + */ + uint64_t ce_dre_dyn_credit_status1; /* 0x040010 */ + uint64_t ce_dre_dyn_credit_status2; /* 0x040018 */ + uint64_t ce_dre_last_credit_status1; /* 0x040020 */ + uint64_t ce_dre_last_credit_status2; /* 0x040028 */ + uint64_t ce_dre_credit_limit1; /* 0x040030 */ + uint64_t ce_dre_credit_limit2; /* 0x040038 */ + uint64_t ce_dre_force_credit1; /* 0x040040 */ + uint64_t ce_dre_force_credit2; /* 0x040048 */ + uint64_t ce_dre_debug_mux1; /* 0x040050 */ + uint64_t ce_dre_debug_mux2; /* 0x040058 */ + uint64_t ce_dre_ssp_err_cmd_wrd; /* 0x040060 */ + uint64_t ce_dre_ssp_err_addr; /* 0x040068 */ + uint64_t ce_dre_comp_err_cmd_wrd; /* 0x040070 */ + uint64_t ce_dre_comp_err_addr; /* 0x040078 */ + uint64_t ce_dre_req_status; /* 0x040080 */ + uint64_t ce_dre_config1; /* 0x040088 */ + uint64_t ce_dre_config2; /* 0x040090 */ + uint64_t ce_dre_config_req_status; /* 0x040098 */ + uint64_t ce_pad_0400A0[12]; /* 0x0400A0 -- 0x0400F8 */ + uint64_t ce_dre_dyn_fifo; /* 0x040100 */ + uint64_t ce_pad_040108[3]; /* 0x040108 -- 0x040118 */ + uint64_t 
ce_dre_last_fifo; /* 0x040120 */ + + uint64_t ce_pad_040128[27]; /* 0x040128 -- 0x0401F8 */ + + /* DRE Downstream Head Queue */ + struct ce_dre_maint_ds_head_queue { + uint64_t data63_0[32]; /* 0x040200 -- 0x0402F8 */ + uint64_t data127_64[32]; /* 0x040300 -- 0x0403F8 */ + uint64_t parity[32]; /* 0x040400 -- 0x0404F8 */ + } ce_dre_maint_ds_head_q; + + uint64_t ce_pad_040500[352]; /* 0x040500 -- 0x040FF8 */ + + /* DRE Downstream Data Queue */ + struct ce_dre_maint_ds_data_queue { + uint64_t data63_0[256]; /* 0x041000 -- 0x0417F8 */ + uint64_t ce_pad_041800[256]; /* 0x041800 -- 0x041FF8 */ + uint64_t data127_64[256]; /* 0x042000 -- 0x0427F8 */ + uint64_t ce_pad_042800[256]; /* 0x042800 -- 0x042FF8 */ + uint64_t parity[256]; /* 0x043000 -- 0x0437F8 */ + uint64_t ce_pad_043800[256]; /* 0x043800 -- 0x043FF8 */ + } ce_dre_maint_ds_data_q; + + /* DRE URE Upstream Response Queue */ + struct ce_dre_maint_ure_us_rsp_queue { + uint64_t data63_0[8]; /* 0x044000 -- 0x044038 */ + uint64_t ce_pad_044040[24]; /* 0x044040 -- 0x0440F8 */ + uint64_t data127_64[8]; /* 0x044100 -- 0x044138 */ + uint64_t ce_pad_044140[24]; /* 0x044140 -- 0x0441F8 */ + uint64_t parity[8]; /* 0x044200 -- 0x044238 */ + uint64_t ce_pad_044240[24]; /* 0x044240 -- 0x0442F8 */ + } ce_dre_maint_ure_us_rsp_q; + + uint64_t ce_dre_maint_us_wrt_rsp[32];/* 0x044300 -- 0x0443F8 */ + + uint64_t ce_end_of_struct; /* 0x044400 */ +} tioce_t; + + +/* ce_adm_int_mask/ce_adm_int_status register bit defines */ +#define CE_ADM_INT_CE_ERROR_SHFT 0 +#define CE_ADM_INT_LSI1_IP_ERROR_SHFT 1 +#define CE_ADM_INT_LSI2_IP_ERROR_SHFT 2 +#define CE_ADM_INT_PCIE_ERROR_SHFT 3 +#define CE_ADM_INT_PORT1_HOTPLUG_EVENT_SHFT 4 +#define CE_ADM_INT_PORT2_HOTPLUG_EVENT_SHFT 5 +#define CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT 6 +#define CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT 7 +#define CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT 8 +#define CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT 9 +#define CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT 10 +#define CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT 11 +#define CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT 12 +#define CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT 13 +#define CE_ADM_INT_PCIE_MSG_SHFT 14 /*see int_dest_14*/ +#define CE_ADM_INT_PCIE_MSG_SLOT_0_SHFT 14 +#define CE_ADM_INT_PCIE_MSG_SLOT_1_SHFT 15 +#define CE_ADM_INT_PCIE_MSG_SLOT_2_SHFT 16 +#define CE_ADM_INT_PCIE_MSG_SLOT_3_SHFT 17 +#define CE_ADM_INT_PORT1_PM_PME_MSG_SHFT 22 +#define CE_ADM_INT_PORT2_PM_PME_MSG_SHFT 23 + +/* ce_adm_force_int register bit defines */ +#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT 0 +#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT 1 +#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT 2 +#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT 3 +#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT 4 +#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT 5 +#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT 6 +#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT 7 +#define CE_ADM_FORCE_INT_ALWAYS_SHFT 8 + +/* ce_adm_int_dest register bit masks & shifts */ +#define INTR_VECTOR_SHFT 56 + +/* ce_adm_error_mask and ce_adm_error_summary register bit masks */ +#define CE_ADM_ERR_CRM_SSP_REQ_INVALID (0x1ULL << 0) +#define CE_ADM_ERR_SSP_REQ_HEADER (0x1ULL << 1) +#define CE_ADM_ERR_SSP_RSP_HEADER (0x1ULL << 2) +#define CE_ADM_ERR_SSP_PROTOCOL_ERROR (0x1ULL << 3) +#define CE_ADM_ERR_SSP_SBE (0x1ULL << 4) +#define CE_ADM_ERR_SSP_MBE (0x1ULL << 5) +#define CE_ADM_ERR_CXM_CREDIT_OFLOW (0x1ULL << 6) +#define CE_ADM_ERR_DRE_SSP_REQ_INVAL (0x1ULL << 7) +#define CE_ADM_ERR_SSP_REQ_LONG (0x1ULL << 8) +#define CE_ADM_ERR_SSP_REQ_OFLOW (0x1ULL << 9) +#define 
CE_ADM_ERR_SSP_REQ_SHORT (0x1ULL << 10) +#define CE_ADM_ERR_SSP_REQ_SIDEBAND (0x1ULL << 11) +#define CE_ADM_ERR_SSP_REQ_ADDR_ERR (0x1ULL << 12) +#define CE_ADM_ERR_SSP_REQ_BAD_BE (0x1ULL << 13) +#define CE_ADM_ERR_PCIE_COMPL_TIMEOUT (0x1ULL << 14) +#define CE_ADM_ERR_PCIE_UNEXP_COMPL (0x1ULL << 15) +#define CE_ADM_ERR_PCIE_ERR_COMPL (0x1ULL << 16) +#define CE_ADM_ERR_DRE_CREDIT_OFLOW (0x1ULL << 17) +#define CE_ADM_ERR_DRE_SRAM_PE (0x1ULL << 18) +#define CE_ADM_ERR_SSP_RSP_INVALID (0x1ULL << 19) +#define CE_ADM_ERR_SSP_RSP_LONG (0x1ULL << 20) +#define CE_ADM_ERR_SSP_RSP_SHORT (0x1ULL << 21) +#define CE_ADM_ERR_SSP_RSP_SIDEBAND (0x1ULL << 22) +#define CE_ADM_ERR_URE_SSP_RSP_UNEXP (0x1ULL << 23) +#define CE_ADM_ERR_URE_SSP_WR_REQ_TIMEOUT (0x1ULL << 24) +#define CE_ADM_ERR_URE_SSP_RD_REQ_TIMEOUT (0x1ULL << 25) +#define CE_ADM_ERR_URE_ATE3240_PAGE_FAULT (0x1ULL << 26) +#define CE_ADM_ERR_URE_ATE40_PAGE_FAULT (0x1ULL << 27) +#define CE_ADM_ERR_URE_CREDIT_OFLOW (0x1ULL << 28) +#define CE_ADM_ERR_URE_SRAM_PE (0x1ULL << 29) +#define CE_ADM_ERR_ADM_SSP_RSP_UNEXP (0x1ULL << 30) +#define CE_ADM_ERR_ADM_SSP_REQ_TIMEOUT (0x1ULL << 31) +#define CE_ADM_ERR_MMR_ACCESS_ERROR (0x1ULL << 32) +#define CE_ADM_ERR_MMR_ADDR_ERROR (0x1ULL << 33) +#define CE_ADM_ERR_ADM_CREDIT_OFLOW (0x1ULL << 34) +#define CE_ADM_ERR_ADM_SRAM_PE (0x1ULL << 35) +#define CE_ADM_ERR_DTL1_MIN_PDATA_CREDIT_ERR (0x1ULL << 36) +#define CE_ADM_ERR_DTL1_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 37) +#define CE_ADM_ERR_DTL1_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 38) +#define CE_ADM_ERR_DTL1_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 39) +#define CE_ADM_ERR_DTL1_COMP_HD_CRED_MAX_ERR (0x1ULL << 40) +#define CE_ADM_ERR_DTL1_COMP_D_CRED_MAX_ERR (0x1ULL << 41) +#define CE_ADM_ERR_DTL1_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 42) +#define CE_ADM_ERR_DTL1_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 43) +#define CE_ADM_ERR_DTL1_POSTED_HD_CRED_MAX_ERR (0x1ULL << 44) +#define CE_ADM_ERR_DTL1_POSTED_D_CRED_MAX_ERR (0x1ULL << 45) +#define CE_ADM_ERR_DTL2_MIN_PDATA_CREDIT_ERR (0x1ULL << 46) +#define CE_ADM_ERR_DTL2_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 47) +#define CE_ADM_ERR_DTL2_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 48) +#define CE_ADM_ERR_DTL2_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 49) +#define CE_ADM_ERR_DTL2_COMP_HD_CRED_MAX_ERR (0x1ULL << 50) +#define CE_ADM_ERR_DTL2_COMP_D_CRED_MAX_ERR (0x1ULL << 51) +#define CE_ADM_ERR_DTL2_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 52) +#define CE_ADM_ERR_DTL2_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 53) +#define CE_ADM_ERR_DTL2_POSTED_HD_CRED_MAX_ERR (0x1ULL << 54) +#define CE_ADM_ERR_DTL2_POSTED_D_CRED_MAX_ERR (0x1ULL << 55) +#define CE_ADM_ERR_PORT1_PCIE_COR_ERR (0x1ULL << 56) +#define CE_ADM_ERR_PORT1_PCIE_NFAT_ERR (0x1ULL << 57) +#define CE_ADM_ERR_PORT1_PCIE_FAT_ERR (0x1ULL << 58) +#define CE_ADM_ERR_PORT2_PCIE_COR_ERR (0x1ULL << 59) +#define CE_ADM_ERR_PORT2_PCIE_NFAT_ERR (0x1ULL << 60) +#define CE_ADM_ERR_PORT2_PCIE_FAT_ERR (0x1ULL << 61) + +/* ce_adm_ure_ups_buf_barrier_flush register bit masks and shifts */ +#define FLUSH_SEL_PORT1_PIPE0_SHFT 0 +#define FLUSH_SEL_PORT1_PIPE1_SHFT 4 +#define FLUSH_SEL_PORT1_PIPE2_SHFT 8 +#define FLUSH_SEL_PORT1_PIPE3_SHFT 12 +#define FLUSH_SEL_PORT2_PIPE0_SHFT 16 +#define FLUSH_SEL_PORT2_PIPE1_SHFT 20 +#define FLUSH_SEL_PORT2_PIPE2_SHFT 24 +#define FLUSH_SEL_PORT2_PIPE3_SHFT 28 + +/* ce_dre_config1 register bit masks and shifts */ +#define CE_DRE_RO_ENABLE (0x1ULL << 0) +#define CE_DRE_DYN_RO_ENABLE (0x1ULL << 1) +#define CE_DRE_SUP_CONFIG_COMP_ERROR (0x1ULL << 2) +#define CE_DRE_SUP_IO_COMP_ERROR (0x1ULL << 3) +#define 
CE_DRE_ADDR_MODE_SHFT 4 + +/* ce_dre_config_req_status register bit masks */ +#define CE_DRE_LAST_CONFIG_COMPLETION (0x7ULL << 0) +#define CE_DRE_DOWNSTREAM_CONFIG_ERROR (0x1ULL << 3) +#define CE_DRE_CONFIG_COMPLETION_VALID (0x1ULL << 4) +#define CE_DRE_CONFIG_REQUEST_ACTIVE (0x1ULL << 5) + +/* ce_ure_control register bit masks & shifts */ +#define CE_URE_RD_MRG_ENABLE (0x1ULL << 0) +#define CE_URE_WRT_MRG_ENABLE1 (0x1ULL << 4) +#define CE_URE_WRT_MRG_ENABLE2 (0x1ULL << 5) +#define CE_URE_RSPQ_BYPASS_DISABLE (0x1ULL << 24) +#define CE_URE_UPS_DAT1_PAR_DISABLE (0x1ULL << 32) +#define CE_URE_UPS_HDR1_PAR_DISABLE (0x1ULL << 33) +#define CE_URE_UPS_DAT2_PAR_DISABLE (0x1ULL << 34) +#define CE_URE_UPS_HDR2_PAR_DISABLE (0x1ULL << 35) +#define CE_URE_ATE_PAR_DISABLE (0x1ULL << 36) +#define CE_URE_RCI_PAR_DISABLE (0x1ULL << 37) +#define CE_URE_RSPQ_PAR_DISABLE (0x1ULL << 38) +#define CE_URE_DNS_DAT_PAR_DISABLE (0x1ULL << 39) +#define CE_URE_DNS_HDR_PAR_DISABLE (0x1ULL << 40) +#define CE_URE_MALFORM_DISABLE (0x1ULL << 44) +#define CE_URE_UNSUP_DISABLE (0x1ULL << 45) + +/* ce_ure_page_map register bit masks & shifts */ +#define CE_URE_ATE3240_ENABLE (0x1ULL << 0) +#define CE_URE_ATE40_ENABLE (0x1ULL << 1) +#define CE_URE_PAGESIZE_SHFT 4 +#define CE_URE_PAGESIZE_MASK (0x7ULL << CE_URE_PAGESIZE_SHFT) +#define CE_URE_4K_PAGESIZE (0x0ULL << CE_URE_PAGESIZE_SHFT) +#define CE_URE_16K_PAGESIZE (0x1ULL << CE_URE_PAGESIZE_SHFT) +#define CE_URE_64K_PAGESIZE (0x2ULL << CE_URE_PAGESIZE_SHFT) +#define CE_URE_128K_PAGESIZE (0x3ULL << CE_URE_PAGESIZE_SHFT) +#define CE_URE_256K_PAGESIZE (0x4ULL << CE_URE_PAGESIZE_SHFT) + +/* ce_ure_pipe_sel register bit masks & shifts */ +#define PKT_TRAFIC_SHRT 16 +#define BUS_SRC_ID_SHFT 8 +#define DEV_SRC_ID_SHFT 3 +#define FNC_SRC_ID_SHFT 0 +#define CE_URE_TC_MASK (0x07ULL << PKT_TRAFIC_SHRT) +#define CE_URE_BUS_MASK (0xFFULL << BUS_SRC_ID_SHFT) +#define CE_URE_DEV_MASK (0x1FULL << DEV_SRC_ID_SHFT) +#define CE_URE_FNC_MASK (0x07ULL << FNC_SRC_ID_SHFT) +#define CE_URE_PIPE_BUS(b) (((uint64_t)(b) << BUS_SRC_ID_SHFT) & \ + CE_URE_BUS_MASK) +#define CE_URE_PIPE_DEV(d) (((uint64_t)(d) << DEV_SRC_ID_SHFT) & \ + CE_URE_DEV_MASK) +#define CE_URE_PIPE_FNC(f) (((uint64_t)(f) << FNC_SRC_ID_SHFT) & \ + CE_URE_FNC_MASK) + +#define CE_URE_SEL1_SHFT 0 +#define CE_URE_SEL2_SHFT 20 +#define CE_URE_SEL3_SHFT 40 +#define CE_URE_SEL1_MASK (0x7FFFFULL << CE_URE_SEL1_SHFT) +#define CE_URE_SEL2_MASK (0x7FFFFULL << CE_URE_SEL2_SHFT) +#define CE_URE_SEL3_MASK (0x7FFFFULL << CE_URE_SEL3_SHFT) + + +/* ce_ure_pipe_mask register bit masks & shifts */ +#define CE_URE_MASK1_SHFT 0 +#define CE_URE_MASK2_SHFT 20 +#define CE_URE_MASK3_SHFT 40 +#define CE_URE_MASK1_MASK (0x7FFFFULL << CE_URE_MASK1_SHFT) +#define CE_URE_MASK2_MASK (0x7FFFFULL << CE_URE_MASK2_SHFT) +#define CE_URE_MASK3_MASK (0x7FFFFULL << CE_URE_MASK3_SHFT) + + +/* ce_ure_pcie_control1 register bit masks & shifts */ +#define CE_URE_SI (0x1ULL << 0) +#define CE_URE_ELAL_SHFT 4 +#define CE_URE_ELAL_MASK (0x7ULL << CE_URE_ELAL_SHFT) +#define CE_URE_ELAL1_SHFT 8 +#define CE_URE_ELAL1_MASK (0x7ULL << CE_URE_ELAL1_SHFT) +#define CE_URE_SCC (0x1ULL << 12) +#define CE_URE_PN1_SHFT 16 +#define CE_URE_PN1_MASK (0xFFULL << CE_URE_PN1_SHFT) +#define CE_URE_PN2_SHFT 24 +#define CE_URE_PN2_MASK (0xFFULL << CE_URE_PN2_SHFT) +#define CE_URE_PN1_SET(n) (((uint64_t)(n) << CE_URE_PN1_SHFT) & \ + CE_URE_PN1_MASK) +#define CE_URE_PN2_SET(n) (((uint64_t)(n) << CE_URE_PN2_SHFT) & \ + CE_URE_PN2_MASK) + +/* ce_ure_pcie_control2 register bit masks & shifts */ +#define 
CE_URE_ABP (0x1ULL << 0) +#define CE_URE_PCP (0x1ULL << 1) +#define CE_URE_MSP (0x1ULL << 2) +#define CE_URE_AIP (0x1ULL << 3) +#define CE_URE_PIP (0x1ULL << 4) +#define CE_URE_HPS (0x1ULL << 5) +#define CE_URE_HPC (0x1ULL << 6) +#define CE_URE_SPLV_SHFT 7 +#define CE_URE_SPLV_MASK (0xFFULL << CE_URE_SPLV_SHFT) +#define CE_URE_SPLS_SHFT 15 +#define CE_URE_SPLS_MASK (0x3ULL << CE_URE_SPLS_SHFT) +#define CE_URE_PSN1_SHFT 19 +#define CE_URE_PSN1_MASK (0x1FFFULL << CE_URE_PSN1_SHFT) +#define CE_URE_PSN2_SHFT 32 +#define CE_URE_PSN2_MASK (0x1FFFULL << CE_URE_PSN2_SHFT) +#define CE_URE_PSN1_SET(n) (((uint64_t)(n) << CE_URE_PSN1_SHFT) & \ + CE_URE_PSN1_MASK) +#define CE_URE_PSN2_SET(n) (((uint64_t)(n) << CE_URE_PSN2_SHFT) & \ + CE_URE_PSN2_MASK) + +/* + * PIO address space ranges for CE + */ + +/* Local CE Registers Space */ +#define CE_PIO_MMR 0x00000000 +#define CE_PIO_MMR_LEN 0x04000000 + +/* PCI Compatible Config Space */ +#define CE_PIO_CONFIG_SPACE 0x04000000 +#define CE_PIO_CONFIG_SPACE_LEN 0x04000000 + +/* PCI I/O Space Alias */ +#define CE_PIO_IO_SPACE_ALIAS 0x08000000 +#define CE_PIO_IO_SPACE_ALIAS_LEN 0x08000000 + +/* PCI Enhanced Config Space */ +#define CE_PIO_E_CONFIG_SPACE 0x10000000 +#define CE_PIO_E_CONFIG_SPACE_LEN 0x10000000 + +/* PCI I/O Space */ +#define CE_PIO_IO_SPACE 0x100000000 +#define CE_PIO_IO_SPACE_LEN 0x100000000 + +/* PCI MEM Space */ +#define CE_PIO_MEM_SPACE 0x200000000 +#define CE_PIO_MEM_SPACE_LEN TIO_HWIN_SIZE + + +/* + * CE PCI Enhanced Config Space shifts & masks + */ +#define CE_E_CONFIG_BUS_SHFT 20 +#define CE_E_CONFIG_BUS_MASK (0xFF << CE_E_CONFIG_BUS_SHFT) +#define CE_E_CONFIG_DEVICE_SHFT 15 +#define CE_E_CONFIG_DEVICE_MASK (0x1F << CE_E_CONFIG_DEVICE_SHFT) +#define CE_E_CONFIG_FUNC_SHFT 12 +#define CE_E_CONFIG_FUNC_MASK (0x7 << CE_E_CONFIG_FUNC_SHFT) + +#endif /* __ASM_IA64_SN_TIOCE_H__ */ diff --git a/include/asm-ia64/sn/tioce_provider.h b/include/asm-ia64/sn/tioce_provider.h new file mode 100644 index 0000000..7f63dec --- /dev/null +++ b/include/asm-ia64/sn/tioce_provider.h @@ -0,0 +1,66 @@ +/************************************************************************** + * Copyright (C) 2005, Silicon Graphics, Inc. * + * * + * These coded instructions, statements, and computer programs contain * + * unpublished proprietary information of Silicon Graphics, Inc., and * + * are protected by Federal copyright law. They may not be disclosed * + * to third parties or copied or duplicated in any form, in whole or * + * in part, without the prior written consent of Silicon Graphics, Inc. * + * * + **************************************************************************/ + +#ifndef _ASM_IA64_SN_CE_PROVIDER_H +#define _ASM_IA64_SN_CE_PROVIDER_H + +#include <asm/sn/pcibus_provider_defs.h> +#include <asm/sn/tioce.h> + +/* + * Common TIOCE structure shared between the prom and kernel + * + * DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES TO THE + * PROM VERSION. 
+ */ +struct tioce_common { + struct pcibus_bussoft ce_pcibus; /* common pciio header */ + + uint32_t ce_rev; + uint64_t ce_kernel_private; + uint64_t ce_prom_private; +}; + +struct tioce_kernel { + struct tioce_common *ce_common; + spinlock_t ce_lock; + struct list_head ce_dmamap_list; + + uint64_t ce_ate40_shadow[TIOCE_NUM_M40_ATES]; + uint64_t ce_ate3240_shadow[TIOCE_NUM_M3240_ATES]; + uint32_t ce_ate3240_pagesize; + + uint8_t ce_port1_secondary; + + /* per-port resources */ + struct { + int dirmap_refcnt; + uint64_t dirmap_shadow; + } ce_port[TIOCE_NUM_PORTS]; +}; + +struct tioce_dmamap { + struct list_head ce_dmamap_list; /* headed by tioce_kernel */ + uint32_t refcnt; + + uint64_t nbytes; /* # bytes mapped */ + + uint64_t ct_start; /* coretalk start address */ + uint64_t pci_start; /* bus start address */ + + uint64_t *ate_hw; /* hw ptr of first ate in map */ + uint64_t *ate_shadow; /* shadow ptr of firat ate */ + uint16_t ate_count; /* # ate's in the map */ +}; + +extern int tioce_init_provider(void); + +#endif /* __ASM_IA64_SN_CE_PROVIDER_H */ |
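Finally, tioce.h defines shifts for composing PCI Express enhanced configuration addresses (bus in bits 27:20, device in 19:15, function in 14:12) alongside the CE_PIO_E_CONFIG_SPACE window. The sketch below combines them into a config-space offset for a given bus/device/function/register; treating the result as window-base-plus-offset is an assumption made for illustration, not a description of the prom or kernel access path.

```c
/*
 * Sketch composing an enhanced config space offset from the CE_E_CONFIG_*
 * shifts; the combination with CE_PIO_E_CONFIG_SPACE is illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define CE_PIO_E_CONFIG_SPACE	0x10000000ULL
#define CE_E_CONFIG_BUS_SHFT	20
#define CE_E_CONFIG_DEVICE_SHFT	15
#define CE_E_CONFIG_FUNC_SHFT	12

static uint64_t e_config_offset(uint8_t bus, uint8_t dev, uint8_t func,
				uint16_t reg)
{
	return CE_PIO_E_CONFIG_SPACE |
	       ((uint64_t)bus  << CE_E_CONFIG_BUS_SHFT)    |
	       ((uint64_t)dev  << CE_E_CONFIG_DEVICE_SHFT) |
	       ((uint64_t)func << CE_E_CONFIG_FUNC_SHFT)   |
	       (reg & 0xfff);
}

int main(void)
{
	/* bus 0x42, device 2, function 0, PCI_SECONDARY_BUS (0x19) */
	printf("0x%llx\n",
	       (unsigned long long)e_config_offset(0x42, 2, 0, 0x19));
	return 0;
}
```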