Diffstat (limited to 'sys/dev')
71 files changed, 1189 insertions, 531 deletions
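Annotation (not part of the diff): several of the per-driver attach fixes below — age(4), alc(4), ale(4), jme(4) and msk(4) — correct the same C precedence slip. In "if ((error = foo_dma_alloc(sc) != 0))" the "!=" binds tighter than the "=", so error receives the comparison result (0 or 1) instead of the real return code. The standalone sketch below, with a hypothetical demo_dma_alloc() standing in for the drivers' DMA-allocation routines, illustrates why moving the parenthesis matters; it is illustrative only and not code from the patch.

    #include <stdio.h>

    /* Hypothetical stand-in for the drivers' *_dma_alloc(sc) routines. */
    static int
    demo_dma_alloc(void)
    {
    	return (35);		/* pretend the allocation failed */
    }

    int
    main(void)
    {
    	int error;

    	/* Buggy form: '!=' binds tighter than '=', so error becomes 1. */
    	if ((error = demo_dma_alloc() != 0))
    		printf("buggy: error = %d (comparison result, code lost)\n",
    		    error);

    	/* Fixed form: assign first, then compare against zero. */
    	if ((error = demo_dma_alloc()) != 0)
    		printf("fixed: error = %d (actual return value)\n", error);

    	return (0);
    }

Both branches are taken, but only the fixed form reports the genuine error code to the caller, which is what the goto-fail paths in these attach routines rely on.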
diff --git a/sys/dev/age/if_age.c b/sys/dev/age/if_age.c index 7239121..51edec3 100644 --- a/sys/dev/age/if_age.c +++ b/sys/dev/age/if_age.c @@ -587,7 +587,7 @@ age_attach(device_t dev) /* Create device sysctl node. */ age_sysctl_node(sc); - if ((error = age_dma_alloc(sc) != 0)) + if ((error = age_dma_alloc(sc)) != 0) goto fail; /* Load station address. */ diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c index 5fabdaf..a9061bc 100644 --- a/sys/dev/ahci/ahci.c +++ b/sys/dev/ahci/ahci.c @@ -711,7 +711,7 @@ ahci_ch_attach(device_t dev) /* Construct SIM entry */ ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch, device_get_unit(dev), (struct mtx *)&ch->mtx, - min(2, ch->numslots), + (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots), (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0, devq); if (ch->sim == NULL) { @@ -1122,8 +1122,6 @@ ahci_ch_intr(void *arg) /* Read interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); - if (istatus == 0) - return; mtx_lock(&ch->mtx); ahci_ch_intr_main(ch, istatus); @@ -1140,8 +1138,6 @@ ahci_ch_intr_direct(void *arg) /* Read interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); - if (istatus == 0) - return; mtx_lock(&ch->mtx); ch->batch = 1; @@ -1228,8 +1224,19 @@ ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus) /* Process command errors */ if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { - ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) - >> AHCI_P_CMD_CCS_SHIFT; + if (ch->quirks & AHCI_Q_NOCCS) { + /* + * ASMedia chips sometimes report failed commands as + * completed. Count all running commands as failed. + */ + cstatus |= ch->rslots; + + /* They also report wrong CCS, so try to guess one. */ + ccs = powerof2(cstatus) ? 
ffs(cstatus) - 1 : -1; + } else { + ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & + AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; + } //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n", // __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), // serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs); diff --git a/sys/dev/ahci/ahci.h b/sys/dev/ahci/ahci.h index 61643c1..ca7631e 100644 --- a/sys/dev/ahci/ahci.h +++ b/sys/dev/ahci/ahci.h @@ -574,6 +574,7 @@ enum ahci_err_type { #define AHCI_Q_SATA1_UNIT0 0x00008000 /* need better method for this */ #define AHCI_Q_ABAR0 0x00010000 #define AHCI_Q_1MSI 0x00020000 +#define AHCI_Q_NOCCS 0x00400000 #define AHCI_Q_BIT_STRING \ "\020" \ @@ -594,7 +595,8 @@ enum ahci_err_type { "\017MAXIO_64K" \ "\020SATA1_UNIT0" \ "\021ABAR0" \ - "\0221MSI" + "\0221MSI" \ + "\027NOCCS" int ahci_attach(device_t dev); int ahci_detach(device_t dev); diff --git a/sys/dev/ahci/ahci_pci.c b/sys/dev/ahci/ahci_pci.c index bb14ed6..7f4a1b6 100644 --- a/sys/dev/ahci/ahci_pci.c +++ b/sys/dev/ahci/ahci_pci.c @@ -73,8 +73,15 @@ static const struct { {0x78021022, 0x00, "AMD Hudson-2", 0}, {0x78031022, 0x00, "AMD Hudson-2", 0}, {0x78041022, 0x00, "AMD Hudson-2", 0}, - {0x06111b21, 0x00, "ASMedia ASM2106", 0}, - {0x06121b21, 0x00, "ASMedia ASM1061", 0}, + {0x06011b21, 0x00, "ASMedia ASM1060", AHCI_Q_NOCCS}, + {0x06021b21, 0x00, "ASMedia ASM1060", AHCI_Q_NOCCS}, + {0x06111b21, 0x00, "ASMedia ASM1061", AHCI_Q_NOCCS}, + {0x06121b21, 0x00, "ASMedia ASM1062", AHCI_Q_NOCCS}, + {0x06201b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS}, + {0x06211b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS}, + {0x06221b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS}, + {0x06241b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS}, + {0x06251b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS}, {0x26528086, 0x00, "Intel ICH6", AHCI_Q_NOFORCE}, {0x26538086, 0x00, "Intel ICH6M", AHCI_Q_NOFORCE}, {0x26818086, 0x00, "Intel ESB2", 0}, diff --git a/sys/dev/alc/if_alc.c b/sys/dev/alc/if_alc.c index ba37b5f..76e4614 100644 --- a/sys/dev/alc/if_alc.c +++ b/sys/dev/alc/if_alc.c @@ -120,6 +120,8 @@ static struct alc_ident alc_ident_table[] = { "Atheros AR8172 PCIe Fast Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2200, 9 * 1024, "Killer E2200 Gigabit Ethernet" }, + { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2400, 9 * 1024, + "Killer E2400 Gigabit Ethernet" }, { 0, 0, 0, NULL} }; @@ -254,7 +256,7 @@ static struct resource_spec alc_irq_spec_msix[] = { { -1, 0, 0 } }; -static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 }; +static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 }; static int alc_miibus_readreg(device_t dev, int phy, int reg) @@ -1079,6 +1081,7 @@ alc_phy_down(struct alc_softc *sc) switch (sc->alc_ident->deviceid) { case DEVICEID_ATHEROS_AR8161: case DEVICEID_ATHEROS_E2200: + case DEVICEID_ATHEROS_E2400: case DEVICEID_ATHEROS_AR8162: case DEVICEID_ATHEROS_AR8171: case DEVICEID_ATHEROS_AR8172: @@ -1396,12 +1399,15 @@ alc_attach(device_t dev) * shows the same PHY model/revision number of AR8131. 
*/ switch (sc->alc_ident->deviceid) { + case DEVICEID_ATHEROS_E2200: + case DEVICEID_ATHEROS_E2400: + sc->alc_flags |= ALC_FLAG_E2X00; + /* FALLTHROUGH */ case DEVICEID_ATHEROS_AR8161: if (pci_get_subvendor(dev) == VENDORID_ATHEROS && pci_get_subdevice(dev) == 0x0091 && sc->alc_rev == 0) sc->alc_flags |= ALC_FLAG_LINK_WAR; /* FALLTHROUGH */ - case DEVICEID_ATHEROS_E2200: case DEVICEID_ATHEROS_AR8171: sc->alc_flags |= ALC_FLAG_AR816X_FAMILY; break; @@ -1472,6 +1478,12 @@ alc_attach(device_t dev) sc->alc_dma_rd_burst = 3; if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024) sc->alc_dma_wr_burst = 3; + /* + * Force maximum payload size to 128 bytes for E2200/E2400. + * Otherwise it triggers DMA write error. + */ + if ((sc->alc_flags & ALC_FLAG_E2X00) != 0) + sc->alc_dma_wr_burst = 0; alc_init_pcie(sc); } @@ -1531,7 +1543,7 @@ alc_attach(device_t dev) /* Create device sysctl node. */ alc_sysctl_node(sc); - if ((error = alc_dma_alloc(sc) != 0)) + if ((error = alc_dma_alloc(sc)) != 0) goto fail; /* Load station address. */ @@ -4194,13 +4206,17 @@ alc_init_locked(struct alc_softc *sc) reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & RXQ_CFG_RD_BURST_MASK; reg |= RXQ_CFG_RSS_MODE_DIS; - if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) + if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT << RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) & RXQ_CFG_816X_IDT_TBL_SIZE_MASK; - if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 && - sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2) - reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M; + if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) + reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M; + } else { + if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 && + sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2) + reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M; + } CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); /* Configure DMA parameters. */ @@ -4224,12 +4240,12 @@ alc_init_locked(struct alc_softc *sc) switch (AR816X_REV(sc->alc_rev)) { case AR816X_REV_A0: case AR816X_REV_A1: - reg |= DMA_CFG_RD_CHNL_SEL_1; + reg |= DMA_CFG_RD_CHNL_SEL_2; break; case AR816X_REV_B0: /* FALLTHROUGH */ default: - reg |= DMA_CFG_RD_CHNL_SEL_3; + reg |= DMA_CFG_RD_CHNL_SEL_4; break; } } diff --git a/sys/dev/alc/if_alcreg.h b/sys/dev/alc/if_alcreg.h index 1ad75a3..ae63084 100644 --- a/sys/dev/alc/if_alcreg.h +++ b/sys/dev/alc/if_alcreg.h @@ -45,10 +45,11 @@ #define DEVICEID_ATHEROS_AR8152_B 0x2060 /* L2C V1.1 */ #define DEVICEID_ATHEROS_AR8152_B2 0x2062 /* L2C V2.0 */ #define DEVICEID_ATHEROS_AR8161 0x1091 -#define DEVICEID_ATHEROS_E2200 0xE091 #define DEVICEID_ATHEROS_AR8162 0x1090 #define DEVICEID_ATHEROS_AR8171 0x10A1 #define DEVICEID_ATHEROS_AR8172 0x10A0 +#define DEVICEID_ATHEROS_E2200 0xE091 +#define DEVICEID_ATHEROS_E2400 0xE0A1 #define ATHEROS_AR8152_B_V10 0xC0 #define ATHEROS_AR8152_B_V11 0xC1 diff --git a/sys/dev/alc/if_alcvar.h b/sys/dev/alc/if_alcvar.h index 9a73ef4..a1c3382 100644 --- a/sys/dev/alc/if_alcvar.h +++ b/sys/dev/alc/if_alcvar.h @@ -235,7 +235,8 @@ struct alc_softc { #define ALC_FLAG_APS 0x1000 #define ALC_FLAG_AR816X_FAMILY 0x2000 #define ALC_FLAG_LINK_WAR 0x4000 -#define ALC_FLAG_LINK 0x8000 +#define ALC_FLAG_E2X00 0x8000 +#define ALC_FLAG_LINK 0x10000 struct callout alc_tick_ch; struct alc_hw_stats alc_stats; diff --git a/sys/dev/ale/if_ale.c b/sys/dev/ale/if_ale.c index 6f0f83e..4eb5835 100644 --- a/sys/dev/ale/if_ale.c +++ b/sys/dev/ale/if_ale.c @@ -602,7 +602,7 @@ ale_attach(device_t dev) /* Create device sysctl node. 
*/ ale_sysctl_node(sc); - if ((error = ale_dma_alloc(sc) != 0)) + if ((error = ale_dma_alloc(sc)) != 0) goto fail; /* Load station address. */ diff --git a/sys/dev/amdsbwd/amdsbwd.c b/sys/dev/amdsbwd/amdsbwd.c index f8824e5..628aef2 100644 --- a/sys/dev/amdsbwd/amdsbwd.c +++ b/sys/dev/amdsbwd/amdsbwd.c @@ -416,8 +416,8 @@ amdsbwd_probe(device_t dev) return (ENXIO); } rid = 0; - res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0ul, ~0ul, - AMDSB_PMIO_WIDTH, RF_ACTIVE | RF_SHAREABLE); + res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, + RF_ACTIVE | RF_SHAREABLE); if (res == NULL) { device_printf(dev, "bus_alloc_resource for IO failed\n"); return (ENXIO); diff --git a/sys/dev/arcmsr/arcmsr.c b/sys/dev/arcmsr/arcmsr.c index b4e6ce5..6bcadb7 100644 --- a/sys/dev/arcmsr/arcmsr.c +++ b/sys/dev/arcmsr/arcmsr.c @@ -872,7 +872,7 @@ static void arcmsr_srb_timeout(void *arg) ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); if(srb->srb_state == ARCMSR_SRB_START) { - cmd = srb->pccb->csio.cdb_io.cdb_bytes[0]; + cmd = scsiio_cdb_ptr(&srb->pccb->csio)[0]; srb->srb_state = ARCMSR_SRB_TIMEOUT; srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT; arcmsr_srb_complete(srb, 1); @@ -997,7 +997,7 @@ static void arcmsr_build_srb(struct CommandControlBlock *srb, arcmsr_cdb->LUN = pccb->ccb_h.target_lun; arcmsr_cdb->Function = 1; arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len; - bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len); + bcopy(scsiio_cdb_ptr(pcsio), arcmsr_cdb->Cdb, pcsio->cdb_len); if(nseg != 0) { struct AdapterControlBlock *acb = srb->acb; bus_dmasync_op_t op; @@ -2453,10 +2453,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p struct CMD_MESSAGE_FIELD *pcmdmessagefld; int retvalue = 0, transfer_len = 0; char *buffer; - u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | - (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | - (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | - (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8]; + uint8_t *ptr = scsiio_cdb_ptr(&pccb->csio); + u_int32_t controlcode = (u_int32_t ) ptr[5] << 24 | + (u_int32_t ) ptr[6] << 16 | + (u_int32_t ) ptr[7] << 8 | + (u_int32_t ) ptr[8]; /* 4 bytes: Areca io control code */ if ((pccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { buffer = pccb->csio.data_ptr; @@ -2683,7 +2684,7 @@ static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, if(acb->devstate[target][lun] == ARECA_RAID_GONE) { u_int8_t block_cmd, cmd; - cmd = pccb->csio.cdb_io.cdb_bytes[0]; + cmd = scsiio_cdb_ptr(&pccb->csio)[0]; block_cmd = cmd & 0x0f; if(block_cmd == 0x08 || block_cmd == 0x0a) { printf("arcmsr%d:block 'read/write' command " @@ -2800,7 +2801,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, return; } pccb->ccb_h.status |= CAM_REQ_CMP; - switch (pccb->csio.cdb_io.cdb_bytes[0]) { + switch (scsiio_cdb_ptr(&pccb->csio)[0]) { case INQUIRY: { unsigned char inqdata[36]; char *buffer = pccb->csio.data_ptr; @@ -2853,6 +2854,12 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb) int target = pccb->ccb_h.target_id; int error; + if (pccb->ccb_h.flags & CAM_CDB_PHYS) { + pccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(pccb); + return; + } + if(target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, pccb); @@ -4143,7 +4150,7 @@ static u_int32_t arcmsr_initialize(device_t dev) u_int32_t rid0 = PCIR_BAR(0); vm_offset_t mem_base0; - acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 
0ul, ~0ul, 0x1000, RF_ACTIVE); + acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); @@ -4177,11 +4184,11 @@ static u_int32_t arcmsr_initialize(device_t dev) size = sizeof(struct HBB_DOORBELL); for(i=0; i < 2; i++) { if(i == 0) { - acb->sys_res_arcmsr[i] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i], - 0ul, ~0ul, size, RF_ACTIVE); + acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid[i], + RF_ACTIVE); } else { - acb->sys_res_arcmsr[i] = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i], - 0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE); + acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid[i], + RF_ACTIVE); } if(acb->sys_res_arcmsr[i] == NULL) { arcmsr_free_resource(acb); @@ -4224,7 +4231,7 @@ static u_int32_t arcmsr_initialize(device_t dev) u_int32_t rid0 = PCIR_BAR(1); vm_offset_t mem_base0; - acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE); + acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); @@ -4251,7 +4258,7 @@ static u_int32_t arcmsr_initialize(device_t dev) u_int32_t rid0 = PCIR_BAR(0); vm_offset_t mem_base0; - acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBD_MessageUnit), RF_ACTIVE); + acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c index d976313..82d1dec 100644 --- a/sys/dev/cxgbe/t4_main.c +++ b/sys/dev/cxgbe/t4_main.c @@ -611,6 +611,7 @@ struct { #endif }, t6_pciids[] = { {0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */ + {0x6400, "Chelsio T6225-DBG"}, /* 2 x 10/25G, debug */ {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */ {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */ {0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */ diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c index 9152d9f..d831f94 100644 --- a/sys/dev/cxgbe/t4_sge.c +++ b/sys/dev/cxgbe/t4_sge.c @@ -2289,7 +2289,7 @@ slowpath: w = &eq->desc[eq->pidx]; IDXINCR(eq->pidx, ndesc, eq->sidx); - if (__predict_false(eq->pidx < ndesc - 1)) { + if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { w = &wrq->ss[0]; wrq->ss_pidx = cookie->pidx; wrq->ss_len = len16 * 16; @@ -3296,12 +3296,13 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); c.physeqid_pkd = htobe32(0); c.fetchszm_to_iqid = - htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | + htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); c.dcaen_to_eqsize = htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | + V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); c.eqaddr = htobe64(eq->ba); diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c index 5ec627c..02f01ef 100644 --- 
a/sys/dev/cxgbe/tom/t4_connect.c +++ b/sys/dev/cxgbe/tom/t4_connect.c @@ -107,7 +107,7 @@ free_atid(struct adapter *sc, int atid) } /* - * Active open failed. + * Active open succeeded. */ static int do_act_establish(struct sge_iq *iq, const struct rss_header *rss, @@ -126,9 +126,10 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss, CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid); free_atid(sc, atid); + CURVNET_SET(toep->vnet); INP_WLOCK(inp); toep->tid = tid; - insert_tid(sc, tid, toep); + insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1); if (inp->inp_flags & INP_DROPPED) { /* socket closed by the kernel before hw told us it connected */ @@ -141,6 +142,7 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss, make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt); done: INP_WUNLOCK(inp); + CURVNET_RESTORE(); return (0); } @@ -178,6 +180,7 @@ act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status) free_atid(sc, atid); toep->tid = -1; + CURVNET_SET(toep->vnet); if (status != EAGAIN) INP_INFO_RLOCK(&V_tcbinfo); INP_WLOCK(inp); @@ -185,8 +188,12 @@ act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status) final_cpl_received(toep); /* unlocks inp */ if (status != EAGAIN) INP_INFO_RUNLOCK(&V_tcbinfo); + CURVNET_RESTORE(); } +/* + * Active open failed. + */ static int do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) @@ -268,6 +275,14 @@ t4_init_connect_cpl_handlers(void) t4_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl); } +void +t4_uninit_connect_cpl_handlers(void) +{ + + t4_register_cpl_handler(CPL_ACT_ESTABLISH, NULL); + t4_register_cpl_handler(CPL_ACT_OPEN_RPL, NULL); +} + #define DONT_OFFLOAD_ACTIVE_OPEN(x) do { \ reason = __LINE__; \ rc = (x); \ @@ -357,6 +372,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt, if (wr == NULL) DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM); + toep->vnet = so->so_vnet; if (sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0) set_tcpddp_ulp_mode(toep); else diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c index f446432..6e26c2b 100644 --- a/sys/dev/cxgbe/tom/t4_cpl_io.c +++ b/sys/dev/cxgbe/tom/t4_cpl_io.c @@ -302,7 +302,6 @@ make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn, uint16_t tcpopt = be16toh(opt); struct flowc_tx_params ftxp; - CURVNET_SET(so->so_vnet); INP_WLOCK_ASSERT(inp); KASSERT(tp->t_state == TCPS_SYN_SENT || tp->t_state == TCPS_SYN_RECEIVED, @@ -353,7 +352,6 @@ make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn, send_flowc_wr(toep, &ftxp); soisconnected(so); - CURVNET_RESTORE(); } static int @@ -1107,6 +1105,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); + CURVNET_SET(toep->vnet); INP_INFO_RLOCK(&V_tcbinfo); INP_WLOCK(inp); tp = intotcpcb(inp); @@ -1150,6 +1149,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) tcp_twstart(tp); INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ INP_INFO_RUNLOCK(&V_tcbinfo); + CURVNET_RESTORE(); INP_WLOCK(inp); final_cpl_received(toep); @@ -1162,6 +1162,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) done: INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); + CURVNET_RESTORE(); return (0); } @@ -1188,6 +1189,7 @@ do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, KASSERT(m == NULL, ("%s: wasn't expecting payload", 
__func__)); KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); + CURVNET_SET(toep->vnet); INP_INFO_RLOCK(&V_tcbinfo); INP_WLOCK(inp); tp = intotcpcb(inp); @@ -1207,6 +1209,7 @@ do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, release: INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ INP_INFO_RUNLOCK(&V_tcbinfo); + CURVNET_RESTORE(); INP_WLOCK(inp); final_cpl_received(toep); /* no more CPLs expected */ @@ -1231,6 +1234,7 @@ release: done: INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); + CURVNET_RESTORE(); return (0); } @@ -1304,6 +1308,7 @@ do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) } inp = toep->inp; + CURVNET_SET(toep->vnet); INP_INFO_RLOCK(&V_tcbinfo); /* for tcp_close */ INP_WLOCK(inp); @@ -1339,6 +1344,7 @@ do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) final_cpl_received(toep); done: INP_INFO_RUNLOCK(&V_tcbinfo); + CURVNET_RESTORE(); send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); return (0); } @@ -1456,18 +1462,21 @@ do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) SOCKBUF_UNLOCK(sb); INP_WUNLOCK(inp); + CURVNET_SET(toep->vnet); INP_INFO_RLOCK(&V_tcbinfo); INP_WLOCK(inp); tp = tcp_drop(tp, ECONNRESET); if (tp) INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); + CURVNET_RESTORE(); return (0); } /* receive buffer autosize */ - CURVNET_SET(so->so_vnet); + MPASS(toep->vnet == so->so_vnet); + CURVNET_SET(toep->vnet); if (sb->sb_flags & SB_AUTOSIZE && V_tcp_do_autorcvbuf && sb->sb_hiwat < V_tcp_autorcvbuf_max && @@ -1674,10 +1683,12 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) if (toep->flags & TPF_TX_SUSPENDED && toep->tx_credits >= toep->tx_total / 4) { toep->flags &= ~TPF_TX_SUSPENDED; + CURVNET_SET(toep->vnet); if (toep->ulp_mode == ULP_MODE_ISCSI) t4_push_pdus(sc, toep, plen); else t4_push_frames(sc, toep, plen); + CURVNET_RESTORE(); } else if (plen > 0) { struct sockbuf *sb = &so->so_snd; int sbu; @@ -1787,11 +1798,11 @@ void t4_uninit_cpl_io_handlers(void) { - t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close); - t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl); - t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req); - t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl); - t4_register_cpl_handler(CPL_RX_DATA, do_rx_data); - t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack); + t4_register_cpl_handler(CPL_PEER_CLOSE, NULL); + t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL); + t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL); + t4_register_cpl_handler(CPL_ABORT_RPL_RSS, NULL); + t4_register_cpl_handler(CPL_RX_DATA, NULL); + t4_register_cpl_handler(CPL_FW4_ACK, NULL); } #endif diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c index f5f08c8..8dad045 100644 --- a/sys/dev/cxgbe/tom/t4_ddp.c +++ b/sys/dev/cxgbe/tom/t4_ddp.c @@ -392,6 +392,8 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len) discourage_ddp(toep); /* receive buffer autosize */ + MPASS(toep->vnet == so->so_vnet); + CURVNET_SET(toep->vnet); if (sb->sb_flags & SB_AUTOSIZE && V_tcp_do_autorcvbuf && sb->sb_hiwat < V_tcp_autorcvbuf_max && @@ -405,6 +407,7 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len) else toep->rx_credits += newsize - hiwat; } + CURVNET_RESTORE(); KASSERT(toep->sb_cc >= sb->sb_cc, ("%s: sb %p has more data (%d) than last time (%d).", diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c index 2c2b427..844168d 
100644 --- a/sys/dev/cxgbe/tom/t4_listen.c +++ b/sys/dev/cxgbe/tom/t4_listen.c @@ -220,6 +220,7 @@ alloc_lctx(struct adapter *sc, struct inpcb *inp, struct vi_info *vi) TAILQ_INIT(&lctx->synq); lctx->inp = inp; + lctx->vnet = inp->inp_socket->so_vnet; in_pcbref(inp); return (lctx); @@ -825,14 +826,16 @@ done_with_synqe(struct adapter *sc, struct synq_entry *synqe) struct inpcb *inp = lctx->inp; struct vi_info *vi = synqe->syn->m_pkthdr.rcvif->if_softc; struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx]; + int ntids; INP_WLOCK_ASSERT(inp); + ntids = inp->inp_vflag & INP_IPV6 ? 2 : 1; TAILQ_REMOVE(&lctx->synq, synqe, link); inp = release_lctx(sc, lctx); if (inp) INP_WUNLOCK(inp); - remove_tid(sc, synqe->tid); + remove_tid(sc, synqe->tid, ntids); release_tid(sc, synqe->tid, &sc->sge.ctrlq[vi->pi->port_id]); t4_l2t_release(e); release_synqe(synqe); /* removed from synq list */ @@ -1235,7 +1238,7 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss, struct l2t_entry *e = NULL; int rscale, mtu_idx, rx_credits, rxqid, ulp_mode; struct synq_entry *synqe = NULL; - int reject_reason, v; + int reject_reason, v, ntids; uint16_t vid; #ifdef INVARIANTS unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); @@ -1253,6 +1256,8 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss, pi = sc->port[G_SYN_INTF(be16toh(cpl->l2info))]; + CURVNET_SET(lctx->vnet); + /* * Use the MAC index to lookup the associated VI. If this SYN * didn't match a perfect MAC filter, punt. @@ -1309,6 +1314,8 @@ found: */ if (!ifnet_has_ip6(ifp, &inc.inc6_laddr)) REJECT_PASS_ACCEPT(); + + ntids = 2; } else { /* Don't offload if the ifcap isn't enabled */ @@ -1321,8 +1328,17 @@ found: */ if (!ifnet_has_ip(ifp, inc.inc_laddr)) REJECT_PASS_ACCEPT(); + + ntids = 1; } + /* + * Don't offload if the ifnet that the SYN came in on is not in the same + * vnet as the listening socket. + */ + if (lctx->vnet != ifp->if_vnet) + REJECT_PASS_ACCEPT(); + e = get_l2te_for_nexthop(pi, ifp, &inc); if (e == NULL) REJECT_PASS_ACCEPT(); @@ -1362,7 +1378,6 @@ found: REJECT_PASS_ACCEPT(); } so = inp->inp_socket; - CURVNET_SET(so->so_vnet); mtu_idx = find_best_mtu_idx(sc, &inc, be16toh(cpl->tcpopt.mss)); rscale = cpl->tcpopt.wsf && V_tcp_do_rfc1323 ? select_rcv_wscale() : 0; @@ -1398,7 +1413,7 @@ found: synqe->rcv_bufsize = rx_credits; atomic_store_rel_ptr(&synqe->wr, (uintptr_t)wr); - insert_tid(sc, tid, synqe); + insert_tid(sc, tid, synqe, ntids); TAILQ_INSERT_TAIL(&lctx->synq, synqe, link); hold_synqe(synqe); /* hold for the duration it's in the synq */ hold_lctx(lctx); /* A synqe on the list has a ref on its lctx */ @@ -1409,7 +1424,6 @@ found: */ toe_syncache_add(&inc, &to, &th, inp, tod, synqe); INP_UNLOCK_ASSERT(inp); /* ok to assert, we have a ref on the inp */ - CURVNET_RESTORE(); /* * If we replied during syncache_add (synqe->wr has been consumed), @@ -1427,7 +1441,7 @@ found: if (m) m->m_pkthdr.rcvif = hw_ifp; - remove_tid(sc, synqe->tid); + remove_tid(sc, synqe->tid, ntids); free(wr, M_CXGBE); /* Yank the synqe out of the lctx synq. 
*/ @@ -1459,15 +1473,18 @@ found: if (!(synqe->flags & TPF_SYNQE_EXPANDED)) send_reset_synqe(tod, synqe); INP_WUNLOCK(inp); + CURVNET_RESTORE(); release_synqe(synqe); /* extra hold */ return (__LINE__); } INP_WUNLOCK(inp); + CURVNET_RESTORE(); release_synqe(synqe); /* extra hold */ return (0); reject: + CURVNET_RESTORE(); CTR4(KTR_CXGBE, "%s: stid %u, tid %u, REJECT (%d)", __func__, stid, tid, reject_reason); @@ -1539,6 +1556,7 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss, KASSERT(synqe->flags & TPF_SYNQE, ("%s: tid %u (ctx %p) not a synqe", __func__, tid, synqe)); + CURVNET_SET(lctx->vnet); INP_INFO_RLOCK(&V_tcbinfo); /* for syncache_expand */ INP_WLOCK(inp); @@ -1556,6 +1574,7 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss, INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); + CURVNET_RESTORE(); return (0); } @@ -1581,6 +1600,7 @@ reset: send_reset_synqe(TOEDEV(ifp), synqe); INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); + CURVNET_RESTORE(); return (0); } toep->tid = tid; @@ -1617,6 +1637,8 @@ reset: /* New connection inpcb is already locked by syncache_expand(). */ new_inp = sotoinpcb(so); INP_WLOCK_ASSERT(new_inp); + MPASS(so->so_vnet == lctx->vnet); + toep->vnet = lctx->vnet; /* * This is for the unlikely case where the syncache entry that we added @@ -1640,6 +1662,7 @@ reset: if (inp != NULL) INP_WUNLOCK(inp); INP_INFO_RUNLOCK(&V_tcbinfo); + CURVNET_RESTORE(); release_synqe(synqe); return (0); @@ -1654,4 +1677,14 @@ t4_init_listen_cpl_handlers(void) t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_pass_accept_req); t4_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish); } + +void +t4_uninit_listen_cpl_handlers(void) +{ + + t4_register_cpl_handler(CPL_PASS_OPEN_RPL, NULL); + t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, NULL); + t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, NULL); + t4_register_cpl_handler(CPL_PASS_ESTABLISH, NULL); +} #endif diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c index f62fcac..4c58cf9 100644 --- a/sys/dev/cxgbe/tom/t4_tom.c +++ b/sys/dev/cxgbe/tom/t4_tom.c @@ -321,7 +321,7 @@ release_offload_resources(struct toepcb *toep) t4_l2t_release(toep->l2te); if (tid >= 0) { - remove_tid(sc, tid); + remove_tid(sc, tid, toep->ce ? 
2 : 1); release_tid(sc, tid, toep->ctrlq); } @@ -432,12 +432,12 @@ final_cpl_received(struct toepcb *toep) } void -insert_tid(struct adapter *sc, int tid, void *ctx) +insert_tid(struct adapter *sc, int tid, void *ctx, int ntids) { struct tid_info *t = &sc->tids; t->tid_tab[tid] = ctx; - atomic_add_int(&t->tids_in_use, 1); + atomic_add_int(&t->tids_in_use, ntids); } void * @@ -457,12 +457,12 @@ update_tid(struct adapter *sc, int tid, void *ctx) } void -remove_tid(struct adapter *sc, int tid) +remove_tid(struct adapter *sc, int tid, int ntids) { struct tid_info *t = &sc->tids; t->tid_tab[tid] = NULL; - atomic_subtract_int(&t->tids_in_use, 1); + atomic_subtract_int(&t->tids_in_use, ntids); } void @@ -811,74 +811,96 @@ update_clip_table(struct adapter *sc, struct tom_data *td) struct in6_addr *lip, tlip; struct clip_head stale; struct clip_entry *ce, *ce_temp; - int rc, gen = atomic_load_acq_int(&in6_ifaddr_gen); + struct vi_info *vi; + int rc, gen, i, j; + uintptr_t last_vnet; ASSERT_SYNCHRONIZED_OP(sc); IN6_IFADDR_RLOCK(); mtx_lock(&td->clip_table_lock); + gen = atomic_load_acq_int(&in6_ifaddr_gen); if (gen == td->clip_gen) goto done; TAILQ_INIT(&stale); TAILQ_CONCAT(&stale, &td->clip_table, link); - TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) { - lip = &ia->ia_addr.sin6_addr; + /* + * last_vnet optimizes the common cases where all if_vnet = NULL (no + * VIMAGE) or all if_vnet = vnet0. + */ + last_vnet = (uintptr_t)(-1); + for_each_port(sc, i) + for_each_vi(sc->port[i], j, vi) { + if (last_vnet == (uintptr_t)vi->ifp->if_vnet) + continue; - KASSERT(!IN6_IS_ADDR_MULTICAST(lip), - ("%s: mcast address in in6_ifaddr list", __func__)); + /* XXX: races with if_vmove */ + CURVNET_SET(vi->ifp->if_vnet); + TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) { + lip = &ia->ia_addr.sin6_addr; + + KASSERT(!IN6_IS_ADDR_MULTICAST(lip), + ("%s: mcast address in in6_ifaddr list", __func__)); + + if (IN6_IS_ADDR_LOOPBACK(lip)) + continue; + if (IN6_IS_SCOPE_EMBED(lip)) { + /* Remove the embedded scope */ + tlip = *lip; + lip = &tlip; + in6_clearscope(lip); + } + /* + * XXX: how to weed out the link local address for the + * loopback interface? It's fe80::1 usually (always?). + */ + + /* + * If it's in the main list then we already know it's + * not stale. + */ + TAILQ_FOREACH(ce, &td->clip_table, link) { + if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) + goto next; + } - if (IN6_IS_ADDR_LOOPBACK(lip)) - continue; - if (IN6_IS_SCOPE_EMBED(lip)) { - /* Remove the embedded scope */ - tlip = *lip; - lip = &tlip; - in6_clearscope(lip); - } - /* - * XXX: how to weed out the link local address for the loopback - * interface? It's fe80::1 usually (always?). - */ - - /* - * If it's in the main list then we already know it's not stale. - */ - TAILQ_FOREACH(ce, &td->clip_table, link) { - if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) - goto next; - } + /* + * If it's in the stale list we should move it to the + * main list. + */ + TAILQ_FOREACH(ce, &stale, link) { + if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) { + TAILQ_REMOVE(&stale, ce, link); + TAILQ_INSERT_TAIL(&td->clip_table, ce, + link); + goto next; + } + } - /* - * If it's in the stale list we should move it to the main list. 
- */ - TAILQ_FOREACH(ce, &stale, link) { - if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) { - TAILQ_REMOVE(&stale, ce, link); + /* A new IP6 address; add it to the CLIP table */ + ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT); + memcpy(&ce->lip, lip, sizeof(ce->lip)); + ce->refcount = 0; + rc = add_lip(sc, lip); + if (rc == 0) TAILQ_INSERT_TAIL(&td->clip_table, ce, link); - goto next; - } - } + else { + char ip[INET6_ADDRSTRLEN]; - /* A new IP6 address; add it to the CLIP table */ - ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT); - memcpy(&ce->lip, lip, sizeof(ce->lip)); - ce->refcount = 0; - rc = add_lip(sc, lip); - if (rc == 0) - TAILQ_INSERT_TAIL(&td->clip_table, ce, link); - else { - char ip[INET6_ADDRSTRLEN]; - - inet_ntop(AF_INET6, &ce->lip, &ip[0], sizeof(ip)); - log(LOG_ERR, "%s: could not add %s (%d)\n", - __func__, ip, rc); - free(ce, M_CXGBE); - } + inet_ntop(AF_INET6, &ce->lip, &ip[0], + sizeof(ip)); + log(LOG_ERR, "%s: could not add %s (%d)\n", + __func__, ip, rc); + free(ce, M_CXGBE); + } next: - continue; + continue; + } + CURVNET_RESTORE(); + last_vnet = (uintptr_t)vi->ifp->if_vnet; } /* @@ -1203,6 +1225,10 @@ t4_tom_mod_unload(void) t4_ddp_mod_unload(); + t4_uninit_connect_cpl_handlers(); + t4_uninit_listen_cpl_handlers(); + t4_uninit_cpl_io_handlers(); + return (0); } #endif /* TCP_OFFLOAD */ diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h index a1d723d..f5ca0cf 100644 --- a/sys/dev/cxgbe/tom/t4_tom.h +++ b/sys/dev/cxgbe/tom/t4_tom.h @@ -123,6 +123,7 @@ struct toepcb { u_int flags; /* miscellaneous flags */ struct tom_data *td; struct inpcb *inp; /* backpointer to host stack's PCB */ + struct vnet *vnet; struct vi_info *vi; /* virtual interface */ struct sge_wrq *ofld_txq; struct sge_ofld_rxq *ofld_rxq; @@ -199,6 +200,7 @@ struct listen_ctx { struct stid_region stid_region; int flags; struct inpcb *inp; /* listening socket's inp */ + struct vnet *vnet; struct sge_wrq *ctrlq; struct sge_ofld_rxq *ofld_rxq; struct clip_entry *ce; @@ -278,10 +280,10 @@ void free_toepcb(struct toepcb *); void offload_socket(struct socket *, struct toepcb *); void undo_offload_socket(struct socket *); void final_cpl_received(struct toepcb *); -void insert_tid(struct adapter *, int, void *); +void insert_tid(struct adapter *, int, void *, int); void *lookup_tid(struct adapter *, int); void update_tid(struct adapter *, int, void *); -void remove_tid(struct adapter *, int); +void remove_tid(struct adapter *, int, int); void release_tid(struct adapter *, int, struct sge_wrq *); int find_best_mtu_idx(struct adapter *, struct in_conninfo *, int); u_long select_rcv_wnd(struct socket *); @@ -296,12 +298,14 @@ void release_lip(struct tom_data *, struct clip_entry *); /* t4_connect.c */ void t4_init_connect_cpl_handlers(void); +void t4_uninit_connect_cpl_handlers(void); int t4_connect(struct toedev *, struct socket *, struct rtentry *, struct sockaddr *); void act_open_failure_cleanup(struct adapter *, u_int, u_int); /* t4_listen.c */ void t4_init_listen_cpl_handlers(void); +void t4_uninit_listen_cpl_handlers(void); int t4_listen_start(struct toedev *, struct tcpcb *); int t4_listen_stop(struct toedev *, struct tcpcb *); void t4_syncache_added(struct toedev *, void *); diff --git a/sys/dev/hpt27xx/hpt27xx_os_bsd.c b/sys/dev/hpt27xx/hpt27xx_os_bsd.c index 7970e35..12e0efa 100644 --- a/sys/dev/hpt27xx/hpt27xx_os_bsd.c +++ b/sys/dev/hpt27xx/hpt27xx_os_bsd.c @@ -120,13 +120,13 @@ void *os_map_pci_bar( if (base & 1) { hba->pcibar[index].type = SYS_RES_IOPORT; - hba->pcibar[index].res = 
bus_alloc_resource(hba->pcidev, - hba->pcibar[index].type, &hba->pcibar[index].rid, 0, ~0, length, RF_ACTIVE); + hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev, + hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE); hba->pcibar[index].base = (void *)(unsigned long)(base & ~0x1); } else { hba->pcibar[index].type = SYS_RES_MEMORY; - hba->pcibar[index].res = bus_alloc_resource(hba->pcidev, - hba->pcibar[index].type, &hba->pcibar[index].rid, 0, ~0, length, RF_ACTIVE); + hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev, + hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE); hba->pcibar[index].base = (char *)rman_get_virtual(hba->pcibar[index].res) + offset; } diff --git a/sys/dev/hptmv/entry.c b/sys/dev/hptmv/entry.c index 37ff6c2..f4a3dd9 100644 --- a/sys/dev/hptmv/entry.c +++ b/sys/dev/hptmv/entry.c @@ -1356,8 +1356,8 @@ init_adapter(IAL_ADAPTER_T *pAdapter) /* also map EPROM address */ rid = 0x10; - if (!(pAdapter->mem_res = bus_alloc_resource(pAdapter->hpt_dev, SYS_RES_MEMORY, &rid, - 0, ~0, MV_SATA_PCI_BAR0_SPACE_SIZE+0x40000, RF_ACTIVE)) + if (!(pAdapter->mem_res = bus_alloc_resource_any(pAdapter->hpt_dev, + SYS_RES_MEMORY, &rid, RF_ACTIVE)) || !(pMvSataAdapter->adapterIoBaseAddress = rman_get_virtual(pAdapter->mem_res))) { diff --git a/sys/dev/hptnr/hptnr_os_bsd.c b/sys/dev/hptnr/hptnr_os_bsd.c index b019a7c..36f0c3c 100644 --- a/sys/dev/hptnr/hptnr_os_bsd.c +++ b/sys/dev/hptnr/hptnr_os_bsd.c @@ -106,13 +106,13 @@ void *os_map_pci_bar( if (base & 1) { hba->pcibar[index].type = SYS_RES_IOPORT; - hba->pcibar[index].res = bus_alloc_resource(hba->pcidev, - hba->pcibar[index].type, &hba->pcibar[index].rid, 0, ~0, length, RF_ACTIVE); + hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev, + hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE); hba->pcibar[index].base = (void *)(unsigned long)(base & ~0x1); } else { hba->pcibar[index].type = SYS_RES_MEMORY; - hba->pcibar[index].res = bus_alloc_resource(hba->pcidev, - hba->pcibar[index].type, &hba->pcibar[index].rid, 0, ~0, length, RF_ACTIVE); + hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev, + hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE); hba->pcibar[index].base = (char *)rman_get_virtual(hba->pcibar[index].res) + offset; } diff --git a/sys/dev/hptrr/hptrr_os_bsd.c b/sys/dev/hptrr/hptrr_os_bsd.c index 8600a8c..6cd01e5 100644 --- a/sys/dev/hptrr/hptrr_os_bsd.c +++ b/sys/dev/hptrr/hptrr_os_bsd.c @@ -98,8 +98,8 @@ void *os_map_pci_bar( else hba->pcibar[index].type = SYS_RES_MEMORY; - hba->pcibar[index].res = bus_alloc_resource(hba->pcidev, - hba->pcibar[index].type, &hba->pcibar[index].rid, 0, ~0, length, RF_ACTIVE); + hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev, + hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE); hba->pcibar[index].base = (char *)rman_get_virtual(hba->pcibar[index].res) + offset; return hba->pcibar[index].base; diff --git a/sys/dev/iir/iir.c b/sys/dev/iir/iir.c index bf5cec5..e74698e 100644 --- a/sys/dev/iir/iir.c +++ b/sys/dev/iir/iir.c @@ -744,9 +744,9 @@ gdt_next(struct gdt_softc *gdt) ccb->ccb_h.flags)); csio = &ccb->csio; ccbh = &ccb->ccb_h; - cmd = csio->cdb_io.cdb_bytes[0]; - /* Max CDB length is 12 bytes */ - if (csio->cdb_len > 12) { + cmd = scsiio_cdb_ptr(csio)[0]; + /* Max CDB length is 12 bytes, can't be phys addr */ + if (csio->cdb_len > 12 || (ccbh->flags & CAM_CDB_PHYS)) { ccbh->status = CAM_REQ_INVALID; --gdt_stat.io_count_act; xpt_done(ccb); diff --git a/sys/dev/isci/isci_controller.c 
b/sys/dev/isci/isci_controller.c index b0f4285..d3ec045 100644 --- a/sys/dev/isci/isci_controller.c +++ b/sys/dev/isci/isci_controller.c @@ -740,6 +740,11 @@ void isci_action(struct cam_sim *sim, union ccb *ccb) } break; case XPT_SCSI_IO: + if (ccb->ccb_h.flags & CAM_CDB_PHYS) { + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + break; + } isci_io_request_execute_scsi_io(ccb, controller); break; #if __FreeBSD_version >= 900026 @@ -802,6 +807,7 @@ isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller) { struct ISCI_REMOTE_DEVICE *dev; struct ccb_hdr *ccb_h; + uint8_t *ptr; int dev_idx; KASSERT(mtx_owned(&controller->lock), ("controller lock not owned")); @@ -821,8 +827,8 @@ isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller) if (ccb_h == NULL) continue; - isci_log_message(1, "ISCI", "release %p %x\n", ccb_h, - ((union ccb *)ccb_h)->csio.cdb_io.cdb_bytes[0]); + ptr = scsiio_cdb_ptr(&((union ccb *)ccb_h)->csio); + isci_log_message(1, "ISCI", "release %p %x\n", ccb_h, *ptr); dev->queued_ccb_in_progress = (union ccb *)ccb_h; isci_io_request_execute_scsi_io( diff --git a/sys/dev/isci/isci_io_request.c b/sys/dev/isci/isci_io_request.c index 4151900..066e0d9 100644 --- a/sys/dev/isci/isci_io_request.c +++ b/sys/dev/isci/isci_io_request.c @@ -86,6 +86,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, struct ISCI_REMOTE_DEVICE *isci_remote_device; union ccb *ccb; BOOL complete_ccb; + struct ccb_scsiio *csio; complete_ccb = TRUE; isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller); @@ -93,7 +94,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, (struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device); ccb = isci_request->ccb; - + csio = &ccb->csio; ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (completion_status) { @@ -124,7 +125,6 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, SCI_SSP_RESPONSE_IU_T * response_buffer; uint32_t sense_length; int error_code, sense_key, asc, ascq; - struct ccb_scsiio *csio = &ccb->csio; response_buffer = (SCI_SSP_RESPONSE_IU_T *) scif_io_request_get_response_iu_address( @@ -146,7 +146,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, isci_log_message(1, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, - ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0], + ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio), csio->scsi_status, sense_key, asc, ascq); break; } @@ -157,7 +157,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, isci_log_message(0, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x remote device reset required\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, - ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]); + ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)); break; case SCI_IO_FAILURE_TERMINATED: @@ -165,7 +165,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, isci_log_message(0, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, - ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]); + ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio)); break; case SCI_IO_FAILURE_INVALID_STATE: @@ -208,7 +208,7 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, isci_log_message(1, "ISCI", "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n", ccb->ccb_h.path_id, ccb->ccb_h.target_id, - ccb->ccb_h.target_lun, 
ccb->csio.cdb_io.cdb_bytes[0], + ccb->ccb_h.target_lun, scsiio_cdb_ptr(csio), completion_status); ccb->ccb_h.status |= CAM_REQ_CMP_ERR; break; @@ -285,13 +285,13 @@ isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, * get a ready notification for this device. */ isci_log_message(1, "ISCI", "already queued %p %x\n", - ccb, ccb->csio.cdb_io.cdb_bytes[0]); + ccb, scsiio_cdb_ptr(csio)); isci_remote_device->queued_ccb_in_progress = NULL; } else { isci_log_message(1, "ISCI", "queue %p %x\n", ccb, - ccb->csio.cdb_io.cdb_bytes[0]); + scsiio_cdb_ptr(csio)); ccb->ccb_h.status |= CAM_SIM_QUEUED; TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs, @@ -373,7 +373,7 @@ scif_cb_io_request_get_cdb_address(void * scif_user_io_request) struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)scif_user_io_request; - return (isci_request->ccb->csio.cdb_io.cdb_bytes); + return (scsiio_cdb_ptr(&isci_request->ccb->csio)); } /** diff --git a/sys/dev/isci/isci_task_request.c b/sys/dev/isci/isci_task_request.c index 6c8be45..2ed0afe 100644 --- a/sys/dev/isci/isci_task_request.c +++ b/sys/dev/isci/isci_task_request.c @@ -210,8 +210,9 @@ isci_task_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller, retry_task = FALSE; isci_log_message(0, "ISCI", "task timeout - not retrying\n"); - scif_cb_domain_device_removed(isci_controller, - isci_remote_device->domain, isci_remote_device); + scif_cb_domain_device_removed(scif_controller, + isci_remote_device->domain->sci_object, + remote_device); } else { retry_task = TRUE; isci_log_message(0, "ISCI", diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c index 427f545..f059b2b 100644 --- a/sys/dev/ixgbe/if_ix.c +++ b/sys/dev/ixgbe/if_ix.c @@ -3800,6 +3800,7 @@ ixgbe_handle_msf(void *context, int pending) /* Adjust media types shown in ifconfig */ ifmedia_removeall(&adapter->media); ixgbe_add_media_types(adapter); + ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); return; } diff --git a/sys/dev/jme/if_jme.c b/sys/dev/jme/if_jme.c index 9bdb229..781d337 100644 --- a/sys/dev/jme/if_jme.c +++ b/sys/dev/jme/if_jme.c @@ -803,7 +803,7 @@ jme_attach(device_t dev) } /* Create coalescing sysctl node. */ jme_sysctl_node(sc); - if ((error = jme_dma_alloc(sc) != 0)) + if ((error = jme_dma_alloc(sc)) != 0) goto fail; ifp = sc->jme_ifp = if_alloc(IFT_ETHER); diff --git a/sys/dev/kbd/kbd.c b/sys/dev/kbd/kbd.c index ea84d46..3f076b0 100644 --- a/sys/dev/kbd/kbd.c +++ b/sys/dev/kbd/kbd.c @@ -884,7 +884,7 @@ genkbd_commonioctl(keyboard_t *kbd, u_long cmd, caddr_t arg) omapp->key[i].spcl = mapp->key[i].spcl; omapp->key[i].flgs = mapp->key[i].flgs; } - return (0); + break; case PIO_KEYMAP: /* set keyboard translation table */ case OPIO_KEYMAP: /* set keyboard translation table (compat) */ #ifndef KBD_DISABLE_KEYMAP_LOAD diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c index e84e05a..bb6ba93 100644 --- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c +++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c @@ -306,6 +306,12 @@ mlx5e_update_carrier_work(struct work_struct *work) PRIV_UNLOCK(priv); } +/* + * This function reads the physical port counters from the firmware + * using a pre-defined layout defined by various MLX5E_PPORT_XXX() + * macros. The output is converted from big-endian 64-bit values into + * host endian ones and stored in the "priv->stats.pport" structure. 
+ */ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) { @@ -314,25 +320,32 @@ mlx5e_update_pport_counters(struct mlx5e_priv *priv) struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; u32 *in; u32 *out; - u64 *ptr; + const u64 *ptr; unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg); unsigned x; unsigned y; + /* allocate firmware request structures */ in = mlx5_vzalloc(sz); out = mlx5_vzalloc(sz); if (in == NULL || out == NULL) goto free_out; - ptr = (uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set); + /* + * Get pointer to the 64-bit counter set which is located at a + * fixed offset in the output firmware request structure: + */ + ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set); MLX5_SET(ppcnt_reg, in, local_port, 1); + /* read IEEE802_3 counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = y = 0; x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++) s->arg[y] = be64toh(ptr[x]); + /* read RFC2819 counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++) @@ -341,20 +354,29 @@ mlx5e_update_pport_counters(struct mlx5e_priv *priv) MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); + /* read RFC2863 counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); + /* read physical layer stats counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); free_out: + /* free firmware request structures */ kvfree(in); kvfree(out); } +/* + * This function is called regularly to collect all statistics + * counters from the firmware. The values can be viewed through the + * sysctl interface. Execution is serialized using the priv's global + * configuration lock. 
+ */ static void mlx5e_update_stats_work(struct work_struct *work) { diff --git a/sys/dev/mmc/mmc.c b/sys/dev/mmc/mmc.c index a41d807..3c70a99 100644 --- a/sys/dev/mmc/mmc.c +++ b/sys/dev/mmc/mmc.c @@ -401,7 +401,7 @@ mmc_wait_for_req(struct mmc_softc *sc, struct mmc_request *req) msleep(req, &sc->sc_mtx, 0, "mmcreq", 0); MMC_UNLOCK(sc); if (mmc_debug > 2 || (mmc_debug > 0 && req->cmd->error != MMC_ERR_NONE)) - device_printf(sc->dev, "CMD%d RESULT: %d\n", + device_printf(sc->dev, "CMD%d RESULT: %d\n", req->cmd->opcode, req->cmd->error); return (0); } @@ -511,7 +511,7 @@ mmc_idle_cards(struct mmc_softc *sc) { device_t dev; struct mmc_command cmd; - + dev = sc->dev; mmcbr_set_chip_select(dev, cs_high); mmcbr_update_ios(dev); @@ -795,7 +795,7 @@ mmc_test_bus_width(struct mmc_softc *sc) data.len = 8; data.flags = MMC_DATA_WRITE; mmc_wait_for_cmd(sc, &cmd, 0); - + memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); cmd.opcode = MMC_BUSTEST_R; @@ -808,7 +808,7 @@ mmc_test_bus_width(struct mmc_softc *sc) data.flags = MMC_DATA_READ; err = mmc_wait_for_cmd(sc, &cmd, 0); sc->squelched--; - + mmcbr_set_bus_width(sc->dev, bus_width_1); mmcbr_update_ios(sc->dev); @@ -832,7 +832,7 @@ mmc_test_bus_width(struct mmc_softc *sc) data.len = 4; data.flags = MMC_DATA_WRITE; mmc_wait_for_cmd(sc, &cmd, 0); - + memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); cmd.opcode = MMC_BUSTEST_R; @@ -1017,7 +1017,7 @@ mmc_decode_csd_sd(uint32_t *raw_csd, struct mmc_csd *csd) csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); - } else + } else panic("unknown SD CSD version"); } @@ -1349,9 +1349,9 @@ mmc_discover_cards(struct mmc_softc *sc) if (ivar->csd.csd_structure > 0) ivar->high_cap = 1; ivar->tran_speed = ivar->csd.tran_speed; - ivar->erase_sector = ivar->csd.erase_sector * + ivar->erase_sector = ivar->csd.erase_sector * ivar->csd.write_bl_len / MMC_SECTOR_SIZE; - + err = mmc_send_status(sc, ivar->rca, &status); if (err != MMC_ERR_NONE) { device_printf(sc->dev, @@ -1446,7 +1446,7 @@ mmc_discover_cards(struct mmc_softc *sc) mmc_decode_csd_mmc(ivar->raw_csd, &ivar->csd); ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE; ivar->tran_speed = ivar->csd.tran_speed; - ivar->erase_sector = ivar->csd.erase_sector * + ivar->erase_sector = ivar->csd.erase_sector * ivar->csd.write_bl_len / MMC_SECTOR_SIZE; err = mmc_send_status(sc, ivar->rca, &status); @@ -1655,7 +1655,7 @@ mmc_calculate_clock(struct mmc_softc *sc) int nkid, i, f_max; device_t *kids; struct mmc_ivars *ivar; - + f_max = mmcbr_get_f_max(sc->dev); max_dtr = max_hs_dtr = f_max; if ((mmcbr_get_caps(sc->dev) & MMC_CAP_HSPEED)) @@ -1770,7 +1770,7 @@ static void mmc_delayed_attach(void *xsc) { struct mmc_softc *sc = xsc; - + mmc_scan(sc); config_intrhook_disestablish(&sc->config_intrhook); } diff --git a/sys/dev/mmc/mmcreg.h b/sys/dev/mmc/mmcreg.h index f454ddb..080c14e 100644 --- a/sys/dev/mmc/mmcreg.h +++ b/sys/dev/mmc/mmcreg.h @@ -85,8 +85,11 @@ struct mmc_command { #define MMC_RSP_R1B (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | MMC_RSP_BUSY) #define MMC_RSP_R2 (MMC_RSP_PRESENT | MMC_RSP_136 | MMC_RSP_CRC) #define MMC_RSP_R3 (MMC_RSP_PRESENT) -#define MMC_RSP_R6 (MMC_RSP_PRESENT | MMC_RSP_CRC) -#define MMC_RSP_R7 (MMC_RSP_PRESENT | MMC_RSP_CRC) +#define MMC_RSP_R4 (MMC_RSP_PRESENT) +#define MMC_RSP_R5 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE) +#define MMC_RSP_R5B (MMC_RSP_PRESENT | MMC_RSP_CRC | 
MMC_RSP_OPCODE | MMC_RSP_BUSY) +#define MMC_RSP_R6 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE) +#define MMC_RSP_R7 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE) #define MMC_RSP(x) ((x) & MMC_RSP_MASK) uint32_t retries; uint32_t error; @@ -352,8 +355,8 @@ struct mmc_request { */ #define MMC_OCR_VOLTAGE 0x3fffffffU /* Vdd Voltage mask */ #define MMC_OCR_LOW_VOLTAGE (1u << 7) /* Low Voltage Range -- tbd */ +#define MMC_OCR_MIN_VOLTAGE_SHIFT 7 #define MMC_OCR_200_210 (1U << 8) /* Vdd voltage 2.00 ~ 2.10 */ -#define MMC_OCR_MIN_VOLTAGE_SHIFT 8 #define MMC_OCR_210_220 (1U << 9) /* Vdd voltage 2.10 ~ 2.20 */ #define MMC_OCR_220_230 (1U << 10) /* Vdd voltage 2.20 ~ 2.30 */ #define MMC_OCR_230_240 (1U << 11) /* Vdd voltage 2.30 ~ 2.40 */ diff --git a/sys/dev/mmc/mmcsd.c b/sys/dev/mmc/mmcsd.c index 72094ed..1931f7b 100644 --- a/sys/dev/mmc/mmcsd.c +++ b/sys/dev/mmc/mmcsd.c @@ -160,16 +160,16 @@ mmcsd_attach(device_t dev) d->d_dump = mmcsd_dump; d->d_name = "mmcsd"; d->d_drv1 = sc; - d->d_maxsize = 4*1024*1024; /* Maximum defined SD card AU size. */ d->d_sectorsize = mmc_get_sector_size(dev); + d->d_maxsize = mmc_get_max_data(dev) * d->d_sectorsize; d->d_mediasize = (off_t)mmc_get_media_size(dev) * d->d_sectorsize; - d->d_stripeoffset = 0; d->d_stripesize = mmc_get_erase_sector(dev) * d->d_sectorsize; d->d_unit = device_get_unit(dev); d->d_flags = DISKFLAG_CANDELETE; - d->d_delmaxsize = mmc_get_erase_sector(dev) * d->d_sectorsize * 1; /* conservative */ + d->d_delmaxsize = mmc_get_erase_sector(dev) * d->d_sectorsize; strlcpy(d->d_ident, mmc_get_card_sn_string(dev), sizeof(d->d_ident)); strlcpy(d->d_descr, mmc_get_card_id_string(dev), sizeof(d->d_descr)); + d->d_rotation_rate = DISK_RR_NON_ROTATING; /* * Display in most natural units. There's no cards < 1MB. The SD @@ -545,6 +545,8 @@ mmcsd_task(void *arg) bp->bio_error = EIO; bp->bio_resid = (end - block) * sz; bp->bio_flags |= BIO_ERROR; + } else { + bp->bio_resid = 0; } biodone(bp); } diff --git a/sys/dev/mpr/mpr_sas.c b/sys/dev/mpr/mpr_sas.c index d44e502..c9d83d8 100644 --- a/sys/dev/mpr/mpr_sas.c +++ b/sys/dev/mpr/mpr_sas.c @@ -1846,8 +1846,12 @@ mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb) if (csio->ccb_h.flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len); - else + else { + KASSERT(csio->cdb_len <= IOCDBLEN, + ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER is not set", + csio->cdb_len)); bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len); + } req->IoFlags = htole16(csio->cdb_len); /* @@ -2429,6 +2433,7 @@ mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm) * driver is being shutdown. */ if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) && + (csio->data_ptr != NULL) && ((csio->data_ptr[0] & 0x1f) == T_DIRECT) && (sc->mapping_table[target_id].device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) && diff --git a/sys/dev/mpr/mpr_sas_lsi.c b/sys/dev/mpr/mpr_sas_lsi.c index 73e2f54..1eb78d1 100644 --- a/sys/dev/mpr/mpr_sas_lsi.c +++ b/sys/dev/mpr/mpr_sas_lsi.c @@ -1056,6 +1056,7 @@ out: mpr_free_command(sc, cm); else if (error == 0) error = EWOULDBLOCK; + cm->cm_data = NULL; free(buffer, M_MPR); return (error); } @@ -1196,18 +1197,18 @@ mprsas_SSU_to_SATA_devices(struct mpr_softc *sc) continue; } - ccb = xpt_alloc_ccb_nowait(); - if (ccb == NULL) { - mpr_dprint(sc, MPR_FAULT, "Unable to alloc CCB to stop " - "unit.\n"); - return; - } - /* * The stop_at_shutdown flag will be set if this device is * a SATA direct-access end device. 
*/ if (target->stop_at_shutdown) { + ccb = xpt_alloc_ccb_nowait(); + if (ccb == NULL) { + mpr_dprint(sc, MPR_FAULT, "Unable to alloc CCB to stop " + "unit.\n"); + return; + } + if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { diff --git a/sys/dev/msk/if_msk.c b/sys/dev/msk/if_msk.c index 024f937..c464628 100644 --- a/sys/dev/msk/if_msk.c +++ b/sys/dev/msk/if_msk.c @@ -1626,7 +1626,7 @@ msk_attach(device_t dev) callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0); msk_sysctl_node(sc_if); - if ((error = msk_txrx_dma_alloc(sc_if) != 0)) + if ((error = msk_txrx_dma_alloc(sc_if)) != 0) goto fail; msk_rx_dma_jalloc(sc_if); diff --git a/sys/dev/nand/nand_geom.c b/sys/dev/nand/nand_geom.c index 8786a0a..98373aa 100644 --- a/sys/dev/nand/nand_geom.c +++ b/sys/dev/nand/nand_geom.c @@ -394,6 +394,7 @@ create_geom_disk(struct nand_chip *chip) snprintf(ndisk->d_ident, sizeof(ndisk->d_ident), "nand: Man:0x%02x Dev:0x%02x", chip->id.man_id, chip->id.dev_id); + ndisk->d_rotation_rate = DISK_RR_NON_ROTATING; disk_create(ndisk, DISK_VERSION); @@ -415,6 +416,7 @@ create_geom_disk(struct nand_chip *chip) snprintf(rdisk->d_ident, sizeof(rdisk->d_ident), "nand_raw: Man:0x%02x Dev:0x%02x", chip->id.man_id, chip->id.dev_id); + disk->d_rotation_rate = DISK_RR_NON_ROTATING; disk_create(rdisk, DISK_VERSION); diff --git a/sys/dev/ntb/if_ntb/if_ntb.c b/sys/dev/ntb/if_ntb/if_ntb.c index 33645c4..47d6dd8 100644 --- a/sys/dev/ntb/if_ntb/if_ntb.c +++ b/sys/dev/ntb/if_ntb/if_ntb.c @@ -237,6 +237,11 @@ ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) int error = 0; switch (command) { + case SIOCSIFFLAGS: + case SIOCADDMULTI: + case SIOCDELMULTI: + break; + case SIOCSIFMTU: { if (ifr->ifr_mtu > sc->mtu - ETHER_HDR_LEN) { diff --git a/sys/dev/nvd/nvd.c b/sys/dev/nvd/nvd.c index 989ed92..11e4f58 100644 --- a/sys/dev/nvd/nvd.c +++ b/sys/dev/nvd/nvd.c @@ -338,13 +338,11 @@ nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg) */ nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns), sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH); - nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr), NVME_MODEL_NUMBER_LENGTH); - -#if __FreeBSD_version >= 900034 strlcpy(disk->d_descr, descr, sizeof(descr)); -#endif + + disk->d_rotation_rate = DISK_RR_NON_ROTATING; ndisk->ns = ns; ndisk->disk = disk; diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c index 792469f..26cf526 100644 --- a/sys/dev/pci/pci.c +++ b/sys/dev/pci/pci.c @@ -269,12 +269,13 @@ static const struct pci_quirk pci_quirks[] = { { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 }, /* - * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that - * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the - * command register is set. + * Atheros AR8161/AR8162/E2200/E2400 Ethernet controllers have a + * bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit + * of the command register is set. 
*/ { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, + { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* diff --git a/sys/dev/ppbus/vpo.c b/sys/dev/ppbus/vpo.c index 9c9054f..a4f5226 100644 --- a/sys/dev/ppbus/vpo.c +++ b/sys/dev/ppbus/vpo.c @@ -187,17 +187,19 @@ vpo_intr(struct vpo_data *vpo, struct ccb_scsiio *csio) #ifdef VP0_DEBUG int i; #endif + uint8_t *ptr; + ptr = scsiio_cdb_ptr(csio); if (vpo->vpo_isplus) { errno = imm_do_scsi(&vpo->vpo_io, VP0_INITIATOR, csio->ccb_h.target_id, - (char *)&csio->cdb_io.cdb_bytes, csio->cdb_len, + ptr, csio->cdb_len, (char *)csio->data_ptr, csio->dxfer_len, &vpo->vpo_stat, &vpo->vpo_count, &vpo->vpo_error); } else { errno = vpoio_do_scsi(&vpo->vpo_io, VP0_INITIATOR, csio->ccb_h.target_id, - (char *)&csio->cdb_io.cdb_bytes, csio->cdb_len, + ptr, csio->cdb_len, (char *)csio->data_ptr, csio->dxfer_len, &vpo->vpo_stat, &vpo->vpo_count, &vpo->vpo_error); } @@ -208,7 +210,7 @@ vpo_intr(struct vpo_data *vpo, struct ccb_scsiio *csio) /* dump of command */ for (i=0; i<csio->cdb_len; i++) - printf("%x ", ((char *)&csio->cdb_io.cdb_bytes)[i]); + printf("%x ", ((char *)ptr)[i]); printf("\n"); #endif @@ -307,11 +309,15 @@ vpo_action(struct cam_sim *sim, union ccb *ccb) csio = &ccb->csio; + if (ccb->ccb_h.flags & CAM_CDB_PHYS) { + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + break; + } #ifdef VP0_DEBUG device_printf(vpo->vpo_dev, "XPT_SCSI_IO (0x%x) request\n", - csio->cdb_io.cdb_bytes[0]); + scsiio_cdb_ptr(csio)); #endif - vpo_intr(vpo, csio); xpt_done(ccb); diff --git a/sys/dev/qlxgbe/ql_def.h b/sys/dev/qlxgbe/ql_def.h index ecc238c..3ffc94c 100644 --- a/sys/dev/qlxgbe/ql_def.h +++ b/sys/dev/qlxgbe/ql_def.h @@ -112,6 +112,16 @@ typedef struct _qla_tx_ring { uint64_t count; } qla_tx_ring_t; +typedef struct _qla_tx_fp { + struct mtx tx_mtx; + char tx_mtx_name[32]; + struct buf_ring *tx_br; + struct task fp_task; + struct taskqueue *fp_taskqueue; + void *ha; + uint32_t txr_idx; +} qla_tx_fp_t; + /* * Adapter structure contains the hardware independant information of the * pci function. 
@@ -178,10 +188,9 @@ struct qla_host { qla_tx_ring_t tx_ring[NUM_TX_RINGS]; bus_dma_tag_t tx_tag; - struct task tx_task; - struct taskqueue *tx_tq; struct callout tx_callout; - struct mtx tx_lock; + + qla_tx_fp_t tx_fp[MAX_SDS_RINGS]; qla_rx_ring_t rx_ring[MAX_RDS_RINGS]; bus_dma_tag_t rx_tag; diff --git a/sys/dev/qlxgbe/ql_glbl.h b/sys/dev/qlxgbe/ql_glbl.h index 8f92b0e..beafb41 100644 --- a/sys/dev/qlxgbe/ql_glbl.h +++ b/sys/dev/qlxgbe/ql_glbl.h @@ -39,6 +39,7 @@ */ extern void ql_mbx_isr(void *arg); extern void ql_isr(void *arg); +extern uint32_t ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count); /* * from ql_os.c @@ -66,7 +67,7 @@ extern void qla_reset_promisc(qla_host_t *ha); extern int ql_set_allmulti(qla_host_t *ha); extern void qla_reset_allmulti(qla_host_t *ha); extern void ql_update_link_state(qla_host_t *ha); -extern void ql_hw_tx_done(qla_host_t *ha); +extern void ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx); extern int ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id); extern void ql_hw_stop_rcv(qla_host_t *ha); extern void ql_get_stats(qla_host_t *ha); @@ -76,7 +77,7 @@ extern void qla_hw_async_event(qla_host_t *ha); extern int qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb, uint32_t *num_rcvq); -extern int qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp); +extern int ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp); extern void ql_minidump(qla_host_t *ha); extern int ql_minidump_init(qla_host_t *ha); diff --git a/sys/dev/qlxgbe/ql_hw.c b/sys/dev/qlxgbe/ql_hw.c index 8c40aba..cc4b042a 100644 --- a/sys/dev/qlxgbe/ql_hw.c +++ b/sys/dev/qlxgbe/ql_hw.c @@ -51,7 +51,6 @@ static void qla_del_rcv_cntxt(qla_host_t *ha); static int qla_init_rcv_cntxt(qla_host_t *ha); static void qla_del_xmt_cntxt(qla_host_t *ha); static int qla_init_xmt_cntxt(qla_host_t *ha); -static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx); static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause); static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, @@ -2047,7 +2046,7 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, ha->hw.iscsi_pkt_count++; if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { - qla_hw_tx_done_locked(ha, txr_idx); + ql_hw_tx_done_locked(ha, txr_idx); if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= " @@ -2552,15 +2551,8 @@ qla_init_rcv_cntxt(qla_host_t *ha) qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr); rcntxt->sds[i].size = qla_host_to_le32(NUM_STATUS_DESCRIPTORS); - if (ha->msix_count == 2) { - rcntxt->sds[i].intr_id = - qla_host_to_le16(hw->intr_id[0]); - rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i)); - } else { - rcntxt->sds[i].intr_id = - qla_host_to_le16(hw->intr_id[i]); - rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0); - } + rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]); + rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0); } for (i = 0; i < rcntxt_rds_rings; i++) { @@ -2672,17 +2664,11 @@ qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds) add_rcv->sds[i].size = qla_host_to_le32(NUM_STATUS_DESCRIPTORS); - if (ha->msix_count == 2) { - add_rcv->sds[i].intr_id = - qla_host_to_le16(hw->intr_id[0]); - add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j); - } else { - add_rcv->sds[i].intr_id = - qla_host_to_le16(hw->intr_id[j]); - add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0); - } + 
add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]); + add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0); } + for (i = 0; (i < nsds); i++) { j = i + sds_idx; @@ -2803,6 +2789,7 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) q80_rsp_tx_cntxt_t *tcntxt_rsp; uint32_t err; qla_hw_tx_cntxt_t *hw_tx_cntxt; + uint32_t intr_idx; hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; @@ -2818,6 +2805,8 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2); tcntxt->count_version |= Q8_MBX_CMD_VERSION; + intr_idx = txr_idx; + #ifdef QL_ENABLE_ISCSI_TLV tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO | @@ -2827,8 +2816,9 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) tcntxt->traffic_class = 1; } -#else + intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1); +#else tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO; #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ @@ -2841,10 +2831,9 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr); tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS); - tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]); + tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]); tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0); - hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS; hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0; @@ -3166,11 +3155,11 @@ ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt, } /* - * Name: qla_hw_tx_done_locked + * Name: ql_hw_tx_done_locked * Function: Handle Transmit Completions */ -static void -qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx) +void +ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx) { qla_tx_buf_t *txb; qla_hw_t *hw = &ha->hw; @@ -3208,34 +3197,6 @@ qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx) return; } -/* - * Name: ql_hw_tx_done - * Function: Handle Transmit Completions - */ -void -ql_hw_tx_done(qla_host_t *ha) -{ - int i; - uint32_t flag = 0; - - if (!mtx_trylock(&ha->tx_lock)) { - QL_DPRINT8(ha, (ha->pci_dev, - "%s: !mtx_trylock(&ha->tx_lock)\n", __func__)); - return; - } - for (i = 0; i < ha->hw.num_tx_rings; i++) { - qla_hw_tx_done_locked(ha, i); - if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1)) - flag = 1; - } - - if (!flag) - ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; - - QLA_TX_UNLOCK(ha); - return; -} - void ql_update_link_state(qla_host_t *ha) { @@ -3655,7 +3616,7 @@ qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits) } int -qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp) +ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp) { struct ether_vlan_header *eh; uint16_t etype; diff --git a/sys/dev/qlxgbe/ql_hw.h b/sys/dev/qlxgbe/ql_hw.h index e50bc5e..37090ff 100644 --- a/sys/dev/qlxgbe/ql_hw.h +++ b/sys/dev/qlxgbe/ql_hw.h @@ -1543,7 +1543,6 @@ typedef struct _qla_hw_tx_cntxt { uint32_t tx_prod_reg; uint16_t tx_cntxt_id; - uint8_t frame_hdr[QL_FRAME_HDR_SIZE]; } qla_hw_tx_cntxt_t; diff --git a/sys/dev/qlxgbe/ql_isr.c b/sys/dev/qlxgbe/ql_isr.c index ab7f908..19f2d71 100644 --- a/sys/dev/qlxgbe/ql_isr.c +++ b/sys/dev/qlxgbe/ql_isr.c @@ -159,7 +159,12 @@ qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx) ifp->if_ipackets++; mpf->m_pkthdr.flowid = sgc->rss_hash; - M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE); + +#if __FreeBSD_version >= 1100000 + M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH); +#else + M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE); +#endif /* #if __FreeBSD_version >= 1100000 */ (*ifp->if_input)(ifp, 
mpf); @@ -450,11 +455,11 @@ qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx, } /* - * Name: qla_rcv_isr + * Name: ql_rcv_isr * Function: Main Interrupt Service Routine */ -static uint32_t -qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count) +uint32_t +ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count) { device_t dev; qla_hw_t *hw; @@ -704,7 +709,7 @@ qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count) } if (ha->flags.stop_rcv) - goto qla_rcv_isr_exit; + goto ql_rcv_isr_exit; if (hw->sds[sds_idx].sdsr_next != comp_idx) { QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx); @@ -727,7 +732,7 @@ qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count) if (opcode) ret = -1; -qla_rcv_isr_exit: +ql_rcv_isr_exit: hw->sds[sds_idx].rcv_active = 0; return (ret); @@ -931,7 +936,7 @@ ql_isr(void *arg) int idx; qla_hw_t *hw; struct ifnet *ifp; - uint32_t ret = 0; + qla_tx_fp_t *fp; ha = ivec->ha; hw = &ha->hw; @@ -940,17 +945,12 @@ ql_isr(void *arg) if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings) return; - if (idx == 0) - taskqueue_enqueue(ha->tx_tq, &ha->tx_task); - - ret = qla_rcv_isr(ha, idx, -1); - if (idx == 0) - taskqueue_enqueue(ha->tx_tq, &ha->tx_task); + fp = &ha->tx_fp[idx]; + + if (fp->fp_taskqueue != NULL) + taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); - if (!ha->flags.stop_rcv) { - QL_ENABLE_INTERRUPTS(ha, idx); - } return; } diff --git a/sys/dev/qlxgbe/ql_os.c b/sys/dev/qlxgbe/ql_os.c index ecccdb81..94ddf7a 100644 --- a/sys/dev/qlxgbe/ql_os.c +++ b/sys/dev/qlxgbe/ql_os.c @@ -76,11 +76,11 @@ static void qla_release(qla_host_t *ha); static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void qla_stop(qla_host_t *ha); -static int qla_send(qla_host_t *ha, struct mbuf **m_headp); -static void qla_tx_done(void *context, int pending); static void qla_get_peer(qla_host_t *ha); static void qla_error_recovery(void *context, int pending); static void qla_async_event(void *context, int pending); +static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, + uint32_t iscsi_pdu); /* * Hooks to the Operating Systems @@ -93,7 +93,14 @@ static void qla_init(void *arg); static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static int qla_media_change(struct ifnet *ifp); static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); -static void qla_start(struct ifnet *ifp); + +static int qla_transmit(struct ifnet *ifp, struct mbuf *mp); +static void qla_qflush(struct ifnet *ifp); +static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); +static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); +static int qla_create_fp_taskqueues(qla_host_t *ha); +static void qla_destroy_fp_taskqueues(qla_host_t *ha); +static void qla_drain_fp_taskqueues(qla_host_t *ha); static device_method_t qla_pci_methods[] = { /* Device interface */ @@ -225,7 +232,6 @@ qla_watchdog(void *arg) qla_hw_t *hw; struct ifnet *ifp; uint32_t i; - qla_hw_tx_cntxt_t *hw_tx_cntxt; hw = &ha->hw; ifp = ha->ifp; @@ -254,19 +260,14 @@ qla_watchdog(void *arg) &ha->async_event_task); } - for (i = 0; i < ha->hw.num_tx_rings; i++) { - hw_tx_cntxt = &hw->tx_cntxt[i]; - if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) != - hw_tx_cntxt->txr_comp) { - taskqueue_enqueue(ha->tx_tq, - &ha->tx_task); - break; - } - } + for (i = 0; i < ha->hw.num_sds_rings; i++) { + qla_tx_fp_t *fp = &ha->tx_fp[i]; - if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) { - taskqueue_enqueue(ha->tx_tq, &ha->tx_task); + 
if (fp->fp_taskqueue != NULL) + taskqueue_enqueue(fp->fp_taskqueue, + &fp->fp_task); } + ha->qla_watchdog_paused = 0; } else { ha->qla_watchdog_paused = 0; @@ -322,9 +323,7 @@ qla_pci_attach(device_t dev) rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, ha->reg_rid); - mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_SPIN); - - mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF); + mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); qla_add_sysctls(ha); ql_hw_add_sysctls(ha); @@ -344,8 +343,9 @@ qla_pci_attach(device_t dev) } QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" - " msix_count 0x%x pci_reg %p\n", __func__, ha, - ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg)); + " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha, + ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, + ha->pci_reg1)); /* initialize hardware */ if (ql_init_hw(ha)) { @@ -366,14 +366,15 @@ qla_pci_attach(device_t dev) goto qla_pci_attach_err; } device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" - " msix_count 0x%x pci_reg %p num_rcvq = %d\n", __func__, ha, - ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, num_rcvq); + " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n", + __func__, ha, ha->pci_func, rsrc_len, ha->msix_count, + ha->pci_reg, ha->pci_reg1, num_rcvq); #ifdef QL_ENABLE_ISCSI_TLV if ((ha->msix_count < 64) || (num_rcvq != 32)) { ha->hw.num_sds_rings = 15; - ha->hw.num_tx_rings = 32; + ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2; } #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ ha->hw.num_rds_rings = ha->hw.num_sds_rings; @@ -421,8 +422,20 @@ qla_pci_attach(device_t dev) device_printf(dev, "could not setup interrupt\n"); goto qla_pci_attach_err; } + + ha->tx_fp[i].ha = ha; + ha->tx_fp[i].txr_idx = i; + + if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) { + device_printf(dev, "%s: could not allocate tx_br[%d]\n", + __func__, i); + goto qla_pci_attach_err; + } } + if (qla_create_fp_taskqueues(ha) != 0) + goto qla_pci_attach_err; + printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus, ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count); @@ -452,13 +465,6 @@ qla_pci_attach(device_t dev) ha->flags.qla_watchdog_active = 1; ha->flags.qla_watchdog_pause = 0; - - TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha); - ha->tx_tq = taskqueue_create("qla_txq", M_NOWAIT, - taskqueue_thread_enqueue, &ha->tx_tq); - taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq", - device_get_nameunit(ha->pci_dev)); - callout_init(&ha->tx_callout, TRUE); ha->flags.qla_callout_init = 1; @@ -584,11 +590,6 @@ qla_release(qla_host_t *ha) taskqueue_free(ha->err_tq); } - if (ha->tx_tq) { - taskqueue_drain(ha->tx_tq, &ha->tx_task); - taskqueue_free(ha->tx_tq); - } - ql_del_cdev(ha); if (ha->flags.qla_watchdog_active) { @@ -626,13 +627,15 @@ qla_release(qla_host_t *ha) ha->irq_vec[i].irq_rid, ha->irq_vec[i].irq); } + + qla_free_tx_br(ha, &ha->tx_fp[i]); } + qla_destroy_fp_taskqueues(ha); if (ha->msix_count) pci_release_msi(dev); if (ha->flags.lock_init) { - mtx_destroy(&ha->tx_lock); mtx_destroy(&ha->hw_lock); } @@ -812,7 +815,9 @@ qla_init_ifnet(device_t dev, qla_host_t *ha) ifp->if_softc = ha; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = qla_ioctl; - ifp->if_start = qla_start; + + ifp->if_transmit = qla_transmit; + ifp->if_qflush = qla_qflush; IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha)); ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha); @@ -822,12 +827,13 @@ 
qla_init_ifnet(device_t dev, qla_host_t *ha) ether_ifattach(ifp, qla_get_mac_addr(ha)); - ifp->if_capabilities = IFCAP_HWCSUM | + ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | - IFCAP_JUMBO_MTU; - - ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; - ifp->if_capabilities |= IFCAP_VLAN_HWTSO; + IFCAP_JUMBO_MTU | + IFCAP_VLAN_HWTAGGING | + IFCAP_VLAN_MTU | + IFCAP_VLAN_HWTSO | + IFCAP_LRO; ifp->if_capenable = ifp->if_capabilities; @@ -922,10 +928,13 @@ qla_set_multi(qla_host_t *ha, uint32_t add_multi) if_maddr_runlock(ifp); - if (QLA_LOCK(ha, __func__, 1) == 0) { - ret = ql_hw_set_multi(ha, mta, mcnt, add_multi); - QLA_UNLOCK(ha, __func__); - } + //if (QLA_LOCK(ha, __func__, 1) == 0) { + // ret = ql_hw_set_multi(ha, mta, mcnt, add_multi); + // QLA_UNLOCK(ha, __func__); + //} + QLA_LOCK(ha, __func__, 1); + ret = ql_hw_set_multi(ha, mta, mcnt, add_multi); + QLA_UNLOCK(ha, __func__); return (ret); } @@ -1130,64 +1139,10 @@ qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) return; } -static void -qla_start(struct ifnet *ifp) -{ - struct mbuf *m_head; - qla_host_t *ha = (qla_host_t *)ifp->if_softc; - - QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); - - if (!mtx_trylock(&ha->tx_lock)) { - QL_DPRINT8(ha, (ha->pci_dev, - "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__)); - return; - } - - if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != - IFF_DRV_RUNNING) { - QL_DPRINT8(ha, - (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); - QLA_TX_UNLOCK(ha); - return; - } - - if (!ha->hw.link_up || !ha->watchdog_ticks) - ql_update_link_state(ha); - - if (!ha->hw.link_up) { - QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__)); - QLA_TX_UNLOCK(ha); - return; - } - - while (ifp->if_snd.ifq_head != NULL) { - IF_DEQUEUE(&ifp->if_snd, m_head); - - if (m_head == NULL) { - QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n", - __func__)); - break; - } - - if (qla_send(ha, &m_head)) { - if (m_head == NULL) - break; - QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__)); - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - IF_PREPEND(&ifp->if_snd, m_head); - break; - } - /* Send a copy of the frame to the BPF listener */ - ETHER_BPF_MTAP(ifp, m_head); - } - QLA_TX_UNLOCK(ha); - QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); - return; -} static int -qla_send(qla_host_t *ha, struct mbuf **m_headp) +qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, + uint32_t iscsi_pdu) { bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; bus_dmamap_t map; @@ -1195,29 +1150,9 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp) int ret = -1; uint32_t tx_idx; struct mbuf *m_head = *m_headp; - uint32_t txr_idx = ha->txr_idx; - uint32_t iscsi_pdu = 0; QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); - /* check if flowid is set */ - - if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) { -#ifdef QL_ENABLE_ISCSI_TLV - if (qla_iscsi_pdu(ha, m_head) == 0) { - iscsi_pdu = 1; - txr_idx = m_head->m_pkthdr.flowid & - ((ha->hw.num_tx_rings >> 1) - 1); - } else { - txr_idx = m_head->m_pkthdr.flowid & - (ha->hw.num_tx_rings - 1); - } -#else - txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1); -#endif /* #ifdef QL_ENABLE_ISCSI_TLV */ - } - - tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next; map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; @@ -1295,16 +1230,302 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp) return (ret); } +static int +qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) +{ + snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), + "qla%d_fp%d_tx_mq_lock", 
ha->pci_func, fp->txr_idx); + + mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); + + fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF, + M_NOWAIT, &fp->tx_mtx); + if (fp->tx_br == NULL) { + QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " + " fp[%d, %d]\n", ha->pci_func, fp->txr_idx)); + return (-ENOMEM); + } + return 0; +} + +static void +qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) +{ + struct mbuf *mp; + struct ifnet *ifp = ha->ifp; + + if (mtx_initialized(&fp->tx_mtx)) { + + if (fp->tx_br != NULL) { + + mtx_lock(&fp->tx_mtx); + + while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { + m_freem(mp); + } + + mtx_unlock(&fp->tx_mtx); + + buf_ring_free(fp->tx_br, M_DEVBUF); + fp->tx_br = NULL; + } + mtx_destroy(&fp->tx_mtx); + } + return; +} + +static void +qla_fp_taskqueue(void *context, int pending) +{ + qla_tx_fp_t *fp; + qla_host_t *ha; + struct ifnet *ifp; + struct mbuf *mp; + int ret; + uint32_t txr_idx; + uint32_t iscsi_pdu = 0; + uint32_t rx_pkts_left; + + fp = context; + + if (fp == NULL) + return; + + ha = (qla_host_t *)fp->ha; + + ifp = ha->ifp; + + txr_idx = fp->txr_idx; + + mtx_lock(&fp->tx_mtx); + + if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != + IFF_DRV_RUNNING) || (!ha->hw.link_up)) { + mtx_unlock(&fp->tx_mtx); + goto qla_fp_taskqueue_exit; + } + + rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64); + +#ifdef QL_ENABLE_ISCSI_TLV + ql_hw_tx_done_locked(ha, fp->txr_idx); + ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1))); + txr_idx = txr_idx + (ha->hw.num_tx_rings >> 1); +#else + ql_hw_tx_done_locked(ha, fp->txr_idx); +#endif /* #ifdef QL_ENABLE_ISCSI_TLV */ + + mp = drbr_peek(ifp, fp->tx_br); + + while (mp != NULL) { + + if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) { +#ifdef QL_ENABLE_ISCSI_TLV + if (ql_iscsi_pdu(ha, mp) == 0) { + iscsi_pdu = 1; + } +#endif /* #ifdef QL_ENABLE_ISCSI_TLV */ + } + + ret = qla_send(ha, &mp, txr_idx, iscsi_pdu); + + if (ret) { + if (mp != NULL) + drbr_putback(ifp, fp->tx_br, mp); + else { + drbr_advance(ifp, fp->tx_br); + } + + mtx_unlock(&fp->tx_mtx); + + goto qla_fp_taskqueue_exit0; + } else { + drbr_advance(ifp, fp->tx_br); + } + + mp = drbr_peek(ifp, fp->tx_br); + } + + mtx_unlock(&fp->tx_mtx); + +qla_fp_taskqueue_exit0: + + if (rx_pkts_left || ((mp != NULL) && ret)) { + taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); + } else { + if (!ha->flags.stop_rcv) { + QL_ENABLE_INTERRUPTS(ha, fp->txr_idx); + } + } + +qla_fp_taskqueue_exit: + + QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); + return; +} + +static int +qla_create_fp_taskqueues(qla_host_t *ha) +{ + int i; + uint8_t tq_name[32]; + + for (i = 0; i < ha->hw.num_sds_rings; i++) { + + qla_tx_fp_t *fp = &ha->tx_fp[i]; + + bzero(tq_name, sizeof (tq_name)); + snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); + + TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp); + + fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT, + taskqueue_thread_enqueue, + &fp->fp_taskqueue); + + if (fp->fp_taskqueue == NULL) + return (-1); + + taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", + tq_name); + + QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, + fp->fp_taskqueue)); + } + + return (0); +} + +static void +qla_destroy_fp_taskqueues(qla_host_t *ha) +{ + int i; + + for (i = 0; i < ha->hw.num_sds_rings; i++) { + + qla_tx_fp_t *fp = &ha->tx_fp[i]; + + if (fp->fp_taskqueue != NULL) { + taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); + taskqueue_free(fp->fp_taskqueue); + fp->fp_taskqueue = NULL; + } + } + 
return; +} + +static void +qla_drain_fp_taskqueues(qla_host_t *ha) +{ + int i; + + for (i = 0; i < ha->hw.num_sds_rings; i++) { + qla_tx_fp_t *fp = &ha->tx_fp[i]; + + if (fp->fp_taskqueue != NULL) { + taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); + } + } + return; +} + +static int +qla_transmit(struct ifnet *ifp, struct mbuf *mp) +{ + qla_host_t *ha = (qla_host_t *)ifp->if_softc; + qla_tx_fp_t *fp; + int rss_id = 0; + int ret = 0; + + QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); + +#if __FreeBSD_version >= 1100000 + if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) +#else + if (mp->m_flags & M_FLOWID) +#endif + rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) % + ha->hw.num_sds_rings; + fp = &ha->tx_fp[rss_id]; + + if (fp->tx_br == NULL) { + ret = EINVAL; + goto qla_transmit_exit; + } + + if (mp != NULL) { + ret = drbr_enqueue(ifp, fp->tx_br, mp); + } + + if (fp->fp_taskqueue != NULL) + taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); + + ret = 0; + +qla_transmit_exit: + + QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); + return ret; +} + +static void +qla_qflush(struct ifnet *ifp) +{ + int i; + qla_tx_fp_t *fp; + struct mbuf *mp; + qla_host_t *ha; + + ha = (qla_host_t *)ifp->if_softc; + + QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); + + for (i = 0; i < ha->hw.num_sds_rings; i++) { + + fp = &ha->tx_fp[i]; + + if (fp == NULL) + continue; + + if (fp->tx_br) { + mtx_lock(&fp->tx_mtx); + + while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { + m_freem(mp); + } + mtx_unlock(&fp->tx_mtx); + } + } + QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); + + return; +} + + static void qla_stop(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; device_t dev; + int i = 0; dev = ha->pci_dev; ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); - QLA_TX_LOCK(ha); QLA_TX_UNLOCK(ha); + + for (i = 0; i < ha->hw.num_sds_rings; i++) { + qla_tx_fp_t *fp; + + fp = &ha->tx_fp[i]; + + if (fp == NULL) + continue; + + if (fp->tx_br != NULL) { + mtx_lock(&fp->tx_mtx); + mtx_unlock(&fp->tx_mtx); + } + } ha->flags.qla_watchdog_pause = 1; @@ -1313,6 +1534,8 @@ qla_stop(qla_host_t *ha) ha->flags.qla_interface_up = 0; + qla_drain_fp_taskqueues(ha); + ql_hw_stop_rcv(ha); ql_del_hw_if(ha); @@ -1653,25 +1876,6 @@ exit_ql_get_mbuf: return (ret); } -static void -qla_tx_done(void *context, int pending) -{ - qla_host_t *ha = context; - struct ifnet *ifp; - - ifp = ha->ifp; - - if (!ifp) - return; - - if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { - QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); - return; - } - ql_hw_tx_done(ha); - - qla_start(ha->ifp); -} static void qla_get_peer(qla_host_t *ha) @@ -1714,18 +1918,32 @@ qla_error_recovery(void *context, int pending) qla_host_t *ha = context; uint32_t msecs_100 = 100; struct ifnet *ifp = ha->ifp; + int i = 0; (void)QLA_LOCK(ha, __func__, 0); if (ha->flags.qla_interface_up) { - ha->hw.imd_compl = 1; - qla_mdelay(__func__, 300); + ha->hw.imd_compl = 1; + qla_mdelay(__func__, 300); - ql_hw_stop_rcv(ha); + ql_hw_stop_rcv(ha); - ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); - QLA_TX_LOCK(ha); QLA_TX_UNLOCK(ha); + ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); + + for (i = 0; i < ha->hw.num_sds_rings; i++) { + qla_tx_fp_t *fp; + + fp = &ha->tx_fp[i]; + + if (fp == NULL) + continue; + + if (fp->tx_br != NULL) { + mtx_lock(&fp->tx_mtx); + mtx_unlock(&fp->tx_mtx); + } + } } QLA_UNLOCK(ha, __func__); diff --git a/sys/dev/qlxgbe/ql_os.h b/sys/dev/qlxgbe/ql_os.h index 3ba6c3f..db71772 100644 --- 
a/sys/dev/qlxgbe/ql_os.h +++ b/sys/dev/qlxgbe/ql_os.h @@ -147,8 +147,8 @@ MALLOC_DECLARE(M_QLA83XXBUF); /* * Locks */ -#define QLA_LOCK(ha, str, no_delay) qla_lock(ha, str, no_delay) -#define QLA_UNLOCK(ha, str) qla_unlock(ha, str) +#define QLA_LOCK(ha, str, no_delay) mtx_lock(&ha->hw_lock) +#define QLA_UNLOCK(ha, str) mtx_unlock(&ha->hw_lock) #define QLA_TX_LOCK(ha) mtx_lock(&ha->tx_lock); #define QLA_TX_UNLOCK(ha) mtx_unlock(&ha->tx_lock); diff --git a/sys/dev/qlxgbe/ql_ver.h b/sys/dev/qlxgbe/ql_ver.h index 182fa32..90d61d2 100644 --- a/sys/dev/qlxgbe/ql_ver.h +++ b/sys/dev/qlxgbe/ql_ver.h @@ -36,6 +36,6 @@ #define QLA_VERSION_MAJOR 3 #define QLA_VERSION_MINOR 10 -#define QLA_VERSION_BUILD 31 +#define QLA_VERSION_BUILD 33 #endif /* #ifndef _QL_VER_H_ */ diff --git a/sys/dev/rl/if_rl.c b/sys/dev/rl/if_rl.c index 2ff1310..2f877c9 100644 --- a/sys/dev/rl/if_rl.c +++ b/sys/dev/rl/if_rl.c @@ -1938,15 +1938,13 @@ rl_stop(struct rl_softc *sc) */ for (i = 0; i < RL_TX_LIST_CNT; i++) { if (sc->rl_cdata.rl_tx_chain[i] != NULL) { - if (sc->rl_cdata.rl_tx_chain[i] != NULL) { - bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, - sc->rl_cdata.rl_tx_dmamap[i], - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, - sc->rl_cdata.rl_tx_dmamap[i]); - m_freem(sc->rl_cdata.rl_tx_chain[i]); - sc->rl_cdata.rl_tx_chain[i] = NULL; - } + bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, + sc->rl_cdata.rl_tx_dmamap[i], + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, + sc->rl_cdata.rl_tx_dmamap[i]); + m_freem(sc->rl_cdata.rl_tx_chain[i]); + sc->rl_cdata.rl_tx_chain[i] = NULL; CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000); } diff --git a/sys/dev/sdhci/sdhci.c b/sys/dev/sdhci/sdhci.c index 47d6340..d1b0ecf 100644 --- a/sys/dev/sdhci/sdhci.c +++ b/sys/dev/sdhci/sdhci.c @@ -74,6 +74,7 @@ static void sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock); static void sdhci_start(struct sdhci_slot *slot); static void sdhci_start_data(struct sdhci_slot *slot, struct mmc_data *data); +static void sdhci_card_poll(void *); static void sdhci_card_task(void *, int); /* helper routines */ @@ -90,6 +91,22 @@ static void sdhci_card_task(void *, int); #define SDHCI_200_MAX_DIVIDER 256 #define SDHCI_300_MAX_DIVIDER 2046 +#define SDHCI_CARD_PRESENT_TICKS (hz / 5) +#define SDHCI_INSERT_DELAY_TICKS (hz / 2) + +/* + * Broadcom BCM577xx Controller Constants + */ +#define BCM577XX_DEFAULT_MAX_DIVIDER 256 /* Maximum divider supported by the default clock source. */ +#define BCM577XX_ALT_CLOCK_BASE 63000000 /* Alternative clock's base frequency. 
*/ + +#define BCM577XX_HOST_CONTROL 0x198 +#define BCM577XX_CTRL_CLKSEL_MASK 0xFFFFCFFF +#define BCM577XX_CTRL_CLKSEL_SHIFT 12 +#define BCM577XX_CTRL_CLKSEL_DEFAULT 0x0 +#define BCM577XX_CTRL_CLKSEL_64MHZ 0x3 + + static void sdhci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { @@ -152,8 +169,7 @@ sdhci_reset(struct sdhci_slot *slot, uint8_t mask) int timeout; if (slot->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { - if (!(RD4(slot, SDHCI_PRESENT_STATE) & - SDHCI_CARD_PRESENT)) + if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot)) return; } @@ -218,10 +234,15 @@ sdhci_init(struct sdhci_slot *slot) slot->intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | - SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT | SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE | SDHCI_INT_ACMD12ERR; + + if (!(slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) && + !(slot->opt & SDHCI_NON_REMOVABLE)) { + slot->intmask |= SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT; + } + WR4(slot, SDHCI_INT_ENABLE, slot->intmask); WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); } @@ -229,6 +250,8 @@ sdhci_init(struct sdhci_slot *slot) static void sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock) { + uint32_t clk_base; + uint32_t clk_sel; uint32_t res; uint16_t clk; uint16_t div; @@ -244,6 +267,22 @@ sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock) /* If no clock requested - left it so. */ if (clock == 0) return; + + /* Determine the clock base frequency */ + clk_base = slot->max_clk; + if (slot->quirks & SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC) { + clk_sel = RD2(slot, BCM577XX_HOST_CONTROL) & BCM577XX_CTRL_CLKSEL_MASK; + + /* Select clock source appropriate for the requested frequency. */ + if ((clk_base / BCM577XX_DEFAULT_MAX_DIVIDER) > clock) { + clk_base = BCM577XX_ALT_CLOCK_BASE; + clk_sel |= (BCM577XX_CTRL_CLKSEL_64MHZ << BCM577XX_CTRL_CLKSEL_SHIFT); + } else { + clk_sel |= (BCM577XX_CTRL_CLKSEL_DEFAULT << BCM577XX_CTRL_CLKSEL_SHIFT); + } + + WR2(slot, BCM577XX_HOST_CONTROL, clk_sel); + } /* Recalculate timeout clock frequency based on the new sd clock. */ if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) @@ -251,7 +290,7 @@ sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock) if (slot->version < SDHCI_SPEC_300) { /* Looking for highest freq <= clock. */ - res = slot->max_clk; + res = clk_base; for (div = 1; div < SDHCI_200_MAX_DIVIDER; div <<= 1) { if (res <= clock) break; @@ -262,11 +301,11 @@ sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock) } else { /* Version 3.0 divisors are multiples of two up to 1023*2 */ - if (clock >= slot->max_clk) + if (clock >= clk_base) div = 0; else { for (div = 2; div < SDHCI_300_MAX_DIVIDER; div += 2) { - if ((slot->max_clk / div) <= clock) + if ((clk_base / div) <= clock) break; } } @@ -274,8 +313,8 @@ sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock) } if (bootverbose || sdhci_debug) - slot_printf(slot, "Divider %d for freq %d (max %d)\n", - div, clock, slot->max_clk); + slot_printf(slot, "Divider %d for freq %d (base %d)\n", + div, clock, clk_base); /* Now we have got divider, set it. */ clk = (div & SDHCI_DIVIDER_MASK) << SDHCI_DIVIDER_SHIFT; @@ -338,6 +377,13 @@ sdhci_set_power(struct sdhci_slot *slot, u_char power) /* Turn on the power. 
*/ pwr |= SDHCI_POWER_ON; WR1(slot, SDHCI_POWER_CONTROL, pwr); + + if (slot->quirks & SDHCI_QUIRK_INTEL_POWER_UP_RESET) { + WR1(slot, SDHCI_POWER_CONTROL, pwr | 0x10); + DELAY(10); + WR1(slot, SDHCI_POWER_CONTROL, pwr); + DELAY(300); + } } static void @@ -445,23 +491,17 @@ sdhci_transfer_pio(struct sdhci_slot *slot) } } -static void -sdhci_card_delay(void *arg) -{ - struct sdhci_slot *slot = arg; - - taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task); -} - static void sdhci_card_task(void *arg, int pending) { struct sdhci_slot *slot = arg; SDHCI_LOCK(slot); - if (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT) { + if (SDHCI_GET_CARD_PRESENT(slot->bus, slot)) { if (slot->dev == NULL) { /* If card is present - attach mmc bus. */ + if (bootverbose || sdhci_debug) + slot_printf(slot, "Card inserted\n"); slot->dev = device_add_child(slot->bus, "mmc", -1); device_set_ivars(slot->dev, slot); SDHCI_UNLOCK(slot); @@ -471,6 +511,8 @@ sdhci_card_task(void *arg, int pending) } else { if (slot->dev != NULL) { /* If no card present - detach mmc bus. */ + if (bootverbose || sdhci_debug) + slot_printf(slot, "Card removed\n"); device_t d = slot->dev; slot->dev = NULL; SDHCI_UNLOCK(slot); @@ -480,6 +522,51 @@ sdhci_card_task(void *arg, int pending) } } +static void +sdhci_handle_card_present_locked(struct sdhci_slot *slot, bool is_present) +{ + bool was_present; + + /* + * If there was no card and now there is one, schedule the task to + * create the child device after a short delay. The delay is to + * debounce the card insert (sometimes the card detect pin stabilizes + * before the other pins have made good contact). + * + * If there was a card present and now it's gone, immediately schedule + * the task to delete the child device. No debouncing -- gone is gone, + * because once power is removed, a full card re-init is needed, and + * that happens by deleting and recreating the child device. + */ + was_present = slot->dev != NULL; + if (!was_present && is_present) { + taskqueue_enqueue_timeout(taskqueue_swi_giant, + &slot->card_delayed_task, -SDHCI_INSERT_DELAY_TICKS); + } else if (was_present && !is_present) { + taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task); + } +} + +void +sdhci_handle_card_present(struct sdhci_slot *slot, bool is_present) +{ + + SDHCI_LOCK(slot); + sdhci_handle_card_present_locked(slot, is_present); + SDHCI_UNLOCK(slot); +} + +static void +sdhci_card_poll(void *arg) +{ + struct sdhci_slot *slot = arg; + + sdhci_handle_card_present(slot, + SDHCI_GET_CARD_PRESENT(slot->bus, slot)); + callout_reset(&slot->card_poll_callout, SDHCI_CARD_PRESENT_TICKS, + sdhci_card_poll, slot); +} + int sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num) { @@ -550,9 +637,11 @@ sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num) device_printf(dev, "Hardware doesn't specify base clock " "frequency, using %dMHz as default.\n", SDHCI_DEFAULT_MAX_FREQ); } - /* Calculate timeout clock frequency. */ + /* Calculate/set timeout clock frequency. 
*/ if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) { slot->timeout_clk = slot->max_clk / 1000; + } else if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_1MHZ) { + slot->timeout_clk = 1000; } else { slot->timeout_clk = (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; @@ -596,6 +685,8 @@ sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num) slot->opt &= ~SDHCI_HAVE_DMA; if (slot->quirks & SDHCI_QUIRK_FORCE_DMA) slot->opt |= SDHCI_HAVE_DMA; + if (slot->quirks & SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE) + slot->opt |= SDHCI_NON_REMOVABLE; /* * Use platform-provided transfer backend @@ -608,18 +699,33 @@ sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num) slot_printf(slot, "%uMHz%s %s%s%s%s %s\n", slot->max_clk / 1000000, (caps & SDHCI_CAN_DO_HISPD) ? " HS" : "", - (caps & MMC_CAP_8_BIT_DATA) ? "8bits" : - ((caps & MMC_CAP_4_BIT_DATA) ? "4bits" : "1bit"), + (slot->host.caps & MMC_CAP_8_BIT_DATA) ? "8bits" : + ((slot->host.caps & MMC_CAP_4_BIT_DATA) ? "4bits" : + "1bit"), (caps & SDHCI_CAN_VDD_330) ? " 3.3V" : "", (caps & SDHCI_CAN_VDD_300) ? " 3.0V" : "", (caps & SDHCI_CAN_VDD_180) ? " 1.8V" : "", (slot->opt & SDHCI_HAVE_DMA) ? "DMA" : "PIO"); sdhci_dumpregs(slot); } - + + slot->timeout = 10; + SYSCTL_ADD_INT(device_get_sysctl_ctx(slot->bus), + SYSCTL_CHILDREN(device_get_sysctl_tree(slot->bus)), OID_AUTO, + "timeout", CTLFLAG_RW, &slot->timeout, 0, + "Maximum timeout for SDHCI transfers (in secs)"); TASK_INIT(&slot->card_task, 0, sdhci_card_task, slot); - callout_init(&slot->card_callout, 1); + TIMEOUT_TASK_INIT(taskqueue_swi_giant, &slot->card_delayed_task, 0, + sdhci_card_task, slot); + callout_init(&slot->card_poll_callout, 1); callout_init_mtx(&slot->timeout_callout, &slot->mtx, 0); + + if ((slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) && + !(slot->opt & SDHCI_NON_REMOVABLE)) { + callout_reset(&slot->card_poll_callout, + SDHCI_CARD_PRESENT_TICKS, sdhci_card_poll, slot); + } + return (0); } @@ -635,8 +741,9 @@ sdhci_cleanup_slot(struct sdhci_slot *slot) device_t d; callout_drain(&slot->timeout_callout); - callout_drain(&slot->card_callout); + callout_drain(&slot->card_poll_callout); taskqueue_drain(taskqueue_swi_giant, &slot->card_task); + taskqueue_drain_timeout(taskqueue_swi_giant, &slot->card_delayed_task); SDHCI_LOCK(slot); d = slot->dev; @@ -682,6 +789,16 @@ sdhci_generic_min_freq(device_t brdev, struct sdhci_slot *slot) return (slot->max_clk / SDHCI_200_MAX_DIVIDER); } +bool +sdhci_generic_get_card_present(device_t brdev, struct sdhci_slot *slot) +{ + + if (slot->opt & SDHCI_NON_REMOVABLE) + return true; + + return (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); +} + int sdhci_generic_update_ios(device_t brdev, device_t reqdev) { @@ -779,7 +896,7 @@ static void sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd) { int flags, timeout; - uint32_t mask, state; + uint32_t mask; slot->curcmd = cmd; slot->cmd_done = 0; @@ -794,11 +911,9 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd) return; } - /* Read controller present state. */ - state = RD4(slot, SDHCI_PRESENT_STATE); /* Do not issue command if there is no card, clock or power. * Controller will not detect timeout without clock active. */ - if ((state & SDHCI_CARD_PRESENT) == 0 || + if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot) || slot->power == 0 || slot->clock == 0) { cmd->error = MMC_ERR_FAILED; @@ -824,7 +939,7 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd) * (It's usually more like 20-30ms in the real world.) 
*/ timeout = 250; - while (state & mask) { + while (mask & RD4(slot, SDHCI_PRESENT_STATE)) { if (timeout == 0) { slot_printf(slot, "Controller never released " "inhibit bit(s).\n"); @@ -835,7 +950,6 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd) } timeout--; DELAY(1000); - state = RD4(slot, SDHCI_PRESENT_STATE); } /* Prepare command flags. */ @@ -873,7 +987,8 @@ sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd) /* Start command. */ WR2(slot, SDHCI_COMMAND_FLAGS, (cmd->opcode << 8) | (flags & 0xff)); /* Start timeout callout. */ - callout_reset(&slot->timeout_callout, 2*hz, sdhci_timeout, slot); + callout_reset(&slot->timeout_callout, slot->timeout * hz, + sdhci_timeout, slot); } static void @@ -1272,7 +1387,7 @@ sdhci_acmd_irq(struct sdhci_slot *slot) void sdhci_generic_intr(struct sdhci_slot *slot) { - uint32_t intmask; + uint32_t intmask, present; SDHCI_LOCK(slot); /* Read slot interrupt status. */ @@ -1286,22 +1401,16 @@ sdhci_generic_intr(struct sdhci_slot *slot) /* Handle card presence interrupts. */ if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { + present = (intmask & SDHCI_INT_CARD_INSERT) != 0; + slot->intmask &= + ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); + slot->intmask |= present ? SDHCI_INT_CARD_REMOVE : + SDHCI_INT_CARD_INSERT; + WR4(slot, SDHCI_INT_ENABLE, slot->intmask); + WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); WR4(slot, SDHCI_INT_STATUS, intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)); - - if (intmask & SDHCI_INT_CARD_REMOVE) { - if (bootverbose || sdhci_debug) - slot_printf(slot, "Card removed\n"); - callout_stop(&slot->card_callout); - taskqueue_enqueue(taskqueue_swi_giant, - &slot->card_task); - } - if (intmask & SDHCI_INT_CARD_INSERT) { - if (bootverbose || sdhci_debug) - slot_printf(slot, "Card inserted\n"); - callout_reset(&slot->card_callout, hz / 2, - sdhci_card_delay, slot); - } + sdhci_handle_card_present_locked(slot, present); intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); } /* Handle command interrupts. */ diff --git a/sys/dev/sdhci/sdhci.h b/sys/dev/sdhci/sdhci.h index b7d1960..4626816 100644 --- a/sys/dev/sdhci/sdhci.h +++ b/sys/dev/sdhci/sdhci.h @@ -63,6 +63,16 @@ #define SDHCI_QUIRK_WAITFOR_RESET_ASSERTED (1<<14) /* Leave controller in standard mode when putting card in HS mode. */ #define SDHCI_QUIRK_DONT_SET_HISPD_BIT (1<<15) +/* Alternate clock source is required when supplying a 400 KHz clock. */ +#define SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC (1<<16) +/* Card insert/remove interrupts don't work, polling required. */ +#define SDHCI_QUIRK_POLL_CARD_PRESENT (1<<17) +/* All controller slots are non-removable. */ +#define SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE (1<<18) +/* Issue custom Intel controller reset sequence after power-up. */ +#define SDHCI_QUIRK_INTEL_POWER_UP_RESET (1<<19) +/* Data timeout is invalid, use 1 MHz clock instead. 
*/ +#define SDHCI_QUIRK_DATA_TIMEOUT_1MHZ (1<<20) /* * Controller registers @@ -151,6 +161,9 @@ #define SDHCI_CLOCK_CARD_EN 0x0004 #define SDHCI_CLOCK_INT_STABLE 0x0002 #define SDHCI_CLOCK_INT_EN 0x0001 +#define SDHCI_DIVIDERS_MASK \ + ((SDHCI_DIVIDER_MASK << SDHCI_DIVIDER_SHIFT) | \ + (SDHCI_DIVIDER_HI_MASK << SDHCI_DIVIDER_HI_SHIFT)) #define SDHCI_TIMEOUT_CONTROL 0x2E @@ -268,9 +281,11 @@ struct sdhci_slot { device_t dev; /* Slot device */ u_char num; /* Slot number */ u_char opt; /* Slot options */ -#define SDHCI_HAVE_DMA 1 -#define SDHCI_PLATFORM_TRANSFER 2 +#define SDHCI_HAVE_DMA 0x01 +#define SDHCI_PLATFORM_TRANSFER 0x02 +#define SDHCI_NON_REMOVABLE 0x04 u_char version; + int timeout; /* Transfer timeout */ uint32_t max_clk; /* Max possible freq */ uint32_t timeout_clk; /* Timeout freq */ bus_dma_tag_t dmatag; @@ -278,7 +293,9 @@ struct sdhci_slot { u_char *dmamem; bus_addr_t paddr; /* DMA buffer address */ struct task card_task; /* Card presence check task */ - struct callout card_callout; /* Card insert delay callout */ + struct timeout_task + card_delayed_task;/* Card insert delayed task */ + struct callout card_poll_callout;/* Card present polling callout */ struct callout timeout_callout;/* Card command/data response timeout */ struct mmc_host host; /* Host parameters */ struct mmc_request *req; /* Current request */ @@ -316,5 +333,7 @@ int sdhci_generic_acquire_host(device_t brdev, device_t reqdev); int sdhci_generic_release_host(device_t brdev, device_t reqdev); void sdhci_generic_intr(struct sdhci_slot *slot); uint32_t sdhci_generic_min_freq(device_t brdev, struct sdhci_slot *slot); +bool sdhci_generic_get_card_present(device_t brdev, struct sdhci_slot *slot); +void sdhci_handle_card_present(struct sdhci_slot *slot, bool is_present); #endif /* __SDHCI_H__ */ diff --git a/sys/dev/sdhci/sdhci_if.m b/sys/dev/sdhci/sdhci_if.m index b33cdcf..da02d31 100644 --- a/sys/dev/sdhci/sdhci_if.m +++ b/sys/dev/sdhci/sdhci_if.m @@ -152,3 +152,9 @@ METHOD uint32_t min_freq { device_t brdev; struct sdhci_slot *slot; } DEFAULT sdhci_generic_min_freq; + +METHOD bool get_card_present { + device_t brdev; + struct sdhci_slot *slot; +} DEFAULT sdhci_generic_get_card_present; + diff --git a/sys/dev/sdhci/sdhci_pci.c b/sys/dev/sdhci/sdhci_pci.c index b13a0c9..d8e8897 100644 --- a/sys/dev/sdhci/sdhci_pci.c +++ b/sys/dev/sdhci/sdhci_pci.c @@ -63,15 +63,15 @@ __FBSDID("$FreeBSD$"); #define PCI_SDHCI_IFVENDOR 0x02 #define PCI_SLOT_INFO 0x40 /* 8 bits */ -#define PCI_SLOT_INFO_SLOTS(x) (((x >> 4) & 7) + 1) -#define PCI_SLOT_INFO_FIRST_BAR(x) ((x) & 7) +#define PCI_SLOT_INFO_SLOTS(x) (((x >> 4) & 7) + 1) +#define PCI_SLOT_INFO_FIRST_BAR(x) ((x) & 7) /* * RICOH specific PCI registers */ #define SDHC_PCI_MODE_KEY 0xf9 #define SDHC_PCI_MODE 0x150 -#define SDHC_PCI_MODE_SD20 0x10 +#define SDHC_PCI_MODE_SD20 0x10 #define SDHC_PCI_BASE_FREQ_KEY 0xfc #define SDHC_PCI_BASE_FREQ 0xe1 @@ -105,6 +105,21 @@ static const struct sdhci_device { { 0x2381197B, 0xffff, "JMicron JMB38X SD", SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_RESET_AFTER_REQUEST }, + { 0x16bc14e4, 0xffff, "Broadcom BCM577xx SDXC/MMC Card Reader", + SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC }, + { 0x0f148086, 0xffff, "Intel Bay Trail eMMC 4.5 Controller", + SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE | + SDHCI_QUIRK_INTEL_POWER_UP_RESET }, + { 0x0f508086, 0xffff, "Intel Bay Trail eMMC 4.5 Controller", + SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE | + SDHCI_QUIRK_INTEL_POWER_UP_RESET }, + { 0x22948086, 0xffff, "Intel Braswell eMMC 4.5.1 Controller", + 
SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE | + SDHCI_QUIRK_DATA_TIMEOUT_1MHZ | + SDHCI_QUIRK_INTEL_POWER_UP_RESET }, + { 0x5acc8086, 0xffff, "Intel Apollo Lake eMMC 5.0 Controller", + SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE | + SDHCI_QUIRK_INTEL_POWER_UP_RESET }, { 0, 0xffff, NULL, 0 } }; @@ -117,8 +132,8 @@ struct sdhci_pci_softc { int num_slots; /* Number of slots on this controller */ struct sdhci_slot slots[6]; struct resource *mem_res[6]; /* Memory resource */ - uint8_t cfg_freq; /* Saved mode */ - uint8_t cfg_mode; /* Saved frequency */ + uint8_t cfg_freq; /* Saved frequency */ + uint8_t cfg_mode; /* Saved mode */ }; static int sdhci_enable_msi = 1; @@ -329,12 +344,14 @@ sdhci_pci_attach(device_t dev) /* Allocate memory. */ rid = PCIR_BAR(bar + i); - sc->mem_res[i] = bus_alloc_resource(dev, SYS_RES_MEMORY, - &rid, 0ul, ~0ul, 0x100, RF_ACTIVE); + sc->mem_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); if (sc->mem_res[i] == NULL) { device_printf(dev, "Can't allocate memory for slot %d\n", i); continue; } + + slot->quirks = sc->quirks; if (sdhci_init_slot(dev, slot, i) != 0) continue; @@ -409,11 +426,16 @@ static int sdhci_pci_resume(device_t dev) { struct sdhci_pci_softc *sc = device_get_softc(dev); - int i; + int i, err; for (i = 0; i < sc->num_slots; i++) sdhci_generic_resume(&sc->slots[i]); - return (bus_generic_resume(dev)); + err = bus_generic_resume(dev); + if (err) + return (err); + if (sc->quirks & SDHCI_QUIRK_LOWER_FREQUENCY) + sdhci_lower_frequency(dev); + return (0); } static void @@ -442,11 +464,11 @@ static device_method_t sdhci_methods[] = { DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), /* mmcbr_if */ - DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios), - DEVMETHOD(mmcbr_request, sdhci_generic_request), - DEVMETHOD(mmcbr_get_ro, sdhci_generic_get_ro), - DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), - DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), + DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios), + DEVMETHOD(mmcbr_request, sdhci_generic_request), + DEVMETHOD(mmcbr_get_ro, sdhci_generic_get_ro), + DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), + DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, sdhci_pci_read_1), diff --git a/sys/dev/sfxge/common/efx_mcdi.c b/sys/dev/sfxge/common/efx_mcdi.c index e4a918a..4e7ff53 100644 --- a/sys/dev/sfxge/common/efx_mcdi.c +++ b/sys/dev/sfxge/common/efx_mcdi.c @@ -1725,7 +1725,8 @@ static __checkReturn efx_rc_t efx_mcdi_mac_stats( __in efx_nic_t *enp, __in_opt efsys_mem_t *esmp, - __in efx_stats_action_t action) + __in efx_stats_action_t action, + __in uint16_t period_ms) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN, @@ -1750,7 +1751,7 @@ efx_mcdi_mac_stats( MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable, MAC_STATS_IN_PERIODIC_ENABLE, enable | events, MAC_STATS_IN_PERIODIC_NOEVENT, !events, - MAC_STATS_IN_PERIOD_MS, (enable | events) ? 1000 : 0); + MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0); if (esmp != NULL) { int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t); @@ -1800,7 +1801,7 @@ efx_mcdi_mac_stats_clear( { efx_rc_t rc; - if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR)) != 0) + if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR, 0)) != 0) goto fail1; return (0); @@ -1823,7 +1824,7 @@ efx_mcdi_mac_stats_upload( * avoid having to pull the statistics buffer into the cache to * maintain cumulative statistics. 
*/ - if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD)) != 0) + if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD, 0)) != 0) goto fail1; return (0); @@ -1838,7 +1839,7 @@ fail1: efx_mcdi_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, - __in uint16_t period, + __in uint16_t period_ms, __in boolean_t events) { efx_rc_t rc; @@ -1847,14 +1848,17 @@ efx_mcdi_mac_stats_periodic( * The MC DMAs aggregate statistics for our convenience, so we can * avoid having to pull the statistics buffer into the cache to * maintain cumulative statistics. - * Huntington uses a fixed 1sec period, so use that on Siena too. + * Huntington uses a fixed 1sec period. + * Medford uses a fixed 1sec period before v6.2.1.1033 firmware. */ - if (period == 0) - rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE); + if (period_ms == 0) + rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE, 0); else if (events) - rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS); + rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS, + period_ms); else - rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS); + rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS, + period_ms); if (rc != 0) goto fail1; diff --git a/sys/dev/sfxge/common/efx_mcdi.h b/sys/dev/sfxge/common/efx_mcdi.h index ffa50f1..ee11789 100644 --- a/sys/dev/sfxge/common/efx_mcdi.h +++ b/sys/dev/sfxge/common/efx_mcdi.h @@ -218,7 +218,7 @@ extern __checkReturn efx_rc_t efx_mcdi_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, - __in uint16_t period, + __in uint16_t period_ms, __in boolean_t events); diff --git a/sys/dev/sfxge/sfxge.c b/sys/dev/sfxge/sfxge.c index 8b9edb0..f91275e 100644 --- a/sys/dev/sfxge/sfxge.c +++ b/sys/dev/sfxge/sfxge.c @@ -94,14 +94,6 @@ SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN, &sfxge_tx_ring_entries, 0, "Maximum number of descriptors in a transmit ring"); -#define SFXGE_PARAM_STATS_UPDATE_PERIOD SFXGE_PARAM(stats_update_period) -static int sfxge_stats_update_period = SFXGE_CALLOUT_TICKS; -TUNABLE_INT(SFXGE_PARAM_STATS_UPDATE_PERIOD, - &sfxge_stats_update_period); -SYSCTL_INT(_hw_sfxge, OID_AUTO, stats_update_period, CTLFLAG_RDTUN, - &sfxge_stats_update_period, 0, - "netstat interface statistics update period in ticks"); - #define SFXGE_PARAM_RESTART_ATTEMPTS SFXGE_PARAM(restart_attempts) static int sfxge_restart_attempts = 3; TUNABLE_INT(SFXGE_PARAM_RESTART_ATTEMPTS, &sfxge_restart_attempts); @@ -558,7 +550,8 @@ sfxge_tick(void *arg) sfxge_port_update_stats(sc); sfxge_tx_update_stats(sc); - callout_reset(&sc->tick_callout, sfxge_stats_update_period, + callout_reset(&sc->tick_callout, + hz * sc->port.stats_update_period_ms / 1000, sfxge_tick, sc); } @@ -623,7 +616,8 @@ sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc) if ((rc = sfxge_port_ifmedia_init(sc)) != 0) goto fail; - callout_reset(&sc->tick_callout, sfxge_stats_update_period, + callout_reset(&sc->tick_callout, + hz * sc->port.stats_update_period_ms / 1000, sfxge_tick, sc); return (0); diff --git a/sys/dev/sfxge/sfxge.h b/sys/dev/sfxge/sfxge.h index 62b1dc3..1929318 100644 --- a/sys/dev/sfxge/sfxge.h +++ b/sys/dev/sfxge/sfxge.h @@ -158,7 +158,7 @@ enum sfxge_evq_state { #define SFXGE_EV_BATCH 16384 -#define SFXGE_CALLOUT_TICKS 100 +#define SFXGE_STATS_UPDATE_PERIOD_MS 1000 struct sfxge_evq { /* Structure members below are sorted by usage order */ @@ -247,6 +247,7 @@ struct sfxge_port { #endif struct sfxge_hw_stats phy_stats; struct sfxge_hw_stats mac_stats; + uint16_t stats_update_period_ms; 
efx_link_mode_t link_mode; uint8_t mcast_addrs[EFX_MAC_MULTICAST_LIST_MAX * EFX_MAC_ADDR_LEN]; diff --git a/sys/dev/sfxge/sfxge_port.c b/sys/dev/sfxge/sfxge_port.c index 6e753a2..edc88ec 100644 --- a/sys/dev/sfxge/sfxge_port.c +++ b/sys/dev/sfxge/sfxge_port.c @@ -43,6 +43,15 @@ __FBSDID("$FreeBSD$"); #include "sfxge.h" +#define SFXGE_PARAM_STATS_UPDATE_PERIOD_MS \ + SFXGE_PARAM(stats_update_period_ms) +static int sfxge_stats_update_period_ms = SFXGE_STATS_UPDATE_PERIOD_MS; +TUNABLE_INT(SFXGE_PARAM_STATS_UPDATE_PERIOD_MS, + &sfxge_stats_update_period_ms); +SYSCTL_INT(_hw_sfxge, OID_AUTO, stats_update_period_ms, CTLFLAG_RDTUN, + &sfxge_stats_update_period_ms, 0, + "netstat interface statistics update period in milliseconds"); + static int sfxge_phy_cap_mask(struct sfxge_softc *, int, uint32_t *); static int @@ -51,6 +60,7 @@ sfxge_mac_stat_update(struct sfxge_softc *sc) struct sfxge_port *port = &sc->port; efsys_mem_t *esmp = &(port->mac_stats.dma_buf); clock_t now; + unsigned int min_ticks; unsigned int count; int rc; @@ -61,8 +71,10 @@ sfxge_mac_stat_update(struct sfxge_softc *sc) goto out; } + min_ticks = (unsigned int)hz * port->stats_update_period_ms / 1000; + now = ticks; - if ((unsigned int)(now - port->mac_stats.update_time) < (unsigned int)hz) { + if ((unsigned int)(now - port->mac_stats.update_time) < min_ticks) { rc = 0; goto out; } @@ -483,9 +495,10 @@ sfxge_port_start(struct sfxge_softc *sc) sfxge_mac_filter_set_locked(sc); - /* Update MAC stats by DMA every second */ + /* Update MAC stats by DMA every period */ if ((rc = efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf, - 1000, B_FALSE)) != 0) + port->stats_update_period_ms, + B_FALSE)) != 0) goto fail6; if ((rc = efx_mac_drain(enp, B_FALSE)) != 0) @@ -642,6 +655,68 @@ sfxge_port_fini(struct sfxge_softc *sc) port->sc = NULL; } +static uint16_t +sfxge_port_stats_update_period_ms(struct sfxge_softc *sc) +{ + int period_ms = sfxge_stats_update_period_ms; + + if (period_ms < 0) { + device_printf(sc->dev, + "treat negative stats update period %d as 0 (disable)\n", + period_ms); + period_ms = 0; + } else if (period_ms > UINT16_MAX) { + device_printf(sc->dev, + "treat too big stats update period %d as %u\n", + period_ms, UINT16_MAX); + period_ms = UINT16_MAX; + } + + return period_ms; +} + +static int +sfxge_port_stats_update_period_ms_handler(SYSCTL_HANDLER_ARGS) +{ + struct sfxge_softc *sc; + struct sfxge_port *port; + unsigned int period_ms; + int error; + + sc = arg1; + port = &sc->port; + + if (req->newptr != NULL) { + error = SYSCTL_IN(req, &period_ms, sizeof(period_ms)); + if (error != 0) + return (error); + + if (period_ms > UINT16_MAX) + return (EINVAL); + + SFXGE_PORT_LOCK(port); + + if (port->stats_update_period_ms != period_ms) { + if (port->init_state == SFXGE_PORT_STARTED) + error = efx_mac_stats_periodic(sc->enp, + &port->mac_stats.dma_buf, + period_ms, B_FALSE); + if (error == 0) + port->stats_update_period_ms = period_ms; + } + + SFXGE_PORT_UNLOCK(port); + } else { + SFXGE_PORT_LOCK(port); + period_ms = port->stats_update_period_ms; + SFXGE_PORT_UNLOCK(port); + + error = SYSCTL_OUT(req, &period_ms, sizeof(period_ms)); + } + + return (error); +} + int sfxge_port_init(struct sfxge_softc *sc) { @@ -690,8 +765,14 @@ sfxge_port_init(struct sfxge_softc *sc) M_SFXGE, M_WAITOK | M_ZERO); if ((rc = sfxge_dma_alloc(sc, EFX_MAC_STATS_SIZE, mac_stats_buf)) != 0) goto fail2; + port->stats_update_period_ms = sfxge_port_stats_update_period_ms(sc); sfxge_mac_stat_init(sc); + SYSCTL_ADD_PROC(sysctl_ctx, 
SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, + "stats_update_period_ms", CTLTYPE_UINT|CTLFLAG_RW, sc, 0, + sfxge_port_stats_update_period_ms_handler, "IU", + "interface statistics refresh period"); + port->init_state = SFXGE_PORT_INITIALIZED; DBGPRINT(sc->dev, "success"); diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c index cc6fb17..d8ccbf1 100644 --- a/sys/dev/sfxge/sfxge_tx.c +++ b/sys/dev/sfxge/sfxge_tx.c @@ -356,8 +356,22 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf) KASSERT(!txq->blocked, ("txq->blocked")); +#if SFXGE_TX_PARSE_EARLY + /* + * If software TSO is used, we still need to copy packet header, + * even if we have already parsed it early before enqueue. + */ + if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) && + (txq->tso_fw_assisted == 0)) + prefetch_read_many(mbuf->m_data); +#else + /* + * Prefetch packet header since we need to parse it and extract + * IP ID, TCP sequence number and flags. + */ if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) prefetch_read_many(mbuf->m_data); +#endif if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) { rc = EINTR; diff --git a/sys/dev/sound/pci/als4000.c b/sys/dev/sound/pci/als4000.c index 0cd51e3..404a58f 100644 --- a/sys/dev/sound/pci/als4000.c +++ b/sys/dev/sound/pci/als4000.c @@ -760,8 +760,8 @@ static int als_resource_grab(device_t dev, struct sc_info *sc) { sc->regid = PCIR_BAR(0); - sc->reg = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->regid, 0, ~0, - ALS_CONFIG_SPACE_BYTES, RF_ACTIVE); + sc->reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->regid, + RF_ACTIVE); if (sc->reg == 0) { device_printf(dev, "unable to allocate register space\n"); goto bad; diff --git a/sys/dev/sound/pci/cs4281.c b/sys/dev/sound/pci/cs4281.c index 6e1b17d..60e89e3 100644 --- a/sys/dev/sound/pci/cs4281.c +++ b/sys/dev/sound/pci/cs4281.c @@ -790,12 +790,11 @@ cs4281_pci_attach(device_t dev) sc->regid = PCIR_BAR(0); sc->regtype = SYS_RES_MEMORY; - sc->reg = bus_alloc_resource(dev, sc->regtype, &sc->regid, - 0, ~0, CS4281PCI_BA0_SIZE, RF_ACTIVE); + sc->reg = bus_alloc_resource_any(dev, sc->regtype, &sc->regid, RF_ACTIVE); if (!sc->reg) { sc->regtype = SYS_RES_IOPORT; - sc->reg = bus_alloc_resource(dev, sc->regtype, &sc->regid, - 0, ~0, CS4281PCI_BA0_SIZE, RF_ACTIVE); + sc->reg = bus_alloc_resource_any(dev, sc->regtype, &sc->regid, + RF_ACTIVE); if (!sc->reg) { device_printf(dev, "unable to allocate register space\n"); goto bad; @@ -805,8 +804,8 @@ cs4281_pci_attach(device_t dev) sc->sh = rman_get_bushandle(sc->reg); sc->memid = PCIR_BAR(1); - sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid, 0, - ~0, CS4281PCI_BA1_SIZE, RF_ACTIVE); + sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid, + RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "unable to allocate fifo space\n"); goto bad; diff --git a/sys/dev/sound/pci/hda/hdaa_patches.c b/sys/dev/sound/pci/hda/hdaa_patches.c index 245815d..11d8e43 100644 --- a/sys/dev/sound/pci/hda/hdaa_patches.c +++ b/sys/dev/sound/pci/hda/hdaa_patches.c @@ -714,6 +714,15 @@ hdaa_patch_direct(struct hdaa_devinfo *devinfo) hda_command(dev, HDA_CMD_12BIT(0, devinfo->nid, 0xf88, 0xc0)); break; + case HDA_CODEC_ALC1150: + if (subid == 0xd9781462) { + /* Too low volume on MSI H170 GAMING M3. 
diff --git a/sys/dev/sound/pci/hda/hdaa_patches.c b/sys/dev/sound/pci/hda/hdaa_patches.c
index 245815d..11d8e43 100644
--- a/sys/dev/sound/pci/hda/hdaa_patches.c
+++ b/sys/dev/sound/pci/hda/hdaa_patches.c
@@ -714,6 +714,15 @@ hdaa_patch_direct(struct hdaa_devinfo *devinfo)
 			hda_command(dev, HDA_CMD_12BIT(0, devinfo->nid,
 			    0xf88, 0xc0));
 		break;
+	case HDA_CODEC_ALC1150:
+		if (subid == 0xd9781462) {
+			/* Too low volume on MSI H170 GAMING M3. */
+			hda_command(dev, HDA_CMD_SET_COEFF_INDEX(0, 0x20,
+			    0x07));
+			hda_command(dev, HDA_CMD_SET_PROCESSING_COEFF(0, 0x20,
+			    0x7cb));
+		}
+		break;
 	}
 	if (subid == APPLE_INTEL_MAC)
 		hda_command(dev, HDA_CMD_12BIT(0, devinfo->nid,
diff --git a/sys/dev/sound/pci/hda/hdac.h b/sys/dev/sound/pci/hda/hdac.h
index 912a996..55888f2 100644
--- a/sys/dev/sound/pci/hda/hdac.h
+++ b/sys/dev/sound/pci/hda/hdac.h
@@ -367,6 +367,7 @@
 #define HDA_CODEC_ALC889	HDA_CODEC_CONSTRUCT(REALTEK, 0x0889)
 #define HDA_CODEC_ALC892	HDA_CODEC_CONSTRUCT(REALTEK, 0x0892)
 #define HDA_CODEC_ALC899	HDA_CODEC_CONSTRUCT(REALTEK, 0x0899)
+#define HDA_CODEC_ALC1150	HDA_CODEC_CONSTRUCT(REALTEK, 0x0900)
 #define HDA_CODEC_ALCXXXX	HDA_CODEC_CONSTRUCT(REALTEK, 0xffff)

 /* Motorola */
diff --git a/sys/dev/sound/pci/hda/hdacc.c b/sys/dev/sound/pci/hda/hdacc.c
index c8d617e..d186a82 100644
--- a/sys/dev/sound/pci/hda/hdacc.c
+++ b/sys/dev/sound/pci/hda/hdacc.c
@@ -111,6 +111,7 @@ static const struct {
 	{ HDA_CODEC_ALC889, 0, "Realtek ALC889" },
 	{ HDA_CODEC_ALC892, 0, "Realtek ALC892" },
 	{ HDA_CODEC_ALC899, 0, "Realtek ALC899" },
+	{ HDA_CODEC_ALC1150, 0, "Realtek ALC1150" },
 	{ HDA_CODEC_AD1882, 0, "Analog Devices AD1882" },
 	{ HDA_CODEC_AD1882A, 0, "Analog Devices AD1882A" },
 	{ HDA_CODEC_AD1883, 0, "Analog Devices AD1883" },
diff --git a/sys/dev/sound/pci/vibes.c b/sys/dev/sound/pci/vibes.c
index 733e0d8..acb0e0a 100644
--- a/sys/dev/sound/pci/vibes.c
+++ b/sys/dev/sound/pci/vibes.c
@@ -739,9 +739,8 @@ sv_attach(device_t dev) {
 #endif
 	sc->enh_rid = SV_PCI_ENHANCED;
 	sc->enh_type = SYS_RES_IOPORT;
-	sc->enh_reg = bus_alloc_resource(dev, sc->enh_type,
-					 &sc->enh_rid, 0, ~0,
-					 SV_PCI_ENHANCED_SIZE, RF_ACTIVE);
+	sc->enh_reg = bus_alloc_resource_any(dev, sc->enh_type,
+					     &sc->enh_rid, RF_ACTIVE);
 	if (sc->enh_reg == NULL) {
 		device_printf(dev, "sv_attach: cannot allocate enh\n");
 		return ENXIO;
@@ -832,9 +831,8 @@ sv_attach(device_t dev) {
 	/* Cache resource short-cuts for dma_a */
 	sc->dmaa_rid = SV_PCI_DMAA;
 	sc->dmaa_type = SYS_RES_IOPORT;
-	sc->dmaa_reg = bus_alloc_resource(dev, sc->dmaa_type,
-					  &sc->dmaa_rid, 0, ~0,
-					  SV_PCI_ENHANCED_SIZE, RF_ACTIVE);
+	sc->dmaa_reg = bus_alloc_resource_any(dev, sc->dmaa_type,
+					      &sc->dmaa_rid, RF_ACTIVE);
 	if (sc->dmaa_reg == NULL) {
 		device_printf(dev, "sv_attach: cannot allocate dmaa\n");
 		goto fail;
@@ -851,9 +849,8 @@ sv_attach(device_t dev) {
 	/* Cache resource short-cuts for dma_c */
 	sc->dmac_rid = SV_PCI_DMAC;
 	sc->dmac_type = SYS_RES_IOPORT;
-	sc->dmac_reg = bus_alloc_resource(dev, sc->dmac_type,
-					  &sc->dmac_rid, 0, ~0,
-					  SV_PCI_ENHANCED_SIZE, RF_ACTIVE);
+	sc->dmac_reg = bus_alloc_resource_any(dev, sc->dmac_type,
+					      &sc->dmac_rid, RF_ACTIVE);
 	if (sc->dmac_reg == NULL) {
 		device_printf(dev, "sv_attach: cannot allocate dmac\n");
 		goto fail;
diff --git a/sys/dev/stge/if_stge.c b/sys/dev/stge/if_stge.c
index 3cc45b3..cad1a63 100644
--- a/sys/dev/stge/if_stge.c
+++ b/sys/dev/stge/if_stge.c
@@ -507,7 +507,7 @@ stge_attach(device_t dev)
 		}
 	}

-	if ((error = stge_dma_alloc(sc) != 0))
+	if ((error = stge_dma_alloc(sc)) != 0)
 		goto fail;

 	/*
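The if_stge.c hunk above is the same one-parenthesis precedence fix applied to the age and vte drivers elsewhere in this change: '!=' binds tighter than '=', so the old form stored the result of the comparison (0 or 1) in error instead of the function's return value. A standalone illustration of the difference, with made-up names, is below.

/*
 * Illustrative only: demonstrates why (error = f() != 0) loses the
 * real error code while ((error = f()) != 0) keeps it.
 */
#include <stdio.h>

static int
fails(void)
{
	return 35;	/* some nonzero errno-style code */
}

int
main(void)
{
	int error;

	if ((error = fails() != 0))	/* buggy: error becomes 1 */
		printf("buggy form:   error = %d\n", error);

	if ((error = fails()) != 0)	/* fixed: error keeps the real code */
		printf("correct form: error = %d\n", error);

	return (0);
}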
diff --git a/sys/dev/tws/tws.c b/sys/dev/tws/tws.c
index 6bb6dd1..73ab501 100644
--- a/sys/dev/tws/tws.c
+++ b/sys/dev/tws/tws.c
@@ -257,8 +257,8 @@ tws_attach(device_t dev)
 #ifndef TWS_PULL_MODE_ENABLE
     /* Allocate bus space for inbound mfa */
     sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
-    if ((sc->mfa_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
-                    &(sc->mfa_res_id), 0, ~0, 0x100000, RF_ACTIVE))
+    if ((sc->mfa_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+                    &(sc->mfa_res_id), RF_ACTIVE))
                                == NULL) {
         tws_log(sc, ALLOC_MEMORY_RES);
         goto attach_fail_2;
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index 354e62d..7d74dd8 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -2272,6 +2272,11 @@ usb_needs_explore(struct usb_bus *bus, uint8_t do_probe)

 	DPRINTF("\n");

+	if (cold != 0) {
+		DPRINTF("Cold\n");
+		return;
+	}
+
 	if (bus == NULL) {
 		DPRINTF("No bus pointer!\n");
 		return;
@@ -2337,6 +2342,26 @@ usb_needs_explore_all(void)
 }

 /*------------------------------------------------------------------------*
+ *	usb_needs_explore_init
+ *
+ * This function will ensure that the USB controllers are not enumerated
+ * until the "cold" variable is cleared.
+ *------------------------------------------------------------------------*/
+static void
+usb_needs_explore_init(void *arg)
+{
+	/*
+	 * The cold variable should be cleared prior to this function
+	 * being called:
+	 */
+	if (cold == 0)
+		usb_needs_explore_all();
+	else
+		DPRINTFN(-1, "Cold variable is still set!\n");
+}
+SYSINIT(usb_needs_explore_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, usb_needs_explore_init, NULL);
+
+/*------------------------------------------------------------------------*
  *	usb_bus_power_update
  *
  * This function will ensure that all USB devices on the given bus are
diff --git a/sys/dev/usb/usb_process.c b/sys/dev/usb/usb_process.c
index e70166c..0f17485 100644
--- a/sys/dev/usb/usb_process.c
+++ b/sys/dev/usb/usb_process.c
@@ -455,14 +455,15 @@ usb_proc_drain(struct usb_process *up)
 			up->up_csleep = 0;
 			cv_signal(&up->up_cv);
 		}
+#ifndef EARLY_AP_STARTUP
 		/* Check if we are still cold booted */
-
 		if (cold) {
 			USB_THREAD_SUSPEND(up->up_ptr);
 			printf("WARNING: A USB process has "
 			    "been left suspended\n");
 			break;
 		}
+#endif
 		cv_wait(&up->up_cv, up->up_mtx);
 	}
 	/* Check if someone is waiting - should not happen */
diff --git a/sys/dev/vte/if_vte.c b/sys/dev/vte/if_vte.c
index dfc06e5..5d06d4f 100644
--- a/sys/dev/vte/if_vte.c
+++ b/sys/dev/vte/if_vte.c
@@ -426,7 +426,7 @@ vte_attach(device_t dev)
 	/* Reset the ethernet controller. */
 	vte_reset(sc);

-	if ((error = vte_dma_alloc(sc) != 0))
+	if ((error = vte_dma_alloc(sc)) != 0)
 		goto fail;

 	/* Create device sysctl node. */
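The usb_hub.c hunk above refuses to schedule bus exploration while the kernel is still cold and instead re-triggers it from a SYSINIT that runs once the scheduler is up and cold has been cleared. A generic sketch of that deferral pattern follows; the function and SYSINIT names here are illustrative only and are not part of the USB code.

/*
 * Sketch, assumed names: defer work that must not run while 'cold' is
 * set behind a SYSINIT at SI_SUB_KICK_SCHEDULER, which executes after
 * the scheduler starts and 'cold' has been cleared.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

static void
example_deferred_start(void *arg __unused)
{
	if (cold != 0) {
		printf("example: still cold, skipping deferred start\n");
		return;
	}
	/* Safe point to kick off enumeration or worker threads. */
	printf("example: deferred start running\n");
}
SYSINIT(example_deferred_start, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY,
    example_deferred_start, NULL);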