From 48fe676ff5ddc104ebc346eebf48c7c0e285f833 Mon Sep 17 00:00:00 2001 From: kmacy Date: Sat, 23 Feb 2008 01:06:17 +0000 Subject: - update firmware to 5.0 - add support for T3C - add DDP support (zero-copy receive) - fix TOE transmit of large requests - fix shutdown so that sockets don't remain in CLOSING state indefinitely - register listeners when an interface is brought up after tom is loaded - fix setting of multicast filter - enable link at device attach - exit tick handler if shutdown is in progress - add helper for logging TCB - add sysctls for dumping transmit queues - note that TOE wxill not be MFC'd until after 7.0 has been finalized MFC after: 3 days --- sys/dev/cxgb/common/cxgb_ael1002.c | 47 +- sys/dev/cxgb/common/cxgb_common.h | 40 +- sys/dev/cxgb/common/cxgb_ctl_defs.h | 4 +- sys/dev/cxgb/common/cxgb_firmware_exports.h | 2 + sys/dev/cxgb/common/cxgb_mc5.c | 19 +- sys/dev/cxgb/common/cxgb_mv88e1xxx.c | 35 +- sys/dev/cxgb/common/cxgb_regs.h | 1717 +++++++++++++++++++-------- sys/dev/cxgb/common/cxgb_t3_cpl.h | 10 +- sys/dev/cxgb/common/cxgb_t3_hw.c | 373 ++++-- sys/dev/cxgb/common/cxgb_tcb.h | 7 +- sys/dev/cxgb/common/cxgb_version.h | 2 +- sys/dev/cxgb/common/cxgb_vsc8211.c | 176 ++- sys/dev/cxgb/common/cxgb_xgmac.c | 143 ++- sys/dev/cxgb/cxgb_adapter.h | 27 +- sys/dev/cxgb/cxgb_ioctl.h | 21 +- sys/dev/cxgb/cxgb_l2t.c | 27 +- sys/dev/cxgb/cxgb_l2t.h | 2 - sys/dev/cxgb/cxgb_main.c | 316 +++-- sys/dev/cxgb/cxgb_multiq.c | 13 +- sys/dev/cxgb/cxgb_offload.c | 135 ++- sys/dev/cxgb/cxgb_osdep.h | 47 +- sys/dev/cxgb/cxgb_sge.c | 277 +++-- sys/dev/cxgb/sys/cxgb_support.c | 7 + sys/dev/cxgb/sys/mbufq.h | 2 +- sys/dev/cxgb/sys/mvec.h | 29 +- sys/dev/cxgb/sys/uipc_mvec.c | 4 +- sys/dev/cxgb/t3cdev.h | 3 +- sys/dev/cxgb/t3fw-4.7.0.bin.gz.uu | 451 ------- sys/dev/cxgb/t3fw-5.0.0.bin.gz.uu | 496 ++++++++ sys/dev/cxgb/ulp/toecore/cxgb_toedev.h | 4 +- sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c | 1571 ++++++++++++++++++------ sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c | 729 
+++++++++--- sys/dev/cxgb/ulp/tom/cxgb_ddp.c | 735 ++++++++++++ sys/dev/cxgb/ulp/tom/cxgb_defs.h | 10 +- sys/dev/cxgb/ulp/tom/cxgb_listen.c | 22 +- sys/dev/cxgb/ulp/tom/cxgb_t3_ddp.h | 52 +- sys/dev/cxgb/ulp/tom/cxgb_tcp_subr.c | 694 ----------- sys/dev/cxgb/ulp/tom/cxgb_tcp_usrreq.c | 1362 --------------------- sys/dev/cxgb/ulp/tom/cxgb_toepcb.h | 81 +- sys/dev/cxgb/ulp/tom/cxgb_tom.c | 102 +- sys/dev/cxgb/ulp/tom/cxgb_tom.h | 2 + sys/dev/cxgb/ulp/tom/cxgb_tom_sysctl.c | 18 +- sys/dev/cxgb/ulp/tom/cxgb_vm.c | 180 +++ sys/dev/cxgb/ulp/tom/cxgb_vm.h | 40 + sys/modules/cxgb/cxgb/Makefile | 18 +- sys/modules/cxgb/tom/Makefile | 6 +- 46 files changed, 5952 insertions(+), 4106 deletions(-) delete mode 100644 sys/dev/cxgb/t3fw-4.7.0.bin.gz.uu create mode 100644 sys/dev/cxgb/t3fw-5.0.0.bin.gz.uu create mode 100644 sys/dev/cxgb/ulp/tom/cxgb_ddp.c delete mode 100644 sys/dev/cxgb/ulp/tom/cxgb_tcp_subr.c delete mode 100644 sys/dev/cxgb/ulp/tom/cxgb_tcp_usrreq.c create mode 100644 sys/dev/cxgb/ulp/tom/cxgb_vm.c create mode 100644 sys/dev/cxgb/ulp/tom/cxgb_vm.h diff --git a/sys/dev/cxgb/common/cxgb_ael1002.c b/sys/dev/cxgb/common/cxgb_ael1002.c index c570ed3..a5a258b 100644 --- a/sys/dev/cxgb/common/cxgb_ael1002.c +++ b/sys/dev/cxgb/common/cxgb_ael1002.c @@ -36,6 +36,9 @@ __FBSDID("$FreeBSD$"); #include #endif +#undef msleep +#define msleep t3_os_sleep + enum { AEL100X_TX_DISABLE = 9, AEL100X_TX_CONFIG1 = 0xc002, @@ -52,9 +55,9 @@ static void ael100x_txon(struct cphy *phy) { int tx_on_gpio = phy->addr == 0 ? 
F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL; - t3_os_sleep(100); + msleep(100); t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio); - t3_os_sleep(30); + msleep(30); } static int ael1002_power_down(struct cphy *phy, int enable) @@ -115,7 +118,6 @@ static int ael100x_get_link_status(struct cphy *phy, int *link_ok, #ifdef C99_NOT_SUPPORTED static struct cphy_ops ael1002_ops = { - NULL, ael1002_reset, ael1002_intr_noop, ael1002_intr_noop, @@ -141,11 +143,14 @@ static struct cphy_ops ael1002_ops = { }; #endif -void t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, - const struct mdio_ops *mdio_ops) +int t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, + const struct mdio_ops *mdio_ops) { - cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops); + cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE, + "10GBASE-XR"); ael100x_txon(phy); + return 0; } static int ael1006_reset(struct cphy *phy, int wait) @@ -188,7 +193,6 @@ static int ael1006_power_down(struct cphy *phy, int enable) #ifdef C99_NOT_SUPPORTED static struct cphy_ops ael1006_ops = { - NULL, ael1006_reset, ael1006_intr_enable, ael1006_intr_disable, @@ -214,16 +218,18 @@ static struct cphy_ops ael1006_ops = { }; #endif -void t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, - const struct mdio_ops *mdio_ops) +int t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, + const struct mdio_ops *mdio_ops) { - cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops); + cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE, + "10GBASE-SR"); ael100x_txon(phy); + return 0; } #ifdef C99_NOT_SUPPORTED static struct cphy_ops qt2045_ops = { - NULL, ael1006_reset, ael1006_intr_enable, ael1006_intr_disable, @@ -249,12 +255,14 @@ static struct cphy_ops qt2045_ops = { }; #endif -void t3_qt2045_phy_prep(struct 
cphy *phy, adapter_t *adapter, int phy_addr, - const struct mdio_ops *mdio_ops) +int t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, + const struct mdio_ops *mdio_ops) { unsigned int stat; - cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops); + cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, + "10GBASE-CX4"); /* * Some cards where the PHY is supposed to be at address 0 actually @@ -263,6 +271,7 @@ void t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) && stat == 0xffff) phy->addr = 1; + return 0; } static int xaui_direct_reset(struct cphy *phy, int wait) @@ -300,7 +309,6 @@ static int xaui_direct_power_down(struct cphy *phy, int enable) #ifdef C99_NOT_SUPPORTED static struct cphy_ops xaui_direct_ops = { - NULL, xaui_direct_reset, ael1002_intr_noop, ael1002_intr_noop, @@ -326,8 +334,11 @@ static struct cphy_ops xaui_direct_ops = { }; #endif -void t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, - const struct mdio_ops *mdio_ops) +int t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, + const struct mdio_ops *mdio_ops) { - cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops); + cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, + "10GBASE-CX4"); + return 0; } diff --git a/sys/dev/cxgb/common/cxgb_common.h b/sys/dev/cxgb/common/cxgb_common.h index 0b4b6aa..f1b5075 100644 --- a/sys/dev/cxgb/common/cxgb_common.h +++ b/sys/dev/cxgb/common/cxgb_common.h @@ -98,8 +98,8 @@ enum { (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO) enum { - FW_VERSION_MAJOR = 4, - FW_VERSION_MINOR = 7, + FW_VERSION_MAJOR = 5, + FW_VERSION_MINOR = 0, FW_VERSION_MICRO = 0 }; @@ -157,10 +157,10 @@ struct adapter_info { }; struct port_type_info { - void (*phy_prep)(struct cphy *phy, 
adapter_t *adapter, int phy_addr, - const struct mdio_ops *ops); - unsigned int caps; - const char *desc; + int (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr, + const struct mdio_ops *ops); + + }; struct mc5_stats { @@ -508,7 +508,6 @@ enum { /* PHY operations */ struct cphy_ops { - void (*destroy)(struct cphy *phy); int (*reset)(struct cphy *phy, int wait); int (*intr_enable)(struct cphy *phy); @@ -530,7 +529,9 @@ struct cphy_ops { /* A PHY instance */ struct cphy { int addr; /* PHY address */ + unsigned int caps; /* PHY capabilities */ adapter_t *adapter; /* associated adapter */ + const char *desc; /* PHY description */ unsigned long fifo_errors; /* FIFO over/under-flows */ const struct cphy_ops *ops; /* PHY operations */ int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr, @@ -555,10 +556,13 @@ static inline int mdio_write(struct cphy *phy, int mmd, int reg, /* Convenience initializer */ static inline void cphy_init(struct cphy *phy, adapter_t *adapter, int phy_addr, struct cphy_ops *phy_ops, - const struct mdio_ops *mdio_ops) + const struct mdio_ops *mdio_ops, unsigned int caps, + const char *desc) { phy->adapter = adapter; phy->addr = phy_addr; + phy->caps = caps; + phy->desc = desc; phy->ops = phy_ops; if (mdio_ops) { phy->mdio_read = mdio_ops->read; @@ -667,11 +671,12 @@ int t3_seeprom_wp(adapter_t *adapter, int enable); int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t3_get_tp_version(adapter_t *adapter, u32 *vers); -int t3_check_tpsram_version(adapter_t *adapter); +int t3_check_tpsram_version(adapter_t *adapter, int *must_load); int t3_check_tpsram(adapter_t *adapter, const u8 *tp_ram, unsigned int size); int t3_load_fw(adapter_t *adapter, const const u8 *fw_data, unsigned int size); +int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size); int t3_get_fw_version(adapter_t *adapter, u32 *vers); -int t3_check_fw_version(adapter_t *adapter); +int 
t3_check_fw_version(adapter_t *adapter, int *must_load); int t3_init_hw(adapter_t *adapter, u32 fw_params); void mac_prep(struct cmac *mac, adapter_t *adapter, int index); void early_hw_init(adapter_t *adapter, const struct adapter_info *ai); @@ -769,18 +774,21 @@ int t3_vsc7323_set_mtu(adapter_t *adap, unsigned int mtu, int port); int t3_vsc7323_set_addr(adapter_t *adap, u8 addr[6], int port); int t3_vsc7323_enable(adapter_t *adap, int port, int which); int t3_vsc7323_disable(adapter_t *adap, int port, int which); + +int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert); + const struct mac_stats *t3_vsc7323_update_stats(struct cmac *mac); -void t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, +int t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, const struct mdio_ops *mdio_ops); -void t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, +int t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, const struct mdio_ops *mdio_ops); -void t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, +int t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, const struct mdio_ops *mdio_ops); -void t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, +int t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, const struct mdio_ops *mdio_ops); -void t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, +int t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, const struct mdio_ops *mdio_ops); -void t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, +int t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, const struct mdio_ops *mdio_ops); #endif /* __CHELSIO_COMMON_H */ diff --git a/sys/dev/cxgb/common/cxgb_ctl_defs.h b/sys/dev/cxgb/common/cxgb_ctl_defs.h index 3a2eb4f..11ed65a 100644 --- a/sys/dev/cxgb/common/cxgb_ctl_defs.h +++ 
b/sys/dev/cxgb/common/cxgb_ctl_defs.h @@ -125,8 +125,8 @@ struct rdma_info { unsigned int rqt_top; /* RQT last entry address */ unsigned int udbell_len; /* user doorbell region length */ unsigned long udbell_physbase; /* user doorbell physical start addr */ - void volatile *kdb_addr; /* kernel doorbell register address */ - struct pci_dev *pdev; /* associated PCI device */ + void *kdb_addr; /* kernel doorbell register address */ + struct device *pdev; /* associated PCI device */ }; /* diff --git a/sys/dev/cxgb/common/cxgb_firmware_exports.h b/sys/dev/cxgb/common/cxgb_firmware_exports.h index e361c95..55c5078 100644 --- a/sys/dev/cxgb/common/cxgb_firmware_exports.h +++ b/sys/dev/cxgb/common/cxgb_firmware_exports.h @@ -74,6 +74,8 @@ $FreeBSD$ #define FW_WROPCODE_MNGT 0x1D #define FW_MNGTOPCODE_PKTSCHED_SET 0x00 +#define FW_MNGTOPCODE_WRC_SET 0x01 +#define FW_MNGTOPCODE_TUNNEL_CR_FLUSH 0x02 /* Maximum size of a WR sent from the host, limited by the SGE. * diff --git a/sys/dev/cxgb/common/cxgb_mc5.c b/sys/dev/cxgb/common/cxgb_mc5.c index d3eed4a..0e40aca 100644 --- a/sys/dev/cxgb/common/cxgb_mc5.c +++ b/sys/dev/cxgb/common/cxgb_mc5.c @@ -384,7 +384,7 @@ int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, return err; } -/* +/** * read_mc5_range - dump a part of the memory managed by MC5 * @mc5: the MC5 handle * @start: the start address for the dump @@ -425,8 +425,11 @@ int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, #define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR) -/* - * MC5 interrupt handler +/** + * t3_mc5_intr_handler - MC5 interrupt handler + * @mc5: the MC5 handle + * + * The MC5 interrupt handler. 
*/ void t3_mc5_intr_handler(struct mc5 *mc5) { @@ -462,6 +465,16 @@ void t3_mc5_intr_handler(struct mc5 *mc5) t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause); } + +/** + * t3_mc5_prep - initialize the SW state for MC5 + * @adapter: the adapter + * @mc5: the MC5 handle + * @mode: whether the TCAM will be in 72- or 144-bit mode + * + * Initialize the SW state associated with MC5. Among other things + * this determines the size of the attached TCAM. + */ void __devinit t3_mc5_prep(adapter_t *adapter, struct mc5 *mc5, int mode) { #define K * 1024 diff --git a/sys/dev/cxgb/common/cxgb_mv88e1xxx.c b/sys/dev/cxgb/common/cxgb_mv88e1xxx.c index 6cee581..8777b82 100644 --- a/sys/dev/cxgb/common/cxgb_mv88e1xxx.c +++ b/sys/dev/cxgb/common/cxgb_mv88e1xxx.c @@ -221,6 +221,16 @@ static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok, return 0; } +static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex) +{ + int err = t3_set_phy_speed_duplex(phy, speed, duplex); + + /* PHY needs reset for new settings to take effect */ + if (!err) + err = mv88e1xxx_reset(phy, 0); + return err; +} + static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable) { /* @@ -258,7 +268,6 @@ static int mv88e1xxx_intr_handler(struct cphy *cphy) #ifdef C99_NOT_SUPPORTED static struct cphy_ops mv88e1xxx_ops = { - NULL, mv88e1xxx_reset, mv88e1xxx_intr_enable, mv88e1xxx_intr_disable, @@ -268,7 +277,7 @@ static struct cphy_ops mv88e1xxx_ops = { mv88e1xxx_autoneg_restart, t3_phy_advertise, mv88e1xxx_set_loopback, - t3_set_phy_speed_duplex, + mv88e1xxx_set_speed_duplex, mv88e1xxx_get_link_status, mv88e1xxx_power_down, }; @@ -283,20 +292,28 @@ static struct cphy_ops mv88e1xxx_ops = { .autoneg_restart = mv88e1xxx_autoneg_restart, .advertise = t3_phy_advertise, .set_loopback = mv88e1xxx_set_loopback, - .set_speed_duplex = t3_set_phy_speed_duplex, + .set_speed_duplex = mv88e1xxx_set_speed_duplex, .get_link_status = mv88e1xxx_get_link_status, .power_down = 
mv88e1xxx_power_down, }; #endif -void t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, +int t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, const struct mdio_ops *mdio_ops) { - cphy_init(phy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops); + int err; + + cphy_init(phy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops, + SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | + SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T"); /* Configure copper PHY transmitter as class A to reduce EMI. */ - mdio_write(phy, 0, MV88E1XXX_EXTENDED_ADDR, 0xb); - mdio_write(phy, 0, MV88E1XXX_EXTENDED_DATA, 0x8004); - - mv88e1xxx_downshift_set(phy, 1); /* Enable downshift */ + err = mdio_write(phy, 0, MV88E1XXX_EXTENDED_ADDR, 0xb); + + if (!err) + err = mdio_write(phy, 0, MV88E1XXX_EXTENDED_DATA, 0x8004); + if (!err) + err = mv88e1xxx_downshift_set(phy, 1); /* Enable downshift */ + return err; } diff --git a/sys/dev/cxgb/common/cxgb_regs.h b/sys/dev/cxgb/common/cxgb_regs.h index 63744c4..dd8db9a 100644 --- a/sys/dev/cxgb/common/cxgb_regs.h +++ b/sys/dev/cxgb/common/cxgb_regs.h @@ -35,6 +35,38 @@ $FreeBSD$ #define A_SG_CONTROL 0x0 +#define S_CONGMODE 29 +#define V_CONGMODE(x) ((x) << S_CONGMODE) +#define F_CONGMODE V_CONGMODE(1U) + +#define S_TNLFLMODE 28 +#define V_TNLFLMODE(x) ((x) << S_TNLFLMODE) +#define F_TNLFLMODE V_TNLFLMODE(1U) + +#define S_FATLPERREN 27 +#define V_FATLPERREN(x) ((x) << S_FATLPERREN) +#define F_FATLPERREN V_FATLPERREN(1U) + +#define S_URGTNL 26 +#define V_URGTNL(x) ((x) << S_URGTNL) +#define F_URGTNL V_URGTNL(1U) + +#define S_NEWNOTIFY 25 +#define V_NEWNOTIFY(x) ((x) << S_NEWNOTIFY) +#define F_NEWNOTIFY V_NEWNOTIFY(1U) + +#define S_AVOIDCQOVFL 24 +#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL) +#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U) + +#define S_OPTONEINTMULTQ 23 +#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ) +#define F_OPTONEINTMULTQ 
V_OPTONEINTMULTQ(1U) + +#define S_CQCRDTCTRL 22 +#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL) +#define F_CQCRDTCTRL V_CQCRDTCTRL(1U) + #define S_EGRENUPBP 21 #define V_EGRENUPBP(x) ((x) << S_EGRENUPBP) #define F_EGRENUPBP V_EGRENUPBP(1U) @@ -94,26 +126,6 @@ $FreeBSD$ #define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE) #define F_GLOBALENABLE V_GLOBALENABLE(1U) -#define S_URGTNL 26 -#define V_URGTNL(x) ((x) << S_URGTNL) -#define F_URGTNL V_URGTNL(1U) - -#define S_NEWNOTIFY 25 -#define V_NEWNOTIFY(x) ((x) << S_NEWNOTIFY) -#define F_NEWNOTIFY V_NEWNOTIFY(1U) - -#define S_AVOIDCQOVFL 24 -#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL) -#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U) - -#define S_OPTONEINTMULTQ 23 -#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ) -#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U) - -#define S_CQCRDTCTRL 22 -#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL) -#define F_CQCRDTCTRL V_CQCRDTCTRL(1U) - #define A_SG_KDOORBELL 0x4 #define S_SELEGRCNTX 31 @@ -366,11 +378,6 @@ $FreeBSD$ #define A_SG_EGR_PRI_CNT 0x50 -#define S_EGRPRICNT 0 -#define M_EGRPRICNT 0x1f -#define V_EGRPRICNT(x) ((x) << S_EGRPRICNT) -#define G_EGRPRICNT(x) (((x) >> S_EGRPRICNT) & M_EGRPRICNT) - #define S_EGRERROPCODE 24 #define M_EGRERROPCODE 0xff #define V_EGRERROPCODE(x) ((x) << S_EGRERROPCODE) @@ -386,6 +393,11 @@ $FreeBSD$ #define V_EGRLOOPCODE(x) ((x) << S_EGRLOOPCODE) #define G_EGRLOOPCODE(x) (((x) >> S_EGRLOOPCODE) & M_EGRLOOPCODE) +#define S_EGRPRICNT 0 +#define M_EGRPRICNT 0x1f +#define V_EGRPRICNT(x) ((x) << S_EGRPRICNT) +#define G_EGRPRICNT(x) (((x) >> S_EGRPRICNT) & M_EGRPRICNT) + #define A_SG_EGR_RCQ_DRB_THRSH 0x54 #define S_HIRCQDRBTHRSH 16 @@ -407,6 +419,56 @@ $FreeBSD$ #define A_SG_INT_CAUSE 0x5c +#define S_HIRCQPARITYERROR 31 +#define V_HIRCQPARITYERROR(x) ((x) << S_HIRCQPARITYERROR) +#define F_HIRCQPARITYERROR V_HIRCQPARITYERROR(1U) + +#define S_LORCQPARITYERROR 30 +#define V_LORCQPARITYERROR(x) ((x) << S_LORCQPARITYERROR) +#define F_LORCQPARITYERROR 
V_LORCQPARITYERROR(1U) + +#define S_HIDRBPARITYERROR 29 +#define V_HIDRBPARITYERROR(x) ((x) << S_HIDRBPARITYERROR) +#define F_HIDRBPARITYERROR V_HIDRBPARITYERROR(1U) + +#define S_LODRBPARITYERROR 28 +#define V_LODRBPARITYERROR(x) ((x) << S_LODRBPARITYERROR) +#define F_LODRBPARITYERROR V_LODRBPARITYERROR(1U) + +#define S_FLPARITYERROR 22 +#define M_FLPARITYERROR 0x3f +#define V_FLPARITYERROR(x) ((x) << S_FLPARITYERROR) +#define G_FLPARITYERROR(x) (((x) >> S_FLPARITYERROR) & M_FLPARITYERROR) + +#define S_ITPARITYERROR 20 +#define M_ITPARITYERROR 0x3 +#define V_ITPARITYERROR(x) ((x) << S_ITPARITYERROR) +#define G_ITPARITYERROR(x) (((x) >> S_ITPARITYERROR) & M_ITPARITYERROR) + +#define S_IRPARITYERROR 19 +#define V_IRPARITYERROR(x) ((x) << S_IRPARITYERROR) +#define F_IRPARITYERROR V_IRPARITYERROR(1U) + +#define S_RCPARITYERROR 18 +#define V_RCPARITYERROR(x) ((x) << S_RCPARITYERROR) +#define F_RCPARITYERROR V_RCPARITYERROR(1U) + +#define S_OCPARITYERROR 17 +#define V_OCPARITYERROR(x) ((x) << S_OCPARITYERROR) +#define F_OCPARITYERROR V_OCPARITYERROR(1U) + +#define S_CPPARITYERROR 16 +#define V_CPPARITYERROR(x) ((x) << S_CPPARITYERROR) +#define F_CPPARITYERROR V_CPPARITYERROR(1U) + +#define S_R_REQ_FRAMINGERROR 15 +#define V_R_REQ_FRAMINGERROR(x) ((x) << S_R_REQ_FRAMINGERROR) +#define F_R_REQ_FRAMINGERROR V_R_REQ_FRAMINGERROR(1U) + +#define S_UC_REQ_FRAMINGERROR 14 +#define V_UC_REQ_FRAMINGERROR(x) ((x) << S_UC_REQ_FRAMINGERROR) +#define F_UC_REQ_FRAMINGERROR V_UC_REQ_FRAMINGERROR(1U) + #define S_HICTLDRBDROPERR 13 #define V_HICTLDRBDROPERR(x) ((x) << S_HICTLDRBDROPERR) #define F_HICTLDRBDROPERR V_HICTLDRBDROPERR(1U) @@ -582,6 +644,10 @@ $FreeBSD$ #define A_PCIX_INT_CAUSE 0x84 #define A_PCIX_CFG 0x88 +#define S_DMASTOPEN 19 +#define V_DMASTOPEN(x) ((x) << S_DMASTOPEN) +#define F_DMASTOPEN V_DMASTOPEN(1U) + #define S_CLIDECEN 18 #define V_CLIDECEN(x) ((x) << S_CLIDECEN) #define F_CLIDECEN V_CLIDECEN(1U) @@ -721,16 +787,175 @@ $FreeBSD$ #define V_SLEEPMODE0(x) ((x) << 
S_SLEEPMODE0) #define F_SLEEPMODE0 V_SLEEPMODE0(1U) +#define A_PCIX_STAT0 0x98 + +#define S_PIOREQFIFOLEVEL 26 +#define M_PIOREQFIFOLEVEL 0x3f +#define V_PIOREQFIFOLEVEL(x) ((x) << S_PIOREQFIFOLEVEL) +#define G_PIOREQFIFOLEVEL(x) (((x) >> S_PIOREQFIFOLEVEL) & M_PIOREQFIFOLEVEL) + +#define S_RFINIST 24 +#define M_RFINIST 0x3 +#define V_RFINIST(x) ((x) << S_RFINIST) +#define G_RFINIST(x) (((x) >> S_RFINIST) & M_RFINIST) + +#define S_RFRESPRDST 22 +#define M_RFRESPRDST 0x3 +#define V_RFRESPRDST(x) ((x) << S_RFRESPRDST) +#define G_RFRESPRDST(x) (((x) >> S_RFRESPRDST) & M_RFRESPRDST) + +#define S_TARCST 19 +#define M_TARCST 0x7 +#define V_TARCST(x) ((x) << S_TARCST) +#define G_TARCST(x) (((x) >> S_TARCST) & M_TARCST) + +#define S_TARXST 16 +#define M_TARXST 0x7 +#define V_TARXST(x) ((x) << S_TARXST) +#define G_TARXST(x) (((x) >> S_TARXST) & M_TARXST) + +#define S_WFREQWRST 13 +#define M_WFREQWRST 0x7 +#define V_WFREQWRST(x) ((x) << S_WFREQWRST) +#define G_WFREQWRST(x) (((x) >> S_WFREQWRST) & M_WFREQWRST) + +#define S_WFRESPFIFOEMPTY 12 +#define V_WFRESPFIFOEMPTY(x) ((x) << S_WFRESPFIFOEMPTY) +#define F_WFRESPFIFOEMPTY V_WFRESPFIFOEMPTY(1U) + +#define S_WFREQFIFOEMPTY 11 +#define V_WFREQFIFOEMPTY(x) ((x) << S_WFREQFIFOEMPTY) +#define F_WFREQFIFOEMPTY V_WFREQFIFOEMPTY(1U) + +#define S_RFRESPFIFOEMPTY 10 +#define V_RFRESPFIFOEMPTY(x) ((x) << S_RFRESPFIFOEMPTY) +#define F_RFRESPFIFOEMPTY V_RFRESPFIFOEMPTY(1U) + +#define S_RFREQFIFOEMPTY 9 +#define V_RFREQFIFOEMPTY(x) ((x) << S_RFREQFIFOEMPTY) +#define F_RFREQFIFOEMPTY V_RFREQFIFOEMPTY(1U) + +#define S_PIORESPFIFOLEVEL 7 +#define M_PIORESPFIFOLEVEL 0x3 +#define V_PIORESPFIFOLEVEL(x) ((x) << S_PIORESPFIFOLEVEL) +#define G_PIORESPFIFOLEVEL(x) (((x) >> S_PIORESPFIFOLEVEL) & M_PIORESPFIFOLEVEL) + +#define S_CFRESPFIFOEMPTY 6 +#define V_CFRESPFIFOEMPTY(x) ((x) << S_CFRESPFIFOEMPTY) +#define F_CFRESPFIFOEMPTY V_CFRESPFIFOEMPTY(1U) + +#define S_CFREQFIFOEMPTY 5 +#define V_CFREQFIFOEMPTY(x) ((x) << S_CFREQFIFOEMPTY) +#define 
F_CFREQFIFOEMPTY V_CFREQFIFOEMPTY(1U) + +#define S_VPDRESPFIFOEMPTY 4 +#define V_VPDRESPFIFOEMPTY(x) ((x) << S_VPDRESPFIFOEMPTY) +#define F_VPDRESPFIFOEMPTY V_VPDRESPFIFOEMPTY(1U) + +#define S_VPDREQFIFOEMPTY 3 +#define V_VPDREQFIFOEMPTY(x) ((x) << S_VPDREQFIFOEMPTY) +#define F_VPDREQFIFOEMPTY V_VPDREQFIFOEMPTY(1U) + +#define S_PIO_RSPPND 2 +#define V_PIO_RSPPND(x) ((x) << S_PIO_RSPPND) +#define F_PIO_RSPPND V_PIO_RSPPND(1U) + +#define S_DLYTRNPND 1 +#define V_DLYTRNPND(x) ((x) << S_DLYTRNPND) +#define F_DLYTRNPND V_DLYTRNPND(1U) + +#define S_SPLTRNPND 0 +#define V_SPLTRNPND(x) ((x) << S_SPLTRNPND) +#define F_SPLTRNPND V_SPLTRNPND(1U) + +#define A_PCIX_STAT1 0x9c + +#define S_WFINIST 26 +#define M_WFINIST 0xf +#define V_WFINIST(x) ((x) << S_WFINIST) +#define G_WFINIST(x) (((x) >> S_WFINIST) & M_WFINIST) + +#define S_ARBST 23 +#define M_ARBST 0x7 +#define V_ARBST(x) ((x) << S_ARBST) +#define G_ARBST(x) (((x) >> S_ARBST) & M_ARBST) + +#define S_PMIST 21 +#define M_PMIST 0x3 +#define V_PMIST(x) ((x) << S_PMIST) +#define G_PMIST(x) (((x) >> S_PMIST) & M_PMIST) + +#define S_CALST 19 +#define M_CALST 0x3 +#define V_CALST(x) ((x) << S_CALST) +#define G_CALST(x) (((x) >> S_CALST) & M_CALST) + +#define S_CFREQRDST 17 +#define M_CFREQRDST 0x3 +#define V_CFREQRDST(x) ((x) << S_CFREQRDST) +#define G_CFREQRDST(x) (((x) >> S_CFREQRDST) & M_CFREQRDST) + +#define S_CFINIST 15 +#define M_CFINIST 0x3 +#define V_CFINIST(x) ((x) << S_CFINIST) +#define G_CFINIST(x) (((x) >> S_CFINIST) & M_CFINIST) + +#define S_CFRESPRDST 13 +#define M_CFRESPRDST 0x3 +#define V_CFRESPRDST(x) ((x) << S_CFRESPRDST) +#define G_CFRESPRDST(x) (((x) >> S_CFRESPRDST) & M_CFRESPRDST) + +#define S_INICST 10 +#define M_INICST 0x7 +#define V_INICST(x) ((x) << S_INICST) +#define G_INICST(x) (((x) >> S_INICST) & M_INICST) + +#define S_INIXST 7 +#define M_INIXST 0x7 +#define V_INIXST(x) ((x) << S_INIXST) +#define G_INIXST(x) (((x) >> S_INIXST) & M_INIXST) + +#define S_INTST 4 +#define M_INTST 0x7 +#define V_INTST(x) 
((x) << S_INTST) +#define G_INTST(x) (((x) >> S_INTST) & M_INTST) + +#define S_PIOST 2 +#define M_PIOST 0x3 +#define V_PIOST(x) ((x) << S_PIOST) +#define G_PIOST(x) (((x) >> S_PIOST) & M_PIOST) + +#define S_RFREQRDST 0 +#define M_RFREQRDST 0x3 +#define V_RFREQRDST(x) ((x) << S_RFREQRDST) +#define G_RFREQRDST(x) (((x) >> S_RFREQRDST) & M_RFREQRDST) + /* registers for module PCIE0 */ #define PCIE0_BASE_ADDR 0x80 #define A_PCIE_INT_ENABLE 0x80 -#define S_BISTERR 15 +#define S_BISTERR 19 #define M_BISTERR 0xff #define V_BISTERR(x) ((x) << S_BISTERR) #define G_BISTERR(x) (((x) >> S_BISTERR) & M_BISTERR) +#define S_TXPARERR 18 +#define V_TXPARERR(x) ((x) << S_TXPARERR) +#define F_TXPARERR V_TXPARERR(1U) + +#define S_RXPARERR 17 +#define V_RXPARERR(x) ((x) << S_RXPARERR) +#define F_RXPARERR V_RXPARERR(1U) + +#define S_RETRYLUTPARERR 16 +#define V_RETRYLUTPARERR(x) ((x) << S_RETRYLUTPARERR) +#define F_RETRYLUTPARERR V_RETRYLUTPARERR(1U) + +#define S_RETRYBUFPARERR 15 +#define V_RETRYBUFPARERR(x) ((x) << S_RETRYBUFPARERR) +#define F_RETRYBUFPARERR V_RETRYBUFPARERR(1U) + #define S_PCIE_MSIXPARERR 12 #define M_PCIE_MSIXPARERR 0x7 #define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR) @@ -787,6 +1012,18 @@ $FreeBSD$ #define A_PCIE_INT_CAUSE 0x84 #define A_PCIE_CFG 0x88 +#define S_PCIE_DMASTOPEN 24 +#define V_PCIE_DMASTOPEN(x) ((x) << S_PCIE_DMASTOPEN) +#define F_PCIE_DMASTOPEN V_PCIE_DMASTOPEN(1U) + +#define S_PRIORITYINTA 23 +#define V_PRIORITYINTA(x) ((x) << S_PRIORITYINTA) +#define F_PRIORITYINTA V_PRIORITYINTA(1U) + +#define S_INIFULLPKT 22 +#define V_INIFULLPKT(x) ((x) << S_INIFULLPKT) +#define F_INIFULLPKT V_INIFULLPKT(1U) + #define S_ENABLELINKDWNDRST 21 #define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST) #define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U) @@ -825,15 +1062,37 @@ $FreeBSD$ #define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE) #define F_CRSTWRMMODE V_CRSTWRMMODE(1U) -#define S_PRIORITYINTA 23 -#define V_PRIORITYINTA(x) ((x) << S_PRIORITYINTA) -#define 
F_PRIORITYINTA V_PRIORITYINTA(1U) +#define A_PCIE_MODE 0x8c -#define S_INIFULLPKT 22 -#define V_INIFULLPKT(x) ((x) << S_INIFULLPKT) -#define F_INIFULLPKT V_INIFULLPKT(1U) +#define S_TAR_STATE 29 +#define M_TAR_STATE 0x7 +#define V_TAR_STATE(x) ((x) << S_TAR_STATE) +#define G_TAR_STATE(x) (((x) >> S_TAR_STATE) & M_TAR_STATE) -#define A_PCIE_MODE 0x8c +#define S_RF_STATEINI 26 +#define M_RF_STATEINI 0x7 +#define V_RF_STATEINI(x) ((x) << S_RF_STATEINI) +#define G_RF_STATEINI(x) (((x) >> S_RF_STATEINI) & M_RF_STATEINI) + +#define S_CF_STATEINI 23 +#define M_CF_STATEINI 0x7 +#define V_CF_STATEINI(x) ((x) << S_CF_STATEINI) +#define G_CF_STATEINI(x) (((x) >> S_CF_STATEINI) & M_CF_STATEINI) + +#define S_PIO_STATEPL 20 +#define M_PIO_STATEPL 0x7 +#define V_PIO_STATEPL(x) ((x) << S_PIO_STATEPL) +#define G_PIO_STATEPL(x) (((x) >> S_PIO_STATEPL) & M_PIO_STATEPL) + +#define S_PIO_STATEISC 18 +#define M_PIO_STATEISC 0x3 +#define V_PIO_STATEISC(x) ((x) << S_PIO_STATEISC) +#define G_PIO_STATEISC(x) (((x) >> S_PIO_STATEISC) & M_PIO_STATEISC) + +#define S_NUMFSTTRNSEQRX 10 +#define M_NUMFSTTRNSEQRX 0xff +#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX) +#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX) #define S_LNKCNTLSTATE 2 #define M_LNKCNTLSTATE 0xff @@ -848,10 +1107,76 @@ $FreeBSD$ #define V_LNKINITIAL(x) ((x) << S_LNKINITIAL) #define F_LNKINITIAL V_LNKINITIAL(1U) -#define S_NUMFSTTRNSEQRX 10 -#define M_NUMFSTTRNSEQRX 0xff -#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX) -#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX) +#define A_PCIE_STAT 0x90 + +#define S_INI_STATE 28 +#define M_INI_STATE 0xf +#define V_INI_STATE(x) ((x) << S_INI_STATE) +#define G_INI_STATE(x) (((x) >> S_INI_STATE) & M_INI_STATE) + +#define S_WF_STATEINI 24 +#define M_WF_STATEINI 0xf +#define V_WF_STATEINI(x) ((x) << S_WF_STATEINI) +#define G_WF_STATEINI(x) (((x) >> S_WF_STATEINI) & M_WF_STATEINI) + +#define S_PLM_REQFIFOCNT 22 +#define 
M_PLM_REQFIFOCNT 0x3 +#define V_PLM_REQFIFOCNT(x) ((x) << S_PLM_REQFIFOCNT) +#define G_PLM_REQFIFOCNT(x) (((x) >> S_PLM_REQFIFOCNT) & M_PLM_REQFIFOCNT) + +#define S_ER_REQFIFOEMPTY 21 +#define V_ER_REQFIFOEMPTY(x) ((x) << S_ER_REQFIFOEMPTY) +#define F_ER_REQFIFOEMPTY V_ER_REQFIFOEMPTY(1U) + +#define S_WF_RSPFIFOEMPTY 20 +#define V_WF_RSPFIFOEMPTY(x) ((x) << S_WF_RSPFIFOEMPTY) +#define F_WF_RSPFIFOEMPTY V_WF_RSPFIFOEMPTY(1U) + +#define S_WF_REQFIFOEMPTY 19 +#define V_WF_REQFIFOEMPTY(x) ((x) << S_WF_REQFIFOEMPTY) +#define F_WF_REQFIFOEMPTY V_WF_REQFIFOEMPTY(1U) + +#define S_RF_RSPFIFOEMPTY 18 +#define V_RF_RSPFIFOEMPTY(x) ((x) << S_RF_RSPFIFOEMPTY) +#define F_RF_RSPFIFOEMPTY V_RF_RSPFIFOEMPTY(1U) + +#define S_RF_REQFIFOEMPTY 17 +#define V_RF_REQFIFOEMPTY(x) ((x) << S_RF_REQFIFOEMPTY) +#define F_RF_REQFIFOEMPTY V_RF_REQFIFOEMPTY(1U) + +#define S_RF_ACTEMPTY 16 +#define V_RF_ACTEMPTY(x) ((x) << S_RF_ACTEMPTY) +#define F_RF_ACTEMPTY V_RF_ACTEMPTY(1U) + +#define S_PIO_RSPFIFOCNT 11 +#define M_PIO_RSPFIFOCNT 0x1f +#define V_PIO_RSPFIFOCNT(x) ((x) << S_PIO_RSPFIFOCNT) +#define G_PIO_RSPFIFOCNT(x) (((x) >> S_PIO_RSPFIFOCNT) & M_PIO_RSPFIFOCNT) + +#define S_PIO_REQFIFOCNT 5 +#define M_PIO_REQFIFOCNT 0x3f +#define V_PIO_REQFIFOCNT(x) ((x) << S_PIO_REQFIFOCNT) +#define G_PIO_REQFIFOCNT(x) (((x) >> S_PIO_REQFIFOCNT) & M_PIO_REQFIFOCNT) + +#define S_CF_RSPFIFOEMPTY 4 +#define V_CF_RSPFIFOEMPTY(x) ((x) << S_CF_RSPFIFOEMPTY) +#define F_CF_RSPFIFOEMPTY V_CF_RSPFIFOEMPTY(1U) + +#define S_CF_REQFIFOEMPTY 3 +#define V_CF_REQFIFOEMPTY(x) ((x) << S_CF_REQFIFOEMPTY) +#define F_CF_REQFIFOEMPTY V_CF_REQFIFOEMPTY(1U) + +#define S_CF_ACTEMPTY 2 +#define V_CF_ACTEMPTY(x) ((x) << S_CF_ACTEMPTY) +#define F_CF_ACTEMPTY V_CF_ACTEMPTY(1U) + +#define S_VPD_RSPFIFOEMPTY 1 +#define V_VPD_RSPFIFOEMPTY(x) ((x) << S_VPD_RSPFIFOEMPTY) +#define F_VPD_RSPFIFOEMPTY V_VPD_RSPFIFOEMPTY(1U) + +#define S_VPD_REQFIFOEMPTY 0 +#define V_VPD_REQFIFOEMPTY(x) ((x) << S_VPD_REQFIFOEMPTY) +#define F_VPD_REQFIFOEMPTY 
V_VPD_REQFIFOEMPTY(1U) #define A_PCIE_CAL 0x90 @@ -883,8 +1208,37 @@ $FreeBSD$ #define G_ZIN(x) (((x) >> S_ZIN) & M_ZIN) #define A_PCIE_WOL 0x94 + +#define S_CF_RSPSTATE 12 +#define M_CF_RSPSTATE 0x3 +#define V_CF_RSPSTATE(x) ((x) << S_CF_RSPSTATE) +#define G_CF_RSPSTATE(x) (((x) >> S_CF_RSPSTATE) & M_CF_RSPSTATE) + +#define S_RF_RSPSTATE 10 +#define M_RF_RSPSTATE 0x3 +#define V_RF_RSPSTATE(x) ((x) << S_RF_RSPSTATE) +#define G_RF_RSPSTATE(x) (((x) >> S_RF_RSPSTATE) & M_RF_RSPSTATE) + +#define S_PME_STATE 7 +#define M_PME_STATE 0x7 +#define V_PME_STATE(x) ((x) << S_PME_STATE) +#define G_PME_STATE(x) (((x) >> S_PME_STATE) & M_PME_STATE) + +#define S_INT_STATE 4 +#define M_INT_STATE 0x7 +#define V_INT_STATE(x) ((x) << S_INT_STATE) +#define G_INT_STATE(x) (((x) >> S_INT_STATE) & M_INT_STATE) + #define A_PCIE_PEX_CTRL0 0x98 +#define S_CPLTIMEOUTRETRY 31 +#define V_CPLTIMEOUTRETRY(x) ((x) << S_CPLTIMEOUTRETRY) +#define F_CPLTIMEOUTRETRY V_CPLTIMEOUTRETRY(1U) + +#define S_STRICTTSMN 30 +#define V_STRICTTSMN(x) ((x) << S_STRICTTSMN) +#define F_STRICTTSMN V_STRICTTSMN(1U) + #define S_NUMFSTTRNSEQ 22 #define M_NUMFSTTRNSEQ 0xff #define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ) @@ -903,26 +1257,8 @@ $FreeBSD$ #define V_CPLPNDCHKEN(x) ((x) << S_CPLPNDCHKEN) #define F_CPLPNDCHKEN V_CPLPNDCHKEN(1U) -#define S_CPLTIMEOUTRETRY 31 -#define V_CPLTIMEOUTRETRY(x) ((x) << S_CPLTIMEOUTRETRY) -#define F_CPLTIMEOUTRETRY V_CPLTIMEOUTRETRY(1U) - -#define S_STRICTTSMN 30 -#define V_STRICTTSMN(x) ((x) << S_STRICTTSMN) -#define F_STRICTTSMN V_STRICTTSMN(1U) - #define A_PCIE_PEX_CTRL1 0x9c -#define S_T3A_DLLPTIMEOUTLMT 11 -#define M_T3A_DLLPTIMEOUTLMT 0xfffff -#define V_T3A_DLLPTIMEOUTLMT(x) ((x) << S_T3A_DLLPTIMEOUTLMT) -#define G_T3A_DLLPTIMEOUTLMT(x) (((x) >> S_T3A_DLLPTIMEOUTLMT) & M_T3A_DLLPTIMEOUTLMT) - -#define S_T3A_ACKLAT 0 -#define M_T3A_ACKLAT 0x7ff -#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT) -#define G_T3A_ACKLAT(x) (((x) >> S_T3A_ACKLAT) & M_T3A_ACKLAT) - #define S_RXPHYERREN 31 
#define V_RXPHYERREN(x) ((x) << S_RXPHYERREN) #define F_RXPHYERREN V_RXPHYERREN(1U) @@ -937,34 +1273,48 @@ $FreeBSD$ #define V_ACKLAT(x) ((x) << S_ACKLAT) #define G_ACKLAT(x) (((x) >> S_ACKLAT) & M_ACKLAT) +#define S_T3A_DLLPTIMEOUTLMT 11 +#define M_T3A_DLLPTIMEOUTLMT 0xfffff +#define V_T3A_DLLPTIMEOUTLMT(x) ((x) << S_T3A_DLLPTIMEOUTLMT) +#define G_T3A_DLLPTIMEOUTLMT(x) (((x) >> S_T3A_DLLPTIMEOUTLMT) & M_T3A_DLLPTIMEOUTLMT) + +#define S_T3A_ACKLAT 0 +#define M_T3A_ACKLAT 0x7ff +#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT) +#define G_T3A_ACKLAT(x) (((x) >> S_T3A_ACKLAT) & M_T3A_ACKLAT) + #define A_PCIE_PEX_CTRL2 0xa0 -#define S_PMEXITL1REQ 29 +#define S_LNKCNTLDETDIR 30 +#define V_LNKCNTLDETDIR(x) ((x) << S_LNKCNTLDETDIR) +#define F_LNKCNTLDETDIR V_LNKCNTLDETDIR(1U) + +#define S_ENTERL1REN 29 +#define V_ENTERL1REN(x) ((x) << S_ENTERL1REN) +#define F_ENTERL1REN V_ENTERL1REN(1U) + +#define S_PMEXITL1REQ 28 #define V_PMEXITL1REQ(x) ((x) << S_PMEXITL1REQ) #define F_PMEXITL1REQ V_PMEXITL1REQ(1U) -#define S_PMTXIDLE 28 +#define S_PMTXIDLE 27 #define V_PMTXIDLE(x) ((x) << S_PMTXIDLE) #define F_PMTXIDLE V_PMTXIDLE(1U) -#define S_PCIMODELOOP 27 +#define S_PCIMODELOOP 26 #define V_PCIMODELOOP(x) ((x) << S_PCIMODELOOP) #define F_PCIMODELOOP V_PCIMODELOOP(1U) -#define S_L1ASPMTXRXL0STIME 15 +#define S_L1ASPMTXRXL0STIME 14 #define M_L1ASPMTXRXL0STIME 0xfff #define V_L1ASPMTXRXL0STIME(x) ((x) << S_L1ASPMTXRXL0STIME) #define G_L1ASPMTXRXL0STIME(x) (((x) >> S_L1ASPMTXRXL0STIME) & M_L1ASPMTXRXL0STIME) -#define S_L0SIDLETIME 4 +#define S_L0SIDLETIME 3 #define M_L0SIDLETIME 0x7ff #define V_L0SIDLETIME(x) ((x) << S_L0SIDLETIME) #define G_L0SIDLETIME(x) (((x) >> S_L0SIDLETIME) & M_L0SIDLETIME) -#define S_ENTERL23 3 -#define V_ENTERL23(x) ((x) << S_ENTERL23) -#define F_ENTERL23 V_ENTERL23(1U) - #define S_ENTERL1ASPMEN 2 #define V_ENTERL1ASPMEN(x) ((x) << S_ENTERL1ASPMEN) #define F_ENTERL1ASPMEN V_ENTERL1ASPMEN(1U) @@ -977,16 +1327,17 @@ $FreeBSD$ #define V_ENTERL0SEN(x) ((x) << 
S_ENTERL0SEN) #define F_ENTERL0SEN V_ENTERL0SEN(1U) -#define S_LNKCNTLDETDIR 30 -#define V_LNKCNTLDETDIR(x) ((x) << S_LNKCNTLDETDIR) -#define F_LNKCNTLDETDIR V_LNKCNTLDETDIR(1U) - -#define S_ENTERL1REN 29 -#define V_ENTERL1REN(x) ((x) << S_ENTERL1REN) -#define F_ENTERL1REN V_ENTERL1REN(1U) +#define S_ENTERL23 3 +#define V_ENTERL23(x) ((x) << S_ENTERL23) +#define F_ENTERL23 V_ENTERL23(1U) #define A_PCIE_PEX_ERR 0xa4 +#define S_CPLTIMEOUTID 18 +#define M_CPLTIMEOUTID 0x7f +#define V_CPLTIMEOUTID(x) ((x) << S_CPLTIMEOUTID) +#define G_CPLTIMEOUTID(x) (((x) >> S_CPLTIMEOUTID) & M_CPLTIMEOUTID) + #define S_FLOWCTLOFLOWERR 17 #define V_FLOWCTLOFLOWERR(x) ((x) << S_FLOWCTLOFLOWERR) #define F_FLOWCTLOFLOWERR V_FLOWCTLOFLOWERR(1U) @@ -1059,10 +1410,16 @@ $FreeBSD$ #define V_PSNCPL(x) ((x) << S_PSNCPL) #define F_PSNCPL V_PSNCPL(1U) -#define S_CPLTIMEOUTID 18 -#define M_CPLTIMEOUTID 0x7f -#define V_CPLTIMEOUTID(x) ((x) << S_CPLTIMEOUTID) -#define G_CPLTIMEOUTID(x) (((x) >> S_CPLTIMEOUTID) & M_CPLTIMEOUTID) +#define A_PCIE_SERDES_CTRL 0xa8 + +#define S_PMASEL 3 +#define V_PMASEL(x) ((x) << S_PMASEL) +#define F_PMASEL V_PMASEL(1U) + +#define S_LANE 0 +#define M_LANE 0x7 +#define V_LANE(x) ((x) << S_LANE) +#define G_LANE(x) (((x) >> S_LANE) & M_LANE) #define A_PCIE_PIPE_CTRL 0xa8 @@ -1093,16 +1450,25 @@ $FreeBSD$ #define V_PCLKOFFINP1(x) ((x) << S_PCLKOFFINP1) #define F_PCLKOFFINP1 V_PCLKOFFINP1(1U) -#define S_PMASEL 3 -#define V_PMASEL(x) ((x) << S_PMASEL) -#define F_PMASEL V_PMASEL(1U) +#define A_PCIE_SERDES_QUAD_CTRL0 0xac -#define S_LANE 0 -#define M_LANE 0x7 -#define V_LANE(x) ((x) << S_LANE) -#define G_LANE(x) (((x) >> S_LANE) & M_LANE) +#define S_TESTSIG 10 +#define M_TESTSIG 0x7ffff +#define V_TESTSIG(x) ((x) << S_TESTSIG) +#define G_TESTSIG(x) (((x) >> S_TESTSIG) & M_TESTSIG) + +#define S_OFFSET 2 +#define M_OFFSET 0xff +#define V_OFFSET(x) ((x) << S_OFFSET) +#define G_OFFSET(x) (((x) >> S_OFFSET) & M_OFFSET) + +#define S_OFFSETEN 1 +#define V_OFFSETEN(x) ((x) << 
S_OFFSETEN) +#define F_OFFSETEN V_OFFSETEN(1U) -#define A_PCIE_SERDES_CTRL 0xac +#define S_IDDQB 0 +#define V_IDDQB(x) ((x) << S_IDDQB) +#define F_IDDQB V_IDDQB(1U) #define S_MANMODE 31 #define V_MANMODE(x) ((x) << S_MANMODE) @@ -1193,89 +1559,27 @@ $FreeBSD$ #define V_PREEMPH(x) ((x) << S_PREEMPH) #define G_PREEMPH(x) (((x) >> S_PREEMPH) & M_PREEMPH) -#define A_PCIE_SERDES_QUAD_CTRL0 0xac +#define A_PCIE_SERDES_QUAD_CTRL1 0xb0 -#define S_TESTSIG 10 -#define M_TESTSIG 0x7ffff -#define V_TESTSIG(x) ((x) << S_TESTSIG) -#define G_TESTSIG(x) (((x) >> S_TESTSIG) & M_TESTSIG) +#define S_FASTINIT 28 +#define V_FASTINIT(x) ((x) << S_FASTINIT) +#define F_FASTINIT V_FASTINIT(1U) -#define S_OFFSET 2 -#define M_OFFSET 0xff -#define V_OFFSET(x) ((x) << S_OFFSET) -#define G_OFFSET(x) (((x) >> S_OFFSET) & M_OFFSET) +#define S_CTCDISABLE 27 +#define V_CTCDISABLE(x) ((x) << S_CTCDISABLE) +#define F_CTCDISABLE V_CTCDISABLE(1U) -#define S_OFFSETEN 1 -#define V_OFFSETEN(x) ((x) << S_OFFSETEN) -#define F_OFFSETEN V_OFFSETEN(1U) +#define S_MANRESETPLL 26 +#define V_MANRESETPLL(x) ((x) << S_MANRESETPLL) +#define F_MANRESETPLL V_MANRESETPLL(1U) -#define S_IDDQB 0 -#define V_IDDQB(x) ((x) << S_IDDQB) -#define F_IDDQB V_IDDQB(1U) +#define S_MANL2PWRDN 25 +#define V_MANL2PWRDN(x) ((x) << S_MANL2PWRDN) +#define F_MANL2PWRDN V_MANL2PWRDN(1U) -#define A_PCIE_SERDES_STATUS0 0xb0 - -#define S_RXERRLANE7 21 -#define M_RXERRLANE7 0x7 -#define V_RXERRLANE7(x) ((x) << S_RXERRLANE7) -#define G_RXERRLANE7(x) (((x) >> S_RXERRLANE7) & M_RXERRLANE7) - -#define S_RXERRLANE6 18 -#define M_RXERRLANE6 0x7 -#define V_RXERRLANE6(x) ((x) << S_RXERRLANE6) -#define G_RXERRLANE6(x) (((x) >> S_RXERRLANE6) & M_RXERRLANE6) - -#define S_RXERRLANE5 15 -#define M_RXERRLANE5 0x7 -#define V_RXERRLANE5(x) ((x) << S_RXERRLANE5) -#define G_RXERRLANE5(x) (((x) >> S_RXERRLANE5) & M_RXERRLANE5) - -#define S_RXERRLANE4 12 -#define M_RXERRLANE4 0x7 -#define V_RXERRLANE4(x) ((x) << S_RXERRLANE4) -#define G_RXERRLANE4(x) (((x) >> 
S_RXERRLANE4) & M_RXERRLANE4) - -#define S_PCIE_RXERRLANE3 9 -#define M_PCIE_RXERRLANE3 0x7 -#define V_PCIE_RXERRLANE3(x) ((x) << S_PCIE_RXERRLANE3) -#define G_PCIE_RXERRLANE3(x) (((x) >> S_PCIE_RXERRLANE3) & M_PCIE_RXERRLANE3) - -#define S_PCIE_RXERRLANE2 6 -#define M_PCIE_RXERRLANE2 0x7 -#define V_PCIE_RXERRLANE2(x) ((x) << S_PCIE_RXERRLANE2) -#define G_PCIE_RXERRLANE2(x) (((x) >> S_PCIE_RXERRLANE2) & M_PCIE_RXERRLANE2) - -#define S_PCIE_RXERRLANE1 3 -#define M_PCIE_RXERRLANE1 0x7 -#define V_PCIE_RXERRLANE1(x) ((x) << S_PCIE_RXERRLANE1) -#define G_PCIE_RXERRLANE1(x) (((x) >> S_PCIE_RXERRLANE1) & M_PCIE_RXERRLANE1) - -#define S_PCIE_RXERRLANE0 0 -#define M_PCIE_RXERRLANE0 0x7 -#define V_PCIE_RXERRLANE0(x) ((x) << S_PCIE_RXERRLANE0) -#define G_PCIE_RXERRLANE0(x) (((x) >> S_PCIE_RXERRLANE0) & M_PCIE_RXERRLANE0) - -#define A_PCIE_SERDES_QUAD_CTRL1 0xb0 - -#define S_FASTINIT 28 -#define V_FASTINIT(x) ((x) << S_FASTINIT) -#define F_FASTINIT V_FASTINIT(1U) - -#define S_CTCDISABLE 27 -#define V_CTCDISABLE(x) ((x) << S_CTCDISABLE) -#define F_CTCDISABLE V_CTCDISABLE(1U) - -#define S_MANRESETPLL 26 -#define V_MANRESETPLL(x) ((x) << S_MANRESETPLL) -#define F_MANRESETPLL V_MANRESETPLL(1U) - -#define S_MANL2PWRDN 25 -#define V_MANL2PWRDN(x) ((x) << S_MANL2PWRDN) -#define F_MANL2PWRDN V_MANL2PWRDN(1U) - -#define S_MANQUADEN 24 -#define V_MANQUADEN(x) ((x) << S_MANQUADEN) -#define F_MANQUADEN V_MANQUADEN(1U) +#define S_MANQUADEN 24 +#define V_MANQUADEN(x) ((x) << S_MANQUADEN) +#define F_MANQUADEN V_MANQUADEN(1U) #define S_RXEQCTL 22 #define M_RXEQCTL 0x3 @@ -1339,6 +1643,120 @@ $FreeBSD$ #define V_PCLKDETECT(x) ((x) << S_PCLKDETECT) #define F_PCLKDETECT V_PCLKDETECT(1U) +#define A_PCIE_SERDES_STATUS0 0xb0 + +#define S_RXERRLANE7 21 +#define M_RXERRLANE7 0x7 +#define V_RXERRLANE7(x) ((x) << S_RXERRLANE7) +#define G_RXERRLANE7(x) (((x) >> S_RXERRLANE7) & M_RXERRLANE7) + +#define S_RXERRLANE6 18 +#define M_RXERRLANE6 0x7 +#define V_RXERRLANE6(x) ((x) << S_RXERRLANE6) +#define 
G_RXERRLANE6(x) (((x) >> S_RXERRLANE6) & M_RXERRLANE6) + +#define S_RXERRLANE5 15 +#define M_RXERRLANE5 0x7 +#define V_RXERRLANE5(x) ((x) << S_RXERRLANE5) +#define G_RXERRLANE5(x) (((x) >> S_RXERRLANE5) & M_RXERRLANE5) + +#define S_RXERRLANE4 12 +#define M_RXERRLANE4 0x7 +#define V_RXERRLANE4(x) ((x) << S_RXERRLANE4) +#define G_RXERRLANE4(x) (((x) >> S_RXERRLANE4) & M_RXERRLANE4) + +#define S_PCIE_RXERRLANE3 9 +#define M_PCIE_RXERRLANE3 0x7 +#define V_PCIE_RXERRLANE3(x) ((x) << S_PCIE_RXERRLANE3) +#define G_PCIE_RXERRLANE3(x) (((x) >> S_PCIE_RXERRLANE3) & M_PCIE_RXERRLANE3) + +#define S_PCIE_RXERRLANE2 6 +#define M_PCIE_RXERRLANE2 0x7 +#define V_PCIE_RXERRLANE2(x) ((x) << S_PCIE_RXERRLANE2) +#define G_PCIE_RXERRLANE2(x) (((x) >> S_PCIE_RXERRLANE2) & M_PCIE_RXERRLANE2) + +#define S_PCIE_RXERRLANE1 3 +#define M_PCIE_RXERRLANE1 0x7 +#define V_PCIE_RXERRLANE1(x) ((x) << S_PCIE_RXERRLANE1) +#define G_PCIE_RXERRLANE1(x) (((x) >> S_PCIE_RXERRLANE1) & M_PCIE_RXERRLANE1) + +#define S_PCIE_RXERRLANE0 0 +#define M_PCIE_RXERRLANE0 0x7 +#define V_PCIE_RXERRLANE0(x) ((x) << S_PCIE_RXERRLANE0) +#define G_PCIE_RXERRLANE0(x) (((x) >> S_PCIE_RXERRLANE0) & M_PCIE_RXERRLANE0) + +#define A_PCIE_SERDES_LANE_CTRL 0xb4 + +#define S_EXTBISTCHKERRCLR 22 +#define V_EXTBISTCHKERRCLR(x) ((x) << S_EXTBISTCHKERRCLR) +#define F_EXTBISTCHKERRCLR V_EXTBISTCHKERRCLR(1U) + +#define S_EXTBISTCHKEN 21 +#define V_EXTBISTCHKEN(x) ((x) << S_EXTBISTCHKEN) +#define F_EXTBISTCHKEN V_EXTBISTCHKEN(1U) + +#define S_EXTBISTGENEN 20 +#define V_EXTBISTGENEN(x) ((x) << S_EXTBISTGENEN) +#define F_EXTBISTGENEN V_EXTBISTGENEN(1U) + +#define S_EXTBISTPAT 17 +#define M_EXTBISTPAT 0x7 +#define V_EXTBISTPAT(x) ((x) << S_EXTBISTPAT) +#define G_EXTBISTPAT(x) (((x) >> S_EXTBISTPAT) & M_EXTBISTPAT) + +#define S_EXTPARRESET 16 +#define V_EXTPARRESET(x) ((x) << S_EXTPARRESET) +#define F_EXTPARRESET V_EXTPARRESET(1U) + +#define S_EXTPARLPBK 15 +#define V_EXTPARLPBK(x) ((x) << S_EXTPARLPBK) +#define F_EXTPARLPBK V_EXTPARLPBK(1U) 
+ +#define S_MANRXTERMEN 14 +#define V_MANRXTERMEN(x) ((x) << S_MANRXTERMEN) +#define F_MANRXTERMEN V_MANRXTERMEN(1U) + +#define S_MANBEACONTXEN 13 +#define V_MANBEACONTXEN(x) ((x) << S_MANBEACONTXEN) +#define F_MANBEACONTXEN V_MANBEACONTXEN(1U) + +#define S_MANRXDETECTEN 12 +#define V_MANRXDETECTEN(x) ((x) << S_MANRXDETECTEN) +#define F_MANRXDETECTEN V_MANRXDETECTEN(1U) + +#define S_MANTXIDLEEN 11 +#define V_MANTXIDLEEN(x) ((x) << S_MANTXIDLEEN) +#define F_MANTXIDLEEN V_MANTXIDLEEN(1U) + +#define S_MANRXIDLEEN 10 +#define V_MANRXIDLEEN(x) ((x) << S_MANRXIDLEEN) +#define F_MANRXIDLEEN V_MANRXIDLEEN(1U) + +#define S_MANL1PWRDN 9 +#define V_MANL1PWRDN(x) ((x) << S_MANL1PWRDN) +#define F_MANL1PWRDN V_MANL1PWRDN(1U) + +#define S_MANRESET 8 +#define V_MANRESET(x) ((x) << S_MANRESET) +#define F_MANRESET V_MANRESET(1U) + +#define S_MANFMOFFSET 3 +#define M_MANFMOFFSET 0x1f +#define V_MANFMOFFSET(x) ((x) << S_MANFMOFFSET) +#define G_MANFMOFFSET(x) (((x) >> S_MANFMOFFSET) & M_MANFMOFFSET) + +#define S_MANFMOFFSETEN 2 +#define V_MANFMOFFSETEN(x) ((x) << S_MANFMOFFSETEN) +#define F_MANFMOFFSETEN V_MANFMOFFSETEN(1U) + +#define S_MANLANEEN 1 +#define V_MANLANEEN(x) ((x) << S_MANLANEEN) +#define F_MANLANEEN V_MANLANEEN(1U) + +#define S_INTSERLPBK 0 +#define V_INTSERLPBK(x) ((x) << S_INTSERLPBK) +#define F_INTSERLPBK V_INTSERLPBK(1U) + #define A_PCIE_SERDES_STATUS1 0xb4 #define S_CMULOCK 31 @@ -1441,77 +1859,40 @@ $FreeBSD$ #define V_PCIE_RXOFLOWLANE0(x) ((x) << S_PCIE_RXOFLOWLANE0) #define F_PCIE_RXOFLOWLANE0 V_PCIE_RXOFLOWLANE0(1U) -#define A_PCIE_SERDES_LANE_CTRL 0xb4 - -#define S_EXTBISTCHKERRCLR 22 -#define V_EXTBISTCHKERRCLR(x) ((x) << S_EXTBISTCHKERRCLR) -#define F_EXTBISTCHKERRCLR V_EXTBISTCHKERRCLR(1U) - -#define S_EXTBISTCHKEN 21 -#define V_EXTBISTCHKEN(x) ((x) << S_EXTBISTCHKEN) -#define F_EXTBISTCHKEN V_EXTBISTCHKEN(1U) - -#define S_EXTBISTGENEN 20 -#define V_EXTBISTGENEN(x) ((x) << S_EXTBISTGENEN) -#define F_EXTBISTGENEN V_EXTBISTGENEN(1U) - -#define S_EXTBISTPAT 17 
-#define M_EXTBISTPAT 0x7 -#define V_EXTBISTPAT(x) ((x) << S_EXTBISTPAT) -#define G_EXTBISTPAT(x) (((x) >> S_EXTBISTPAT) & M_EXTBISTPAT) - -#define S_EXTPARRESET 16 -#define V_EXTPARRESET(x) ((x) << S_EXTPARRESET) -#define F_EXTPARRESET V_EXTPARRESET(1U) - -#define S_EXTPARLPBK 15 -#define V_EXTPARLPBK(x) ((x) << S_EXTPARLPBK) -#define F_EXTPARLPBK V_EXTPARLPBK(1U) - -#define S_MANRXTERMEN 14 -#define V_MANRXTERMEN(x) ((x) << S_MANRXTERMEN) -#define F_MANRXTERMEN V_MANRXTERMEN(1U) - -#define S_MANBEACONTXEN 13 -#define V_MANBEACONTXEN(x) ((x) << S_MANBEACONTXEN) -#define F_MANBEACONTXEN V_MANBEACONTXEN(1U) - -#define S_MANRXDETECTEN 12 -#define V_MANRXDETECTEN(x) ((x) << S_MANRXDETECTEN) -#define F_MANRXDETECTEN V_MANRXDETECTEN(1U) +#define A_PCIE_SERDES_LANE_STAT 0xb8 -#define S_MANTXIDLEEN 11 -#define V_MANTXIDLEEN(x) ((x) << S_MANTXIDLEEN) -#define F_MANTXIDLEEN V_MANTXIDLEEN(1U) +#define S_EXTBISTCHKERRCNT 8 +#define M_EXTBISTCHKERRCNT 0xffffff +#define V_EXTBISTCHKERRCNT(x) ((x) << S_EXTBISTCHKERRCNT) +#define G_EXTBISTCHKERRCNT(x) (((x) >> S_EXTBISTCHKERRCNT) & M_EXTBISTCHKERRCNT) -#define S_MANRXIDLEEN 10 -#define V_MANRXIDLEEN(x) ((x) << S_MANRXIDLEEN) -#define F_MANRXIDLEEN V_MANRXIDLEEN(1U) +#define S_EXTBISTCHKFMD 7 +#define V_EXTBISTCHKFMD(x) ((x) << S_EXTBISTCHKFMD) +#define F_EXTBISTCHKFMD V_EXTBISTCHKFMD(1U) -#define S_MANL1PWRDN 9 -#define V_MANL1PWRDN(x) ((x) << S_MANL1PWRDN) -#define F_MANL1PWRDN V_MANL1PWRDN(1U) +#define S_BEACONDETECTCHG 6 +#define V_BEACONDETECTCHG(x) ((x) << S_BEACONDETECTCHG) +#define F_BEACONDETECTCHG V_BEACONDETECTCHG(1U) -#define S_MANRESET 8 -#define V_MANRESET(x) ((x) << S_MANRESET) -#define F_MANRESET V_MANRESET(1U) +#define S_RXDETECTCHG 5 +#define V_RXDETECTCHG(x) ((x) << S_RXDETECTCHG) +#define F_RXDETECTCHG V_RXDETECTCHG(1U) -#define S_MANFMOFFSET 3 -#define M_MANFMOFFSET 0x1f -#define V_MANFMOFFSET(x) ((x) << S_MANFMOFFSET) -#define G_MANFMOFFSET(x) (((x) >> S_MANFMOFFSET) & M_MANFMOFFSET) +#define 
S_TXIDLEDETECTCHG 4 +#define V_TXIDLEDETECTCHG(x) ((x) << S_TXIDLEDETECTCHG) +#define F_TXIDLEDETECTCHG V_TXIDLEDETECTCHG(1U) -#define S_MANFMOFFSETEN 2 -#define V_MANFMOFFSETEN(x) ((x) << S_MANFMOFFSETEN) -#define F_MANFMOFFSETEN V_MANFMOFFSETEN(1U) +#define S_BEACONDETECT 2 +#define V_BEACONDETECT(x) ((x) << S_BEACONDETECT) +#define F_BEACONDETECT V_BEACONDETECT(1U) -#define S_MANLANEEN 1 -#define V_MANLANEEN(x) ((x) << S_MANLANEEN) -#define F_MANLANEEN V_MANLANEEN(1U) +#define S_RXDETECT 1 +#define V_RXDETECT(x) ((x) << S_RXDETECT) +#define F_RXDETECT V_RXDETECT(1U) -#define S_INTSERLPBK 0 -#define V_INTSERLPBK(x) ((x) << S_INTSERLPBK) -#define F_INTSERLPBK V_INTSERLPBK(1U) +#define S_TXIDLEDETECT 0 +#define V_TXIDLEDETECT(x) ((x) << S_TXIDLEDETECT) +#define F_TXIDLEDETECT V_TXIDLEDETECT(1U) #define A_PCIE_SERDES_STATUS2 0xb8 @@ -1643,40 +2024,22 @@ $FreeBSD$ #define V_PCIE_RXADDSKIPLANE0(x) ((x) << S_PCIE_RXADDSKIPLANE0) #define F_PCIE_RXADDSKIPLANE0 V_PCIE_RXADDSKIPLANE0(1U) -#define A_PCIE_SERDES_LANE_STAT 0xb8 +#define A_PCIE_PEX_WMARK 0xbc -#define S_EXTBISTCHKERRCNT 8 -#define M_EXTBISTCHKERRCNT 0xffffff -#define V_EXTBISTCHKERRCNT(x) ((x) << S_EXTBISTCHKERRCNT) -#define G_EXTBISTCHKERRCNT(x) (((x) >> S_EXTBISTCHKERRCNT) & M_EXTBISTCHKERRCNT) - -#define S_EXTBISTCHKFMD 7 -#define V_EXTBISTCHKFMD(x) ((x) << S_EXTBISTCHKFMD) -#define F_EXTBISTCHKFMD V_EXTBISTCHKFMD(1U) +#define S_P_WMARK 18 +#define M_P_WMARK 0x7ff +#define V_P_WMARK(x) ((x) << S_P_WMARK) +#define G_P_WMARK(x) (((x) >> S_P_WMARK) & M_P_WMARK) -#define S_BEACONDETECTCHG 6 -#define V_BEACONDETECTCHG(x) ((x) << S_BEACONDETECTCHG) -#define F_BEACONDETECTCHG V_BEACONDETECTCHG(1U) +#define S_NP_WMARK 11 +#define M_NP_WMARK 0x7f +#define V_NP_WMARK(x) ((x) << S_NP_WMARK) +#define G_NP_WMARK(x) (((x) >> S_NP_WMARK) & M_NP_WMARK) -#define S_RXDETECTCHG 5 -#define V_RXDETECTCHG(x) ((x) << S_RXDETECTCHG) -#define F_RXDETECTCHG V_RXDETECTCHG(1U) - -#define S_TXIDLEDETECTCHG 4 -#define 
V_TXIDLEDETECTCHG(x) ((x) << S_TXIDLEDETECTCHG) -#define F_TXIDLEDETECTCHG V_TXIDLEDETECTCHG(1U) - -#define S_BEACONDETECT 2 -#define V_BEACONDETECT(x) ((x) << S_BEACONDETECT) -#define F_BEACONDETECT V_BEACONDETECT(1U) - -#define S_RXDETECT 1 -#define V_RXDETECT(x) ((x) << S_RXDETECT) -#define F_RXDETECT V_RXDETECT(1U) - -#define S_TXIDLEDETECT 0 -#define V_TXIDLEDETECT(x) ((x) << S_TXIDLEDETECT) -#define F_TXIDLEDETECT V_TXIDLEDETECT(1U) +#define S_CPL_WMARK 0 +#define M_CPL_WMARK 0x7ff +#define V_CPL_WMARK(x) ((x) << S_CPL_WMARK) +#define G_CPL_WMARK(x) (((x) >> S_CPL_WMARK) & M_CPL_WMARK) #define A_PCIE_SERDES_BIST 0xbc @@ -1819,65 +2182,17 @@ $FreeBSD$ #define S_GPIO2_OUT_VAL 2 #define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL) -#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U) - -#define S_GPIO1_OUT_VAL 1 -#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL) -#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U) - -#define S_GPIO0_OUT_VAL 0 -#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL) -#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U) - -#define A_T3DBG_GPIO_IN 0xd4 - -#define S_GPIO11_IN 11 -#define V_GPIO11_IN(x) ((x) << S_GPIO11_IN) -#define F_GPIO11_IN V_GPIO11_IN(1U) - -#define S_GPIO10_IN 10 -#define V_GPIO10_IN(x) ((x) << S_GPIO10_IN) -#define F_GPIO10_IN V_GPIO10_IN(1U) - -#define S_GPIO9_IN 9 -#define V_GPIO9_IN(x) ((x) << S_GPIO9_IN) -#define F_GPIO9_IN V_GPIO9_IN(1U) - -#define S_GPIO8_IN 8 -#define V_GPIO8_IN(x) ((x) << S_GPIO8_IN) -#define F_GPIO8_IN V_GPIO8_IN(1U) - -#define S_GPIO7_IN 7 -#define V_GPIO7_IN(x) ((x) << S_GPIO7_IN) -#define F_GPIO7_IN V_GPIO7_IN(1U) - -#define S_GPIO6_IN 6 -#define V_GPIO6_IN(x) ((x) << S_GPIO6_IN) -#define F_GPIO6_IN V_GPIO6_IN(1U) - -#define S_GPIO5_IN 5 -#define V_GPIO5_IN(x) ((x) << S_GPIO5_IN) -#define F_GPIO5_IN V_GPIO5_IN(1U) - -#define S_GPIO4_IN 4 -#define V_GPIO4_IN(x) ((x) << S_GPIO4_IN) -#define F_GPIO4_IN V_GPIO4_IN(1U) - -#define S_GPIO3_IN 3 -#define V_GPIO3_IN(x) ((x) << S_GPIO3_IN) -#define F_GPIO3_IN 
V_GPIO3_IN(1U) - -#define S_GPIO2_IN 2 -#define V_GPIO2_IN(x) ((x) << S_GPIO2_IN) -#define F_GPIO2_IN V_GPIO2_IN(1U) +#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U) -#define S_GPIO1_IN 1 -#define V_GPIO1_IN(x) ((x) << S_GPIO1_IN) -#define F_GPIO1_IN V_GPIO1_IN(1U) +#define S_GPIO1_OUT_VAL 1 +#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL) +#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U) -#define S_GPIO0_IN 0 -#define V_GPIO0_IN(x) ((x) << S_GPIO0_IN) -#define F_GPIO0_IN V_GPIO0_IN(1U) +#define S_GPIO0_OUT_VAL 0 +#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL) +#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U) + +#define A_T3DBG_GPIO_IN 0xd4 #define S_GPIO11_CHG_DET 27 #define V_GPIO11_CHG_DET(x) ((x) << S_GPIO11_CHG_DET) @@ -1927,6 +2242,54 @@ $FreeBSD$ #define V_GPIO0_CHG_DET(x) ((x) << S_GPIO0_CHG_DET) #define F_GPIO0_CHG_DET V_GPIO0_CHG_DET(1U) +#define S_GPIO11_IN 11 +#define V_GPIO11_IN(x) ((x) << S_GPIO11_IN) +#define F_GPIO11_IN V_GPIO11_IN(1U) + +#define S_GPIO10_IN 10 +#define V_GPIO10_IN(x) ((x) << S_GPIO10_IN) +#define F_GPIO10_IN V_GPIO10_IN(1U) + +#define S_GPIO9_IN 9 +#define V_GPIO9_IN(x) ((x) << S_GPIO9_IN) +#define F_GPIO9_IN V_GPIO9_IN(1U) + +#define S_GPIO8_IN 8 +#define V_GPIO8_IN(x) ((x) << S_GPIO8_IN) +#define F_GPIO8_IN V_GPIO8_IN(1U) + +#define S_GPIO7_IN 7 +#define V_GPIO7_IN(x) ((x) << S_GPIO7_IN) +#define F_GPIO7_IN V_GPIO7_IN(1U) + +#define S_GPIO6_IN 6 +#define V_GPIO6_IN(x) ((x) << S_GPIO6_IN) +#define F_GPIO6_IN V_GPIO6_IN(1U) + +#define S_GPIO5_IN 5 +#define V_GPIO5_IN(x) ((x) << S_GPIO5_IN) +#define F_GPIO5_IN V_GPIO5_IN(1U) + +#define S_GPIO4_IN 4 +#define V_GPIO4_IN(x) ((x) << S_GPIO4_IN) +#define F_GPIO4_IN V_GPIO4_IN(1U) + +#define S_GPIO3_IN 3 +#define V_GPIO3_IN(x) ((x) << S_GPIO3_IN) +#define F_GPIO3_IN V_GPIO3_IN(1U) + +#define S_GPIO2_IN 2 +#define V_GPIO2_IN(x) ((x) << S_GPIO2_IN) +#define F_GPIO2_IN V_GPIO2_IN(1U) + +#define S_GPIO1_IN 1 +#define V_GPIO1_IN(x) ((x) << S_GPIO1_IN) +#define F_GPIO1_IN V_GPIO1_IN(1U) + +#define 
S_GPIO0_IN 0 +#define V_GPIO0_IN(x) ((x) << S_GPIO0_IN) +#define F_GPIO0_IN V_GPIO0_IN(1U) + #define A_T3DBG_INT_ENABLE 0xd8 #define S_C_LOCK 21 @@ -1949,10 +2312,6 @@ $FreeBSD$ #define V_PX_LOCK(x) ((x) << S_PX_LOCK) #define F_PX_LOCK V_PX_LOCK(1U) -#define S_PE_LOCK 16 -#define V_PE_LOCK(x) ((x) << S_PE_LOCK) -#define F_PE_LOCK V_PE_LOCK(1U) - #define S_GPIO11 11 #define V_GPIO11(x) ((x) << S_GPIO11) #define F_GPIO11 V_GPIO11(1U) @@ -2001,12 +2360,17 @@ $FreeBSD$ #define V_GPIO0(x) ((x) << S_GPIO0) #define F_GPIO0 V_GPIO0(1U) +#define S_PE_LOCK 16 +#define V_PE_LOCK(x) ((x) << S_PE_LOCK) +#define F_PE_LOCK V_PE_LOCK(1U) + #define A_T3DBG_INT_CAUSE 0xdc #define A_T3DBG_DBG0_RST_VALUE 0xe0 #define S_DEBUGDATA 0 +#define M_DEBUGDATA 0xff #define V_DEBUGDATA(x) ((x) << S_DEBUGDATA) -#define F_DEBUGDATA V_DEBUGDATA(1U) +#define G_DEBUGDATA(x) (((x) >> S_DEBUGDATA) & M_DEBUGDATA) #define A_T3DBG_PLL_OCLK_PAD_EN 0xe4 @@ -2014,6 +2378,10 @@ $FreeBSD$ #define V_PCIE_OCLK_EN(x) ((x) << S_PCIE_OCLK_EN) #define F_PCIE_OCLK_EN V_PCIE_OCLK_EN(1U) +#define S_PCLKTREE_DBG_EN 17 +#define V_PCLKTREE_DBG_EN(x) ((x) << S_PCLKTREE_DBG_EN) +#define F_PCLKTREE_DBG_EN V_PCLKTREE_DBG_EN(1U) + #define S_PCIX_OCLK_EN 16 #define V_PCIX_OCLK_EN(x) ((x) << S_PCIX_OCLK_EN) #define F_PCIX_OCLK_EN V_PCIX_OCLK_EN(1U) @@ -2034,16 +2402,8 @@ $FreeBSD$ #define V_C_OCLK_EN(x) ((x) << S_C_OCLK_EN) #define F_C_OCLK_EN V_C_OCLK_EN(1U) -#define S_PCLKTREE_DBG_EN 17 -#define V_PCLKTREE_DBG_EN(x) ((x) << S_PCLKTREE_DBG_EN) -#define F_PCLKTREE_DBG_EN V_PCLKTREE_DBG_EN(1U) - #define A_T3DBG_PLL_LOCK 0xe8 -#define S_PCIE_LOCK 20 -#define V_PCIE_LOCK(x) ((x) << S_PCIE_LOCK) -#define F_PCIE_LOCK V_PCIE_LOCK(1U) - #define S_PCIX_LOCK 16 #define V_PCIX_LOCK(x) ((x) << S_PCIX_LOCK) #define F_PCIX_LOCK V_PCIX_LOCK(1U) @@ -2064,11 +2424,16 @@ $FreeBSD$ #define V_PLL_C_LOCK(x) ((x) << S_PLL_C_LOCK) #define F_PLL_C_LOCK V_PLL_C_LOCK(1U) +#define S_PCIE_LOCK 20 +#define V_PCIE_LOCK(x) ((x) << S_PCIE_LOCK) +#define 
F_PCIE_LOCK V_PCIE_LOCK(1U) + #define A_T3DBG_SERDES_RBC_CFG 0xec #define S_X_RBC_LANE_SEL 16 +#define M_X_RBC_LANE_SEL 0x3 #define V_X_RBC_LANE_SEL(x) ((x) << S_X_RBC_LANE_SEL) -#define F_X_RBC_LANE_SEL V_X_RBC_LANE_SEL(1U) +#define G_X_RBC_LANE_SEL(x) (((x) >> S_X_RBC_LANE_SEL) & M_X_RBC_LANE_SEL) #define S_X_RBC_DBG_EN 12 #define V_X_RBC_DBG_EN(x) ((x) << S_X_RBC_DBG_EN) @@ -2079,8 +2444,9 @@ $FreeBSD$ #define F_X_SERDES_SEL V_X_SERDES_SEL(1U) #define S_PE_RBC_LANE_SEL 4 +#define M_PE_RBC_LANE_SEL 0x7 #define V_PE_RBC_LANE_SEL(x) ((x) << S_PE_RBC_LANE_SEL) -#define F_PE_RBC_LANE_SEL V_PE_RBC_LANE_SEL(1U) +#define G_PE_RBC_LANE_SEL(x) (((x) >> S_PE_RBC_LANE_SEL) & M_PE_RBC_LANE_SEL) #define S_PE_RBC_DBG_EN 0 #define V_PE_RBC_DBG_EN(x) ((x) << S_PE_RBC_DBG_EN) @@ -2108,10 +2474,6 @@ $FreeBSD$ #define V_PX_LOCK_ACT_LOW(x) ((x) << S_PX_LOCK_ACT_LOW) #define F_PX_LOCK_ACT_LOW V_PX_LOCK_ACT_LOW(1U) -#define S_PE_LOCK_ACT_LOW 16 -#define V_PE_LOCK_ACT_LOW(x) ((x) << S_PE_LOCK_ACT_LOW) -#define F_PE_LOCK_ACT_LOW V_PE_LOCK_ACT_LOW(1U) - #define S_GPIO11_ACT_LOW 11 #define V_GPIO11_ACT_LOW(x) ((x) << S_GPIO11_ACT_LOW) #define F_GPIO11_ACT_LOW V_GPIO11_ACT_LOW(1U) @@ -2160,6 +2522,10 @@ $FreeBSD$ #define V_GPIO0_ACT_LOW(x) ((x) << S_GPIO0_ACT_LOW) #define F_GPIO0_ACT_LOW V_GPIO0_ACT_LOW(1U) +#define S_PE_LOCK_ACT_LOW 16 +#define V_PE_LOCK_ACT_LOW(x) ((x) << S_PE_LOCK_ACT_LOW) +#define F_PE_LOCK_ACT_LOW V_PE_LOCK_ACT_LOW(1U) + #define A_T3DBG_PMON_CFG 0xf4 #define S_PMON_DONE 29 @@ -2171,20 +2537,24 @@ $FreeBSD$ #define F_PMON_FAIL V_PMON_FAIL(1U) #define S_PMON_FDEL_AUTO 22 +#define M_PMON_FDEL_AUTO 0x3f #define V_PMON_FDEL_AUTO(x) ((x) << S_PMON_FDEL_AUTO) -#define F_PMON_FDEL_AUTO V_PMON_FDEL_AUTO(1U) +#define G_PMON_FDEL_AUTO(x) (((x) >> S_PMON_FDEL_AUTO) & M_PMON_FDEL_AUTO) #define S_PMON_CDEL_AUTO 16 +#define M_PMON_CDEL_AUTO 0x3f #define V_PMON_CDEL_AUTO(x) ((x) << S_PMON_CDEL_AUTO) -#define F_PMON_CDEL_AUTO V_PMON_CDEL_AUTO(1U) +#define G_PMON_CDEL_AUTO(x) (((x) >> 
S_PMON_CDEL_AUTO) & M_PMON_CDEL_AUTO) #define S_PMON_FDEL_MANUAL 10 +#define M_PMON_FDEL_MANUAL 0x3f #define V_PMON_FDEL_MANUAL(x) ((x) << S_PMON_FDEL_MANUAL) -#define F_PMON_FDEL_MANUAL V_PMON_FDEL_MANUAL(1U) +#define G_PMON_FDEL_MANUAL(x) (((x) >> S_PMON_FDEL_MANUAL) & M_PMON_FDEL_MANUAL) #define S_PMON_CDEL_MANUAL 4 +#define M_PMON_CDEL_MANUAL 0x3f #define V_PMON_CDEL_MANUAL(x) ((x) << S_PMON_CDEL_MANUAL) -#define F_PMON_CDEL_MANUAL V_PMON_CDEL_MANUAL(1U) +#define G_PMON_CDEL_MANUAL(x) (((x) >> S_PMON_CDEL_MANUAL) & M_PMON_CDEL_MANUAL) #define S_PMON_MANUAL 1 #define V_PMON_MANUAL(x) ((x) << S_PMON_MANUAL) @@ -2740,6 +3110,54 @@ $FreeBSD$ #define A_CIM_HOST_INT_ENABLE 0x298 +#define S_DTAGPARERR 28 +#define V_DTAGPARERR(x) ((x) << S_DTAGPARERR) +#define F_DTAGPARERR V_DTAGPARERR(1U) + +#define S_ITAGPARERR 27 +#define V_ITAGPARERR(x) ((x) << S_ITAGPARERR) +#define F_ITAGPARERR V_ITAGPARERR(1U) + +#define S_IBQTPPARERR 26 +#define V_IBQTPPARERR(x) ((x) << S_IBQTPPARERR) +#define F_IBQTPPARERR V_IBQTPPARERR(1U) + +#define S_IBQULPPARERR 25 +#define V_IBQULPPARERR(x) ((x) << S_IBQULPPARERR) +#define F_IBQULPPARERR V_IBQULPPARERR(1U) + +#define S_IBQSGEHIPARERR 24 +#define V_IBQSGEHIPARERR(x) ((x) << S_IBQSGEHIPARERR) +#define F_IBQSGEHIPARERR V_IBQSGEHIPARERR(1U) + +#define S_IBQSGELOPARERR 23 +#define V_IBQSGELOPARERR(x) ((x) << S_IBQSGELOPARERR) +#define F_IBQSGELOPARERR V_IBQSGELOPARERR(1U) + +#define S_OBQULPLOPARERR 22 +#define V_OBQULPLOPARERR(x) ((x) << S_OBQULPLOPARERR) +#define F_OBQULPLOPARERR V_OBQULPLOPARERR(1U) + +#define S_OBQULPHIPARERR 21 +#define V_OBQULPHIPARERR(x) ((x) << S_OBQULPHIPARERR) +#define F_OBQULPHIPARERR V_OBQULPHIPARERR(1U) + +#define S_OBQSGEPARERR 20 +#define V_OBQSGEPARERR(x) ((x) << S_OBQSGEPARERR) +#define F_OBQSGEPARERR V_OBQSGEPARERR(1U) + +#define S_DCACHEPARERR 19 +#define V_DCACHEPARERR(x) ((x) << S_DCACHEPARERR) +#define F_DCACHEPARERR V_DCACHEPARERR(1U) + +#define S_ICACHEPARERR 18 +#define V_ICACHEPARERR(x) ((x) << 
S_ICACHEPARERR) +#define F_ICACHEPARERR V_ICACHEPARERR(1U) + +#define S_DRAMPARERR 17 +#define V_DRAMPARERR(x) ((x) << S_DRAMPARERR) +#define F_DRAMPARERR V_DRAMPARERR(1U) + #define S_TIMER1INTEN 15 #define V_TIMER1INTEN(x) ((x) << S_TIMER1INTEN) #define F_TIMER1INTEN V_TIMER1INTEN(1U) @@ -3043,6 +3461,10 @@ $FreeBSD$ #define V_DBMAXOPCNT(x) ((x) << S_DBMAXOPCNT) #define G_DBMAXOPCNT(x) (((x) >> S_DBMAXOPCNT) & M_DBMAXOPCNT) +#define S_IPV6ENABLE 15 +#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE) +#define F_IPV6ENABLE V_IPV6ENABLE(1U) + #define S_NICMODE 14 #define V_NICMODE(x) ((x) << S_NICMODE) #define F_NICMODE V_NICMODE(1U) @@ -3087,12 +3509,16 @@ $FreeBSD$ #define V_CTUNNEL(x) ((x) << S_CTUNNEL) #define F_CTUNNEL V_CTUNNEL(1U) -#define S_IPV6ENABLE 15 -#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE) -#define F_IPV6ENABLE V_IPV6ENABLE(1U) - #define A_TP_OUT_CONFIG 0x304 +#define S_IPIDSPLITMODE 16 +#define V_IPIDSPLITMODE(x) ((x) << S_IPIDSPLITMODE) +#define F_IPIDSPLITMODE V_IPIDSPLITMODE(1U) + +#define S_VLANEXTRACTIONENABLE2NDPORT 13 +#define V_VLANEXTRACTIONENABLE2NDPORT(x) ((x) << S_VLANEXTRACTIONENABLE2NDPORT) +#define F_VLANEXTRACTIONENABLE2NDPORT V_VLANEXTRACTIONENABLE2NDPORT(1U) + #define S_VLANEXTRACTIONENABLE 12 #define V_VLANEXTRACTIONENABLE(x) ((x) << S_VLANEXTRACTIONENABLE) #define F_VLANEXTRACTIONENABLE V_VLANEXTRACTIONENABLE(1U) @@ -3129,16 +3555,13 @@ $FreeBSD$ #define V_OUT_CETHERNET(x) ((x) << S_OUT_CETHERNET) #define F_OUT_CETHERNET V_OUT_CETHERNET(1U) -#define S_IPIDSPLITMODE 16 -#define V_IPIDSPLITMODE(x) ((x) << S_IPIDSPLITMODE) -#define F_IPIDSPLITMODE V_IPIDSPLITMODE(1U) - -#define S_VLANEXTRACTIONENABLE2NDPORT 13 -#define V_VLANEXTRACTIONENABLE2NDPORT(x) ((x) << S_VLANEXTRACTIONENABLE2NDPORT) -#define F_VLANEXTRACTIONENABLE2NDPORT V_VLANEXTRACTIONENABLE2NDPORT(1U) - #define A_TP_GLOBAL_CONFIG 0x308 +#define S_SYNCOOKIEPARAMS 26 +#define M_SYNCOOKIEPARAMS 0x3f +#define V_SYNCOOKIEPARAMS(x) ((x) << S_SYNCOOKIEPARAMS) +#define 
G_SYNCOOKIEPARAMS(x) (((x) >> S_SYNCOOKIEPARAMS) & M_SYNCOOKIEPARAMS) + #define S_RXFLOWCONTROLDISABLE 25 #define V_RXFLOWCONTROLDISABLE(x) ((x) << S_RXFLOWCONTROLDISABLE) #define F_RXFLOWCONTROLDISABLE V_RXFLOWCONTROLDISABLE(1U) @@ -3206,11 +3629,6 @@ $FreeBSD$ #define V_IPTTL(x) ((x) << S_IPTTL) #define G_IPTTL(x) (((x) >> S_IPTTL) & M_IPTTL) -#define S_SYNCOOKIEPARAMS 26 -#define M_SYNCOOKIEPARAMS 0x3f -#define V_SYNCOOKIEPARAMS(x) ((x) << S_SYNCOOKIEPARAMS) -#define G_SYNCOOKIEPARAMS(x) (((x) >> S_SYNCOOKIEPARAMS) & M_SYNCOOKIEPARAMS) - #define A_TP_GLOBAL_RX_CREDIT 0x30c #define A_TP_CMM_SIZE 0x310 @@ -3228,16 +3646,16 @@ $FreeBSD$ #define A_TP_CMM_TIMER_BASE 0x318 -#define S_CMTIMERBASE 0 -#define M_CMTIMERBASE 0xfffffff -#define V_CMTIMERBASE(x) ((x) << S_CMTIMERBASE) -#define G_CMTIMERBASE(x) (((x) >> S_CMTIMERBASE) & M_CMTIMERBASE) - #define S_CMTIMERMAXNUM 28 #define M_CMTIMERMAXNUM 0x3 #define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM) #define G_CMTIMERMAXNUM(x) (((x) >> S_CMTIMERMAXNUM) & M_CMTIMERMAXNUM) +#define S_CMTIMERBASE 0 +#define M_CMTIMERBASE 0xfffffff +#define V_CMTIMERBASE(x) ((x) << S_CMTIMERBASE) +#define G_CMTIMERBASE(x) (((x) >> S_CMTIMERBASE) & M_CMTIMERBASE) + #define A_TP_PMM_SIZE 0x31c #define S_PMSIZE 0 @@ -3339,6 +3757,26 @@ $FreeBSD$ #define A_TP_PC_CONFIG 0x348 +#define S_CMCACHEDISABLE 31 +#define V_CMCACHEDISABLE(x) ((x) << S_CMCACHEDISABLE) +#define F_CMCACHEDISABLE V_CMCACHEDISABLE(1U) + +#define S_ENABLEOCSPIFULL 30 +#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL) +#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U) + +#define S_ENABLEFLMERRORDDP 29 +#define V_ENABLEFLMERRORDDP(x) ((x) << S_ENABLEFLMERRORDDP) +#define F_ENABLEFLMERRORDDP V_ENABLEFLMERRORDDP(1U) + +#define S_LOCKTID 28 +#define V_LOCKTID(x) ((x) << S_LOCKTID) +#define F_LOCKTID V_LOCKTID(1U) + +#define S_FIXRCVWND 27 +#define V_FIXRCVWND(x) ((x) << S_FIXRCVWND) +#define F_FIXRCVWND V_FIXRCVWND(1U) + #define S_TXTOSQUEUEMAPMODE 26 #define 
V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE) #define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U) @@ -3436,27 +3874,23 @@ $FreeBSD$ #define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA) #define G_TABLELATENCYDELTA(x) (((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA) -#define S_CMCACHEDISABLE 31 -#define V_CMCACHEDISABLE(x) ((x) << S_CMCACHEDISABLE) -#define F_CMCACHEDISABLE V_CMCACHEDISABLE(1U) - -#define S_ENABLEOCSPIFULL 30 -#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL) -#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U) +#define A_TP_PC_CONFIG2 0x34c -#define S_ENABLEFLMERRORDDP 29 -#define V_ENABLEFLMERRORDDP(x) ((x) << S_ENABLEFLMERRORDDP) -#define F_ENABLEFLMERRORDDP V_ENABLEFLMERRORDDP(1U) +#define S_DISBLEDAPARBIT0 15 +#define V_DISBLEDAPARBIT0(x) ((x) << S_DISBLEDAPARBIT0) +#define F_DISBLEDAPARBIT0 V_DISBLEDAPARBIT0(1U) -#define S_LOCKTID 28 -#define V_LOCKTID(x) ((x) << S_LOCKTID) -#define F_LOCKTID V_LOCKTID(1U) +#define S_ENABLEARPMISS 13 +#define V_ENABLEARPMISS(x) ((x) << S_ENABLEARPMISS) +#define F_ENABLEARPMISS V_ENABLEARPMISS(1U) -#define S_FIXRCVWND 27 -#define V_FIXRCVWND(x) ((x) << S_FIXRCVWND) -#define F_FIXRCVWND V_FIXRCVWND(1U) +#define S_ENABLENONOFDTNLSYN 12 +#define V_ENABLENONOFDTNLSYN(x) ((x) << S_ENABLENONOFDTNLSYN) +#define F_ENABLENONOFDTNLSYN V_ENABLENONOFDTNLSYN(1U) -#define A_TP_PC_CONFIG2 0x34c +#define S_ENABLEIPV6RSS 11 +#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS) +#define F_ENABLEIPV6RSS V_ENABLEIPV6RSS(1U) #define S_ENABLEDROPRQEMPTYPKT 10 #define V_ENABLEDROPRQEMPTYPKT(x) ((x) << S_ENABLEDROPRQEMPTYPKT) @@ -3482,9 +3916,9 @@ $FreeBSD$ #define V_ENABLETXPORTFROMDA(x) ((x) << S_ENABLETXPORTFROMDA) #define F_ENABLETXPORTFROMDA V_ENABLETXPORTFROMDA(1U) -#define S_CHDRAFULL 4 -#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL) -#define F_CHDRAFULL V_CHDRAFULL(1U) +#define S_ENABLECHDRAFULL 4 +#define V_ENABLECHDRAFULL(x) ((x) << S_ENABLECHDRAFULL) +#define F_ENABLECHDRAFULL V_ENABLECHDRAFULL(1U) #define 
S_ENABLENONOFDSCBBIT 3 #define V_ENABLENONOFDSCBBIT(x) ((x) << S_ENABLENONOFDSCBBIT) @@ -3502,6 +3936,10 @@ $FreeBSD$ #define V_ENABLEOLDRXFORWARD(x) ((x) << S_ENABLEOLDRXFORWARD) #define F_ENABLEOLDRXFORWARD V_ENABLEOLDRXFORWARD(1U) +#define S_CHDRAFULL 4 +#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL) +#define F_CHDRAFULL V_CHDRAFULL(1U) + #define A_TP_TCP_BACKOFF_REG0 0x350 #define S_TIMERBACKOFFINDEX3 24 @@ -3662,6 +4100,10 @@ $FreeBSD$ #define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO) #define F_TXPACEAUTO V_TXPACEAUTO(1U) +#define S_RXURGTUNNEL 6 +#define V_RXURGTUNNEL(x) ((x) << S_RXURGTUNNEL) +#define F_RXURGTUNNEL V_RXURGTUNNEL(1U) + #define S_RXURGMODE 5 #define V_RXURGMODE(x) ((x) << S_RXURGMODE) #define F_RXURGMODE V_RXURGMODE(1U) @@ -3683,10 +4125,6 @@ $FreeBSD$ #define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN) #define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U) -#define S_RXURGTUNNEL 6 -#define V_RXURGTUNNEL(x) ((x) << S_RXURGTUNNEL) -#define F_RXURGTUNNEL V_RXURGTUNNEL(1U) - #define A_TP_PARA_REG4 0x370 #define S_HIGHSPEEDCFG 24 @@ -3720,6 +4158,10 @@ $FreeBSD$ #define V_SCHDENABLE(x) ((x) << S_SCHDENABLE) #define F_SCHDENABLE V_SCHDENABLE(1U) +#define S_RXDDPOFFINIT 3 +#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT) +#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U) + #define S_ONFLYDDPENABLE 2 #define V_ONFLYDDPENABLE(x) ((x) << S_ONFLYDDPENABLE) #define F_ONFLYDDPENABLE V_ONFLYDDPENABLE(1U) @@ -3739,33 +4181,33 @@ $FreeBSD$ #define V_TXPDUSIZEADJ(x) ((x) << S_TXPDUSIZEADJ) #define G_TXPDUSIZEADJ(x) (((x) >> S_TXPDUSIZEADJ) & M_TXPDUSIZEADJ) -#define S_ENABLEEPDU 14 -#define V_ENABLEEPDU(x) ((x) << S_ENABLEEPDU) -#define F_ENABLEEPDU V_ENABLEEPDU(1U) +#define S_ENABLEDEFERACK 12 +#define V_ENABLEDEFERACK(x) ((x) << S_ENABLEDEFERACK) +#define F_ENABLEDEFERACK V_ENABLEDEFERACK(1U) -#define S_T3A_ENABLEESND 13 -#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND) -#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U) +#define S_ENABLEESND 11 +#define V_ENABLEESND(x) 
((x) << S_ENABLEESND) +#define F_ENABLEESND V_ENABLEESND(1U) -#define S_T3A_ENABLECSND 12 -#define V_T3A_ENABLECSND(x) ((x) << S_T3A_ENABLECSND) -#define F_T3A_ENABLECSND V_T3A_ENABLECSND(1U) +#define S_ENABLECSND 10 +#define V_ENABLECSND(x) ((x) << S_ENABLECSND) +#define F_ENABLECSND V_ENABLECSND(1U) -#define S_T3A_ENABLEDEFERACK 9 -#define V_T3A_ENABLEDEFERACK(x) ((x) << S_T3A_ENABLEDEFERACK) -#define F_T3A_ENABLEDEFERACK V_T3A_ENABLEDEFERACK(1U) +#define S_ENABLEPDUE 9 +#define V_ENABLEPDUE(x) ((x) << S_ENABLEPDUE) +#define F_ENABLEPDUE V_ENABLEPDUE(1U) #define S_ENABLEPDUC 8 #define V_ENABLEPDUC(x) ((x) << S_ENABLEPDUC) #define F_ENABLEPDUC V_ENABLEPDUC(1U) -#define S_ENABLEPDUI 7 -#define V_ENABLEPDUI(x) ((x) << S_ENABLEPDUI) -#define F_ENABLEPDUI V_ENABLEPDUI(1U) +#define S_ENABLEBUFI 7 +#define V_ENABLEBUFI(x) ((x) << S_ENABLEBUFI) +#define F_ENABLEBUFI V_ENABLEBUFI(1U) -#define S_T3A_ENABLEPDUE 6 -#define V_T3A_ENABLEPDUE(x) ((x) << S_T3A_ENABLEPDUE) -#define F_T3A_ENABLEPDUE V_T3A_ENABLEPDUE(1U) +#define S_ENABLEBUFE 6 +#define V_ENABLEBUFE(x) ((x) << S_ENABLEBUFE) +#define F_ENABLEBUFE V_ENABLEBUFE(1U) #define S_ENABLEDEFER 5 #define V_ENABLEDEFER(x) ((x) << S_ENABLEDEFER) @@ -3791,29 +4233,29 @@ $FreeBSD$ #define V_DISABLEPDUXMT(x) ((x) << S_DISABLEPDUXMT) #define F_DISABLEPDUXMT V_DISABLEPDUXMT(1U) -#define S_ENABLEDEFERACK 12 -#define V_ENABLEDEFERACK(x) ((x) << S_ENABLEDEFERACK) -#define F_ENABLEDEFERACK V_ENABLEDEFERACK(1U) +#define S_ENABLEEPDU 14 +#define V_ENABLEEPDU(x) ((x) << S_ENABLEEPDU) +#define F_ENABLEEPDU V_ENABLEEPDU(1U) -#define S_ENABLEESND 11 -#define V_ENABLEESND(x) ((x) << S_ENABLEESND) -#define F_ENABLEESND V_ENABLEESND(1U) +#define S_T3A_ENABLEESND 13 +#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND) +#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U) -#define S_ENABLECSND 10 -#define V_ENABLECSND(x) ((x) << S_ENABLECSND) -#define F_ENABLECSND V_ENABLECSND(1U) +#define S_T3A_ENABLECSND 12 +#define V_T3A_ENABLECSND(x) ((x) << 
S_T3A_ENABLECSND) +#define F_T3A_ENABLECSND V_T3A_ENABLECSND(1U) -#define S_ENABLEPDUE 9 -#define V_ENABLEPDUE(x) ((x) << S_ENABLEPDUE) -#define F_ENABLEPDUE V_ENABLEPDUE(1U) +#define S_T3A_ENABLEDEFERACK 9 +#define V_T3A_ENABLEDEFERACK(x) ((x) << S_T3A_ENABLEDEFERACK) +#define F_T3A_ENABLEDEFERACK V_T3A_ENABLEDEFERACK(1U) -#define S_ENABLEBUFI 7 -#define V_ENABLEBUFI(x) ((x) << S_ENABLEBUFI) -#define F_ENABLEBUFI V_ENABLEBUFI(1U) +#define S_ENABLEPDUI 7 +#define V_ENABLEPDUI(x) ((x) << S_ENABLEPDUI) +#define F_ENABLEPDUI V_ENABLEPDUI(1U) -#define S_ENABLEBUFE 6 -#define V_ENABLEBUFE(x) ((x) << S_ENABLEBUFE) -#define F_ENABLEBUFE V_ENABLEBUFE(1U) +#define S_T3A_ENABLEPDUE 6 +#define V_T3A_ENABLEPDUE(x) ((x) << S_T3A_ENABLEPDUE) +#define F_T3A_ENABLEPDUE V_T3A_ENABLEPDUE(1U) #define A_TP_PARA_REG7 0x37c @@ -4302,6 +4744,131 @@ $FreeBSD$ #define G_CMMAXPSTRUCT(x) (((x) >> S_CMMAXPSTRUCT) & M_CMMAXPSTRUCT) #define A_TP_INT_ENABLE 0x470 + +#define S_FLMTXFLSTEMPTY 30 +#define V_FLMTXFLSTEMPTY(x) ((x) << S_FLMTXFLSTEMPTY) +#define F_FLMTXFLSTEMPTY V_FLMTXFLSTEMPTY(1U) + +#define S_FLMRXFLSTEMPTY 29 +#define V_FLMRXFLSTEMPTY(x) ((x) << S_FLMRXFLSTEMPTY) +#define F_FLMRXFLSTEMPTY V_FLMRXFLSTEMPTY(1U) + +#define S_FLMPERRSET 28 +#define V_FLMPERRSET(x) ((x) << S_FLMPERRSET) +#define F_FLMPERRSET V_FLMPERRSET(1U) + +#define S_PROTOCOLSRAMPERR 27 +#define V_PROTOCOLSRAMPERR(x) ((x) << S_PROTOCOLSRAMPERR) +#define F_PROTOCOLSRAMPERR V_PROTOCOLSRAMPERR(1U) + +#define S_ARPLUTPERR 26 +#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR) +#define F_ARPLUTPERR V_ARPLUTPERR(1U) + +#define S_CMRCFOPPERR 25 +#define V_CMRCFOPPERR(x) ((x) << S_CMRCFOPPERR) +#define F_CMRCFOPPERR V_CMRCFOPPERR(1U) + +#define S_CMCACHEPERR 24 +#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR) +#define F_CMCACHEPERR V_CMCACHEPERR(1U) + +#define S_CMRCFDATAPERR 23 +#define V_CMRCFDATAPERR(x) ((x) << S_CMRCFDATAPERR) +#define F_CMRCFDATAPERR V_CMRCFDATAPERR(1U) + +#define S_DBL2TLUTPERR 22 +#define 
V_DBL2TLUTPERR(x) ((x) << S_DBL2TLUTPERR) +#define F_DBL2TLUTPERR V_DBL2TLUTPERR(1U) + +#define S_DBTXTIDPERR 21 +#define V_DBTXTIDPERR(x) ((x) << S_DBTXTIDPERR) +#define F_DBTXTIDPERR V_DBTXTIDPERR(1U) + +#define S_DBEXTPERR 20 +#define V_DBEXTPERR(x) ((x) << S_DBEXTPERR) +#define F_DBEXTPERR V_DBEXTPERR(1U) + +#define S_DBOPPERR 19 +#define V_DBOPPERR(x) ((x) << S_DBOPPERR) +#define F_DBOPPERR V_DBOPPERR(1U) + +#define S_TMCACHEPERR 18 +#define V_TMCACHEPERR(x) ((x) << S_TMCACHEPERR) +#define F_TMCACHEPERR V_TMCACHEPERR(1U) + +#define S_ETPOUTCPLFIFOPERR 17 +#define V_ETPOUTCPLFIFOPERR(x) ((x) << S_ETPOUTCPLFIFOPERR) +#define F_ETPOUTCPLFIFOPERR V_ETPOUTCPLFIFOPERR(1U) + +#define S_ETPOUTTCPFIFOPERR 16 +#define V_ETPOUTTCPFIFOPERR(x) ((x) << S_ETPOUTTCPFIFOPERR) +#define F_ETPOUTTCPFIFOPERR V_ETPOUTTCPFIFOPERR(1U) + +#define S_ETPOUTIPFIFOPERR 15 +#define V_ETPOUTIPFIFOPERR(x) ((x) << S_ETPOUTIPFIFOPERR) +#define F_ETPOUTIPFIFOPERR V_ETPOUTIPFIFOPERR(1U) + +#define S_ETPOUTETHFIFOPERR 14 +#define V_ETPOUTETHFIFOPERR(x) ((x) << S_ETPOUTETHFIFOPERR) +#define F_ETPOUTETHFIFOPERR V_ETPOUTETHFIFOPERR(1U) + +#define S_ETPINCPLFIFOPERR 13 +#define V_ETPINCPLFIFOPERR(x) ((x) << S_ETPINCPLFIFOPERR) +#define F_ETPINCPLFIFOPERR V_ETPINCPLFIFOPERR(1U) + +#define S_ETPINTCPOPTFIFOPERR 12 +#define V_ETPINTCPOPTFIFOPERR(x) ((x) << S_ETPINTCPOPTFIFOPERR) +#define F_ETPINTCPOPTFIFOPERR V_ETPINTCPOPTFIFOPERR(1U) + +#define S_ETPINTCPFIFOPERR 11 +#define V_ETPINTCPFIFOPERR(x) ((x) << S_ETPINTCPFIFOPERR) +#define F_ETPINTCPFIFOPERR V_ETPINTCPFIFOPERR(1U) + +#define S_ETPINIPFIFOPERR 10 +#define V_ETPINIPFIFOPERR(x) ((x) << S_ETPINIPFIFOPERR) +#define F_ETPINIPFIFOPERR V_ETPINIPFIFOPERR(1U) + +#define S_ETPINETHFIFOPERR 9 +#define V_ETPINETHFIFOPERR(x) ((x) << S_ETPINETHFIFOPERR) +#define F_ETPINETHFIFOPERR V_ETPINETHFIFOPERR(1U) + +#define S_CTPOUTCPLFIFOPERR 8 +#define V_CTPOUTCPLFIFOPERR(x) ((x) << S_CTPOUTCPLFIFOPERR) +#define F_CTPOUTCPLFIFOPERR V_CTPOUTCPLFIFOPERR(1U) + 
+#define S_CTPOUTTCPFIFOPERR 7 +#define V_CTPOUTTCPFIFOPERR(x) ((x) << S_CTPOUTTCPFIFOPERR) +#define F_CTPOUTTCPFIFOPERR V_CTPOUTTCPFIFOPERR(1U) + +#define S_CTPOUTIPFIFOPERR 6 +#define V_CTPOUTIPFIFOPERR(x) ((x) << S_CTPOUTIPFIFOPERR) +#define F_CTPOUTIPFIFOPERR V_CTPOUTIPFIFOPERR(1U) + +#define S_CTPOUTETHFIFOPERR 5 +#define V_CTPOUTETHFIFOPERR(x) ((x) << S_CTPOUTETHFIFOPERR) +#define F_CTPOUTETHFIFOPERR V_CTPOUTETHFIFOPERR(1U) + +#define S_CTPINCPLFIFOPERR 4 +#define V_CTPINCPLFIFOPERR(x) ((x) << S_CTPINCPLFIFOPERR) +#define F_CTPINCPLFIFOPERR V_CTPINCPLFIFOPERR(1U) + +#define S_CTPINTCPOPFIFOPERR 3 +#define V_CTPINTCPOPFIFOPERR(x) ((x) << S_CTPINTCPOPFIFOPERR) +#define F_CTPINTCPOPFIFOPERR V_CTPINTCPOPFIFOPERR(1U) + +#define S_CTPINTCPFIFOPERR 2 +#define V_CTPINTCPFIFOPERR(x) ((x) << S_CTPINTCPFIFOPERR) +#define F_CTPINTCPFIFOPERR V_CTPINTCPFIFOPERR(1U) + +#define S_CTPINIPFIFOPERR 1 +#define V_CTPINIPFIFOPERR(x) ((x) << S_CTPINIPFIFOPERR) +#define F_CTPINIPFIFOPERR V_CTPINIPFIFOPERR(1U) + +#define S_CTPINETHFIFOPERR 0 +#define V_CTPINETHFIFOPERR(x) ((x) << S_CTPINETHFIFOPERR) +#define F_CTPINETHFIFOPERR V_CTPINETHFIFOPERR(1U) + #define A_TP_INT_CAUSE 0x474 #define A_TP_FLM_FREE_PS_CNT 0x480 @@ -4334,16 +4901,6 @@ $FreeBSD$ #define A_TP_DEBUG_SEL 0x4a8 #define A_TP_DEBUG_FLAGS 0x4ac -#define S_RXDEBUGFLAGS 16 -#define M_RXDEBUGFLAGS 0xffff -#define V_RXDEBUGFLAGS(x) ((x) << S_RXDEBUGFLAGS) -#define G_RXDEBUGFLAGS(x) (((x) >> S_RXDEBUGFLAGS) & M_RXDEBUGFLAGS) - -#define S_TXDEBUGFLAGS 0 -#define M_TXDEBUGFLAGS 0xffff -#define V_TXDEBUGFLAGS(x) ((x) << S_TXDEBUGFLAGS) -#define G_TXDEBUGFLAGS(x) (((x) >> S_TXDEBUGFLAGS) & M_TXDEBUGFLAGS) - #define S_RXTIMERDACKFIRST 26 #define V_RXTIMERDACKFIRST(x) ((x) << S_RXTIMERDACKFIRST) #define F_RXTIMERDACKFIRST V_RXTIMERDACKFIRST(1U) @@ -4436,13 +4993,23 @@ $FreeBSD$ #define V_TXRCVADVLTMSS(x) ((x) << S_TXRCVADVLTMSS) #define F_TXRCVADVLTMSS V_TXRCVADVLTMSS(1U) +#define S_RXDEBUGFLAGS 16 +#define M_RXDEBUGFLAGS 0xffff 
+#define V_RXDEBUGFLAGS(x) ((x) << S_RXDEBUGFLAGS) +#define G_RXDEBUGFLAGS(x) (((x) >> S_RXDEBUGFLAGS) & M_RXDEBUGFLAGS) + +#define S_TXDEBUGFLAGS 0 +#define M_TXDEBUGFLAGS 0xffff +#define V_TXDEBUGFLAGS(x) ((x) << S_TXDEBUGFLAGS) +#define G_TXDEBUGFLAGS(x) (((x) >> S_TXDEBUGFLAGS) & M_TXDEBUGFLAGS) + +#define A_TP_PROXY_FLOW_CNTL 0x4b0 #define A_TP_CM_FLOW_CNTL_MODE 0x4b0 #define S_CMFLOWCACHEDISABLE 0 #define V_CMFLOWCACHEDISABLE(x) ((x) << S_CMFLOWCACHEDISABLE) #define F_CMFLOWCACHEDISABLE V_CMFLOWCACHEDISABLE(1U) -#define A_TP_PROXY_FLOW_CNTL 0x4b0 #define A_TP_PC_CONGESTION_CNTL 0x4b4 #define S_EDROPTUNNEL 19 @@ -4811,6 +5378,38 @@ $FreeBSD$ #define A_ULPRX_INT_ENABLE 0x504 +#define S_DATASELFRAMEERR0 7 +#define V_DATASELFRAMEERR0(x) ((x) << S_DATASELFRAMEERR0) +#define F_DATASELFRAMEERR0 V_DATASELFRAMEERR0(1U) + +#define S_DATASELFRAMEERR1 6 +#define V_DATASELFRAMEERR1(x) ((x) << S_DATASELFRAMEERR1) +#define F_DATASELFRAMEERR1 V_DATASELFRAMEERR1(1U) + +#define S_PCMDMUXPERR 5 +#define V_PCMDMUXPERR(x) ((x) << S_PCMDMUXPERR) +#define F_PCMDMUXPERR V_PCMDMUXPERR(1U) + +#define S_ARBFPERR 4 +#define V_ARBFPERR(x) ((x) << S_ARBFPERR) +#define F_ARBFPERR V_ARBFPERR(1U) + +#define S_ARBPF0PERR 3 +#define V_ARBPF0PERR(x) ((x) << S_ARBPF0PERR) +#define F_ARBPF0PERR V_ARBPF0PERR(1U) + +#define S_ARBPF1PERR 2 +#define V_ARBPF1PERR(x) ((x) << S_ARBPF1PERR) +#define F_ARBPF1PERR V_ARBPF1PERR(1U) + +#define S_PARERRPCMD 1 +#define V_PARERRPCMD(x) ((x) << S_PARERRPCMD) +#define F_PARERRPCMD V_PARERRPCMD(1U) + +#define S_PARERRDATA 0 +#define V_PARERRDATA(x) ((x) << S_PARERRDATA) +#define F_PARERRDATA V_PARERRDATA(1U) + #define S_PARERR 0 #define V_PARERR(x) ((x) << S_PARERR) #define F_PARERR V_PARERR(1U) @@ -4893,12 +5492,40 @@ $FreeBSD$ #define A_ULPTX_CONFIG 0x580 +#define S_CFG_CQE_SOP_MASK 1 +#define V_CFG_CQE_SOP_MASK(x) ((x) << S_CFG_CQE_SOP_MASK) +#define F_CFG_CQE_SOP_MASK V_CFG_CQE_SOP_MASK(1U) + #define S_CFG_RR_ARB 0 #define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB) 
#define F_CFG_RR_ARB V_CFG_RR_ARB(1U) #define A_ULPTX_INT_ENABLE 0x584 +#define S_CMD_FIFO_PERR_SET1 7 +#define V_CMD_FIFO_PERR_SET1(x) ((x) << S_CMD_FIFO_PERR_SET1) +#define F_CMD_FIFO_PERR_SET1 V_CMD_FIFO_PERR_SET1(1U) + +#define S_CMD_FIFO_PERR_SET0 6 +#define V_CMD_FIFO_PERR_SET0(x) ((x) << S_CMD_FIFO_PERR_SET0) +#define F_CMD_FIFO_PERR_SET0 V_CMD_FIFO_PERR_SET0(1U) + +#define S_LSO_HDR_SRAM_PERR_SET1 5 +#define V_LSO_HDR_SRAM_PERR_SET1(x) ((x) << S_LSO_HDR_SRAM_PERR_SET1) +#define F_LSO_HDR_SRAM_PERR_SET1 V_LSO_HDR_SRAM_PERR_SET1(1U) + +#define S_LSO_HDR_SRAM_PERR_SET0 4 +#define V_LSO_HDR_SRAM_PERR_SET0(x) ((x) << S_LSO_HDR_SRAM_PERR_SET0) +#define F_LSO_HDR_SRAM_PERR_SET0 V_LSO_HDR_SRAM_PERR_SET0(1U) + +#define S_IMM_DATA_PERR_SET_CH1 3 +#define V_IMM_DATA_PERR_SET_CH1(x) ((x) << S_IMM_DATA_PERR_SET_CH1) +#define F_IMM_DATA_PERR_SET_CH1 V_IMM_DATA_PERR_SET_CH1(1U) + +#define S_IMM_DATA_PERR_SET_CH0 2 +#define V_IMM_DATA_PERR_SET_CH0(x) ((x) << S_IMM_DATA_PERR_SET_CH0) +#define F_IMM_DATA_PERR_SET_CH0 V_IMM_DATA_PERR_SET_CH0(1U) + #define S_PBL_BOUND_ERR_CH1 1 #define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1) #define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U) @@ -5118,6 +5745,10 @@ $FreeBSD$ #define A_MPS_CFG 0x600 +#define S_ENFORCEPKT 11 +#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT) +#define F_ENFORCEPKT V_ENFORCEPKT(1U) + #define S_SGETPQID 8 #define M_SGETPQID 0x7 #define V_SGETPQID(x) ((x) << S_SGETPQID) @@ -5155,10 +5786,6 @@ $FreeBSD$ #define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE) #define F_PORT0ACTIVE V_PORT0ACTIVE(1U) -#define S_ENFORCEPKT 11 -#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT) -#define F_ENFORCEPKT V_ENFORCEPKT(1U) - #define A_MPS_DRR_CFG1 0x604 #define S_RLDWTTPD1 11 @@ -5280,6 +5907,10 @@ $FreeBSD$ #define V_CPL_PKT_TID(x) ((x) << S_CPL_PKT_TID) #define G_CPL_PKT_TID(x) (((x) >> S_CPL_PKT_TID) & M_CPL_PKT_TID) +#define S_CIM_TO_UP_FULL_SIZE 4 +#define V_CIM_TO_UP_FULL_SIZE(x) ((x) << S_CIM_TO_UP_FULL_SIZE) +#define 
F_CIM_TO_UP_FULL_SIZE V_CIM_TO_UP_FULL_SIZE(1U) + #define S_CPU_NO_3F_CIM_ENABLE 3 #define V_CPU_NO_3F_CIM_ENABLE(x) ((x) << S_CPU_NO_3F_CIM_ENABLE) #define F_CPU_NO_3F_CIM_ENABLE V_CPU_NO_3F_CIM_ENABLE(1U) @@ -5313,6 +5944,10 @@ $FreeBSD$ #define A_CPL_INTR_ENABLE 0x650 +#define S_CIM_OP_MAP_PERR 5 +#define V_CIM_OP_MAP_PERR(x) ((x) << S_CIM_OP_MAP_PERR) +#define F_CIM_OP_MAP_PERR V_CIM_OP_MAP_PERR(1U) + #define S_CIM_OVFL_ERROR 4 #define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR) #define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U) @@ -5704,6 +6339,10 @@ $FreeBSD$ #define A_PL_INT_ENABLE0 0x6e0 +#define S_SW 25 +#define V_SW(x) ((x) << S_SW) +#define F_SW V_SW(1U) + #define S_EXT 24 #define V_EXT(x) ((x) << S_EXT) #define F_EXT V_EXT(1U) @@ -5792,18 +6431,14 @@ $FreeBSD$ #define V_SGE3(x) ((x) << S_SGE3) #define F_SGE3 V_SGE3(1U) -#define S_SW 25 -#define V_SW(x) ((x) << S_SW) -#define F_SW V_SW(1U) - #define A_PL_INT_CAUSE0 0x6e4 #define A_PL_INT_ENABLE1 0x6e8 #define A_PL_INT_CAUSE1 0x6ec #define A_PL_RST 0x6f0 -#define S_CRSTWRM 1 -#define V_CRSTWRM(x) ((x) << S_CRSTWRM) -#define F_CRSTWRM V_CRSTWRM(1U) +#define S_FATALPERREN 4 +#define V_FATALPERREN(x) ((x) << S_FATALPERREN) +#define F_FATALPERREN V_FATALPERREN(1U) #define S_SWINT1 3 #define V_SWINT1(x) ((x) << S_SWINT1) @@ -5813,6 +6448,10 @@ $FreeBSD$ #define V_SWINT0(x) ((x) << S_SWINT0) #define F_SWINT0 V_SWINT0(1U) +#define S_CRSTWRM 1 +#define V_CRSTWRM(x) ((x) << S_CRSTWRM) +#define F_CRSTWRM V_CRSTWRM(1U) + #define A_PL_REV 0x6f4 #define S_REV 0 @@ -5861,9 +6500,13 @@ $FreeBSD$ #define V_READ(x) ((x) << S_READ) #define F_READ V_READ(1U) -#define S_CAL_IMP_UPD 23 -#define V_CAL_IMP_UPD(x) ((x) << S_CAL_IMP_UPD) -#define F_CAL_IMP_UPD V_CAL_IMP_UPD(1U) +#define S_IMP_SET_UPDATE 24 +#define V_IMP_SET_UPDATE(x) ((x) << S_IMP_SET_UPDATE) +#define F_IMP_SET_UPDATE V_IMP_SET_UPDATE(1U) + +#define S_CAL_UPDATE 23 +#define V_CAL_UPDATE(x) ((x) << S_CAL_UPDATE) +#define F_CAL_UPDATE V_CAL_UPDATE(1U) #define 
S_CAL_BUSY 22 #define V_CAL_BUSY(x) ((x) << S_CAL_BUSY) @@ -5915,13 +6558,9 @@ $FreeBSD$ #define V_SET_PD(x) ((x) << S_SET_PD) #define G_SET_PD(x) (((x) >> S_SET_PD) & M_SET_PD) -#define S_IMP_SET_UPDATE 24 -#define V_IMP_SET_UPDATE(x) ((x) << S_IMP_SET_UPDATE) -#define F_IMP_SET_UPDATE V_IMP_SET_UPDATE(1U) - -#define S_CAL_UPDATE 23 -#define V_CAL_UPDATE(x) ((x) << S_CAL_UPDATE) -#define F_CAL_UPDATE V_CAL_UPDATE(1U) +#define S_CAL_IMP_UPD 23 +#define V_CAL_IMP_UPD(x) ((x) << S_CAL_IMP_UPD) +#define F_CAL_IMP_UPD V_CAL_IMP_UPD(1U) #define A_MC5_DB_CONFIG 0x704 @@ -5961,6 +6600,14 @@ $FreeBSD$ #define V_BUILD(x) ((x) << S_BUILD) #define F_BUILD V_BUILD(1U) +#define S_FILTEREN 11 +#define V_FILTEREN(x) ((x) << S_FILTEREN) +#define F_FILTEREN V_FILTEREN(1U) + +#define S_CLIPUPDATE 10 +#define V_CLIPUPDATE(x) ((x) << S_CLIPUPDATE) +#define F_CLIPUPDATE V_CLIPUPDATE(1U) + #define S_TM_IO_PDOWN 9 #define V_TM_IO_PDOWN(x) ((x) << S_TM_IO_PDOWN) #define F_TM_IO_PDOWN V_TM_IO_PDOWN(1U) @@ -5982,6 +6629,10 @@ $FreeBSD$ #define V_DBGIEN(x) ((x) << S_DBGIEN) #define F_DBGIEN V_DBGIEN(1U) +#define S_TCMCFGOVR 3 +#define V_TCMCFGOVR(x) ((x) << S_TCMCFGOVR) +#define F_TCMCFGOVR V_TCMCFGOVR(1U) + #define S_TMRDY 2 #define V_TMRDY(x) ((x) << S_TMRDY) #define F_TMRDY V_TMRDY(1U) @@ -5994,18 +6645,6 @@ $FreeBSD$ #define V_TMMODE(x) ((x) << S_TMMODE) #define F_TMMODE V_TMMODE(1U) -#define S_FILTEREN 11 -#define V_FILTEREN(x) ((x) << S_FILTEREN) -#define F_FILTEREN V_FILTEREN(1U) - -#define S_CLIPUPDATE 10 -#define V_CLIPUPDATE(x) ((x) << S_CLIPUPDATE) -#define F_CLIPUPDATE V_CLIPUPDATE(1U) - -#define S_TCMCFGOVR 3 -#define V_TCMCFGOVR(x) ((x) << S_TCMCFGOVR) -#define F_TCMCFGOVR V_TCMCFGOVR(1U) - #define A_MC5_MISC 0x708 #define S_LIP_CMP_UNAVAILABLE 0 @@ -6021,13 +6660,13 @@ $FreeBSD$ #define G_RTINDX(x) (((x) >> S_RTINDX) & M_RTINDX) #define A_MC5_DB_FILTER_TABLE 0x710 -#define A_MC5_DB_SERVER_INDEX 0x714 #define S_SRINDX 0 #define M_SRINDX 0x3fffff #define V_SRINDX(x) ((x) << 
S_SRINDX) #define G_SRINDX(x) (((x) >> S_SRINDX) & M_SRINDX) +#define A_MC5_DB_SERVER_INDEX 0x714 #define A_MC5_DB_LIP_RAM_ADDR 0x718 #define S_RAMWR 8 @@ -6115,6 +6754,7 @@ $FreeBSD$ #define V_CLIPMAPADDR(x) ((x) << S_CLIPMAPADDR) #define G_CLIPMAPADDR(x) (((x) >> S_CLIPMAPADDR) & M_CLIPMAPADDR) +#define A_MC5_DB_SIZE 0x73c #define A_MC5_DB_INT_ENABLE 0x740 #define S_MSGSEL 28 @@ -6630,6 +7270,14 @@ $FreeBSD$ #define A_XGM_RXFIFO_CFG 0x884 +#define S_RXFIFO_EMPTY 31 +#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY) +#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U) + +#define S_RXFIFO_FULL 30 +#define V_RXFIFO_FULL(x) ((x) << S_RXFIFO_FULL) +#define F_RXFIFO_FULL V_RXFIFO_FULL(1U) + #define S_RXFIFOPAUSEHWM 17 #define M_RXFIFOPAUSEHWM 0xfff #define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM) @@ -6662,6 +7310,22 @@ $FreeBSD$ #define A_XGM_TXFIFO_CFG 0x888 +#define S_TXFIFO_EMPTY 31 +#define V_TXFIFO_EMPTY(x) ((x) << S_TXFIFO_EMPTY) +#define F_TXFIFO_EMPTY V_TXFIFO_EMPTY(1U) + +#define S_TXFIFO_FULL 30 +#define V_TXFIFO_FULL(x) ((x) << S_TXFIFO_FULL) +#define F_TXFIFO_FULL V_TXFIFO_FULL(1U) + +#define S_UNDERUNFIX 22 +#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX) +#define F_UNDERUNFIX V_UNDERUNFIX(1U) + +#define S_ENDROPPKT 21 +#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT) +#define F_ENDROPPKT V_ENDROPPKT(1U) + #define S_TXIPG 13 #define M_TXIPG 0xff #define V_TXIPG(x) ((x) << S_TXIPG) @@ -6688,10 +7352,6 @@ $FreeBSD$ #define V_DISPREAMBLE(x) ((x) << S_DISPREAMBLE) #define F_DISPREAMBLE V_DISPREAMBLE(1U) -#define S_ENDROPPKT 21 -#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT) -#define F_ENDROPPKT V_ENDROPPKT(1U) - #define A_XGM_SLOW_TIMER 0x88c #define S_PAUSESLOWTIMEREN 31 @@ -6703,6 +7363,13 @@ $FreeBSD$ #define V_PAUSESLOWTIMER(x) ((x) << S_PAUSESLOWTIMER) #define G_PAUSESLOWTIMER(x) (((x) >> S_PAUSESLOWTIMER) & M_PAUSESLOWTIMER) +#define A_XGM_PAUSE_TIMER 0x890 + +#define S_PAUSETIMER 0 +#define M_PAUSETIMER 0xfffff +#define V_PAUSETIMER(x) ((x) << S_PAUSETIMER) +#define 
G_PAUSETIMER(x) (((x) >> S_PAUSETIMER) & M_PAUSETIMER) + #define A_XGM_SERDES_CTRL 0x890 #define S_SERDESEN 25 @@ -6761,13 +7428,6 @@ $FreeBSD$ #define V_TXENABLE(x) ((x) << S_TXENABLE) #define F_TXENABLE V_TXENABLE(1U) -#define A_XGM_PAUSE_TIMER 0x890 - -#define S_PAUSETIMER 0 -#define M_PAUSETIMER 0xfffff -#define V_PAUSETIMER(x) ((x) << S_PAUSETIMER) -#define G_PAUSETIMER(x) (((x) >> S_PAUSETIMER) & M_PAUSETIMER) - #define A_XGM_XAUI_PCS_TEST 0x894 #define S_TESTPATTERN 1 @@ -6792,6 +7452,14 @@ $FreeBSD$ #define A_XGM_RGMII_IMP 0x89c +#define S_CALRESET 8 +#define V_CALRESET(x) ((x) << S_CALRESET) +#define F_CALRESET V_CALRESET(1U) + +#define S_CALUPDATE 7 +#define V_CALUPDATE(x) ((x) << S_CALUPDATE) +#define F_CALUPDATE V_CALUPDATE(1U) + #define S_XGM_IMPSETUPDATE 6 #define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE) #define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U) @@ -6806,14 +7474,6 @@ $FreeBSD$ #define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU) #define G_RGMIIIMPPU(x) (((x) >> S_RGMIIIMPPU) & M_RGMIIIMPPU) -#define S_CALRESET 8 -#define V_CALRESET(x) ((x) << S_CALRESET) -#define F_CALRESET V_CALRESET(1U) - -#define S_CALUPDATE 7 -#define V_CALUPDATE(x) ((x) << S_CALUPDATE) -#define F_CALUPDATE V_CALUPDATE(1U) - #define A_XGM_XAUI_IMP 0x8a0 #define S_XGM_CALFAULT 29 @@ -6844,6 +7504,23 @@ $FreeBSD$ #define A_XGM_RX_MAX_PKT_SIZE 0x8a8 +#define S_RXMAXFRAMERSIZE 17 +#define M_RXMAXFRAMERSIZE 0x3fff +#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE) +#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE) + +#define S_RXENERRORGATHER 16 +#define V_RXENERRORGATHER(x) ((x) << S_RXENERRORGATHER) +#define F_RXENERRORGATHER V_RXENERRORGATHER(1U) + +#define S_RXENSINGLEFLIT 15 +#define V_RXENSINGLEFLIT(x) ((x) << S_RXENSINGLEFLIT) +#define F_RXENSINGLEFLIT V_RXENSINGLEFLIT(1U) + +#define S_RXENFRAMER 14 +#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER) +#define F_RXENFRAMER V_RXENFRAMER(1U) + #define S_RXMAXPKTSIZE 0 #define M_RXMAXPKTSIZE 
0x3fff #define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE) @@ -6851,6 +7528,10 @@ $FreeBSD$ #define A_XGM_RESET_CTRL 0x8ac +#define S_XGMAC_STOP_EN 4 +#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN) +#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U) + #define S_XG2G_RESET_ 3 #define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_) #define F_XG2G_RESET_ V_XG2G_RESET_(1U) @@ -6930,9 +7611,9 @@ $FreeBSD$ #define A_XGM_INT_ENABLE 0x8d4 -#define S_SERDESCMULOCK_LOSS 24 -#define V_SERDESCMULOCK_LOSS(x) ((x) << S_SERDESCMULOCK_LOSS) -#define F_SERDESCMULOCK_LOSS V_SERDESCMULOCK_LOSS(1U) +#define S_XAUIPCSDECERR 24 +#define V_XAUIPCSDECERR(x) ((x) << S_XAUIPCSDECERR) +#define F_XAUIPCSDECERR V_XAUIPCSDECERR(1U) #define S_RGMIIRXFIFOOVERFLOW 23 #define V_RGMIIRXFIFOOVERFLOW(x) ((x) << S_RGMIIRXFIFOOVERFLOW) @@ -6968,15 +7649,15 @@ $FreeBSD$ #define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW) #define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U) -#define S_SERDESBIST_ERR 8 -#define M_SERDESBIST_ERR 0xf -#define V_SERDESBIST_ERR(x) ((x) << S_SERDESBIST_ERR) -#define G_SERDESBIST_ERR(x) (((x) >> S_SERDESBIST_ERR) & M_SERDESBIST_ERR) +#define S_SERDESBISTERR 8 +#define M_SERDESBISTERR 0xf +#define V_SERDESBISTERR(x) ((x) << S_SERDESBISTERR) +#define G_SERDESBISTERR(x) (((x) >> S_SERDESBISTERR) & M_SERDESBISTERR) -#define S_SERDES_LOS 4 -#define M_SERDES_LOS 0xf -#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS) -#define G_SERDES_LOS(x) (((x) >> S_SERDES_LOS) & M_SERDES_LOS) +#define S_SERDESLOWSIGCHANGE 4 +#define M_SERDESLOWSIGCHANGE 0xf +#define V_SERDESLOWSIGCHANGE(x) ((x) << S_SERDESLOWSIGCHANGE) +#define G_SERDESLOWSIGCHANGE(x) (((x) >> S_SERDESLOWSIGCHANGE) & M_SERDESLOWSIGCHANGE) #define S_XAUIPCSCTCERR 3 #define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR) @@ -6994,15 +7675,19 @@ $FreeBSD$ #define V_XGM_INT(x) ((x) << S_XGM_INT) #define F_XGM_INT V_XGM_INT(1U) -#define S_SERDESBISTERR 8 -#define M_SERDESBISTERR 0xf -#define V_SERDESBISTERR(x) ((x) << S_SERDESBISTERR) -#define 
G_SERDESBISTERR(x) (((x) >> S_SERDESBISTERR) & M_SERDESBISTERR) +#define S_SERDESCMULOCK_LOSS 24 +#define V_SERDESCMULOCK_LOSS(x) ((x) << S_SERDESCMULOCK_LOSS) +#define F_SERDESCMULOCK_LOSS V_SERDESCMULOCK_LOSS(1U) -#define S_SERDESLOWSIGCHANGE 4 -#define M_SERDESLOWSIGCHANGE 0xf -#define V_SERDESLOWSIGCHANGE(x) ((x) << S_SERDESLOWSIGCHANGE) -#define G_SERDESLOWSIGCHANGE(x) (((x) >> S_SERDESLOWSIGCHANGE) & M_SERDESLOWSIGCHANGE) +#define S_SERDESBIST_ERR 8 +#define M_SERDESBIST_ERR 0xf +#define V_SERDESBIST_ERR(x) ((x) << S_SERDESBIST_ERR) +#define G_SERDESBIST_ERR(x) (((x) >> S_SERDESBIST_ERR) & M_SERDESBIST_ERR) + +#define S_SERDES_LOS 4 +#define M_SERDES_LOS 0xf +#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS) +#define G_SERDES_LOS(x) (((x) >> S_SERDES_LOS) & M_SERDES_LOS) #define A_XGM_INT_CAUSE 0x8d8 #define A_XGM_XAUI_ACT_CTRL 0x8dc @@ -7298,6 +7983,14 @@ $FreeBSD$ #define V_EXTBISTCHKFMD0(x) ((x) << S_EXTBISTCHKFMD0) #define F_EXTBISTCHKFMD0 V_EXTBISTCHKFMD0(1U) +#define S_LOWSIGFORCEEN0 2 +#define V_LOWSIGFORCEEN0(x) ((x) << S_LOWSIGFORCEEN0) +#define F_LOWSIGFORCEEN0 V_LOWSIGFORCEEN0(1U) + +#define S_LOWSIGFORCEVALUE0 1 +#define V_LOWSIGFORCEVALUE0(x) ((x) << S_LOWSIGFORCEVALUE0) +#define F_LOWSIGFORCEVALUE0 V_LOWSIGFORCEVALUE0(1U) + #define S_LOWSIG0 0 #define V_LOWSIG0(x) ((x) << S_LOWSIG0) #define F_LOWSIG0 V_LOWSIG0(1U) @@ -7313,6 +8006,14 @@ $FreeBSD$ #define V_EXTBISTCHKFMD1(x) ((x) << S_EXTBISTCHKFMD1) #define F_EXTBISTCHKFMD1 V_EXTBISTCHKFMD1(1U) +#define S_LOWSIGFORCEEN1 2 +#define V_LOWSIGFORCEEN1(x) ((x) << S_LOWSIGFORCEEN1) +#define F_LOWSIGFORCEEN1 V_LOWSIGFORCEEN1(1U) + +#define S_LOWSIGFORCEVALUE1 1 +#define V_LOWSIGFORCEVALUE1(x) ((x) << S_LOWSIGFORCEVALUE1) +#define F_LOWSIGFORCEVALUE1 V_LOWSIGFORCEVALUE1(1U) + #define S_LOWSIG1 0 #define V_LOWSIG1(x) ((x) << S_LOWSIG1) #define F_LOWSIG1 V_LOWSIG1(1U) @@ -7328,6 +8029,14 @@ $FreeBSD$ #define V_EXTBISTCHKFMD2(x) ((x) << S_EXTBISTCHKFMD2) #define F_EXTBISTCHKFMD2 V_EXTBISTCHKFMD2(1U) +#define 
S_LOWSIGFORCEEN2 2 +#define V_LOWSIGFORCEEN2(x) ((x) << S_LOWSIGFORCEEN2) +#define F_LOWSIGFORCEEN2 V_LOWSIGFORCEEN2(1U) + +#define S_LOWSIGFORCEVALUE2 1 +#define V_LOWSIGFORCEVALUE2(x) ((x) << S_LOWSIGFORCEVALUE2) +#define F_LOWSIGFORCEVALUE2 V_LOWSIGFORCEVALUE2(1U) + #define S_LOWSIG2 0 #define V_LOWSIG2(x) ((x) << S_LOWSIG2) #define F_LOWSIG2 V_LOWSIG2(1U) @@ -7343,6 +8052,14 @@ $FreeBSD$ #define V_EXTBISTCHKFMD3(x) ((x) << S_EXTBISTCHKFMD3) #define F_EXTBISTCHKFMD3 V_EXTBISTCHKFMD3(1U) +#define S_LOWSIGFORCEEN3 2 +#define V_LOWSIGFORCEEN3(x) ((x) << S_LOWSIGFORCEEN3) +#define F_LOWSIGFORCEEN3 V_LOWSIGFORCEEN3(1U) + +#define S_LOWSIGFORCEVALUE3 1 +#define V_LOWSIGFORCEVALUE3(x) ((x) << S_LOWSIGFORCEVALUE3) +#define F_LOWSIGFORCEVALUE3 V_LOWSIGFORCEVALUE3(1U) + #define S_LOWSIG3 0 #define V_LOWSIG3(x) ((x) << S_LOWSIG3) #define F_LOWSIG3 V_LOWSIG3(1U) diff --git a/sys/dev/cxgb/common/cxgb_t3_cpl.h b/sys/dev/cxgb/common/cxgb_t3_cpl.h index 1f0eb3f..dd24571 100644 --- a/sys/dev/cxgb/common/cxgb_t3_cpl.h +++ b/sys/dev/cxgb/common/cxgb_t3_cpl.h @@ -173,8 +173,9 @@ enum { /* TCP congestion control algorithms */ enum { /* RSS hash type */ RSS_HASH_NONE = 0, - RSS_HASH_2_TUPLE = 1 << 0, - RSS_HASH_4_TUPLE = 1 << 1 + RSS_HASH_2_TUPLE = 1, + RSS_HASH_4_TUPLE = 2, + RSS_HASH_TCPV6 = 3 }; union opcode_tid { @@ -1097,6 +1098,11 @@ struct cpl_rx_data_ddp { #define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET) #define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET) +#define S_DDP_DACK_MODE 22 +#define M_DDP_DACK_MODE 0x3 +#define V_DDP_DACK_MODE(x) ((x) << S_DDP_DACK_MODE) +#define G_DDP_DACK_MODE(x) (((x) >> S_DDP_DACK_MODE) & M_DDP_DACK_MODE) + #define S_DDP_URG 24 #define V_DDP_URG(x) ((x) << S_DDP_URG) #define F_DDP_URG V_DDP_URG(1U) diff --git a/sys/dev/cxgb/common/cxgb_t3_hw.c b/sys/dev/cxgb/common/cxgb_t3_hw.c index 6c8b53a..d082c74 100644 --- a/sys/dev/cxgb/common/cxgb_t3_hw.c +++ b/sys/dev/cxgb/common/cxgb_t3_hw.c @@ -404,6 +404,29 @@ int t3_phy_advertise(struct cphy 
*phy, unsigned int advert) } /** + * t3_phy_advertise_fiber - set fiber PHY advertisement register + * @phy: the PHY to operate on + * @advert: bitmap of capabilities the PHY should advertise + * + * Sets a fiber PHY's advertisement register to advertise the + * requested capabilities. + */ +int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert) +{ + unsigned int val = 0; + + if (advert & ADVERTISED_1000baseT_Half) + val |= ADVERTISE_1000XHALF; + if (advert & ADVERTISED_1000baseT_Full) + val |= ADVERTISE_1000XFULL; + if (advert & ADVERTISED_Pause) + val |= ADVERTISE_1000XPAUSE; + if (advert & ADVERTISED_Asym_Pause) + val |= ADVERTISE_1000XPSE_ASYM; + return mdio_write(phy, 0, MII_ADVERTISE, val); +} + +/** * t3_set_phy_speed_duplex - force PHY speed and duplex * @phy: the PHY to operate on * @speed: requested PHY speed @@ -451,8 +474,8 @@ static struct adapter_info t3_adap_info[] = { &mi1_mdio_ops, "Chelsio T302" }, { 1, 0, 0, 0, 0, F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | - F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0, - SUPPORTED_10000baseT_Full | SUPPORTED_AUI, + F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, + 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, &mi1_mdio_ext_ops, "Chelsio T310" }, { 1, 1, 0, 0, 0, F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN | @@ -476,31 +499,20 @@ const struct adapter_info *t3_get_adapter_info(unsigned int id) return id < ARRAY_SIZE(t3_adap_info) ? 
&t3_adap_info[id] : NULL; } -#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \ - SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII) -#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI) - static struct port_type_info port_types[] = { { NULL }, - { t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE, - "10GBASE-XR" }, - { t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ, - "10/100/1000BASE-T" }, - { t3_mv88e1xxx_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ, - "10/100/1000BASE-T" }, - { t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" }, - { NULL, CAPS_10G, "10GBASE-KX4" }, - { t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" }, - { t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE, - "10GBASE-SR" }, - { NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" }, + { t3_ael1002_phy_prep }, + { t3_vsc8211_phy_prep }, + { t3_mv88e1xxx_phy_prep }, + { t3_xaui_direct_phy_prep }, + { NULL }, + { t3_qt2045_phy_prep }, + { t3_ael1006_phy_prep }, + { NULL }, }; -#undef CAPS_1G -#undef CAPS_10G - #define VPD_ENTRY(name, len) \ - u8 name##_kword[2]; u8 name##_len; char name##_data[len] + u8 name##_kword[2]; u8 name##_len; u8 name##_data[len] /* * Partial EEPROM Vital Product Data structure. 
Includes only the ID and @@ -678,6 +690,15 @@ static int get_vpd_params(adapter_t *adapter, struct vpd_params *p) return 0; } +/* BIOS boot header */ +typedef struct boot_header_s { + u8 signature[2]; /* signature */ + u8 length; /* image length (include header) */ + u8 offset[4]; /* initialization vector */ + u8 reserved[19]; /* reserved */ + u8 exheader[2]; /* offset to expansion header */ +} boot_header_t; + /* serial flash and firmware constants */ enum { SF_ATTEMPTS = 5, /* max retries for SF1 operations */ @@ -694,7 +715,14 @@ enum { FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */ FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */ - FW_MIN_SIZE = 8 /* at least version and csum */ + FW_MIN_SIZE = 8, /* at least version and csum */ + FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR, + + BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */ + BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */ + BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */ + BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */ + BOOT_MAX_SIZE = 0xff*BOOT_SIZE_INC /* 1 byte * length increment */ }; /** @@ -817,16 +845,21 @@ int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords, * @addr: the start address to write * @n: length of data to write * @data: the data to write + * @byte_oriented: whether to store data as bytes or as words * * Writes up to a page of data (256 bytes) to the serial flash starting * at the given address. + * If @byte_oriented is set the write data is stored as a 32-bit + * big-endian array, otherwise in the processor's native endianess. 
+ * */ static int t3_write_flash(adapter_t *adapter, unsigned int addr, - unsigned int n, const u8 *data) + unsigned int n, const u8 *data, + int byte_oriented) { int ret; u32 buf[64]; - unsigned int i, c, left, val, offset = addr & 0xff; + unsigned int c, left, val, offset = addr & 0xff; if (addr + n > SF_SIZE || offset + n > 256) return -EINVAL; @@ -839,8 +872,10 @@ static int t3_write_flash(adapter_t *adapter, unsigned int addr, for (left = n; left; left -= c) { c = min(left, 4U); - for (val = 0, i = 0; i < c; ++i) - val = (val << 8) + *data++; + val = *(const u32*)data; + data += c; + if (byte_oriented) + val = htonl(val); ret = sf1_write(adapter, c, c != left, val); if (ret) @@ -850,7 +885,8 @@ static int t3_write_flash(adapter_t *adapter, unsigned int addr, return ret; /* Read the page to verify the write succeeded */ - ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); + ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, + byte_oriented); if (ret) return ret; @@ -887,16 +923,18 @@ int t3_get_tp_version(adapter_t *adapter, u32 *vers) * @adapter: the adapter * */ -int t3_check_tpsram_version(adapter_t *adapter) +int t3_check_tpsram_version(adapter_t *adapter, int *must_load) { int ret; u32 vers; unsigned int major, minor; - /* Get version loaded in SRAM */ - t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0); - ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0, - 1, 1, 5, 1); + if (adapter->params.rev == T3_REV_A) + return 0; + + *must_load = 1; + + ret = t3_get_tp_version(adapter, &vers); if (ret) return ret; @@ -908,9 +946,16 @@ int t3_check_tpsram_version(adapter_t *adapter) if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) return 0; - CH_WARN(adapter, "found wrong TP version (%u.%u), " - "driver needs version %d.%d\n", major, minor, - TP_VERSION_MAJOR, TP_VERSION_MINOR); + if (major != TP_VERSION_MAJOR) + CH_ERR(adapter, "found wrong TP version (%u.%u), " + "driver needs version %d.%d\n", major, minor, + 
TP_VERSION_MAJOR, TP_VERSION_MINOR); + else { + *must_load = 0; + CH_ERR(adapter, "found wrong TP version (%u.%u), " + "driver compiled for version %d.%d\n", major, minor, + TP_VERSION_MAJOR, TP_VERSION_MINOR); + } return -EINVAL; } @@ -966,12 +1011,13 @@ int t3_get_fw_version(adapter_t *adapter, u32 *vers) * Checks if an adapter's FW is compatible with the driver. Returns 0 * if the versions are compatible, a negative error otherwise. */ -int t3_check_fw_version(adapter_t *adapter) +int t3_check_fw_version(adapter_t *adapter, int *must_load) { int ret; u32 vers; unsigned int type, major, minor; + *must_load = 1; ret = t3_get_fw_version(adapter, &vers); if (ret) return ret; @@ -984,9 +1030,21 @@ int t3_check_fw_version(adapter_t *adapter) minor == FW_VERSION_MINOR) return 0; - CH_WARN(adapter, "found wrong FW version (%u.%u), " - "driver needs version %d.%d\n", major, minor, - FW_VERSION_MAJOR, FW_VERSION_MINOR); + if (major != FW_VERSION_MAJOR) + CH_ERR(adapter, "found wrong FW version(%u.%u), " + "driver needs version %u.%u\n", major, minor, + FW_VERSION_MAJOR, FW_VERSION_MINOR); + else if ((int)minor < FW_VERSION_MINOR) { + *must_load = 0; + CH_WARN(adapter, "found old FW minor version(%u.%u), " + "driver compiled for version %u.%u\n", major, minor, + FW_VERSION_MAJOR, FW_VERSION_MINOR); + } else { + CH_WARN(adapter, "found newer FW version(%u.%u), " + "driver compiled for version %u.%u\n", major, minor, + FW_VERSION_MAJOR, FW_VERSION_MINOR); + return 0; + } return -EINVAL; } @@ -1033,7 +1091,7 @@ int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size) if ((size & 3) || size < FW_MIN_SIZE) return -EINVAL; - if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR) + if (size - 8 > FW_MAX_SIZE) return -EFBIG; for (csum = 0, i = 0; i < size / sizeof(csum); i++) @@ -1052,7 +1110,7 @@ int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size) for (addr = FW_FLASH_BOOT_ADDR; size; ) { unsigned int chunk_size = min(size, 256U); - ret = 
t3_write_flash(adapter, addr, chunk_size, fw_data); + ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1); if (ret) goto out; @@ -1061,13 +1119,71 @@ int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size) size -= chunk_size; } - ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data); + ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data, 1); out: if (ret) CH_ERR(adapter, "firmware download failed, error %d\n", ret); return ret; } +/* + * t3_load_boot - download boot flash + * @adapter: the adapter + * @boot_data: the boot image to write + * @size: image size + * + * Write the supplied boot image to the card's serial flash. + * The boot image has the following sections: a 28-byte header and the + * boot image. + */ +int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size) +{ + boot_header_t *header = (boot_header_t *)boot_data; + int ret; + unsigned int addr; + unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16; + unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16; + + /* + * Perform some primitive sanity testing to avoid accidentally + * writing garbage over the boot sectors. We ought to check for + * more but it's not worth it for now ... 
+ */ + if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) { + CH_ERR(adapter, "boot image too small/large\n"); + return -EFBIG; + } + if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) { + CH_ERR(adapter, "boot image missing signature\n"); + return -EINVAL; + } + if (header->length * BOOT_SIZE_INC != size) { + CH_ERR(adapter, "boot image header length != image length\n"); + return -EINVAL; + } + + ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end); + if (ret) + goto out; + + for (addr = BOOT_FLASH_BOOT_ADDR; size; ) { + unsigned int chunk_size = min(size, 256U); + + ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0); + if (ret) + goto out; + + addr += chunk_size; + boot_data += chunk_size; + size -= chunk_size; + } + +out: + if (ret) + CH_ERR(adapter, "boot image download failed, error %d\n", ret); + return ret; +} + #define CIM_CTL_BASE 0x2000 /** @@ -1175,7 +1291,6 @@ int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) fc); /* Also disables autoneg */ phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); - phy->ops->reset(phy, 0); } else phy->ops->autoneg_enable(phy); } else { @@ -1248,7 +1363,13 @@ static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg, return fatal; } -#define SGE_INTR_MASK (F_RSPQDISABLED) +#define SGE_INTR_MASK (F_RSPQDISABLED | \ + F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \ + F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ + F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ + V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ + F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ + F_HIRCQPARITYERROR) #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ F_NFASRCHFAIL) @@ -1265,16 +1386,23 @@ static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg, #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\ F_PCIE_WFPARERR | F_PCIE_RFPARERR | 
F_PCIE_CFPARERR | \ /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \ - V_BISTERR(M_BISTERR) | F_PEXERR) -#define ULPRX_INTR_MASK F_PARERR -#define ULPTX_INTR_MASK 0 -#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \ + F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \ + F_TXPARERR | V_BISTERR(M_BISTERR)) +#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \ + F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \ + F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0) +#define ULPTX_INTR_MASK 0xfc +#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \ F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \ F_ZERO_SWITCH_ERROR) #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \ F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \ F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \ - F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT) + F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \ + F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \ + F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \ + F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \ + F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR) #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR)) @@ -1343,6 +1471,10 @@ static void pcie_intr_handler(adapter_t *adapter) { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 }, { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR), "PCI MSI-X table/PBA parity error", -1, 1 }, + { F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 }, + { F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 }, + { F_RXPARERR, "PCI Rx parity error", -1, 1 }, + { F_TXPARERR, "PCI Tx parity error", -1, 1 }, { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 }, { 0 } }; @@ -1367,9 +1499,16 @@ static void tp_intr_handler(adapter_t *adapter) { 0x2000000, "TP out of Tx pages", -1, 1 }, { 0 } }; + static struct intr_info tp_intr_info_t3c[] = { + { 0x1fffffff, "TP 
parity error", -1, 1 }, + { F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 }, + { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, + { 0 } + }; if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff, - tp_intr_info, NULL)) + adapter->params.rev < T3_REV_C ? + tp_intr_info : tp_intr_info_t3c, NULL)) t3_fatal_err(adapter); } @@ -1391,10 +1530,22 @@ static void cim_intr_handler(adapter_t *adapter) { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 }, { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 }, { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 }, + { F_DRAMPARERR, "CIM DRAM parity error", -1, 1 }, + { F_ICACHEPARERR, "CIM icache parity error", -1, 1 }, + { F_DCACHEPARERR, "CIM dcache parity error", -1, 1 }, + { F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 }, + { F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 }, + { F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 }, + { F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 }, + { F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 }, + { F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 }, + { F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 }, + { F_ITAGPARERR, "CIM itag parity error", -1, 1 }, + { F_DTAGPARERR, "CIM dtag parity error", -1, 1 }, { 0 } }; - if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff, + if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK, cim_intr_info, NULL)) t3_fatal_err(adapter); } @@ -1405,7 +1556,14 @@ static void cim_intr_handler(adapter_t *adapter) static void ulprx_intr_handler(adapter_t *adapter) { static struct intr_info ulprx_intr_info[] = { - { F_PARERR, "ULP RX parity error", -1, 1 }, + { F_PARERRDATA, "ULP RX data parity error", -1, 1 }, + { F_PARERRPCMD, "ULP RX command parity error", -1, 1 }, + { F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 }, + { F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 }, + { F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 }, + { F_PCMDMUXPERR, "ULP RX PCMDMUX parity 
error", -1, 1 }, + { F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 }, + { F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 }, { 0 } }; @@ -1424,6 +1582,7 @@ static void ulptx_intr_handler(adapter_t *adapter) STAT_ULP_CH0_PBL_OOB, 0 }, { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds", STAT_ULP_CH1_PBL_OOB, 0 }, + { 0xfc, "ULP TX parity error", -1, 1 }, { 0 } }; @@ -1498,7 +1657,8 @@ static void pmrx_intr_handler(adapter_t *adapter) static void cplsw_intr_handler(adapter_t *adapter) { static struct intr_info cplsw_intr_info[] = { -// { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, + { F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 }, + { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 }, { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 }, { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 }, @@ -1632,7 +1792,7 @@ int t3_phy_intr_handler(adapter_t *adapter) mask = gpi - (gpi & (gpi - 1)); gpi -= mask; - if (!(p->port_type->caps & SUPPORTED_IRQ)) + if (!(p->phy.caps & SUPPORTED_IRQ)) continue; if (cause & mask) { @@ -1728,7 +1888,6 @@ void t3_intr_enable(adapter_t *adapter) MC7_INTR_MASK }, { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK }, { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK }, - { A_TP_INT_ENABLE, 0x3bfffff }, { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK }, { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK }, { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK }, @@ -1738,6 +1897,8 @@ void t3_intr_enable(adapter_t *adapter) adapter->slow_intr_mask = PL_INTR_MASK; t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0); + t3_write_reg(adapter, A_TP_INT_ENABLE, + adapter->params.rev >= T3_REV_C ? 
0x2bfffff : 0x3bfffff); if (adapter->params.rev > 0) { t3_write_reg(adapter, A_CPL_INTR_ENABLE, @@ -1889,6 +2050,15 @@ static int t3_sge_write_context(adapter_t *adapter, unsigned int id, 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } +static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type) +{ + t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0); + t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0); + t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0); + t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0); + return t3_sge_write_context(adap, id, type); +} + /** * t3_sge_init_ecntxt - initialize an SGE egress context * @adapter: the adapter to configure @@ -2390,20 +2560,6 @@ static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr, } /** - * t3_enable_filters - enable the HW filters - * @adap: the adapter - * - * Enables the HW filters for NIC traffic. - */ -void t3_enable_filters(adapter_t *adap) -{ - t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0); - t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN); - t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3)); - tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT); -} - -/** * pm_num_pages - calculate the number of pages of the payload memory * @mem_size: the size of the payload memory * @pg_size: the size of each payload memory page @@ -2508,7 +2664,7 @@ static void tp_config(adapter_t *adap, const struct tp_params *p) V_AUTOSTATE2(1) | V_AUTOSTATE1(0) | V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) | F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1)); - t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE, + t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO, F_IPV6ENABLE | F_NICMODE); t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814); t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105); @@ -2519,7 +2675,9 @@ static void tp_config(adapter_t *adap, const struct tp_params *p) F_ENABLEEPCMDAFULL, F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE | 
F_RXCONGESTIONMODE); - t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0); + t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, + F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN | + F_ENABLEARPMISS | F_DISBLEDAPARBIT0); t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080); t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000); @@ -2534,6 +2692,11 @@ static void tp_config(adapter_t *adap, const struct tp_params *p) } else t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED); + if (adap->params.rev == T3_REV_C) + t3_set_reg_field(adap, A_TP_PC_CONFIG, + V_TABLELATENCYDELTA(M_TABLELATENCYDELTA), + V_TABLELATENCYDELTA(4)); + t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0); t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0); t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0); @@ -2972,7 +3135,7 @@ int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched) if (bpt > 0 && bpt <= 255) { v = bpt * tps; delta = v >= kbps ? v - kbps : kbps - v; - if (delta <= mindelta) { + if (delta < mindelta) { mindelta = delta; selected_cpt = cpt; selected_bpt = bpt; @@ -3383,7 +3546,8 @@ static void config_pcie(adapter_t *adap) V_REPLAYLMT(rpllmt)); t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff); - t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN); + t3_set_reg_field(adap, A_PCIE_CFG, 0, + F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN); } /** @@ -3401,7 +3565,7 @@ static void config_pcie(adapter_t *adap) */ int t3_init_hw(adapter_t *adapter, u32 fw_params) { - int err = -EIO, attempts = 100; + int err = -EIO, attempts, i; const struct vpd_params *vpd = &adapter->params.vpd; if (adapter->params.rev > 0) @@ -3422,6 +3586,10 @@ int t3_init_hw(adapter_t *adapter, u32 fw_params) adapter->params.mc5.nfilters, adapter->params.mc5.nroutes)) goto out_err; + + for (i = 0; i < 32; i++) + if (clear_sge_ctxt(adapter, i, F_CQ)) + goto out_err; } if (tp_init(adapter, &adapter->params.tp)) @@ -3438,7 +3606,12 @@ int t3_init_hw(adapter_t *adapter, u32 fw_params) if (is_pcie(adapter)) config_pcie(adapter); 
else - t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN); + t3_set_reg_field(adapter, A_PCIX_CFG, 0, + F_DMASTOPEN | F_CLIDECEN); + + if (adapter->params.rev == T3_REV_C) + t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0, + F_CFG_CQE_SOP_MASK); t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff); t3_write_reg(adapter, A_PM1_RX_MODE, 0); @@ -3451,6 +3624,7 @@ int t3_init_hw(adapter_t *adapter, u32 fw_params) V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2)); (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */ + attempts = 100; do { /* wait for uP to initialize */ msleep(20); } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts); @@ -3601,6 +3775,7 @@ void early_hw_init(adapter_t *adapter, const struct adapter_info *ai) t3_write_reg(adapter, A_T3DBG_GPIO_EN, ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL); t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0); + t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff)); if (adapter->params.rev == 0 || !uses_xaui(adapter)) val |= F_ENRGMII; @@ -3651,6 +3826,36 @@ static int t3_reset_adapter(adapter_t *adapter) return 0; } +static int __devinit init_parity(adapter_t *adap) +{ + int i, err, addr; + + if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + for (err = i = 0; !err && i < 16; i++) + err = clear_sge_ctxt(adap, i, F_EGRESS); + for (i = 0xfff0; !err && i <= 0xffff; i++) + err = clear_sge_ctxt(adap, i, F_EGRESS); + for (i = 0; !err && i < SGE_QSETS; i++) + err = clear_sge_ctxt(adap, i, F_RESPONSEQ); + if (err) + return err; + + t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0); + for (i = 0; i < 4; i++) + for (addr = 0; addr <= M_IBQDBGADDR; addr++) { + t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN | + F_IBQDBGWR | V_IBQDBGQID(i) | + V_IBQDBGADDR(addr)); + err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, + F_IBQDBGBUSY, 0, 2, 1); + if (err) + return err; + } + return 0; +} + /** * t3_prep_adapter - prepare SW and HW for operation * @adapter: the adapter @@ -3732,6 +3937,9 @@ int __devinit 
t3_prep_adapter(adapter_t *adapter, } early_hw_init(adapter, ai); + ret = init_parity(adapter); + if (ret) + return ret; if (adapter->params.nports > 2 && (ret = t3_vsc7323_init(adapter, adapter->params.nports))) @@ -3739,14 +3947,17 @@ int __devinit t3_prep_adapter(adapter_t *adapter, for_each_port(adapter, i) { u8 hw_addr[6]; + const struct port_type_info *pti; struct port_info *p = adap2pinfo(adapter, i); while (!adapter->params.vpd.port_type[j]) ++j; - p->port_type = &port_types[adapter->params.vpd.port_type[j]]; - p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j, - ai->mdio_ops); + pti = &port_types[adapter->params.vpd.port_type[j]]; + ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j, + ai->mdio_ops); + if (ret) + return ret; mac_prep(&p->mac, adapter, j); ++j; @@ -3759,9 +3970,9 @@ int __devinit t3_prep_adapter(adapter_t *adapter, hw_addr[5] = adapter->params.vpd.eth_base[5] + i; t3_os_set_hw_addr(adapter, i, hw_addr); - init_link_config(&p->link_config, p->port_type->caps); + init_link_config(&p->link_config, p->phy.caps); p->phy.ops->power_down(&p->phy, 1); - if (!(p->port_type->caps & SUPPORTED_IRQ)) + if (!(p->phy.caps & SUPPORTED_IRQ)) adapter->params.linkpoll_period = 10; } diff --git a/sys/dev/cxgb/common/cxgb_tcb.h b/sys/dev/cxgb/common/cxgb_tcb.h index 5dc72f5..7785466 100644 --- a/sys/dev/cxgb/common/cxgb_tcb.h +++ b/sys/dev/cxgb/common/cxgb_tcb.h @@ -668,7 +668,10 @@ $FreeBSD$ #define S_TF_DDP_BUF1_FLUSH 28 #define V_TF_DDP_BUF1_FLUSH(x) ((x) << S_TF_DDP_BUF1_FLUSH) -#define S_TF_DDP_PSH_NO_INVALIDATE 29 -#define V_TF_DDP_PSH_NO_INVALIDATE(x) ((x) << S_TF_DDP_PSH_NO_INVALIDATE) +#define S_TF_DDP_PSH_NO_INVALIDATE0 29 +#define V_TF_DDP_PSH_NO_INVALIDATE0(x) ((x) << S_TF_DDP_PSH_NO_INVALIDATE0) + +#define S_TF_DDP_PSH_NO_INVALIDATE1 30 +#define V_TF_DDP_PSH_NO_INVALIDATE1(x) ((x) << S_TF_DDP_PSH_NO_INVALIDATE1) #endif /* _TCB_DEFS_H */ diff --git a/sys/dev/cxgb/common/cxgb_version.h b/sys/dev/cxgb/common/cxgb_version.h index 
88296af..5867beb 100644 --- a/sys/dev/cxgb/common/cxgb_version.h +++ b/sys/dev/cxgb/common/cxgb_version.h @@ -37,5 +37,5 @@ $FreeBSD$ #define __CHELSIO_VERSION_H #define DRV_DESC "Chelsio T3 Network Driver" #define DRV_NAME "cxgb" -#define DRV_VERSION "1.0.086" +#define DRV_VERSION "1.0.129a" #endif diff --git a/sys/dev/cxgb/common/cxgb_vsc8211.c b/sys/dev/cxgb/common/cxgb_vsc8211.c index 382ecc7..61bdc9c 100644 --- a/sys/dev/cxgb/common/cxgb_vsc8211.c +++ b/sys/dev/cxgb/common/cxgb_vsc8211.c @@ -36,11 +36,17 @@ __FBSDID("$FreeBSD$"); #include #endif +#undef msleep +#define msleep t3_os_sleep + /* VSC8211 PHY specific registers. */ enum { + VSC8211_SIGDET_CTRL = 19, + VSC8211_EXT_CTRL = 23, VSC8211_INTR_ENABLE = 25, VSC8211_INTR_STATUS = 26, VSC8211_AUX_CTRL_STAT = 28, + VSC8211_EXT_PAGE_AXS = 31, }; enum { @@ -55,11 +61,19 @@ enum { VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */ VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */ VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */ + VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */ VSC_INTR_LINK_CHG = 1 << 13, /* link change */ + VSC_INTR_SPD_CHG = 1 << 14, /* speed change */ VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */ }; +enum { + VSC_CTRL_CLAUSE37_VIEW = 1 << 4, /* Switch to Clause 37 view */ + VSC_CTRL_MEDIA_MODE_HI = 0xf000 /* High part of media mode select */ +}; + #define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \ + VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \ VSC_INTR_NEG_DONE) #define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \ VSC_INTR_ENABLE) @@ -189,6 +203,98 @@ static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok, return 0; } +static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + unsigned int bmcr, status, lpa, adv; + int err, sp = -1, dplx = -1, pause = 0; + + err = mdio_read(cphy, 0, MII_BMCR, &bmcr); + if (!err) + err = mdio_read(cphy, 0, MII_BMSR, &status); + if (err) + return err; + + 
if (link_ok) { + /* + * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it + * once more to get the current link state. + */ + if (!(status & BMSR_LSTATUS)) + err = mdio_read(cphy, 0, MII_BMSR, &status); + if (err) + return err; + *link_ok = (status & BMSR_LSTATUS) != 0; + } + if (!(bmcr & BMCR_ANENABLE)) { + dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; + if (bmcr & BMCR_SPEED1000) + sp = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + sp = SPEED_100; + else + sp = SPEED_10; + } else if (status & BMSR_ANEGCOMPLETE) { + err = mdio_read(cphy, 0, MII_LPA, &lpa); + if (!err) + err = mdio_read(cphy, 0, MII_ADVERTISE, &adv); + if (err) + return err; + + if (adv & lpa & ADVERTISE_1000XFULL) { + dplx = DUPLEX_FULL; + sp = SPEED_1000; + } else if (adv & lpa & ADVERTISE_1000XHALF) { + dplx = DUPLEX_HALF; + sp = SPEED_1000; + } + + if (fc && dplx == DUPLEX_FULL) { + if (lpa & adv & ADVERTISE_1000XPAUSE) + pause = PAUSE_RX | PAUSE_TX; + else if ((lpa & ADVERTISE_1000XPAUSE) && + (adv & lpa & ADVERTISE_1000XPSE_ASYM)) + pause = PAUSE_TX; + else if ((lpa & ADVERTISE_1000XPSE_ASYM) && + (adv & ADVERTISE_1000XPAUSE)) + pause = PAUSE_RX; + } + } + if (speed) + *speed = sp; + if (duplex) + *duplex = dplx; + if (fc) + *fc = pause; + return 0; +} + +/* + * Enable/disable auto MDI/MDI-X in forced link speed mode. + */ +static int vsc8211_set_automdi(struct cphy *phy, int enable) +{ + int err; + + if ((err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0x52b5)) != 0 || + (err = mdio_write(phy, 0, 18, 0x12)) != 0 || + (err = mdio_write(phy, 0, 17, enable ? 
0x2803 : 0x3003)) != 0 || + (err = mdio_write(phy, 0, 16, 0x87fa)) != 0 || + (err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0)) != 0) + return err; + return 0; +} + +static int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex) +{ + int err; + + err = t3_set_phy_speed_duplex(phy, speed, duplex); + if (!err) + err = vsc8211_set_automdi(phy, 1); + return err; +} + static int vsc8211_power_down(struct cphy *cphy, int enable) { return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN, @@ -214,7 +320,6 @@ static int vsc8211_intr_handler(struct cphy *cphy) #ifdef C99_NOT_SUPPORTED static struct cphy_ops vsc8211_ops = { - NULL, vsc8211_reset, vsc8211_intr_enable, vsc8211_intr_disable, @@ -224,10 +329,25 @@ static struct cphy_ops vsc8211_ops = { vsc8211_autoneg_restart, t3_phy_advertise, NULL, - t3_set_phy_speed_duplex, + vsc8211_set_speed_duplex, vsc8211_get_link_status, vsc8211_power_down, }; + +static struct cphy_ops vsc8211_fiber_ops = { + vsc8211_reset, + vsc8211_intr_enable, + vsc8211_intr_disable, + vsc8211_intr_clear, + vsc8211_intr_handler, + vsc8211_autoneg_enable, + vsc8211_autoneg_restart, + t3_phy_advertise_fiber, + NULL, + t3_set_phy_speed_duplex, + vsc8211_get_link_status_fiber, + vsc8211_power_down, +}; #else static struct cphy_ops vsc8211_ops = { .reset = vsc8211_reset, @@ -238,15 +358,57 @@ static struct cphy_ops vsc8211_ops = { .autoneg_enable = vsc8211_autoneg_enable, .autoneg_restart = vsc8211_autoneg_restart, .advertise = t3_phy_advertise, - .set_speed_duplex = t3_set_phy_speed_duplex, + .set_speed_duplex = vsc8211_set_speed_duplex, .get_link_status = vsc8211_get_link_status, .power_down = vsc8211_power_down, }; + +static struct cphy_ops vsc8211_fiber_ops = { + .reset = vsc8211_reset, + .intr_enable = vsc8211_intr_enable, + .intr_disable = vsc8211_intr_disable, + .intr_clear = vsc8211_intr_clear, + .intr_handler = vsc8211_intr_handler, + .autoneg_enable = vsc8211_autoneg_enable, + .autoneg_restart = vsc8211_autoneg_restart, + 
.advertise = t3_phy_advertise_fiber, + .set_speed_duplex = t3_set_phy_speed_duplex, + .get_link_status = vsc8211_get_link_status_fiber, + .power_down = vsc8211_power_down, +}; #endif -void t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, - const struct mdio_ops *mdio_ops) +int t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr, + const struct mdio_ops *mdio_ops) { - cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops); - t3_os_sleep(20); /* PHY needs ~10ms to start responding to MDIO */ + int err; + unsigned int val; + + cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops, + SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | + SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T"); + msleep(20); /* PHY needs ~10ms to start responding to MDIO */ + + err = mdio_read(phy, 0, VSC8211_EXT_CTRL, &val); + if (err) + return err; + if (val & VSC_CTRL_MEDIA_MODE_HI) + return 0; /* copper interface, done */ + + phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ; + phy->desc = "1000BASE-X"; + phy->ops = &vsc8211_fiber_ops; + + if ((err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 1)) != 0 || + (err = mdio_write(phy, 0, VSC8211_SIGDET_CTRL, 1)) != 0 || + (err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0)) != 0 || + (err = mdio_write(phy, 0, VSC8211_EXT_CTRL, + val | VSC_CTRL_CLAUSE37_VIEW)) != 0 || + (err = vsc8211_reset(phy, 0)) != 0) + return err; + + udelay(5); /* delay after reset before next SMI */ + return 0; } diff --git a/sys/dev/cxgb/common/cxgb_xgmac.c b/sys/dev/cxgb/common/cxgb_xgmac.c index ca8801f..745cc4b 100644 --- a/sys/dev/cxgb/common/cxgb_xgmac.c +++ b/sys/dev/cxgb/common/cxgb_xgmac.c @@ -75,6 +75,12 @@ static void xaui_serdes_reset(struct cmac *mac) } } +/** + * t3b_pcs_reset - reset the PCS on T3B+ adapters + * @mac: the XGMAC handle + * + * Reset the XGMAC PCS block on T3B+ adapters. 
+ */ void t3b_pcs_reset(struct cmac *mac) { t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, @@ -84,6 +90,12 @@ void t3b_pcs_reset(struct cmac *mac) F_PCS_RESET_); } +/** + * t3_mac_reset - reset a MAC + * @mac: the MAC to reset + * + * Reset the given MAC. + */ int t3_mac_reset(struct cmac *mac) { static struct addr_val_pair mac_reset_avp[] = { @@ -114,6 +126,7 @@ int t3_mac_reset(struct cmac *mac) t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft, F_RXSTRFRWRD | F_DISERRFRAMES, uses_xaui(adap) ? 0 : F_RXSTRFRWRD); + t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX); if (uses_xaui(adap)) { if (adap->params.rev == 0) { @@ -146,8 +159,10 @@ int t3_mac_reset(struct cmac *mac) t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN); t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN); } - - val = F_MAC_RESET_; + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft, + V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE), + V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER); + val = F_MAC_RESET_ | F_XGMAC_STOP_EN; if (is_10G(adap) || mac->multiport) val |= F_PCS_RESET_; else if (uses_xaui(adap)) @@ -236,7 +251,14 @@ static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr) t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi); } -/* Set one of the station's unicast MAC addresses. */ +/** + * t3_mac_set_address - set one of the station's unicast MAC addresses + * @mac: the MAC handle + * @idx: index of the exact address match filter to use + * @addr: the Ethernet address + * + * Set one of the station's unicast MAC addresses. + */ int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]) { if (mac->multiport) @@ -249,10 +271,14 @@ int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]) return 0; } -/* - * Specify the number of exact address filters that should be reserved for - * unicast addresses. Caller should reload the unicast and multicast addresses - * after calling this. 
+/** + * t3_mac_set_num_ucast - set the number of unicast addresses needed + * @mac: the MAC handle + * @n: number of unicast addresses needed + * + * Specify the number of exact address filters that should be reserved for + * unicast addresses. Caller should reload the unicast and multicast + * addresses after calling this. */ int t3_mac_set_num_ucast(struct cmac *mac, unsigned char n) { @@ -298,6 +324,14 @@ static int hash_hw_addr(const u8 *addr) return hash; } +/** + * t3_mac_set_rx_mode - set the Rx mode and address filters + * @mac: the MAC to configure + * @rm: structure containing the Rx mode and MAC addresses needed + * + * Configures the MAC Rx mode (promiscuity, etc) and exact and hash + * address filters. + */ int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm) { u32 hash_lo, hash_hi; @@ -344,10 +378,18 @@ static int rx_fifo_hwm(int mtu) return min(hwm, MAC_RXFIFO_SIZE - 8192); } +/** + * t3_mac_set_mtu - set the MAC MTU + * @mac: the MAC to configure + * @mtu: the MTU + * + * Sets the MAC MTU and adjusts the FIFO PAUSE watermarks accordingly. + */ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) { - int hwm, lwm; - unsigned int thres, v; + int hwm, lwm, divisor; + int ipg; + unsigned int thres, v, reg; adapter_t *adap = mac->adapter; /* @@ -362,27 +404,33 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) if (mac->multiport) return t3_vsc7323_set_mtu(adap, mtu - 4, mac->ext_port); - if (adap->params.rev == T3_REV_B2 && + if (adap->params.rev >= T3_REV_B2 && (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) { disable_exact_filters(mac); v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset); t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset, F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST); - /* drain rx FIFO */ - if (t3_wait_op_done(adap, - A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + mac->offset, - 1 << 31, 1, 20, 5)) { + reg = adap->params.rev == T3_REV_B2 ? 
+ A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG; + + /* drain RX FIFO */ + if (t3_wait_op_done(adap, reg + mac->offset, + F_RXFIFO_EMPTY, 1, 20, 5)) { t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v); enable_exact_filters(mac); return -EIO; } - t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, + V_RXMAXPKTSIZE(M_RXMAXPKTSIZE), + V_RXMAXPKTSIZE(mtu)); t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v); enable_exact_filters(mac); } else - t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); - + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, + V_RXMAXPKTSIZE(M_RXMAXPKTSIZE), + V_RXMAXPKTSIZE(mtu)); + /* * Adjust the PAUSE frame watermarks. We always set the LWM, and the * HWM only if flow-control is enabled. @@ -405,20 +453,34 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) thres /= 10; thres = mtu > thres ? (mtu - thres + 7) / 8 : 0; thres = max(thres, 8U); /* need at least 8 */ + ipg = (adap->params.rev == T3_REV_C) ? 0 : 1; t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset, V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG), - V_TXFIFOTHRESH(thres) | V_TXIPG(1)); + V_TXFIFOTHRESH(thres) | V_TXIPG(ipg)); /* Assuming a minimum drain rate of 2.5Gbps... */ - if (adap->params.rev > 0) + if (adap->params.rev > 0) { + divisor = (adap->params.rev == T3_REV_C) ? 64 : 8; t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset, - (hwm - lwm) * 4 / 8); + (hwm - lwm) * 4 / divisor); + } t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset, MAC_RXFIFO_SIZE * 4 * 8 / 512); return 0; } +/** + * t3_mac_set_speed_duplex_fc - set MAC speed, duplex and flow control + * @mac: the MAC to configure + * @speed: the desired speed (10/100/1000/10000) + * @duplex: the desired duplex + * @fc: desired Tx/Rx PAUSE configuration + * + * Set the MAC speed, duplex (actually only full-duplex is supported), and + * flow control. 
If a parameter value is negative the corresponding + * MAC setting is left at its current value. + */ int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc) { u32 val; @@ -466,6 +528,15 @@ int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc) return 0; } +/** + * t3_mac_enable - enable the MAC in the given directions + * @mac: the MAC to configure + * @which: bitmap indicating which directions to enable + * + * Enables the MAC for operation in the given directions. + * %MAC_DIRECTION_TX enables the Tx direction, and %MAC_DIRECTION_RX + * enables the Rx one. + */ int t3_mac_enable(struct cmac *mac, int which) { int idx = macidx(mac); @@ -478,9 +549,13 @@ int t3_mac_enable(struct cmac *mac, int which) if (which & MAC_DIRECTION_TX) { t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); - t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401); + t3_write_reg(adap, A_TP_PIO_DATA, + adap->params.rev == T3_REV_C ? + 0xc4ffff01 : 0xc0ede401); t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE); - t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx); + t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, + adap->params.rev == T3_REV_C ? + 0 : 1 << idx); t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN); @@ -505,6 +580,15 @@ int t3_mac_enable(struct cmac *mac, int which) return 0; } +/** + * t3_mac_disable - disable the MAC in the given directions + * @mac: the MAC to configure + * @which: bitmap indicating which directions to disable + * + * Disables the MAC in the given directions. + * %MAC_DIRECTION_TX disables the Tx direction, and %MAC_DIRECTION_RX + * disables the Rx one. + */ int t3_mac_disable(struct cmac *mac, int which) { adapter_t *adap = mac->adapter; @@ -621,12 +705,15 @@ out: return status; } -/* - * This function is called periodically to accumulate the current values of the - * RMON counters into the port statistics. 
Since the packet counters are only - * 32 bits they can overflow in ~286 secs at 10G, so the function should be - * called more frequently than that. The byte counters are 45-bit wide, they - * would overflow in ~7.8 hours. +/** + * t3_mac_update_stats - accumulate MAC statistics + * @mac: the MAC handle + * + * This function is called periodically to accumulate the current values + * of the RMON counters into the port statistics. Since the packet + * counters are only 32 bits they can overflow in ~286 secs at 10G, so the + * function should be called more frequently than that. The byte counters + * are 45-bit wide, they would overflow in ~7.8 hours. */ const struct mac_stats *t3_mac_update_stats(struct cmac *mac) { diff --git a/sys/dev/cxgb/cxgb_adapter.h b/sys/dev/cxgb/cxgb_adapter.h index b8969bc..2cb7b93 100644 --- a/sys/dev/cxgb/cxgb_adapter.h +++ b/sys/dev/cxgb/cxgb_adapter.h @@ -46,6 +46,7 @@ $FreeBSD$ #include #include #include +#include #include #include @@ -54,6 +55,7 @@ $FreeBSD$ #include #include + #ifdef CONFIG_DEFINED #include #include @@ -144,6 +146,9 @@ enum { /* adapter flags */ QUEUES_BOUND = (1 << 3), FW_UPTODATE = (1 << 4), TPS_UPTODATE = (1 << 5), + CXGB_SHUTDOWN = (1 << 6), + CXGB_OFLD_INIT = (1 << 7), + TP_PARITY_INIT = (1 << 8), }; #define FL_Q_SIZE 4096 @@ -203,6 +208,7 @@ struct sge_rspq { uint32_t holdoff_tmr; uint32_t next_holdoff; uint32_t imm_data; + uint32_t async_notif; uint32_t cntxt_id; uint32_t offload_pkts; uint32_t offload_bundles; @@ -348,6 +354,8 @@ struct adapter { /* PCI register resources */ int regs_rid; struct resource *regs_res; + int udbs_rid; + struct resource *udbs_res; bus_space_handle_t bh; bus_space_tag_t bt; bus_size_t mmio_len; @@ -508,10 +516,23 @@ static __inline uint8_t * t3_get_next_mcaddr(struct t3_rx_mode *rm) { uint8_t *macaddr = NULL; - - if (rm->idx == 0) - macaddr = (uint8_t *)rm->port->hw_addr; + struct ifnet *ifp = rm->port->ifp; + struct ifmultiaddr *ifma; + int i = 0; + + IF_ADDR_LOCK(ifp); + 
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + if (i == rm->idx) { + macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); + break; + } + i++; + } + IF_ADDR_UNLOCK(ifp); + rm->idx++; return (macaddr); } diff --git a/sys/dev/cxgb/cxgb_ioctl.h b/sys/dev/cxgb/cxgb_ioctl.h index 65deb44..d5df986 100644 --- a/sys/dev/cxgb/cxgb_ioctl.h +++ b/sys/dev/cxgb/cxgb_ioctl.h @@ -101,15 +101,16 @@ struct ch_mem_range { }; struct ch_qset_params { - uint32_t qset_idx; - int32_t txq_size[3]; - int32_t rspq_size; - int32_t fl_size[2]; - int32_t intr_lat; - int32_t polling; - int32_t cong_thres; - int32_t vector; - int32_t qnum; + uint32_t qset_idx; + int32_t txq_size[3]; + int32_t rspq_size; + int32_t fl_size[2]; + int32_t intr_lat; + int32_t polling; + int32_t lro; + int32_t cong_thres; + int32_t vector; + int32_t qnum; }; struct ch_pktsched_params { @@ -260,4 +261,6 @@ struct mii_data { #define CHELSIO_SET_FILTER _IOW('f', CH_SET_FILTER, struct ch_filter) #define CHELSIO_DEL_FILTER _IOW('f', CH_DEL_FILTER, struct ch_filter) #define CHELSIO_DEVUP _IO('f', CH_DEVUP) + +#define CHELSIO_GET_TCB _IOWR('f', CH_GET_TCB, struct ch_tcb) #endif diff --git a/sys/dev/cxgb/cxgb_l2t.c b/sys/dev/cxgb/cxgb_l2t.c index ad7ad1e..43d09f2 100644 --- a/sys/dev/cxgb/cxgb_l2t.c +++ b/sys/dev/cxgb/cxgb_l2t.c @@ -175,11 +175,8 @@ t3_l2t_send_slow(struct t3cdev *dev, struct mbuf *m, struct l2t_entry *e) sin.sin_family = AF_INET; sin.sin_len = sizeof(struct sockaddr_in); sin.sin_addr.s_addr = e->addr; - - - - printf("send slow on rt=%p eaddr=0x%08x\n", rt, e->addr); - + + CTR2(KTR_CXGB, "send slow on rt=%p eaddr=0x%08x\n", rt, e->addr); again: switch (e->state) { case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ @@ -199,8 +196,6 @@ again: } arpq_enqueue(e, m); mtx_unlock(&e->lock); - printf("enqueueing arp request\n"); - /* * Only the first packet added to the arpq should kick off * resolution. 
However, because the m_gethdr below can fail, @@ -209,10 +204,9 @@ again: * A better way would be to use a work request to retry L2T * entries when there's no memory. */ - printf("doing arpresolve on 0x%x \n", e->addr); if (arpresolve(rt->rt_ifp, rt, NULL, (struct sockaddr *)&sin, e->dmac) == 0) { - printf("mac=%x:%x:%x:%x:%x:%x\n", + CTR6(KTR_CXGB, "mac=%x:%x:%x:%x:%x:%x\n", e->dmac[0], e->dmac[1], e->dmac[2], e->dmac[3], e->dmac[4], e->dmac[5]); if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL) @@ -224,8 +218,7 @@ again: else m_freem(m); mtx_unlock(&e->lock); - } else - printf("arpresolve returned non-zero\n"); + } } return 0; } @@ -396,8 +389,6 @@ t3_l2t_get(struct t3cdev *dev, struct rtentry *neigh, struct ifnet *ifp, /* Need to allocate a new entry */ e = alloc_l2e(d); if (e) { - printf("initializing new entry\n"); - mtx_lock(&e->lock); /* avoid race with t3_l2t_free */ e->next = d->l2tab[hash].first; d->l2tab[hash].first = e; @@ -472,8 +463,6 @@ t3_l2t_update(struct t3cdev *dev, struct rtentry *neigh, int hash = arp_hash(addr, ifidx, d); struct llinfo_arp *la; - printf("t3_l2t_update called with arp info\n"); - rw_rlock(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) if (e->addr == addr && e->ifindex == ifidx) { @@ -481,7 +470,7 @@ t3_l2t_update(struct t3cdev *dev, struct rtentry *neigh, goto found; } rw_runlock(&d->lock); - printf("addr=0x%08x not found\n", addr); + CTR1(KTR_CXGB, "t3_l2t_update: addr=0x%08x not found", addr); return; found: @@ -543,6 +532,12 @@ t3_init_l2t(unsigned int l2t_capacity) void t3_free_l2t(struct l2t_data *d) { + int i; + + rw_destroy(&d->lock); + for (i = 0; i < d->nentries; ++i) + mtx_destroy(&d->l2tab[i].lock); + cxgb_free_mem(d); } diff --git a/sys/dev/cxgb/cxgb_l2t.h b/sys/dev/cxgb/cxgb_l2t.h index a5d469b..954d02a 100644 --- a/sys/dev/cxgb/cxgb_l2t.h +++ b/sys/dev/cxgb/cxgb_l2t.h @@ -143,8 +143,6 @@ static inline int l2t_send(struct t3cdev *dev, struct mbuf *m, if (__predict_true(e->state == L2T_STATE_VALID)) { return 
cxgb_ofld_send(dev, (struct mbuf *)m); } - printf("send slow\n"); - return t3_l2t_send_slow(dev, (struct mbuf *)m, e); } diff --git a/sys/dev/cxgb/cxgb_main.c b/sys/dev/cxgb/cxgb_main.c index 581370b..9db5256 100644 --- a/sys/dev/cxgb/cxgb_main.c +++ b/sys/dev/cxgb/cxgb_main.c @@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -119,6 +120,7 @@ static int cxgb_get_regs_len(void); static int offload_open(struct port_info *pi); static void touch_bars(device_t dev); static int offload_close(struct t3cdev *tdev); +static void cxgb_link_start(struct port_info *p); static device_method_t cxgb_controller_methods[] = { DEVMETHOD(device_probe, cxgb_controller_probe), @@ -281,6 +283,32 @@ struct cxgb_ident { static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset); + +void +cxgb_log_tcb(struct adapter *sc, unsigned int tid) +{ + char buf[TCB_SIZE]; + uint64_t *tcb = (uint64_t *)buf; + int i, error; + struct mc7 *mem = &sc->cm; + + error = t3_mc7_bd_read(mem, tid*TCB_SIZE/8, TCB_SIZE/8, tcb); + if (error) + printf("cxgb_log_tcb failed\n"); + + CTR1(KTR_CXGB, "TCB tid=%u", tid); + for (i = 0; i < TCB_SIZE / 32; i++) { + CTR5(KTR_CXGB, "%1d: %08x %08x %08x %08x", + i, (uint32_t)tcb[1], (uint32_t)(tcb[1] >> 32), + (uint32_t)tcb[0], (uint32_t)(tcb[0] >> 32)); + tcb += 2; + CTR4(KTR_CXGB, " %08x %08x %08x %08x", + (uint32_t)tcb[1], (uint32_t)(tcb[1] >> 32), + (uint32_t)tcb[0], (uint32_t)(tcb[0] >> 32)); + tcb += 2; + } +} + static __inline char t3rev2char(struct adapter *adapter) { @@ -397,7 +425,8 @@ cxgb_controller_attach(device_t dev) int port_qsets = 1; #ifdef MSI_SUPPORTED int msi_needed, reg; -#endif +#endif + int must_load = 0; sc = device_get_softc(dev); sc->dev = dev; sc->msi_count = 0; @@ -434,9 +463,16 @@ cxgb_controller_attach(device_t dev) sc->regs_rid = PCIR_BAR(0); if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE)) == NULL) { -
device_printf(dev, "Cannot allocate BAR\n"); + device_printf(dev, "Cannot allocate BAR region 0\n"); return (ENXIO); } + sc->udbs_rid = PCIR_BAR(2); + if ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &sc->udbs_rid, RF_ACTIVE)) == NULL) { + device_printf(dev, "Cannot allocate BAR region 1\n"); + error = ENXIO; + goto out; + } snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d", device_get_unit(dev)); @@ -449,7 +485,7 @@ cxgb_controller_attach(device_t dev) snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d", device_get_unit(dev)); - MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_DEF); + MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN); MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF); MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF); @@ -534,7 +570,7 @@ cxgb_controller_attach(device_t dev) /* Create a periodic callout for checking adapter status */ callout_init(&sc->cxgb_tick_ch, TRUE); - if (t3_check_fw_version(sc) != 0) { + if (t3_check_fw_version(sc, &must_load) != 0 && must_load) { /* * Warn user that a firmware update will be attempted in init. */ @@ -545,7 +581,7 @@ cxgb_controller_attach(device_t dev) sc->flags |= FW_UPTODATE; } - if (t3_check_tpsram_version(sc) != 0) { + if (t3_check_tpsram_version(sc, &must_load) != 0 && must_load) { /* * Warn user that a firmware update will be attempted in init. 
*/ @@ -609,6 +645,8 @@ cxgb_controller_attach(device_t dev) G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers)); + device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]); + callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); t3_add_attach_sysctls(sc); out: if (error) @@ -634,9 +672,12 @@ cxgb_free(struct adapter *sc) { int i; - + ADAPTER_LOCK(sc); + sc->flags |= CXGB_SHUTDOWN; + ADAPTER_UNLOCK(sc); cxgb_pcpu_shutdown_threads(sc); ADAPTER_LOCK(sc); + /* * drops the lock */ @@ -654,11 +695,7 @@ cxgb_free(struct adapter *sc) bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid, sc->msix_regs_res); } - - if (sc->tq != NULL) { - taskqueue_drain(sc->tq, &sc->ext_intr_task); - taskqueue_drain(sc->tq, &sc->tick_task); - } + t3_sge_deinit_sw(sc); /* * Wait for last callout @@ -672,8 +709,11 @@ cxgb_free(struct adapter *sc) } bus_generic_detach(sc->dev); - if (sc->tq != NULL) + if (sc->tq != NULL) { taskqueue_free(sc->tq); + sc->tq = NULL; + } + if (is_offload(sc)) { cxgb_adapter_unofld(sc); if (isset(&sc->open_device_map, OFFLOAD_DEVMAP_BIT)) @@ -682,11 +722,18 @@ cxgb_free(struct adapter *sc) printf("cxgb_free: DEVMAP_BIT not set\n"); } else printf("not offloading set\n"); + + if (sc->flags & CXGB_OFLD_INIT) + cxgb_offload_deactivate(sc); free(sc->filters, M_DEVBUF); t3_sge_free(sc); cxgb_offload_exit(); - + + if (sc->udbs_res != NULL) + bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid, + sc->udbs_res); + if (sc->regs_res != NULL) bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid, sc->regs_res); @@ -797,8 +844,6 @@ cxgb_setup_msix(adapter_t *sc, int msix_count) return (EINVAL); } sc->msix_irq_rid[k] = rid; - printf("setting up interrupt for port=%d\n", - qs->port->port_id); if (bus_setup_intr(sc->dev, sc->msix_irq_res[k], INTR_MPSAFE|INTR_TYPE_NET, #ifdef INTR_FILTERS @@ -828,10 +873,11 @@ cxgb_port_probe(device_t dev) { struct port_info *p; char buf[80]; - + const char *desc; + p = 
device_get_softc(dev); - - snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, p->port_type->desc); + desc = p->phy.desc; + snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc); device_set_desc_copy(dev, buf); return (0); } @@ -873,9 +919,11 @@ cxgb_port_attach(device_t dev) struct port_info *p; struct ifnet *ifp; int err, media_flags; + struct adapter *sc; + p = device_get_softc(dev); - + sc = p->adapter; snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d", device_get_unit(device_get_parent(dev)), p->port_id); PORT_LOCK_INIT(p, p->lockbuf); @@ -897,11 +945,12 @@ cxgb_port_attach(device_t dev) ifp->if_ioctl = cxgb_ioctl; ifp->if_start = cxgb_start; +#if 0 #ifdef IFNET_MULTIQUEUE ifp->if_flags |= IFF_MULTIQ; ifp->if_mq_start = cxgb_pcpu_start; #endif - +#endif ifp->if_timer = 0; /* Disable ifnet watchdog */ ifp->if_watchdog = NULL; @@ -934,14 +983,14 @@ cxgb_port_attach(device_t dev) } ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change, cxgb_media_status); - - if (!strcmp(p->port_type->desc, "10GBASE-CX4")) { + + if (!strcmp(p->phy.desc, "10GBASE-CX4")) { media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX; - } else if (!strcmp(p->port_type->desc, "10GBASE-SR")) { + } else if (!strcmp(p->phy.desc, "10GBASE-SR")) { media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX; - } else if (!strcmp(p->port_type->desc, "10GBASE-XR")) { + } else if (!strcmp(p->phy.desc, "10GBASE-XR")) { media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX; - } else if (!strcmp(p->port_type->desc, "10/100/1000BASE-T")) { + } else if (!strcmp(p->phy.desc, "10/100/1000BASE-T")) { ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); @@ -953,7 +1002,7 @@ cxgb_port_attach(device_t dev) 0, NULL); media_flags = 0; } else { - printf("unsupported media type %s\n", p->port_type->desc); + printf("unsupported media type %s\n", p->phy.desc); return (ENXIO); } if (media_flags) { @@ -976,7 +1025,8 @@ cxgb_port_attach(device_t dev) 
taskqueue_thread_enqueue, &p->tq); #endif t3_sge_init_port(p); - + cxgb_link_start(p); + t3_link_changed(sc, p->port_id); return (0); } @@ -1119,17 +1169,14 @@ t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed, struct port_info *pi = &adapter->port[port_id]; struct cmac *mac = &adapter->port[port_id].mac; - if ((pi->ifp->if_flags & IFF_UP) == 0) - return; - if (link_status) { t3_mac_enable(mac, MAC_DIRECTION_RX); if_link_state_change(pi->ifp, LINK_STATE_UP); } else { - if_link_state_change(pi->ifp, LINK_STATE_DOWN); pi->phy.ops->power_down(&pi->phy, 1); t3_mac_disable(mac, MAC_DIRECTION_RX); t3_link_start(&pi->phy, mac, &pi->link_config); + if_link_state_change(pi->ifp, LINK_STATE_DOWN); } } @@ -1195,6 +1242,84 @@ cxgb_link_start(struct port_info *p) t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); } + +static int +await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, + unsigned long n) +{ + int attempts = 5; + + while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { + if (!--attempts) + return (ETIMEDOUT); + t3_os_sleep(10); + } + return 0; +} + +static int +init_tp_parity(struct adapter *adap) +{ + int i; + struct mbuf *m; + struct cpl_set_tcb_field *greq; + unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; + + t3_tp_set_offload_mode(adap, 1); + + for (i = 0; i < 16; i++) { + struct cpl_smt_write_req *req; + + m = m_gethdr(M_WAITOK, MT_DATA); + req = mtod(m, struct cpl_smt_write_req *); + m->m_len = m->m_pkthdr.len = sizeof(*req); + memset(req, 0, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i)); + req->iff = i; + t3_mgmt_tx(adap, m); + } + + for (i = 0; i < 2048; i++) { + struct cpl_l2t_write_req *req; + + m = m_gethdr(M_WAITOK, MT_DATA); + req = mtod(m, struct cpl_l2t_write_req *); + m->m_len = m->m_pkthdr.len = sizeof(*req); + memset(req, 0, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + 
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i)); + req->params = htonl(V_L2T_W_IDX(i)); + t3_mgmt_tx(adap, m); + } + + for (i = 0; i < 2048; i++) { + struct cpl_rte_write_req *req; + + m = m_gethdr(M_WAITOK, MT_DATA); + req = mtod(m, struct cpl_rte_write_req *); + m->m_len = m->m_pkthdr.len = sizeof(*req); + memset(req, 0, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i)); + req->l2t_idx = htonl(V_L2T_W_IDX(i)); + t3_mgmt_tx(adap, m); + } + + m = m_gethdr(M_WAITOK, MT_DATA); + greq = mtod(m, struct cpl_set_tcb_field *); + m->m_len = m->m_pkthdr.len = sizeof(*greq); + memset(greq, 0, sizeof(*greq)); + greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0)); + greq->mask = htobe64(1); + t3_mgmt_tx(adap, m); + + i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); + t3_tp_set_offload_mode(adap, 0); + return (i); +} + /** * setup_rss - configure Receive Side Steering (per-queue connection demux) * @adap: the adapter @@ -1224,11 +1349,9 @@ setup_rss(adapter_t *adap) nq[pi->tx_chan] += pi->nqsets; } - nq[0] = max(nq[0], 1U); - nq[1] = max(nq[1], 1U); for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) { - rspq_map[i] = i % nq[0]; - rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq[1]) + nq[0]; + rspq_map[i] = nq[0] ? i % nq[0] : 0; + rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? 
i % nq[1] + nq[0] : 0; } /* Calculate the reverse RSS map table */ for (i = 0; i < RSS_TABLE_SIZE; ++i) @@ -1237,7 +1360,8 @@ setup_rss(adapter_t *adap) t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN | - V_RRCPLCPUSIZE(6), cpus, rspq_map); + F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, + cpus, rspq_map); } @@ -1470,6 +1594,7 @@ cxgb_up(struct adapter *sc) if (err) goto out; + t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT); t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); err = setup_sge_qsets(sc); @@ -1510,8 +1635,18 @@ cxgb_up(struct adapter *sc) t3_sge_start(sc); t3_intr_enable(sc); + if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) && + is_offload(sc) && init_tp_parity(sc) == 0) + sc->flags |= TP_PARITY_INIT; + + if (sc->flags & TP_PARITY_INIT) { + t3_write_reg(sc, A_TP_INT_CAUSE, + F_CMCACHEPERR | F_ARPLUTPERR); + t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff); + } + + if (!(sc->flags & QUEUES_BOUND)) { - printf("bind qsets\n"); bind_qsets(sc); sc->flags |= QUEUES_BOUND; } @@ -1529,7 +1664,6 @@ irq_err: static void cxgb_down_locked(struct adapter *sc) { - int i; t3_sge_stop(sc); t3_intr_disable(sc); @@ -1546,20 +1680,24 @@ cxgb_down_locked(struct adapter *sc) sc->irq_res = NULL; } - if (sc->flags & USING_MSIX) + if (sc->flags & USING_MSIX) cxgb_teardown_msix(sc); - ADAPTER_UNLOCK(sc); - + callout_stop(&sc->cxgb_tick_ch); callout_stop(&sc->sge_timer_ch); callout_drain(&sc->cxgb_tick_ch); callout_drain(&sc->sge_timer_ch); if (sc->tq != NULL) { + printf("draining slow intr\n"); + taskqueue_drain(sc->tq, &sc->slow_intr_task); - for (i = 0; i < sc->params.nports; i++) - taskqueue_drain(sc->tq, &sc->port[i].timer_reclaim_task); + printf("draining ext intr\n"); + taskqueue_drain(sc->tq, &sc->ext_intr_task); + printf("draining tick task\n"); + taskqueue_drain(sc->tq, &sc->tick_task); } + ADAPTER_UNLOCK(sc); } static int @@ -1573,7 +1711,7 @@ 
offload_open(struct port_info *pi) int adap_up = adapter->open_device_map & PORT_MASK; int err = 0; - printf("device_map=0x%x\n", adapter->open_device_map); + CTR1(KTR_CXGB, "device_map=0x%x", adapter->open_device_map); if (atomic_cmpset_int(&adapter->open_device_map, (adapter->open_device_map & ~(1<open_device_map | (1<open_device_map, OFFLOAD_DEVMAP_BIT)) { - printf("offload_close: DEVMAP_BIT not set\n"); - + if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT)) return (0); - } /* Call back all registered clients */ cxgb_remove_clients(tdev); @@ -1638,7 +1773,6 @@ offload_close(struct t3cdev *tdev) cxgb_down_locked(adapter); else ADAPTER_UNLOCK(adapter); - cxgb_offload_deactivate(adapter); return (0); } @@ -1680,17 +1814,12 @@ cxgb_init_locked(struct port_info *p) if (err) log(LOG_WARNING, "Could not initialize offload capabilities\n"); - else - printf("offload opened\n"); } - cxgb_link_start(p); - t3_link_changed(sc, p->port_id); ifp->if_baudrate = p->link_config.speed * 1000000; device_printf(sc->dev, "enabling interrupts on port=%d\n", p->port_id); t3_port_intr_enable(sc, p->port_id); - callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); t3_sge_reset_adapter(sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; @@ -1703,10 +1832,10 @@ cxgb_set_rxmode(struct port_info *p) struct t3_rx_mode rm; struct cmac *mac = &p->mac; - PORT_LOCK_ASSERT_OWNED(p); - t3_init_rx_mode(&rm, p); + mtx_lock(&p->adapter->mdio_lock); t3_mac_set_rx_mode(mac, &rm); + mtx_unlock(&p->adapter->mdio_lock); } static void @@ -1745,7 +1874,6 @@ cxgb_set_mtu(struct port_info *p, int mtu) PORT_LOCK(p); ifp->if_mtu = mtu; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - callout_stop(&p->adapter->cxgb_tick_ch); cxgb_stop_locked(p); cxgb_init_locked(p); } @@ -1771,19 +1899,18 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data) error = cxgb_set_mtu(p, ifr->ifr_mtu); break; case SIOCSIFADDR: - case SIOCGIFADDR: if (ifa->ifa_addr->sa_family == AF_INET) { - PORT_LOCK(p); ifp->if_flags |= 
IFF_UP; - if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { + PORT_LOCK(p); cxgb_init_locked(p); + PORT_UNLOCK(p); + } arp_ifinit(ifp, ifa); - PORT_UNLOCK(p); } else error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: - callout_drain(&p->adapter->cxgb_tick_ch); PORT_LOCK(p); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { @@ -1797,12 +1924,13 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data) } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) cxgb_stop_locked(p); + PORT_UNLOCK(p); + break; + case SIOCADDMULTI: + case SIOCDELMULTI: if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - adapter_t *sc = p->adapter; - callout_reset(&sc->cxgb_tick_ch, hz, - cxgb_tick, sc); + cxgb_set_rxmode(p); } - PORT_UNLOCK(p); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: @@ -1929,7 +2057,7 @@ check_link_status(adapter_t *sc) for (i = 0; i < (sc)->params.nports; ++i) { struct port_info *p = &sc->port[i]; - if (!(p->port_type->caps & SUPPORTED_IRQ)) + if (!(p->phy.caps & SUPPORTED_IRQ)) t3_link_changed(sc, i); p->ifp->if_baudrate = p->link_config.speed * 1000000; } @@ -1940,11 +2068,17 @@ check_t3b2_mac(struct adapter *adapter) { int i; + if(adapter->flags & CXGB_SHUTDOWN) + return; + for_each_port(adapter, i) { struct port_info *p = &adapter->port[i]; struct ifnet *ifp = p->ifp; int status; - + + if(adapter->flags & CXGB_SHUTDOWN) + return; + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) continue; @@ -1974,26 +2108,12 @@ static void cxgb_tick(void *arg) { adapter_t *sc = (adapter_t *)arg; - int i, running = 0; - - for_each_port(sc, i) { - - struct port_info *p = &sc->port[i]; - struct ifnet *ifp = p->ifp; - PORT_LOCK(p); - if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) - running = 1; - PORT_UNLOCK(p); - } - - if (running == 0) + if(sc->flags & CXGB_SHUTDOWN) return; - + taskqueue_enqueue(sc->tq, &sc->tick_task); - - if (sc->open_device_map != 0) - callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); 
+ callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); } static void @@ -2002,17 +2122,20 @@ cxgb_tick_handler(void *arg, int count) adapter_t *sc = (adapter_t *)arg; const struct adapter_params *p = &sc->params; + if(sc->flags & CXGB_SHUTDOWN) + return; + ADAPTER_LOCK(sc); if (p->linkpoll_period) check_link_status(sc); /* - * adapter lock can currently only be acquire after the + * adapter lock can currently only be acquired after the * port lock */ ADAPTER_UNLOCK(sc); - if (p->rev == T3_REV_B2 && p->nports < 4) + if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map) check_t3b2_mac(sc); } @@ -2180,7 +2303,7 @@ cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, } case CHELSIO_GET_SGE_CONTEXT: { struct ch_cntxt *ecntxt = (struct ch_cntxt *)data; - mtx_lock(&sc->sge.reg_lock); + mtx_lock_spin(&sc->sge.reg_lock); switch (ecntxt->cntxt_type) { case CNTXT_TYPE_EGRESS: error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id, @@ -2202,7 +2325,7 @@ cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, error = EINVAL; break; } - mtx_unlock(&sc->sge.reg_lock); + mtx_unlock_spin(&sc->sge.reg_lock); break; } case CHELSIO_GET_SGE_DESC: { @@ -2220,7 +2343,8 @@ cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, case CHELSIO_SET_QSET_PARAMS: { struct qset_params *q; struct ch_qset_params *t = (struct ch_qset_params *)data; - + int i; + if (t->qset_idx >= SGE_QSETS) return (EINVAL); if (!in_range(t->intr_lat, 0, M_NEWTIMER) || @@ -2236,6 +2360,18 @@ cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, MAX_RX_JUMBO_BUFFERS) || !in_range(t->rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES)) return (EINVAL); + + if ((sc->flags & FULL_INIT_DONE) && t->lro > 0) + for_each_port(sc, i) { + pi = adap2pinfo(sc, i); + if (t->qset_idx >= pi->first_qset && + t->qset_idx < pi->first_qset + pi->nqsets +#if 0 + && !pi->rx_csum_offload +#endif + ) + return -EINVAL; + } if ((sc->flags & FULL_INIT_DONE) && (t->rspq_size 
>= 0 || t->fl_size[0] >= 0 || t->fl_size[1] >= 0 || t->txq_size[0] >= 0 || diff --git a/sys/dev/cxgb/cxgb_multiq.c b/sys/dev/cxgb/cxgb_multiq.c index b1c402c..09e9a1a 100644 --- a/sys/dev/cxgb/cxgb_multiq.c +++ b/sys/dev/cxgb/cxgb_multiq.c @@ -422,13 +422,16 @@ cxgb_pcpu_start_(struct sge_qset *qs, struct mbuf *immpkt, int tx_flush) txq = &qs->txq[TXQ_ETH]; mtx_assert(&txq->lock, MA_OWNED); - KASSERT(qs->idx == 0, ("invalid qs %d", qs->idx)); retry: if (!pi->link_config.link_ok) initerr = ENXIO; else if (qs->qs_flags & QS_EXITING) initerr = ENXIO; + else if ((pi->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + initerr = ENXIO; + else if ((pi->ifp->if_flags & IFF_UP) == 0) + initerr = ENXIO; else if (immpkt) { if (!buf_ring_empty(&txq->txq_mr)) @@ -690,15 +693,15 @@ cxgb_pcpu_shutdown_threads(struct adapter *sc) int i, j; int nqsets; + for (i = 0; i < sc->params.nports; i++) { + struct port_info *pi = &sc->port[i]; + int first = pi->first_qset; + #ifdef IFNET_MULTIQUEUE nqsets = pi->nqsets; #else nqsets = 1; #endif - - for (i = 0; i < sc->params.nports; i++) { - struct port_info *pi = &sc->port[i]; - int first = pi->first_qset; for (j = 0; j < nqsets; j++) { struct sge_qset *qs = &sc->sge.qs[first + j]; diff --git a/sys/dev/cxgb/cxgb_offload.c b/sys/dev/cxgb/cxgb_offload.c index 3ce1a11..2c79b2c 100644 --- a/sys/dev/cxgb/cxgb_offload.c +++ b/sys/dev/cxgb/cxgb_offload.c @@ -105,16 +105,12 @@ cxgb_register_client(struct cxgb_client *client) TAILQ_INSERT_TAIL(&client_list, client, client_entry); if (client->add) { - printf("client->add set\n"); - TAILQ_FOREACH(tdev, &ofld_dev_list, entry) { if (offload_activated(tdev)) { - printf("calling add=%p on %p\n", - client->add, tdev); - client->add(tdev); } else - printf("%p not activated\n", tdev); + CTR1(KTR_CXGB, + "cxgb_register_client: %p not activated", tdev); } } @@ -270,11 +266,10 @@ cxgb_ulp_iscsi_ctl(adapter_t *adapter, unsigned int req, void *data) t3_read_reg(adapter, A_PM1_TX_CFG) >> 17); /* on rx, the iscsi pdu has 
to be < rx page size and the whole pdu + cpl headers has to fit into one sge buffer */ - uiip->max_rxsz = - (unsigned int)min(adapter->params.tp.rx_pg_size, - (adapter->sge.qs[0].fl[1].buf_size - - sizeof(struct cpl_rx_data) * 2 - - sizeof(struct cpl_rx_data_ddp)) ); + /* also check the max rx data length programmed in TP */ + uiip->max_rxsz = min(uiip->max_rxsz, + ((t3_read_reg(adapter, A_TP_PARA_REG2)) + >> S_MAXRXDATA) & M_MAXRXDATA); break; case ULP_ISCSI_SET_PARAMS: t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); @@ -297,25 +292,24 @@ cxgb_rdma_ctl(adapter_t *adapter, unsigned int req, void *data) case RDMA_GET_PARAMS: { struct rdma_info *req = data; - req->udbell_physbase = rman_get_start(adapter->regs_res); - req->udbell_len = rman_get_size(adapter->regs_res); + req->udbell_physbase = rman_get_start(adapter->udbs_res); + req->udbell_len = rman_get_size(adapter->udbs_res); req->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT); req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); req->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT); req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); - req->kdb_addr = (void *)(rman_get_start(adapter->regs_res) + A_SG_KDOORBELL); - break; + req->kdb_addr = (void *)((unsigned long)rman_get_virtual(adapter->regs_res) + A_SG_KDOORBELL); break; } case RDMA_CQ_OP: { struct rdma_cq_op *req = data; /* may be called in any context */ - mtx_lock(&adapter->sge.reg_lock); + mtx_lock_spin(&adapter->sge.reg_lock); ret = t3_sge_cqcntxt_op(adapter, req->id, req->op, req->credits); - mtx_unlock(&adapter->sge.reg_lock); + mtx_unlock_spin(&adapter->sge.reg_lock); break; } case RDMA_GET_MEM: { @@ -341,28 +335,28 @@ cxgb_rdma_ctl(adapter_t *adapter, unsigned int req, void *data) case RDMA_CQ_SETUP: { struct rdma_cq_setup *req = data; - mtx_lock(&adapter->sge.reg_lock); + 
mtx_lock_spin(&adapter->sge.reg_lock); ret = t3_sge_init_cqcntxt(adapter, req->id, req->base_addr, req->size, ASYNC_NOTIF_RSPQ, req->ovfl_mode, req->credits, req->credit_thres); - mtx_unlock(&adapter->sge.reg_lock); + mtx_unlock_spin(&adapter->sge.reg_lock); break; } case RDMA_CQ_DISABLE: - mtx_lock(&adapter->sge.reg_lock); + mtx_lock_spin(&adapter->sge.reg_lock); ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data); - mtx_unlock(&adapter->sge.reg_lock); + mtx_unlock_spin(&adapter->sge.reg_lock); break; case RDMA_CTRL_QP_SETUP: { struct rdma_ctrlqp_setup *req = data; - mtx_lock(&adapter->sge.reg_lock); + mtx_lock_spin(&adapter->sge.reg_lock); ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0, SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ, req->base_addr, req->size, FW_RI_TID_START, 1, 0); - mtx_unlock(&adapter->sge.reg_lock); + mtx_unlock_spin(&adapter->sge.reg_lock); break; } default: @@ -380,6 +374,8 @@ cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) struct iff_mac *iffmacp; struct ddp_params *ddpp; struct adap_ports *ports; + struct ofld_page_info *rx_page_info; + struct tp_params *tp = &adapter->params.tp; int port; switch (req) { @@ -444,6 +440,11 @@ cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) case FAILOVER_CLEAR: t3_failover_clear(adapter); break; + case GET_RX_PAGE_INFO: + rx_page_info = data; + rx_page_info->page_size = tp->rx_pg_size; + rx_page_info->num = tp->rx_num_pgs; + break; case ULP_ISCSI_GET_PARAMS: case ULP_ISCSI_SET_PARAMS: if (!offload_running(adapter)) @@ -472,8 +473,6 @@ cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) static int rx_offload_blackhole(struct t3cdev *dev, struct mbuf **m, int n) { - CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data 0x%x\n", - n, *mtod(m[0], uint32_t *)); while (n--) m_freem(m[n]); return 0; @@ -629,7 +628,7 @@ cxgb_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid) m = m_get(M_NOWAIT, MT_DATA); if (__predict_true(m != NULL)) 
{ mk_tid_release(m, tid); - printf("sending tid release\n"); + CTR1(KTR_CXGB, "releasing tid=%u", tid); cxgb_ofld_send(tdev, m); t->tid_tab[tid].ctx = NULL; @@ -708,6 +707,19 @@ do_l2t_write_rpl(struct t3cdev *dev, struct mbuf *m) } static int +do_rte_write_rpl(struct t3cdev *dev, struct mbuf *m) +{ + struct cpl_rte_write_rpl *rpl = cplhdr(m); + + if (rpl->status != CPL_ERR_NONE) + log(LOG_ERR, + "Unexpected L2T_WRITE_RPL status %u for entry %u\n", + rpl->status, GET_TID(rpl)); + + return CPL_RET_BUF_DONE; +} + +static int do_act_open_rpl(struct t3cdev *dev, struct mbuf *m) { struct cpl_act_open_rpl *rpl = cplhdr(m); @@ -903,7 +915,7 @@ cxgb_arp_update_event(void *unused, struct rtentry *rt0, uint8_t *enaddr, struct sockaddr *sa) { - if (TOEDEV(rt0->rt_ifp) == NULL) + if (!is_offloading(rt0->rt_ifp)) return; RT_ADDREF(rt0); @@ -918,15 +930,21 @@ static void cxgb_redirect_event(void *unused, int event, struct rtentry *rt0, struct rtentry *rt1, struct sockaddr *sa) { - struct toedev *tdev0, *tdev1; - /* * ignore events on non-offloaded interfaces */ - tdev0 = TOEDEV(rt0->rt_ifp); - tdev1 = TOEDEV(rt1->rt_ifp); - if (tdev0 == NULL && tdev1 == NULL) + if (!is_offloading(rt0->rt_ifp)) return; + + /* + * Cannot redirect to non-offload device. + */ + if (!is_offloading(rt1->rt_ifp)) { + log(LOG_WARNING, "%s: Redirect to non-offload" + "device ignored.\n", __FUNCTION__); + return; + } + /* * avoid LORs by dropping the route lock but keeping a reference * @@ -952,14 +970,15 @@ static int do_bad_cpl(struct t3cdev *dev, struct mbuf *m) { log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name, - *mtod(m, uint32_t *)); + 0xFF & *mtod(m, uint32_t *)); + kdb_backtrace(); return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG); } /* * Handlers for each CPL opcode */ -static cpl_handler_func cpl_handlers[NUM_CPL_CMDS]; +static cpl_handler_func cpl_handlers[256]; /* * Add a new handler to the CPL dispatch table. 
A NULL handler may be supplied @@ -1052,7 +1071,7 @@ void cxgb_neigh_update(struct rtentry *rt, uint8_t *enaddr, struct sockaddr *sa) { - if (is_offloading(rt->rt_ifp)) { + if (rt->rt_ifp && is_offloading(rt->rt_ifp) && (rt->rt_ifp->if_flags & IFCAP_TOE)) { struct t3cdev *tdev = T3CDEV(rt->rt_ifp); PANIC_IF(!tdev); @@ -1159,7 +1178,6 @@ cxgb_free_mem(void *addr) free(addr, M_CXGB); } - /* * Allocate and initialize the TID tables. Returns 0 on success. */ @@ -1208,6 +1226,8 @@ init_tid_tabs(struct tid_info *t, unsigned int ntids, static void free_tid_maps(struct tid_info *t) { + mtx_destroy(&t->stid_lock); + mtx_destroy(&t->atid_lock); cxgb_free_mem(t->tid_tab); } @@ -1227,11 +1247,6 @@ remove_adapter(adapter_t *adap) rw_wunlock(&adapter_list_lock); } -/* - * XXX - */ -#define t3_free_l2t(...) - int cxgb_offload_activate(struct adapter *adapter) { @@ -1265,8 +1280,6 @@ cxgb_offload_activate(struct adapter *adapter) device_printf(adapter->dev, "%s: t3_init_l2t failed\n", __FUNCTION__); goto out_free; } - - natids = min(tid_range.num / 2, MAX_ATIDS); err = init_tid_tabs(&t->tid_maps, tid_range.num, natids, stid_range.num, ATID_BASE, stid_range.base); @@ -1295,9 +1308,10 @@ cxgb_offload_activate(struct adapter *adapter) log(LOG_ERR, "Unable to set offload capabilities\n"); #endif } - printf("adding adapter %p\n", adapter); + CTR1(KTR_CXGB, "adding adapter %p", adapter); add_adapter(adapter); device_printf(adapter->dev, "offload started\n"); + adapter->flags |= CXGB_OFLD_INIT; #if 0 printf("failing as test\n"); return (ENOMEM); @@ -1330,6 +1344,7 @@ cxgb_offload_deactivate(struct adapter *adapter) T3C_DATA(tdev) = NULL; t3_free_l2t(L2DATA(tdev)); L2DATA(tdev) = NULL; + mtx_destroy(&t->tid_release_lock); free(t, M_CXGB); } @@ -1353,6 +1368,26 @@ unregister_tdev(struct t3cdev *tdev) mtx_unlock(&cxgb_db_lock); } +static __inline int +adap2type(struct adapter *adapter) +{ + int type = 0; + + switch (adapter->params.rev) { + case T3_REV_A: + type = T3A; + break; + case 
T3_REV_B: + case T3_REV_B2: + type = T3B; + break; + case T3_REV_C: + type = T3C; + break; + } + return type; +} + void cxgb_adapter_ofld(struct adapter *adapter) { @@ -1361,9 +1396,8 @@ cxgb_adapter_ofld(struct adapter *adapter) cxgb_set_dummy_ops(tdev); tdev->send = t3_offload_tx; tdev->ctl = cxgb_offload_ctl; - tdev->type = adapter->params.rev == 0 ? - T3A : T3B; - + tdev->type = adap2type(adapter); + register_tdev(tdev); #if 0 offload_proc_dev_init(tdev); @@ -1398,10 +1432,11 @@ cxgb_offload_init(void) TAILQ_INIT(&ofld_dev_list); TAILQ_INIT(&adapter_list); - for (i = 0; i < NUM_CPL_CMDS; ++i) + for (i = 0; i < 0x100; ++i) cpl_handlers[i] = do_bad_cpl; t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl); + t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl); t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl); t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl); t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl); @@ -1425,7 +1460,9 @@ cxgb_offload_init(void) t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl); t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl); t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl); - + t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl); + t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl); + EVENTHANDLER_REGISTER(route_arp_update_event, cxgb_arp_update_event, NULL, EVENTHANDLER_PRI_ANY); EVENTHANDLER_REGISTER(route_redirect_event, cxgb_redirect_event, diff --git a/sys/dev/cxgb/cxgb_osdep.h b/sys/dev/cxgb/cxgb_osdep.h index 9a2fb9e..39bc33b 100644 --- a/sys/dev/cxgb/cxgb_osdep.h +++ b/sys/dev/cxgb/cxgb_osdep.h @@ -83,6 +83,9 @@ struct t3_mbuf_hdr { #define m_set_socket(m, a) ((m)->m_pkthdr.header = (a)) #define m_get_socket(m) ((m)->m_pkthdr.header) +#define KTR_CXGB KTR_SPARE2 +void cxgb_log_tcb(struct adapter *sc, unsigned int tid); + #define MT_DONTFREE 128 #if __FreeBSD_version > 700030 @@ -338,13 +341,14 @@ static const int debug_flags = DBG_RX; #define DBG(...) 
#endif +#include + #define promisc_rx_mode(rm) ((rm)->port->ifp->if_flags & IFF_PROMISC) #define allmulti_rx_mode(rm) ((rm)->port->ifp->if_flags & IFF_ALLMULTI) -#define CH_ERR(adap, fmt, ...)device_printf(adap->dev, fmt, ##__VA_ARGS__); - -#define CH_WARN(adap, fmt, ...) device_printf(adap->dev, fmt, ##__VA_ARGS__) -#define CH_ALERT(adap, fmt, ...) device_printf(adap->dev, fmt, ##__VA_ARGS__) +#define CH_ERR(adap, fmt, ...) log(LOG_ERR, fmt, ##__VA_ARGS__) +#define CH_WARN(adap, fmt, ...) log(LOG_WARNING, fmt, ##__VA_ARGS__) +#define CH_ALERT(adap, fmt, ...) log(LOG_ALERT, fmt, ##__VA_ARGS__) #define t3_os_sleep(x) DELAY((x) * 1000) @@ -370,7 +374,8 @@ static const int debug_flags = DBG_RX; #define MII_CTRL1000 MII_100T2CR #define ADVERTISE_PAUSE_CAP ANAR_FC -#define ADVERTISE_PAUSE_ASYM 0x0800 +#define ADVERTISE_PAUSE_ASYM ANAR_X_PAUSE_ASYM +#define ADVERTISE_PAUSE ANAR_X_PAUSE_SYM #define ADVERTISE_1000HALF ANAR_X_HD #define ADVERTISE_1000FULL ANAR_X_FD #define ADVERTISE_10FULL ANAR_10_FD @@ -378,6 +383,13 @@ static const int debug_flags = DBG_RX; #define ADVERTISE_100FULL ANAR_TX_FD #define ADVERTISE_100HALF ANAR_TX + +#define ADVERTISE_1000XHALF ANAR_X_HD +#define ADVERTISE_1000XFULL ANAR_X_FD +#define ADVERTISE_1000XPSE_ASYM ANAR_X_PAUSE_ASYM +#define ADVERTISE_1000XPAUSE ANAR_X_PAUSE_SYM + + /* Standard PCI Extended Capaibilities definitions */ #define PCI_CAP_ID_VPD 0x03 #define PCI_VPD_ADDR 2 @@ -399,23 +411,26 @@ static const int debug_flags = DBG_RX; #define udelay(x) DELAY(x) #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define le32_to_cpu(x) le32toh(x) +#define le16_to_cpu(x) le16toh(x) #define cpu_to_le32(x) htole32(x) #define swab32(x) bswap32(x) #define simple_strtoul strtoul -typedef uint8_t u8; -typedef uint16_t u16; -typedef uint32_t u32; -typedef uint64_t u64; +#ifndef LINUX_TYPES_DEFINED +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; -typedef uint8_t __u8; -typedef uint16_t __u16; -typedef 
uint32_t __u32; -typedef uint8_t __be8; -typedef uint16_t __be16; -typedef uint32_t __be32; -typedef uint64_t __be64; +typedef uint8_t __u8; +typedef uint16_t __u16; +typedef uint32_t __u32; +typedef uint8_t __be8; +typedef uint16_t __be16; +typedef uint32_t __be32; +typedef uint64_t __be64; +#endif #if BYTE_ORDER == BIG_ENDIAN diff --git a/sys/dev/cxgb/cxgb_sge.c b/sys/dev/cxgb/cxgb_sge.c index 78b2651..3b1e7a8 100644 --- a/sys/dev/cxgb/cxgb_sge.c +++ b/sys/dev/cxgb/cxgb_sge.c @@ -73,11 +73,16 @@ __FBSDID("$FreeBSD$"); #endif int txq_fills = 0; -static int recycle_enable = 1; +/* + * XXX don't re-enable this until TOE stops assuming + * we have an m_ext + */ +static int recycle_enable = 0; extern int cxgb_txq_buf_ring_size; int cxgb_cached_allocations; int cxgb_cached; -int cxgb_ext_freed; +int cxgb_ext_freed = 0; +int cxgb_ext_inited = 0; extern int cxgb_use_16k_clusters; extern int cxgb_pcpu_cache_enable; @@ -247,7 +252,7 @@ t3_sge_init(adapter_t *adap, struct sge_params *p) ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */ ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL | - F_CQCRDTCTRL | + F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN | V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS | V_USERSPACESIZE(ups ? 
ups - 1 : 0) | F_ISCSICOALESCING; #if SGE_NUM_GENBITS == 1 @@ -256,7 +261,6 @@ t3_sge_init(adapter_t *adap, struct sge_params *p) if (adap->params.rev > 0) { if (!(adap->flags & (USING_MSIX | USING_MSI))) ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ; - ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL; } t3_write_reg(adap, A_SG_CONTROL, ctrl); t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | @@ -264,7 +268,8 @@ t3_sge_init(adapter_t *adap, struct sge_params *p) t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | V_TIMEOUT(200 * core_ticks_per_usec(adap))); - t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000); + t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, + adap->params.rev < T3_REV_C ? 1000 : 500); t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); @@ -293,13 +298,14 @@ sgl_len(unsigned int n) * Return a packet containing the immediate data of the given response. 
*/ static int -get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m, void *cl, uint32_t flags) +get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m) { - m->m_len = m->m_pkthdr.len = IMMED_PKT_SIZE; + m->m_len = m->m_pkthdr.len = IMMED_PKT_SIZE; + m->m_ext.ext_buf = NULL; + m->m_ext.ext_type = 0; memcpy(mtod(m, uint8_t *), resp->imm_data, IMMED_PKT_SIZE); - return (0); - + return (0); } static __inline u_int @@ -308,14 +314,33 @@ flits_to_desc(u_int n) return (flit_desc_map[n]); } +#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ + F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ + V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ + F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ + F_HIRCQPARITYERROR) +#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR) +#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \ + F_RSPQDISABLED) + +/** + * t3_sge_err_intr_handler - SGE async event interrupt handler + * @adapter: the adapter + * + * Interrupt handler for SGE asynchronous (non-data) events. 
+ */ void t3_sge_err_intr_handler(adapter_t *adapter) { unsigned int v, status; - status = t3_read_reg(adapter, A_SG_INT_CAUSE); - + if (status & SGE_PARERR) + CH_ALERT(adapter, "SGE parity error (0x%x)\n", + status & SGE_PARERR); + if (status & SGE_FRAMINGERR) + CH_ALERT(adapter, "SGE framing error (0x%x)\n", + status & SGE_FRAMINGERR); if (status & F_RSPQCREDITOVERFOW) CH_ALERT(adapter, "SGE response queue credit overflow\n"); @@ -328,7 +353,7 @@ t3_sge_err_intr_handler(adapter_t *adapter) } t3_write_reg(adapter, A_SG_INT_CAUSE, status); - if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED)) + if (status & SGE_FATALERR) t3_fatal_err(adapter); } @@ -343,8 +368,6 @@ t3_sge_prep(adapter_t *adap, struct sge_params *p) for (i = 0; i < SGE_QSETS; ++i) { struct qset_params *q = p->qset + i; - q->polling = adap->params.rev > 0; - if (adap->params.nports > 2) { q->coalesce_nsecs = 50000; } else { @@ -354,6 +377,7 @@ t3_sge_prep(adapter_t *adap, struct sge_params *p) q->coalesce_nsecs = 5000; #endif } + q->polling = adap->params.rev > 0; q->rspq_size = RSPQ_Q_SIZE; q->fl_size = FL_Q_SIZE; q->jumbo_size = JUMBO_Q_SIZE; @@ -473,7 +497,7 @@ refill_fl(adapter_t *sc, struct sge_fl *q, int n) struct rx_desc *d = &q->desc[q->pidx]; struct refill_fl_cb_arg cb_arg; caddr_t cl; - int err; + int err, count = 0; int header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t); cb_arg.error = 0; @@ -527,10 +551,12 @@ refill_fl(adapter_t *sc, struct sge_fl *q, int n) d = q->desc; } q->credits++; + count++; } done: - t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); + if (count) + t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); } @@ -776,14 +802,6 @@ t3_sge_init_port(struct port_info *pi) void t3_sge_deinit_sw(adapter_t *sc) { - int i; - - callout_drain(&sc->sge_timer_ch); - if (sc->tq) - taskqueue_drain(sc->tq, &sc->slow_intr_task); - for (i = 0; i < sc->params.nports; i++) - if (sc->port[i].tq != NULL) - 
taskqueue_drain(sc->port[i].tq, &sc->port[i].timer_reclaim_task); mi_deinit(); } @@ -909,8 +927,8 @@ txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs) */ txqs->gen = txq->gen; txq->unacked += ndesc; - txqs->compl = (txq->unacked & 8) << (S_WR_COMPL - 3); - txq->unacked &= 7; + txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5); + txq->unacked &= 31; txqs->pidx = txq->pidx; txq->pidx += ndesc; #ifdef INVARIANTS @@ -1209,7 +1227,6 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count) struct mbuf_iovec *mi; DPRINTF("t3_encap cpu=%d ", curcpu); - KASSERT(qs->idx == 0, ("invalid qs %d", qs->idx)); mi = NULL; pi = qs->port; @@ -1310,6 +1327,7 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count) undersized = (((tmpmi->mi_len < TCPPKTHDRSIZE) && (m0->m_flags & M_VLANTAG)) || (tmpmi->mi_len < TCPPKTHDRSIZE - ETHER_VLAN_ENCAP_LEN)); + if (__predict_false(undersized)) { pkthdr = tmp; dump_mi(mi); @@ -1550,7 +1568,6 @@ again: reclaim_completed_tx_imm(q); if (ret == 1) { mtx_unlock(&q->lock); log(LOG_ERR, "no desc available\n"); - return (ENOSPC); } goto again; @@ -1610,6 +1627,7 @@ again: reclaim_completed_tx_imm(q); q->stops++; } mtx_unlock(&q->lock); + wmb(); t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } @@ -1648,9 +1666,9 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q) } for (i = 0; i < SGE_RXQ_PER_SET; ++i) { if (q->fl[i].desc) { - mtx_lock(&sc->sge.reg_lock); + mtx_lock_spin(&sc->sge.reg_lock); t3_sge_disable_fl(sc, q->fl[i].cntxt_id); - mtx_unlock(&sc->sge.reg_lock); + mtx_unlock_spin(&sc->sge.reg_lock); bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map); bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc, q->fl[i].desc_map); @@ -1665,9 +1683,9 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q) for (i = 0; i < SGE_TXQ_PER_SET; i++) { if (q->txq[i].desc) { - mtx_lock(&sc->sge.reg_lock); + mtx_lock_spin(&sc->sge.reg_lock); t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0); - 
mtx_unlock(&sc->sge.reg_lock); + mtx_unlock_spin(&sc->sge.reg_lock); bus_dmamap_unload(q->txq[i].desc_tag, q->txq[i].desc_map); bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc, @@ -1682,9 +1700,9 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q) } if (q->rspq.desc) { - mtx_lock(&sc->sge.reg_lock); + mtx_lock_spin(&sc->sge.reg_lock); t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id); - mtx_unlock(&sc->sge.reg_lock); + mtx_unlock_spin(&sc->sge.reg_lock); bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map); bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc, @@ -1893,7 +1911,7 @@ write_ofld_wr(adapter_t *adap, struct mbuf *m, struct tx_desc *d = &q->desc[pidx]; struct txq_state txqs; - if (immediate(m) && segs == NULL) { + if (immediate(m) && nsegs == 0) { write_imm(d, m, m->m_len, gen); return; } @@ -1927,18 +1945,25 @@ static __inline unsigned int calc_tx_descs_ofld(struct mbuf *m, unsigned int nsegs) { unsigned int flits, cnt = 0; + int ndescs; - - if (m->m_len <= WR_LEN) - return 1; /* packet fits as immediate data */ + if (m->m_len <= WR_LEN && nsegs == 0) + return (1); /* packet fits as immediate data */ if (m->m_flags & M_IOVEC) cnt = mtomv(m)->mv_count; + else + cnt = nsegs; /* headers */ - flits = ((uint8_t *)m->m_pkthdr.header - mtod(m, uint8_t *)) / 8; + flits = m->m_len / 8; - return flits_to_desc(flits + sgl_len(cnt)); + ndescs = flits_to_desc(flits + sgl_len(cnt)); + + CTR4(KTR_CXGB, "flits=%d sgl_len=%d nsegs=%d ndescs=%d", + flits, sgl_len(cnt), nsegs, ndescs); + + return (ndescs); } /** @@ -1998,7 +2023,6 @@ again: reclaim_completed_tx_(q, 16); write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs); check_ring_tx_db(adap, q); - return (0); } @@ -2058,6 +2082,7 @@ again: cleaned = reclaim_completed_tx_(q, 16); set_bit(TXQ_RUNNING, &q->flags); set_bit(TXQ_LAST_PKT_DB, &q->flags); #endif + wmb(); t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } @@ -2300,7 +2325,7 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int 
irq_vec_idx, #endif q->lro.enabled = lro_default; - mtx_lock(&sc->sge.reg_lock); + mtx_lock_spin(&sc->sge.reg_lock); ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx, q->rspq.phys_addr, q->rspq.size, q->fl[0].buf_size, 1, 0); @@ -2356,7 +2381,7 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx, device_get_unit(sc->dev), irq_vec_idx); MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF); - mtx_unlock(&sc->sge.reg_lock); + mtx_unlock_spin(&sc->sge.reg_lock); t3_update_qset_coalesce(q, p); q->port = pi; @@ -2370,7 +2395,7 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx, return (0); err_unlock: - mtx_unlock(&sc->sge.reg_lock); + mtx_unlock_spin(&sc->sge.reg_lock); err: t3_free_qset(sc, q); @@ -2419,17 +2444,17 @@ t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad) } static void -ext_free_handler(void *cl, void * arg) +ext_free_handler(void *arg1, void * arg2) { - uintptr_t type = (uintptr_t)arg; + uintptr_t type = (uintptr_t)arg2; uma_zone_t zone; struct mbuf *m; - m = cl; + m = arg1; zone = m_getzonefromtype(type); m->m_ext.ext_type = (int)type; cxgb_ext_freed++; - cxgb_cache_put(zone, cl); + cxgb_cache_put(zone, m); } static void @@ -2443,7 +2468,8 @@ init_cluster_mbuf(caddr_t cl, int flags, int type, uma_zone_t zone) bzero(cl, header_size); m = (struct mbuf *)cl; - + + cxgb_ext_inited++; SLIST_INIT(&m->m_pkthdr.tags); m->m_type = MT_DATA; m->m_flags = flags | M_NOFREE | M_EXT; @@ -2721,9 +2747,30 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget) eth = (r->rss_hdr.opcode == CPL_RX_PKT); if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) { - /* XXX */ - printf("async notification\n"); + struct mbuf *m; + + if (cxgb_debug) + printf("async notification\n"); + if (rspq->rspq_mh.mh_head == NULL) { + rspq->rspq_mh.mh_head = m_gethdr(M_DONTWAIT, MT_DATA); + m = rspq->rspq_mh.mh_head; + } else { + m = m_gethdr(M_DONTWAIT, MT_DATA); + } + + /* XXX m is lost here 
if rspq->rspq_mbuf is not NULL */ + + if (m == NULL) + goto no_mem; + + memcpy(mtod(m, char *), r, AN_PKT_SIZE); + m->m_len = m->m_pkthdr.len = AN_PKT_SIZE; + *mtod(m, char *) = CPL_ASYNC_NOTIF; + rss_csum = htonl(CPL_ASYNC_NOTIF << 24); + eop = 1; + rspq->async_notif++; + goto skip; } else if (flags & F_RSPD_IMM_DATA_VALID) { struct mbuf *m = NULL; @@ -2734,35 +2781,32 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget) else m = m_gethdr(M_DONTWAIT, MT_DATA); - /* - * XXX revisit me - */ - if (rspq->rspq_mh.mh_head == NULL && m == NULL) { + if (rspq->rspq_mh.mh_head == NULL && m == NULL) { + no_mem: rspq->next_holdoff = NOMEM_INTR_DELAY; budget_left--; break; } - get_imm_packet(adap, r, rspq->rspq_mh.mh_head, m, flags); - + get_imm_packet(adap, r, rspq->rspq_mh.mh_head); eop = 1; rspq->imm_data++; - } else if (r->len_cq) { + } else if (r->len_cq) { int drop_thresh = eth ? SGE_RX_DROP_THRES : 0; #ifdef DISABLE_MBUF_IOVEC eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r); #else eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mbuf, r); +#endif #ifdef IFNET_MULTIQUEUE - rspq->rspq_mbuf->m_pkthdr.rss_hash = rss_hash; + rspq->rspq_mh.mh_head->m_pkthdr.rss_hash = rss_hash; #endif -#endif ethpad = 2; } else { DPRINTF("pure response\n"); rspq->pure_rsps++; } - + skip: if (flags & RSPD_CTRL_MASK) { sleeping |= flags & RSPD_GTS_MASK; handle_rsp_cntrl_info(qs, flags); @@ -2787,7 +2831,8 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget) * XXX size mismatch */ m_set_priority(rspq->rspq_mh.mh_head, rss_hash); - + + ngathered = rx_offload(&adap->tdev, rspq, rspq->rspq_mh.mh_head, offload_mbufs, ngathered); rspq->rspq_mh.mh_head = NULL; @@ -2988,12 +3033,8 @@ retry_sbufops: return (err); } - -/* - * broken by recent mbuf changes - */ static int -t3_dump_txq(SYSCTL_HANDLER_ARGS) +t3_dump_txq_eth(SYSCTL_HANDLER_ARGS) { struct sge_txq *txq; struct sge_qset *qs; @@ -3022,7 +3063,7 @@ t3_dump_txq(SYSCTL_HANDLER_ARGS) 
txq->txq_dump_start = 0; return (EINVAL); } - err = t3_sge_read_ecntxt(qs->port->adapter, txq->cntxt_id, data); + err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data); if (err) return (err); @@ -3066,6 +3107,67 @@ retry_sbufops: return (err); } +static int +t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS) +{ + struct sge_txq *txq; + struct sge_qset *qs; + int i, j, err, dump_end; + static int multiplier = 1; + struct sbuf *sb; + struct tx_desc *txd; + uint32_t *WR, wr_hi, wr_lo, gen; + + txq = arg1; + qs = txq_to_qset(txq, TXQ_CTRL); + if (txq->txq_dump_count == 0) { + return (0); + } + if (txq->txq_dump_count > 256) { + log(LOG_WARNING, + "dump count is too large %d\n", txq->txq_dump_count); + txq->txq_dump_count = 1; + return (EINVAL); + } + if (txq->txq_dump_start > 255) { + log(LOG_WARNING, + "dump start of %d is greater than queue size\n", + txq->txq_dump_start); + txq->txq_dump_start = 0; + return (EINVAL); + } + +retry_sbufops: + sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN); + sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx, + txq->txq_dump_start, + (txq->txq_dump_start + txq->txq_dump_count) & 255); + + dump_end = txq->txq_dump_start + txq->txq_dump_count; + for (i = txq->txq_dump_start; i < dump_end; i++) { + txd = &txq->desc[i & (255)]; + WR = (uint32_t *)txd->flit; + wr_hi = ntohl(WR[0]); + wr_lo = ntohl(WR[1]); + gen = G_WR_GEN(wr_lo); + + sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n", + wr_hi, wr_lo, gen); + for (j = 2; j < 30; j += 4) + sbuf_printf(sb, "\t%08x %08x %08x %08x \n", + WR[j], WR[j + 1], WR[j + 2], WR[j + 3]); + + } + if (sbuf_overflowed(sb)) { + sbuf_delete(sb); + multiplier++; + goto retry_sbufops; + } + sbuf_finish(sb); + err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1); + sbuf_delete(sb); + return (err); +} static int t3_lro_enable(SYSCTL_HANDLER_ARGS) @@ -3162,7 +3264,10 @@ t3_add_attach_sysctls(adapter_t *sc) CTLTYPE_INT|CTLFLAG_RW, sc, 0, t3_lro_enable, "I", "enable large receive 
offload"); - + SYSCTL_ADD_INT(ctx, children, OID_AUTO, + "hw_revision", + CTLFLAG_RD, &sc->params.rev, + 0, "chip model"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "enable_debug", CTLFLAG_RW, &cxgb_debug, @@ -3191,6 +3296,10 @@ t3_add_attach_sysctls(adapter_t *sc) CTLFLAG_RD, &cxgb_ext_freed, 0, "#times a cluster was freed through ext_free"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, + "ext_inited", + CTLFLAG_RD, &cxgb_ext_inited, + 0, "#times a cluster was initialized for ext_free"); + SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mbufs_outstanding", CTLFLAG_RD, &cxgb_mbufs_outstanding, 0, "#mbufs in flight in the driver"); @@ -3240,8 +3349,8 @@ t3_add_configured_sysctls(adapter_t *sc) for (j = 0; j < pi->nqsets; j++) { struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j]; - struct sysctl_oid *qspoid, *rspqpoid, *txqpoid; - struct sysctl_oid_list *qspoidlist, *rspqpoidlist, *txqpoidlist; + struct sysctl_oid *qspoid, *rspqpoid, *txqpoid, *ctrlqpoid; + struct sysctl_oid_list *qspoidlist, *rspqpoidlist, *txqpoidlist, *ctrlqpoidlist; struct sge_txq *txq = &qs->txq[TXQ_ETH]; snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j); @@ -3258,8 +3367,10 @@ t3_add_configured_sysctls(adapter_t *sc) txq_names[0], CTLFLAG_RD, NULL, "txq statistics"); txqpoidlist = SYSCTL_CHILDREN(txqpoid); - - + ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, + txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics"); + ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid); + SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size", CTLFLAG_RD, &qs->rspq.size, 0, "#entries in response queue"); @@ -3282,8 +3393,7 @@ t3_add_configured_sysctls(adapter_t *sc) CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq, 0, t3_dump_rspq, "A", "dump of the response queue"); - - + SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "dropped", CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_drops, 0, "#tunneled packets dropped"); @@ -3340,7 +3450,22 @@ t3_add_configured_sysctls(adapter_t *sc) 0, "txq #entries to dump"); SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump", 
CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH], - 0, t3_dump_txq, "A", "dump of the transmit queue"); + 0, t3_dump_txq_eth, "A", "dump of the transmit queue"); + + SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start", + CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start, + 0, "ctrlq start idx for dump"); + SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count", + CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count, + 0, "ctrl #entries to dump"); + SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump", + CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL], + 0, t3_dump_txq_ctrl, "A", "dump of the transmit queue"); + + + + + } } } diff --git a/sys/dev/cxgb/sys/cxgb_support.c b/sys/dev/cxgb/sys/cxgb_support.c index 87b7e0b..e911dfc 100644 --- a/sys/dev/cxgb/sys/cxgb_support.c +++ b/sys/dev/cxgb/sys/cxgb_support.c @@ -331,3 +331,10 @@ buf_ring_alloc(int count, int flags) return (br); } + +void +buf_ring_free(struct buf_ring *br) +{ + free(br->br_ring, M_DEVBUF); + free(br, M_DEVBUF); +} diff --git a/sys/dev/cxgb/sys/mbufq.h b/sys/dev/cxgb/sys/mbufq.h index 0d6c604..81a4d39 100644 --- a/sys/dev/cxgb/sys/mbufq.h +++ b/sys/dev/cxgb/sys/mbufq.h @@ -103,7 +103,7 @@ mbufq_dequeue(struct mbuf_head *l) } static __inline struct mbuf * -mbufq_peek(struct mbuf_head *l) +mbufq_peek(const struct mbuf_head *l) { return (l->head); } diff --git a/sys/dev/cxgb/sys/mvec.h b/sys/dev/cxgb/sys/mvec.h index f314de7..09dbf12 100644 --- a/sys/dev/cxgb/sys/mvec.h +++ b/sys/dev/cxgb/sys/mvec.h @@ -31,6 +31,7 @@ #ifndef _MVEC_H_ #define _MVEC_H_ +#include int cxgb_cache_init(void); @@ -48,8 +49,10 @@ extern int cxgb_ext_freed; extern int cxgb_mbufs_outstanding; extern int cxgb_pack_outstanding; -#define mtomv(m) ((struct mbuf_vec *)((m)->m_pktdat)) -#define M_IOVEC 0x100000 /* mbuf immediate data area is used for cluster ptrs */ +#define mtomv(m) ((struct mbuf_vec *)((m)->m_pktdat)) +#define M_IOVEC 0x100000 /* mbuf immediate data area is used for cluster ptrs */ +#define M_DDP 0x200000 /* direct 
data placement mbuf */ +#define EXT_PHYS 10 /* physical/bus address */ /* * duplication from mbuf.h - can't use directly because @@ -59,7 +62,8 @@ struct m_ext_ { caddr_t ext_buf; /* start of buffer */ void (*ext_free) /* free routine if not the usual */ (void *, void *); - void *ext_args; /* optional argument pointer */ + void *ext_arg1; /* optional argument pointer */ + void *ext_arg2; /* optional argument pointer */ u_int ext_size; /* size of buffer, for ext_free */ volatile u_int *ref_cnt; /* pointer to ref count info */ int ext_type; /* type of external storage */ @@ -72,6 +76,11 @@ struct m_ext_ { #define EXT_CLIOVEC 9 #define EXT_JMPIOVEC 10 +#define m_cur_offset m_ext.ext_size /* override to provide ddp offset */ +#define m_seq m_pkthdr.csum_data /* stored sequence */ +#define m_ddp_gl m_ext.ext_buf /* ddp list */ +#define m_ddp_flags m_pkthdr.csum_flags /* ddp flags */ +#define m_ulp_mode m_pkthdr.tso_segsz /* upper level protocol */ extern uma_zone_t zone_miovec; @@ -181,12 +190,22 @@ static __inline int busdma_map_sgl(bus_dma_segment_t *vsegs, bus_dma_segment_t * } struct mbuf *mi_collapse_mbuf(struct mbuf_iovec *mi, struct mbuf *m); -struct mbuf *mi_collapse_sge(struct mbuf_iovec *mi, bus_dma_segment_t *seg); void *mcl_alloc(int seg_count, int *type); void mb_free_ext_fast(struct mbuf_iovec *mi, int type, int idx); static __inline void +mi_collapse_sge(struct mbuf_iovec *mi, bus_dma_segment_t *seg) +{ + mi->mi_flags = 0; + mi->mi_base = (caddr_t)seg->ds_addr; + mi->mi_len = seg->ds_len; + mi->mi_size = 0; + mi->mi_type = EXT_PHYS; + mi->mi_refcnt = NULL; +} + +static __inline void m_free_iovec(struct mbuf *m, int type) { int i; @@ -279,9 +298,11 @@ m_getzonefromtype(int type) case EXT_JUMBO16: zone = zone_jumbo16; break; +#ifdef PACKET_ZONE case EXT_PACKET: zone = zone_pack; break; +#endif default: panic("%s: invalid cluster type %d", __func__, type); } diff --git a/sys/dev/cxgb/sys/uipc_mvec.c b/sys/dev/cxgb/sys/uipc_mvec.c index 52df9fc..0e0ab51 
100644 --- a/sys/dev/cxgb/sys/uipc_mvec.c +++ b/sys/dev/cxgb/sys/uipc_mvec.c @@ -412,8 +412,8 @@ mb_free_ext_fast(struct mbuf_iovec *mi, int type, int idx) case EXT_EXTREF: KASSERT(mi->mi_ext.ext_free != NULL, ("%s: ext_free not set", __func__)); - (*(mi->mi_ext.ext_free))(mi->mi_ext.ext_buf, - mi->mi_ext.ext_args); + (*(mi->mi_ext.ext_free))(mi->mi_ext.ext_arg1, + mi->mi_ext.ext_arg2); break; default: dump_mi(mi); diff --git a/sys/dev/cxgb/t3cdev.h b/sys/dev/cxgb/t3cdev.h index 67db552..714557b 100644 --- a/sys/dev/cxgb/t3cdev.h +++ b/sys/dev/cxgb/t3cdev.h @@ -38,7 +38,8 @@ struct cxgb3_client; enum t3ctype { T3A = 0, - T3B + T3B, + T3C }; struct t3cdev { diff --git a/sys/dev/cxgb/t3fw-4.7.0.bin.gz.uu b/sys/dev/cxgb/t3fw-4.7.0.bin.gz.uu deleted file mode 100644 index 2959b8d..0000000 --- a/sys/dev/cxgb/t3fw-4.7.0.bin.gz.uu +++ /dev/null @@ -1,451 +0,0 @@ -/************************************************************************** - -Copyright (c) 2007, Chelsio Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Neither the name of the Chelsio Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -$FreeBSD$ - -***************************************************************************/ - -begin 755 t3fw-4.7.0.bin.gz -M'XL("-[;X48``W0S9G5A3U]HWO+,3R+1#$@@0YDT( -M86>.VBH>M8V*5:OG-$XM+1U2DU`Z.-2J.!M&PXR"2D`@H'7`*2AU*+9=H`O -MM;9&9#&"TZTP+$001``#CEQ#T$R$B5!7&T)#$#?B"0X811(2$O0W$%H&CL*4 -M-#<,I""*;$>B2(#<0!AC48^$&9DX,LL`KZWP.AM'9F+PF@BO*?#Z+H[0G0!! -M>H]_P-G7IS-(#\&D.1 -M$MP3/3H4/0*&E>#I,'(X@#Q,/3@<2!X,(@\!W47<)YEF1,*0YW^,D3ZP"*J: -M2X:)F)-%/B,P)F/BTF&WX(LEH[W%[P@DMU)9C8@O\B()XQ7X?L!3B1JJ9VJ\ -MC2W1-R0LFOKS\Y979.@]/_YD!>PA$5G^'U-MI!)L/(-GX)G169)CDL5C!-F& -MACF;=`TC%H]X9")OP8I:7G5;/G#YDZN`CABE)T8E$*.F$*.FO^@+3TU7@.%T -MC8^G4LL"R+02'`AI4XXX,4&G]:A -MN'@H+AZ*]WKCS=.]\69#%'D^.SIGOC[SK^J_&`?;:,"EX,GF6E\!EA -M(/@?][!??Q^$Z9H8")\D]3"O#PQ6!`GI)@>1[KW?_C0>WK],R22;7:A'.@O3 -M><8`85L]A3VATD?"]T8QO*4A5MH$;_H"*'/`VFH48$(1.DM^1<.YT -M(4@0;"!L"#D`WY&_?NFI![@419Z8`>7!:G!Q7$O"=I^8!&$))B%)!%$AY"#Y -MA*H-E(7"_C)2-27)'OB._(F&"*&,$Q_"4$R2/U'/?C-21?.?P.??"4CR5ZKL -M\_#Y,X3V6SML]VP8!MRP_V`?&ZF:0YE2^"P1]H7JPTF*Q*PW(8BF'83K7A;5@W/JZ% -MY>@0*VR7IT$(C7RB-!Q`8&#!<@1*0SU,?P"&KZG!$2H-WQ!*PS%X_XW##;L1 -MRB>BGB$L>!T+@\$%_^#5#.M6#,/]_Z3OM/\2"?[/^,,%B/Y%$,&^@Q/P17C^ -M['D0(P`/IR$O`D[3>YX]#]2S?[^GTOP1C/\2I#0'^:>__U@_Y'48ICSQ7CUQ -MW1_A-?-0@-@T"?*J!(A%XP4X\NJQ/_)/A<^FG/!>J3"9]?\.K\WV7B="S-*? 
-M@OD_^R,_'6*9"G(T.L2Z,3!.OX\C+\?#*\3?T;#,5Z?@R#!XU4*,5/;AB`)B -MH`S>XPXC5<7,B&)Z=3%C6;'/\F+?%<7,E?_#ZG1YRG>5HMVE])Y2!J/4QZ?4 -MU[>4R2QEL4K9[%(.IY3[W]6E"V8N1KN+N;#\GF)&0['/5\6^AXJ9AXM91XK9 -M1XLY7Q=S&[UUH?@(]"N,$(`Y5%;*UT`ZA@G(0<\M==^*=/[Y?B:&G/^7^Q3D -MQS_N/?(XB/"GD("?0NBW*.(:]),X^"=Q)S_U4]]B8T-RJNG)[(`$O@@F?8H@ -MGPD8`#_'_^^N2DDD(8D`#E`JE_HJ8M&5.;2KCM-'`WYBJ)Q,PHE

DV#I$^ -M'9SC6I&K#LO^VYZU&OB4;!P)+;_)R7UF[FY+Z\=K^/-A;E#Z)^W&F6 -M^C6B_H>A_0\W"]2U5>#,ZN.HD98EGR!+*6;:<+8-LZ2ERFHKNXSF#Q%=:G$HFRP95D)3/I&2/\-]E&.?,/6/S*,B5N4)>[O,7\^/&\8-T?$LM,6 -MK)?)HQG6DCE*60M8IS7@(GUFY^ -M*:R_W?;REL]9/'W%0KY+F(=7ID;TMU8OYC6B%8M`1^62L/XS+J!<2E^[CD6\ -MCK#Z4"(67;.9D3=2&2M9O9E1.$H3&[UJ\TC-6!S4`ZOOD7@6:X+9FA[6_TU( -M_S?R0VL0MPTQYO_Y5U-;J4VHU=P7:[97YNA2-[\RR_2^-B$K`?6#M9K;E`]CI>;3%T/Z=Q(XAA4* -M=]GJ^E77L[29;5@75W4KJ^U=:DC`.E#L=S1^N0TUDM;UW!K^JZ%B9B -M;U*OUPFNX(F\9,IQ`D;38-85'!Q*Y,TUD1<0A6IQGB"1-Z/#<8Z_Z3@H">LO -ML@DK`,LF9-O0LB98>R5L@S19>4RLW%YI&MSOF0>^%_"0_F7A_2N(#U'__E06 -M:YK%FI2KXWPRX^.MLY22JOE'QB@VR%(%X,=XZB:MZ8`ALVX*B=059O5!ED.#[EBI.KWEU/O&&F1\":1 -M-\HR(Q"V1U^@NV+DS9N1B)WH=)AKT,P:,(/!F&PB/T>`,W>8]D,&YY^&^4^" -MP<9+3DY9O,*87%_'X4Q6F)-7Y*%&Y,(60:708CBL&"OH!F`R>`BLV!/O()9& -M]4\/Z9\N?[SFD\\Q,%!/(@X;XLY/??H(.')T12/8-KW9.M,V'%O#!0D1_1-+ -MKTD_9%2ZX?@5?%IQL_IVV:VH_E>\8\=WH7#0&M8V%51>IT:O-[)_M-J(89>$ -M^URUZ5))EO)\&X/D:F595)^']`\GC-3`UO;OLBDSV^#8=CB\0PN'U;\+I"XI#:[IQ^G#4[P8Z;X4/@ML077YZ&BQM@/U&_%IP -MW41V(<"`%F);^_W&^8-AZ,OQ/CX3SKX)\YT<6/U/?BY1O;(^*6@V40>`\7HU/CEH,=$7H9S+0[W+16D+/,A -M5CVUI%699ZPEHE$69YJE)D/\X)(RO2KTP84%1RB$4=3"*/T^*8Z$/VK"_"G>Z*H7JCFS.ZQP/)'V@J-U:)$QQ,/-P3X8WM#]G -MF1].A>4IHADIJ5'9PWP?&^;O#`8_`J<*8LO5^/J6/T'+XI`'+3:U+#HB[,'? -M62GLBH>4Q@QH=`+Q@V:L7^"W#V7UHU4_%[Y\9<#L3NC27=:MO1V@&BLX]D]B -M>%#!Z++'E;_FC6H87?'$3RSJY,_+B28V!.6=+MM>L<-V,N_[LIW$AN"*NK)= -MMK.J5KQLMV*/^(@=O%2?[.O[QJ>]N%\?FV_C^+V"K;WM`XKMMF,VB`"A_O/J -M?.TV>QZ1+PY[4`O:Q0]*(A_8%='A,8\#]OV->.PO?E"<@_->N>FH>J(*C8!I6#QF!ZVQ_2H.;0AVW.#$NH%*8R@ -M%";BP3N;2L#!\`<6UC[AEF)N/PI`U7K5^5IE>J7*BU/9$*?FPCQ7=2I_)(F< -MT.D`,X`5N(!;$XM6"DS/`*4++#GNFU+F0VQY:DD_)H?6K([-GF8I[H06*>C! -M!.R8<'NKICF[J6--)VI$YX4\>/6%22K`V2[,TO&VJCG+W#Q+,[4$/2KYU8V'3-WC-9$XWY.8WD`@8\]22+,5VC<5^J_9=X; -MS6UD\[X5:NZ'^_F-Y_?3Z_OAU:^1JSP6?M&Y9EVX)CIVY;V@N-#?_;[#F`^- -M"/L5&O.5^F7L=:.YJ9P9C -MH?DTVOY2]OVZ,9M_R!Z]+GA36];+V4&Q+8+,D5F! 
-MTA9AWJA,D;+%'\[*]BF4P9Y(Z`2%(XJ&0T"(N/]W1IU0W2("Y]4M@05*;4NH -MIB5,V1+"KM+[]@O]ZE".#8V\_XV"(X`1/Q>J;@DB6L0*CE_(_:.4\2Z"QGL[ -MJ?DY"\*\WSZNYG'6J;5YLESYCL8*&;<.XW"F*&)CVV93`P3--7>%:7"R1Y>X -MRTR#XQ!$'413!:&H`KVZ,-'GOF6?(C<@W[]`:!I$AE0$ZAKW`TI%JCM#[E>" -M#'L[ZXX0&@7[N9#[!4II,C&W4EE:"Z&!2(8*$O6O7'%&P/W*L/OV\/OVH/MV -MC!"*[MM#[JB&@C58MAEL4%,(O^I.7UT2I^/_/Y3RX8H133*X4[[OB%?)[K_Z>*3 -M4G5TS.EN#8$@G*+&?'$B1PYPR38Z53-B0E7D_0\6V*@YKK@. -MP9VEE"`>J/Y1,:'27--6+@J^_TG4_<3\CYING3Z3FTPLI)W>NB)#J,"QI@/Y -M>*+/S1\=/VPSHKX(U&K(/+91S&/YO7G\;SF:ZUM3W(PB?,W3#V)KL\PSOE1& -M:].Y"[#%A088&46-IU21^.)Z%L%LORX\A!>*[;B^+@]26MY42L4(24)%4_L[F.07)J[^`=*,L)]A2_)H140"#B:R\ -M",WUKBMX7DBBSVK+?IZ);("8N'D.N`8&0'K9V]5)5>^8R!,>+!4GLOM`TP4' -M<"JWT14.P9;@\B"?=ZFA]_T4[5FH-M(+@RH#JP282PC<>^>IN_!$VL7\P+R0 -M/7M49GJN+$>^NXYCQ\IEMF0V>XH\-I9IP\J2M1+Q#E(K$YL&SVCFTK31#+]! -MR)5"%!P?@H/(\62_G?'U57Y^DS72Y#446]J9]Y'9.A9^$5W%,N -M++-*X"DS\KY_(NUH07!1""$)@'9$)JZW$;*`4R0UI_?_>4Y;V@V>>0;;`!N@ -M_@QGKD4+@Z[@58+(^[1$6H*)/()`4,*';`?GF.G9`V3!3=3T[)2W/UR&$^Y$ -M%D+-;:@9G`;3L^\IDP#GRE8/A[IW`5@C[KG!\3P1ZY_"JH-8'5I>7^DD2FMC -M)E02TN28L>*8UR`'4EW!X3Q(])ED>I;O]6<@[BNB8R#P*_:0',X;BMB8E-(Q -M'Y<-5T9'K]RM@G8*.%22",I).NUUDNXU_O=.DF4_,)$M'OMR40>,X??J(^_M -MU^*^EPU7]"!1=.^0(H@F#T)2GO@0OS^U9'RJC);D"!314DLQ2[&'YJF"=`5D -M%_1(;33*YT^S_,*02P)MPRDG*",Z-QZL-V^5P.E=-`H-KA`U'"GSYS4* -M*P)./PF[MT/II*T]*,@347-?]J/#O#6\*-6(Q.7[8]N%^Y+5<[,YN]'59R>% -MWLOC?,V!]"?%X:.2("M.CXB=FV6>>5L6K?%]/-+W&;;`A?(?#\TX%4>DCHY* -MI&VD;,YY!ZS)BCR1$6$BU+P^C7BM/..4I?8=T%Z9F+]XR]MY2W*75KQ3GK1I -M3E'J6A/4WO!"P?=N:BQ5_K1$]K"+#O"P6E"T`[3G+MOT7OF[MN4Y*\K>MW]0 -MN")W^;[Y2^_1ZNNJ5I0OSUL('-Q"$3=/?\H):H`3;```9)8&E01*\;%@&&L* -MC34VGM4J8,/(F'C&2WKN3@&S4>`7JO>9*,`:44Z",/S>)UP;RJP3<$:@RFB( -M0K%@N7C(O23X"*(Y -MQ>QFY"E*WBQ]*R,P,P@FK9B=NR1K:=V5+RU+DU`XZ_XDMC& -MI>5+*#7<5E_!8RIAVD70/B]2.ZG`NX] -M%/L=W3([_%X8]RI*-=L`'.'W?$+N_A,JDB9:BAV!1FU/HZ*A+2=`FY[E[-]% -M:K.SB@1J2:`F5EKO4LL"3[U1*"N0;^NODOFY,!YOBAH"PL!'7FT!QI(9U;/M -M;Y:_E7J8);[[*V%%62WQX?=HWK*?EVDB;?F"UI,4)N<+$UDU5.8FESI6Y8$' 
-M88%`P[$F^CZAL*"I3Q.K,I$WY=1O#^+Q;X6);#OU+N51>,KCLQU&#XJP:SUX -M3.$!8[R)K()XH,$Q\=T3V$]"%LDB?LY>EO0)=L=+^5@*"<*NPRR.Q!CH(NAF -M;NL/N7L\_&X3'"QEM`J[@6VJ^Y$]FJJ:=#G88X3`6N7 -M[JH>(N)D$!US3$Y!.GA<`;YS:=XBU0U"-R,P7@.?&!#>3:T8'?DW4K6)13>^>XSE#DS'0P" -M-CJV) -M]2L<[1,K8BX3,.8(?2RHSRV*N?G.03<>BJUA<&X)68E"%H'"9YL.`Z-9GV@N -MGN.[1@@>Y33;CG-?XFZ:5#;9_IKXKKEZ/.-MH9\=C;IK1/TFP[I5Z\LFP/N& -M^"8WO-@3(N_.%-U-TCHP[*EP&[G/KMF0)7O4YF?C$INRJ)D7=??U\+NSE/#] -MM\(]C?O[U=>S9`UMO'%F4RG?-[QVT>L -M61.UB2?*4LI9A?AG7PV=.4>UD.Z"7XTU^PJC(^Z<\Q\;6^YZ/1/!1^?OAIU -MYXQV@+;FEP"*;D)\3Z1/A5:W]N/"%",B@]@2=:?^.>ULREZ>-Y?_'4?[\]:4 -M`88-7YLSFR*>,Y&X:`WS]DCF'6Q!)>VBVT,\3T"#FT0>I0QN>2!E<#-T12;@ -MKIY5,+B5!F.2\P -M$;UN<5PTD4V(!RF#$YDS(:TL!O450?)M=(IP]RSL,@!WK)$.R=B=+['WA!2W -M(Y+(B87)7^XL^H@PT_>O3?V-5M^?&UB57/T1K)&\S@YK9)<5IK!84PA(+ONQ -MJA338+]V+BW&ZU/&_T$J^9!45O_)JY2>Z<_[^,S7SREEY)T -MNDRM0'ALBX=*LN0F\A+B(?_?"K?URZ]G-Z4OSX"=U9 -M!XYE#9-%,WS^89B?$GAI(:R_SZ7X^DH>;[):FKR:(L5;*:?PU<`[$PI&9`[W -MKO=M>;AQ@&$1YBHW_Z/T<>6OK#EHQ)VQ359XY=6A&Q]%W!E=_C/O&Y;T>E;< -M^3;I=%QZBUI_=7I;$:9FDB=1B9=2*LXF4A=V)!HZ0.RJ0`2WV6&%U)ZL5 -MW7+.WJYHJ)5/J%1(DZE%/4AH!YW4,%,F#"TV#?Y$R7E@&KQ=N55\)P*4E#E8 -M*11RA-SQ*ZN)@Y[?A,HXZ`5"_X_BPB][=._4J]2(;K7;97!,O>/9.O,/?Y)Y -MEJH?3!0+X9+/%:`@5A;1M%EN1@@S$_;] -MWJ;#:DZ?KPK;V8JIL+VM*EF$THPH/.]<3;>U'.M^K586H3$C:NK99P(_\.%E -MO30:M0L9K&F6C$/!?3?DBZH6E$$._$-$7X^\0_:'_=UZ3+NHTOS+(=CDIEZH -M6#OJ"*A8K+O>Z+*H$IUSFA9+.3Y#'EL.*$CP`9".E:MD[-8D\N" -MV;DTM72<>EA@N9C'&<^Y@BGVW%?$1BVWALH5"+O=B+!>H;%?D6ZAL::.]K,+ -M83WA6R(Z*J3O/&N]$;Y"&*_45S(NCY;N^5T:BS`]IFP$N*]_/3%_AS(7%.C,_!-Y_:=+HP -M@%JG@;;?)0SK^Y):S>@4E?P0VE?#@T;$A5:<+0R$#T]^5W&BXN\5K17?A_:M -M*VN!:<1]RV`*N@LM/Q[05U#9QBI#RYH+@LK.\$)%O#HAO0X-Z-M0?C)71K1( -MB)90HL4_K@6/:Q''M0AD+=&REA!9BS"F1:9L"52VB&):I'EB#GU*4%_VNF"? -M$4)NTFC&"#2P+Y,IUBM%1! 
-M0-\J5BJJ;`D6]\W/$TE;PF-:PH`^(P*Z2O!Y7$ND7R,:YR]0M@1%]BT*ZTO4 -MM`3D'=7X^^]M!.WJE@AU2Y2Z):Y"3>"^2G\AD?_,;#UB&[-VG1+4E(=\GU@= -M5A4.G:,D%%8;@BT*G!*V@4Y>+[`U[/B9D10!#C`1;74EW:"H8IF0*:\ -MPR9+SV*U"GUO<67969FO%KQ"#!!A1)\8`)6Y3;JH4G6^5CI= -M+%U:"?06P^[<,:"8P]%S2N,[]!37B>@++I<']8T!O;YEJ*9%KFXA-#A'VJ(D -M6E2^_?")0HTS%2V:;$+]]W?`)5F+5BIE:G`S;/R5]I0,)FQ#4Y^)'&G^Z5%X -MGQ^!!^4?][-A2!,#L87N3"F34SL!R/>H&O=M[2PG3QK\ -MR4OW/9CL>\)$GH/11-]F$WG*R\]]ZXO&\/EZ_C9/C2'?@D7GCEB0#BV&!>@# -M;G?##@Z\[?IX`T,:BU[6J_#`>8^=JX0E.!><$Q -MF"L5N<9PGH+U8^UD0798%@=I0,I!S0^;H4L/'7J:D'+HJ;7GX/_.G5?C9G// -MF3PY),V[L>3O`&5'C@E]6EE$<[:&PUIVSQQY^ZC?T%J2#R%!UN3,E#=G1=YN -M-!=/C;K=4U4,,LO6$SBGM(0/!W1#(<[O1ZM*M3C3)M-NY)1M*M^LP7VISK[_ -MS.R>&'%[/[4>T9RU]QOI^3;%'ES1DF4B7X=^B>7L`Y!.2"VG'X?=/LOHIAT+U0O)G.[AT&,YK(*`0Q]QPI,FGP(.0A@]>/(?VA^_95P0':FHLK -MAT?7UY4[Y<.CB>$Q#7;YR.CE.;N4PR7*D=$FTD$UMSI.*@W<`DV: -M8X-F]W;UL:?F]M6:8\\L_0#V![4RNJ9(SM^/)0^LO0H(_!TB?Y!U-K[':?FV -MQT3.0MR,$@%(X_R"T3-&GRQB7:*S=M)83Z!M"=DY@GX*VTF)IW,G*Z6!&8+3 -MWU'#=`QI#=VS!^M"M\19JMX$![B0IK+?D$-+.#@%J8XSD7V48=MIWA\*'IH& -M'T(_31)EZ1]A(M.T,#*H,)'+$$0J]853`T`S&$KM3UH^'Z&<+@8GY6/%H`98 -MV3L]VWQI@O#;,T-OSU2TK+GD5+S[;%Z=#V*U(<7Y-CUPL'(F1]VVL+F36=,- -M+-XLADO0T,IH%?#>0RN'45ZHSC:Q$"][F1K!X06R2X;MKDI9H1X4^[UO,+LW -MA]^&J`(QTBTNJ%0!G6LZE`^H?Z*L^6P7,K4 -M0F3!S=K?G[7]>O:KJ-NRB-NC8O"@@D[N"`PYQT!&A#84;#U>(@^XK9#`6:<1 -M[N_?*I%+V[34_B#?Y27KX;?%T&>-NAW=D([=@RGV-*H.9,E[VS`)5W+(L[V( -MA-T61-X.44,1'<)]KMT2K21+?KZ-7_1\_Q&T<_?&KSX%^_J^:=!=J(]3+5[9 -M[W]E(3BH&BMV&1)]\D"Q36\:O`'ACCX&94Q'789N8R+"*!=<-5Q9F.ASW"8P -M#3ZF^+8&]K5C0U#4[NLH!7C)VI/K2`#4@"_,W"]<5EPDV!Z;4RP]( -M4CM5MO6@_6QA67'<%@&RT+;9'QG(KPZH7']V`3C':Z0O.3O.1-8A]1+-?3%T -M0+(V\%*Q[)+-Q=LOK/Z45C]GXX9-)3D3C$CXP59(G&PB5_&R$E1="6MD6?`M -M]8*>,^%*<590V*U_8)#J)<"QWC`L>^**];*P6]AO/2/MPV -M@6/#RB:L>W/'CLUO%B7LWU^=<(X?=NMBCN&_2^#A6@H\4(X'B6Y]P[%CYN() -M&IS+>,!6GL/8QT;NJ<,:,>6/&+59+0T\X\A+*")R)CIW(NL]&$ORX?R+*Y+Q -MCV#+QH1XL#0/ZL4#RIFIY=A1J"L@&CZ$PK"Z(2%[&J&\T_OR$O*)HHG.O1`/ 
-M$%?HWE9J)L==P0_?2_292^6A].LGKQ\6=&*$0=97.ZH@;ECH&6-9W3.RPN/9T-??6%L>GITO3,ACFQV>D^ -M"=BIJ=3:Y:NH-A8]QX?,+C8]@_$2"M])J0>>LL)N?8(IA+LIOZ@A?GF>).S6 -M1R&W5G*5'.6!K6PY*\6)+C[*DU?2Y,51\S>P)+597E9_:L.017G:^>),BF_4 -MK=SP6[DE@@LZZ&*N9YEF0-;F4Q9?YD]$H_*QSZC#"6D.R_K*7-/J?0&<,0:S -M\QLU_HZE_3#8()>^8T3V0WOU<:>DT,)A3_+C3I!+.0U'""DFOF502KEKK.^! -M$KDT")08D3=3+C+`0:74%Z3EF"-N3;/+['([4=!ICZ,TUA9::$':&S-9KI%JJ=GRTSO0X3#WS!;?&I%'V#)X51B2QJ"0O+$"(ODXZ,YP2,X\4:YQ -M]9&Q-J)0%G(KB-6/M=TF<"[;QB4*:''6`X3U(,NSDR:^-6E/*BN5QOQUU,KM -MH\-N11;,53IIJP]JC`@O^)8.@BN"!-^"&.IN3FS;EU'7I7;I$QNOAMT11M_PO.,YNRZS)<#`8D^D^DS?ZTTTS -MH)\_&%$X=\T7T=!?%'897?KNA3G^!,[L,>RR$5)FTN`:#[Y1T^3L6\_3,->@ -M28,_AM\\'W7SC-;>-.$%_%+NBQ)/NXSB=L)T&8L=BDJ43TR6WT6:"#0'?>_ -M>6SKT=K5RSM'GRX`TP+ -M8;EO`2-+%K_4<-E$=M*78[5V]C>8V3J).CW!'N,/N]-[AN+LNU#02>/*3_@= -M5NDQ02?_Q5F+G1N.?0%3.^G3XKG<"6?J8;H7Q76`'=W'^JN>E_,:+.=K$]E` -M'2&B3EU8?X0(V@&.K2A?Z%&PF.]9Z#A14W&'E6?'0#M[ZD@PGCYT1`2*_60] -M!O3U"5[1RZ<^REAH(IDO!+M-Y!7O/J,QXN;:X)N?'4^3#Z,I="RH9,MJN,5IF%(J8I,L -M;19F=C=`'B?W\C@[[:KQ3SR.^6\\SKOQ#2T9<@/")9L[[?2WU+QH?5NE8ZUL -M>@D6=^;N'VFN4.]@R7'68D@>S -M:#SG(.[\*Q?]]S909<)"J.+AQ42V7(7=J3)_-_Z_RA!Q4VHB8SQK:[X+LLQG -MDB#S;DA$'IVI]3;5YSTT$1%3_<&9=JK6,]?KN16H$MH1SYR_J@--B@)N4/8Z5BSF:7B/,.VT%3;H#L)]17T4AO=4I&)_/)Y)VHX2"(M<&C= -M(*AW0-P[8)DQ$30%]]Y71J-:"<+E3K,X,]3#$=Z\&>JN9Y;V5,WB%!NR+(YD(Q)$ -M1#.B;O*6_:;T.!>T$V>2C,@Y!%%$,T)N,COY*RI84)]IWQH1(84S]"^I@_Q> -M1X1>:S'L&#I5AE.GRNC:3H?942KN/0%F9-;D4VB#89--Y&1Y-`/<+QH&G)>< -M?-+`[XN?7R;0X,E[[L/7*FGR*FIE:%-I353OD2.[JAT6PP?@)#&6I:!.4['W -M4JIRMB.JMR&RMP$JC/-UJ*?/>:>X]P#%[A3"[74[)4IIF[PV"^#LJ5SY=FHI -M%K2S(#<#K=2")C4N^3JJ.H-?(0C00;\)U'<5NR`T22)[:YX3O.T4P8N!!.^- -MYP1O2R?(B.JULZJ$U>W4>:1SRL6UU$D`[TI0]7'[[W9_6``*\>1H/JH)9RDL@[ -M7M8(`&A/I*6:R-W(T#2F[#;A<9H]9P?+@GLA^Q,R;2Q%>K9\NL^*$MBC)2&] -M%]YK,UB5;K$>RZZ=5I%4(+(:%8,=E(T'UMV^!I[^O1/4F -M1O8FPOZ^I//BHJ>_M3J6UHR$],XA<`S@6+YP1^LNFR2S#?I<`.=\S84.EZ?? 
-MZ07QJQTG*,!O3?0<).K2)PVF#9W,H,/9?CO6G]I:4G(0W\]0.`AHDU2VH>M1$;X#"P$I_O.WC[&DQAC$"?][4<%T$UK;?+I:*4-"G8 -MK^+0H**HI+XK'0S0KHY%7U!OJA=E+ZP:)4N"B^@2FO<,!T16"*MRG9#8XC); -MYRFV=)D='T%/SFR8L."KN1X,.))$_O(G#$@BJZAQQ::=:O9@P#E>):K^$P:` -M)/(2)%MDIXD\H\$#Q;U\+!FW(E:*$596ZF1^BJ/Z5@DQ7N5VRN'"*I6AZ0\C1\Z?D5;1#FK -ML%Y*CF\BFB[A(.?X%H.`@&ERI-[J4JD^IO)V>.POG&3>/DUZ=LU$CD6>S\9G -MW2_F`)P\%_#Y65!CK@??*`VY41IQHQ(T`5R1_[NY9J3R0[3A;Q[(W:$T8HJY -MB%?/M.G94+?6WH"Y:CP:YN-1KXU0OR2M,JK)89[X/#1^*A#^UF -M[XOW\YM@=D\)O9$6=2--NQU"V0L@`PZ-$7*!^+`:.[R?/ -M^T+FF8W&1'2DB;P)^Y/P&/YE=I3@($U+FYJ:.L)N+&^2[O[4;)VX;+U>846P -M_9AYX%O>/D@3CJ!4_`"3BN_1[NDS#VS7[+EM=CNT>VZ9!\H55B9K)Y>5BZ[, -M0!4%3RY!T]D(BHU(!N38L#F>CM*D9U-;0D@6>\@`L31S$4X_9G%_H8!]Y9@7 -M=>-5<$-C]JVJJ79$W?A48^1LV$I-CMI"(W79IC6*^!_H+0/3]WXKV18,NP?V -MB?9W,>P?T^"'"'6H5\FAL;>@C+^BB;3]$3W!T -M$JGW[!48$VF/3(-7>=^R5-N"Y1YIU&Y`=I;9@$%9C&E02O>_8$3(#;^P&WY8 -MO)#RZV90?MU")O3GYH0&WQ@/7;J=7R@.I,O7HYQW4,4A:BF)&)XN'9X)9P\+ -MNG)_HU:D1J9OOO*O@"*#U.'<1S+P$J$7A.=8+3 -M8$S3SPQ??<0-(;@.%C,GT_S^1MNKKA("O88C,COGSV,EP.0KTRP*)[*F9LS: -MFLDK9^"K:J:F[`I.J1-]42,$U+@!0*Y0@LU>=-Y"^>/47S -MC(_CAB-,YN2@ZU^KHE$,FV8!Q;DZSA"3R8),)NSZ@?E]LY0'(()09VM6GIZL -MF5MY>,K9GZH$T-PY_^HY62IOSNYI7YXW/O+ZKN?KAPP;OB9'IVK.PGY`+L:A13]"6)S#F=:*0M`,!$ME(49[X1\?5R -MH,\LD,MX0`.2+"^U09Y0!^8G4+QF'<5K8`N@*PFVV""I83TQL"KB(8\J\Y]O -M$Q!X\HYU\'6<-#FUD<*4IB.5HN#K:6NW8YN!\@1+TRH`M8H6FL6P\\A?NMS` -M`-8#JU^9]QA]6LCU%>+K*XA-:SZ9X/4+'#8$Y*^>^@@X\G4-ES#,<+8-_.S7 -MQ?7(JFZBK.7USZ&UW`ZM9>RC-A5E+;&A77KHSG_.$AO,A>NO0H8&(:T]B4PV -M#6;(<2SLNAFFJQ3NMF]WJ3JR8DO;L-^YJ@O4,D$:*.;DQ(-UGH\A(.?X%&)P -M$%T31//8:UJ7&YIKY,<"4:&P**#I"+5;:L[SMPE"KALV9]BMK$8A*&$WHA5I -MXNMZX)1[F-+0,7KJV.3`'^L.5_3@U\CKKT1?29-`IC+D#)YY4WT+.H->3NAQHB2+(M`#+/3O -MF-E0(UD:`=(ZBHE2.FR8@14;OV2@`PXW?16VK=5LG0VB8SF(5X[9\"X4=-*( -MO>R_4;#D$Q[T"-\5_.$/-E.^8\P_K?0OAGS'&(YUZQ=G9\$\F=!Q9&S#7J3M -M*`8S%!PK=@3;N:S.1BR-B/U3Z9`N;D:`$87.W<#?3>1W"$R=!E/7K5`LC?!4 -M,Z:2A0X7-0%@0%NQ#BLVG?(7O4?^WX*E?=+T)W]QU=1',8LB3(/7H?1)4#JT 
-M<%>'S@1$HZ'7KGFXH0L:YV?2-WP96T^O"ULY8FU8==^37;X\*K9BD?6\&O_B+CV -M>'?%9>?\>A_Y=81]%,.VA#?LWMNJ.)&G/)&?2-MI(DL11*WS#[_V`+DR?%F/ -M#U;%5?2N37;Z8-5$TJ%___QH=A+YZ1_[ULA*RMA54]_C7-O-NB2L -M`)ZO<62]U-IHRTW!DC',Z;'&.5(W[:7#^ -M-E=]OT;*=[K44C^BF*DNIF^S.6WU+G4IG2AE)OUVZN,9+P%WNE1AY(NO?:>0 -MTN5&OR)<+F5"6E.-5YF]Q)`J0%\N!?52J9]2RG^^5@)UXK*>-U9X"<^5\)*$ -MW#HL/9J>A)6G8W/XZ'#NAK2`:\O.XUNS-PG,AG?"KRV,N+8`W*Y]"SP$#O8_ -MXSF<":FF-\YL4UU8TUD,]>'KH?F8F98GV#:;T8B9G1M+T[+33\_U<,.%Z*_Q -MJ6X(S#]0AR[O`?Q8CCW-;MV0?HZ_-.:!5/VD-(WJ0S03VYI0*F2\Y`\EG-W] -M7/CSR7XL'8S/$`(C>P2+_EJ\K^^$,V:8Y(^RA8S9V+;,8PF4L`PX[Q_'+W5_ -M8R(/([#\R<_+/Y9)O3938Y`5S8L7BJZ]FBW9)F4E8YLD/B.X)=&+QP2'?%2:GL%_%95FIVN?K[GZ -M7<"/Q`=?PR[K["&6F=-`37F4]D.H9:$4`7XXL4#'^WB&?,LSR_KXHC25`^'G -M&%('5?[77I4&"="?:0L\*_=YUG#W;]RPF5@C%S151"H/+-D2L>@@?+/#B%@0 -M_VLO.Z?R*X3*(`$[1#3_>!1XF&^-=/=CX3/]6KF1U]1;(E7-2ZHCUIQ=9D1& -M0KU3&;E1U^2<5K:F"5,U<4"]7RL&W-BW\541><9PMWM_8]0U');%[8N&,'72>1!LZ'42$NG>!H7/.0TLE7UF+*>`S,W8N'71-Q!**+(&.9N -MVT-R22[?D[\Z4=D791Y9;0ZC#+@-Z(,*\8D\ABRNX:*8-< -M:#$,%(1TZ:^V%[1[OE-+(D,['>4A9L?E(SLITUM$F5X(2=51ID%8=]6'#+"G -M$%I@OT&#W^[X^:?\"Z,4QN1]S[]9HSXRFFLIOI,3RLN;S.-\P)MN:&AEU`DX -MKZ.,5NJ`:_GZBN(C=LL,M8>A#ZTUF-O?CW*71;KM_[[8`!P%D?D12B.3U\J1 -M-R\NB$+MV%[8N9.59J9-)V]9S$HTM.42_@+@]G?G:XW8'TZLDC++W.>'Y\!A -M`%A[XE[07-8O>J2/=7.:%%X6HCA_V4Z^="#Y"1[N5E -MQ5O65Y0`4+E!;<149J9GL6*'A)!DJ8B]\H".7: -MA5WM1ZXJNL2[+A^Q]UB[@4N`M-LJ_1%W/C)@=9.D@8[38-W -MD,(P<)#5)N22:/F-,/<;U=P3X'.B,"*U^!%1B9'$H8RYML?&AQ_^%5,S#X1Y'N%5A[H4GJVU. 
-M]BXZ.Y=6[E2>0Y=;PR+V)9W;.^P5\"!=%D:BM71**A( -MILFR4-_>8:6:+$.)>J-6HM(-\1=6D'NFDF-6A+X4[`X)=_OPU4(>(3QPV<\E -M]+,+]]AVV'>X*H/LPKK<\D"B@*:RULNM!Q56)V$]P*H38G5#YQ)9=>%';$00 -MDLC83'QN*/56T=0GWLQUI]?F!^4%Q#H'L7=3<2\VU0A4K9( -M53KKHM.!^2*%0:_\"X[5$8F,=>`'`G`,CEHZ^KKZ7SHA-ZBH-33I\$]?P_L^3O`4<]KZ?H%V_XPEMJA`/`POBH>(JI8U0?G?RJ>739&_)JQ\W3XUHN>2#WU*2,]!GQ'"DC"%5*F&O>5XF;HZ[2UG_F;!@\* -M[TF/ZLG9\UG1+XQ^C-IC[`_=5U@E5R^B-E+474;UTJR-.Z98RY>5U8,'HE[EE6OU^`<,%!=0JV);BC$J4LI@3/+-U=N(C9RM-0!&%\Y -M;M;J8HFK-V'?4>RM"6MCW?'4HBT\S[?_%,1\1<_:,:#XKLF<1V -M884W0GK&(->H3===1_?NK90#'`J`&;T28/;HY]_4RSA(<(\LK&;U0=_U!V/%+,0%?53X"_#8V:CE^5%QZMO..C['@,(ZN.*D]6 -M(^,9\`ZDP7OEA7\@ME"VC>[KF<;:1>DLDJ9=2NW4&F%9,JDEJB<22BS\&4IV -M]E?]O/)LD+SCL;;C5\CVV23&(C&^5G3JJ1'1<;_'>&K1#Q?!=;]]J)_?&YHY -MJ!^E*YZ#I$GDIK">J>;];\-*>2IJ(K_T%AG5$Z-9E,[O1S6>4KUZT?T3T-&? -M*U90]]W%DZ4A/7\1=__&GDI;ERY_,_W[+T)[1F%*X>XZ26V&HC9S40T]'6\E -M5!^B`/=H;_WJ@!Z=4P/YBB3?P.H7TF&S+!0@:K(#*].]\X83T7T[M+LSIAT) -MZ9$M&+$\O(<#B,">V,B>&&6XX+PSPXT8LC<*$'UNJ7B>X@W^RT+L);94;^6\ -MR:D?0<)F;J@LL6=3#DD[XRJQ["WK'[4$] -MS-A6`<,V40D0H"=.(/-&CX6C!KV@S:&I5I&V=DFA87^_Y@'NVS^RRJ#>OL2( -MB%.=[`Q#I2&&ND%C:I=(QN&^DI&2[4L:YARI@X5`:5#04/\.`+WW4)#-$-#= -M?S8MB3P6WGT&4POWME(;]LT9I8:5NT6:YDRH!9QO,#\[QK-C;+G(B/AP=U$1 -MNRZP.TLY`U6H?H=6)Q&9JAJ.J%I)U4BD]<2I6DIYO?(E7Y-1W2LU/Z>C_33U -M#+3#H7FL^].3;>+NK9A< -MN,LF2\^`##:D^Z.E,R;%S40TZ5FQ[S(H+GO')S8]$T:6OCU;DYZ)1#.0\Z$7 -MW#^ZL02Z)CM=DYW!\50F9D.ZMAZE-BTVI?.I&4Z[Z%CZ=C1,#]]W+(0ID)Y0 -MC*1K-J03]2B_GZ;9E,ZB*DNMK'K]%<95738>V/T:P4&(6/3Y*I#/OWV#?"NT -M^U7.ZQQY[=:3WRU=3SO'5VY_88?.G6P+Z1X5UOTR1@AW'EF7E94)',Q9F%FW -M/-M0N*YASL;,)7?H6=D;,U/?_BPKNT$#(6%CYL8LV"+8FO_8#LCWL1%#KR0; -MTND2FL3SG-HD11#XO$%*-6T\PYM0XLG]1RHX5A<),/?=O9+0OKEH1W -M2X*Z)=2WG=V2D&[._\JWG8R0[GG*%JSF2W -M0O_[8\4&1'E,1VV?O<]5$A)0/=$^E9:_K`(UTTL7\1KI:D.;?7\*P-# -M2/O]XKIX!2=TB$3'9$WQ]WX\WH7#<0MS=6"O>PZ89++E7/GV -MS*'O7,>'N,Z(76B6$DV -M\B$#=D5"Z)+Q+(DD$_8`Q?6]?CUPSH^!"EQKQBNS!`&N%4&NRT"__6ME[3YN -MR/B4DO"4#<*4]4R0R\Z*-R*B6([9B/@AL7_IHSX31-9^-SK0]8-R'$U16Z=R 
-MULN=3@[GKXH]/[/L0CCOEMAC5,X#:N=!5JN0UXJN."TZ,D*I0.H3%DVM-"(: -M*,1$-B/(P@^X/@4BS3A:;&^]NM>Y<#^74R0BH,S,@]K,`U?T2<^FF4@*$;W[ -MEU\$NISH5!'*FLBW">5O"JD1'AEZL@A)#97.]&>ETN&8^B:@TC?]O>W3`2

)]3^QS1;H^5]]*,Y%;*;[LXH:XN.&N>6&N^<`*/C=Z_FDIR396 -M?9U\BX!;"A7`+-N0O;PY".#SY]ZB#IA+*`!>$KHTZ9*)[(>(E,4S">FQ(O,' -M9R\8Z.J)\R[3R_`&2:?5-T$T;ZJD!"^)+HDID93$EDA+XDID)?(2HD19HBA1 -MEZA*M"4:B4IO(M_L:.4D@]F:DT2^$^AZ)=0U4OF7/LY?:(FT -M5.5X`Z>1Z'0HWT02:=EY\(:EH`XC9(E<+ZNC4=5?^K"QA*^+VN+KA+`Q/!%'XO4BRA4B@X:")ML2(:!0W4)Z/4%[KP@176^WBK -M:I+!1"X!-T-<>*`+CW)%1[BBP[OZPURX#9S[LD=*S2.5YOQGLO.?FD@%U1?''>!@$OG&\1IP -MT+^+/*Z7RH+`ZU)93$X[6> -MZ_CCM:!>(0MJWA]#91G7E"[GU)P\"$XU=S23H%\K"XKJVJ>1!:ME0<E,J%"XL^MHWD:J)#Y0W^#4TN5(0M6RH0:B3_%YV3^,(>*$DF(N^Z"S%7Q[LV*D51&; -MN:UY1ZM:FJ^4YFJD>?6-FI'5JI%5JI&5]7O"NZQA7=]#$?^6O^B/_$HJ?WVC -M1P+,KH!"_L@/EH=TK0WK^BZ@ZYO0KF_$7=^*NJQ!7=\R'A'^7=\PDC&&CV_M -MS)V)=>]LG56ADW^)Q&7F0BKWI2(S7YZ9%]QU2-SUU8ZW9",C9",C-2.C@KH. -M!G0=W#Z',0(C9*/;+@KKR`KF+IHG&2 -MX25KM=QY+!\"1HI0$_D+.`& -MY=QYB)QAXYY;ZKWLW%V<==&+YE46E:"]P$%OUZD'ZD*7*'V07(.*(CHU"*07 -M*DR+P/LLP0E)"D61%<'.ETBI_!`JH1#F-7MY*1@L[+/[W_DQL\/N\W\^S)AT -M`0-74QP/].\8\#SV"KX[@N]N$7UCOGOVB6N.B>L64C^>751Q,(%YBV7T0-/: -MC+A7B\I#R=D7U7GD%./FK20K$>]^R`Q9!X=Z'Y4H!VW*H6*E9GC?A6)DV;"ZG8:)-%05&B41'V12+-*'JFZ5`\OF7_@,! 
-M<)QI#RZ^#2X.B!N=P<7FI9QO)AX%`>78:&$HN6`F+NX;YG]@71QWHY["6T)J -MRUI(2F-,%]L->C@3,2J0G<(/3.RT6W/RN=3?F8A-@;;]#">`+FA2YQ.%79!Z -M\$M<)U#8#H9YAUF;MC\<)-9_''4AS9E9P3$I,$=]GN*L!(QCUIMJGQ5C.M@P -MK.H`85C9P;W9D*9]3]-FEVGJ[__0$C#&Q4!,H[['[&[ZU/Q-&$K.0W5NN6\_ -M%:@-9+`3+@UIQJP.=\%E\E>-/+"JGQ\P6Y.:3]#LD3XBGJ:IRA@-\5R=(2YD"R8NQQ(-`<%!C?JH7YL0JJ6J[;]>!H`IG#J_$KEI -M6C5D,[*@XV9HI)-M.FY6\]U+.IA.^5)9X,$,C60;#V9QNG8`0*LE],SNAG!C -M-"*C/<=09:#BR-[M&!57M$0:SZ/:EC`J;ZE'GBW([?5[R_P>+ZJNK$4>C,N0 -MW(SJPHV1-M?I:"1T-AR-U#>'ST5=(;G)+S7(36$II4JM4EWKR5A3U]HWO+,3R+1#!D((\R:$ +MD#EQJ.+1MM'::GL\%6WMH:7MB2:A=%"I57$VC(89%84P!K0.B`K64VFQ[<): +MG&A%M#@1#(J(%2EM/:T^K>QWK01M>[[W/-]SO=\_WW=='^W*7GOO-=QKN'_W +M[UYK; +M!#"L?QSVW3BL;ASF&D?',?RN<8G:#[L[#H-Q"2:BK@*CF+IRGH_^HS\3)9\C +M6/;\B.]61'T;P\'D'.Q_^U9,?4.?$26FSL!?$=4!C+K)\/^)NLGQNLG38IZD +M+9MP%]8AHHX#HV$R9I@L,$PF#9.-?WCS.3#&3<;B)@OB)I-QC][0T9M/`JE/ +M"1S;2GJCGXQ%6\"XK60&C!P)I(Z@!T>"J(\DU,?`>)'T2Z:9L3#LT1]CDA^L +M`HFY8IR8.4OL-X%@,IY9.6X`OE@QQ5?][B!J!\IJQORQQTD83\'W(UXAZE#/ +MU/D:N]5T>.:RYW]\U/+*3)/W1T15PAX24Q7_VU3;4()MI\E,,BLZ6W94MGRJ +M("?A\(+MQL,3ED^X9Z$&H*"VISVV?[A%U#I@5$XV*2?/5$Z>K9P\YW%?>"5= +M`\;3]7Y>H58%4NE;22"D/3\.FSV.AF,`5K^!T34NB%I?FB"FUC/.P^@Z%%W' +MN("B\E=@BK6,;V%\+8P+J#5T[/__^W_9GX7Z[3^^B_K-%VY@?B?_8QSB@@\; +M_#K&XM*QN'0LWN^+'TOTQ8^9HZ@O8VY@+"V\&J.HKTIA_#<20[A">^<&QK7" +MJQV%&Q@!\06'SW'Z6#P,,>'T/AE08%Z`K#*MA7`RO!2C`.*JS +M!(:M8_%&&)I@7`ZO2+X?85R)\`\%&)_S/X^_WNB+O][HBR?/_D_C`?NL'`4? 
+M!K_N0?B,,3#RT3W&IRAOW_XV"M.VR4TPKQ\,=@P+Z:5&L=[]GW\_'=X_@"S"C=TPPMMU;^7V4/Y*&V6DS?/GVP[+?@_;#3E&_ +MW&.8O%DP`E[#3=[R[%+TZWL,Q8/OO.4:,3-\"LL*1[8'PTPH3OO-)Y\4AE`[ +M3(V@PN63"TO`,#I%?0';!&`R"6PL;"`U`M]1OWSHE07&]#4(43`+_1JG[&*;U-@:6A<.^LZ,;BKI&P03?TS`A+.-X.@Q-:'Y2U/?H +M^:]F5#W_/GSWA1&V$=5_#SY_B-%^[83I4F`?"SRP/V&?`R0]+'[)>5 +M4;]1EZ`\Z1MA]7T0YS$L3D#"#KT&T\-Q06.3"NTORF."(1&C=Y;`ZWN:!(M= +MD[`:AA(8=J%VK(;R\4D#K,>(V6';O(W":-1]3<(A#`86K$>@26B&Z0_!\"GL +M34RH2?A,J4DX"N\_Q+."$?AT?/'@4I!LAP&O8XD#23]]FC@)[]^SU*\WLP_RG(:2[J +M#W__6_FP^1!#YI7[KB@D_"',9?C"BQ`K7X0X]3>"Q)Z'^CZKY0_Y73`_Q,_Y +MA"_,W?-_#2_>\UWG:&%^F'96RN_Y_2%.3H#YZ!`[GUD%[P=);&H/O(=A.L3/ +M63-)+![BPR3X;CSD>N-8)*9+(;$XB+\33Y)8C)3$HB`WY,-R0B$6!\/\P?`J +MAL^X>WSY46!.A`'F8_3!_UZQENALH]=K?_VW__S/\'__/DS +M,?;_<;&OE]+>*,7?+*7_HQ1[YO]`K-=+:&^4X&^6T/]1@KWT?Y"_JA2+**%5 +ME^(UI?3:4@:KU(]=ZL\I97+_A^)4P$T),Z*$7EO"6%7BM[K$?TT)<^W_ +M4)P>;_WN4KRWE'ZME,$H]?,K]?R],#,)7AO"1?6 +M?ZV$<;C$[Y\E_A^7,(^4L%I*V)^4<#XMX;;Z9$%\!?H=5@%%<5!6Y(M@7>,$ +MU*CW%MUW8N?_>)\DP"[\Z3X+^_;W>U2>!^*CB/HE@\0S_GXR&CX143_1!XV, +M8:,?,&XE2Z.WR;PTB>1@PN]#`K\/H0\@`BSY7AK\O?0\/^U=_Q+SX>0TR_V7 +M@[+X8ECD`PQ[3\``Y#D^O(I`IUP6J91%A'\?L#Q=HS)AJJK;MLU/QYKH&O*M +M*PFVIE_RQW-'$ZS7&XO51`N-11'6'V+!%C-V6=6)7>CTIA:JR;=B.^FV^LRQ +MM'9OVE4/N@LF@%3B=H)UBX-($A9_4M22<^2*1R$)W/7SQG=IS=_%243$G@1; +M>KEC\D7/P5QGY*I*6O.M[5)0TIVJD@@V1:ZY0:RYS1A!!S7@;-6HGD:`?6QL!:0YM:-9)@[J^! +M&HGT:NH59,0>PLF$/\0HM9OSCLXUR`)*;J_Z9`_>\4/Z$7"6O##FP`(VEW:,T--2)'&/`H);2- +M.SAK'OBI1?B:?!KHLWK>UTC$9JP'VS3),3E.(E9*@IBW"-9]HCQJE;>>T-H0 +M;SU1OGI4(K92Q%F=C^H9V:47!:I%HN(G&MZI#?^]*G%1>$T@\&S<0:QYP'2( +M#2(Z+`KTV3PKX=AI1/!!$!K!OL?I9`GKVR%9:D5'=+#XQK*)8<-"QQ-5 +M[[-XILI4OEN83U:G10SS:I?S6O'*9:"K>D78,-<--"OI&S>QE"]@K$%<&8MO +M*&/D3]+$RM:7,8HFZV.CUY5-TD\C03.P^[?$LU@SK/:,L+NC(7='51]OP#P. +MS%SPQU]]?;5A9KU^2*K?59UK3"M[(NSNOQP3BL8SN[GR:,9A-V.FL&IAI3E? +M4VNK22Y_*_+N,+OK'<@^^[IWU#>!DH!/XE<[<`NURD*]U_%W.#6,/:F)/!-ZO4EPA4P, +M<*#%!V"VC&9?(<''B0%9%JH;4VN7YPL2`S[H52[$17<_8;'FV.Q)>4;. 
+M._/>WO&21E:SI&6J>HLBQ<4H-*^Y;-3+JC^>?7I08R;BYHP2GPKK&U3U.>[4 +MU?DQQ%&.]MB.%,!PF-?E!FGKL\]^98:.X14SDN_UDZ^9:9'P)C'@5=N\(-@> +M4Z'QBIFW>%XB;^"\RUJ'9]6!>0S&+`OU/@::\L89%C(X_Y6PY'XPV':IB5,> +MKS8G-S=P.+/4UN0U^;@9ZZX25`MM"4?4TP2]`,P"/P`[<=\WB*51=PM#[A:J +M?M[PSOL$&&FF,)<#\Q2D/;@'T=A8/('M,%GM\QWCB0U<,#/B;E9IGWPAH]H# +MQZ_PWZ1W7])8DU6R +M:DU6O:I*JE)46RCO\<-(!V8LR$B'M8VPY\9O!-Q_Q:\`)RQ4&S#CS\6O!-)29:"6L3,)!$D;@V +MD*"$>ZA:D>''G,/#&V_@9CPQ+Y"]%R&+0P2Q)9]DMQ*V0\\H,[)SQ=:,J=82 +M*N(%]<>.7;IHU'I"UYJFF"G$FL$\*`-+PLW3]'O*N; +MGXQOM_M)Q>?YBW/C'-M99X0LAWC/);[#J[QUY2Y'1>6.1X_*Z]6-`E45F2>. +M'/JY,#!DZ%>'(%\$E4Q[*/N`6W[A+-'`57^<#2''B[&&C)HKG4OO0P1K5C=. +M4-=#98+B;XV%O[> +MQN8T75>V]=S[$#%3;I%YXY">0-"T0?M!)G/>C5=WT)"BR)/7(-#\V"$.'[J0 +MKS-$1TB&SG-_96\_1\`6=:J-@K"A3J)!P*G`N0UX19?5/OM*YV7C17N/<>.F +M0.TTP=$KBO&2_"G;NRLO.B8?GE)^B>GKGFCE%DG^J?)=E;L=)_*_+M^CW!)< +MV5"^UW%&VT&6[U,W2EN(SY\+BS#$1FQT^LEC(PP< +M+'AH=_A097Z@WP-A8VL!*>\.5G5+X[J#%-T2>@>N[1:KZK/EA\^JJE)4N[*M +M]M6VA&:?I=,-/;!V7D(06B!,Y'Y_W@7F%3T![O&')]4\(9](MSS<@&$^B$7O +M?>;Q"ID?F,@S6*B;R"Y#_.)IX?3%QIZK+-37&(:K;#AWF!0V^&#;T6/O2:9.@U0BD4#[T6,C2=(>?L +M4NQUJ#.R5=K?""?N=%76Q65DQSRWN24M+B<[CF/?G1QKK?7QBTOD16.&C/>L +MT$]&;,FPDLN9,[B!0U,OD#LR/XC!Y;K[V?;E\^8#,C`];&@:<''S">X+\1S.C%,-$4,QX4,QVNX- +M;28XKQ[!2YF]*_4<_V@Q\/!?C-]0`K&I&Y!'L[>E;[-OR1CC+IQN(UBFAE@B +M2-GCIVQ\8,MZ11,M4T?+.UY!5F*N.E:.)C_]KC8:5T-;2\RQ?5FAEP7IQN,J +M#BW`3=C.?V\8SXJ\\]#ZT[GBR:`%Y/!;II2+#^#8.M<<=73LVGW/F3$M<%FH +MQHLE%NHS#&PY,5>[3DQPIS-:Z>AOG*,$ +MH\V,L9^BP5IFL[.F<&=R7\Y#Q;Y=6**V7?;OP` +M@L7TLR^9L5"H-!(_.+\9?\43_?<7!D7>.6RK3RI^RT*=J`[R:HH@D1N!GA\` +M\[I=AGHF`'DI9>Z*'L?;N>^4]SJOY;^3]_:^M6FMN+J15OE.Q=L.,W"Q\L4L +MA^EDNAH:IA(E.:TT>)MTJP0D@'%@)B.+QI@2SW(*P`P4?2*>%6_*U'!W"I@+ +M!.KH*/8"W&\FS@TQ<=1BIE/@ERU@3,`Y#J'XSA[X"N(@O&4Y$UEU^,5L*\VZ]D!6<*<^8'*+=G[',KMV9:'7X'-8`0<`L<=YAAR6KF(F$;C_H4�?70>/4 +MQC,I`:M\"G`1%?%,AX`%T9?"P1[.!!I]ZQ1U-`-*[C=-P.K`.1.$4!GH# +M#IL!`5H3'94IYFK$7"<.E3:?#+_S.JM52&_%(9IGJ@LTUM1L!QR#\IY-;SO> 
+MJ3SG[*ZX6-9;?LTA`JEIFVC@?>XJ0?"=?[#NXZR3.-.!!]])*N]D3L;++JA> +M'P4CH7=F0DZJB983A7!>-S@@)]5V91<'-K4>[-!V9ZME09I8>7.#6A%D+?GL +ME*E`D:_:V:%5^$,+S>7.UL3&VA).CGD6(-57NT.T]4+:I@#IG7%;0I5V'$2S +MXN(S0J5WGN9^(*#_A-._Q%FPYSLMU/HQ`#;#:<79I(S&4QP^WP3")W"I9!'( +MKW$B!/WB5N`=P7]R;"S4J;9J5:S6C.'PUS=-W0G(FV*=18#:MD<="V'\YLFY +M/NFK%(^%1R!JUHJPI-$O44H?C^;6H7B0TC/44!)/FSUEWIG!;V;S/ +MA?JA\("`ZO,PO`:T<&`^U;R* +MO6D*-\T+#A6!$![2(+_&X\&@)CK&$!V-\&6N)C9F[:_3FHYJ8V.L\RX%?W>V +MV[@[#A(WZ\)OH/OI[R"LZ5_9ZHZK($Z9F:JJ8>OF3O\.[E60]B6N^?I^-V2W +M%T!3\3A]-(-/)2P9]))Z?GF\@4QNIOC\6=`GV0@KI=UU&".^VZ4R"G2=N%/` +M*A:#$@09F2"=EQN/_+1M0=LE9<&E8I`*9BK(:6"^GXW&L-(8+?%^1^+])YFR +M!2R'@)DF8*7AA:C$6"*!J^;7BX#7X4]G&>5?H4/LE"V#2RA/EQT$S7VI" +MVIN,(ZOPGUG- +M$X^I^S3V[,5^2I=K=6 +M*K@-!(R=Z&E`(ZZ-A3;\%M)0.+J@*VF4!(.6 +MAQ34P43.`")#7H[$N6&ACF'(=M"T$AR:CZNIB7X1MH,'\@(+1(5"R\/&,8IS +MU93(.8UTJ/9\R'=Q(-/9R;HM9"%(#OE.JI$G*Q=5:TKK$3%,KK8\O//'M0IH +M]_,FETI/UEGGZPP:2(;=@(8,>RD0T?N]B_* +M+351M_^UU`%50:#[49'2R=7(,)H0YR$"/JK;4FT]=U\G843>'HZZ_4"[B-G6 +M?>I,7K)JA'9JYYJM(F@>V@Y!VN!'7G!]L\>,C\,P"`'AMZ]#C[FQ05N?L^[J +MNP&?<71=.U(Z&47D^MMOR.NSK=-/::(-FY*Y:R=Q-Q)+'\Z%@J6VG=1%DXFT +M>86*@KB*8%X'L>;+)W,MSF<FC0L5);@I<\F"Q4+781ZF$#(K'!B8PCMH-T"W48CE?9LZ`/FJR, +M\N=K_UKS@H4Z[H5.V!]R9-PA`)L%:BO=*>DQH8ZZEJHSTXLDU4$U`NBJ`,_^ +MQ;H>,A'K*@C*#VYLU%KI>8I0B20@FT`K?7P:N-U!30>;Y9.GKS^5RAX5]Y;5M<*:^JRJ^8> +MT[74*Z2W6EAEC3MID3LG\7!2ED@*%$II,T.I2+P)(7F]L$_S^UUWKFC +M(]\^S[>Y3RW],."R_Z)R597@9)_EX1%?WE8KN)::B#UAH8JP,?\`S?%'/,V0=. 
+ME.5A+;(ETD1.`'H6=GLA<$EOOP-Q;2MK2,CNP*M_"KG]1M4/Y2/JP_7*+=70 +M9T/3?CMD]I3/L#!66!XFP?D/IZ\Z.@::$W4CY>6W,2FE4]\N'Z^)CEZ[3^LU +ME%J?H3SE6P"\_7^S`&@[""Q4NW?]A_FD&$"G[RH<7S`+1,<<-=*5:"4ES41K +M=EO3WXR\/27D]A35P(:NA,57QD@Z\``RUC5-?/LO'Y\O!J"3OTDL-^'\7%.I +M@%]L.OD32&=DQ(,?N$H!F`EDC)=IC(?Q^'P:/AK/<@LRPIG#@H`E"*_DVE'@ +M88S&<^`#!\YSX(7[(F__A74)AW?^!Q+*F[)<#"7$;K2*!LN\/Q[=YX,4Y,W)P2#QXS^`B +MB`?"G=0!IWY+MN+>V0`'%S)"U-=1@_WA@T,:^/YSZ+P>'-9=SU8',]^E<;>%`^,D%MR,^,9$P1T"TZG\,QP5II`-6V471C/<0MR +M-4PW3AP10('Y;M2U/"?N-]6DBV8X38PI`@?)<@MY3^*^M4*_3P2L/7CY=$ZK +MD->*%ZN5T;%,"IG,3:+:F0R',&KPX,E!1724M_?],44T"6.L%+QR%C/2%#;8 +MC(87$F\'7OZ?M]+8<=TV`8P8'11I%@''1;W::B +M>:<]AQJ:6W4]@H!5D\H%Q0FL-*+V4OGEW)>*YSN%SJNU5S9D:7V':IO@]KSRQ<>\$NO]T9C*].=G??_JN"1LV1<A0+>3$(LL#4G9[H=T%6$)LL?N!.!)]`]'0*(G">G@7.)[X1Y* +M]6,.<*S.7Q4R.(=]>XQ+YI'L!L+FLL;\F&TUOADV.$?%L6O)%"7Y%AQNKD/( +M<`M9;IS7@8,LW0L">(T8G%$^KMH8,?A,GL8YN6KB_KO0X0``?%3U1,7XJDD0 +MP[07SBI_S':;E`6D\N=LC=P?&G&6TP=T5SV)K'XD*>2_W<922=&4;4%6#VZ= +MUP[;`5Z&?%@#^?"A3Y`)AHYXV]5"X_*+?OKH&-OUH\&#$89E-2&#H=[EL:NJ +M+8K'\M=]K5]6'3HHL?8=+XRWGCM5(S[=6?1VU&#DJ=N&$=KZG\3(PD)#FTB? 
+M]:W+6K^F,,6,J;UF=I#E-;.J^IRVG-7YMLA!OX#/.?KK.U(\C&)RPX.78Z&M +MG1\=%ZUG7IS$O`2=8V"))$Y'YK1YLH@X()VU&=#>^!8QIKI-8+(08QX0X@LFC*)HA4E?[BG^"VE +ME7YP8]JOM.;AFN3:MZ`PJIU.*(Q349C"8LU60GOJ)JI3+*/?YXL+%EXAB=\2 +MDJA_%*84O7VFY\QWT%PF(',9>>L.M)C0S4=FVE?^Z.W\H%/3D7UF-+#4"G]H +MHWWVV=;,\AJ%JV;=>TJ4&8#(6U>K!#T)2:-0R4[Z;&>O<>E-9#MO^Q=5^&SG +M:*2OA85!R';ZK;",\K#BH*]'O(0*MEL,2-E.NG=5FT03!1I-UFL6ZBH<(^(+ +MX6XT=]LR5F?"3CS)^>3/<_>(',Y=4_-53\&XL1;&(6H\1W*K(O16>>2M[6TC +M^>,+)L#Y[%NH]TWI;=,AMH&1&I-#4_EA00"(A`T[QH.IA7Y%P8E8F+?ZAVRUY)$EFS,AK/T%DJ#QM122#;D +M*4F4!+WWJ\#5B!IS?+8:S_.:[416M&7T%Z3'3Z.QV^%T*N#H^4:N8_[O')C% +M1F7`=#&R*(4L0GIK=>Q[*7OB8J:EQ"BB5GP@7/&^&A3&*B+:RE163&EEPE[< +MWW9$QQGTUQ)[.@@ML1\ZX!$:*Z;VOG.WW3)P[`<-!D6$WHKIT+/W!`%@X663 +M/!IW"AFL.;;,]N!;TU7+:I:6XV9Z;\2M)U5=4-U8:AF&^GQ'AV%9M?6G=MB4 +MMGZH0+L;E%"!.E9!5Y5^-.36Q$>N*DL+TT-?]=*.(/Q +M3.&3RMAH5:Q!H0A4*,0*19`Z5B^70P]-KI#*Y6_)Y2EY4^6Q,?)8N3PV5A;SRJ\J.RJ]#!_K+VV$:Z<`YF(+NQBN^#!RX7'V658Z7'RL,+C_- +M"Q7S&H3T!CQP8+CB1)Y"V2Y3MH!DR9D;Q6Y%G' +MM4<&M.)Q(J&F71(Y\$W8P'Y]>V#^4;TH<'\KZ-2U1^C:HW3M<94Z)>FO$8F4 +M!0^M]H\=HHV;5*"N(O3KO]>&UT1<(9.H)XHBBL/S0J$=@7/4*<@E"P3^'<@[ +M-6,TX`(7=7)_VG&&-II!M`AW.Q09V:P.H?\`5Y&3G?5TX5/*\0)&MW3[3]AQ +M!N."5#+@X'W]$C93N.MC!H2(>QS&BZ7_D@[L\2N%6(*+!SXL'X'7ZA_!>6@? +MJWZ(&%@&@-9Z5KZL6GNA7CY'*E]9#4RVA'UY4T$)AV/BE,9WF1!^1`RD5J@D +M`V6@W[\K=.U*/V'Z% +M7ZN.].\X7S1UX_=T'\8FC=[Q$7POW/J/6JASR$_W_PWBN&][P?].\50^W\3? 
+MZ948\DU8==Z$I1G0_MJ`*7`@'O9OT,#DM[A3F2L/Z&$TGH7RL/2S(CLOC-)"^?*3_ILQW`@4Z +MS=Y#*-:FX/_.)=&15NNUT_DJZ#3L(Y*_`,A('!5^V*$\EJ/GL%;=L44.!#XR +MXWY*&;8A]R75L>S(`;&UY(6H@:DU)2"K?+.2Y)1N11MT6XI(_C!>4VH@F0Z% +M81M'N=U?O1WVLS_J[:&'5L^,B`$F__3'[AK+J15+=G6ZCG]:3MS%V0 +MH93;3OT<-A#'Z$??6:CG9ZC&H5TR]2L9[/YQ,)^%BL(PZ[5/BI/UI`3[@<$; +M)K#AT`,':N``[.CFQLJFE3CHY7C8PX[59.B5^?NU8R7:29%6R@7:FYM +MG%P>U-BB7"&"O07:]$='K9Y=NJ,/K)WK]4+?ATP7A-_>%WMRG;M]PJ4G]^L/%#7Z8W8&5%#A, +MP,7*G15U\QB;.XLU)X'%>XGA%ASN8'0(>&_@U>/0[J31\4P16?X$&L+QA8I+ +M";O#/!ZBD+OPDB;VZGYW/!O=KFFB8=R7%^1$!E/51,$A1>>[A( +M`76LYN,"I>Z?G*HC*CG3`'&%M!I^>WCVES/_C+IIC[A9$$-*"KNY$PCL'`.; +M$'JX<,>76U6!-S-E<-;IA0>'=\A4\K,&M'_/=_NF +M:&S5'LI6]9\E9%S9Q][M?RSLYCN1-Y?I8!%=P@/N?3*#+%MUX2R_^-'Y`-#) +MW1^__B3LZR'+J*?(%*==OG98="45?*2=AE:J_+X")0Z39?2&;V^1,0=W)_2: +M$S%&A>!JPI741+^'#H%E]&>TSJR'?>W:(HFZ^5Z!Z:"!*)@%8AAGN-S/\(T? +MX;K/[E_N!$TKH.'&-M>*%[->#/B"4U@2<(3;DL9;/&E_0_4(+YEHJ;TT@@8D +MM:!,N*FD7%`6E-*L.B1+.Z]U;`:=9XK*2^*J!%BJHTR$C134!E9O/K,4G..U +MTE><>1*M(C7+]$-2Z(!E;^&E$3E;RTIV=:]_E]:\8-N6[5MS9YBQ\(\Z5%52 +MA]A=LFHKKJN&$MF6?HY>T'-G7"G)EH3=G$?P9NEFPK'>,B[GF36;%6$WGXJ\ +M.2.;!@[4SS_'#;D[,3?CO$GC] +M,349I"(EXILB2'*M)9``:<.Y,\J4!8_U[0?X@'F#MW?@69RW!7RR)U$ORJ4!^G7]SX_-*3_A]!^ +M=WB_&QO'`'T*623HE-X,(*8*]SEX$')NA$+4T1S+4-?AW*G0KF9P^L?%962P +MD_'MJ;$9&?*,K,,+8G,R_&82)Y\W1./\IW%#+'Z.#YE>;$8F8R(.W\G1`V]= +M8?W'";5P'_*'#L>OSI>%];>%]'=S-1S-H1UL%2NE"5_^"4]535.51"W9PI+5 +M9_OX^LDM8Q;E_N+'9\;\H_H'POL'M@JZC=#%WLRRS(,LSJ\\OERDC,95TQZB +MPT/I+MOFZCS+^@.!G*D)UJ;/=.1KMLXC8(M*_IH9.PCMU=OG944V#OO9`.X, +ME9QSN$4I)Z3]^S5R[@;[&V"K2BX!6\W8*RD7&>`CZ&B"]%QK1+_+J7"JG,K" +M;F<I)-;;=^_QN/-LEY[6=KOR%Q.,^\OF6:0UFD".E/90T39V\I22[;P546TN+LAY3V +MCUC>'01I?UUC&BN-QOQE\MI=4\+Z/RA-+HM^']NJA^.;+FGPN]"T"-1WG[N<`%055]_"P2 +MY]O?/<1\08\ID9[3#45M\)&\9,R6Y9%^SW+/N_!Z +M8MLRF?;>R:RE>@AT7XK[@W;\N'/#LAOQIPI!-$B@QZ(S3*?>T+1OZ+(OKGAT +M."D5_Y&%WR2L";E;4KTRFUF*^)6=5RW4MQA]`[%S.(`BK/874`D!$T6E`EB, +M?(Z`43H)%G;F=6]A?SBK\ 
+M.`E,]Q5YZA58Y#M]!#`US_05N^'Y>YFI%HH)$EC/QJ_H1$>XH*X'WS@)S!$W +MOOTR736.IC:RH(*MJB.^/*1M_,8*/N@RPTI`FRUAGB(6UW=.A+-8S?E50XH- +M9)!"'L1II?$<;%6Z2",7[Z4,V2*KIP5R.)6/PSEI5\U_X'#,?^-PCQ:I$K%; +MOKV6TX>\1K'C-:V1M;9M,JS.ZM'Y+)LOG1N]AY4WM<+ZN/SIUI&<=4=.@S:B +M"G'Z1Q,*?N'%H7<'\KN._:J)KDJK^"\!K:V3L1U$(U.U8+%3[5=B?Z=8O +MGOI/&2)NK+90I'=MT7]I=LKI-R#S/IR(_7QZIZ]+_-[`$S'DM+`Y^=Z +M,[<2UXP=#AL[2`3:$K$KI_:-;;WA7ZE<0LX.?$,92\MY2%31M#NA1PEU%O2C +M33ZYV$+5_8$D`$\B3>);OX!R2&[,EMZ8;9OW#&@+OC%3$XT;9!B7.\?6E*D; +MC_$6S]/U/+1UIND7U2P9?$*U3)%2@J]^H-4MJC[\PND+2$\_$>YRZK?D;-AG +M8#TZTZ*786MND^HMV3;7VV8L6!G-B+KQUJI?-=X5%/]$VI?6!)49^T8=S0BY +ML?`\?TTE$RHV[7,SA@ZKTMWH0S6?0T*_8DO8[1,;"G.%1`=`ZU"UF^3A;D:.!K"]7FW>LI,")91O\9PT%+ +M0QH.C5V%NR%&O19Y_9='3&\78GHQD.G-?<3TJLZ#S*CK(ZP:86TG"Y+8_DUQMH6;Y>$`,*3XQBS&!%B,7>]>"@/G,2V@.P/Z!\R:)*D#SAC7G +M9,:?=(68?OKK]4?N_%%5X$CHQM&@:NCDG"3J]MBD`J`SD;;20NW#QJ8TPE2E +MUW_VGO,M#;[^&726F0Z6.B-'-<=OS5;8I<4AUS]YO-@SMCCD6J/,R`Z__HG5 +M_GZ5O26G^:^5Z94"6\)[8/=E,T@'G?Z%WAZ_&'7]0.3U`[#'+QD12'K[VV!D +M&:Q8R/5]2I(`)%$@W-VQUR'+.@M]+T!R/N5"QPOU.[TP?KWKA(5JQ3H2O:?: +M>DQ)HQM]=B&1/L="W8H5^<8`\W\/C0'^P%+ +M/]8NU(.*Q]8-E24CQ709S7<^#J(LA%B54:BL3X*1O2:)^.EW_&`N2J%K?0OC)8UXL.,>KQG4^+$!I`$BB+J'%CO,6ZK2> +M#))>7V0@N:#$*6=1;.4/(I4\:,^P_E\B"U7CG9]_1@'?*B:4Z\R"P.NV6%D4 +M[SFA_P)B1PQDF+&**/^97+DB\H._1IR.EL&B]BB5BDBE(@(]0V7* +M_W8?)K-0EWQS%*WBIRXIFXX8<+$:=N3'D);Y0V=8+EYMQ^-B<;77SU_3QK>Z +MB$<3#Q(*[)(UX0-=-,[CS3G5Y>T]?=6O5D^&#\MM(YM#KT>&7P]7U=>`O-7Y +M;VB.*5*:&(7DVLOS5?75TNNSK27/EY6`GR*N3]9',ZJWLX:%R!\LJ]D,;L8= +MKX>E:.JK]7)_S5$CBT+\5[.KVC=&!B.6\F#*F!HETI8AIW4,J?!/91SL'-^6 +M(%#"1+GR1_8HD?8VRHM.'GK7@'U]FO00NMG3L$>S\6'/XSD`)T\WN20;:LOU +MX+ZAB+Y[(7U#H`V0ZH+?K'63-`OQPR]Z(7>WQDRH%V$^'3-DY$"]0F>7L3JO +M=OEY56L;5"VKO;@E-ZROIS2]TFZ;]R8XX@4R#_M`?$#`#*MG=FC?I:B^2X9= +M$,D>XQAPZOC835Q?>OP1-:"LH,6.9D&O#YG@[ +M2I^1LW00]E(V>\P`L?2+,,XP8?-\H(9]Y5HRG<&P>V"?&'Z3POZQC"[$T.%['Y(S_@;G +MQZV(OFS09AEMNNRZ8O:1F&,C;1[VZ)0DRN3=!X`]H;6,7N5]SM+N#%9Y2T-' 
+M\7.RK0D$+`N=D\0E?\*(D+[DL+YD(EZ(_+MYR+]+94*_;D%H<%\E=.WV?*`^ +ME*':C'->P]4?HR4EY?@,^?@L.'M8T*5[$2U,3G43 +MK2_R/RT&(A3*%=AJ+UBHGU!?LJSSWHX;CS&9LR1](FTTC@[S@I(\(V>,R61# +M)A/6QUDR^)+F$$00=/YV[:E9^D751V:?^;Y&`$U=T]^(H\)=':IC.=WD-;:<-$ID_\QJ:$7W8,@.1FDV(U,`60)<2 +M5#D@HV'=3V!5QD,>52Y:XA`HR>3=F^#K.'ER6BO"E+:6:G&PY]+&7409T!QG +MZ3L$H%[=3K,E[&GY2P]T#\!F8`\H]WWNDA[B.2?UG%-NW_#.#)]_X')@H&#] +M\_>`J\!X^!)!))PY"WX,Z.%ZRZIM0];2.ZM%UI(8.ZT`W?KW +M6=($:]'FJT;8LC;0F40E6T8S5201YFF%Z:J%^YR[W-JN[-C2L\1O7&TW,M[I +MH(23&P\V>3]:2K-0[T(0EM#U$IJ/,_5XH+FF\0K%1<+BP+86M%5JS1 +MW6693CNK50BVLEOQRG2IIQPTJ;Q$:>QS%\B51D=^7W^X8@*_1'JV17BV\?[J +M]0>AC8/0A#S"$GP+$;LR2J:U6^U^7I_PAMBS<\>AG>N6?3G]=+J,0WOD%)Y^ +M13?@\^,:QGPTV;((_!`+_PKZA=6RE1$@O:M$64I'W[&P8N-7C'3!X::O(W9V +M6.T)R#,D_N`9[D2>H37A16^1CQW#/$SX.+X<`XZ\P\F"X+ +MYB(>)^TJ`?/4'#O10NQ9U>!0KHR(_4/-^RS4AYCO$YH1]#4-!E.GP]0-:]0K +M([PBQE1[?48`?<8.Z#,2WW4PX)K0GKX(Q` +M>(F=YM3C_MYC<>A4S)#N6(Z!Y"R]B#X9C/"$/%KG8!:1/(C[?5-UQ[+;[-9S +M$P(]4CW)1)^`T0Q;.W<-AGC"(SV1:KL@+DO`?H`[NP(:\(KSX'K-N5`/7W4L +MN^'3N)UGV;>YJO9LVS\R'O$#@<8HU!L#L=;QV-!XZ\@:M5$X\%J0^D4?4F?G[8+(X:( +M"(^:=3(<.Q"ZWZ'.RM>9#\5E%?#=-.2F60\ETBY;*+9Z'$L=R]",8VL0592@ +MG5`Y]S#DGH1E=$AG%(5ZPK#+XU==\^-\SE7U;TP&?IPON-I[=K51"$6R4"V/ +MCF0F44__87,'VV2A^FMO@*UAU[I9>X25'G3@IB_N0KWF6'6<[QN1=J@]J]&: +M#[8.%B.]]FLN679^:Z?^@D3]+0?TU9QC.7%PPWF!=1\O[XJ3,RVC)__T;2BC +MFWS[)>)1Y21ZFC(_H65JTQPXC+ER'1F@)_D[WCF_R:V3!RA+F+H2^DY' +MDZ/9K2NE*TN92;\6O3UO(O!DR-5FOO1:GUI.5YD#BDF5G`EI6RU98WUTP"6) +M8E7(0;-<'J"1\Q^M"7G7@'C3A)?(/!DO2CSLFVEZ3D9IQ9YN6\J_DM\F@<:GF_0`;D[@#R:ZTQW>C\[61ES +M5ZZ[7YJ."!&>1>R862ID3!3!$L[L>U3XXT6A##`]4PC,[`DL^G/Q_OXS3EMA +MDM_K%C)>)G9F'9V)"LN$NOUS_$K/9Q;J"`;KG_6H_J-9Z+45C4%V-"]>*+[V +M9HYLIYR53&R7^4W@;HU>/C7PY"+O(23C494SVDENE:&[F"?O;XVV4#<0![GV +M;-BU9Q$!.8<(")!>6TG\1=C@("`1.8\6F,6`;K)E2O`U[25C>9AM_A)0IY8(E`MQ+X[\ +MS6&,N#:>]-#6]W3Q9MUG1B_("%M=*GHVD*Y1(C_2%MR'J)T>^2U-\.N +M*=5F+OB!V\#6U(G4=0+0B4YP/HS/,X=="SS0P&W@6ZR3\91'7IH9=\_LW83Q( +MF%$D3._(_H:(:Q.0/'>0/#51D!19J%0S%@SZ4*&#OD)U]2MJHI:=AZ\67#&# 
+M3Y.HJC/_,-.>+XXL,F^\+=5U99_JTBRK6=(2'=;;`VE+=>C;31PS1EPQ)U&; +MS9BHT+SVLK]N675%R-D+Z`/:)*K`-H];&-9CNMI9V.G];C:)DIYW58197?=: +M]B**48PH!@)?B<`RN@8#0Z!)O9!1/*XB'*V=W($`':@7"=3FY`.//J-%$'W6 +MUB;)#67ESV)Q_L&:DW"X@]$@X+R`,SH$1"M>L;FRI,5IFU?QQU45:^>*J-Y_ +M1O;^\]^75=!)8698[UWM0D9AB,-8'!78NY]'YL$2]SM55N9:>POH,Y@YA:&B7I!O=D3@ +MK3BOD(M3^$>.K9W:;SFJ"Y*:W,H+M5W`4W/>8";T5B;Q*ZS/NVZ@0_7Q +M(!/RUD=&]K:">[7"&H$5S"T,Y78(KW:V7%'V2!M[6CJNE?0VN058IZ-:A'D* +ML!&'VUX3J"P@>UVU8G4C"9_5!!DVTUMJZG^L/:RJ$FB.PF>.&@G665`=O&[O +M%[8$>Y[4,GH&7(_H?5=IYF2$YD>`$4.I`#_$Y3GP0ZU5,5O)FNARF;3W#=0G +M.CC-O7VB[X(RHF\ZNKUK&^@[XN:>$K?T]^HURZKU5J]96`G-@I\W[U68[Y9E=.8?,3]G`G@?&,T8K7D" +M!`]L>2AC$6VY^0?ON@*DN%YN_'-$KSZL=_F)6D<3>R^=G4>K:-*PO,#>MX)Z%XEZ +ME?)W2D)Z+>&]"Z6]P6&]4M#)\Q5P*$.1CA,--`11R31%-N[?/TZN$&4GQ"B$ +ML8I`F=9[AH$EZ7U%P[&J0R<&]TK">YE\G9"G%!ZZ'.`6!CB%C8[=SMWN:HE3 +MV)!7$:0LI&GMS2K[1VI[D])^B-4@)!K&SKVR&L);'$H)ELBXIGP_L4*DE-#S +M@O)%:I>=VZ#4B.#SRQ=2"R3Y@4&]4[G[E#&OMU6*->U*K=&^[%10@5B=8-+\ +MA20:E(F,L^`+C941Y4(]EX>\>8 +MK^C=3D3G?E/#W)>CW'DILP*-=B]9(=SRM+ +M!,UI5ONKH"Y;@$UG@(58NEXMZ:2DN +M/WP&:]G>R3&+MO%>P,^,JS'R,_"*\5LF`-F6<8TO`5*]:)NL5"";Z%(W"M3) +MVP+=7TE[_9IG,A@SY%D8.GZ*C8=1=SV##PG1`#(SWG$/=M<$N6L`B7M? 
+MR^=GR-['/YCK#V_1+`QT]TG=-R7NFZS9-'\%3;$"5[Z2(7+7PK&/443(M*G> +M,H+<13'S:\+=^4OC(9)IY,<4*2:F@V2@?V(B0CF_NJW$FAZD(`FQ.YMX2_AA +MFGI9#L/*04N<-%P^8P=,G$LRG(2MY)Y\67:$>X/5!+N^,-!]$GR$'$&W'4X- +MIQV=A"VM2`;BHFMSZW=6;%K)IG*IZ-=*?R +M'N+5,[(5^N<$%2]6S"F?K7I.6/V"\_D(]V$_^NP0=Y'?!.'6N=5_"W07._]* +MIW`ER3$\)U8_%TAWX,KG1)#:QCXG<2CUSZJ5SP6KY4P=Z2\GK=#//=MY^D4] +M*0EWOQ;E?J/QO>*?&<,$VF<>#CU05*/2+4.;:;H>LVYE]C:59FRM5[DL.]#] +MHK+`K!Q[NF.16GX6=?`)@R\\6M]35CVPVNO450]MKK(\9:$"W%]SF:&119W\ +M7$M:K2"S2'6>'W"`2/:LAGP]TFV$_O0!=#(GQ*U?]1-:SGK\'5L1R4;K-"60 +M>DO=.FO)IO(2<$_J?JEVLY[D@)':K6@Y?$L1B2ZE2I)945:]7;F-8T"'H/Q5 +MI%4_]/#T34"&N$GUL>R]G_C.Y1A*SS[:(4RBTJT'?_*J#&:M_59'2B+=(6PW +M4300X@[$^M#&^]Y/]N^O5@%4`,SH*P%FCW[T[YXH.%AP#Q7F3E"2MMC/&,J" +M4:N]G/B;?AZYIGP-_#K?JN7S3=]];>]M-T_0PCZS[1G*B% +M.@?O0#J\UW3_"W.$LAUT?^\T-BS+8%$TPTJT6V^&=2GDMJB>'V&)13_"DIN& +M:WY<>T:BZOK9T/4+]/;8%,&B"+Y!?/*!&3-ROR9X.O$W%\'U@`-X0,!<_0(\ +M`.F*]Q!Q$K4]S!UK/?@J%,HK*/0*?55&]8SJEV7PAW&]MU:?7O2/K#T+=@=#,[FN0U6>JZ[.6U=$SR`ZE=B$. +M2*_V-J\/=!-->L-"7%9@9@T+Z;!9-@3:^IR0Z@S?O.%$]+2&]C3&=&(A;GSI +MA-7A/3>!,LA-B^RA-.&""TV9'BPA9YL`,^652A>KY_*?$!(3V7*3G?,*IWF" +M=21&UICUIAD4KXI*7\\L)W#%I +MTW2F8Q(KC3A8;$A>43:=3I_1C%8_RCU(EO">`7%/OWS&/ZW&OX/-H%/2*PPM:&F`EL#18T%C_=@"3[V"8(R&PY_29 +M]"3J:'C/A]!&[^]`AS:.998FK-TGUA_+@EK`^8P(LX?K(>*:^O?-FG5%3/R_H?,_!AFFX> +MWN72_YS!\TX+*,>__3L^C,">/1$]L^DQ[)AQ#M"V="+42\>)+X!+VK.!4`GW +M.A09F9#=A_285LY[-FX^IL_(CGV=@7C^;;_8C"P86?GJR_J,+"R:@1T/[?9\ +MZ^'.I.MS,O0YF1RO,#%;,@SG<+19M3V#CV8X[:)KY:O1,#U\WY4*4V!G0[D4 +M7;\E0WD.YP_3]-LS6$A8M*KN\^485XTY9%!/E)*#*6/Q1RN`?O_V;R$,A/9( +M.2]P5/4[3GRQ1F2$&`C7^K5J!$1`U`39;E?:[1BT +M:$N[K5+7W('M"UVWN]L52G?WVT_-W@FPLMON'YLO7W)GSIS?/>?<.S?GW+GW +MY-S@>YH1:<)(C(22=5[;T=#@9KR1^9(2^^;&O%=W]!4><-?\D=_0>,"]\9FU +M#8U]5C@D''`?:(`:06U^5`]D0"M.FR81+?5\@D>$SX4RU+,'4A +M$>:^=Q5LJZO>C3^Y.G7Y=-Z%$22>O:UC;ZO8VUS>!?:VAOWZ_\J[H!EQ2$RR +MKJY=OSCOWMRT)7YDWHD>8V"[.>".&I*4,!6F%H1^AYM?]*X0TYBYMTW#?E7. 
+M1":,P('ZE4NY7<:I]3SES:DO=O"O>#\X7-XB'3QM>JL-]CAGZ!07*2*(8B2) +MW\;;DY+`GI7L448/\P_;!6/X2L7-N'/N:R*V,S.;5+!5>O&]J^PLBPI8DLM_M +MPYE`;.SBKCOF86\4C"O^IJ=7NQD +MB??9J$M/E>>3">Q6R>.R)'87TBQ@ZN!`;AU'3P[!'R;K]R@=<(MI,?T^!SRN +M8S=('I,ELMO>?ETD6F+Y$H7B]_0C(2WGTUQ'3;UNS"0VO<6I#.U0\6-VF++= +MJAG;S3*QA9)YL@Z2+!9UKC,0C<@J`31=MK8F2T00 +M;F@Q;C7'U!P)XZM*AO=[:PE^J$&J8)>HV"XFL^,LW7I*K,DJ]^C*6V3ES9', +M[JB&^XL1I0$K*49B$$/ZS6(N?_FV]Q;.80_3&3Q3:[?9YS?Z?!CVA*EG0G10 +M!OMIS<%DLZ_7XCLC&I)%#Z%;+BG?3J--B#][W=)#Q8@5@H3W_;ZX4BS+8G3+C +M33%SV)C!;P_+ +MBI%%4/@CL?ZR*(VR],@D4XQUH=N._Q44"\NA?/(T2Y[2QX&NU.PW4I!-Z\$ +MQYKFBIN4@G[>EE]GC'JKC@KVYD6?E+#VR)=1+6L2;L(P0"6PRZCZ!Z.[19"] +MU.:]@C,^';CA"GG]94BQ`(O2N$(%"#+BJ_(+]N1)WI$DL+I$=E'T1]2IT40V +MRW*CSA5JXWQL<$T#KNG8C'CV(::6^5EQ.*4FT2[R=\,@4KP?WC`E9$OCYH"* +MP:M6W^`V)1#<@%VC?N)]E#>`P>O6>>A_08/92']I@\%H_98_-884#E"C4C2"+XU;P; +MM?-NU$WU7SGH9=8QJS2L0@G>5;#JY%2+*1,QMIRJ:N-6F5(]=Z.B,\W:0.#= +MZ`\7TCE%);4-SM"SL)VT;`2=?A-+YQ7Q=M-915@_=<5++T>*>-U->5B_R(3! +M8I>2%5KFHN;TFY('J8A1[G'XFYE%O*--F=W]S%%8T(&[D`Z)D:.H`MSFZ/8B +MWL$F.Z2WPH)I+LKT04<]O#,6+>(=^S1V8,B<4^0*;;9B=S1@,@' +M-[O7%9,W)P?B!33ZJ87#T9Q\WD/J@&:P::C:2*JIV? +M3,:5U.XS8:DTQG.&[A]H#7\[!EH9OXE4!4XG9H`:GF=<")\[]THRUFDD53<;!^HQD'$5*H1%;.>F5KE`5IXL.5,6# +M9]2@+0ZTJT#['-#.?,$,*\$+28`F,)%M`L:G#L`$F9>=:.PEB0ZX)&]2G0,= +M`9.CUF1PMP>.#UGT>VC];EJ_PZIO\O=;'4?,CL-FQR%_CPXLC@?-#%5I2IN- +MD'\/@>80_/UA#,AMTN^>QXSNICCP=U*_F'F>50<6*8`!?IU&42J9YM- +M7"D24K"P#W6%OF/.,[[$X`2W`RNJ8J%Q;'Y5-N^JC\G3@`(U2-@O30B.DF/H +M`/6Y+\*VT#;&4X&\))!A6I>S]Y,XD'M(E@`>J.S*&Y2`OA*K7_0$8Q_L!" +M7(]^7FEY7TP3'XF=2OI]B3/TN#.D/N=+"E[3.QXPK4)MCO3I\:,4+4(...\. 
+M..^^:2`MSKNKIAP+>?#[67EM@^/3>6W]L_/:2H-_:49:>!YT/U\6_*1>ROO& +MWBKCW4KAH@45]"%EP1OUTN1Y+5*/ +M+%`,QX`75B4BF0)4I($>P>MRQ*"]7`N/^7R-,`WU*#C>"%$$4BM`^1J/@L%; +MY&'?$\J"?F/GL-`A`_\6C'IBD6I"R!^')6%ULI`_P<4-TW3T5MBOKB8$Z#@L +M52<+T`G[3.R#HEPLQ&5+7EQ>NG9]137^R--X=LZBPB49=IQ:5%.Q=@->4%.* +M/UG]$IZZ`$]9D)ZR,#UE/IZ77X"GVNT+\.HJO*QT;<4FZZKU%:Z2TO45/ZTJ +M7;?>ZJJN3+>55U>6VL)4VT9;V<:5\ZT+K"E&,[X<7E$!&0O2-FW"[?;[K'8[ +:?.,6/`4>V.%+*D20C-+)Z_\";.?>SN!F```` +` +end diff --git a/sys/dev/cxgb/ulp/toecore/cxgb_toedev.h b/sys/dev/cxgb/ulp/toecore/cxgb_toedev.h index 8e88d6b..c70c37d 100644 --- a/sys/dev/cxgb/ulp/toecore/cxgb_toedev.h +++ b/sys/dev/cxgb/ulp/toecore/cxgb_toedev.h @@ -41,6 +41,8 @@ enum { TOE_ID_CHELSIO_T2, TOE_ID_CHELSIO_T3, TOE_ID_CHELSIO_T3B, -}; + TOE_ID_CHELSIO_T3C, +} + ; #endif diff --git a/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c b/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c index 0f2f2ee..96e5b65 100644 --- a/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c +++ b/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c @@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -63,9 +64,9 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include - #include #include #include @@ -84,8 +85,6 @@ __FBSDID("$FreeBSD$"); #include #include - - /* * For ULP connections HW may add headers, e.g., for digests, that aren't part * of the messages sent by the host but that are part of the TCP payload and @@ -118,7 +117,7 @@ static unsigned int wrlen __read_mostly; * in the skb and whether it has any payload in its main body. This maps the * length of the gather list represented by an skb into the # of necessary WRs. */ -static unsigned int mbuf_wrs[TX_MAX_SEGS] __read_mostly; +static unsigned int mbuf_wrs[TX_MAX_SEGS + 1] __read_mostly; /* * Max receive window supported by HW in bytes. 
Only a small part of it can @@ -147,6 +146,37 @@ static void send_abort_rpl(struct mbuf *m, struct toedev *tdev, int rst_status); static inline void free_atid(struct t3cdev *cdev, unsigned int tid); static void handle_syncache_event(int event, void *arg); +static inline void +SBAPPEND(struct sockbuf *sb, struct mbuf *n) +{ + struct mbuf * m; + + m = sb->sb_mb; + while (m) { + KASSERT(((m->m_flags & M_EXT) && (m->m_ext.ext_type == EXT_EXTREF)) || + !(m->m_flags & M_EXT), ("unexpected type M_EXT=%d ext_type=%d m_len=%d\n", + !!(m->m_flags & M_EXT), m->m_ext.ext_type, m->m_len)); + KASSERT(m->m_next != (struct mbuf *)0xffffffff, ("bad next value m_next=%p m_nextpkt=%p m_flags=0x%x", + m->m_next, m->m_nextpkt, m->m_flags)); + m = m->m_next; + } + m = n; + while (m) { + KASSERT(((m->m_flags & M_EXT) && (m->m_ext.ext_type == EXT_EXTREF)) || + !(m->m_flags & M_EXT), ("unexpected type M_EXT=%d ext_type=%d m_len=%d\n", + !!(m->m_flags & M_EXT), m->m_ext.ext_type, m->m_len)); + KASSERT(m->m_next != (struct mbuf *)0xffffffff, ("bad next value m_next=%p m_nextpkt=%p m_flags=0x%x", + m->m_next, m->m_nextpkt, m->m_flags)); + m = m->m_next; + } + sbappend_locked(sb, n); + m = sb->sb_mb; + while (m) { + KASSERT(m->m_next != (struct mbuf *)0xffffffff, ("bad next value m_next=%p m_nextpkt=%p m_flags=0x%x", + m->m_next, m->m_nextpkt, m->m_flags)); + m = m->m_next; + } +} static inline int is_t3a(const struct toedev *dev) @@ -166,6 +196,7 @@ dump_toepcb(struct toepcb *toep) toep->tp_mss_clamp, toep->tp_flags); } +#ifndef RTALLOC2_DEFINED static struct rtentry * rtalloc2(struct sockaddr *dst, int report, u_long ignflags) { @@ -176,7 +207,7 @@ rtalloc2(struct sockaddr *dst, int report, u_long ignflags) return (rt); } - +#endif /* * Determine whether to send a CPL message now or defer it. A message is * deferred if the connection is in SYN_SENT since we don't know the TID yet. @@ -185,39 +216,39 @@ rtalloc2(struct sockaddr *dst, int report, u_long ignflags) * it is sent directly. 
*/ static inline void -send_or_defer(struct socket *so, struct tcpcb *tp, struct mbuf *m, int through_l2t) +send_or_defer(struct toepcb *toep, struct mbuf *m, int through_l2t) { - struct toepcb *toep = tp->t_toe; + struct tcpcb *tp = toep->tp_tp; - if (__predict_false(tp->t_state == TCPS_SYN_SENT)) { INP_LOCK(tp->t_inpcb); mbufq_tail(&toep->out_of_order_queue, m); // defer INP_UNLOCK(tp->t_inpcb); } else if (through_l2t) - l2t_send(T3C_DEV(so), m, toep->tp_l2t); // send through L2T + l2t_send(TOEP_T3C_DEV(toep), m, toep->tp_l2t); // send through L2T else - cxgb_ofld_send(T3C_DEV(so), m); // send directly + cxgb_ofld_send(TOEP_T3C_DEV(toep), m); // send directly } static inline unsigned int -mkprio(unsigned int cntrl, const struct socket *so) +mkprio(unsigned int cntrl, const struct toepcb *toep) { - return cntrl; + return (cntrl); } /* * Populate a TID_RELEASE WR. The skb must be already propely sized. */ static inline void -mk_tid_release(struct mbuf *m, const struct socket *so, unsigned int tid) +mk_tid_release(struct mbuf *m, const struct toepcb *toep, unsigned int tid) { struct cpl_tid_release *req; - m_set_priority(m, mkprio(CPL_PRIORITY_SETUP, so)); + m_set_priority(m, mkprio(CPL_PRIORITY_SETUP, toep)); m->m_pkthdr.len = m->m_len = sizeof(*req); req = mtod(m, struct cpl_tid_release *); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + req->wr.wr_lo = 0; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); } @@ -257,6 +288,8 @@ make_tx_data_wr(struct socket *so, struct mbuf *m, int len, struct mbuf *tail) } } +#define IMM_LEN 64 /* XXX - see WR_LEN in the cxgb driver */ + int t3_push_frames(struct socket *so, int req_completion) { @@ -266,9 +299,8 @@ t3_push_frames(struct socket *so, int req_completion) struct mbuf *tail, *m0, *last; struct t3cdev *cdev; struct tom_data *d; - int bytes, count, total_bytes; + int i, bytes, count, total_bytes; bus_dma_segment_t segs[TX_MAX_SEGS], *segp; - segp = segs; if (tp->t_state == TCPS_SYN_SENT || tp->t_state 
== TCPS_CLOSED) { DPRINTF("tcp state=%d\n", tp->t_state); @@ -281,10 +313,9 @@ t3_push_frames(struct socket *so, int req_completion) return (0); } - INP_LOCK_ASSERT(tp->t_inpcb); + INP_LOCK_ASSERT(tp->t_inpcb); SOCKBUF_LOCK(&so->so_snd); - d = TOM_DATA(TOE_DEV(so)); cdev = d->cdev; last = tail = so->so_snd.sb_sndptr ? so->so_snd.sb_sndptr : so->so_snd.sb_mb; @@ -306,61 +337,103 @@ t3_push_frames(struct socket *so, int req_completion) toep->tp_m_last = NULL; while (toep->tp_wr_avail && (tail != NULL)) { count = bytes = 0; + segp = segs; if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL) { SOCKBUF_UNLOCK(&so->so_snd); return (0); } - while ((mbuf_wrs[count + 1] <= toep->tp_wr_avail) - && (tail != NULL) && (count < TX_MAX_SEGS)) { - bytes += tail->m_len; - count++; + /* + * If the data in tail fits as in-line, then + * make an immediate data wr. + */ + if (tail->m_len <= IMM_LEN) { + count = 1; + bytes = tail->m_len; last = tail; - /* - * technically an abuse to be using this for a VA - * but less gross than defining my own structure - * or calling pmap_kextract from here :-| - */ - segp->ds_addr = (bus_addr_t)tail->m_data; - segp->ds_len = tail->m_len; - DPRINTF("count=%d wr_needed=%d ds_addr=%p ds_len=%d\n", - count, mbuf_wrs[count], tail->m_data, tail->m_len); - - segp++; tail = tail->m_next; + m_set_sgl(m0, NULL); + m_set_sgllen(m0, 0); + make_tx_data_wr(so, m0, bytes, tail); + m_append(m0, bytes, mtod(last, caddr_t)); + KASSERT(!m0->m_next, ("bad append")); + } else { + while ((mbuf_wrs[count + 1] <= toep->tp_wr_avail) + && (tail != NULL) && (count < TX_MAX_SEGS-1)) { + bytes += tail->m_len; + last = tail; + count++; + /* + * technically an abuse to be using this for a VA + * but less gross than defining my own structure + * or calling pmap_kextract from here :-| + */ + segp->ds_addr = (bus_addr_t)tail->m_data; + segp->ds_len = tail->m_len; + DPRINTF("count=%d wr_needed=%d ds_addr=%p ds_len=%d\n", + count, mbuf_wrs[count], tail->m_data, tail->m_len); + segp++; + 
tail = tail->m_next; + } + DPRINTF("wr_avail=%d mbuf_wrs[%d]=%d tail=%p\n", + toep->tp_wr_avail, count, mbuf_wrs[count], tail); + + m_set_sgl(m0, segs); + m_set_sgllen(m0, count); + make_tx_data_wr(so, m0, bytes, tail); } - DPRINTF("wr_avail=%d mbuf_wrs[%d]=%d tail=%p\n", - toep->tp_wr_avail, count, mbuf_wrs[count], tail); + m_set_priority(m0, mkprio(CPL_PRIORITY_DATA, toep)); + if (tail) { so->so_snd.sb_sndptr = tail; toep->tp_m_last = NULL; } else toep->tp_m_last = so->so_snd.sb_sndptr = last; + DPRINTF("toep->tp_m_last=%p\n", toep->tp_m_last); so->so_snd.sb_sndptroff += bytes; total_bytes += bytes; toep->tp_write_seq += bytes; - - - SOCKBUF_UNLOCK(&so->so_snd); - - /* - * XXX can drop socket buffer lock here - */ + CTR6(KTR_TOM, "t3_push_frames: wr_avail=%d mbuf_wrs[%d]=%d tail=%p sndptr=%p sndptroff=%d", + toep->tp_wr_avail, count, mbuf_wrs[count], tail, so->so_snd.sb_sndptr, so->so_snd.sb_sndptroff); + if (tail) + CTR4(KTR_TOM, "t3_push_frames: total_bytes=%d tp_m_last=%p tailbuf=%p snd_una=0x%08x", + total_bytes, toep->tp_m_last, tail->m_data, tp->snd_una); + else + CTR3(KTR_TOM, "t3_push_frames: total_bytes=%d tp_m_last=%p snd_una=0x%08x", + total_bytes, toep->tp_m_last, tp->snd_una); + + + i = 0; + while (i < count && m_get_sgllen(m0)) { + if ((count - i) >= 3) { + CTR6(KTR_TOM, + "t3_push_frames: pa=0x%zx len=%d pa=0x%zx len=%d pa=0x%zx len=%d", + segs[i].ds_addr, segs[i].ds_len, segs[i + 1].ds_addr, segs[i + 1].ds_len, + segs[i + 2].ds_addr, segs[i + 2].ds_len); + i += 3; + } else if ((count - i) == 2) { + CTR4(KTR_TOM, + "t3_push_frames: pa=0x%zx len=%d pa=0x%zx len=%d", + segs[i].ds_addr, segs[i].ds_len, segs[i + 1].ds_addr, segs[i + 1].ds_len); + i += 2; + } else { + CTR2(KTR_TOM, "t3_push_frames: pa=0x%zx len=%d", + segs[i].ds_addr, segs[i].ds_len); + i++; + } - toep->tp_wr_avail -= mbuf_wrs[count]; - toep->tp_wr_unacked += mbuf_wrs[count]; + } - make_tx_data_wr(so, m0, bytes, tail); - m_set_priority(m0, mkprio(CPL_PRIORITY_DATA, so)); - m_set_sgl(m0, 
segs); - m_set_sgllen(m0, count); - /* + /* * remember credits used */ m0->m_pkthdr.csum_data = mbuf_wrs[count]; m0->m_pkthdr.len = bytes; + toep->tp_wr_avail -= mbuf_wrs[count]; + toep->tp_wr_unacked += mbuf_wrs[count]; + if ((req_completion && toep->tp_wr_unacked == mbuf_wrs[count]) || toep->tp_wr_unacked >= toep->tp_wr_max / 2) { struct work_request_hdr *wr = cplhdr(m0); @@ -368,18 +441,16 @@ t3_push_frames(struct socket *so, int req_completion) wr->wr_hi |= htonl(F_WR_COMPL); toep->tp_wr_unacked = 0; } - + KASSERT((m0->m_pkthdr.csum_data > 0) && + (m0->m_pkthdr.csum_data <= 4), ("bad credit count %d", + m0->m_pkthdr.csum_data)); m0->m_type = MT_DONTFREE; enqueue_wr(toep, m0); DPRINTF("sending offload tx with %d bytes in %d segments\n", bytes, count); - l2t_send(cdev, m0, toep->tp_l2t); - if (toep->tp_wr_avail && (tail != NULL)) - SOCKBUF_LOCK(&so->so_snd); } - - SOCKBUF_UNLOCK_ASSERT(&so->so_snd); + SOCKBUF_UNLOCK(&so->so_snd); return (total_bytes); } @@ -467,13 +538,105 @@ t3_send_rx_credits(struct tcpcb *tp, uint32_t credits, uint32_t dack, int nofail req = mtod(m, struct cpl_rx_data_ack *); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + req->wr.wr_lo = 0; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tp_tid)); req->credit_dack = htonl(dack | V_RX_CREDITS(credits)); - m_set_priority(m, mkprio(CPL_PRIORITY_ACK, toeptoso(toep))); + m_set_priority(m, mkprio(CPL_PRIORITY_ACK, toep)); cxgb_ofld_send(TOM_DATA(tdev)->cdev, m); return (credits); } +/* + * Send RX_DATA_ACK CPL message to request a modulation timer to be scheduled. + * This is only used in DDP mode, so we take the opportunity to also set the + * DACK mode and flush any Rx credits. 
+ */ +void +t3_send_rx_modulate(struct toepcb *toep) +{ + struct mbuf *m; + struct cpl_rx_data_ack *req; + + m = m_gethdr_nofail(sizeof(*req)); + + req = mtod(m, struct cpl_rx_data_ack *); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + req->wr.wr_lo = 0; + m->m_pkthdr.len = m->m_len = sizeof(*req); + + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tp_tid)); + req->credit_dack = htonl(F_RX_MODULATE | F_RX_DACK_CHANGE | + V_RX_DACK_MODE(1) | + V_RX_CREDITS(toep->tp_copied_seq - toep->tp_rcv_wup)); + m_set_priority(m, mkprio(CPL_PRIORITY_CONTROL, toep)); + cxgb_ofld_send(TOEP_T3C_DEV(toep), m); + toep->tp_rcv_wup = toep->tp_copied_seq; +} + +/* + * Handle receipt of an urgent pointer. + */ +static void +handle_urg_ptr(struct socket *so, uint32_t urg_seq) +{ +#ifdef URGENT_DATA_SUPPORTED + struct tcpcb *tp = sototcpcb(so); + + urg_seq--; /* initially points past the urgent data, per BSD */ + + if (tp->urg_data && !after(urg_seq, tp->urg_seq)) + return; /* duplicate pointer */ + sk_send_sigurg(sk); + if (tp->urg_seq == tp->copied_seq && tp->urg_data && + !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { + struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); + + tp->copied_seq++; + if (skb && tp->copied_seq - TCP_SKB_CB(skb)->seq >= skb->len) + tom_eat_skb(sk, skb, 0); + } + tp->urg_data = TCP_URG_NOTYET; + tp->urg_seq = urg_seq; +#endif +} + +/* + * Returns true if a socket cannot accept new Rx data. + */ +static inline int +so_no_receive(const struct socket *so) +{ + return (so->so_state & (SS_ISDISCONNECTED|SS_ISDISCONNECTING)); +} + +/* + * Process an urgent data notification. + */ +static void +rx_urg_notify(struct toepcb *toep, struct mbuf *m) +{ + struct cpl_rx_urg_notify *hdr = cplhdr(m); + struct socket *so = toeptoso(toep); + + VALIDATE_SOCK(so); + + if (!so_no_receive(so)) + handle_urg_ptr(so, ntohl(hdr->seq)); + + m_freem(m); +} + +/* + * Handler for RX_URG_NOTIFY CPL messages. 
+ */ +static int +do_rx_urg_notify(struct t3cdev *cdev, struct mbuf *m, void *ctx) +{ + struct toepcb *toep = (struct toepcb *)ctx; + + rx_urg_notify(toep, m); + return (0); +} /* * Set of states for which we should return RX credits. @@ -485,7 +648,7 @@ t3_send_rx_credits(struct tcpcb *tp, uint32_t credits, uint32_t dack, int nofail * to the HW for the amount of data processed. */ void -t3_cleanup_rbuf(struct tcpcb *tp) +t3_cleanup_rbuf(struct tcpcb *tp, int copied) { struct toepcb *toep = tp->t_toe; struct socket *so; @@ -493,23 +656,38 @@ t3_cleanup_rbuf(struct tcpcb *tp) int dack_mode, must_send, read; u32 thres, credits, dack = 0; + so = tp->t_inpcb->inp_socket; if (!((tp->t_state == TCPS_ESTABLISHED) || (tp->t_state == TCPS_FIN_WAIT_1) || - (tp->t_state == TCPS_FIN_WAIT_2))) + (tp->t_state == TCPS_FIN_WAIT_2))) { + if (copied) { + SOCKBUF_LOCK(&so->so_rcv); + toep->tp_copied_seq += copied; + SOCKBUF_UNLOCK(&so->so_rcv); + } + return; - INP_LOCK_ASSERT(tp->t_inpcb); + } - so = tp->t_inpcb->inp_socket; + INP_LOCK_ASSERT(tp->t_inpcb); SOCKBUF_LOCK(&so->so_rcv); - read = toep->tp_enqueued_bytes - so->so_rcv.sb_cc; - toep->tp_copied_seq += read; - toep->tp_enqueued_bytes -= read; + if (copied) + toep->tp_copied_seq += copied; + else { + read = toep->tp_enqueued_bytes - so->so_rcv.sb_cc; + toep->tp_copied_seq += read; + } credits = toep->tp_copied_seq - toep->tp_rcv_wup; + toep->tp_enqueued_bytes = so->so_rcv.sb_cc; SOCKBUF_UNLOCK(&so->so_rcv); - if (credits > so->so_rcv.sb_mbmax) + if (credits > so->so_rcv.sb_mbmax) { printf("copied_seq=%u rcv_wup=%u credits=%u\n", toep->tp_copied_seq, toep->tp_rcv_wup, credits); - /* + credits = so->so_rcv.sb_mbmax; + } + + + /* * XXX this won't accurately reflect credit return - we need * to look at the difference between the amount that has been * put in the recv sockbuf and what is there now @@ -593,7 +771,7 @@ static int cxgb_toe_rcvd(struct tcpcb *tp) { INP_LOCK_ASSERT(tp->t_inpcb); - t3_cleanup_rbuf(tp); + 
t3_cleanup_rbuf(tp, 0); return (0); } @@ -631,16 +809,18 @@ static struct toe_usrreqs cxgb_toe_usrreqs = { static void -__set_tcb_field(struct socket *so, struct mbuf *m, uint16_t word, +__set_tcb_field(struct toepcb *toep, struct mbuf *m, uint16_t word, uint64_t mask, uint64_t val, int no_reply) { struct cpl_set_tcb_field *req; - struct tcpcb *tp = sototcpcb(so); - struct toepcb *toep = tp->t_toe; + + CTR4(KTR_TCB, "__set_tcb_field_ulp(tid=%u word=0x%x mask=%jx val=%jx", + toep->tp_tid, word, mask, val); req = mtod(m, struct cpl_set_tcb_field *); m->m_pkthdr.len = m->m_len = sizeof(*req); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + req->wr.wr_lo = 0; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tp_tid)); req->reply = V_NO_REPLY(no_reply); req->cpu_idx = 0; @@ -648,8 +828,8 @@ __set_tcb_field(struct socket *so, struct mbuf *m, uint16_t word, req->mask = htobe64(mask); req->val = htobe64(val); - m_set_priority(m, mkprio(CPL_PRIORITY_CONTROL, so)); - send_or_defer(so, tp, m, 0); + m_set_priority(m, mkprio(CPL_PRIORITY_CONTROL, toep)); + send_or_defer(toep, m, 0); } static void @@ -661,13 +841,15 @@ t3_set_tcb_field(struct socket *so, uint16_t word, uint64_t mask, uint64_t val) if (toep == NULL) return; - - if (tp->t_state == TCPS_CLOSED || (toep->tp_flags & TP_ABORT_SHUTDOWN)) + + if (tp->t_state == TCPS_CLOSED || (toep->tp_flags & TP_ABORT_SHUTDOWN)) { + printf("not seting field\n"); return; - + } + m = m_gethdr_nofail(sizeof(struct cpl_set_tcb_field)); - __set_tcb_field(so, m, word, mask, val, 1); + __set_tcb_field(toep, m, word, mask, val, 1); } /* @@ -735,10 +917,11 @@ t3_set_tos(struct socket *so) static void t3_enable_ddp(struct socket *so, int on) { - if (on) + if (on) { + t3_set_tcb_field(so, W_TCB_RX_DDP_FLAGS, V_TF_DDP_OFF(1), V_TF_DDP_OFF(0)); - else + } else t3_set_tcb_field(so, W_TCB_RX_DDP_FLAGS, V_TF_DDP_OFF(1) | TP_DDP_TIMER_WORKAROUND_MASK, @@ -747,7 +930,6 @@ t3_enable_ddp(struct socket *so, int on) } - void 
t3_set_ddp_tag(struct socket *so, int buf_idx, unsigned int tag_color) { @@ -777,7 +959,7 @@ t3_set_ddp_buf(struct socket *so, int buf_idx, unsigned int offset, static int t3_set_cong_control(struct socket *so, const char *name) { -#ifdef notyet +#ifdef CONGESTION_CONTROL_SUPPORTED int cong_algo; for (cong_algo = 0; cong_algo < ARRAY_SIZE(t3_cong_ops); cong_algo++) @@ -802,12 +984,14 @@ t3_get_tcb(struct socket *so) return (ENOMEM); INP_LOCK_ASSERT(tp->t_inpcb); - m_set_priority(m, mkprio(CPL_PRIORITY_CONTROL, so)); + m_set_priority(m, mkprio(CPL_PRIORITY_CONTROL, toep)); req = mtod(m, struct cpl_get_tcb *); m->m_pkthdr.len = m->m_len = sizeof(*req); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + req->wr.wr_lo = 0; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_GET_TCB, toep->tp_tid)); req->cpuno = htons(toep->tp_qset); + req->rsvd = 0; if (sototcpcb(so)->t_state == TCPS_SYN_SENT) mbufq_tail(&toep->out_of_order_queue, m); // defer else @@ -863,14 +1047,6 @@ select_mss(struct t3c_data *td, struct tcpcb *tp, unsigned int pmtu) return (idx); } -void -t3_release_ddp_resources(struct toepcb *toep) -{ - /* - * This is a no-op until we have DDP support - */ -} - static inline void free_atid(struct t3cdev *cdev, unsigned int tid) { @@ -915,8 +1091,6 @@ t3_release_offload_resources(struct toepcb *toep) l2t_release(L2DATA(cdev), toep->tp_l2t); toep->tp_l2t = NULL; } - printf("setting toep->tp_tp to NULL\n"); - toep->tp_tp = NULL; if (tp) { INP_LOCK_ASSERT(tp->t_inpcb); @@ -964,16 +1138,16 @@ select_rcv_wscale(int space) if (tcp_do_rfc1323) for (; space > 65535 && wscale < 14; space >>= 1, ++wscale) ; - return wscale; + + return (wscale); } /* * Determine the receive window size for a socket. 
*/ -static unsigned int -select_rcv_wnd(struct socket *so) +static unsigned long +select_rcv_wnd(struct toedev *dev, struct socket *so) { - struct toedev *dev = TOE_DEV(so); struct tom_data *d = TOM_DATA(dev); unsigned int wnd; unsigned int max_rcv_wnd; @@ -981,7 +1155,9 @@ select_rcv_wnd(struct socket *so) if (tcp_do_autorcvbuf) wnd = tcp_autorcvbuf_max; else - wnd = sbspace(&so->so_rcv); + wnd = so->so_rcv.sb_hiwat; + + /* XXX * For receive coalescing to work effectively we need a receive window @@ -991,7 +1167,7 @@ select_rcv_wnd(struct socket *so) wnd = MIN_RCV_WND; /* PR 5138 */ - max_rcv_wnd = (dev->tod_ttid == TOE_ID_CHELSIO_T3B ? + max_rcv_wnd = (dev->tod_ttid < TOE_ID_CHELSIO_T3C ? (uint32_t)d->rx_page_size * 23 : MAX_RCV_WND); @@ -1017,7 +1193,8 @@ init_offload_socket(struct socket *so, struct toedev *dev, unsigned int tid, * or we need to add this */ so->so_snd.sb_flags |= SB_NOCOALESCE; - + so->so_rcv.sb_flags |= SB_NOCOALESCE; + tp->t_toe = toep; toep->tp_tp = tp; toep->tp_toedev = dev; @@ -1033,7 +1210,8 @@ init_offload_socket(struct socket *so, struct toedev *dev, unsigned int tid, * XXX broken * */ - tp->rcv_wnd = select_rcv_wnd(so); + tp->rcv_wnd = select_rcv_wnd(dev, so); + toep->tp_ulp_mode = TOM_TUNABLE(dev, ddp) && !(so->so_options & SO_NO_DDP) && tp->rcv_wnd >= MIN_DDP_RCV_WIN ? ULP_MODE_TCPDDP : 0; toep->tp_qset_idx = 0; @@ -1076,9 +1254,23 @@ calc_opt2(const struct socket *so, struct toedev *dev) flv_valid = (TOM_TUNABLE(dev, cong_alg) != -1); - return V_FLAVORS_VALID(flv_valid) | - V_CONG_CONTROL_FLAVOR(flv_valid ? TOM_TUNABLE(dev, cong_alg) : 0); + return (V_FLAVORS_VALID(flv_valid) | + V_CONG_CONTROL_FLAVOR(flv_valid ? 
TOM_TUNABLE(dev, cong_alg) : 0)); +} + +#if DEBUG_WR > 1 +static int +count_pending_wrs(const struct toepcb *toep) +{ + const struct mbuf *m; + int n = 0; + + wr_queue_walk(toep, m) + n += m->m_pkthdr.csum_data; + return (n); } +#endif + #if 0 (((*(struct tom_data **)&(dev)->l4opt)->conf.cong_alg) != -1) #endif @@ -1093,18 +1285,18 @@ mk_act_open_req(struct socket *so, struct mbuf *m, struct toepcb *toep = tp->t_toe; struct toedev *tdev = TOE_DEV(so); - m_set_priority((struct mbuf *)m, mkprio(CPL_PRIORITY_SETUP, so)); + m_set_priority((struct mbuf *)m, mkprio(CPL_PRIORITY_SETUP, toep)); req = mtod(m, struct cpl_act_open_req *); m->m_pkthdr.len = m->m_len = sizeof(*req); - + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + req->wr.wr_lo = 0; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid)); req->local_port = inp->inp_lport; req->peer_port = inp->inp_fport; memcpy(&req->local_ip, &inp->inp_laddr, 4); memcpy(&req->peer_ip, &inp->inp_faddr, 4); - DPRINTF("connect smt_idx=%d\n", e->smt_idx); req->opt0h = htonl(calc_opt0h(so, toep->tp_mtu_idx) | V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx)); req->opt0l = htonl(calc_opt0l(so, toep->tp_ulp_mode)); @@ -1144,7 +1336,7 @@ fail_act_open(struct toepcb *toep, int errno) t3_release_offload_resources(toep); if (tp) { INP_LOCK_ASSERT(tp->t_inpcb); - cxgb_tcp_drop(tp, errno); + tcp_drop(tp, errno); } #ifdef notyet @@ -1289,8 +1481,6 @@ t3_connect(struct toedev *tdev, struct socket *so, toep = tp->t_toe; m_set_toep(m, tp->t_toe); - printf("sending off request\n"); - toep->tp_state = TCPS_SYN_SENT; l2t_send(d->cdev, (struct mbuf *)m, e); @@ -1342,7 +1532,7 @@ t3_send_reset(struct toepcb *toep) mode |= CPL_ABORT_POST_CLOSE_REQ; m = m_gethdr_nofail(sizeof(*req)); - m_set_priority(m, mkprio(CPL_PRIORITY_DATA, so)); + m_set_priority(m, mkprio(CPL_PRIORITY_DATA, toep)); set_arp_failure_handler(m, abort_arp_failure); req = mtod(m, struct cpl_abort_req *); @@ -1416,7 +1606,7 @@ t3_tcp_ctloutput(struct socket *so, struct 
sockopt *sopt) * XXX I need to revisit this */ if ((err = t3_set_cong_control(so, name)) == 0) { -#ifdef notyet +#ifdef CONGESTION_CONTROL_SUPPORTED tp->t_cong_control = strdup(name, M_CXGB); #endif } else @@ -1465,7 +1655,280 @@ t3_ctloutput(struct socket *so, struct sockopt *sopt) if (err != EOPNOTSUPP) return (err); - return tcp_ctloutput(so, sopt); + return (tcp_ctloutput(so, sopt)); +} + +/* + * Returns true if we need to explicitly request RST when we receive new data + * on an RX-closed connection. + */ +static inline int +need_rst_on_excess_rx(const struct toepcb *toep) +{ + return (1); +} + +/* + * Handles Rx data that arrives in a state where the socket isn't accepting + * new data. + */ +static void +handle_excess_rx(struct toepcb *toep, struct mbuf *m) +{ + + if (need_rst_on_excess_rx(toep) && !(toep->tp_flags & TP_ABORT_SHUTDOWN)) + t3_send_reset(toep); + m_freem(m); +} + +/* + * Process a get_tcb_rpl as a DDP completion (similar to RX_DDP_COMPLETE) + * by getting the DDP offset from the TCB. + */ +static void +tcb_rpl_as_ddp_complete(struct toepcb *toep, struct mbuf *m) +{ + struct ddp_state *q = &toep->tp_ddp_state; + struct ddp_buf_state *bsp; + struct cpl_get_tcb_rpl *hdr; + unsigned int ddp_offset; + struct socket *so; + struct tcpcb *tp; + + uint64_t t; + __be64 *tcb; + + so = toeptoso(toep); + tp = toep->tp_tp; + + INP_LOCK_ASSERT(tp->t_inpcb); + SOCKBUF_LOCK(&so->so_rcv); + + /* Note that we only accout for CPL_GET_TCB issued by the DDP code. We + * really need a cookie in order to dispatch the RPLs. + */ + q->get_tcb_count--; + + /* It is a possible that a previous CPL already invalidated UBUF DDP + * and moved the cur_buf idx and hence no further processing of this + * skb is required. However, the app might be sleeping on + * !q->get_tcb_count and we need to wake it up. 
+ */ + if (q->cancel_ubuf && !t3_ddp_ubuf_pending(toep)) { + struct socket *so = toeptoso(toep); + + m_freem(m); + if (__predict_true((so->so_state & SS_NOFDREF) == 0)) + sorwakeup_locked(so); + else + SOCKBUF_UNLOCK(&so->so_rcv); + return; + } + + bsp = &q->buf_state[q->cur_buf]; + hdr = cplhdr(m); + tcb = (__be64 *)(hdr + 1); + if (q->cur_buf == 0) { + t = be64toh(tcb[(31 - W_TCB_RX_DDP_BUF0_OFFSET) / 2]); + ddp_offset = t >> (32 + S_TCB_RX_DDP_BUF0_OFFSET); + } else { + t = be64toh(tcb[(31 - W_TCB_RX_DDP_BUF1_OFFSET) / 2]); + ddp_offset = t >> S_TCB_RX_DDP_BUF1_OFFSET; + } + ddp_offset &= M_TCB_RX_DDP_BUF0_OFFSET; + m->m_cur_offset = bsp->cur_offset; + bsp->cur_offset = ddp_offset; + m->m_len = m->m_pkthdr.len = ddp_offset - m->m_cur_offset; + + CTR5(KTR_TOM, + "tcb_rpl_as_ddp_complete: idx=%d seq=0x%x hwbuf=%u ddp_offset=%u cur_offset=%u", + q->cur_buf, tp->rcv_nxt, q->cur_buf, ddp_offset, m->m_cur_offset); + KASSERT(ddp_offset >= m->m_cur_offset, ("ddp_offset=%u less than cur_offset=%u", + ddp_offset, m->m_cur_offset)); + +#ifdef T3_TRACE + T3_TRACE3(TIDTB(so), + "tcb_rpl_as_ddp_complete: seq 0x%x hwbuf %u ddp_offset %u", + tp->rcv_nxt, q->cur_buf, ddp_offset); +#endif + +#if 0 +{ + unsigned int ddp_flags, rcv_nxt, rx_hdr_offset, buf_idx; + + t = be64toh(tcb[(31 - W_TCB_RX_DDP_FLAGS) / 2]); + ddp_flags = (t >> S_TCB_RX_DDP_FLAGS) & M_TCB_RX_DDP_FLAGS; + + t = be64toh(tcb[(31 - W_TCB_RCV_NXT) / 2]); + rcv_nxt = t >> S_TCB_RCV_NXT; + rcv_nxt &= M_TCB_RCV_NXT; + + t = be64toh(tcb[(31 - W_TCB_RX_HDR_OFFSET) / 2]); + rx_hdr_offset = t >> (32 + S_TCB_RX_HDR_OFFSET); + rx_hdr_offset &= M_TCB_RX_HDR_OFFSET; + + T3_TRACE2(TIDTB(sk), + "tcb_rpl_as_ddp_complete: DDP FLAGS 0x%x dma up to 0x%x", + ddp_flags, rcv_nxt - rx_hdr_offset); + T3_TRACE4(TB(q), + "tcb_rpl_as_ddp_complete: rcvnxt 0x%x hwbuf %u cur_offset %u cancel %u", + tp->rcv_nxt, q->cur_buf, bsp->cur_offset, q->cancel_ubuf); + T3_TRACE3(TB(q), + "tcb_rpl_as_ddp_complete: TCB rcvnxt 0x%x hwbuf 0x%x ddp_offset 
%u", + rcv_nxt - rx_hdr_offset, ddp_flags, ddp_offset); + T3_TRACE2(TB(q), + "tcb_rpl_as_ddp_complete: flags0 0x%x flags1 0x%x", + q->buf_state[0].flags, q->buf_state[1].flags); + +} +#endif + if (__predict_false(so_no_receive(so) && m->m_pkthdr.len)) { + handle_excess_rx(toep, m); + return; + } + +#ifdef T3_TRACE + if ((int)m->m_pkthdr.len < 0) { + t3_ddp_error(so, "tcb_rpl_as_ddp_complete: neg len"); + } +#endif + if (bsp->flags & DDP_BF_NOCOPY) { +#ifdef T3_TRACE + T3_TRACE0(TB(q), + "tcb_rpl_as_ddp_complete: CANCEL UBUF"); + + if (!q->cancel_ubuf && !(sk->sk_shutdown & RCV_SHUTDOWN)) { + printk("!cancel_ubuf"); + t3_ddp_error(sk, "tcb_rpl_as_ddp_complete: !cancel_ubuf"); + } +#endif + m->m_ddp_flags = DDP_BF_PSH | DDP_BF_NOCOPY | 1; + bsp->flags &= ~(DDP_BF_NOCOPY|DDP_BF_NODATA); + q->cur_buf ^= 1; + } else if (bsp->flags & DDP_BF_NOFLIP) { + + m->m_ddp_flags = 1; /* always a kernel buffer */ + + /* now HW buffer carries a user buffer */ + bsp->flags &= ~DDP_BF_NOFLIP; + bsp->flags |= DDP_BF_NOCOPY; + + /* It is possible that the CPL_GET_TCB_RPL doesn't indicate + * any new data in which case we're done. If in addition the + * offset is 0, then there wasn't a completion for the kbuf + * and we need to decrement the posted count. + */ + if (m->m_pkthdr.len == 0) { + if (ddp_offset == 0) { + q->kbuf_posted--; + bsp->flags |= DDP_BF_NODATA; + } + SOCKBUF_UNLOCK(&so->so_rcv); + + m_free(m); + return; + } + } else { + SOCKBUF_UNLOCK(&so->so_rcv); + /* This reply is for a CPL_GET_TCB_RPL to cancel the UBUF DDP, + * but it got here way late and nobody cares anymore. 
+ */ + m_free(m); + return; + } + + m->m_ddp_gl = (unsigned char *)bsp->gl; + m->m_flags |= M_DDP; + m->m_seq = tp->rcv_nxt; + tp->rcv_nxt += m->m_pkthdr.len; + tp->t_rcvtime = ticks; +#ifdef T3_TRACE + T3_TRACE3(TB(q), + "tcb_rpl_as_ddp_complete: seq 0x%x hwbuf %u lskb->len %u", + m->m_seq, q->cur_buf, m->m_pkthdr.len); +#endif + CTR3(KTR_TOM, "tcb_rpl_as_ddp_complete: seq 0x%x hwbuf %u m->m_pktlen %u", + m->m_seq, q->cur_buf, m->m_pkthdr.len); + if (m->m_pkthdr.len == 0) + q->user_ddp_pending = 0; + else + SBAPPEND(&so->so_rcv, m); + if (__predict_true((so->so_state & SS_NOFDREF) == 0)) + sorwakeup_locked(so); + else + SOCKBUF_UNLOCK(&so->so_rcv); +} + +/* + * Process a CPL_GET_TCB_RPL. These can also be generated by the DDP code, + * in that case they are similar to DDP completions. + */ +static int +do_get_tcb_rpl(struct t3cdev *cdev, struct mbuf *m, void *ctx) +{ + struct toepcb *toep = (struct toepcb *)ctx; + + /* OK if socket doesn't exist */ + if (toep == NULL) { + printf("null toep in do_get_tcb_rpl\n"); + return (CPL_RET_BUF_DONE); + } + + INP_LOCK(toep->tp_tp->t_inpcb); + tcb_rpl_as_ddp_complete(toep, m); + INP_UNLOCK(toep->tp_tp->t_inpcb); + + return (0); +} + +static void +handle_ddp_data(struct toepcb *toep, struct mbuf *m) +{ + struct tcpcb *tp = toep->tp_tp; + struct socket *so = toeptoso(toep); + struct ddp_state *q; + struct ddp_buf_state *bsp; + struct cpl_rx_data *hdr = cplhdr(m); + unsigned int rcv_nxt = ntohl(hdr->seq); + + if (tp->rcv_nxt == rcv_nxt) + return; + + INP_LOCK_ASSERT(tp->t_inpcb); + SOCKBUF_LOCK(&so->so_rcv); + q = &toep->tp_ddp_state; + bsp = &q->buf_state[q->cur_buf]; + KASSERT(SEQ_GT(rcv_nxt, tp->rcv_nxt), ("tp->rcv_nxt=0x%08x decreased rcv_nxt=0x08%x", + rcv_nxt, tp->rcv_nxt)); + m->m_len = m->m_pkthdr.len = rcv_nxt - tp->rcv_nxt; + KASSERT(m->m_len > 0, ("%s m_len=%d", __FUNCTION__, m->m_len)); + CTR3(KTR_TOM, "rcv_nxt=0x%x tp->rcv_nxt=0x%x len=%d", + rcv_nxt, tp->rcv_nxt, m->m_pkthdr.len); + +#ifdef T3_TRACE + if 
((int)m->m_pkthdr.len < 0) { + t3_ddp_error(so, "handle_ddp_data: neg len"); + } +#endif + + m->m_ddp_gl = (unsigned char *)bsp->gl; + m->m_flags |= M_DDP; + m->m_cur_offset = bsp->cur_offset; + m->m_ddp_flags = DDP_BF_PSH | (bsp->flags & DDP_BF_NOCOPY) | 1; + if (bsp->flags & DDP_BF_NOCOPY) + bsp->flags &= ~DDP_BF_NOCOPY; + + m->m_seq = tp->rcv_nxt; + tp->rcv_nxt = rcv_nxt; + bsp->cur_offset += m->m_pkthdr.len; + if (!(bsp->flags & DDP_BF_NOFLIP)) + q->cur_buf ^= 1; + /* + * For now, don't re-enable DDP after a connection fell out of DDP + * mode. + */ + q->ubuf_ddp_ready = 0; + SOCKBUF_UNLOCK(&so->so_rcv); } /* @@ -1481,32 +1944,33 @@ new_rx_data(struct toepcb *toep, struct mbuf *m) INP_LOCK(tp->t_inpcb); -#ifdef notyet - if (__predict_false(sk_no_receive(sk))) { - handle_excess_rx(so, skb); + if (__predict_false(so_no_receive(so))) { + handle_excess_rx(toep, m); + INP_UNLOCK(tp->t_inpcb); + TRACE_EXIT; return; } - if (ULP_MODE(tp) == ULP_MODE_TCPDDP) - handle_ddp_data(so, skb); + if (toep->tp_ulp_mode == ULP_MODE_TCPDDP) + handle_ddp_data(toep, m); + + m->m_seq = ntohl(hdr->seq); + m->m_ulp_mode = 0; /* for iSCSI */ - TCP_SKB_CB(skb)->seq = ntohl(hdr->seq); - TCP_SKB_CB(skb)->flags = 0; - skb_ulp_mode(skb) = 0; /* for iSCSI */ -#endif #if VALIDATE_SEQ - if (__predict_false(TCP_SKB_CB(skb)->seq != tp->rcv_nxt)) { - printk(KERN_ERR + if (__predict_false(m->m_seq != tp->rcv_nxt)) { + log(LOG_ERR, "%s: TID %u: Bad sequence number %u, expected %u\n", - TOE_DEV(sk)->name, TID(tp), TCP_SKB_CB(skb)->seq, + TOE_DEV(toeptoso(toep))->name, toep->tp_tid, m->m_seq, tp->rcv_nxt); - __kfree_skb(skb); + m_freem(m); + INP_UNLOCK(tp->t_inpcb); return; } #endif m_adj(m, sizeof(*hdr)); -#ifdef notyet +#ifdef URGENT_DATA_SUPPORTED /* * We don't handle urgent data yet */ @@ -1521,8 +1985,8 @@ new_rx_data(struct toepcb *toep, struct mbuf *m) toep->tp_delack_mode = hdr->dack_mode; toep->tp_delack_seq = tp->rcv_nxt; } - - DPRINTF("appending mbuf=%p pktlen=%d m_len=%d len=%d\n", m, 
m->m_pkthdr.len, m->m_len, len); + CTR6(KTR_TOM, "appending mbuf=%p pktlen=%d m_len=%d len=%d rcv_nxt=0x%x enqueued_bytes=%d", + m, m->m_pkthdr.len, m->m_len, len, tp->rcv_nxt, toep->tp_enqueued_bytes); if (len < m->m_pkthdr.len) m->m_pkthdr.len = m->m_len = len; @@ -1532,21 +1996,29 @@ new_rx_data(struct toepcb *toep, struct mbuf *m) toep->tp_enqueued_bytes += m->m_pkthdr.len; #ifdef T3_TRACE T3_TRACE2(TIDTB(sk), - "new_rx_data: seq 0x%x len %u", - TCP_SKB_CB(skb)->seq, skb->len); + "new_rx_data: seq 0x%x len %u", + m->m_seq, m->m_pkthdr.len); #endif + INP_UNLOCK(tp->t_inpcb); SOCKBUF_LOCK(&so->so_rcv); if (sb_notify(&so->so_rcv)) DPRINTF("rx_data so=%p flags=0x%x len=%d\n", so, so->so_rcv.sb_flags, m->m_pkthdr.len); - sbappend_locked(&so->so_rcv, m); - KASSERT(so->so_rcv.sb_cc < so->so_rcv.sb_mbmax, + SBAPPEND(&so->so_rcv, m); + +#ifdef notyet + /* + * We're giving too many credits to the card - but disable this check so we can keep on moving :-| + * + */ + KASSERT(so->so_rcv.sb_cc < (so->so_rcv.sb_mbmax << 1), ("so=%p, data contents exceed mbmax, sb_cc=%d sb_mbmax=%d", so, so->so_rcv.sb_cc, so->so_rcv.sb_mbmax)); +#endif - INP_UNLOCK(tp->t_inpcb); - DPRINTF("sb_cc=%d sb_mbcnt=%d\n", + + CTR2(KTR_TOM, "sb_cc=%d sb_mbcnt=%d", so->so_rcv.sb_cc, so->so_rcv.sb_mbcnt); if (__predict_true((so->so_state & SS_NOFDREF) == 0)) @@ -1571,22 +2043,26 @@ do_rx_data(struct t3cdev *cdev, struct mbuf *m, void *ctx) } static void -new_rx_data_ddp(struct socket *so, struct mbuf *m) +new_rx_data_ddp(struct toepcb *toep, struct mbuf *m) { - struct tcpcb *tp = sototcpcb(so); - struct toepcb *toep = tp->t_toe; + struct tcpcb *tp; struct ddp_state *q; struct ddp_buf_state *bsp; struct cpl_rx_data_ddp *hdr; unsigned int ddp_len, rcv_nxt, ddp_report, end_offset, buf_idx; + struct socket *so = toeptoso(toep); + int nomoredata = 0; -#ifdef notyet - if (unlikely(sk_no_receive(sk))) { - handle_excess_rx(so, m); + tp = sototcpcb(so); + + INP_LOCK(tp->t_inpcb); + if 
(__predict_false(so_no_receive(so))) { + + handle_excess_rx(toep, m); + INP_UNLOCK(tp->t_inpcb); return; } -#endif - tp = sototcpcb(so); + q = &toep->tp_ddp_state; hdr = cplhdr(m); ddp_report = ntohl(hdr->u.ddp_report); @@ -1603,69 +2079,91 @@ new_rx_data_ddp(struct socket *so, struct mbuf *m) "new_rx_data_ddp: ddp_report 0x%x", ddp_report); #endif - + CTR4(KTR_TOM, + "new_rx_data_ddp: tp->rcv_nxt 0x%x cur_offset %u " + "hdr seq 0x%x len %u", + tp->rcv_nxt, bsp->cur_offset, ntohl(hdr->seq), + ntohs(hdr->len)); + CTR3(KTR_TOM, + "new_rx_data_ddp: offset %u ddp_report 0x%x buf_idx=%d", + G_DDP_OFFSET(ddp_report), ddp_report, buf_idx); + ddp_len = ntohs(hdr->len); rcv_nxt = ntohl(hdr->seq) + ddp_len; - /* - * Overload to store old rcv_next - */ - m->m_pkthdr.csum_data = tp->rcv_nxt; + m->m_seq = tp->rcv_nxt; tp->rcv_nxt = rcv_nxt; + tp->t_rcvtime = ticks; /* * Store the length in m->m_len. We are changing the meaning of * m->m_len here, we need to be very careful that nothing from now on * interprets ->len of this packet the usual way. */ - m->m_len = tp->rcv_nxt - m->m_pkthdr.csum_data; - + m->m_len = m->m_pkthdr.len = rcv_nxt - m->m_seq; + INP_UNLOCK(tp->t_inpcb); + CTR3(KTR_TOM, + "new_rx_data_ddp: m_len=%u rcv_next 0x%08x rcv_nxt_prev=0x%08x ", + m->m_len, rcv_nxt, m->m_seq); /* * Figure out where the new data was placed in the buffer and store it * in when. Assumes the buffer offset starts at 0, consumer needs to * account for page pod's pg_offset. */ end_offset = G_DDP_OFFSET(ddp_report) + ddp_len; -#ifdef notyet - TCP_SKB_CB(skb)->when = end_offset - skb->len; + m->m_cur_offset = end_offset - m->m_pkthdr.len; - /* - * We store in mac.raw the address of the gather list where the - * placement happened. 
- */ - skb->mac.raw = (unsigned char *)bsp->gl; -#endif + SOCKBUF_LOCK(&so->so_rcv); + m->m_ddp_gl = (unsigned char *)bsp->gl; + m->m_flags |= M_DDP; bsp->cur_offset = end_offset; + toep->tp_enqueued_bytes += m->m_pkthdr.len; /* + * Length is only meaningful for kbuf + */ + if (!(bsp->flags & DDP_BF_NOCOPY)) + KASSERT(m->m_len <= bsp->gl->dgl_length, + ("length received exceeds ddp pages: len=%d dgl_length=%d", + m->m_len, bsp->gl->dgl_length)); + + KASSERT(m->m_len > 0, ("%s m_len=%d", __FUNCTION__, m->m_len)); + KASSERT(m->m_next == NULL, ("m_len=%p", m->m_next)); + + + /* * Bit 0 of flags stores whether the DDP buffer is completed. * Note that other parts of the code depend on this being in bit 0. */ if ((bsp->flags & DDP_BF_NOINVAL) && end_offset != bsp->gl->dgl_length) { -#if 0 - TCP_SKB_CB(skb)->flags = 0; /* potential spurious completion */ -#endif panic("spurious ddp completion"); } else { - m->m_pkthdr.csum_flags = !!(ddp_report & F_DDP_BUF_COMPLETE); - if (m->m_pkthdr.csum_flags && !(bsp->flags & DDP_BF_NOFLIP)) + m->m_ddp_flags = !!(ddp_report & F_DDP_BUF_COMPLETE); + if (m->m_ddp_flags && !(bsp->flags & DDP_BF_NOFLIP)) q->cur_buf ^= 1; /* flip buffers */ } if (bsp->flags & DDP_BF_NOCOPY) { - m->m_pkthdr.csum_flags |= (bsp->flags & DDP_BF_NOCOPY); + m->m_ddp_flags |= (bsp->flags & DDP_BF_NOCOPY); bsp->flags &= ~DDP_BF_NOCOPY; } if (ddp_report & F_DDP_PSH) - m->m_pkthdr.csum_flags |= DDP_BF_PSH; + m->m_ddp_flags |= DDP_BF_PSH; + if (nomoredata) + m->m_ddp_flags |= DDP_BF_NODATA; + + if (__predict_false(G_DDP_DACK_MODE(ddp_report) != toep->tp_delack_mode)) { + toep->tp_delack_mode = G_DDP_DACK_MODE(ddp_report); + toep->tp_delack_seq = tp->rcv_nxt; + } + + SBAPPEND(&so->so_rcv, m); - tp->t_rcvtime = ticks; - sbappendstream_locked(&so->so_rcv, m); -#ifdef notyet - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_data_ready(sk, 0); -#endif + if ((so->so_state & SS_NOFDREF) == 0) + sorwakeup_locked(so); + else + SOCKBUF_UNLOCK(&so->so_rcv); } #define DDP_ERR 
(F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\ @@ -1680,7 +2178,6 @@ static int do_rx_data_ddp(struct t3cdev *cdev, struct mbuf *m, void *ctx) { struct toepcb *toep = ctx; - struct socket *so = toeptoso(toep); const struct cpl_rx_data_ddp *hdr = cplhdr(m); VALIDATE_SOCK(so); @@ -1688,40 +2185,50 @@ do_rx_data_ddp(struct t3cdev *cdev, struct mbuf *m, void *ctx) if (__predict_false(ntohl(hdr->ddpvld_status) & DDP_ERR)) { log(LOG_ERR, "RX_DATA_DDP for TID %u reported error 0x%x\n", GET_TID(hdr), G_DDP_VALID(ntohl(hdr->ddpvld_status))); - return CPL_RET_BUF_DONE; + return (CPL_RET_BUF_DONE); } #if 0 skb->h.th = tcphdr_skb->h.th; #endif - new_rx_data_ddp(so, m); + new_rx_data_ddp(toep, m); return (0); } static void -process_ddp_complete(struct socket *so, struct mbuf *m) +process_ddp_complete(struct toepcb *toep, struct mbuf *m) { - struct tcpcb *tp = sototcpcb(so); - struct toepcb *toep = tp->t_toe; + struct tcpcb *tp = toep->tp_tp; + struct socket *so = toeptoso(toep); struct ddp_state *q; struct ddp_buf_state *bsp; struct cpl_rx_ddp_complete *hdr; unsigned int ddp_report, buf_idx, when; + int nomoredata = 0; -#ifdef notyet - if (unlikely(sk_no_receive(sk))) { - handle_excess_rx(sk, skb); + INP_LOCK(tp->t_inpcb); + if (__predict_false(so_no_receive(so))) { + struct inpcb *inp = sotoinpcb(so); + + handle_excess_rx(toep, m); + INP_UNLOCK(inp); return; } -#endif q = &toep->tp_ddp_state; hdr = cplhdr(m); ddp_report = ntohl(hdr->ddp_report); buf_idx = (ddp_report >> S_DDP_BUF_IDX) & 1; - bsp = &q->buf_state[buf_idx]; + m->m_pkthdr.csum_data = tp->rcv_nxt; + + SOCKBUF_LOCK(&so->so_rcv); + bsp = &q->buf_state[buf_idx]; when = bsp->cur_offset; - m->m_len = G_DDP_OFFSET(ddp_report) - when; + m->m_len = m->m_pkthdr.len = G_DDP_OFFSET(ddp_report) - when; + tp->rcv_nxt += m->m_len; + tp->t_rcvtime = ticks; + INP_UNLOCK(tp->t_inpcb); + KASSERT(m->m_len > 0, ("%s m_len=%d", __FUNCTION__, m->m_len)); #ifdef T3_TRACE T3_TRACE5(TIDTB(sk), "process_ddp_complete: 
tp->rcv_nxt 0x%x cur_offset %u " @@ -1729,34 +2236,47 @@ process_ddp_complete(struct socket *so, struct mbuf *m) tp->rcv_nxt, bsp->cur_offset, ddp_report, G_DDP_OFFSET(ddp_report), skb->len); #endif - + CTR5(KTR_TOM, + "process_ddp_complete: tp->rcv_nxt 0x%x cur_offset %u " + "ddp_report 0x%x offset %u, len %u", + tp->rcv_nxt, bsp->cur_offset, ddp_report, + G_DDP_OFFSET(ddp_report), m->m_len); + bsp->cur_offset += m->m_len; - if (!(bsp->flags & DDP_BF_NOFLIP)) + if (!(bsp->flags & DDP_BF_NOFLIP)) { q->cur_buf ^= 1; /* flip buffers */ - + if (G_DDP_OFFSET(ddp_report) < q->kbuf[0]->dgl_length) + nomoredata=1; + } + #ifdef T3_TRACE T3_TRACE4(TIDTB(sk), "process_ddp_complete: tp->rcv_nxt 0x%x cur_offset %u " "ddp_report %u offset %u", tp->rcv_nxt, bsp->cur_offset, ddp_report, G_DDP_OFFSET(ddp_report)); -#endif -#if 0 - skb->mac.raw = (unsigned char *)bsp->gl; -#endif - m->m_pkthdr.csum_flags = (bsp->flags & DDP_BF_NOCOPY) | 1; +#endif + CTR4(KTR_TOM, + "process_ddp_complete: tp->rcv_nxt 0x%x cur_offset %u " + "ddp_report %u offset %u", + tp->rcv_nxt, bsp->cur_offset, ddp_report, + G_DDP_OFFSET(ddp_report)); + + m->m_ddp_gl = (unsigned char *)bsp->gl; + m->m_flags |= M_DDP; + m->m_ddp_flags = (bsp->flags & DDP_BF_NOCOPY) | 1; if (bsp->flags & DDP_BF_NOCOPY) bsp->flags &= ~DDP_BF_NOCOPY; - m->m_pkthdr.csum_data = tp->rcv_nxt; - tp->rcv_nxt += m->m_len; + if (nomoredata) + m->m_ddp_flags |= DDP_BF_NODATA; - tp->t_rcvtime = ticks; - sbappendstream_locked(&so->so_rcv, m); -#ifdef notyet - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_data_ready(sk, 0); -#endif + SBAPPEND(&so->so_rcv, m); + + if ((so->so_state & SS_NOFDREF) == 0) + sorwakeup_locked(so); + else + SOCKBUF_UNLOCK(&so->so_rcv); } /* @@ -1766,13 +2286,12 @@ static int do_rx_ddp_complete(struct t3cdev *cdev, struct mbuf *m, void *ctx) { struct toepcb *toep = ctx; - struct socket *so = toeptoso(toep); VALIDATE_SOCK(so); #if 0 skb->h.th = tcphdr_skb->h.th; #endif - process_ddp_complete(so, m); + process_ddp_complete(toep, 
m); return (0); } @@ -1801,6 +2320,65 @@ enter_timewait(struct socket *so) } /* + * For TCP DDP a PEER_CLOSE may also be an implicit RX_DDP_COMPLETE. This + * function deals with the data that may be reported along with the FIN. + * Returns -1 if no further processing of the PEER_CLOSE is needed, >= 0 to + * perform normal FIN-related processing. In the latter case 1 indicates that + * there was an implicit RX_DDP_COMPLETE and the skb should not be freed, 0 the + * skb can be freed. + */ +static int +handle_peer_close_data(struct socket *so, struct mbuf *m) +{ + struct tcpcb *tp = sototcpcb(so); + struct toepcb *toep = tp->t_toe; + struct ddp_state *q; + struct ddp_buf_state *bsp; + struct cpl_peer_close *req = cplhdr(m); + unsigned int rcv_nxt = ntohl(req->rcv_nxt) - 1; /* exclude FIN */ + + if (tp->rcv_nxt == rcv_nxt) /* no data */ + return (0); + + if (__predict_false(so_no_receive(so))) { + handle_excess_rx(toep, m); + + /* + * Although we discard the data we want to process the FIN so + * that PEER_CLOSE + data behaves the same as RX_DATA_DDP + + * PEER_CLOSE without data. In particular this PEER_CLOSE + * may be what will close the connection. We return 1 because + * handle_excess_rx() already freed the packet. 
+ */ + return (1); + } + + INP_LOCK_ASSERT(tp->t_inpcb); + q = &toep->tp_ddp_state; + SOCKBUF_LOCK(&so->so_rcv); + bsp = &q->buf_state[q->cur_buf]; + m->m_len = m->m_pkthdr.len = rcv_nxt - tp->rcv_nxt; + KASSERT(m->m_len > 0, ("%s m_len=%d", __FUNCTION__, m->m_len)); + m->m_ddp_gl = (unsigned char *)bsp->gl; + m->m_flags |= M_DDP; + m->m_cur_offset = bsp->cur_offset; + m->m_ddp_flags = + DDP_BF_PSH | (bsp->flags & DDP_BF_NOCOPY) | 1; + m->m_seq = tp->rcv_nxt; + tp->rcv_nxt = rcv_nxt; + bsp->cur_offset += m->m_pkthdr.len; + if (!(bsp->flags & DDP_BF_NOFLIP)) + q->cur_buf ^= 1; + tp->t_rcvtime = ticks; + SBAPPEND(&so->so_rcv, m); + if (__predict_true((so->so_state & SS_NOFDREF) == 0)) + sorwakeup_locked(so); + else + SOCKBUF_UNLOCK(&so->so_rcv); + return (1); +} + +/* * Handle a peer FIN. */ static void @@ -1808,9 +2386,8 @@ do_peer_fin(struct socket *so, struct mbuf *m) { struct tcpcb *tp = sototcpcb(so); struct toepcb *toep = tp->t_toe; - int keep = 0, dead = (so->so_state & SS_NOFDREF); - - DPRINTF("do_peer_fin state=%d dead=%d\n", tp->t_state, !!dead); + int keep = 0; + DPRINTF("do_peer_fin state=%d\n", tp->t_state); #ifdef T3_TRACE T3_TRACE0(TIDTB(sk),"do_peer_fin:"); @@ -1821,20 +2398,32 @@ do_peer_fin(struct socket *so, struct mbuf *m) goto out; } - -#ifdef notyet - if (ULP_MODE(tp) == ULP_MODE_TCPDDP) { - keep = handle_peer_close_data(so, skb); - if (keep < 0) - return; - } - sk->sk_shutdown |= RCV_SHUTDOWN; - sock_set_flag(so, SOCK_DONE); -#endif INP_INFO_WLOCK(&tcbinfo); INP_LOCK(tp->t_inpcb); - if (TCPS_HAVERCVDFIN(tp->t_state) == 0) + if (toep->tp_ulp_mode == ULP_MODE_TCPDDP) { + keep = handle_peer_close_data(so, m); + if (keep < 0) { + INP_INFO_WUNLOCK(&tcbinfo); + INP_UNLOCK(tp->t_inpcb); + return; + } + } + if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { socantrcvmore(so); + /* + * If connection is half-synchronized + * (ie NEEDSYN flag on) then delay ACK, + * so it may be piggybacked when SYN is sent. 
+ * Otherwise, since we received a FIN then no + * more input can be expected, send ACK now. + */ + if (tp->t_flags & TF_NEEDSYN) + tp->t_flags |= TF_DELACK; + else + tp->t_flags |= TF_ACKNOW; + tp->rcv_nxt++; + } + switch (tp->t_state) { case TCPS_SYN_RECEIVED: tp->t_starttime = ticks; @@ -1858,8 +2447,9 @@ do_peer_fin(struct socket *so, struct mbuf *m) t3_release_offload_resources(toep); if (toep->tp_flags & TP_ABORT_RPL_PENDING) { tp = tcp_close(tp); - } else + } else { enter_timewait(so); + } break; default: log(LOG_ERR, @@ -1870,23 +2460,17 @@ do_peer_fin(struct socket *so, struct mbuf *m) if (tp) INP_UNLOCK(tp->t_inpcb); - if (!dead) { - DPRINTF("waking up waiters on %p rcv_notify=%d flags=0x%x\n", so, sb_notify(&so->so_rcv), so->so_rcv.sb_flags); - - sorwakeup(so); - sowwakeup(so); - wakeup(&so->so_timeo); -#ifdef notyet - sk->sk_state_change(sk); + DPRINTF("waking up waiters on %p rcv_notify=%d flags=0x%x\n", so, sb_notify(&so->so_rcv), so->so_rcv.sb_flags); - /* Do not send POLL_HUP for half duplex close. */ - if ((sk->sk_shutdown & SEND_SHUTDOWN) || - sk->sk_state == TCP_CLOSE) - sk_wake_async(so, 1, POLL_HUP); - else - sk_wake_async(so, 1, POLL_IN); +#ifdef notyet + /* Do not send POLL_HUP for half duplex close. 
*/ + if ((sk->sk_shutdown & SEND_SHUTDOWN) || + sk->sk_state == TCP_CLOSE) + sk_wake_async(so, 1, POLL_HUP); + else + sk_wake_async(so, 1, POLL_IN); #endif - } + out: if (!keep) m_free(m); @@ -1929,8 +2513,10 @@ process_close_con_rpl(struct socket *so, struct mbuf *m) if (toep->tp_flags & TP_ABORT_RPL_PENDING) { tp = tcp_close(tp); - } else + } else { enter_timewait(so); + soisdisconnected(so); + } break; case TCPS_LAST_ACK: /* @@ -1942,21 +2528,29 @@ process_close_con_rpl(struct socket *so, struct mbuf *m) tp = tcp_close(tp); break; case TCPS_FIN_WAIT_1: -#ifdef notyet - dst_confirm(sk->sk_dst_cache); -#endif - soisdisconnecting(so); - - if ((so->so_state & SS_NOFDREF) == 0) { - /* - * Wake up lingering close - */ - sowwakeup(so); - sorwakeup(so); - wakeup(&so->so_timeo); - } else if ((so->so_options & SO_LINGER) && so->so_linger == 0 && + /* + * If we can't receive any more + * data, then closing user can proceed. + * Starting the timer is contrary to the + * specification, but if we don't get a FIN + * we'll hang forever. + * + * XXXjl: + * we should release the tp also, and use a + * compressed state. + */ + if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { + int timeout; + + soisdisconnected(so); + timeout = (tcp_fast_finwait2_recycle) ? 
+ tcp_finwait2_timeout : tcp_maxidle; + tcp_timer_activate(tp, TT_2MSL, timeout); + } + tp->t_state = TCPS_FIN_WAIT_2; + if ((so->so_options & SO_LINGER) && so->so_linger == 0 && (toep->tp_flags & TP_ABORT_SHUTDOWN) == 0) { - tp = cxgb_tcp_drop(tp, 0); + tp = tcp_drop(tp, 0); } break; @@ -1970,7 +2564,7 @@ process_close_con_rpl(struct socket *so, struct mbuf *m) if (tp) INP_UNLOCK(tp->t_inpcb); out: - m_free(m); + m_freem(m); } /* @@ -2006,6 +2600,8 @@ process_abort_rpl(struct socket *so, struct mbuf *m) "process_abort_rpl: GTS rpl pending %d", sock_flag(sk, ABORT_RPL_PENDING)); #endif + + INP_INFO_WLOCK(&tcbinfo); INP_LOCK(tp->t_inpcb); if (toep->tp_flags & TP_ABORT_RPL_PENDING) { @@ -2020,16 +2616,14 @@ process_abort_rpl(struct socket *so, struct mbuf *m) !is_t3a(TOE_DEV(so))) { if (toep->tp_flags & TP_ABORT_REQ_RCVD) panic("TP_ABORT_REQ_RCVD set"); - INP_INFO_WLOCK(&tcbinfo); - INP_LOCK(tp->t_inpcb); t3_release_offload_resources(toep); tp = tcp_close(tp); - INP_INFO_WUNLOCK(&tcbinfo); } } } if (tp) INP_UNLOCK(tp->t_inpcb); + INP_INFO_WUNLOCK(&tcbinfo); m_free(m); } @@ -2089,7 +2683,7 @@ discard: } /* - * Convert the status code of an ABORT_REQ into a Linux error code. Also + * Convert the status code of an ABORT_REQ into a FreeBSD error code. Also * indicate whether RST should be sent in response. */ static int @@ -2289,10 +2883,8 @@ process_abort_req(struct socket *so, struct mbuf *m, struct toedev *tdev) (is_t3a(TOE_DEV(so)) && (toep->tp_flags & TP_CLOSE_CON_REQUESTED))) { so->so_error = abort_status_to_errno(so, req->status, &rst_status); -#if 0 - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_error_report(sk); -#endif + if (__predict_true((so->so_state & SS_NOFDREF) == 0)) + sorwakeup(so); /* * SYN_RECV needs special processing. If abort_syn_rcv() * returns 0 is has taken care of the abort. 
@@ -2513,7 +3105,8 @@ syncache_add_accept_req(struct cpl_pass_accept_req *req, struct socket *lso, str struct tcphdr th; struct inpcb *inp; int mss, wsf, sack, ts; - + uint32_t rcv_isn = ntohl(req->rcv_isn); + bzero(&to, sizeof(struct tcpopt)); inp = sotoinpcb(lso); @@ -2522,10 +3115,11 @@ syncache_add_accept_req(struct cpl_pass_accept_req *req, struct socket *lso, str */ inc.inc_fport = th.th_sport = req->peer_port; inc.inc_lport = th.th_dport = req->local_port; - toep->tp_iss = th.th_seq = req->rcv_isn; + th.th_seq = req->rcv_isn; th.th_flags = TH_SYN; - toep->tp_delack_seq = toep->tp_rcv_wup = toep->tp_copied_seq = ntohl(req->rcv_isn); + toep->tp_iss = toep->tp_delack_seq = toep->tp_rcv_wup = toep->tp_copied_seq = rcv_isn + 1; + inc.inc_isipv6 = 0; inc.inc_len = 0; @@ -2543,7 +3137,6 @@ syncache_add_accept_req(struct cpl_pass_accept_req *req, struct socket *lso, str to.to_mss = mss; to.to_wscale = wsf; to.to_flags = (mss ? TOF_MSS : 0) | (wsf ? TOF_SCALE : 0) | (ts ? TOF_TS : 0) | (sack ? TOF_SACKPERM : 0); - INP_INFO_WLOCK(&tcbinfo); INP_LOCK(inp); syncache_offload_add(&inc, &to, &th, inp, &lso, &cxgb_toe_usrreqs, toep); @@ -2654,34 +3247,31 @@ process_pass_accept_req(struct socket *so, struct mbuf *m, struct toedev *tdev, newtoep->tp_flags = TP_SYN_RCVD; newtoep->tp_tid = tid; newtoep->tp_toedev = tdev; + tp->rcv_wnd = select_rcv_wnd(tdev, so); - printf("inserting tid=%d\n", tid); cxgb_insert_tid(cdev, d->client, newtoep, tid); SOCK_LOCK(so); LIST_INSERT_HEAD(&lctx->synq_head, newtoep, synq_entry); SOCK_UNLOCK(so); - - if (lctx->ulp_mode) { + newtoep->tp_ulp_mode = TOM_TUNABLE(tdev, ddp) && !(so->so_options & SO_NO_DDP) && + tp->rcv_wnd >= MIN_DDP_RCV_WIN ? 
ULP_MODE_TCPDDP : 0; + + if (newtoep->tp_ulp_mode) { ddp_mbuf = m_gethdr(M_NOWAIT, MT_DATA); - if (!ddp_mbuf) + if (ddp_mbuf == NULL) newtoep->tp_ulp_mode = 0; - else - newtoep->tp_ulp_mode = lctx->ulp_mode; } - + + CTR4(KTR_TOM, "ddp=%d rcv_wnd=%ld min_win=%d ulp_mode=%d", + TOM_TUNABLE(tdev, ddp), tp->rcv_wnd, MIN_DDP_RCV_WIN, newtoep->tp_ulp_mode); set_arp_failure_handler(reply_mbuf, pass_accept_rpl_arp_failure); - - DPRINTF("adding request to syn cache\n"); - /* * XXX workaround for lack of syncache drop */ toepcb_hold(newtoep); syncache_add_accept_req(req, so, newtoep); - - rpl = cplhdr(reply_mbuf); reply_mbuf->m_pkthdr.len = reply_mbuf->m_len = sizeof(*rpl); @@ -2692,50 +3282,34 @@ process_pass_accept_req(struct socket *so, struct mbuf *m, struct toedev *tdev, rpl->rsvd = rpl->opt2; /* workaround for HW bug */ rpl->peer_ip = req->peer_ip; // req->peer_ip is not overwritten - DPRINTF("accept smt_idx=%d\n", e->smt_idx); - rpl->opt0h = htonl(calc_opt0h(so, select_mss(td, NULL, dst->rt_ifp->if_mtu)) | V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx)); - rpl->opt0l_status = htonl(calc_opt0l(so, lctx->ulp_mode) | + rpl->opt0l_status = htonl(calc_opt0l(so, newtoep->tp_ulp_mode) | CPL_PASS_OPEN_ACCEPT); DPRINTF("opt0l_status=%08x\n", rpl->opt0l_status); - m_set_priority(reply_mbuf, mkprio(CPL_PRIORITY_SETUP, so)); - -#ifdef DEBUG_PRINT - { - int i; - - DPRINTF("rpl:\n"); - uint32_t *rplbuf = mtod(reply_mbuf, uint32_t *); - - for (i = 0; i < sizeof(*rpl)/sizeof(uint32_t); i++) - DPRINTF("[%d] %08x\n", i, rplbuf[i]); - } -#endif - + m_set_priority(reply_mbuf, mkprio(CPL_PRIORITY_SETUP, newtoep)); l2t_send(cdev, reply_mbuf, e); m_free(m); -#ifdef notyet - /* - * XXX this call path has to be converted to not depend on sockets - */ - if (newtoep->tp_ulp_mode) - __set_tcb_field(newso, ddp_mbuf, W_TCB_RX_DDP_FLAGS, + if (newtoep->tp_ulp_mode) { + __set_tcb_field(newtoep, ddp_mbuf, W_TCB_RX_DDP_FLAGS, V_TF_DDP_OFF(1) | TP_DDP_TIMER_WORKAROUND_MASK, V_TF_DDP_OFF(1) | - 
TP_DDP_TIMER_WORKAROUND_VAL, 1); + TP_DDP_TIMER_WORKAROUND_VAL, 1); + } else + printf("not offloading\n"); + + -#endif return; reject: if (tdev->tod_ttid == TOE_ID_CHELSIO_T3) mk_pass_accept_rpl(reply_mbuf, m); else - mk_tid_release(reply_mbuf, NULL, tid); + mk_tid_release(reply_mbuf, newtoep, tid); cxgb_ofld_send(cdev, reply_mbuf); m_free(m); out: @@ -2793,7 +3367,7 @@ do_pass_accept_req(struct t3cdev *cdev, struct mbuf *m, void *ctx) /* * Called when a connection is established to translate the TCP options - * reported by HW to Linux's native format. + * reported by HW to FreeBSD's native format. */ static void assign_rxopt(struct socket *so, unsigned int opt) @@ -2808,8 +3382,9 @@ assign_rxopt(struct socket *so, unsigned int opt) tp->t_flags |= G_TCPOPT_TSTAMP(opt) ? TF_RCVD_TSTMP : 0; tp->t_flags |= G_TCPOPT_SACK(opt) ? TF_SACK_PERMIT : 0; tp->t_flags |= G_TCPOPT_WSCALE_OK(opt) ? TF_RCVD_SCALE : 0; - if (tp->t_flags & TF_RCVD_SCALE) - tp->rcv_scale = 0; + if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == + (TF_RCVD_SCALE|TF_REQ_SCALE)) + tp->rcv_scale = tp->request_r_scale; } /* @@ -2831,8 +3406,6 @@ make_established(struct socket *so, u32 snd_isn, unsigned int opt) #if 0 inet_sk(sk)->id = tp->write_seq ^ jiffies; #endif - - /* * XXX not clear what rcv_wup maps to */ @@ -2851,7 +3424,9 @@ make_established(struct socket *so, u32 snd_isn, unsigned int opt) */ dst_confirm(sk->sk_dst_cache); #endif + tp->t_starttime = ticks; tp->t_state = TCPS_ESTABLISHED; + soisconnected(so); } static int @@ -2948,23 +3523,21 @@ do_pass_establish(struct t3cdev *cdev, struct mbuf *m, void *ctx) tp = sototcpcb(so); INP_LOCK(tp->t_inpcb); -#ifdef notyet - so->so_snd.sb_flags |= SB_TOE; - so->so_rcv.sb_flags |= SB_TOE; -#endif + + so->so_snd.sb_flags |= SB_NOCOALESCE; + so->so_rcv.sb_flags |= SB_NOCOALESCE; + toep->tp_tp = tp; toep->tp_flags = 0; tp->t_toe = toep; reset_wr_list(toep); - tp->rcv_wnd = select_rcv_wnd(so); - DPRINTF("rcv_wnd=%ld\n", tp->rcv_wnd); + tp->rcv_wnd = 
select_rcv_wnd(tdev, so); + tp->rcv_nxt = toep->tp_copied_seq; install_offload_ops(so); toep->tp_wr_max = toep->tp_wr_avail = TOM_TUNABLE(tdev, max_wrs); toep->tp_wr_unacked = 0; toep->tp_qset = G_QNUM(ntohl(m->m_pkthdr.csum_data)); - toep->tp_ulp_mode = TOM_TUNABLE(tdev, ddp) && !(so->so_options & SO_NO_DDP) && - tp->rcv_wnd >= MIN_DDP_RCV_WIN ? ULP_MODE_TCPDDP : 0; toep->tp_qset_idx = 0; toep->tp_mtu_idx = select_mss(td, tp, toep->tp_l2t->neigh->rt_ifp->if_mtu); @@ -2975,8 +3548,9 @@ do_pass_establish(struct t3cdev *cdev, struct mbuf *m, void *ctx) make_established(so, ntohl(req->snd_isn), ntohs(req->tcp_opt)); INP_INFO_WUNLOCK(&tcbinfo); INP_UNLOCK(tp->t_inpcb); - soisconnected(so); + CTR1(KTR_TOM, "do_pass_establish tid=%u", toep->tp_tid); + cxgb_log_tcb(cdev->adapter, toep->tp_tid); #ifdef notyet /* * XXX not sure how these checks map to us @@ -3066,14 +3640,10 @@ socket_act_establish(struct socket *so, struct mbuf *m) fixup_and_send_ofo(so); if (__predict_false(so->so_state & SS_NOFDREF)) { -#ifdef notyet - /* - * XXX not clear what should be done here - * appears to correspond to sorwakeup_locked + /* + * XXX does this even make sense? 
*/ - sk->sk_state_change(sk); - sk_wake_async(so, 0, POLL_OUT); -#endif + sorwakeup(so); } m_free(m); #ifdef notyet @@ -3095,8 +3665,7 @@ socket_act_establish(struct socket *so, struct mbuf *m) sk->sk_write_space(sk); #endif - soisconnected(so); - toep->tp_state = tp->t_state = TCPS_ESTABLISHED; + toep->tp_state = tp->t_state; tcpstat.tcps_connects++; } @@ -3139,6 +3708,9 @@ do_act_establish(struct t3cdev *cdev, struct mbuf *m, void *ctx) socket_act_establish(so, m); INP_UNLOCK(tp->t_inpcb); + CTR1(KTR_TOM, "do_act_establish tid=%u", toep->tp_tid); + cxgb_log_tcb(cdev->adapter, toep->tp_tid); + return (0); } @@ -3156,7 +3728,7 @@ wr_ack(struct toepcb *toep, struct mbuf *m) u32 snd_una = ntohl(hdr->snd_una); int bytes = 0; - DPRINTF("wr_ack: snd_una=%u credits=%d\n", snd_una, credits); + CTR2(KTR_SPARE2, "wr_ack: snd_una=%u credits=%d", snd_una, credits); INP_LOCK(tp->t_inpcb); @@ -3166,18 +3738,21 @@ wr_ack(struct toepcb *toep, struct mbuf *m) while (credits) { struct mbuf *p = peek_wr(toep); - DPRINTF("p->credits=%d p->bytes=%d\n", p->m_pkthdr.csum_data, p->m_pkthdr.len) ; if (__predict_false(!p)) { log(LOG_ERR, "%u WR_ACK credits for TID %u with " - "nothing pending, state %u\n", - credits, toep->tp_tid, tp->t_state); + "nothing pending, state %u wr_avail=%u\n", + credits, toep->tp_tid, tp->t_state, toep->tp_wr_avail); break; } + CTR2(KTR_TOM, + "wr_ack: p->credits=%d p->bytes=%d", p->m_pkthdr.csum_data, p->m_pkthdr.len); + + KASSERT(p->m_pkthdr.csum_data != 0, ("empty request still on list")); if (__predict_false(credits < p->m_pkthdr.csum_data)) { + #if DEBUG_WR > 1 struct tx_data_wr *w = cplhdr(p); -#ifdef notyet log(LOG_ERR, "TID %u got %u WR credits, need %u, len %u, " "main body %u, frags %u, seq # %u, ACK una %u," @@ -3185,8 +3760,7 @@ wr_ack(struct toepcb *toep, struct mbuf *m) toep->tp_tid, credits, p->csum, p->len, p->len - p->data_len, skb_shinfo(p)->nr_frags, ntohl(w->sndseq), snd_una, ntohl(hdr->snd_nxt), - WR_AVAIL(tp), count_pending_wrs(tp) - 
credits); -#endif + toep->tp_wr_avail, count_pending_wrs(tp) - credits); #endif p->m_pkthdr.csum_data -= credits; break; @@ -3194,7 +3768,9 @@ wr_ack(struct toepcb *toep, struct mbuf *m) dequeue_wr(toep); credits -= p->m_pkthdr.csum_data; bytes += p->m_pkthdr.len; - DPRINTF("done with wr of %d bytes\n", p->m_pkthdr.len); + CTR3(KTR_TOM, + "wr_ack: done with wr of %d bytes remain credits=%d wr credits=%d", + p->m_pkthdr.len, credits, p->m_pkthdr.csum_data); m_free(p); } @@ -3228,7 +3804,7 @@ wr_ack(struct toepcb *toep, struct mbuf *m) toep->tp_flags &= ~TP_TX_WAIT_IDLE; } if (bytes) { - DPRINTF("sbdrop(%d)\n", bytes); + CTR1(KTR_SPARE2, "wr_ack: sbdrop(%d)", bytes); SOCKBUF_LOCK(&so->so_snd); sbdrop_locked(&so->so_snd, bytes); sowwakeup_locked(so); @@ -3250,15 +3826,21 @@ do_wr_ack(struct t3cdev *dev, struct mbuf *m, void *ctx) { struct toepcb *toep = (struct toepcb *)ctx; - DPRINTF("do_wr_ack\n"); - dump_toepcb(toep); - VALIDATE_SOCK(so); wr_ack(toep, m); return 0; } +/* + * Handler for TRACE_PKT CPL messages. Just sink these packets. 
+ */ +static int +do_trace_pkt(struct t3cdev *dev, struct mbuf *m, void *ctx) +{ + m_freem(m); + return 0; +} /* * Reset a connection that is on a listener's SYN queue or accept queue, @@ -3320,6 +3902,336 @@ t3_reset_synq(struct listen_ctx *lctx) SOCK_UNLOCK(lctx->lso); } + +int +t3_setup_ppods(struct socket *so, const struct ddp_gather_list *gl, + unsigned int nppods, unsigned int tag, unsigned int maxoff, + unsigned int pg_off, unsigned int color) +{ + unsigned int i, j, pidx; + struct pagepod *p; + struct mbuf *m; + struct ulp_mem_io *req; + struct tcpcb *tp = sototcpcb(so); + struct toepcb *toep = tp->t_toe; + unsigned int tid = toep->tp_tid; + const struct tom_data *td = TOM_DATA(TOE_DEV(so)); + unsigned int ppod_addr = tag * PPOD_SIZE + td->ddp_llimit; + + CTR6(KTR_TOM, "t3_setup_ppods(gl=%p nppods=%u tag=%u maxoff=%u pg_off=%u color=%u)", + gl, nppods, tag, maxoff, pg_off, color); + + for (i = 0; i < nppods; ++i) { + m = m_gethdr_nofail(sizeof(*req) + PPOD_SIZE); + m_set_priority(m, mkprio(CPL_PRIORITY_CONTROL, toep)); + req = mtod(m, struct ulp_mem_io *); + m->m_pkthdr.len = m->m_len = sizeof(*req) + PPOD_SIZE; + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); + req->wr.wr_lo = 0; + req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(ppod_addr >> 5) | + V_ULPTX_CMD(ULP_MEM_WRITE)); + req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE / 32) | + V_ULPTX_NFLITS(PPOD_SIZE / 8 + 1)); + + p = (struct pagepod *)(req + 1); + if (__predict_false(i < nppods - NUM_SENTINEL_PPODS)) { + p->pp_vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid)); + p->pp_pgsz_tag_color = htonl(V_PPOD_TAG(tag) | + V_PPOD_COLOR(color)); + p->pp_max_offset = htonl(maxoff); + p->pp_page_offset = htonl(pg_off); + p->pp_rsvd = 0; + for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx) + p->pp_addr[j] = pidx < gl->dgl_nelem ? 
+ htobe64(VM_PAGE_TO_PHYS(gl->dgl_pages[pidx])) : 0; + } else + p->pp_vld_tid = 0; /* mark sentinel page pods invalid */ + send_or_defer(toep, m, 0); + ppod_addr += PPOD_SIZE; + } + return (0); +} + +/* + * Build a CPL_BARRIER message as payload of a ULP_TX_PKT command. + */ +static inline void +mk_cpl_barrier_ulp(struct cpl_barrier *b) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)b; + + txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); + txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*b) / 8)); + b->opcode = CPL_BARRIER; +} + +/* + * Build a CPL_GET_TCB message as payload of a ULP_TX_PKT command. + */ +static inline void +mk_get_tcb_ulp(struct cpl_get_tcb *req, unsigned int tid, unsigned int cpuno) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req; + + txpkt = (struct ulp_txpkt *)req; + txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); + txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_GET_TCB, tid)); + req->cpuno = htons(cpuno); +} + +/* + * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command. + */ +static inline void +mk_set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid, + unsigned int word, uint64_t mask, uint64_t val) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req; + + CTR4(KTR_TCB, "mk_set_tcb_field_ulp(tid=%u word=0x%x mask=%jx val=%jx", + tid, word, mask, val); + + txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); + txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); + req->reply = V_NO_REPLY(1); + req->cpu_idx = 0; + req->word = htons(word); + req->mask = htobe64(mask); + req->val = htobe64(val); +} + +/* + * Build a CPL_RX_DATA_ACK message as payload of a ULP_TX_PKT command. 
+ */ +static void +mk_rx_data_ack_ulp(struct cpl_rx_data_ack *ack, unsigned int tid, unsigned int credits) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)ack; + + txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); + txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*ack) / 8)); + OPCODE_TID(ack) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid)); + ack->credit_dack = htonl(F_RX_MODULATE | F_RX_DACK_CHANGE | + V_RX_DACK_MODE(1) | V_RX_CREDITS(credits)); +} + +void +t3_cancel_ddpbuf(struct toepcb *toep, unsigned int bufidx) +{ + unsigned int wrlen; + struct mbuf *m; + struct work_request_hdr *wr; + struct cpl_barrier *lock; + struct cpl_set_tcb_field *req; + struct cpl_get_tcb *getreq; + struct ddp_state *p = &toep->tp_ddp_state; + + SOCKBUF_LOCK_ASSERT(&toeptoso(toep)->so_rcv); + wrlen = sizeof(*wr) + sizeof(*req) + 2 * sizeof(*lock) + + sizeof(*getreq); + m = m_gethdr_nofail(wrlen); + m_set_priority(m, mkprio(CPL_PRIORITY_CONTROL, toep)); + wr = mtod(m, struct work_request_hdr *); + bzero(wr, wrlen); + + wr->wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); + m->m_pkthdr.len = m->m_len = wrlen; + + lock = (struct cpl_barrier *)(wr + 1); + mk_cpl_barrier_ulp(lock); + + req = (struct cpl_set_tcb_field *)(lock + 1); + + CTR1(KTR_TCB, "t3_cancel_ddpbuf(bufidx=%u)", bufidx); + + /* Hmmm, not sure if this actually a good thing: reactivating + * the other buffer might be an issue if it has been completed + * already. However, that is unlikely, since the fact that the UBUF + * is not completed indicates that there is no oustanding data. 
+ */ + if (bufidx == 0) + mk_set_tcb_field_ulp(req, toep->tp_tid, W_TCB_RX_DDP_FLAGS, + V_TF_DDP_ACTIVE_BUF(1) | + V_TF_DDP_BUF0_VALID(1), + V_TF_DDP_ACTIVE_BUF(1)); + else + mk_set_tcb_field_ulp(req, toep->tp_tid, W_TCB_RX_DDP_FLAGS, + V_TF_DDP_ACTIVE_BUF(1) | + V_TF_DDP_BUF1_VALID(1), 0); + + getreq = (struct cpl_get_tcb *)(req + 1); + mk_get_tcb_ulp(getreq, toep->tp_tid, toep->tp_qset); + + mk_cpl_barrier_ulp((struct cpl_barrier *)(getreq + 1)); + + /* Keep track of the number of oustanding CPL_GET_TCB requests + */ + p->get_tcb_count++; + +#ifdef T3_TRACE + T3_TRACE1(TIDTB(so), + "t3_cancel_ddpbuf: bufidx %u", bufidx); +#endif + cxgb_ofld_send(TOEP_T3C_DEV(toep), m); +} + +/** + * t3_overlay_ddpbuf - overlay an existing DDP buffer with a new one + * @sk: the socket associated with the buffers + * @bufidx: index of HW DDP buffer (0 or 1) + * @tag0: new tag for HW buffer 0 + * @tag1: new tag for HW buffer 1 + * @len: new length for HW buf @bufidx + * + * Sends a compound WR to overlay a new DDP buffer on top of an existing + * buffer by changing the buffer tag and length and setting the valid and + * active flag accordingly. The caller must ensure the new buffer is at + * least as big as the existing one. Since we typically reprogram both HW + * buffers this function sets both tags for convenience. Read the TCB to + * determine how made data was written into the buffer before the overlay + * took place. 
+ */ +void +t3_overlay_ddpbuf(struct toepcb *toep, unsigned int bufidx, unsigned int tag0, + unsigned int tag1, unsigned int len) +{ + unsigned int wrlen; + struct mbuf *m; + struct work_request_hdr *wr; + struct cpl_get_tcb *getreq; + struct cpl_set_tcb_field *req; + struct ddp_state *p = &toep->tp_ddp_state; + + CTR4(KTR_TCB, "t3_setup_ppods(bufidx=%u tag0=%u tag1=%u len=%u)", + bufidx, tag0, tag1, len); + SOCKBUF_LOCK_ASSERT(&toeptoso(toep)->so_rcv); + wrlen = sizeof(*wr) + 3 * sizeof(*req) + sizeof(*getreq); + m = m_gethdr_nofail(wrlen); + m_set_priority(m, mkprio(CPL_PRIORITY_CONTROL, toep)); + wr = mtod(m, struct work_request_hdr *); + m->m_pkthdr.len = m->m_len = wrlen; + bzero(wr, wrlen); + + + /* Set the ATOMIC flag to make sure that TP processes the following + * CPLs in an atomic manner and no wire segments can be interleaved. + */ + wr->wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC); + req = (struct cpl_set_tcb_field *)(wr + 1); + mk_set_tcb_field_ulp(req, toep->tp_tid, W_TCB_RX_DDP_BUF0_TAG, + V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG) | + V_TCB_RX_DDP_BUF1_TAG(M_TCB_RX_DDP_BUF1_TAG) << 32, + V_TCB_RX_DDP_BUF0_TAG(tag0) | + V_TCB_RX_DDP_BUF1_TAG((uint64_t)tag1) << 32); + req++; + if (bufidx == 0) { + mk_set_tcb_field_ulp(req, toep->tp_tid, W_TCB_RX_DDP_BUF0_LEN, + V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN), + V_TCB_RX_DDP_BUF0_LEN((uint64_t)len)); + req++; + mk_set_tcb_field_ulp(req, toep->tp_tid, W_TCB_RX_DDP_FLAGS, + V_TF_DDP_PUSH_DISABLE_0(1) | + V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_ACTIVE_BUF(1), + V_TF_DDP_PUSH_DISABLE_0(0) | + V_TF_DDP_BUF0_VALID(1)); + } else { + mk_set_tcb_field_ulp(req, toep->tp_tid, W_TCB_RX_DDP_BUF1_LEN, + V_TCB_RX_DDP_BUF1_LEN(M_TCB_RX_DDP_BUF1_LEN), + V_TCB_RX_DDP_BUF1_LEN((uint64_t)len)); + req++; + mk_set_tcb_field_ulp(req, toep->tp_tid, W_TCB_RX_DDP_FLAGS, + V_TF_DDP_PUSH_DISABLE_1(1) | + V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_ACTIVE_BUF(1), + V_TF_DDP_PUSH_DISABLE_1(0) | + V_TF_DDP_BUF1_VALID(1) | 
V_TF_DDP_ACTIVE_BUF(1)); + } + + getreq = (struct cpl_get_tcb *)(req + 1); + mk_get_tcb_ulp(getreq, toep->tp_tid, toep->tp_qset); + + /* Keep track of the number of oustanding CPL_GET_TCB requests + */ + p->get_tcb_count++; + +#ifdef T3_TRACE + T3_TRACE4(TIDTB(sk), + "t3_overlay_ddpbuf: bufidx %u tag0 %u tag1 %u " + "len %d", + bufidx, tag0, tag1, len); +#endif + cxgb_ofld_send(TOEP_T3C_DEV(toep), m); +} + +/* + * Sends a compound WR containing all the CPL messages needed to program the + * two HW DDP buffers, namely optionally setting up the length and offset of + * each buffer, programming the DDP flags, and optionally sending RX_DATA_ACK. + */ +void +t3_setup_ddpbufs(struct toepcb *toep, unsigned int len0, unsigned int offset0, + unsigned int len1, unsigned int offset1, + uint64_t ddp_flags, uint64_t flag_mask, int modulate) +{ + unsigned int wrlen; + struct mbuf *m; + struct work_request_hdr *wr; + struct cpl_set_tcb_field *req; + + CTR6(KTR_TCB, "t3_setup_ddpbufs(len0=%u offset0=%u len1=%u offset1=%u ddp_flags=0x%08x%08x ", + len0, offset0, len1, offset1, ddp_flags >> 32, ddp_flags & 0xffffffff); + + SOCKBUF_LOCK_ASSERT(&toeptoso(toep)->so_rcv); + wrlen = sizeof(*wr) + sizeof(*req) + (len0 ? sizeof(*req) : 0) + + (len1 ? sizeof(*req) : 0) + + (modulate ? 
sizeof(struct cpl_rx_data_ack) : 0); + m = m_gethdr_nofail(wrlen); + m_set_priority(m, mkprio(CPL_PRIORITY_CONTROL, toep)); + wr = mtod(m, struct work_request_hdr *); + bzero(wr, wrlen); + + wr->wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); + m->m_pkthdr.len = m->m_len = wrlen; + + req = (struct cpl_set_tcb_field *)(wr + 1); + if (len0) { /* program buffer 0 offset and length */ + mk_set_tcb_field_ulp(req, toep->tp_tid, W_TCB_RX_DDP_BUF0_OFFSET, + V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) | + V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN), + V_TCB_RX_DDP_BUF0_OFFSET((uint64_t)offset0) | + V_TCB_RX_DDP_BUF0_LEN((uint64_t)len0)); + req++; + } + if (len1) { /* program buffer 1 offset and length */ + mk_set_tcb_field_ulp(req, toep->tp_tid, W_TCB_RX_DDP_BUF1_OFFSET, + V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) | + V_TCB_RX_DDP_BUF1_LEN(M_TCB_RX_DDP_BUF1_LEN) << 32, + V_TCB_RX_DDP_BUF1_OFFSET((uint64_t)offset1) | + V_TCB_RX_DDP_BUF1_LEN((uint64_t)len1) << 32); + req++; + } + + mk_set_tcb_field_ulp(req, toep->tp_tid, W_TCB_RX_DDP_FLAGS, flag_mask, + ddp_flags); + + if (modulate) { + mk_rx_data_ack_ulp((struct cpl_rx_data_ack *)(req + 1), toep->tp_tid, + toep->tp_copied_seq - toep->tp_rcv_wup); + toep->tp_rcv_wup = toep->tp_copied_seq; + } + +#ifdef T3_TRACE + T3_TRACE5(TIDTB(sk), + "t3_setup_ddpbufs: len0 %u len1 %u ddp_flags 0x%08x%08x " + "modulate %d", + len0, len1, ddp_flags >> 32, ddp_flags & 0xffffffff, + modulate); +#endif + + cxgb_ofld_send(TOEP_T3C_DEV(toep), m); +} + void t3_init_wr_tab(unsigned int wr_len) { @@ -3353,7 +4265,6 @@ t3_init_cpl_io(void) tcphdr_skb->h.raw = tcphdr_skb->data; memset(tcphdr_skb->data, 0, tcphdr_skb->len); #endif - t3tom_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish); t3tom_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl); @@ -3367,11 +4278,9 @@ t3_init_cpl_io(void) t3tom_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl); t3tom_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp); 
t3tom_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete); -#ifdef notyet t3tom_register_cpl_handler(CPL_RX_URG_NOTIFY, do_rx_urg_notify); t3tom_register_cpl_handler(CPL_TRACE_PKT, do_trace_pkt); t3tom_register_cpl_handler(CPL_GET_TCB_RPL, do_get_tcb_rpl); -#endif return (0); } diff --git a/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c b/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c index a3dd692..6edeacd 100644 --- a/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c +++ b/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c @@ -38,14 +38,18 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include +#include #include #include #include #include +#include #include +#include #include #include @@ -56,6 +60,7 @@ __FBSDID("$FreeBSD$"); #include +#include #include #include @@ -72,6 +77,7 @@ __FBSDID("$FreeBSD$"); #include #include #include + #include #include #include @@ -85,6 +91,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include static int (*pru_sosend)(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, @@ -94,13 +101,11 @@ static int (*pru_soreceive)(struct socket *so, struct sockaddr **paddr, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp); -#ifdef notyet -#define VM_HOLD_WRITEABLE 0x1 -static int vm_fault_hold_user_pages(vm_offset_t addr, int len, vm_page_t *mp, - int *count, int flags); -#endif -static void vm_fault_unhold_pages(vm_page_t *m, int count); #define TMP_IOV_MAX 16 +#ifndef PG_FRAME +#define PG_FRAME ~PAGE_MASK +#endif +#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 
M_NOWAIT : M_WAITOK) void t3_init_socket_ops(void) @@ -110,20 +115,8 @@ t3_init_socket_ops(void) prp = pffindtype(AF_INET, SOCK_STREAM); pru_sosend = prp->pr_usrreqs->pru_sosend; pru_soreceive = prp->pr_usrreqs->pru_soreceive; -#ifdef TCP_USRREQS_OVERLOAD - tcp_usrreqs.pru_connect = cxgb_tcp_usrreqs.pru_connect; - tcp_usrreqs.pru_abort = cxgb_tcp_usrreqs.pru_abort; - tcp_usrreqs.pru_listen = cxgb_tcp_usrreqs.pru_listen; - tcp_usrreqs.pru_send = cxgb_tcp_usrreqs.pru_send; - tcp_usrreqs.pru_abort = cxgb_tcp_usrreqs.pru_abort; - tcp_usrreqs.pru_disconnect = cxgb_tcp_usrreqs.pru_disconnect; - tcp_usrreqs.pru_close = cxgb_tcp_usrreqs.pru_close; - tcp_usrreqs.pru_shutdown = cxgb_tcp_usrreqs.pru_shutdown; - tcp_usrreqs.pru_rcvd = cxgb_tcp_usrreqs.pru_rcvd; -#endif } - struct cxgb_dma_info { size_t cdi_mapped; int cdi_nsegs; @@ -182,21 +175,172 @@ iov_adj(struct iovec **iov, int *iovcnt, size_t count) } } - static void -cxgb_zero_copy_free(void *cl, void *arg) {} +cxgb_zero_copy_free(void *cl, void *arg) +{ + struct mbuf_vec *mv; + struct mbuf *m = (struct mbuf *)cl; + + mv = mtomv(m); + /* + * Physical addresses, don't try to free should be unheld separately from sbdrop + * + */ + mv->mv_count = 0; + m_free_iovec(m, m->m_type); +} + static int cxgb_hold_iovec_pages(struct uio *uio, vm_page_t *m, int *held, int flags) { + struct iovec *iov = uio->uio_iov; + int iovcnt = uio->uio_iovcnt; + int err, i, count, totcount, maxcount, totbytes, npages, curbytes; + uint64_t start, end; + vm_page_t *mp; + + totbytes = totcount = 0; + maxcount = *held; + + mp = m; + for (totcount = i = 0; (i < iovcnt) && (totcount < maxcount); i++, iov++) { + count = maxcount - totcount; + + start = (uintptr_t)iov->iov_base; + end = (uintptr_t)((caddr_t)iov->iov_base + iov->iov_len); + start &= PG_FRAME; + end += PAGE_MASK; + end &= PG_FRAME; + npages = (end - start) >> PAGE_SHIFT; + + count = min(count, npages); + + err = vm_fault_hold_user_pages((vm_offset_t)iov->iov_base, mp, count, flags); + if 
(err) { + vm_fault_unhold_pages(m, totcount); + return (err); + } + mp += count; + totcount += count; + curbytes = iov->iov_len; + if (count != npages) + curbytes = count*PAGE_SIZE - (((uintptr_t)iov->iov_base)&PAGE_MASK); + totbytes += curbytes; + } + uio->uio_resid -= totbytes; - return (EINVAL); + return (0); +} + +/* + * Returns whether a connection should enable DDP. This happens when all of + * the following conditions are met: + * - the connection's ULP mode is DDP + * - DDP is not already enabled + * - the last receive was above the DDP threshold + * - receive buffers are in user space + * - receive side isn't shutdown (handled by caller) + * - the connection's receive window is big enough so that sizable buffers + * can be posted without closing the window in the middle of DDP (checked + * when the connection is offloaded) + */ +static int +so_should_ddp(const struct toepcb *toep, int last_recv_len) +{ + + DPRINTF("ulp_mode=%d last_recv_len=%d ddp_thresh=%d rcv_wnd=%ld ddp_copy_limit=%d\n", + toep->tp_ulp_mode, last_recv_len, TOM_TUNABLE(toep->tp_toedev, ddp_thres), + toep->tp_tp->rcv_wnd, (TOM_TUNABLE(toep->tp_toedev, ddp_copy_limit) + DDP_RSVD_WIN)); + + return toep->tp_ulp_mode == ULP_MODE_TCPDDP && (toep->tp_ddp_state.kbuf[0] == NULL) && + last_recv_len > TOM_TUNABLE(toep->tp_toedev, ddp_thres) && + toep->tp_tp->rcv_wnd > + (TOM_TUNABLE(toep->tp_toedev, ddp_copy_limit) + DDP_RSVD_WIN); +} + +static inline int +is_ddp(const struct mbuf *m) +{ + return (m->m_flags & M_DDP); +} + +static inline int +is_ddp_psh(const struct mbuf *m) +{ + return is_ddp(m) && (m->m_pkthdr.csum_flags & DDP_BF_PSH); +} + +static int +m_uiomove(const struct mbuf *m, int offset, int len, struct uio *uio) +{ + int curlen, startlen, resid_init, err = 0; + caddr_t buf; + + DPRINTF("m_uiomove(m=%p, offset=%d, len=%d, ...)\n", + m, offset, len); + + startlen = len; + resid_init = uio->uio_resid; + while (m && len) { + buf = mtod(m, caddr_t); + curlen = m->m_len; + if (offset && 
(offset < curlen)) { + curlen -= offset; + buf += offset; + offset = 0; + } else if (offset) { + offset -= curlen; + m = m->m_next; + continue; + } + err = uiomove(buf, min(len, curlen), uio); + if (err) { + printf("uiomove returned %d\n", err); + return (err); + } + + len -= min(len, curlen); + m = m->m_next; + } + DPRINTF("copied %d bytes - resid_init=%d uio_resid=%d\n", + startlen - len, resid_init, uio->uio_resid); + return (err); +} + +/* + * Copy data from an sk_buff to an iovec. Deals with RX_DATA, which carry the + * data in the sk_buff body, and with RX_DATA_DDP, which place the data in a + * DDP buffer. + */ +static inline int +copy_data(const struct mbuf *m, int offset, int len, struct uio *uio) +{ + struct iovec *to = uio->uio_iov; + int err; + + + if (__predict_true(!is_ddp(m))) { /* RX_DATA */ + return m_uiomove(m, offset, len, uio); + } if (__predict_true(m->m_ddp_flags & DDP_BF_NOCOPY)) { /* user DDP */ + to->iov_len -= len; + to->iov_base = ((caddr_t)to->iov_base) + len; + uio->uio_iov = to; + uio->uio_resid -= len; + return (0); + } + err = t3_ddp_copy(m, offset, uio, len); /* kernel DDP */ + return (err); } static void -cxgb_wait_dma_completion(struct toepcb *tp) +cxgb_wait_dma_completion(struct toepcb *toep) { + struct mtx *lock; + lock = &toep->tp_tp->t_inpcb->inp_mtx; + INP_LOCK(toep->tp_tp->t_inpcb); + cv_wait_unlock(&toep->tp_cv, lock); } static int @@ -234,7 +378,13 @@ cxgb_vm_page_to_miov(struct toepcb *toep, struct uio *uio, struct mbuf **m) mi_collapse_sge(mi, segs); *m = m0; - + + /* + * This appears to be a no-op at the moment + * as busdma is all or nothing need to make + * sure the tag values are large enough + * + */ if (cdi.cdi_mapped < uio->uio_resid) { uio->uio_resid -= cdi.cdi_mapped; } else @@ -305,10 +455,11 @@ sendmore: } uio->uio_resid -= m->m_pkthdr.len; sent += m->m_pkthdr.len; - sbappend_locked(&so->so_snd, m); + sbappend(&so->so_snd, m); t3_push_frames(so, TRUE); iov_adj(&uiotmp.uio_iov, &iovcnt, uiotmp.uio_resid); } + 
/* * Wait for pending I/O to be DMA'd to the card * @@ -357,7 +508,7 @@ cxgb_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, zcopy_thres = TOM_TUNABLE(tdev, zcopy_sosend_partial_thres); zcopy_enabled = TOM_TUNABLE(tdev, zcopy_sosend_enabled); - if ((uio->uio_resid > zcopy_thres) && + if (uio && (uio->uio_resid > zcopy_thres) && (uio->uio_iovcnt < TMP_IOV_MAX) && ((so->so_state & SS_NBIO) == 0) && zcopy_enabled) { rv = t3_sosend(so, uio); @@ -368,36 +519,378 @@ cxgb_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, return pru_sosend(so, addr, uio, top, control, flags, td); } +/* + * Following replacement or removal of the first mbuf on the first mbuf chain + * of a socket buffer, push necessary state changes back into the socket + * buffer so that other consumers see the values consistently. 'nextrecord' + * is the callers locally stored value of the original value of + * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes. + * NOTE: 'nextrecord' may be NULL. + */ +static __inline void +sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord) +{ + + SOCKBUF_LOCK_ASSERT(sb); + /* + * First, update for the new value of nextrecord. If necessary, make + * it the first record. + */ + if (sb->sb_mb != NULL) + sb->sb_mb->m_nextpkt = nextrecord; + else + sb->sb_mb = nextrecord; + + /* + * Now update any dependent socket buffer fields to reflect the new + * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the + * addition of a second clause that takes care of the case where + * sb_mb has been updated, but remains the last record. 
+ */ + if (sb->sb_mb == NULL) { + sb->sb_mbtail = NULL; + sb->sb_lastrecord = NULL; + } else if (sb->sb_mb->m_nextpkt == NULL) + sb->sb_lastrecord = sb->sb_mb; +} + +#define IS_NONBLOCKING(so) ((so)->so_state & SS_NBIO) + static int -t3_soreceive(struct socket *so, struct uio *uio) +t3_soreceive(struct socket *so, int *flagsp, struct uio *uio) { -#ifdef notyet - int i, rv, count, hold_resid, sent, iovcnt; - struct iovec iovtmp[TMP_IOV_MAX], *iovtmpp, *iov; struct tcpcb *tp = sototcpcb(so); struct toepcb *toep = tp->t_toe; struct mbuf *m; - struct uio uiotmp; + uint32_t offset; + int err, flags, avail, len, copied, copied_unacked; + int target; /* Read at least this many bytes */ + int user_ddp_ok; + struct ddp_state *p; + struct inpcb *inp = sotoinpcb(so); + + avail = offset = copied = copied_unacked = 0; + flags = flagsp ? (*flagsp &~ MSG_EOR) : 0; + err = sblock(&so->so_rcv, SBLOCKWAIT(flags)); + p = &toep->tp_ddp_state; + + if (err) + return (err); + SOCKBUF_LOCK(&so->so_rcv); + p->user_ddp_pending = 0; +restart: + len = uio->uio_resid; + m = so->so_rcv.sb_mb; + target = (flags & MSG_WAITALL) ? 
len : so->so_rcv.sb_lowat; + user_ddp_ok = p->ubuf_ddp_ready; + p->cancel_ubuf = 0; + + if (len == 0) + goto done; +#if 0 + while (m && m->m_len == 0) { + so->so_rcv.sb_mb = m_free(m); + m = so->so_rcv.sb_mb; + } +#endif + if (m) + goto got_mbuf; + + /* empty receive queue */ + if (copied >= target && (so->so_rcv.sb_mb == NULL) && + !p->user_ddp_pending) + goto done; + + if (copied) { + if (so->so_error || tp->t_state == TCPS_CLOSED || + (so->so_state & (SS_ISDISCONNECTING|SS_ISDISCONNECTED))) + goto done; + } else { + if (so->so_state & SS_NOFDREF) + goto done; + if (so->so_error) { + err = so->so_error; + so->so_error = 0; + goto done; + } + if (so->so_rcv.sb_state & SBS_CANTRCVMORE) + goto done; + if (so->so_state & (SS_ISDISCONNECTING|SS_ISDISCONNECTED)) + goto done; + if (tp->t_state == TCPS_CLOSED) { + err = ENOTCONN; + goto done; + } + } + if (so->so_rcv.sb_mb && !p->user_ddp_pending) { + SOCKBUF_UNLOCK(&so->so_rcv); + INP_LOCK(inp); + t3_cleanup_rbuf(tp, copied_unacked); + INP_UNLOCK(inp); + SOCKBUF_LOCK(&so->so_rcv); + copied_unacked = 0; + goto restart; + } + if (p->kbuf[0] && user_ddp_ok && !p->user_ddp_pending && + uio->uio_iov->iov_len > p->kbuf[0]->dgl_length && + p->ubuf_ddp_ready) { + p->user_ddp_pending = + !t3_overlay_ubuf(so, uio, IS_NONBLOCKING(so), flags, 1, 1); + if (p->user_ddp_pending) { + p->kbuf_posted++; + user_ddp_ok = 0; + } + } + if (p->kbuf[0] && (p->kbuf_posted == 0)) { + t3_post_kbuf(so, 1, IS_NONBLOCKING(so)); + p->kbuf_posted++; + } + if (p->user_ddp_pending) { + /* One shot at DDP if we already have enough data */ + if (copied >= target) + user_ddp_ok = 0; + + DPRINTF("sbwaiting 1\n"); + if ((err = sbwait(&so->so_rcv)) != 0) + goto done; +//for timers to work await_ddp_completion(sk, flags, &timeo); + } else if (copied >= target) + goto done; + else { + if (copied_unacked) { + int i = 0; + + SOCKBUF_UNLOCK(&so->so_rcv); + INP_LOCK(inp); + t3_cleanup_rbuf(tp, copied_unacked); + INP_UNLOCK(inp); + copied_unacked = 0; + if (mp_ncpus 
> 1) + while (i++ < 200 && so->so_rcv.sb_mb == NULL) + cpu_spinwait(); + SOCKBUF_LOCK(&so->so_rcv); + } + + if (so->so_rcv.sb_mb) + goto restart; + DPRINTF("sbwaiting 2 copied=%d target=%d avail=%d so=%p mb=%p cc=%d\n", copied, target, avail, so, + so->so_rcv.sb_mb, so->so_rcv.sb_cc); + if ((err = sbwait(&so->so_rcv)) != 0) + goto done; + } + goto restart; +got_mbuf: + KASSERT(((m->m_flags & M_EXT) && (m->m_ext.ext_type == EXT_EXTREF)) || !(m->m_flags & M_EXT), ("unexpected type M_EXT=%d ext_type=%d m_len=%d m_pktlen=%d\n", !!(m->m_flags & M_EXT), m->m_ext.ext_type, m->m_len, m->m_pkthdr.len)); + KASSERT(m->m_next != (struct mbuf *)0xffffffff, ("bad next value m_next=%p m_nextpkt=%p m_flags=0x%x m->m_len=%d", + m->m_next, m->m_nextpkt, m->m_flags, m->m_len)); + if (m->m_pkthdr.len == 0) { + if ((m->m_ddp_flags & DDP_BF_NOCOPY) == 0) + panic("empty mbuf and NOCOPY not set\n"); + CTR0(KTR_TOM, "ddp done notification"); + p->user_ddp_pending = 0; + sbdroprecord_locked(&so->so_rcv); + goto done; + } + + offset = toep->tp_copied_seq + copied_unacked - m->m_seq; + DPRINTF("m=%p copied_seq=0x%x copied_unacked=%d m_seq=0x%x offset=%d pktlen=%d is_ddp(m)=%d\n", + m, toep->tp_copied_seq, copied_unacked, m->m_seq, offset, m->m_pkthdr.len, !!is_ddp(m)); + + if (offset >= m->m_pkthdr.len) + panic("t3_soreceive: OFFSET >= LEN offset %d copied_seq 0x%x seq 0x%x " + "pktlen %d ddp flags 0x%x", offset, toep->tp_copied_seq + copied_unacked, m->m_seq, + m->m_pkthdr.len, m->m_ddp_flags); + + avail = m->m_pkthdr.len - offset; + if (len < avail) { + if (is_ddp(m) && (m->m_ddp_flags & DDP_BF_NOCOPY)) + panic("bad state in t3_soreceive len=%d avail=%d offset=%d\n", len, avail, offset); + avail = len; + } + CTR4(KTR_TOM, "t3_soreceive: m_len=%u offset=%u len=%u m_seq=0%08x", m->m_pkthdr.len, offset, len, m->m_seq); + +#ifdef URGENT_DATA_SUPPORTED /* - * Events requiring iteration: - * - number of pages exceeds max hold pages for process or system - * - number of pages exceeds maximum sg 
entries for a single WR - * - * We're limited to holding 128 pages at once - and we're limited to - * 34 SG entries per work request, but each SG entry can be any number - * of contiguous pages - * + * Check if the data we are preparing to copy contains urgent + * data. Either stop short of urgent data or skip it if it's + * first and we are not delivering urgent data inline. + */ + if (__predict_false(toep->tp_urg_data)) { + uint32_t urg_offset = tp->rcv_up - tp->copied_seq + copied_unacked; + + if (urg_offset < avail) { + if (urg_offset) { + /* stop short of the urgent data */ + avail = urg_offset; + } else if ((so->so_options & SO_OOBINLINE) == 0) { + /* First byte is urgent, skip */ + toep->tp_copied_seq++; + offset++; + avail--; + if (!avail) + goto skip_copy; + } + } + } +#endif + if (is_ddp_psh(m) || offset) { + user_ddp_ok = 0; +#ifdef T3_TRACE + T3_TRACE0(TIDTB(so), "t3_sosend: PSH"); +#endif + } + + if (user_ddp_ok && !p->user_ddp_pending && + uio->uio_iov->iov_len > p->kbuf[0]->dgl_length && + p->ubuf_ddp_ready) { + p->user_ddp_pending = + !t3_overlay_ubuf(so, uio, IS_NONBLOCKING(so), flags, 1, 1); + if (p->user_ddp_pending) { + p->kbuf_posted++; + user_ddp_ok = 0; + } + DPRINTF("user_ddp_pending=%d\n", p->user_ddp_pending); + } else + DPRINTF("user_ddp_ok=%d user_ddp_pending=%d iov_len=%ld dgl_length=%d ubuf_ddp_ready=%d ulp_mode=%d is_ddp(m)=%d flags=0x%x ubuf=%p kbuf_posted=%d\n", + user_ddp_ok, p->user_ddp_pending, uio->uio_iov->iov_len, p->kbuf[0] ? p->kbuf[0]->dgl_length : 0, + p->ubuf_ddp_ready, toep->tp_ulp_mode, !!is_ddp(m), m->m_ddp_flags, p->ubuf, p->kbuf_posted); + + /* + * If MSG_TRUNC is specified the data is discarded. 
+ * XXX need to check pr_atomic */ + KASSERT(avail > 0, ("avail=%d resid=%d offset=%d", avail, uio->uio_resid, offset)); + if (__predict_true(!(flags & MSG_TRUNC))) { + int resid = uio->uio_resid; + + SOCKBUF_UNLOCK(&so->so_rcv); + if ((err = copy_data(m, offset, avail, uio))) { + if (err) + err = EFAULT; + goto done_unlocked; + } + SOCKBUF_LOCK(&so->so_rcv); + if (avail != (resid - uio->uio_resid)) + printf("didn't copy all bytes :-/ avail=%d offset=%d pktlen=%d resid=%d uio_resid=%d copied=%d copied_unacked=%d is_ddp(m)=%d\n", + avail, offset, m->m_pkthdr.len, resid, uio->uio_resid, copied, copied_unacked, is_ddp(m)); + } + + copied += avail; + copied_unacked += avail; + len -= avail; + +#ifdef URGENT_DATA_SUPPORTED +skip_copy: + if (tp->urg_data && after(tp->copied_seq + copied_unacked, tp->urg_seq)) + tp->urg_data = 0; +#endif + /* + * If the buffer is fully consumed free it. If it's a DDP + * buffer also handle any events it indicates. + */ + if (avail + offset >= m->m_pkthdr.len) { + unsigned int fl = m->m_ddp_flags; + int exitnow, got_psh = 0, nomoredata = 0; + int count; + struct mbuf *nextrecord; + + if (p->kbuf[0] != NULL && is_ddp(m) && (fl & 1)) { + if (is_ddp_psh(m) && p->user_ddp_pending) + got_psh = 1; + + if (fl & DDP_BF_NOCOPY) + p->user_ddp_pending = 0; + else if ((fl & DDP_BF_NODATA) && IS_NONBLOCKING(so)) { + p->kbuf_posted--; + nomoredata = 1; + } else { + p->kbuf_posted--; + p->ubuf_ddp_ready = 1; + } + } - uiotmp = *uio; - iovcnt = uio->uio_iovcnt; - iov = uio->uio_iov; - sent = 0; - re; -#endif - return (0); + nextrecord = m->m_nextpkt; + count = m->m_pkthdr.len; + while (count > 0) { + count -= m->m_len; + KASSERT(((m->m_flags & M_EXT) && (m->m_ext.ext_type == EXT_EXTREF)) || !(m->m_flags & M_EXT), ("unexpected type M_EXT=%d ext_type=%d m_len=%d\n", !!(m->m_flags & M_EXT), m->m_ext.ext_type, m->m_len)); + sbfree(&so->so_rcv, m); + so->so_rcv.sb_mb = m_free(m); + m = so->so_rcv.sb_mb; + } + sockbuf_pushsync(&so->so_rcv, nextrecord); +#if 0 + 
sbdrop_locked(&so->so_rcv, m->m_pkthdr.len); +#endif + exitnow = got_psh || nomoredata; + if ((so->so_rcv.sb_mb == NULL) && exitnow) + goto done; + if (copied_unacked > (so->so_rcv.sb_hiwat >> 2)) { + SOCKBUF_UNLOCK(&so->so_rcv); + INP_LOCK(inp); + t3_cleanup_rbuf(tp, copied_unacked); + INP_UNLOCK(inp); + copied_unacked = 0; + SOCKBUF_LOCK(&so->so_rcv); + } + } + if (len > 0) + goto restart; + + done: + /* + * If we can still receive decide what to do in preparation for the + * next receive. Note that RCV_SHUTDOWN is set if the connection + * transitioned to CLOSE but not if it was in that state to begin with. + */ + if (__predict_true((so->so_state & (SS_ISDISCONNECTING|SS_ISDISCONNECTED)) == 0)) { + if (p->user_ddp_pending) { + SOCKBUF_UNLOCK(&so->so_rcv); + SOCKBUF_LOCK(&so->so_rcv); + user_ddp_ok = 0; + t3_cancel_ubuf(toep); + if (so->so_rcv.sb_mb) { + if (copied < 0) + copied = 0; + if (len > 0) + goto restart; + } + p->user_ddp_pending = 0; + } + if ((p->kbuf[0] != NULL) && (p->kbuf_posted == 0)) { +#ifdef T3_TRACE + T3_TRACE0(TIDTB(so), + "chelsio_recvmsg: about to exit, repost kbuf"); +#endif + + t3_post_kbuf(so, 1, IS_NONBLOCKING(so)); + p->kbuf_posted++; + } else if (so_should_ddp(toep, copied) && uio->uio_iovcnt == 1) { + CTR1(KTR_TOM ,"entering ddp on tid=%u", toep->tp_tid); + if (!t3_enter_ddp(so, TOM_TUNABLE(TOE_DEV(so), + ddp_copy_limit), 0, IS_NONBLOCKING(so))) + p->kbuf_posted = 1; + } + } +#ifdef T3_TRACE + T3_TRACE5(TIDTB(so), + "chelsio_recvmsg <-: copied %d len %d buffers_freed %d " + "kbuf_posted %d user_ddp_pending %u", + copied, len, buffers_freed, p ? 
p->kbuf_posted : -1, + p->user_ddp_pending); +#endif + SOCKBUF_UNLOCK(&so->so_rcv); +done_unlocked: + if (copied_unacked) { + INP_LOCK(inp); + t3_cleanup_rbuf(tp, copied_unacked); + INP_UNLOCK(inp); + } + sbunlock(&so->so_rcv); + + return (err); } static int @@ -405,9 +898,11 @@ cxgb_soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { struct toedev *tdev; - int rv, zcopy_thres, zcopy_enabled; + int rv, zcopy_thres, zcopy_enabled, flags; struct tcpcb *tp = sototcpcb(so); + flags = flagsp ? *flagsp &~ MSG_EOR : 0; + /* * In order to use DMA direct from userspace the following * conditions must be met: @@ -421,150 +916,30 @@ cxgb_soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, * - iovcnt is 1 * */ - if (tp->t_flags & TF_TOE) { + + if ((tp->t_flags & TF_TOE) && uio && ((flags & (MSG_WAITALL|MSG_OOB|MSG_PEEK|MSG_DONTWAIT)) == 0) + && (uio->uio_iovcnt == 1) && (mp0 == NULL)) { tdev = TOE_DEV(so); zcopy_thres = TOM_TUNABLE(tdev, ddp_thres); zcopy_enabled = TOM_TUNABLE(tdev, ddp); if ((uio->uio_resid > zcopy_thres) && - (uio->uio_iovcnt == 1) && ((so->so_state & SS_NBIO) == 0) + (uio->uio_iovcnt == 1) && zcopy_enabled) { - rv = t3_soreceive(so, uio); + rv = t3_soreceive(so, flagsp, uio); if (rv != EAGAIN) return (rv); - } - } - + else + printf("returned EAGAIN\n"); + } + } else if ((tp->t_flags & TF_TOE) && uio && mp0 == NULL) + printf("skipping t3_soreceive flags=0x%x iovcnt=%d sb_state=0x%x\n", + flags, uio->uio_iovcnt, so->so_rcv.sb_state); return pru_soreceive(so, psa, uio, mp0, controlp, flagsp); } - void t3_install_socket_ops(struct socket *so) { so->so_proto->pr_usrreqs->pru_sosend = cxgb_sosend; so->so_proto->pr_usrreqs->pru_soreceive = cxgb_soreceive; } - -/* - * This routine takes a user address range and does the following: - * - validate that the user has access to those pages (flags indicates read or write) - if not fail - * - validate that count is enough to hold 
range number of pages - if not fail - * - fault in any non-resident pages - * - if the user is doing a read force a write fault for any COWed pages - * - if the user is doing a read mark all pages as dirty - * - hold all pages - * - return number of pages in count - */ -#ifdef notyet -static int -vm_fault_hold_user_pages(vm_offset_t addr, int len, vm_page_t *mp, int *count, int flags) -{ - - vm_offset_t start, va; - vm_paddr_t pa; - int pageslen, faults, rv; - - struct thread *td; - vm_map_t map; - pmap_t pmap; - vm_page_t m, *pages; - vm_prot_t prot; - - start = addr & ~PAGE_MASK; - pageslen = roundup2(addr + len, PAGE_SIZE); - if (*count < (pageslen >> PAGE_SHIFT)) - return (EFBIG); - - *count = pageslen >> PAGE_SHIFT; - /* - * Check that virtual address range is legal - * This check is somewhat bogus as on some architectures kernel - * and user do not share VA - however, it appears that all FreeBSD - * architectures define it - */ - if (addr + len > VM_MAXUSER_ADDRESS) - return (EFAULT); - - td = curthread; - map = &td->td_proc->p_vmspace->vm_map; - pmap = &td->td_proc->p_vmspace->vm_pmap; - pages = mp; - - prot = (flags & VM_HOLD_WRITEABLE) ? 
VM_PROT_WRITE : VM_PROT_READ; - bzero(pages, sizeof(vm_page_t *) * (*count)); -retry: - - /* - * First optimistically assume that all pages are resident (and R/W if for write) - * if so just mark pages as held (and dirty if for write) and return - */ - vm_page_lock_queues(); - for (pages = mp, faults = 0, va = start; va < pageslen; va += PAGE_SIZE, pages++) { - /* - * Assure that we only hold the page once - */ - if (*pages == NULL) { - /* - * page queue mutex is recursable so this is OK - * it would be really nice if we had an unlocked version of this so - * we were only acquiring the pmap lock 1 time as opposed to potentially - * many dozens of times - */ - m = pmap_extract_and_hold(pmap, va, prot); - if (m == NULL) { - faults++; - continue; - } - *pages = m; - if (flags & VM_HOLD_WRITEABLE) - vm_page_dirty(m); - } - } - vm_page_unlock_queues(); - - if (faults == 0) - return (0); - /* - * Pages either have insufficient permissions or are not present - * trigger a fault where neccessary - * - */ - for (va = start; va < pageslen; va += PAGE_SIZE) { - m = NULL; - pa = pmap_extract(pmap, va); - rv = 0; - if (pa) - m = PHYS_TO_VM_PAGE(pa); - if (flags & VM_HOLD_WRITEABLE) { - if (m == NULL || (m->flags & PG_WRITEABLE) == 0) - rv = vm_fault(map, va, VM_PROT_WRITE, VM_FAULT_DIRTY); - } else if (m == NULL) - rv = vm_fault(map, va, VM_PROT_READ, VM_FAULT_NORMAL); - if (rv) - goto error; - } - goto retry; - -error: - vm_page_lock_queues(); - for (pages = mp, va = start; va < pageslen; va += PAGE_SIZE, pages++) - if (*pages) - vm_page_unhold(*pages); - vm_page_unlock_queues(); - return (EFAULT); -} -#endif - -static void -vm_fault_unhold_pages(vm_page_t *mp, int count) -{ - - KASSERT(count >= 0, ("negative count %d", count)); - vm_page_lock_queues(); - while (count--) { - vm_page_unhold(*mp); - mp++; - } - vm_page_unlock_queues(); -} - diff --git a/sys/dev/cxgb/ulp/tom/cxgb_ddp.c b/sys/dev/cxgb/ulp/tom/cxgb_ddp.c new file mode 100644 index 0000000..8bdcb65 --- /dev/null +++ 
b/sys/dev/cxgb/ulp/tom/cxgb_ddp.c @@ -0,0 +1,735 @@ +/************************************************************************** + +Copyright (c) 2007, Chelsio Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Neither the name of the Chelsio Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include + + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_SCHEDULE_TIMEOUT 300 + +/* + * Return the # of page pods needed to accommodate a # of pages. + */ +static inline unsigned int +pages2ppods(unsigned int pages) +{ + return (pages + PPOD_PAGES - 1) / PPOD_PAGES + NUM_SENTINEL_PPODS; +} + +/** + * t3_pin_pages - pin a user memory range and prepare it for DDP + * @addr - the starting address + * @len - the length of the range + * @newgl - contains the pages and physical addresses of the pinned range + * @gl - an existing gather list, may be %NULL + * + * Pins the pages in the user-space memory range [addr, addr + len) and + * maps them for DMA. Returns a gather list with the pinned pages and + * their physical addresses. If @gl is non NULL the pages it describes + * are compared against the pages for [addr, addr + len), and if the + * existing gather list already covers the range a new list is not + * allocated. Returns 0 on success, or a negative errno. On success if + * a new gather list was allocated it is returned in @newgl. 
+ */ +static int +t3_pin_pages(bus_dma_tag_t tag, bus_dmamap_t map, vm_offset_t addr, + size_t len, struct ddp_gather_list **newgl, + const struct ddp_gather_list *gl) +{ + int i = 0, err; + size_t pg_off; + unsigned int npages; + struct ddp_gather_list *p; + + /* + * XXX need x86 agnostic check + */ + if (addr + len > VM_MAXUSER_ADDRESS) + return (EFAULT); + + pg_off = addr & PAGE_MASK; + npages = (pg_off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + p = malloc(sizeof(struct ddp_gather_list) + npages * sizeof(vm_page_t *), + M_DEVBUF, M_NOWAIT|M_ZERO); + if (p == NULL) + return (ENOMEM); + + err = vm_fault_hold_user_pages(addr, p->dgl_pages, npages, VM_HOLD_WRITEABLE); + if (err) + goto free_gl; + + if (gl && gl->dgl_offset == pg_off && gl->dgl_nelem >= npages && + gl->dgl_length >= len) { + for (i = 0; i < npages; i++) + if (p->dgl_pages[i] != gl->dgl_pages[i]) + goto different_gl; + err = 0; + goto unpin; + } + +different_gl: + p->dgl_length = len; + p->dgl_offset = pg_off; + p->dgl_nelem = npages; +#ifdef NEED_BUSDMA + p->phys_addr[0] = pci_map_page(pdev, p->pages[0], pg_off, + PAGE_SIZE - pg_off, + PCI_DMA_FROMDEVICE) - pg_off; + for (i = 1; i < npages; ++i) + p->phys_addr[i] = pci_map_page(pdev, p->pages[i], 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); +#endif + *newgl = p; + return (0); +unpin: + vm_fault_unhold_pages(p->dgl_pages, npages); + +free_gl: + + free(p, M_DEVBUF); + *newgl = NULL; + return (err); +} + +static void +unmap_ddp_gl(const struct ddp_gather_list *gl) +{ +#ifdef NEED_BUSDMA + int i; + + if (!gl->nelem) + return; + + pci_unmap_page(pdev, gl->phys_addr[0] + gl->offset, + PAGE_SIZE - gl->offset, PCI_DMA_FROMDEVICE); + for (i = 1; i < gl->nelem; ++i) + pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE, + PCI_DMA_FROMDEVICE); + +#endif +} + +static void +ddp_gl_free_pages(struct ddp_gather_list *gl, int dirty) +{ + /* + * XXX mark pages as dirty before unholding + */ + vm_fault_unhold_pages(gl->dgl_pages, gl->dgl_nelem); +} + +void +t3_free_ddp_gl(struct 
ddp_gather_list *gl) +{ + unmap_ddp_gl(gl); + ddp_gl_free_pages(gl, 0); + free(gl, M_DEVBUF); +} + +/* Max # of page pods for a buffer, enough for 1MB buffer at 4KB page size */ +#define MAX_PPODS 64U + +/* + * Allocate page pods for DDP buffer 1 (the user buffer) and set up the tag in + * the TCB. We allocate page pods in multiples of PPOD_CLUSTER_SIZE. First we + * try to allocate enough page pods to accommodate the whole buffer, subject to + * the MAX_PPODS limit. If that fails we try to allocate PPOD_CLUSTER_SIZE page + * pods before failing entirely. + */ +static int +alloc_buf1_ppods(struct socket *so, struct ddp_state *p, + unsigned long addr, unsigned int len) +{ + int err, tag, npages, nppods; + struct tom_data *d = TOM_DATA(TOE_DEV(so)); + + SOCKBUF_LOCK_ASSERT(&so->so_rcv); + npages = ((addr & PAGE_MASK) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + nppods = min(pages2ppods(npages), MAX_PPODS); + nppods = roundup2(nppods, PPOD_CLUSTER_SIZE); + err = t3_alloc_ppods(d, nppods, &tag); + if (err && nppods > PPOD_CLUSTER_SIZE) { + nppods = PPOD_CLUSTER_SIZE; + err = t3_alloc_ppods(d, nppods, &tag); + } + if (err) + return (ENOMEM); + + p->ubuf_nppods = nppods; + p->ubuf_tag = tag; +#if NUM_DDP_KBUF == 1 + t3_set_ddp_tag(so, 1, tag << 6); +#endif + return (0); +} + +/* + * Starting offset for the user DDP buffer. A non-0 value ensures a DDP flush + * won't block indefinitely if there's nothing to place (which should be rare). 
+ */ +#define UBUF_OFFSET 1 + +static __inline unsigned long +select_ddp_flags(const struct socket *so, int buf_idx, + int nonblock, int rcv_flags) +{ + if (buf_idx == 1) { + if (__predict_false(rcv_flags & MSG_WAITALL)) + return V_TF_DDP_PSH_NO_INVALIDATE0(1) | + V_TF_DDP_PSH_NO_INVALIDATE1(1) | + V_TF_DDP_PUSH_DISABLE_1(1); + if (nonblock) + return V_TF_DDP_BUF1_FLUSH(1); + + return V_TF_DDP_BUF1_FLUSH(!TOM_TUNABLE(TOE_DEV(so), + ddp_push_wait)); + } + + if (__predict_false(rcv_flags & MSG_WAITALL)) + return V_TF_DDP_PSH_NO_INVALIDATE0(1) | + V_TF_DDP_PSH_NO_INVALIDATE1(1) | + V_TF_DDP_PUSH_DISABLE_0(1); + if (nonblock) + return V_TF_DDP_BUF0_FLUSH(1); + + return V_TF_DDP_BUF0_FLUSH(!TOM_TUNABLE(TOE_DEV(so), ddp_push_wait)); +} + +/* + * Reposts the kernel DDP buffer after it has been previously become full and + * invalidated. We just need to reset the offset and adjust the DDP flags. + * Conveniently, we can set the flags and the offset with a single message. + * Note that this function does not set the buffer length. Again conveniently + * our kernel buffer is of fixed size. If the length needs to be changed it + * needs to be done separately. + */ +static void +t3_repost_kbuf(struct socket *so, unsigned int bufidx, int modulate, + int activate, int nonblock) +{ + struct toepcb *toep = sototcpcb(so)->t_toe; + struct ddp_state *p = &toep->tp_ddp_state; + unsigned long flags; + + SOCKBUF_LOCK_ASSERT(&so->so_rcv); + p->buf_state[bufidx].cur_offset = p->kbuf[bufidx]->dgl_offset; + p->buf_state[bufidx].flags = p->kbuf_noinval ? 
DDP_BF_NOINVAL : 0; + p->buf_state[bufidx].gl = p->kbuf[bufidx]; + p->cur_buf = bufidx; + p->kbuf_idx = bufidx; + + flags = select_ddp_flags(so, bufidx, nonblock, 0); + if (!bufidx) + t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags | + V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) | + V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) | + V_TF_DDP_BUF0_VALID(1), + V_TF_DDP_BUF0_FLUSH(1) | + V_TF_DDP_PSH_NO_INVALIDATE0(1) | + V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) | + V_TF_DDP_BUF0_VALID(1) | + V_TF_DDP_ACTIVE_BUF(activate), modulate); + else + t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags | + V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) | + V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) | + V_TF_DDP_BUF1_VALID(1) | + V_TF_DDP_ACTIVE_BUF(activate), + V_TF_DDP_BUF1_FLUSH(1) | + V_TF_DDP_PSH_NO_INVALIDATE0(1) | + V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) | + V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_ACTIVE_BUF(1), + modulate); + +} + +/** + * setup_uio_ppods - setup HW page pods for a user iovec + * @sk: the associated socket + * @uio: the uio + * @oft: additional bytes to map before the start of the buffer + * + * Pins a user iovec and sets up HW page pods for DDP into it. We allocate + * page pods for user buffers on the first call per socket. Afterwards we + * limit the buffer length to whatever the existing page pods can accommodate. + * Returns a negative error code or the length of the mapped buffer. + * + * The current implementation handles iovecs with only one entry. 
+ */ +static int +setup_uio_ppods(struct socket *so, const struct uio *uio, int oft, int *length) +{ + int err; + unsigned int len; + struct ddp_gather_list *gl = NULL; + struct toepcb *toep = sototcpcb(so)->t_toe; + struct ddp_state *p = &toep->tp_ddp_state; + struct iovec *iov = uio->uio_iov; + vm_offset_t addr = (vm_offset_t)iov->iov_base - oft; + + SOCKBUF_LOCK_ASSERT(&so->so_rcv); + if (__predict_false(p->ubuf_nppods == 0)) { + err = alloc_buf1_ppods(so, p, addr, iov->iov_len + oft); + if (err) + return (err); + } + + len = (p->ubuf_nppods - NUM_SENTINEL_PPODS) * PPOD_PAGES * PAGE_SIZE; + len -= addr & PAGE_MASK; + if (len > M_TCB_RX_DDP_BUF0_LEN) + len = M_TCB_RX_DDP_BUF0_LEN; + len = min(len, sototcpcb(so)->rcv_wnd - 32768); + len = min(len, iov->iov_len + oft); + + if (len <= p->kbuf[0]->dgl_length) { + printf("length too short\n"); + return (EINVAL); + } + + err = t3_pin_pages(toep->tp_rx_dmat, toep->tp_dmamap, addr, len, &gl, p->ubuf); + if (err) + return (err); + if (gl) { + if (p->ubuf) + t3_free_ddp_gl(p->ubuf); + p->ubuf = gl; + t3_setup_ppods(so, gl, pages2ppods(gl->dgl_nelem), p->ubuf_tag, len, + gl->dgl_offset, 0); + } + *length = len; + return (0); +} + +/* + * + */ +void +t3_cancel_ubuf(struct toepcb *toep) +{ + struct ddp_state *p = &toep->tp_ddp_state; + int ubuf_pending = t3_ddp_ubuf_pending(toep); + struct socket *so = toeptoso(toep); + int err = 0, count=0; + + if (p->ubuf == NULL) + return; + + SOCKBUF_LOCK_ASSERT(&so->so_rcv); + p->cancel_ubuf = 1; + while (ubuf_pending && !(so->so_rcv.sb_state & SBS_CANTRCVMORE)) { +#ifdef T3_TRACE + T3_TRACE3(TB(p), + "t3_cancel_ubuf: flags0 0x%x flags1 0x%x get_tcb_count %d", + p->buf_state[0].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY), + p->buf_state[1].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY), + p->get_tcb_count); +#endif + CTR3(KTR_TOM, + "t3_cancel_ubuf: flags0 0x%x flags1 0x%x get_tcb_count %d", + p->buf_state[0].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY), + p->buf_state[1].flags & (DDP_BF_NOFLIP | 
DDP_BF_NOCOPY), + p->get_tcb_count); + if (p->get_tcb_count == 0) + t3_cancel_ddpbuf(toep, p->cur_buf); + else + CTR5(KTR_TOM, "waiting err=%d get_tcb_count=%d timeo=%d so=%p SBS_CANTRCVMORE=%d", + err, p->get_tcb_count, so->so_rcv.sb_timeo, so, + !!(so->so_rcv.sb_state & SBS_CANTRCVMORE)); + + while (p->get_tcb_count && !(so->so_rcv.sb_state & SBS_CANTRCVMORE)) { + if (count & 0xfffffff) + CTR5(KTR_TOM, "waiting err=%d get_tcb_count=%d timeo=%d so=%p count=%d", + err, p->get_tcb_count, so->so_rcv.sb_timeo, so, count); + count++; + err = sbwait(&so->so_rcv); + } + ubuf_pending = t3_ddp_ubuf_pending(toep); + } + p->cancel_ubuf = 0; +} + +#define OVERLAY_MASK (V_TF_DDP_PSH_NO_INVALIDATE0(1) | \ + V_TF_DDP_PSH_NO_INVALIDATE1(1) | \ + V_TF_DDP_BUF1_FLUSH(1) | \ + V_TF_DDP_BUF0_FLUSH(1) | \ + V_TF_DDP_PUSH_DISABLE_1(1) | \ + V_TF_DDP_PUSH_DISABLE_0(1) | \ + V_TF_DDP_INDICATE_OUT(1)) + +/* + * Post a user buffer as an overlay on top of the current kernel buffer. + */ +int +t3_overlay_ubuf(struct socket *so, const struct uio *uio, + int nonblock, int rcv_flags, int modulate, int post_kbuf) +{ + int err, len, ubuf_idx; + unsigned long flags; + struct toepcb *toep = sototcpcb(so)->t_toe; + struct ddp_state *p = &toep->tp_ddp_state; + + if (p->kbuf[0] == NULL) { + return (EINVAL); + } + + SOCKBUF_LOCK_ASSERT(&so->so_rcv); + err = setup_uio_ppods(so, uio, 0, &len); + if (err) { + return (err); + } + + ubuf_idx = p->kbuf_idx; + p->buf_state[ubuf_idx].flags = DDP_BF_NOFLIP; + /* Use existing offset */ + /* Don't need to update .gl, user buffer isn't copied. */ + p->cur_buf = ubuf_idx; + + flags = select_ddp_flags(so, ubuf_idx, nonblock, rcv_flags); + + if (post_kbuf) { + struct ddp_buf_state *dbs = &p->buf_state[ubuf_idx ^ 1]; + + dbs->cur_offset = 0; + dbs->flags = 0; + dbs->gl = p->kbuf[ubuf_idx ^ 1]; + p->kbuf_idx ^= 1; + flags |= p->kbuf_idx ? 
+ V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_PUSH_DISABLE_1(0) : + V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_PUSH_DISABLE_0(0); + } + + if (ubuf_idx == 0) { + t3_overlay_ddpbuf(toep, 0, p->ubuf_tag << 6, p->kbuf_tag[1] << 6, + len); + t3_setup_ddpbufs(toep, 0, 0, p->kbuf[1]->dgl_length, 0, + flags, + OVERLAY_MASK | flags, 1); + } else { + t3_overlay_ddpbuf(toep, 1, p->kbuf_tag[0] << 6, p->ubuf_tag << 6, + len); + t3_setup_ddpbufs(toep, p->kbuf[0]->dgl_length, 0, 0, 0, + flags, + OVERLAY_MASK | flags, 1); + } +#ifdef T3_TRACE + T3_TRACE5(TIDTB(so), + "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x ubuf_idx %d " + " kbuf_idx %d", + p->ubuf_tag, flags, OVERLAY_MASK, ubuf_idx, p->kbuf_idx); +#endif + CTR3(KTR_TOM, + "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x", + p->ubuf_tag, flags, OVERLAY_MASK); + CTR3(KTR_TOM, + "t3_overlay_ubuf: ubuf_idx %d kbuf_idx %d post_kbuf %d", + ubuf_idx, p->kbuf_idx, post_kbuf); + + return (0); +} + +/* + * Clean up DDP state that needs to survive until socket close time, such as the + * DDP buffers. The buffers are already unmapped at this point as unmapping + * needs the PCI device and a socket may close long after the device is removed. + */ +void +t3_cleanup_ddp(struct toepcb *toep) +{ + struct ddp_state *p = &toep->tp_ddp_state; + int idx; + + for (idx = 0; idx < NUM_DDP_KBUF; idx++) + if (p->kbuf[idx]) { + ddp_gl_free_pages(p->kbuf[idx], 0); + free(p->kbuf[idx], M_DEVBUF); + } + if (p->ubuf) { + ddp_gl_free_pages(p->ubuf, 0); + free(p->ubuf, M_DEVBUF); + p->ubuf = NULL; + } + toep->tp_ulp_mode = 0; +} + +/* + * This is a companion to t3_cleanup_ddp() and releases the HW resources + * associated with a connection's DDP state, such as the page pods. + * It's called when HW is done with a connection. The rest of the state + * remains available until both HW and the app are done with the connection. 
+ */ +void +t3_release_ddp_resources(struct toepcb *toep) +{ + struct ddp_state *p = &toep->tp_ddp_state; + struct tom_data *d = TOM_DATA(toep->tp_toedev); + int idx; + + for (idx = 0; idx < NUM_DDP_KBUF; idx++) { + t3_free_ppods(d, p->kbuf_tag[idx], + p->kbuf_nppods[idx]); + unmap_ddp_gl(p->kbuf[idx]); + } + + if (p->ubuf_nppods) { + t3_free_ppods(d, p->ubuf_tag, p->ubuf_nppods); + p->ubuf_nppods = 0; + } + if (p->ubuf) + unmap_ddp_gl(p->ubuf); + +} + +void +t3_post_kbuf(struct socket *so, int modulate, int nonblock) +{ + struct toepcb *toep = sototcpcb(so)->t_toe; + struct ddp_state *p = &toep->tp_ddp_state; + + t3_set_ddp_tag(so, p->cur_buf, p->kbuf_tag[p->cur_buf] << 6); + t3_set_ddp_buf(so, p->cur_buf, 0, p->kbuf[p->cur_buf]->dgl_length); + t3_repost_kbuf(so, p->cur_buf, modulate, 1, nonblock); +#ifdef T3_TRACE + T3_TRACE1(TIDTB(so), + "t3_post_kbuf: cur_buf = kbuf_idx = %u ", p->cur_buf); +#endif + CTR1(KTR_TOM, + "t3_post_kbuf: cur_buf = kbuf_idx = %u ", p->cur_buf); +} + +/* + * Prepare a socket for DDP. Must be called when the socket is known to be + * open. 
+ */ +int +t3_enter_ddp(struct socket *so, unsigned int kbuf_size, unsigned int waitall, int nonblock) +{ + int i, err = ENOMEM; + static vm_pindex_t color; + unsigned int nppods, kbuf_pages, idx = 0; + struct toepcb *toep = sototcpcb(so)->t_toe; + struct ddp_state *p = &toep->tp_ddp_state; + struct tom_data *d = TOM_DATA(toep->tp_toedev); + + + if (kbuf_size > M_TCB_RX_DDP_BUF0_LEN) + return (EINVAL); + + SOCKBUF_LOCK_ASSERT(&so->so_rcv); + + kbuf_pages = (kbuf_size + PAGE_SIZE - 1) >> PAGE_SHIFT; + nppods = pages2ppods(kbuf_pages); + + p->kbuf_noinval = !!waitall; + p->kbuf_tag[NUM_DDP_KBUF - 1] = -1; + for (idx = 0; idx < NUM_DDP_KBUF; idx++) { + p->kbuf[idx] = + malloc(sizeof (struct ddp_gather_list) + kbuf_pages * + sizeof(vm_page_t *), M_DEVBUF, M_NOWAIT|M_ZERO); + if (p->kbuf[idx] == NULL) + goto err; + err = t3_alloc_ppods(d, nppods, &p->kbuf_tag[idx]); + if (err) { + printf("t3_alloc_ppods failed err=%d\n", err); + goto err; + } + + p->kbuf_nppods[idx] = nppods; + p->kbuf[idx]->dgl_length = kbuf_size; + p->kbuf[idx]->dgl_offset = 0; + p->kbuf[idx]->dgl_nelem = kbuf_pages; + + for (i = 0; i < kbuf_pages; ++i) { + p->kbuf[idx]->dgl_pages[i] = vm_page_alloc(NULL, color, + VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED | + VM_ALLOC_ZERO); + if (p->kbuf[idx]->dgl_pages[i] == NULL) { + p->kbuf[idx]->dgl_nelem = i; + printf("failed to allocate kbuf pages\n"); + goto err; + } + } +#ifdef NEED_BUSDMA + /* + * XXX we'll need this for VT-d or any platform with an iommu :-/ + * + */ + for (i = 0; i < kbuf_pages; ++i) + p->kbuf[idx]->phys_addr[i] = + pci_map_page(p->pdev, p->kbuf[idx]->pages[i], + 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); +#endif + t3_setup_ppods(so, p->kbuf[idx], nppods, p->kbuf_tag[idx], + p->kbuf[idx]->dgl_length, 0, 0); + } + cxgb_log_tcb(TOEP_T3C_DEV(toep)->adapter, toep->tp_tid); + + t3_set_ddp_tag(so, 0, p->kbuf_tag[0] << 6); + t3_set_ddp_buf(so, 0, 0, p->kbuf[0]->dgl_length); + t3_repost_kbuf(so, 0, 0, 1, nonblock); + + 
t3_set_rcv_coalesce_enable(so, + TOM_TUNABLE(TOE_DEV(so), ddp_rcvcoalesce)); + +#ifdef T3_TRACE + T3_TRACE4(TIDTB(so), + "t3_enter_ddp: kbuf_size %u waitall %u tag0 %d tag1 %d", + kbuf_size, waitall, p->kbuf_tag[0], p->kbuf_tag[1]); +#endif + CTR4(KTR_TOM, + "t3_enter_ddp: kbuf_size %u waitall %u tag0 %d tag1 %d", + kbuf_size, waitall, p->kbuf_tag[0], p->kbuf_tag[1]); + DELAY(100000); + cxgb_log_tcb(TOEP_T3C_DEV(toep)->adapter, toep->tp_tid); + return (0); + +err: + t3_release_ddp_resources(toep); + t3_cleanup_ddp(toep); + return (err); +} + +int +t3_ddp_copy(const struct mbuf *m, int offset, struct uio *uio, int len) +{ + int page_off, resid_init, err; + struct ddp_gather_list *gl = (struct ddp_gather_list *)m->m_ddp_gl; + + resid_init = uio->uio_resid; + + if (!gl->dgl_pages) + panic("pages not set\n"); + + offset += gl->dgl_offset + m->m_cur_offset; + page_off = offset & PAGE_MASK; + KASSERT(len <= gl->dgl_length, + ("len=%d > dgl_length=%d in ddp_copy\n", len, gl->dgl_length)); + + err = uiomove_fromphys(gl->dgl_pages, page_off, len, uio); + return (err); +} + + +/* + * Allocate n page pods. Returns -1 on failure or the page pod tag. 
+ */ +int +t3_alloc_ppods(struct tom_data *td, unsigned int n, int *ptag) +{ + unsigned int i, j; + + if (__predict_false(!td->ppod_map)) { + printf("ppod_map not set\n"); + return (EINVAL); + } + + mtx_lock(&td->ppod_map_lock); + for (i = 0; i < td->nppods; ) { + + for (j = 0; j < n; ++j) /* scan ppod_map[i..i+n-1] */ + if (td->ppod_map[i + j]) { + i = i + j + 1; + goto next; + } + memset(&td->ppod_map[i], 1, n); /* allocate range */ + mtx_unlock(&td->ppod_map_lock); + CTR2(KTR_TOM, + "t3_alloc_ppods: n=%u tag=%u", n, i); + *ptag = i; + return (0); + next: ; + } + mtx_unlock(&td->ppod_map_lock); + return (0); +} + +void +t3_free_ppods(struct tom_data *td, unsigned int tag, unsigned int n) +{ + /* No need to take ppod_lock here */ + memset(&td->ppod_map[tag], 0, n); +} diff --git a/sys/dev/cxgb/ulp/tom/cxgb_defs.h b/sys/dev/cxgb/ulp/tom/cxgb_defs.h index 9077295..8989fd9 100644 --- a/sys/dev/cxgb/ulp/tom/cxgb_defs.h +++ b/sys/dev/cxgb/ulp/tom/cxgb_defs.h @@ -40,6 +40,13 @@ $FreeBSD$ #define toeptoso(toep) ((toep)->tp_tp->t_inpcb->inp_socket) #define sototoep(so) (sototcpcb((so))->t_toe) +#define TRACE_ENTER printf("%s:%s entered\n", __FUNCTION__, __FILE__) +#define TRACE_EXIT printf("%s:%s:%d exited\n", __FUNCTION__, __FILE__, __LINE__) + +#define KTR_TOM KTR_SPARE2 +#define KTR_TCB KTR_SPARE3 + +struct toepcb; struct listen_ctx; typedef void (*defer_handler_t)(struct toedev *dev, struct mbuf *m); @@ -54,7 +61,8 @@ void t3_init_listen_cpl_handlers(void); int t3_init_cpl_io(void); void t3_init_wr_tab(unsigned int wr_len); uint32_t t3_send_rx_credits(struct tcpcb *tp, uint32_t credits, uint32_t dack, int nofail); -void t3_cleanup_rbuf(struct tcpcb *tp); +void t3_send_rx_modulate(struct toepcb *toep); +void t3_cleanup_rbuf(struct tcpcb *tp, int copied); void t3_init_socket_ops(void); void t3_install_socket_ops(struct socket *so); diff --git a/sys/dev/cxgb/ulp/tom/cxgb_listen.c b/sys/dev/cxgb/ulp/tom/cxgb_listen.c index a88b26e..acbad6f 100644 --- 
a/sys/dev/cxgb/ulp/tom/cxgb_listen.c +++ b/sys/dev/cxgb/ulp/tom/cxgb_listen.c @@ -180,7 +180,6 @@ listen_hash_add(struct tom_data *d, struct socket *so, unsigned int stid) return p; } -#if 0 /* * Given a pointer to a listening socket return its server TID by consulting * the socket->stid map. Returns -1 if the socket is not in the map. @@ -191,16 +190,15 @@ listen_hash_find(struct tom_data *d, struct socket *so) int stid = -1, bucket = listen_hashfn(so); struct listen_info *p; - spin_lock(&d->listen_lock); + mtx_lock(&d->listen_lock); for (p = d->listen_hash_tab[bucket]; p; p = p->next) - if (p->sk == sk) { + if (p->so == so) { stid = p->stid; break; } - spin_unlock(&d->listen_lock); + mtx_unlock(&d->listen_lock); return stid; } -#endif /* * Delete the listen_info structure for a listening socket. Returns the server @@ -244,28 +242,24 @@ t3_listen_start(struct toedev *dev, struct socket *so, struct t3cdev *cdev) if (!TOM_TUNABLE(dev, activated)) return; - printf("start listen\n"); + if (listen_hash_find(d, so) != -1) + return; - ctx = malloc(sizeof(*ctx), M_CXGB, M_NOWAIT); + CTR1(KTR_TOM, "start listen on port %u", ntohs(inp->inp_lport)); + ctx = malloc(sizeof(*ctx), M_CXGB, M_NOWAIT|M_ZERO); if (!ctx) return; ctx->tom_data = d; ctx->lso = so; - ctx->ulp_mode = 0; /* DDP if the default */ + ctx->ulp_mode = TOM_TUNABLE(dev, ddp) && !(so->so_options & SO_NO_DDP) ? ULP_MODE_TCPDDP : 0; LIST_INIT(&ctx->synq_head); stid = cxgb_alloc_stid(d->cdev, d->client, ctx); if (stid < 0) goto free_ctx; -#ifdef notyet - /* - * XXX need to mark inpcb as referenced - */ - sock_hold(sk); -#endif m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) goto free_stid; diff --git a/sys/dev/cxgb/ulp/tom/cxgb_t3_ddp.h b/sys/dev/cxgb/ulp/tom/cxgb_t3_ddp.h index 9fa42b5..e37c9b1 100644 --- a/sys/dev/cxgb/ulp/tom/cxgb_t3_ddp.h +++ b/sys/dev/cxgb/ulp/tom/cxgb_t3_ddp.h @@ -1,4 +1,3 @@ - /************************************************************************** Copyright (c) 2007, Chelsio Inc. 
@@ -86,7 +85,6 @@ struct pagepod { #define M_PPOD_PGSZ 0x3 #define V_PPOD_PGSZ(x) ((x) << S_PPOD_PGSZ) -struct pci_dev; #include #include #include @@ -96,8 +94,7 @@ struct ddp_gather_list { unsigned int dgl_length; unsigned int dgl_offset; unsigned int dgl_nelem; - vm_page_t *dgl_pages; - bus_addr_t dgl_phys_addr[0]; + vm_page_t dgl_pages[0]; }; struct ddp_buf_state { @@ -107,7 +104,6 @@ struct ddp_buf_state { }; struct ddp_state { - struct pci_dev *pdev; struct ddp_buf_state buf_state[2]; /* per buffer state */ int cur_buf; unsigned short kbuf_noinval; @@ -119,6 +115,7 @@ struct ddp_state { int get_tcb_count; unsigned int kbuf_posted; int cancel_ubuf; + int user_ddp_pending; unsigned int kbuf_nppods[NUM_DDP_KBUF]; unsigned int kbuf_tag[NUM_DDP_KBUF]; struct ddp_gather_list *kbuf[NUM_DDP_KBUF]; /* kernel buffer for DDP prefetch */ @@ -132,54 +129,51 @@ enum { DDP_BF_PSH = 1 << 3, /* set in skb->flags if the a DDP was completed with a segment having the PSH flag set */ + DDP_BF_NODATA = 1 << 4, /* buffer completed before filling */ }; -#ifdef notyet +#include + /* * Returns 1 if a UBUF DMA buffer might be active. */ -static inline int t3_ddp_ubuf_pending(struct sock *so) +static inline int +t3_ddp_ubuf_pending(struct toepcb *toep) { - struct tcp_sock *tp = tcp_sk(sk); - struct ddp_state *p = DDP_STATE(tp); + struct ddp_state *p = &toep->tp_ddp_state; /* When the TOM_TUNABLE(ddp) is enabled, we're always in ULP_MODE DDP, * but DDP_STATE() is only valid if the connection actually enabled * DDP. 
*/ - if (!p) - return 0; + if (p->kbuf[0] == NULL) + return (0); return (p->buf_state[0].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY)) || (p->buf_state[1].flags & (DDP_BF_NOFLIP | DDP_BF_NOCOPY)); } -#endif int t3_setup_ppods(struct socket *so, const struct ddp_gather_list *gl, unsigned int nppods, unsigned int tag, unsigned int maxoff, unsigned int pg_off, unsigned int color); -int t3_alloc_ppods(struct tom_data *td, unsigned int n); +int t3_alloc_ppods(struct tom_data *td, unsigned int n, int *tag); void t3_free_ppods(struct tom_data *td, unsigned int tag, unsigned int n); -void t3_free_ddp_gl(struct pci_dev *pdev, struct ddp_gather_list *gl); -int t3_pin_pages(struct pci_dev *pdev, unsigned long uaddr, size_t len, - struct ddp_gather_list **newgl, - const struct ddp_gather_list *gl); -int t3_ddp_copy(const struct mbuf *skb, int offset, struct iovec *to, - int len); +void t3_free_ddp_gl(struct ddp_gather_list *gl); +int t3_ddp_copy(const struct mbuf *m, int offset, struct uio *uio, int len); //void t3_repost_kbuf(struct socket *so, int modulate, int activate); -void t3_post_kbuf(struct socket *so, int modulate); -int t3_post_ubuf(struct socket *so, const struct iovec *iov, int nonblock, +void t3_post_kbuf(struct socket *so, int modulate, int nonblock); +int t3_post_ubuf(struct socket *so, const struct uio *uio, int nonblock, int rcv_flags, int modulate, int post_kbuf); -void t3_cancel_ubuf(struct socket *so); -int t3_overlay_ubuf(struct socket *so, const struct iovec *iov, int nonblock, - int rcv_flags, int modulate, int post_kbuf); -int t3_enter_ddp(struct socket *so, unsigned int kbuf_size, unsigned int waitall); -void t3_cleanup_ddp(struct socket *so); +void t3_cancel_ubuf(struct toepcb *toep); +int t3_overlay_ubuf(struct socket *so, const struct uio *uio, int nonblock, + int rcv_flags, int modulate, int post_kbuf); +int t3_enter_ddp(struct socket *so, unsigned int kbuf_size, unsigned int waitall, int nonblock); +void t3_cleanup_ddp(struct toepcb *toep); void 
t3_release_ddp_resources(struct toepcb *toep); -void t3_cancel_ddpbuf(struct socket *so, unsigned int bufidx); -void t3_overlay_ddpbuf(struct socket *so, unsigned int bufidx, unsigned int tag0, +void t3_cancel_ddpbuf(struct toepcb *, unsigned int bufidx); +void t3_overlay_ddpbuf(struct toepcb *, unsigned int bufidx, unsigned int tag0, unsigned int tag1, unsigned int len); -void t3_setup_ddpbufs(struct socket *so, unsigned int len0, unsigned int offset0, +void t3_setup_ddpbufs(struct toepcb *, unsigned int len0, unsigned int offset0, unsigned int len1, unsigned int offset1, uint64_t ddp_flags, uint64_t flag_mask, int modulate); #endif /* T3_DDP_H */ diff --git a/sys/dev/cxgb/ulp/tom/cxgb_tcp_subr.c b/sys/dev/cxgb/ulp/tom/cxgb_tcp_subr.c deleted file mode 100644 index 2eca099..0000000 --- a/sys/dev/cxgb/ulp/tom/cxgb_tcp_subr.c +++ /dev/null @@ -1,694 +0,0 @@ -/*- - * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_compat.h" -#include "opt_inet.h" -#include "opt_inet6.h" -#include "opt_ipsec.h" -#include "opt_mac.h" -#include "opt_tcpdebug.h" - -#include -#include -#include -#include -#include -#include -#include -#ifdef INET6 -#include -#endif -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -#include -#include -#include -#ifdef INET6 -#include -#endif -#include -#ifdef INET6 -#include -#endif -#include -#include -#ifdef INET6 -#include -#include -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef INET6 -#include -#endif -#include -#ifdef TCPDEBUG -#include -#endif -#include - -#ifdef IPSEC -#include -#include -#ifdef INET6 -#include -#endif -#include -#endif /*IPSEC*/ - -#include -#include - -#include - -#include - - -SYSCTL_NODE(_net_inet_tcp, 0, cxgb, CTLFLAG_RW, 0, "chelsio TOE"); - -static int tcp_log_debug = 0; -SYSCTL_INT(_net_inet_tcp_cxgb, OID_AUTO, log_debug, CTLFLAG_RW, - &tcp_log_debug, 0, "Log errors caused by incoming TCP segments"); - -static int tcp_tcbhashsize = 0; -SYSCTL_INT(_net_inet_tcp_cxgb, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN, - &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable"); - -static int do_tcpdrain = 1; -SYSCTL_INT(_net_inet_tcp_cxgb, OID_AUTO, do_tcpdrain, CTLFLAG_RW, - &do_tcpdrain, 0, - "Enable tcp_drain 
routine for extra help when low on mbufs"); - -SYSCTL_INT(_net_inet_tcp_cxgb, OID_AUTO, pcbcount, CTLFLAG_RD, - &tcbinfo.ipi_count, 0, "Number of active PCBs"); - -static int icmp_may_rst = 1; -SYSCTL_INT(_net_inet_tcp_cxgb, OID_AUTO, icmp_may_rst, CTLFLAG_RW, - &icmp_may_rst, 0, - "Certain ICMP unreachable messages may abort connections in SYN_SENT"); - -static int tcp_isn_reseed_interval = 0; -SYSCTL_INT(_net_inet_tcp_cxgb, OID_AUTO, isn_reseed_interval, CTLFLAG_RW, - &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret"); - -/* - * TCP bandwidth limiting sysctls. Note that the default lower bound of - * 1024 exists only for debugging. A good production default would be - * something like 6100. - */ -SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0, - "TCP inflight data limiting"); - -static int tcp_inflight_enable = 1; -SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, enable, CTLFLAG_RW, - &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting"); - -static int tcp_inflight_debug = 0; -SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, debug, CTLFLAG_RW, - &tcp_inflight_debug, 0, "Debug TCP inflight calculations"); - -static int tcp_inflight_rttthresh; -SYSCTL_PROC(_net_inet_tcp_inflight, OID_AUTO, rttthresh, CTLTYPE_INT|CTLFLAG_RW, - &tcp_inflight_rttthresh, 0, sysctl_msec_to_ticks, "I", - "RTT threshold below which inflight will deactivate itself"); - -static int tcp_inflight_min = 6144; -SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, min, CTLFLAG_RW, - &tcp_inflight_min, 0, "Lower-bound for TCP inflight window"); - -static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT; -SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, max, CTLFLAG_RW, - &tcp_inflight_max, 0, "Upper-bound for TCP inflight window"); - -static int tcp_inflight_stab = 20; -SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, stab, CTLFLAG_RW, - &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets"); - -uma_zone_t sack_hole_zone; - -static struct inpcb 
*tcp_notify(struct inpcb *, int); -static struct inpcb *cxgb_tcp_drop_syn_sent(struct inpcb *inp, int errno); - -/* - * Target size of TCP PCB hash tables. Must be a power of two. - * - * Note that this can be overridden by the kernel environment - * variable net.inet.tcp.tcbhashsize - */ -#ifndef TCBHASHSIZE -#define TCBHASHSIZE 512 -#endif - -/* - * XXX - * Callouts should be moved into struct tcp directly. They are currently - * separate because the tcpcb structure is exported to userland for sysctl - * parsing purposes, which do not know about callouts. - */ -struct tcpcb_mem { - struct tcpcb tcb; - struct tcp_timer tt; -}; - -MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers"); - -/* - * Drop a TCP connection, reporting - * the specified error. If connection is synchronized, - * then send a RST to peer. - */ -struct tcpcb * -cxgb_tcp_drop(struct tcpcb *tp, int errno) -{ - struct socket *so = tp->t_inpcb->inp_socket; - - INP_INFO_WLOCK_ASSERT(&tcbinfo); - INP_LOCK_ASSERT(tp->t_inpcb); - - if (TCPS_HAVERCVDSYN(tp->t_state)) { - tp->t_state = TCPS_CLOSED; - (void) tcp_gen_reset(tp); - tcpstat.tcps_drops++; - } else - tcpstat.tcps_conndrops++; - if (errno == ETIMEDOUT && tp->t_softerror) - errno = tp->t_softerror; - so->so_error = errno; - return (cxgb_tcp_close(tp)); -} - -/* - * Attempt to close a TCP control block, marking it as dropped, and freeing - * the socket if we hold the only reference. 
- */ -struct tcpcb * -cxgb_tcp_close(struct tcpcb *tp) -{ - struct inpcb *inp = tp->t_inpcb; - struct socket *so; - - INP_INFO_WLOCK_ASSERT(&tcbinfo); - INP_LOCK_ASSERT(inp); - - if (tp->t_state == TCPS_LISTEN) - tcp_gen_listen_close(tp); - in_pcbdrop(inp); - tcpstat.tcps_closed++; - KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL")); - so = inp->inp_socket; - soisdisconnected(so); - if (inp->inp_vflag & INP_SOCKREF) { - KASSERT(so->so_state & SS_PROTOREF, - ("tcp_close: !SS_PROTOREF")); - inp->inp_vflag &= ~INP_SOCKREF; - INP_UNLOCK(inp); - ACCEPT_LOCK(); - SOCK_LOCK(so); - so->so_state &= ~SS_PROTOREF; - sofree(so); - return (NULL); - } - return (tp); -} - -/* - * Notify a tcp user of an asynchronous error; - * store error as soft error, but wake up user - * (for now, won't do anything until can select for soft error). - * - * Do not wake up user since there currently is no mechanism for - * reporting soft errors (yet - a kqueue filter may be added). - */ -static struct inpcb * -tcp_notify(struct inpcb *inp, int error) -{ - struct tcpcb *tp; - - INP_INFO_WLOCK_ASSERT(&tcbinfo); - INP_LOCK_ASSERT(inp); - - if ((inp->inp_vflag & INP_TIMEWAIT) || - (inp->inp_vflag & INP_DROPPED)) - return (inp); - - tp = intotcpcb(inp); - KASSERT(tp != NULL, ("tcp_notify: tp == NULL")); - - /* - * Ignore some errors if we are hooked up. - * If connection hasn't completed, has retransmitted several times, - * and receives a second error, give up now. This is better - * than waiting a long time to establish a connection that - * can never complete. 
- */ - if (tp->t_state == TCPS_ESTABLISHED && - (error == EHOSTUNREACH || error == ENETUNREACH || - error == EHOSTDOWN)) { - return (inp); - } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 && - tp->t_softerror) { - tp = cxgb_tcp_drop(tp, error); - if (tp != NULL) - return (inp); - else - return (NULL); - } else { - tp->t_softerror = error; - return (inp); - } -#if 0 - wakeup( &so->so_timeo); - sorwakeup(so); - sowwakeup(so); -#endif -} - -void -cxgb_tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip) -{ - struct ip *ip = vip; - struct tcphdr *th; - struct in_addr faddr; - struct inpcb *inp; - struct tcpcb *tp; - struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify; - struct icmp *icp; - struct in_conninfo inc; - tcp_seq icmp_tcp_seq; - int mtu; - - faddr = ((struct sockaddr_in *)sa)->sin_addr; - if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) - return; - - if (cmd == PRC_MSGSIZE) - notify = tcp_mtudisc; - else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB || - cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip) - notify = cxgb_tcp_drop_syn_sent; - /* - * Redirects don't need to be handled up here. - */ - else if (PRC_IS_REDIRECT(cmd)) - return; - /* - * Source quench is depreciated. - */ - else if (cmd == PRC_QUENCH) - return; - /* - * Hostdead is ugly because it goes linearly through all PCBs. - * XXX: We never get this from ICMP, otherwise it makes an - * excellent DoS attack on machines with many connections. 
- */ - else if (cmd == PRC_HOSTDEAD) - ip = NULL; - else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) - return; - if (ip != NULL) { - icp = (struct icmp *)((caddr_t)ip - - offsetof(struct icmp, icmp_ip)); - th = (struct tcphdr *)((caddr_t)ip - + (ip->ip_hl << 2)); - INP_INFO_WLOCK(&tcbinfo); - inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport, - ip->ip_src, th->th_sport, 0, NULL); - if (inp != NULL) { - INP_LOCK(inp); - if (!(inp->inp_vflag & INP_TIMEWAIT) && - !(inp->inp_vflag & INP_DROPPED) && - !(inp->inp_socket == NULL)) { - icmp_tcp_seq = htonl(th->th_seq); - tp = intotcpcb(inp); - if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) && - SEQ_LT(icmp_tcp_seq, tp->snd_max)) { - if (cmd == PRC_MSGSIZE) { - /* - * MTU discovery: - * If we got a needfrag set the MTU - * in the route to the suggested new - * value (if given) and then notify. - */ - bzero(&inc, sizeof(inc)); - inc.inc_flags = 0; /* IPv4 */ - inc.inc_faddr = faddr; - - mtu = ntohs(icp->icmp_nextmtu); - /* - * If no alternative MTU was - * proposed, try the next smaller - * one. ip->ip_len has already - * been swapped in icmp_input(). - */ - if (!mtu) - mtu = ip_next_mtu(ip->ip_len, - 1); - if (mtu < max(296, (tcp_minmss) - + sizeof(struct tcpiphdr))) - mtu = 0; - if (!mtu) - mtu = tcp_mssdflt - + sizeof(struct tcpiphdr); - /* - * Only cache the the MTU if it - * is smaller than the interface - * or route MTU. tcp_mtudisc() - * will do right thing by itself. 
- */ - if (mtu <= tcp_maxmtu(&inc, NULL)) - tcp_hc_updatemtu(&inc, mtu); - } - - inp = (*notify)(inp, inetctlerrmap[cmd]); - } - } - if (inp != NULL) - INP_UNLOCK(inp); - } else { - inc.inc_fport = th->th_dport; - inc.inc_lport = th->th_sport; - inc.inc_faddr = faddr; - inc.inc_laddr = ip->ip_src; -#ifdef INET6 - inc.inc_isipv6 = 0; -#endif - syncache_unreach(&inc, th); - } - INP_INFO_WUNLOCK(&tcbinfo); - } else - in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify); -} - -#ifdef INET6 -void -tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d) -{ - struct tcphdr th; - struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify; - struct ip6_hdr *ip6; - struct mbuf *m; - struct ip6ctlparam *ip6cp = NULL; - const struct sockaddr_in6 *sa6_src = NULL; - int off; - struct tcp_portonly { - u_int16_t th_sport; - u_int16_t th_dport; - } *thp; - - if (sa->sa_family != AF_INET6 || - sa->sa_len != sizeof(struct sockaddr_in6)) - return; - - if (cmd == PRC_MSGSIZE) - notify = tcp_mtudisc; - else if (!PRC_IS_REDIRECT(cmd) && - ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) - return; - /* Source quench is depreciated. */ - else if (cmd == PRC_QUENCH) - return; - - /* if the parameter is from icmp6, decode it. */ - if (d != NULL) { - ip6cp = (struct ip6ctlparam *)d; - m = ip6cp->ip6c_m; - ip6 = ip6cp->ip6c_ip6; - off = ip6cp->ip6c_off; - sa6_src = ip6cp->ip6c_src; - } else { - m = NULL; - ip6 = NULL; - off = 0; /* fool gcc */ - sa6_src = &sa6_any; - } - - if (ip6 != NULL) { - struct in_conninfo inc; - /* - * XXX: We assume that when IPV6 is non NULL, - * M and OFF are valid. 
- */ - - /* check if we can safely examine src and dst ports */ - if (m->m_pkthdr.len < off + sizeof(*thp)) - return; - - bzero(&th, sizeof(th)); - m_copydata(m, off, sizeof(*thp), (caddr_t)&th); - - in6_pcbnotify(&tcbinfo, sa, th.th_dport, - (struct sockaddr *)ip6cp->ip6c_src, - th.th_sport, cmd, NULL, notify); - - inc.inc_fport = th.th_dport; - inc.inc_lport = th.th_sport; - inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr; - inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr; - inc.inc_isipv6 = 1; - INP_INFO_WLOCK(&tcbinfo); - syncache_unreach(&inc, &th); - INP_INFO_WUNLOCK(&tcbinfo); - } else - in6_pcbnotify(&tcbinfo, sa, 0, (const struct sockaddr *)sa6_src, - 0, cmd, NULL, notify); -} -#endif /* INET6 */ - - -/* - * Following is where TCP initial sequence number generation occurs. - * - * There are two places where we must use initial sequence numbers: - * 1. In SYN-ACK packets. - * 2. In SYN packets. - * - * All ISNs for SYN-ACK packets are generated by the syncache. See - * tcp_syncache.c for details. - * - * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling - * depends on this property. In addition, these ISNs should be - * unguessable so as to prevent connection hijacking. To satisfy - * the requirements of this situation, the algorithm outlined in - * RFC 1948 is used, with only small modifications. - * - * Implementation details: - * - * Time is based off the system timer, and is corrected so that it - * increases by one megabyte per second. This allows for proper - * recycling on high speed LANs while still leaving over an hour - * before rollover. - * - * As reading the *exact* system time is too expensive to be done - * whenever setting up a TCP connection, we increment the time - * offset in two ways. First, a small random positive increment - * is added to isn_offset for each connection that is set up. 
- * Second, the function tcp_isn_tick fires once per clock tick - * and increments isn_offset as necessary so that sequence numbers - * are incremented at approximately ISN_BYTES_PER_SECOND. The - * random positive increments serve only to ensure that the same - * exact sequence number is never sent out twice (as could otherwise - * happen when a port is recycled in less than the system tick - * interval.) - * - * net.inet.tcp.isn_reseed_interval controls the number of seconds - * between seeding of isn_secret. This is normally set to zero, - * as reseeding should not be necessary. - * - * Locking of the global variables isn_secret, isn_last_reseed, isn_offset, - * isn_offset_old, and isn_ctx is performed using the TCP pcbinfo lock. In - * general, this means holding an exclusive (write) lock. - */ - -#define ISN_BYTES_PER_SECOND 1048576 -#define ISN_STATIC_INCREMENT 4096 -#define ISN_RANDOM_INCREMENT (4096 - 1) - - -/* - * When a specific ICMP unreachable message is received and the - * connection state is SYN-SENT, drop the connection. This behavior - * is controlled by the icmp_may_rst sysctl. - */ -static struct inpcb * -cxgb_tcp_drop_syn_sent(struct inpcb *inp, int errno) -{ - struct tcpcb *tp; - - INP_INFO_WLOCK_ASSERT(&tcbinfo); - INP_LOCK_ASSERT(inp); - - if ((inp->inp_vflag & INP_TIMEWAIT) || - (inp->inp_vflag & INP_DROPPED)) - return (inp); - - tp = intotcpcb(inp); - if (tp->t_state != TCPS_SYN_SENT) - return (inp); - - tp = cxgb_tcp_drop(tp, errno); - if (tp != NULL) - return (inp); - else - return (NULL); -} - -static int -cxgb_sysctl_drop(SYSCTL_HANDLER_ARGS) -{ - /* addrs[0] is a foreign socket, addrs[1] is a local one. 
*/ - struct sockaddr_storage addrs[2]; - struct inpcb *inp; - struct tcpcb *tp; - struct tcptw *tw; - struct sockaddr_in *fin, *lin; -#ifdef INET6 - struct sockaddr_in6 *fin6, *lin6; - struct in6_addr f6, l6; -#endif - int error; - - inp = NULL; - fin = lin = NULL; -#ifdef INET6 - fin6 = lin6 = NULL; -#endif - error = 0; - - if (req->oldptr != NULL || req->oldlen != 0) - return (EINVAL); - if (req->newptr == NULL) - return (EPERM); - if (req->newlen < sizeof(addrs)) - return (ENOMEM); - error = SYSCTL_IN(req, &addrs, sizeof(addrs)); - if (error) - return (error); - - switch (addrs[0].ss_family) { -#ifdef INET6 - case AF_INET6: - fin6 = (struct sockaddr_in6 *)&addrs[0]; - lin6 = (struct sockaddr_in6 *)&addrs[1]; - if (fin6->sin6_len != sizeof(struct sockaddr_in6) || - lin6->sin6_len != sizeof(struct sockaddr_in6)) - return (EINVAL); - if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) { - if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr)) - return (EINVAL); - in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]); - in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]); - fin = (struct sockaddr_in *)&addrs[0]; - lin = (struct sockaddr_in *)&addrs[1]; - break; - } - error = sa6_embedscope(fin6, ip6_use_defzone); - if (error) - return (error); - error = sa6_embedscope(lin6, ip6_use_defzone); - if (error) - return (error); - break; -#endif - case AF_INET: - fin = (struct sockaddr_in *)&addrs[0]; - lin = (struct sockaddr_in *)&addrs[1]; - if (fin->sin_len != sizeof(struct sockaddr_in) || - lin->sin_len != sizeof(struct sockaddr_in)) - return (EINVAL); - break; - default: - return (EINVAL); - } - INP_INFO_WLOCK(&tcbinfo); - switch (addrs[0].ss_family) { -#ifdef INET6 - case AF_INET6: - inp = in6_pcblookup_hash(&tcbinfo, &f6, fin6->sin6_port, - &l6, lin6->sin6_port, 0, NULL); - break; -#endif - case AF_INET: - inp = in_pcblookup_hash(&tcbinfo, fin->sin_addr, fin->sin_port, - lin->sin_addr, lin->sin_port, 0, NULL); - break; - } - if (inp != NULL) { - INP_LOCK(inp); - if (inp->inp_vflag & 
INP_TIMEWAIT) { - /* - * XXXRW: There currently exists a state where an - * inpcb is present, but its timewait state has been - * discarded. For now, don't allow dropping of this - * type of inpcb. - */ - tw = intotw(inp); - if (tw != NULL) - tcp_twclose(tw, 0); - else - INP_UNLOCK(inp); - } else if (!(inp->inp_vflag & INP_DROPPED) && - !(inp->inp_socket->so_options & SO_ACCEPTCONN)) { - tp = intotcpcb(inp); - tp = cxgb_tcp_drop(tp, ECONNABORTED); - if (tp != NULL) - INP_UNLOCK(inp); - } else - INP_UNLOCK(inp); - } else - error = ESRCH; - INP_INFO_WUNLOCK(&tcbinfo); - return (error); -} - -SYSCTL_PROC(_net_inet_tcp_cxgb, TCPCTL_DROP, drop, - CTLTYPE_STRUCT|CTLFLAG_WR|CTLFLAG_SKIP, NULL, - 0, cxgb_sysctl_drop, "", "Drop TCP connection"); - diff --git a/sys/dev/cxgb/ulp/tom/cxgb_tcp_usrreq.c b/sys/dev/cxgb/ulp/tom/cxgb_tcp_usrreq.c deleted file mode 100644 index bd940b2..0000000 --- a/sys/dev/cxgb/ulp/tom/cxgb_tcp_usrreq.c +++ /dev/null @@ -1,1362 +0,0 @@ -/*- - * Copyright (c) 1982, 1986, 1988, 1993 - * The Regents of the University of California. - * Copyright (c) 2006-2007 Robert N. M. Watson - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * From: @(#)tcp_usrreq.c 8.2 (Berkeley) 1/3/94 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_ddb.h" -#include "opt_inet.h" -#include "opt_inet6.h" -#include "opt_tcpdebug.h" - -#include -#include -#include -#include -#include -#include -#ifdef INET6 -#include -#endif /* INET6 */ -#include -#include -#include -#include -#include - -#ifdef DDB -#include -#endif - -#include -#include - -#include -#include -#ifdef INET6 -#include -#endif -#include -#ifdef INET6 -#include -#endif -#include -#include -#ifdef INET6 -#include -#include -#endif -#include -#include -#include -#include -#include -#include -#ifdef TCPDEBUG -#include -#endif -#include -#include - - -/* - * TCP protocol interface to socket abstraction. - */ -static int tcp_attach(struct socket *); -static int tcp_connect(struct tcpcb *, struct sockaddr *, - struct thread *td); -#ifdef INET6 -static int tcp6_connect(struct tcpcb *, struct sockaddr *, - struct thread *td); -#endif /* INET6 */ -static void tcp_disconnect(struct tcpcb *); -static void tcp_usrclosed(struct tcpcb *); - -#ifdef TCPDEBUG -#define TCPDEBUG0 int ostate = 0 -#define TCPDEBUG1() ostate = tp ? 
tp->t_state : 0 -#define TCPDEBUG2(req) if (tp && (so->so_options & SO_DEBUG)) \ - tcp_trace(TA_USER, ostate, tp, 0, 0, req) -#else -#define TCPDEBUG0 -#define TCPDEBUG1() -#define TCPDEBUG2(req) -#endif - -/* - * TCP attaches to socket via pru_attach(), reserving space, - * and an internet control block. - */ -static int -tcp_usr_attach(struct socket *so, int proto, struct thread *td) -{ - struct inpcb *inp; - struct tcpcb *tp = NULL; - int error; - TCPDEBUG0; - - inp = sotoinpcb(so); - KASSERT(inp == NULL, ("tcp_usr_attach: inp != NULL")); - TCPDEBUG1(); - - error = tcp_attach(so); - if (error) - goto out; - - if ((so->so_options & SO_LINGER) && so->so_linger == 0) - so->so_linger = TCP_LINGERTIME; - - inp = sotoinpcb(so); - tp = intotcpcb(inp); -out: - TCPDEBUG2(PRU_ATTACH); - return error; -} - -/* - * tcp_detach is called when the socket layer loses its final reference - * to the socket, be it a file descriptor reference, a reference from TCP, - * etc. At this point, there is only one case in which we will keep around - * inpcb state: time wait. - * - * This function can probably be re-absorbed back into tcp_usr_detach() now - * that there is a single detach path. - */ -static void -tcp_detach(struct socket *so, struct inpcb *inp) -{ - struct tcpcb *tp; -#ifdef INET6 - int isipv6 = INP_CHECK_SOCKAF(so, AF_INET6) != 0; -#endif - - INP_INFO_WLOCK_ASSERT(&tcbinfo); - INP_LOCK_ASSERT(inp); - - KASSERT(so->so_pcb == inp, ("tcp_detach: so_pcb != inp")); - KASSERT(inp->inp_socket == so, ("tcp_detach: inp_socket != so")); - - tp = intotcpcb(inp); - - if (inp->inp_vflag & INP_TIMEWAIT) { - /* - * There are two cases to handle: one in which the time wait - * state is being discarded (INP_DROPPED), and one in which - * this connection will remain in timewait. In the former, - * it is time to discard all state (except tcptw, which has - * already been discarded by the timewait close code, which - * should be further up the call stack somewhere). 
In the - * latter case, we detach from the socket, but leave the pcb - * present until timewait ends. - * - * XXXRW: Would it be cleaner to free the tcptw here? - */ - if (inp->inp_vflag & INP_DROPPED) { - KASSERT(tp == NULL, ("tcp_detach: INP_TIMEWAIT && " - "INP_DROPPED && tp != NULL")); -#ifdef INET6 - if (isipv6) { - in6_pcbdetach(inp); - in6_pcbfree(inp); - } else { -#endif - in_pcbdetach(inp); - in_pcbfree(inp); -#ifdef INET6 - } -#endif - } else { -#ifdef INET6 - if (isipv6) - in6_pcbdetach(inp); - else -#endif - in_pcbdetach(inp); - INP_UNLOCK(inp); - } - } else { - /* - * If the connection is not in timewait, we consider two - * two conditions: one in which no further processing is - * necessary (dropped || embryonic), and one in which TCP is - * not yet done, but no longer requires the socket, so the - * pcb will persist for the time being. - * - * XXXRW: Does the second case still occur? - */ - if (inp->inp_vflag & INP_DROPPED || - tp->t_state < TCPS_SYN_SENT) { - tcp_discardcb(tp); -#ifdef INET6 - if (isipv6) { - in6_pcbdetach(inp); - in6_pcbfree(inp); - } else { -#endif - in_pcbdetach(inp); - in_pcbfree(inp); -#ifdef INET6 - } -#endif - } else { -#ifdef INET6 - if (isipv6) - in6_pcbdetach(inp); - else -#endif - in_pcbdetach(inp); - } - } -} - -/* - * pru_detach() detaches the TCP protocol from the socket. - * If the protocol state is non-embryonic, then can't - * do this directly: have to initiate a pru_disconnect(), - * which may finish later; embryonic TCB's can just - * be discarded here. - */ -static void -tcp_usr_detach(struct socket *so) -{ - struct inpcb *inp; - - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_detach: inp == NULL")); - INP_INFO_WLOCK(&tcbinfo); - INP_LOCK(inp); - KASSERT(inp->inp_socket != NULL, - ("tcp_usr_detach: inp_socket == NULL")); - tcp_detach(so, inp); - INP_INFO_WUNLOCK(&tcbinfo); -} - -/* - * Give the socket an address. 
- */ -static int -tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td) -{ - int error = 0; - struct inpcb *inp; - struct tcpcb *tp = NULL; - struct sockaddr_in *sinp; - - sinp = (struct sockaddr_in *)nam; - if (nam->sa_len != sizeof (*sinp)) - return (EINVAL); - /* - * Must check for multicast addresses and disallow binding - * to them. - */ - if (sinp->sin_family == AF_INET && - IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) - return (EAFNOSUPPORT); - - TCPDEBUG0; - INP_INFO_WLOCK(&tcbinfo); - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_bind: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = EINVAL; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - error = in_pcbbind(inp, nam, td->td_ucred); -out: - TCPDEBUG2(PRU_BIND); - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); - - return (error); -} - -#ifdef INET6 -static int -tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td) -{ - int error = 0; - struct inpcb *inp; - struct tcpcb *tp = NULL; - struct sockaddr_in6 *sin6p; - - sin6p = (struct sockaddr_in6 *)nam; - if (nam->sa_len != sizeof (*sin6p)) - return (EINVAL); - /* - * Must check for multicast addresses and disallow binding - * to them. 
- */ - if (sin6p->sin6_family == AF_INET6 && - IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) - return (EAFNOSUPPORT); - - TCPDEBUG0; - INP_INFO_WLOCK(&tcbinfo); - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp6_usr_bind: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = EINVAL; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - inp->inp_vflag &= ~INP_IPV4; - inp->inp_vflag |= INP_IPV6; - if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) { - if (IN6_IS_ADDR_UNSPECIFIED(&sin6p->sin6_addr)) - inp->inp_vflag |= INP_IPV4; - else if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) { - struct sockaddr_in sin; - - in6_sin6_2_sin(&sin, sin6p); - inp->inp_vflag |= INP_IPV4; - inp->inp_vflag &= ~INP_IPV6; - error = in_pcbbind(inp, (struct sockaddr *)&sin, - td->td_ucred); - goto out; - } - } - error = in6_pcbbind(inp, nam, td->td_ucred); -out: - TCPDEBUG2(PRU_BIND); - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); - return (error); -} -#endif /* INET6 */ - -/* - * Prepare to accept connections. 
- */ -static int -tcp_usr_listen(struct socket *so, int backlog, struct thread *td) -{ - int error = 0; - struct inpcb *inp; - struct tcpcb *tp = NULL; - - TCPDEBUG0; - INP_INFO_WLOCK(&tcbinfo); - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_listen: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = EINVAL; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - SOCK_LOCK(so); - error = solisten_proto_check(so); - if (error == 0 && inp->inp_lport == 0) - error = in_pcbbind(inp, (struct sockaddr *)0, td->td_ucred); - if (error == 0) { - tp->t_state = TCPS_LISTEN; - solisten_proto(so, backlog); - tcp_gen_listen_open(tp); - } - SOCK_UNLOCK(so); - -out: - TCPDEBUG2(PRU_LISTEN); - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); - return (error); -} - -#ifdef INET6 -static int -tcp6_usr_listen(struct socket *so, int backlog, struct thread *td) -{ - int error = 0; - struct inpcb *inp; - struct tcpcb *tp = NULL; - - TCPDEBUG0; - INP_INFO_WLOCK(&tcbinfo); - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp6_usr_listen: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = EINVAL; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - SOCK_LOCK(so); - error = solisten_proto_check(so); - if (error == 0 && inp->inp_lport == 0) { - inp->inp_vflag &= ~INP_IPV4; - if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) - inp->inp_vflag |= INP_IPV4; - error = in6_pcbbind(inp, (struct sockaddr *)0, td->td_ucred); - } - if (error == 0) { - tp->t_state = TCPS_LISTEN; - solisten_proto(so, backlog); - } - SOCK_UNLOCK(so); - -out: - TCPDEBUG2(PRU_LISTEN); - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); - return (error); -} -#endif /* INET6 */ - -/* - * Initiate connection to peer. - * Create a template for use in transmissions on this connection. - * Enter SYN_SENT state, and mark socket as connecting. - * Start keep-alive timer, and seed output sequence space. 
- * Send initial segment on connection. - */ -static int -tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td) -{ - int error = 0; - struct inpcb *inp; - struct tcpcb *tp = NULL; - struct sockaddr_in *sinp; - - sinp = (struct sockaddr_in *)nam; - if (nam->sa_len != sizeof (*sinp)) - return (EINVAL); - /* - * Must disallow TCP ``connections'' to multicast addresses. - */ - if (sinp->sin_family == AF_INET - && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) - return (EAFNOSUPPORT); - if (jailed(td->td_ucred)) - prison_remote_ip(td->td_ucred, 0, &sinp->sin_addr.s_addr); - - TCPDEBUG0; - INP_INFO_WLOCK(&tcbinfo); - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_connect: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = EINVAL; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - if ((error = tcp_connect(tp, nam, td)) != 0) - goto out; - printf("calling tcp_gen_connect\n"); - - error = tcp_gen_connect(so, nam); -out: - TCPDEBUG2(PRU_CONNECT); - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); - return (error); -} - -#ifdef INET6 -static int -tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td) -{ - int error = 0; - struct inpcb *inp; - struct tcpcb *tp = NULL; - struct sockaddr_in6 *sin6p; - - TCPDEBUG0; - - sin6p = (struct sockaddr_in6 *)nam; - if (nam->sa_len != sizeof (*sin6p)) - return (EINVAL); - /* - * Must disallow TCP ``connections'' to multicast addresses. 
- */ - if (sin6p->sin6_family == AF_INET6 - && IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) - return (EAFNOSUPPORT); - - INP_INFO_WLOCK(&tcbinfo); - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp6_usr_connect: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = EINVAL; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) { - struct sockaddr_in sin; - - if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) { - error = EINVAL; - goto out; - } - - in6_sin6_2_sin(&sin, sin6p); - inp->inp_vflag |= INP_IPV4; - inp->inp_vflag &= ~INP_IPV6; - if ((error = tcp_connect(tp, (struct sockaddr *)&sin, td)) != 0) - goto out; - error = tcp_gen_connect(so, nam); - goto out; - } - inp->inp_vflag &= ~INP_IPV4; - inp->inp_vflag |= INP_IPV6; - inp->inp_inc.inc_isipv6 = 1; - if ((error = tcp6_connect(tp, nam, td)) != 0) - goto out; - error = tcp_gen_connect(so, nam); - -out: - TCPDEBUG2(PRU_CONNECT); - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); - return (error); -} -#endif /* INET6 */ - -/* - * Initiate disconnect from peer. - * If connection never passed embryonic stage, just drop; - * else if don't need to let data drain, then can just drop anyways, - * else have to begin TCP shutdown process: mark socket disconnecting, - * drain unread data, state switch to reflect user close, and - * send segment (e.g. FIN) to peer. Socket will be really disconnected - * when peer sends FIN and acks ours. - * - * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB. 
- */ -static int -tcp_usr_disconnect(struct socket *so) -{ - struct inpcb *inp; - struct tcpcb *tp = NULL; - int error = 0; - - TCPDEBUG0; - INP_INFO_WLOCK(&tcbinfo); - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_disconnect: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = ECONNRESET; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - tcp_disconnect(tp); -out: - TCPDEBUG2(PRU_DISCONNECT); - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); - return (error); -} - -/* - * Accept a connection. Essentially all the work is - * done at higher levels; just return the address - * of the peer, storing through addr. - */ -static int -tcp_usr_accept(struct socket *so, struct sockaddr **nam) -{ - int error = 0; - struct inpcb *inp = NULL; - struct tcpcb *tp = NULL; - struct in_addr addr; - in_port_t port = 0; - TCPDEBUG0; - - if (so->so_state & SS_ISDISCONNECTED) - return (ECONNABORTED); - - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_accept: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = ECONNABORTED; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - - /* - * We inline in_getpeeraddr and COMMON_END here, so that we can - * copy the data of interest and defer the malloc until after we - * release the lock. 
- */ - port = inp->inp_fport; - addr = inp->inp_faddr; - -out: - TCPDEBUG2(PRU_ACCEPT); - INP_UNLOCK(inp); - if (error == 0) - *nam = in_sockaddr(port, &addr); - return error; -} - -#ifdef INET6 -static int -tcp6_usr_accept(struct socket *so, struct sockaddr **nam) -{ - struct inpcb *inp = NULL; - int error = 0; - struct tcpcb *tp = NULL; - struct in_addr addr; - struct in6_addr addr6; - in_port_t port = 0; - int v4 = 0; - TCPDEBUG0; - - if (so->so_state & SS_ISDISCONNECTED) - return (ECONNABORTED); - - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp6_usr_accept: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = ECONNABORTED; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - - /* - * We inline in6_mapped_peeraddr and COMMON_END here, so that we can - * copy the data of interest and defer the malloc until after we - * release the lock. - */ - if (inp->inp_vflag & INP_IPV4) { - v4 = 1; - port = inp->inp_fport; - addr = inp->inp_faddr; - } else { - port = inp->inp_fport; - addr6 = inp->in6p_faddr; - } - -out: - TCPDEBUG2(PRU_ACCEPT); - INP_UNLOCK(inp); - if (error == 0) { - if (v4) - *nam = in6_v4mapsin6_sockaddr(port, &addr); - else - *nam = in6_sockaddr(port, &addr6); - } - return error; -} -#endif /* INET6 */ - -/* - * Mark the connection as being incapable of further output. - */ -static int -tcp_usr_shutdown(struct socket *so) -{ - int error = 0; - struct inpcb *inp; - struct tcpcb *tp = NULL; - - TCPDEBUG0; - INP_INFO_WLOCK(&tcbinfo); - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = ECONNRESET; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - socantsendmore(so); - tcp_usrclosed(tp); - error = tcp_gen_disconnect(tp); - -out: - TCPDEBUG2(PRU_SHUTDOWN); - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); - - return (error); -} - -/* - * After a receive, possibly send window update to peer. 
- */ -static int -tcp_usr_rcvd(struct socket *so, int flags) -{ - struct inpcb *inp; - struct tcpcb *tp = NULL; - int error = 0; - - TCPDEBUG0; - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_rcvd: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = ECONNRESET; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - tcp_gen_rcvd(tp); - -out: - TCPDEBUG2(PRU_RCVD); - INP_UNLOCK(inp); - return (error); -} - -/* - * Do a send by putting data in output queue and updating urgent - * marker if URG set. Possibly send more data. Unlike the other - * pru_*() routines, the mbuf chains are our responsibility. We - * must either enqueue them or free them. The other pru_* routines - * generally are caller-frees. - */ -static int -tcp_usr_send(struct socket *so, int flags, struct mbuf *m, - struct sockaddr *nam, struct mbuf *control, struct thread *td) -{ - int error = 0; - struct inpcb *inp; - struct tcpcb *tp = NULL; - int headlocked = 0; -#ifdef INET6 - int isipv6; -#endif - TCPDEBUG0; - - /* - * We require the pcbinfo lock in two cases: - * - * (1) An implied connect is taking place, which can result in - * binding IPs and ports and hence modification of the pcb hash - * chains. - * - * (2) PRUS_EOF is set, resulting in explicit close on the send. 
- */ - if ((nam != NULL) || (flags & PRUS_EOF)) { - INP_INFO_WLOCK(&tcbinfo); - headlocked = 1; - } - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_send: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - if (control) - m_freem(control); - if (m) - m_freem(m); - error = ECONNRESET; - goto out; - } -#ifdef INET6 - isipv6 = nam && nam->sa_family == AF_INET6; -#endif /* INET6 */ - tp = intotcpcb(inp); - TCPDEBUG1(); - if (control) { - /* TCP doesn't do control messages (rights, creds, etc) */ - if (control->m_len) { - m_freem(control); - if (m) - m_freem(m); - error = EINVAL; - goto out; - } - m_freem(control); /* empty control, just free it */ - } - if (!(flags & PRUS_OOB)) { - sbappendstream(&so->so_snd, m); - if (nam && tp->t_state < TCPS_SYN_SENT) { - /* - * Do implied connect if not yet connected, - * initialize window to default value, and - * initialize maxseg/maxopd using peer's cached - * MSS. - */ - INP_INFO_WLOCK_ASSERT(&tcbinfo); -#ifdef INET6 - if (isipv6) - error = tcp6_connect(tp, nam, td); - else -#endif /* INET6 */ - error = tcp_connect(tp, nam, td); - if (error) - goto out; - tp->snd_wnd = TTCP_CLIENT_SND_WND; - tcp_mss(tp, -1); - } - if (flags & PRUS_EOF) { - /* - * Close the send side of the connection after - * the data is sent. - */ - INP_INFO_WLOCK_ASSERT(&tcbinfo); - socantsendmore(so); - tcp_usrclosed(tp); - } - if (headlocked) { - INP_INFO_WUNLOCK(&tcbinfo); - headlocked = 0; - } - if (tp != NULL) { - if (flags & PRUS_MORETOCOME) - tp->t_flags |= TF_MORETOCOME; - error = tcp_gen_send(tp); - if (flags & PRUS_MORETOCOME) - tp->t_flags &= ~TF_MORETOCOME; - } - } else { - /* - * XXXRW: PRUS_EOF not implemented with PRUS_OOB? - */ - SOCKBUF_LOCK(&so->so_snd); - if (sbspace(&so->so_snd) < -512) { - SOCKBUF_UNLOCK(&so->so_snd); - m_freem(m); - error = ENOBUFS; - goto out; - } - /* - * According to RFC961 (Assigned Protocols), - * the urgent pointer points to the last octet - * of urgent data. 
We continue, however, - * to consider it to indicate the first octet - * of data past the urgent section. - * Otherwise, snd_up should be one lower. - */ - sbappendstream_locked(&so->so_snd, m); - SOCKBUF_UNLOCK(&so->so_snd); - if (nam && tp->t_state < TCPS_SYN_SENT) { - /* - * Do implied connect if not yet connected, - * initialize window to default value, and - * initialize maxseg/maxopd using peer's cached - * MSS. - */ - INP_INFO_WLOCK_ASSERT(&tcbinfo); -#ifdef INET6 - if (isipv6) - error = tcp6_connect(tp, nam, td); - else -#endif /* INET6 */ - error = tcp_connect(tp, nam, td); - if (error) - goto out; - tp->snd_wnd = TTCP_CLIENT_SND_WND; - tcp_mss(tp, -1); - INP_INFO_WUNLOCK(&tcbinfo); - headlocked = 0; - } else if (nam) { - INP_INFO_WUNLOCK(&tcbinfo); - headlocked = 0; - } - tp->snd_up = tp->snd_una + so->so_snd.sb_cc; - tp->t_flags |= TF_FORCEDATA; - error = tcp_gen_send(tp); - tp->t_flags &= ~TF_FORCEDATA; - } -out: - TCPDEBUG2((flags & PRUS_OOB) ? PRU_SENDOOB : - ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND)); - INP_UNLOCK(inp); - if (headlocked) - INP_INFO_WUNLOCK(&tcbinfo); - return (error); -} - -/* - * Abort the TCP. Drop the connection abruptly. - */ -static void -tcp_usr_abort(struct socket *so) -{ - struct inpcb *inp; - struct tcpcb *tp = NULL; - TCPDEBUG0; - - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_abort: inp == NULL")); - - INP_INFO_WLOCK(&tcbinfo); - INP_LOCK(inp); - KASSERT(inp->inp_socket != NULL, - ("tcp_usr_abort: inp_socket == NULL")); - - /* - * If we still have full TCP state, and we're not dropped, drop. - */ - if (!(inp->inp_vflag & INP_TIMEWAIT) && - !(inp->inp_vflag & INP_DROPPED)) { - tp = intotcpcb(inp); - TCPDEBUG1(); - cxgb_tcp_drop(tp, ECONNABORTED); - TCPDEBUG2(PRU_ABORT); - } - if (!(inp->inp_vflag & INP_DROPPED)) { - SOCK_LOCK(so); - so->so_state |= SS_PROTOREF; - SOCK_UNLOCK(so); - inp->inp_vflag |= INP_SOCKREF; - } - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); -} - -/* - * TCP socket is closed. 
Start friendly disconnect. - */ -static void -tcp_usr_close(struct socket *so) -{ - struct inpcb *inp; - struct tcpcb *tp = NULL; - TCPDEBUG0; - - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_close: inp == NULL")); - - INP_INFO_WLOCK(&tcbinfo); - INP_LOCK(inp); - KASSERT(inp->inp_socket != NULL, - ("tcp_usr_close: inp_socket == NULL")); - - /* - * If we still have full TCP state, and we're not dropped, initiate - * a disconnect. - */ - if (!(inp->inp_vflag & INP_TIMEWAIT) && - !(inp->inp_vflag & INP_DROPPED)) { - tp = intotcpcb(inp); - TCPDEBUG1(); - tcp_disconnect(tp); - TCPDEBUG2(PRU_CLOSE); - } - if (!(inp->inp_vflag & INP_DROPPED)) { - SOCK_LOCK(so); - so->so_state |= SS_PROTOREF; - SOCK_UNLOCK(so); - inp->inp_vflag |= INP_SOCKREF; - } - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); -} - -/* - * Receive out-of-band data. - */ -static int -tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags) -{ - int error = 0; - struct inpcb *inp; - struct tcpcb *tp = NULL; - - TCPDEBUG0; - inp = sotoinpcb(so); - KASSERT(inp != NULL, ("tcp_usr_rcvoob: inp == NULL")); - INP_LOCK(inp); - if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) { - error = ECONNRESET; - goto out; - } - tp = intotcpcb(inp); - TCPDEBUG1(); - if ((so->so_oobmark == 0 && - (so->so_rcv.sb_state & SBS_RCVATMARK) == 0) || - so->so_options & SO_OOBINLINE || - tp->t_oobflags & TCPOOB_HADDATA) { - error = EINVAL; - goto out; - } - if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) { - error = EWOULDBLOCK; - goto out; - } - m->m_len = 1; - *mtod(m, caddr_t) = tp->t_iobc; - if ((flags & MSG_PEEK) == 0) - tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA); - -out: - TCPDEBUG2(PRU_RCVOOB); - INP_UNLOCK(inp); - return (error); -} - -struct pr_usrreqs cxgb_tcp_usrreqs = { - .pru_abort = tcp_usr_abort, - .pru_accept = tcp_usr_accept, - .pru_attach = tcp_usr_attach, - .pru_bind = tcp_usr_bind, - .pru_connect = tcp_usr_connect, - .pru_control = in_control, - .pru_detach = tcp_usr_detach, - .pru_disconnect = 
tcp_usr_disconnect, - .pru_listen = tcp_usr_listen, - .pru_peeraddr = in_getpeeraddr, - .pru_rcvd = tcp_usr_rcvd, - .pru_rcvoob = tcp_usr_rcvoob, - .pru_send = tcp_usr_send, - .pru_shutdown = tcp_usr_shutdown, - .pru_sockaddr = in_getsockaddr, - .pru_sosetlabel = in_pcbsosetlabel, - .pru_close = tcp_usr_close, -}; - -#ifdef INET6 -struct pr_usrreqs cxgb_tcp6_usrreqs = { - .pru_abort = tcp_usr_abort, - .pru_accept = tcp6_usr_accept, - .pru_attach = tcp_usr_attach, - .pru_bind = tcp6_usr_bind, - .pru_connect = tcp6_usr_connect, - .pru_control = in6_control, - .pru_detach = tcp_usr_detach, - .pru_disconnect = tcp_usr_disconnect, - .pru_listen = tcp6_usr_listen, - .pru_peeraddr = in6_mapped_peeraddr, - .pru_rcvd = tcp_usr_rcvd, - .pru_rcvoob = tcp_usr_rcvoob, - .pru_send = tcp_usr_send, - .pru_shutdown = tcp_usr_shutdown, - .pru_sockaddr = in6_mapped_sockaddr, - .pru_sosetlabel = in_pcbsosetlabel, - .pru_close = tcp_usr_close, -}; -#endif /* INET6 */ - -/* - * Common subroutine to open a TCP connection to remote host specified - * by struct sockaddr_in in mbuf *nam. Call in_pcbbind to assign a local - * port number if needed. Call in_pcbconnect_setup to do the routing and - * to choose a local host address (interface). If there is an existing - * incarnation of the same connection in TIME-WAIT state and if the remote - * host was sending CC options and if the connection duration was < MSL, then - * truncate the previous TIME-WAIT state and proceed. - * Initialize connection parameters and enter SYN-SENT state. 
- */ -static int -tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td) -{ - struct inpcb *inp = tp->t_inpcb, *oinp; - struct socket *so = inp->inp_socket; - struct in_addr laddr; - u_short lport; - int error; - - INP_INFO_WLOCK_ASSERT(&tcbinfo); - INP_LOCK_ASSERT(inp); - - if (inp->inp_lport == 0) { - error = in_pcbbind(inp, (struct sockaddr *)0, td->td_ucred); - if (error) - return error; - } - - /* - * Cannot simply call in_pcbconnect, because there might be an - * earlier incarnation of this same connection still in - * TIME_WAIT state, creating an ADDRINUSE error. - */ - laddr = inp->inp_laddr; - lport = inp->inp_lport; - error = in_pcbconnect_setup(inp, nam, &laddr.s_addr, &lport, - &inp->inp_faddr.s_addr, &inp->inp_fport, &oinp, td->td_ucred); - if (error && oinp == NULL) - return error; - if (oinp) - return EADDRINUSE; - inp->inp_laddr = laddr; - in_pcbrehash(inp); - - /* - * Compute window scaling to request: - * Scale to fit into sweet spot. See tcp_syncache.c. - * XXX: This should move to tcp_output(). 
- */ - while (tp->request_r_scale < TCP_MAX_WINSHIFT && - (TCP_MAXWIN << tp->request_r_scale) < sb_max) - tp->request_r_scale++; - - soisconnecting(so); - tcpstat.tcps_connattempt++; - tp->t_state = TCPS_SYN_SENT; - tcp_timer_activate(tp, TT_KEEP, tcp_keepinit); - tp->iss = tcp_new_isn(tp); - tp->t_bw_rtseq = tp->iss; - tcp_sendseqinit(tp); - - return 0; -} - -#ifdef INET6 -static int -tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td) -{ - struct inpcb *inp = tp->t_inpcb, *oinp; - struct socket *so = inp->inp_socket; - struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam; - struct in6_addr *addr6; - int error; - - INP_INFO_WLOCK_ASSERT(&tcbinfo); - INP_LOCK_ASSERT(inp); - - if (inp->inp_lport == 0) { - error = in6_pcbbind(inp, (struct sockaddr *)0, td->td_ucred); - if (error) - return error; - } - - /* - * Cannot simply call in_pcbconnect, because there might be an - * earlier incarnation of this same connection still in - * TIME_WAIT state, creating an ADDRINUSE error. - * in6_pcbladdr() also handles scope zone IDs. - */ - error = in6_pcbladdr(inp, nam, &addr6); - if (error) - return error; - oinp = in6_pcblookup_hash(inp->inp_pcbinfo, - &sin6->sin6_addr, sin6->sin6_port, - IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) - ? addr6 - : &inp->in6p_laddr, - inp->inp_lport, 0, NULL); - if (oinp) - return EADDRINUSE; - if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) - inp->in6p_laddr = *addr6; - inp->in6p_faddr = sin6->sin6_addr; - inp->inp_fport = sin6->sin6_port; - /* update flowinfo - draft-itojun-ipv6-flowlabel-api-00 */ - inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK; - if (inp->in6p_flags & IN6P_AUTOFLOWLABEL) - inp->in6p_flowinfo |= - (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK); - in_pcbrehash(inp); - - /* Compute window scaling to request. 
*/ - while (tp->request_r_scale < TCP_MAX_WINSHIFT && - (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.sb_hiwat) - tp->request_r_scale++; - - soisconnecting(so); - tcpstat.tcps_connattempt++; - tp->t_state = TCPS_SYN_SENT; - tcp_timer_activate(tp, TT_KEEP, tcp_keepinit); - tp->iss = tcp_new_isn(tp); - tp->t_bw_rtseq = tp->iss; - tcp_sendseqinit(tp); - - return 0; -} -#endif /* INET6 */ - -/* - * tcp_sendspace and tcp_recvspace are the default send and receive window - * sizes, respectively. These are obsolescent (this information should - * be set by the route). - */ -u_long tcp_sendspace = 1024*32; -SYSCTL_ULONG(_net_inet_tcp_cxgb, TCPCTL_SENDSPACE, sendspace, CTLFLAG_RW, - &tcp_sendspace , 0, "Maximum outgoing TCP datagram size"); -u_long tcp_recvspace = 1024*64; -SYSCTL_ULONG(_net_inet_tcp_cxgb, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW, - &tcp_recvspace , 0, "Maximum incoming TCP datagram size"); - -/* - * Attach TCP protocol to socket, allocating - * internet protocol control block, tcp control block, - * bufer space, and entering LISTEN state if to accept connections. 
- */ -static int -tcp_attach(struct socket *so) -{ - struct tcpcb *tp; - struct inpcb *inp; - int error; -#ifdef INET6 - int isipv6 = INP_CHECK_SOCKAF(so, AF_INET6) != 0; -#endif - - if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { - error = soreserve(so, tcp_sendspace, tcp_recvspace); - if (error) - return (error); - } - so->so_rcv.sb_flags |= SB_AUTOSIZE; - so->so_snd.sb_flags |= SB_AUTOSIZE; - INP_INFO_WLOCK(&tcbinfo); - error = in_pcballoc(so, &tcbinfo); - if (error) { - INP_INFO_WUNLOCK(&tcbinfo); - return (error); - } - inp = sotoinpcb(so); -#ifdef INET6 - if (isipv6) { - inp->inp_vflag |= INP_IPV6; - inp->in6p_hops = -1; /* use kernel default */ - } - else -#endif - inp->inp_vflag |= INP_IPV4; - tp = tcp_newtcpcb(inp); - if (tp == NULL) { -#ifdef INET6 - if (isipv6) { - in6_pcbdetach(inp); - in6_pcbfree(inp); - } else { -#endif - in_pcbdetach(inp); - in_pcbfree(inp); -#ifdef INET6 - } -#endif - INP_INFO_WUNLOCK(&tcbinfo); - return (ENOBUFS); - } - tp->t_state = TCPS_CLOSED; - INP_UNLOCK(inp); - INP_INFO_WUNLOCK(&tcbinfo); - return (0); -} - -/* - * Initiate (or continue) disconnect. - * If embryonic state, just send reset (once). - * If in ``let data drain'' option and linger null, just drop. - * Otherwise (hard), mark socket disconnecting and drop - * current input data; switch states based on user close, and - * send segment to peer (with FIN). - */ -static void -tcp_disconnect(struct tcpcb *tp) -{ - struct inpcb *inp = tp->t_inpcb; - struct socket *so = inp->inp_socket; - - INP_INFO_WLOCK_ASSERT(&tcbinfo); - INP_LOCK_ASSERT(inp); - - /* - * Neither tcp_close() nor tcp_drop() should return NULL, as the - * socket is still open. 
- */ - if (tp->t_state < TCPS_ESTABLISHED) { - tp = cxgb_tcp_close(tp); - KASSERT(tp != NULL, - ("tcp_disconnect: tcp_close() returned NULL")); - } else if ((so->so_options & SO_LINGER) && so->so_linger == 0) { - tp = cxgb_tcp_drop(tp, 0); - KASSERT(tp != NULL, - ("tcp_disconnect: tcp_drop() returned NULL")); - } else { - soisdisconnecting(so); - sbflush(&so->so_rcv); - tcp_usrclosed(tp); - if (!(inp->inp_vflag & INP_DROPPED)) - tcp_gen_disconnect(tp); - } -} - -/* - * User issued close, and wish to trail through shutdown states: - * if never received SYN, just forget it. If got a SYN from peer, - * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN. - * If already got a FIN from peer, then almost done; go to LAST_ACK - * state. In all other cases, have already sent FIN to peer (e.g. - * after PRU_SHUTDOWN), and just have to play tedious game waiting - * for peer to send FIN or not respond to keep-alives, etc. - * We can let the user exit from the close as soon as the FIN is acked. - */ -static void -tcp_usrclosed(struct tcpcb *tp) -{ - - INP_INFO_WLOCK_ASSERT(&tcbinfo); - INP_LOCK_ASSERT(tp->t_inpcb); - - switch (tp->t_state) { - case TCPS_LISTEN: - tcp_gen_listen_close(tp); - case TCPS_CLOSED: - tp->t_state = TCPS_CLOSED; - tp = cxgb_tcp_close(tp); - /* - * tcp_close() should never return NULL here as the socket is - * still open. - */ - KASSERT(tp != NULL, - ("tcp_usrclosed: tcp_close() returned NULL")); - break; - - case TCPS_SYN_SENT: - case TCPS_SYN_RECEIVED: - tp->t_flags |= TF_NEEDFIN; - break; - - case TCPS_ESTABLISHED: - tp->t_state = TCPS_FIN_WAIT_1; - break; - - case TCPS_CLOSE_WAIT: - tp->t_state = TCPS_LAST_ACK; - break; - } - if (tp->t_state >= TCPS_FIN_WAIT_2) { - soisdisconnected(tp->t_inpcb->inp_socket); - /* Prevent the connection hanging in FIN_WAIT_2 forever. */ - if (tp->t_state == TCPS_FIN_WAIT_2) { - int timeout; - - timeout = (tcp_fast_finwait2_recycle) ? 
- tcp_finwait2_timeout : tcp_maxidle; - tcp_timer_activate(tp, TT_2MSL, timeout); - } - } -} diff --git a/sys/dev/cxgb/ulp/tom/cxgb_toepcb.h b/sys/dev/cxgb/ulp/tom/cxgb_toepcb.h index a078bee..8a9c498 100644 --- a/sys/dev/cxgb/ulp/tom/cxgb_toepcb.h +++ b/sys/dev/cxgb/ulp/tom/cxgb_toepcb.h @@ -30,45 +30,49 @@ #ifndef CXGB_TOEPCB_H_ #define CXGB_TOEPCB_H_ #include +#include #include struct toepcb { - struct toedev *tp_toedev; - struct l2t_entry *tp_l2t; - pr_ctloutput_t *tp_ctloutput; - unsigned int tp_tid; - int tp_wr_max; - int tp_wr_avail; - int tp_wr_unacked; - int tp_delack_mode; - int tp_mtu_idx; - int tp_ulp_mode; - int tp_qset_idx; - int tp_mss_clamp; - int tp_qset; - int tp_flags; - int tp_enqueued_bytes; - int tp_page_count; - int tp_state; - - tcp_seq tp_iss; - tcp_seq tp_delack_seq; - tcp_seq tp_rcv_wup; - tcp_seq tp_copied_seq; - uint64_t tp_write_seq; - - volatile int tp_refcount; - vm_page_t *tp_pages; + struct toedev *tp_toedev; + struct l2t_entry *tp_l2t; + pr_ctloutput_t *tp_ctloutput; + unsigned int tp_tid; + int tp_wr_max; + int tp_wr_avail; + int tp_wr_unacked; + int tp_delack_mode; + int tp_mtu_idx; + int tp_ulp_mode; + int tp_qset_idx; + int tp_mss_clamp; + int tp_qset; + int tp_flags; + int tp_enqueued_bytes; + int tp_page_count; + int tp_state; + + tcp_seq tp_iss; + tcp_seq tp_delack_seq; + tcp_seq tp_rcv_wup; + tcp_seq tp_copied_seq; + uint64_t tp_write_seq; + + volatile int tp_refcount; + vm_page_t *tp_pages; - struct tcpcb *tp_tp; - struct mbuf *tp_m_last; - bus_dma_tag_t tp_tx_dmat; - bus_dmamap_t tp_dmamap; - - LIST_ENTRY(toepcb) synq_entry; - struct mbuf_head wr_list; - struct mbuf_head out_of_order_queue; - struct ddp_state tp_ddp_state; + struct tcpcb *tp_tp; + struct mbuf *tp_m_last; + bus_dma_tag_t tp_tx_dmat; + bus_dma_tag_t tp_rx_dmat; + bus_dmamap_t tp_dmamap; + + LIST_ENTRY(toepcb) synq_entry; + struct mbuf_head wr_list; + struct mbuf_head out_of_order_queue; + struct ddp_state tp_ddp_state; + struct cv tp_cv; + }; static inline 
void @@ -95,7 +99,7 @@ enqueue_wr(struct toepcb *toep, struct mbuf *m) } static inline struct mbuf * -peek_wr(struct toepcb *toep) +peek_wr(const struct toepcb *toep) { return (mbufq_peek(&toep->wr_list)); @@ -108,5 +112,10 @@ dequeue_wr(struct toepcb *toep) return (mbufq_dequeue(&toep->wr_list)); } +#define wr_queue_walk(toep, m) \ + for (m = peek_wr(toep); m; m = m->m_nextpkt) + + + #endif diff --git a/sys/dev/cxgb/ulp/tom/cxgb_tom.c b/sys/dev/cxgb/ulp/tom/cxgb_tom.c index b5b87b7..4015cd3 100644 --- a/sys/dev/cxgb/ulp/tom/cxgb_tom.c +++ b/sys/dev/cxgb/ulp/tom/cxgb_tom.c @@ -34,11 +34,13 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -90,16 +92,20 @@ static TAILQ_HEAD(, tom_data) cxgb_list; static struct mtx cxgb_list_lock; static int t3_toe_attach(struct toedev *dev, const struct offload_id *entry); +static void cxgb_register_listeners(void); + /* * Handlers for each CPL opcode */ -static cxgb_cpl_handler_func tom_cpl_handlers[NUM_CPL_CMDS]; +static cxgb_cpl_handler_func tom_cpl_handlers[256]; + static eventhandler_tag listen_tag; static struct offload_id t3_toe_id_tab[] = { { TOE_ID_CHELSIO_T3, 0 }, { TOE_ID_CHELSIO_T3B, 0 }, + { TOE_ID_CHELSIO_T3C, 0 }, { 0 } }; @@ -138,7 +144,7 @@ toepcb_alloc(void) { struct toepcb *toep; - toep = malloc(sizeof(struct toepcb), M_DEVBUF, M_NOWAIT); + toep = malloc(sizeof(struct toepcb), M_DEVBUF, M_NOWAIT|M_ZERO); if (toep == NULL) return (NULL); @@ -150,8 +156,8 @@ toepcb_alloc(void) void toepcb_init(struct toepcb *toep) { - bzero(toep, sizeof(*toep)); toep->tp_refcount = 1; + cv_init(&toep->tp_cv, "toep cv"); } void @@ -164,12 +170,9 @@ void toepcb_release(struct toepcb *toep) { if (toep->tp_refcount == 1) { - printf("doing final toepcb free\n"); - free(toep, M_DEVBUF); return; } - atomic_add_acq_int(&toep->tp_refcount, -1); } @@ -179,13 +182,30 @@ toepcb_release(struct toepcb *toep) static void t3cdev_add(struct tom_data *t) 
{ - printf("t3cdev_add\n"); - mtx_lock(&cxgb_list_lock); TAILQ_INSERT_TAIL(&cxgb_list, t, entry); mtx_unlock(&cxgb_list_lock); } +static inline int +cdev2type(struct t3cdev *cdev) +{ + int type = 0; + + switch (cdev->type) { + case T3A: + type = TOE_ID_CHELSIO_T3; + break; + case T3B: + type = TOE_ID_CHELSIO_T3B; + break; + case T3C: + type = TOE_ID_CHELSIO_T3C; + break; + } + return (type); +} + /* * Allocate a TOM data structure, * initialize its cpl_handlers @@ -200,11 +220,7 @@ t3c_tom_add(struct t3cdev *cdev) struct toedev *tdev; struct adap_ports *port_info; - printf("%s called\n", __FUNCTION__); - - t = malloc(sizeof(*t), M_CXGB, M_NOWAIT|M_ZERO); - if (t == NULL) return; @@ -224,8 +240,7 @@ t3c_tom_add(struct t3cdev *cdev) /* Register TCP offload device */ tdev = &t->tdev; - tdev->tod_ttid = (cdev->type == T3A ? - TOE_ID_CHELSIO_T3 : TOE_ID_CHELSIO_T3B); + tdev->tod_ttid = cdev2type(cdev); tdev->tod_lldev = cdev->lldev; if (register_toedev(tdev, "toe%d")) { @@ -234,13 +249,11 @@ t3c_tom_add(struct t3cdev *cdev) } TOM_DATA(tdev) = t; - printf("nports=%d\n", port_info->nports); for (i = 0; i < port_info->nports; i++) { struct ifnet *ifp = port_info->lldevs[i]; TOEDEV(ifp) = tdev; - printf("enabling toe on %p\n", ifp); - + CTR1(KTR_TOM, "enabling toe on %p", ifp); ifp->if_capabilities |= IFCAP_TOE4; ifp->if_capenable |= IFCAP_TOE4; } @@ -251,6 +264,7 @@ t3c_tom_add(struct t3cdev *cdev) /* Activate TCP offload device */ activate_offload(tdev); + cxgb_register_listeners(); return; out_free_all: @@ -269,8 +283,8 @@ static int do_bad_cpl(struct t3cdev *cdev, struct mbuf *m, void *ctx) { log(LOG_ERR, "%s: received bad CPL command %u\n", cdev->name, - *mtod(m, unsigned int *)); - + 0xFF & *mtod(m, unsigned int *)); + kdb_backtrace(); return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG); } @@ -282,7 +296,7 @@ do_bad_cpl(struct t3cdev *cdev, struct mbuf *m, void *ctx) void t3tom_register_cpl_handler(unsigned int opcode, cxgb_cpl_handler_func h) { - if (opcode < NUM_CPL_CMDS) + 
if (opcode < 256) tom_cpl_handlers[opcode] = h ? h : do_bad_cpl; else log(LOG_ERR, "Chelsio T3 TOM: handler registration for " @@ -327,7 +341,7 @@ init_cpl_handlers(void) { int i; - for (i = 0; i < NUM_CPL_CMDS; ++i) + for (i = 0; i < 256; ++i) tom_cpl_handlers[i] = do_bad_cpl; t3_init_listen_cpl_handlers(); @@ -349,7 +363,7 @@ t3_toe_attach(struct toedev *dev, const struct offload_id *entry) #endif t3_init_tunables(t); mtx_init(&t->listen_lock, "tom data listeners", NULL, MTX_DEF); - + CTR2(KTR_TOM, "t3_toe_attach dev=%p entry=%p", dev, entry); /* Adjust TOE activation for this module */ t->conf.activated = activated; @@ -374,19 +388,14 @@ t3_toe_attach(struct toedev *dev, const struct offload_id *entry) t->ddp_ulimit = ddp.ulimit; t->pdev = ddp.pdev; t->rx_page_size = rx_page_info.page_size; -#ifdef notyet /* OK if this fails, we just can't do DDP */ t->nppods = (ddp.ulimit + 1 - ddp.llimit) / PPOD_SIZE; - t->ppod_map = t3_alloc_mem(t->nppods); -#endif + t->ppod_map = malloc(t->nppods, M_DEVBUF, M_WAITOK|M_ZERO); -#if 0 - spin_lock_init(&t->ppod_map_lock); - tom_proc_init(dev); -#ifdef CONFIG_SYSCTL - t->sysctl = t3_sysctl_register(dev, &t->conf); -#endif -#endif + mtx_init(&t->ppod_map_lock, "ppod map", NULL, MTX_DEF); + + + t3_sysctl_register(cdev->adapter, &t->conf); return (0); } @@ -411,11 +420,8 @@ cxgb_toe_listen_stop(void *unused, struct tcpcb *tp) mtx_lock(&cxgb_list_lock); TAILQ_FOREACH(p, &cxgb_list, entry) { - if (tp->t_state == TCPS_LISTEN) { - printf("stopping listen on port=%d\n", - ntohs(tp->t_inpcb->inp_lport)); + if (tp->t_state == TCPS_LISTEN) t3_listen_stop(&p->tdev, so, p->cdev); - } } mtx_unlock(&cxgb_list_lock); } @@ -439,23 +445,12 @@ cxgb_register_listeners(void) static int t3_tom_init(void) { - -#if 0 - struct socket *sock; - err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); - if (err < 0) { - printk(KERN_ERR "Could not create TCP socket, error %d\n", err); - return err; - } - - t3_def_state_change = 
sock->sk->sk_state_change; - t3_def_data_ready = sock->sk->sk_data_ready; - t3_def_error_report = sock->sk->sk_error_report; - sock_release(sock); -#endif init_cpl_handlers(); - if (t3_init_cpl_io() < 0) + if (t3_init_cpl_io() < 0) { + log(LOG_ERR, + "Unable to initialize cpl io ops\n"); return -1; + } t3_init_socket_ops(); /* Register with the TOE device layer. */ @@ -466,7 +461,6 @@ t3_tom_init(void) return -1; } INP_INFO_WLOCK(&tcbinfo); - INP_INFO_WUNLOCK(&tcbinfo); mtx_init(&cxgb_list_lock, "cxgb tom list", NULL, MTX_DEF); @@ -477,10 +471,8 @@ t3_tom_init(void) TAILQ_INIT(&cxgb_list); /* Register to offloading devices */ - printf("setting add to %p\n", t3c_tom_add); t3c_tom_client.add = t3c_tom_add; cxgb_register_client(&t3c_tom_client); - cxgb_register_listeners(); return (0); } @@ -491,8 +483,6 @@ t3_tom_load(module_t mod, int cmd, void *arg) switch (cmd) { case MOD_LOAD: - printf("wheeeeee ...\n"); - t3_tom_init(); break; case MOD_QUIESCE: diff --git a/sys/dev/cxgb/ulp/tom/cxgb_tom.h b/sys/dev/cxgb/ulp/tom/cxgb_tom.h index 8d60bbd..bcda2c3 100644 --- a/sys/dev/cxgb/ulp/tom/cxgb_tom.h +++ b/sys/dev/cxgb/ulp/tom/cxgb_tom.h @@ -138,6 +138,8 @@ struct listen_ctx { void t3_init_tunables(struct tom_data *t); +void t3_sysctl_register(struct adapter *sc, const struct tom_tunables *p); + static __inline struct mbuf * m_gethdr_nofail(int len) { diff --git a/sys/dev/cxgb/ulp/tom/cxgb_tom_sysctl.c b/sys/dev/cxgb/ulp/tom/cxgb_tom_sysctl.c index 7219922..b4ff748 100644 --- a/sys/dev/cxgb/ulp/tom/cxgb_tom_sysctl.c +++ b/sys/dev/cxgb/ulp/tom/cxgb_tom_sysctl.c @@ -66,6 +66,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -82,7 +83,7 @@ static struct tom_tunables default_tunable_vals = { .delack = 1, .max_conn = -1, .soft_backlog_limit = 0, - .ddp = 0, + .ddp = 1, .ddp_thres = 14 * 4096, .ddp_copy_limit = 13 * 4096, .ddp_push_wait = 1, @@ -96,7 +97,8 @@ static struct tom_tunables default_tunable_vals = { .activated = 1, }; -void 
t3_init_tunables(struct tom_data *t) +void +t3_init_tunables(struct tom_data *t) { t->conf = default_tunable_vals; @@ -104,3 +106,15 @@ void t3_init_tunables(struct tom_data *t) t->conf.mss = T3C_DATA(t->cdev)->tx_max_chunk; t->conf.max_wrs = T3C_DATA(t->cdev)->max_wrs; } + +void +t3_sysctl_register(struct adapter *sc, const struct tom_tunables *p) +{ + struct sysctl_ctx_list *ctx; + struct sysctl_oid_list *children; + + ctx = device_get_sysctl_ctx(sc->dev); + children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); + +} + diff --git a/sys/dev/cxgb/ulp/tom/cxgb_vm.c b/sys/dev/cxgb/ulp/tom/cxgb_vm.c new file mode 100644 index 0000000..7036005 --- /dev/null +++ b/sys/dev/cxgb/ulp/tom/cxgb_vm.c @@ -0,0 +1,180 @@ +/************************************************************************** + +Copyright (c) 2007, Chelsio Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Neither the name of the Chelsio Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define TRACE_ENTER printf("%s:%s entered", __FUNCTION__, __FILE__) +#define TRACE_EXIT printf("%s:%s:%d exited", __FUNCTION__, __FILE__, __LINE__) + +/* + * This routine takes a user address range and does the following: + * - validate that the user has access to those pages (flags indicates read or write) - if not fail + * - validate that count is enough to hold range number of pages - if not fail + * - fault in any non-resident pages + * - if the user is doing a read force a write fault for any COWed pages + * - if the user is doing a read mark all pages as dirty + * - hold all pages + * - return number of pages in count + */ +int +vm_fault_hold_user_pages(vm_offset_t addr, vm_page_t *mp, int count, int flags) +{ + + vm_offset_t end, va; + vm_paddr_t pa; + int faults, rv; + + struct thread *td; + vm_map_t map; + pmap_t pmap; + vm_page_t m, *pages; + vm_prot_t prot; + + + /* + * Check that virtual address range is legal + * This check is somewhat bogus as on some architectures kernel + * and user do not share VA - however, it appears that all FreeBSD + * architectures define it + */ + end = addr + (count * PAGE_SIZE); + if (end > VM_MAXUSER_ADDRESS) { + printf("bad 
address passed\n"); + return (EFAULT); + } + + td = curthread; + map = &td->td_proc->p_vmspace->vm_map; + pmap = &td->td_proc->p_vmspace->vm_pmap; + pages = mp; + + prot = VM_PROT_READ; + prot |= (flags & VM_HOLD_WRITEABLE) ? VM_PROT_WRITE : 0; + bzero(pages, sizeof(vm_page_t *) * count); +retry: + + /* + * First optimistically assume that all pages are resident (and R/W if for write) + * if so just mark pages as held (and dirty if for write) and return + */ + vm_page_lock_queues(); + for (pages = mp, faults = 0, va = addr; va < end; va += PAGE_SIZE, pages++) { + /* + * Assure that we only hold the page once + */ + if (*pages == NULL) { + /* + * page queue mutex is recursable so this is OK + * it would be really nice if we had an unlocked version of this so + * we were only acquiring the pmap lock 1 time as opposed to potentially + * many dozens of times + */ + m = pmap_extract_and_hold(pmap, va, prot); + if (m == NULL) { + faults++; + continue; + } + + *pages = m; + if (flags & VM_HOLD_WRITEABLE) + vm_page_dirty(m); + } + } + vm_page_unlock_queues(); + + if (faults == 0) { + return (0); + } + + /* + * Pages either have insufficient permissions or are not present + * trigger a fault where neccessary + * + */ + for (va = addr; va < end; va += PAGE_SIZE) { + m = NULL; + pa = pmap_extract(pmap, va); + rv = 0; + if (pa) + m = PHYS_TO_VM_PAGE(pa); + if (flags & VM_HOLD_WRITEABLE) { + if (m == NULL || (m->flags & PG_WRITEABLE) == 0) + rv = vm_fault(map, va, VM_PROT_WRITE, VM_FAULT_DIRTY); + } else if (m == NULL) + rv = vm_fault(map, va, VM_PROT_READ, VM_FAULT_NORMAL); + if (rv) { + printf("vm_fault bad return rv=%d va=0x%zx\n", rv, va); + + goto error; + } + } + + goto retry; + +error: + vm_page_lock_queues(); + for (pages = mp, va = addr; va < end; va += PAGE_SIZE, pages++) + if (*pages) + vm_page_unhold(*pages); + vm_page_unlock_queues(); + return (EFAULT); +} + +void +vm_fault_unhold_pages(vm_page_t *mp, int count) +{ + + KASSERT(count >= 0, ("negative count %d", 
count)); + vm_page_lock_queues(); + while (count--) { + vm_page_unhold(*mp); + mp++; + } + vm_page_unlock_queues(); +} diff --git a/sys/dev/cxgb/ulp/tom/cxgb_vm.h b/sys/dev/cxgb/ulp/tom/cxgb_vm.h new file mode 100644 index 0000000..29418b6 --- /dev/null +++ b/sys/dev/cxgb/ulp/tom/cxgb_vm.h @@ -0,0 +1,40 @@ +/************************************************************************** + +Copyright (c) 2007, Chelsio Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Neither the name of the Chelsio Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ + +$FreeBSD$ + +***************************************************************************/ +#ifndef CXGB_VM_H_ +#define CXGB_VM_H_ + +#define VM_HOLD_WRITEABLE 0x1 + +int vm_fault_hold_user_pages(vm_offset_t addr, vm_page_t *mp, int count, int flags); +void vm_fault_unhold_pages(vm_page_t *mp, int count); + +#endif diff --git a/sys/modules/cxgb/cxgb/Makefile b/sys/modules/cxgb/cxgb/Makefile index 8365c02..6114ef9 100644 --- a/sys/modules/cxgb/cxgb/Makefile +++ b/sys/modules/cxgb/cxgb/Makefile @@ -7,26 +7,30 @@ KMOD= if_cxgb SRCS= cxgb_mc5.c cxgb_vsc8211.c cxgb_ael1002.c cxgb_mv88e1xxx.c SRCS+= cxgb_xgmac.c cxgb_vsc7323.c cxgb_t3_hw.c cxgb_main.c SRCS+= cxgb_sge.c cxgb_lro.c cxgb_offload.c cxgb_l2t.c -SRCS+= device_if.h bus_if.h pci_if.h opt_zero.h opt_sched.h opt_global.h +SRCS+= device_if.h bus_if.h pci_if.h opt_zero.h opt_sched.h opt_global.h SRCS+= uipc_mvec.c cxgb_support.c SRCS+= cxgb_multiq.c CFLAGS+= -DCONFIG_CHELSIO_T3_CORE -g -DCONFIG_DEFINED -DDEFAULT_JUMBO -I${CXGB} -DSMP -#CFLAGS+= -DDISABLE_MBUF_IOVEC +CFLAGS+= -DDISABLE_MBUF_IOVEC #CFLAGS+= -DIFNET_MULTIQUEUE +#CFLAGS+= -DDISABLE_MBUF_IOVEC +#CFLAGS+= -DDEBUG -DDEBUG_PRINT #CFLAGS+= -DINVARIANT_SUPPORT -DINVARIANTS #CFLAGS+= -DWITNESS -#CFLAGS+= -DDEBUG -DDEBUG_PRINT +#CFLAGS += -DLOCK_PROFILING + +#CFLAGS+= -DWITNESS .if ${MACHINE_ARCH} != "ia64" # ld is broken on ia64 -t3fw-4.7.0.bin: ${CXGB}/t3fw-4.7.0.bin.gz.uu - uudecode -p < ${CXGB}/t3fw-4.7.0.bin.gz.uu \ +t3fw-5.0.0.bin: ${CXGB}/t3fw-5.0.0.bin.gz.uu + uudecode -p < ${CXGB}/t3fw-5.0.0.bin.gz.uu \ | gzip -dc > ${.TARGET} -FIRMWS= t3fw-4.7.0.bin:t3fw470 -CLEANFILES+= t3fw-4.7.0.bin +FIRMWS= t3fw-5.0.0.bin:t3fw500 +CLEANFILES+= t3fw-5.0.0.bin t3b_protocol_sram-1.1.0.bin: ${CXGB}/t3b_protocol_sram-1.1.0.bin.gz.uu uudecode -p < ${CXGB}/t3b_protocol_sram-1.1.0.bin.gz.uu \ diff --git a/sys/modules/cxgb/tom/Makefile b/sys/modules/cxgb/tom/Makefile index a4e4562..7134386 100644 --- a/sys/modules/cxgb/tom/Makefile +++ b/sys/modules/cxgb/tom/Makefile 
@@ -1,11 +1,13 @@ # $FreeBSD$ + TOM = ${.CURDIR}/../../../dev/cxgb/ulp/tom .PATH: ${TOM} KMOD= tom SRCS= cxgb_tom.c cxgb_cpl_io.c cxgb_listen.c cxgb_tom_sysctl.c cxgb_cpl_socket.c -#SRCS+= cxgb_tcp_subr.c cxgb_tcp_usrreq.c -SRCS+= opt_compat.h opt_inet.h opt_inet6.h opt_ipsec.h opt_mac.h opt_tcpdebug.h opt_ddb.h +SRCS+= cxgb_ddp.c cxgb_vm.c +SRCS+= opt_compat.h opt_inet.h opt_inet6.h opt_ipsec.h opt_mac.h +SRCS+= opt_tcpdebug.h opt_ddb.h opt_sched.h opt_global.h opt_ktr.h SRCS+= device_if.h bus_if.h pci_if.h #CFLAGS+= -DDEBUG_PRINT -DDEBUG -- cgit v1.1