Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/aac/aac.c | 4
-rw-r--r--  sys/dev/aac/aac_cam.c | 2
-rw-r--r--  sys/dev/acpi_support/acpi_wmi.c | 2
-rw-r--r--  sys/dev/acpica/Osd/OsdInterrupt.c | 2
-rw-r--r--  sys/dev/acpica/Osd/OsdMemory.c | 2
-rw-r--r--  sys/dev/acpica/Osd/OsdSchedule.c | 2
-rw-r--r--  sys/dev/acpica/Osd/OsdSynch.c | 2
-rw-r--r--  sys/dev/acpica/acpi.c | 2
-rw-r--r--  sys/dev/acpica/acpi_cmbat.c | 3
-rw-r--r--  sys/dev/acpica/acpi_ec.c | 2
-rw-r--r--  sys/dev/acpica/acpi_pci_link.c | 2
-rw-r--r--  sys/dev/acpica/acpi_perf.c | 2
-rw-r--r--  sys/dev/acpica/acpi_powerres.c | 2
-rw-r--r--  sys/dev/acpica/acpi_smbat.c | 3
-rw-r--r--  sys/dev/acpica/acpi_thermal.c | 2
-rw-r--r--  sys/dev/acpica/acpi_video.c | 2
-rw-r--r--  sys/dev/ae/if_ae.c | 17
-rw-r--r--  sys/dev/ahci/ahci.c | 23
-rw-r--r--  sys/dev/amr/amr.c | 2
-rw-r--r--  sys/dev/amr/amr_cam.c | 2
-rw-r--r--  sys/dev/an/if_an.c | 3
-rw-r--r--  sys/dev/ata/ata-all.c | 2
-rw-r--r--  sys/dev/ata/ata-disk.c | 9
-rw-r--r--  sys/dev/ata/ata-pci.c | 25
-rw-r--r--  sys/dev/ata/chipsets/ata-promise.c | 9
-rw-r--r--  sys/dev/ata/chipsets/ata-siliconimage.c | 13
-rw-r--r--  sys/dev/ath/ah_osdep.c | 34
-rw-r--r--  sys/dev/ath/ath_hal/ah.c | 10
-rw-r--r--  sys/dev/ath/ath_hal/ah.h | 8
-rw-r--r--  sys/dev/ath/ath_hal/ah_devid.h | 4
-rw-r--r--  sys/dev/ath/ath_hal/ah_internal.h | 3
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416.h | 7
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_ani.c | 145
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_attach.c | 25
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_cal.c | 10
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_interrupts.c | 17
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_misc.c | 51
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_reset.c | 45
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416phy.h | 6
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416reg.h | 12
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9280_attach.c | 9
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287_attach.c | 20
-rw-r--r--  sys/dev/ath/ath_rate/amrr/amrr.c | 24
-rw-r--r--  sys/dev/ath/ath_rate/onoe/onoe.c | 24
-rw-r--r--  sys/dev/ath/ath_rate/sample/sample.c | 526
-rw-r--r--  sys/dev/ath/ath_rate/sample/sample.h | 8
-rw-r--r--  sys/dev/ath/if_ath.c | 1207
-rw-r--r--  sys/dev/ath/if_ath_ahb.c | 3
-rw-r--r--  sys/dev/ath/if_ath_debug.c | 49
-rw-r--r--  sys/dev/ath/if_ath_debug.h | 5
-rw-r--r--  sys/dev/ath/if_ath_keycache.c | 23
-rw-r--r--  sys/dev/ath/if_ath_keycache.h | 4
-rw-r--r--  sys/dev/ath/if_ath_misc.h | 14
-rw-r--r--  sys/dev/ath/if_ath_pci.c | 5
-rw-r--r--  sys/dev/ath/if_ath_sysctl.c | 122
-rw-r--r--  sys/dev/ath/if_ath_tx.c | 3252
-rw-r--r--  sys/dev/ath/if_ath_tx.h | 83
-rw-r--r--  sys/dev/ath/if_ath_tx_ht.c | 616
-rw-r--r--  sys/dev/ath/if_ath_tx_ht.h | 29
-rw-r--r--  sys/dev/ath/if_athioctl.h | 10
-rw-r--r--  sys/dev/ath/if_athrate.h | 23
-rw-r--r--  sys/dev/ath/if_athvar.h | 264
-rw-r--r--  sys/dev/atkbdc/psm.c | 4
-rw-r--r--  sys/dev/bce/if_bce.c | 2
-rw-r--r--  sys/dev/bge/if_bge.c | 132
-rw-r--r--  sys/dev/bge/if_bgereg.h | 41
-rw-r--r--  sys/dev/bktr/bktr_os.c | 2
-rw-r--r--  sys/dev/bktr/bktr_reg.h | 4
-rw-r--r--  sys/dev/bm/if_bm.c | 218
-rw-r--r--  sys/dev/bm/if_bmreg.h | 10
-rw-r--r--  sys/dev/bm/if_bmvar.h | 12
-rw-r--r--  sys/dev/bwn/if_bwn.c | 3
-rw-r--r--  sys/dev/bxe/if_bxe.c | 2
-rw-r--r--  sys/dev/cardbus/cardbus.c | 2
-rw-r--r--  sys/dev/cesa/cesa.c | 1614
-rw-r--r--  sys/dev/cesa/cesa.h | 350
-rw-r--r--  sys/dev/ciss/ciss.c | 3
-rw-r--r--  sys/dev/cs/if_cs.c | 2
-rw-r--r--  sys/dev/cxgb/ulp/tom/cxgb_tom_sysctl.c | 2
-rw-r--r--  sys/dev/cxgbe/t4_main.c | 3
-rw-r--r--  sys/dev/dc/dcphy.c | 11
-rw-r--r--  sys/dev/dc/if_dc.c | 238
-rw-r--r--  sys/dev/dc/if_dcreg.h | 23
-rw-r--r--  sys/dev/dcons/dcons_os.c | 2
-rw-r--r--  sys/dev/e1000/if_em.c | 2
-rw-r--r--  sys/dev/e1000/if_igb.c | 2
-rw-r--r--  sys/dev/esp/am53c974reg.h | 72
-rw-r--r--  sys/dev/esp/esp_pci.c | 654
-rw-r--r--  sys/dev/esp/esp_sbus.c | 84
-rw-r--r--  sys/dev/esp/ncr53c9x.c | 175
-rw-r--r--  sys/dev/esp/ncr53c9xreg.h | 4
-rw-r--r--  sys/dev/esp/ncr53c9xvar.h | 20
-rw-r--r--  sys/dev/fb/vesa.c | 2
-rw-r--r--  sys/dev/fdc/fdc.c | 2
-rw-r--r--  sys/dev/firewire/fwmem.c | 4
-rw-r--r--  sys/dev/firewire/if_fwe.c | 4
-rw-r--r--  sys/dev/firewire/if_fwip.c | 4
-rw-r--r--  sys/dev/firewire/sbp.c | 5
-rw-r--r--  sys/dev/firewire/sbp_targ.c | 2
-rw-r--r--  sys/dev/gpio/gpiobus.c | 11
-rw-r--r--  sys/dev/hifn/hifn7751.c | 3
-rw-r--r--  sys/dev/hwpmc/hwpmc_mips24k.c | 62
-rw-r--r--  sys/dev/iicbus/ad7417.c | 2
-rw-r--r--  sys/dev/iicbus/max6690.c | 2
-rw-r--r--  sys/dev/iir/iir.c | 2
-rw-r--r--  sys/dev/ipmi/ipmi.c | 5
-rw-r--r--  sys/dev/iscsi/initiator/isc_subr.c | 2
-rw-r--r--  sys/dev/iscsi/initiator/iscsi.c | 2
-rw-r--r--  sys/dev/iscsi/initiator/iscsivar.h | 1
-rw-r--r--  sys/dev/isp/DriverManual.txt | 2
-rw-r--r--  sys/dev/isp/isp.c | 67
-rw-r--r--  sys/dev/isp/isp_freebsd.c | 8
-rw-r--r--  sys/dev/isp/isp_freebsd.h | 1
-rw-r--r--  sys/dev/isp/isp_pci.c | 2
-rw-r--r--  sys/dev/kbd/kbd.c | 2
-rw-r--r--  sys/dev/lmc/if_lmc.c | 4
-rw-r--r--  sys/dev/malo/if_malo.c | 2
-rw-r--r--  sys/dev/malo/if_malo_pci.c | 2
-rw-r--r--  sys/dev/md/md.c | 5
-rw-r--r--  sys/dev/mfi/mfi.c | 15
-rw-r--r--  sys/dev/mfi/mfi_cam.c | 1
-rw-r--r--  sys/dev/mfi/mfi_debug.c | 1
-rw-r--r--  sys/dev/mfi/mfi_disk.c | 1
-rw-r--r--  sys/dev/mfi/mfi_pci.c | 24
-rw-r--r--  sys/dev/mfi/mfivar.h | 20
-rw-r--r--  sys/dev/mii/brgphy.c | 1
-rw-r--r--  sys/dev/mii/mii.c | 159
-rw-r--r--  sys/dev/mii/mii_bitbang.c | 180
-rw-r--r--  sys/dev/mii/mii_bitbang.h | 54
-rw-r--r--  sys/dev/mii/miidevs | 1
-rw-r--r--  sys/dev/mii/miivar.h | 4
-rw-r--r--  sys/dev/mmc/mmc.c | 2
-rw-r--r--  sys/dev/mps/mps_sas.c | 2
-rw-r--r--  sys/dev/mpt/mpt.c | 2
-rw-r--r--  sys/dev/msk/if_msk.c | 192
-rw-r--r--  sys/dev/msk/if_mskreg.h | 52
-rw-r--r--  sys/dev/mvs/mvs.c | 2
-rw-r--r--  sys/dev/mvs/mvs_pci.c | 9
-rw-r--r--  sys/dev/mvs/mvs_soc.c | 9
-rw-r--r--  sys/dev/mwl/if_mwl.c | 2
-rw-r--r--  sys/dev/mwl/mwlhal.c | 3
-rw-r--r--  sys/dev/netmap/head.diff | 654
-rw-r--r--  sys/dev/netmap/if_em_netmap.h | 383
-rw-r--r--  sys/dev/netmap/if_igb_netmap.h | 378
-rw-r--r--  sys/dev/netmap/if_lem_netmap.h | 344
-rw-r--r--  sys/dev/netmap/if_re_netmap.h | 415
-rw-r--r--  sys/dev/netmap/ixgbe_netmap.h | 376
-rw-r--r--  sys/dev/netmap/netmap.c | 1762
-rw-r--r--  sys/dev/netmap/netmap_kern.h | 221
-rw-r--r--  sys/dev/nge/if_nge.c | 230
-rw-r--r--  sys/dev/nge/if_ngereg.h | 24
-rw-r--r--  sys/dev/nmdm/nmdm.c | 2
-rw-r--r--  sys/dev/ofw/openfirm.c | 2
-rw-r--r--  sys/dev/pccard/pccard.c | 2
-rw-r--r--  sys/dev/pccbb/pccbb.c | 2
-rw-r--r--  sys/dev/pccbb/pccbb_isa.c | 2
-rw-r--r--  sys/dev/ppbus/ppb_base.c | 5
-rw-r--r--  sys/dev/ppbus/ppbconf.c | 10
-rw-r--r--  sys/dev/ppc/ppc.c | 9
-rw-r--r--  sys/dev/puc/puc.c | 2
-rw-r--r--  sys/dev/puc/pucdata.c | 39
-rw-r--r--  sys/dev/qlxgb/README.txt | 99
-rw-r--r--  sys/dev/qlxgb/qla_dbg.c | 263
-rw-r--r--  sys/dev/qlxgb/qla_dbg.h | 85
-rw-r--r--  sys/dev/qlxgb/qla_def.h | 208
-rw-r--r--  sys/dev/qlxgb/qla_glbl.h | 109
-rw-r--r--  sys/dev/qlxgb/qla_hw.c | 1776
-rw-r--r--  sys/dev/qlxgb/qla_hw.h | 831
-rw-r--r--  sys/dev/qlxgb/qla_inline.h | 229
-rw-r--r--  sys/dev/qlxgb/qla_ioctl.c | 119
-rw-r--r--  sys/dev/qlxgb/qla_ioctl.h | 64
-rw-r--r--  sys/dev/qlxgb/qla_isr.c | 416
-rw-r--r--  sys/dev/qlxgb/qla_misc.c | 624
-rw-r--r--  sys/dev/qlxgb/qla_os.c | 1481
-rw-r--r--  sys/dev/qlxgb/qla_os.h | 176
-rw-r--r--  sys/dev/qlxgb/qla_reg.h | 248
-rw-r--r--  sys/dev/qlxgb/qla_ver.h | 41
-rw-r--r--  sys/dev/quicc/quicc_core.c | 2
-rw-r--r--  sys/dev/re/if_re.c | 48
-rw-r--r--  sys/dev/rndtest/rndtest.c | 4
-rw-r--r--  sys/dev/rt/if_rt.c | 2
-rw-r--r--  sys/dev/safe/safe.c | 3
-rw-r--r--  sys/dev/scc/scc_core.c | 2
-rw-r--r--  sys/dev/sdhci/sdhci.c | 2
-rw-r--r--  sys/dev/sfxge/common/efsys.h | 834
-rw-r--r--  sys/dev/sfxge/common/efx.h | 1893
-rw-r--r--  sys/dev/sfxge/common/efx_bootcfg.c | 342
-rw-r--r--  sys/dev/sfxge/common/efx_ev.c | 1112
-rw-r--r--  sys/dev/sfxge/common/efx_filter.c | 1017
-rw-r--r--  sys/dev/sfxge/common/efx_impl.h | 734
-rw-r--r--  sys/dev/sfxge/common/efx_intr.c | 354
-rw-r--r--  sys/dev/sfxge/common/efx_mac.c | 684
-rw-r--r--  sys/dev/sfxge/common/efx_mcdi.c | 733
-rw-r--r--  sys/dev/sfxge/common/efx_mcdi.h | 238
-rw-r--r--  sys/dev/sfxge/common/efx_mon.c | 269
-rw-r--r--  sys/dev/sfxge/common/efx_nic.c | 674
-rw-r--r--  sys/dev/sfxge/common/efx_nvram.c | 372
-rw-r--r--  sys/dev/sfxge/common/efx_phy.c | 752
-rw-r--r--  sys/dev/sfxge/common/efx_port.c | 226
-rw-r--r--  sys/dev/sfxge/common/efx_regs.h | 3846
-rw-r--r--  sys/dev/sfxge/common/efx_regs_ef10.h | 2682
-rw-r--r--  sys/dev/sfxge/common/efx_regs_mcdi.h | 2786
-rw-r--r--  sys/dev/sfxge/common/efx_regs_pci.h | 2376
-rw-r--r--  sys/dev/sfxge/common/efx_rx.c | 816
-rw-r--r--  sys/dev/sfxge/common/efx_sram.c | 294
-rw-r--r--  sys/dev/sfxge/common/efx_tx.c | 430
-rw-r--r--  sys/dev/sfxge/common/efx_types.h | 1605
-rw-r--r--  sys/dev/sfxge/common/efx_vpd.c | 999
-rw-r--r--  sys/dev/sfxge/common/efx_wol.c | 396
-rw-r--r--  sys/dev/sfxge/common/siena_flash.h | 132
-rw-r--r--  sys/dev/sfxge/common/siena_impl.h | 477
-rw-r--r--  sys/dev/sfxge/common/siena_mac.c | 545
-rw-r--r--  sys/dev/sfxge/common/siena_mon.c | 248
-rw-r--r--  sys/dev/sfxge/common/siena_nic.c | 964
-rw-r--r--  sys/dev/sfxge/common/siena_nvram.c | 985
-rw-r--r--  sys/dev/sfxge/common/siena_phy.c | 857
-rw-r--r--  sys/dev/sfxge/common/siena_sram.c | 172
-rw-r--r--  sys/dev/sfxge/common/siena_vpd.c | 603
-rw-r--r--  sys/dev/sfxge/sfxge.c | 775
-rw-r--r--  sys/dev/sfxge/sfxge.h | 303
-rw-r--r--  sys/dev/sfxge/sfxge_dma.c | 202
-rw-r--r--  sys/dev/sfxge/sfxge_ev.c | 862
-rw-r--r--  sys/dev/sfxge/sfxge_intr.c | 556
-rw-r--r--  sys/dev/sfxge/sfxge_mcdi.c | 250
-rw-r--r--  sys/dev/sfxge/sfxge_port.c | 789
-rw-r--r--  sys/dev/sfxge/sfxge_rx.c | 1233
-rw-r--r--  sys/dev/sfxge/sfxge_rx.h | 189
-rw-r--r--  sys/dev/sfxge/sfxge_tx.c | 1491
-rw-r--r--  sys/dev/sfxge/sfxge_tx.h | 185
-rw-r--r--  sys/dev/siba/siba_core.c | 12
-rw-r--r--  sys/dev/siis/siis.c | 11
-rw-r--r--  sys/dev/sio/sio_pci.c | 3
-rw-r--r--  sys/dev/sis/if_sis.c | 226
-rw-r--r--  sys/dev/sis/if_sisreg.h | 19
-rw-r--r--  sys/dev/smc/if_smc.c | 145
-rw-r--r--  sys/dev/sound/midi/midi.c | 2
-rw-r--r--  sys/dev/sound/pci/envy24.c | 2
-rw-r--r--  sys/dev/sound/pci/envy24ht.c | 2
-rw-r--r--  sys/dev/sound/pci/maestro.c | 2
-rw-r--r--  sys/dev/sound/pci/spicds.c | 2
-rw-r--r--  sys/dev/sound/pcm/ac97.c | 2
-rw-r--r--  sys/dev/sound/pcm/feeder.c | 2
-rw-r--r--  sys/dev/sound/pcm/mixer.c | 2
-rw-r--r--  sys/dev/sound/usb/uaudio.c | 2
-rw-r--r--  sys/dev/ste/if_ste.c | 225
-rw-r--r--  sys/dev/ste/if_stereg.h | 22
-rw-r--r--  sys/dev/stge/if_stge.c | 236
-rw-r--r--  sys/dev/stge/if_stgereg.h | 20
-rw-r--r--  sys/dev/syscons/scterm-teken.c | 20
-rw-r--r--  sys/dev/syscons/syscons.c | 4
-rw-r--r--  sys/dev/tdfx/tdfx_pci.c | 2
-rw-r--r--  sys/dev/ti/if_ti.c | 2277
-rw-r--r--  sys/dev/ti/if_tireg.h | 292
-rw-r--r--  sys/dev/tl/if_tl.c | 346
-rw-r--r--  sys/dev/tl/if_tlreg.h | 37
-rw-r--r--  sys/dev/twa/tw_osl_freebsd.c | 2
-rw-r--r--  sys/dev/twe/twe_freebsd.c | 2
-rw-r--r--  sys/dev/tws/tws_services.c | 2
-rw-r--r--  sys/dev/tws/tws_services.h | 1
-rw-r--r--  sys/dev/uart/uart_core.c | 2
-rw-r--r--  sys/dev/uart/uart_dev_ns8250.c | 7
-rw-r--r--  sys/dev/ubsec/ubsec.c | 3
-rw-r--r--  sys/dev/usb/controller/at91dci.c | 4
-rw-r--r--  sys/dev/usb/controller/atmegadci.c | 3
-rw-r--r--  sys/dev/usb/controller/avr32dci.c | 15
-rw-r--r--  sys/dev/usb/controller/ehci.c | 4
-rw-r--r--  sys/dev/usb/controller/musb_otg.c | 2
-rw-r--r--  sys/dev/usb/controller/ohci.c | 4
-rw-r--r--  sys/dev/usb/controller/uhci.c | 4
-rw-r--r--  sys/dev/usb/controller/usb_controller.c | 2
-rw-r--r--  sys/dev/usb/controller/uss820dci.c | 3
-rw-r--r--  sys/dev/usb/controller/xhci.c | 31
-rw-r--r--  sys/dev/usb/input/atp.c | 2
-rw-r--r--  sys/dev/usb/input/uep.c | 2
-rw-r--r--  sys/dev/usb/input/uhid.c | 2
-rw-r--r--  sys/dev/usb/input/ukbd.c | 2
-rw-r--r--  sys/dev/usb/input/ums.c | 2
-rw-r--r--  sys/dev/usb/misc/udbp.c | 2
-rw-r--r--  sys/dev/usb/net/if_aue.c | 2
-rw-r--r--  sys/dev/usb/net/if_axe.c | 2
-rw-r--r--  sys/dev/usb/net/if_cdce.c | 2
-rw-r--r--  sys/dev/usb/net/if_cue.c | 2
-rw-r--r--  sys/dev/usb/net/if_ipheth.c | 2
-rw-r--r--  sys/dev/usb/net/if_kue.c | 7
-rw-r--r--  sys/dev/usb/net/if_mos.c | 2
-rw-r--r--  sys/dev/usb/net/if_rue.c | 2
-rw-r--r--  sys/dev/usb/net/if_udav.c | 2
-rw-r--r--  sys/dev/usb/net/if_usie.c | 2
-rw-r--r--  sys/dev/usb/net/uhso.c | 8
-rw-r--r--  sys/dev/usb/net/usb_ethernet.c | 3
-rw-r--r--  sys/dev/usb/quirk/usb_quirk.c | 5
-rw-r--r--  sys/dev/usb/serial/u3g.c | 2
-rw-r--r--  sys/dev/usb/serial/ubsa.c | 2
-rw-r--r--  sys/dev/usb/serial/ubser.c | 2
-rw-r--r--  sys/dev/usb/serial/uchcom.c | 2
-rw-r--r--  sys/dev/usb/serial/ufoma.c | 4
-rw-r--r--  sys/dev/usb/serial/uftdi.c | 4
-rw-r--r--  sys/dev/usb/serial/ulpt.c | 2
-rw-r--r--  sys/dev/usb/serial/umcs.c | 2
-rw-r--r--  sys/dev/usb/serial/umodem.c | 10
-rw-r--r--  sys/dev/usb/serial/umoscom.c | 2
-rw-r--r--  sys/dev/usb/serial/uplcom.c | 4
-rw-r--r--  sys/dev/usb/serial/usb_serial.c | 2
-rw-r--r--  sys/dev/usb/serial/uslcom.c | 195
-rw-r--r--  sys/dev/usb/serial/uvisor.c | 5
-rw-r--r--  sys/dev/usb/serial/uvscom.c | 2
-rw-r--r--  sys/dev/usb/storage/umass.c | 39
-rw-r--r--  sys/dev/usb/storage/urio.c | 6
-rw-r--r--  sys/dev/usb/storage/ustorage_fs.c | 9
-rw-r--r--  sys/dev/usb/template/usb_template.c | 4
-rw-r--r--  sys/dev/usb/usb.h | 4
-rw-r--r--  sys/dev/usb/usb_busdma.c | 14
-rw-r--r--  sys/dev/usb/usb_compat_linux.c | 12
-rw-r--r--  sys/dev/usb/usb_dev.c | 10
-rw-r--r--  sys/dev/usb/usb_device.c | 3
-rw-r--r--  sys/dev/usb/usb_generic.c | 6
-rw-r--r--  sys/dev/usb/usb_hub.c | 2
-rw-r--r--  sys/dev/usb/usb_ioctl.h | 4
-rw-r--r--  sys/dev/usb/usb_msctest.c | 32
-rw-r--r--  sys/dev/usb/usb_process.c | 11
-rw-r--r--  sys/dev/usb/usb_request.c | 2
-rw-r--r--  sys/dev/usb/usb_transfer.c | 4
-rw-r--r--  sys/dev/usb/usb_util.c | 53
-rw-r--r--  sys/dev/usb/usb_util.h | 1
-rw-r--r--  sys/dev/usb/usbdevs | 9
-rw-r--r--  sys/dev/usb/wlan/if_rum.c | 2
-rw-r--r--  sys/dev/usb/wlan/if_run.c | 4
-rw-r--r--  sys/dev/usb/wlan/if_uath.c | 20
-rw-r--r--  sys/dev/usb/wlan/if_upgt.c | 39
-rw-r--r--  sys/dev/usb/wlan/if_ural.c | 2
-rw-r--r--  sys/dev/usb/wlan/if_urtw.c | 4
-rw-r--r--  sys/dev/usb/wlan/if_zyd.c | 6
-rw-r--r--  sys/dev/virtio/balloon/virtio_balloon.c | 569
-rw-r--r--  sys/dev/virtio/balloon/virtio_balloon.h | 41
-rw-r--r--  sys/dev/virtio/block/virtio_blk.c | 1149
-rw-r--r--  sys/dev/virtio/block/virtio_blk.h | 106
-rw-r--r--  sys/dev/virtio/network/if_vtnet.c | 2746
-rw-r--r--  sys/dev/virtio/network/if_vtnetvar.h | 240
-rw-r--r--  sys/dev/virtio/network/virtio_net.h | 138
-rw-r--r--  sys/dev/virtio/pci/virtio_pci.c | 1081
-rw-r--r--  sys/dev/virtio/pci/virtio_pci.h | 64
-rw-r--r--  sys/dev/virtio/virtio.c | 283
-rw-r--r--  sys/dev/virtio/virtio.h | 130
-rw-r--r--  sys/dev/virtio/virtio_bus_if.m | 92
-rw-r--r--  sys/dev/virtio/virtio_if.m | 43
-rw-r--r--  sys/dev/virtio/virtio_ring.h | 119
-rw-r--r--  sys/dev/virtio/virtqueue.c | 755
-rw-r--r--  sys/dev/virtio/virtqueue.h | 98
-rw-r--r--  sys/dev/wb/if_wb.c | 272
-rw-r--r--  sys/dev/wb/if_wbreg.h | 22
-rw-r--r--  sys/dev/wi/if_wi.c | 3
-rw-r--r--  sys/dev/xe/if_xe.c | 2
-rw-r--r--  sys/dev/xen/balloon/balloon.c | 7
-rw-r--r--  sys/dev/xen/blkback/blkback.c | 2
-rw-r--r--  sys/dev/xen/blkfront/blkfront.c | 2
-rw-r--r--  sys/dev/xl/if_xl.c | 205
-rw-r--r--  sys/dev/xl/if_xlreg.h | 32
357 files changed, 69579 insertions, 4676 deletions
diff --git a/sys/dev/aac/aac.c b/sys/dev/aac/aac.c
index 45cfa02..68854ae 100644
--- a/sys/dev/aac/aac.c
+++ b/sys/dev/aac/aac.c
@@ -219,10 +219,10 @@ static struct cdevsw aac_cdevsw = {
.d_name = "aac",
};
-MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");
+static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");
/* sysctl node */
-SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");
/*
* Device Interface
diff --git a/sys/dev/aac/aac_cam.c b/sys/dev/aac/aac_cam.c
index b5c3b26..2fafa97 100644
--- a/sys/dev/aac/aac_cam.c
+++ b/sys/dev/aac/aac_cam.c
@@ -104,7 +104,7 @@ static driver_t aac_pass_driver = {
DRIVER_MODULE(aacp, aac, aac_pass_driver, aac_pass_devclass, 0, 0);
MODULE_DEPEND(aacp, cam, 1, 1, 1);
-MALLOC_DEFINE(M_AACCAM, "aaccam", "AAC CAM info");
+static MALLOC_DEFINE(M_AACCAM, "aaccam", "AAC CAM info");
static void
aac_cam_rescan(struct aac_softc *sc, uint32_t channel, uint32_t target_id)
diff --git a/sys/dev/acpi_support/acpi_wmi.c b/sys/dev/acpi_support/acpi_wmi.c
index 5b95cc9..2acc262 100644
--- a/sys/dev/acpi_support/acpi_wmi.c
+++ b/sys/dev/acpi_support/acpi_wmi.c
@@ -53,7 +53,7 @@ __FBSDID("$FreeBSD$");
#include <dev/acpica/acpivar.h>
#include "acpi_wmi_if.h"
-MALLOC_DEFINE(M_ACPIWMI, "acpiwmi", "ACPI-WMI mapping");
+static MALLOC_DEFINE(M_ACPIWMI, "acpiwmi", "ACPI-WMI mapping");
#define _COMPONENT ACPI_OEM
ACPI_MODULE_NAME("ACPI_WMI");
diff --git a/sys/dev/acpica/Osd/OsdInterrupt.c b/sys/dev/acpica/Osd/OsdInterrupt.c
index 0ca2b55..3fb1e70 100644
--- a/sys/dev/acpica/Osd/OsdInterrupt.c
+++ b/sys/dev/acpica/Osd/OsdInterrupt.c
@@ -51,7 +51,7 @@ __FBSDID("$FreeBSD$");
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("INTERRUPT")
-MALLOC_DEFINE(M_ACPIINTR, "acpiintr", "ACPI interrupt");
+static MALLOC_DEFINE(M_ACPIINTR, "acpiintr", "ACPI interrupt");
struct acpi_intr {
SLIST_ENTRY(acpi_intr) ai_link;
diff --git a/sys/dev/acpica/Osd/OsdMemory.c b/sys/dev/acpica/Osd/OsdMemory.c
index 4b7eca2..615c4b5 100644
--- a/sys/dev/acpica/Osd/OsdMemory.c
+++ b/sys/dev/acpica/Osd/OsdMemory.c
@@ -40,7 +40,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/pmap.h>
-MALLOC_DEFINE(M_ACPICA, "acpica", "ACPI CA memory pool");
+static MALLOC_DEFINE(M_ACPICA, "acpica", "ACPI CA memory pool");
void *
AcpiOsAllocate(ACPI_SIZE Size)
diff --git a/sys/dev/acpica/Osd/OsdSchedule.c b/sys/dev/acpica/Osd/OsdSchedule.c
index 32d49bc..fd1b931 100644
--- a/sys/dev/acpica/Osd/OsdSchedule.c
+++ b/sys/dev/acpica/Osd/OsdSchedule.c
@@ -65,7 +65,7 @@ TUNABLE_INT("debug.acpi.max_tasks", &acpi_max_tasks);
static int acpi_max_threads = ACPI_MAX_THREADS;
TUNABLE_INT("debug.acpi.max_threads", &acpi_max_threads);
-MALLOC_DEFINE(M_ACPITASK, "acpitask", "ACPI deferred task");
+static MALLOC_DEFINE(M_ACPITASK, "acpitask", "ACPI deferred task");
struct acpi_task_ctx {
struct task at_task;
diff --git a/sys/dev/acpica/Osd/OsdSynch.c b/sys/dev/acpica/Osd/OsdSynch.c
index 5e2e777..84e331c 100644
--- a/sys/dev/acpica/Osd/OsdSynch.c
+++ b/sys/dev/acpica/Osd/OsdSynch.c
@@ -45,7 +45,7 @@ __FBSDID("$FreeBSD$");
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")
-MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
+static MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
/*
* Convert milliseconds to ticks.
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 5047e61..bff74fb 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -68,7 +68,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_param.h>
-MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices");
+static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices");
/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT ACPI_BUS
diff --git a/sys/dev/acpica/acpi_cmbat.c b/sys/dev/acpica/acpi_cmbat.c
index 6c6525d..ba44da8 100644
--- a/sys/dev/acpica/acpi_cmbat.c
+++ b/sys/dev/acpica/acpi_cmbat.c
@@ -46,7 +46,8 @@ __FBSDID("$FreeBSD$");
#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpiio.h>
-MALLOC_DEFINE(M_ACPICMBAT, "acpicmbat", "ACPI control method battery data");
+static MALLOC_DEFINE(M_ACPICMBAT, "acpicmbat",
+ "ACPI control method battery data");
/* Number of times to retry initialization before giving up. */
#define ACPI_CMBAT_RETRY_MAX 6
diff --git a/sys/dev/acpica/acpi_ec.c b/sys/dev/acpica/acpi_ec.c
index 7bf1b58..5c71d3d 100644
--- a/sys/dev/acpica/acpi_ec.c
+++ b/sys/dev/acpica/acpi_ec.c
@@ -179,7 +179,7 @@ struct acpi_ec_softc {
ACPI_SERIAL_DECL(ec, "ACPI embedded controller");
SYSCTL_DECL(_debug_acpi);
-SYSCTL_NODE(_debug_acpi, OID_AUTO, ec, CTLFLAG_RD, NULL, "EC debugging");
+static SYSCTL_NODE(_debug_acpi, OID_AUTO, ec, CTLFLAG_RD, NULL, "EC debugging");
static int ec_burst_mode;
TUNABLE_INT("debug.acpi.ec.burst", &ec_burst_mode);
diff --git a/sys/dev/acpica/acpi_pci_link.c b/sys/dev/acpica/acpi_pci_link.c
index d5d2d82..ba03d72 100644
--- a/sys/dev/acpica/acpi_pci_link.c
+++ b/sys/dev/acpica/acpi_pci_link.c
@@ -120,7 +120,7 @@ struct link_res_request {
int link_index;
};
-MALLOC_DEFINE(M_PCI_LINK, "pci_link", "ACPI PCI Link structures");
+static MALLOC_DEFINE(M_PCI_LINK, "pci_link", "ACPI PCI Link structures");
static int pci_link_interrupt_weights[NUM_ACPI_INTERRUPTS];
static int pci_link_bios_isa_irqs;
diff --git a/sys/dev/acpica/acpi_perf.c b/sys/dev/acpica/acpi_perf.c
index 31b1c63..3f047cc 100644
--- a/sys/dev/acpica/acpi_perf.c
+++ b/sys/dev/acpica/acpi_perf.c
@@ -135,7 +135,7 @@ static devclass_t acpi_perf_devclass;
DRIVER_MODULE(acpi_perf, cpu, acpi_perf_driver, acpi_perf_devclass, 0, 0);
MODULE_DEPEND(acpi_perf, acpi, 1, 1, 1);
-MALLOC_DEFINE(M_ACPIPERF, "acpi_perf", "ACPI Performance states");
+static MALLOC_DEFINE(M_ACPIPERF, "acpi_perf", "ACPI Performance states");
static void
acpi_perf_identify(driver_t *driver, device_t parent)
diff --git a/sys/dev/acpica/acpi_powerres.c b/sys/dev/acpica/acpi_powerres.c
index 9c43c88..b661594 100644
--- a/sys/dev/acpica/acpi_powerres.c
+++ b/sys/dev/acpica/acpi_powerres.c
@@ -55,7 +55,7 @@ __FBSDID("$FreeBSD$");
* on, turned off, etc.
*/
-MALLOC_DEFINE(M_ACPIPWR, "acpipwr", "ACPI power resources");
+static MALLOC_DEFINE(M_ACPIPWR, "acpipwr", "ACPI power resources");
/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT ACPI_POWERRES
diff --git a/sys/dev/acpica/acpi_smbat.c b/sys/dev/acpica/acpi_smbat.c
index bdaf945..0647772 100644
--- a/sys/dev/acpica/acpi_smbat.c
+++ b/sys/dev/acpica/acpi_smbat.c
@@ -63,7 +63,8 @@ static int acpi_smbat_get_bst(device_t dev, struct acpi_bst *bst);
ACPI_SERIAL_DECL(smbat, "ACPI Smart Battery");
SYSCTL_DECL(_debug_acpi);
-SYSCTL_NODE(_debug_acpi, OID_AUTO, batt, CTLFLAG_RD, NULL, "Battery debugging");
+static SYSCTL_NODE(_debug_acpi, OID_AUTO, batt, CTLFLAG_RD, NULL,
+ "Battery debugging");
/* On some laptops with smart batteries, enabling battery monitoring
* software causes keystrokes from atkbd to be lost. This has also been
diff --git a/sys/dev/acpica/acpi_thermal.c b/sys/dev/acpica/acpi_thermal.c
index 18996bd..32e5c2d 100644
--- a/sys/dev/acpica/acpi_thermal.c
+++ b/sys/dev/acpica/acpi_thermal.c
@@ -245,7 +245,7 @@ acpi_tz_attach(device_t dev)
SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
SYSCTL_CHILDREN(acpi_tz_sysctl_tree),
OID_AUTO, "polling_rate", CTLFLAG_RW,
- &acpi_tz_polling_rate, 0, "monitor polling rate");
+ &acpi_tz_polling_rate, 0, "monitor polling interval in seconds");
SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
SYSCTL_CHILDREN(acpi_tz_sysctl_tree), OID_AUTO,
"user_override", CTLFLAG_RW, &acpi_tz_override, 0,
diff --git a/sys/dev/acpica/acpi_video.c b/sys/dev/acpica/acpi_video.c
index f8133aa..eac2d07 100644
--- a/sys/dev/acpica/acpi_video.c
+++ b/sys/dev/acpica/acpi_video.c
@@ -183,7 +183,7 @@ static struct acpi_video_output_queue crt_units, tv_units,
*/
ACPI_SERIAL_DECL(video, "ACPI video");
ACPI_SERIAL_DECL(video_output, "ACPI video output");
-MALLOC_DEFINE(M_ACPIVIDEO, "acpivideo", "ACPI video extension");
+static MALLOC_DEFINE(M_ACPIVIDEO, "acpivideo", "ACPI video extension");
static int
acpi_video_modevent(struct module *mod __unused, int evt, void *cookie __unused)
diff --git a/sys/dev/ae/if_ae.c b/sys/dev/ae/if_ae.c
index 96329fd..3b882ee 100644
--- a/sys/dev/ae/if_ae.c
+++ b/sys/dev/ae/if_ae.c
@@ -1431,7 +1431,7 @@ ae_tx_avail_size(ae_softc_t *sc)
else
avail = sc->txd_ack - sc->txd_cur;
- return (avail - 4); /* 4-byte header. */
+ return (avail);
}
static int
@@ -1448,7 +1448,7 @@ ae_encap(ae_softc_t *sc, struct mbuf **m_head)
len = m0->m_pkthdr.len;
if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
- ae_tx_avail_size(sc) < len) {
+ len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
#ifdef AE_DEBUG
if_printf(sc->ifp, "No free Tx available.\n");
#endif
@@ -1457,11 +1457,10 @@ ae_encap(ae_softc_t *sc, struct mbuf **m_head)
hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
bzero(hdr, sizeof(*hdr));
- sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT; /* Header
- size. */
- to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur; /* Space available to
- * the end of the ring
- */
+ /* Skip header size. */
+ sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
+ /* Space available to the end of the ring */
+ to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
if (to_end >= len) {
m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
} else {
@@ -1840,8 +1839,8 @@ ae_tx_intr(ae_softc_t *sc)
/*
* Move txd ack and align on 4-byte boundary.
*/
- sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
- AE_TXD_BUFSIZE_DEFAULT;
+ sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
+ sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
if ((flags & AE_TXS_SUCCESS) != 0)
ifp->if_opackets++;
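[Worked example of the new ae_encap() space check, assuming sizeof(ae_txd_t) == 4 as the old hard-coded constant implies: a 60-byte frame now requires 60 + 4 + 3 = 67 bytes of ring space, i.e. the payload plus the descriptor header plus up to 3 bytes of the 4-byte alignment slack that ae_tx_intr() consumes when it rounds txd_ack up. The old "avail - 4" bound reserved the header but not that alignment padding.]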
diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c
index 0a20f2c..6aa131c 100644
--- a/sys/dev/ahci/ahci.c
+++ b/sys/dev/ahci/ahci.c
@@ -101,7 +101,7 @@ static void ahci_process_request_sense(device_t dev, union ccb *ccb);
static void ahciaction(struct cam_sim *sim, union ccb *ccb);
static void ahcipoll(struct cam_sim *sim);
-MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers");
+static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers");
static struct {
uint32_t id;
@@ -498,13 +498,14 @@ ahci_attach(device_t dev)
}
/* Attach all channels on this controller */
for (unit = 0; unit < ctlr->channels; unit++) {
- if ((ctlr->ichannels & (1 << unit)) == 0)
- continue;
child = device_add_child(dev, "ahcich", -1);
- if (child == NULL)
+ if (child == NULL) {
device_printf(dev, "failed to add channel device\n");
- else
- device_set_ivars(child, (void *)(intptr_t)unit);
+ continue;
+ }
+ device_set_ivars(child, (void *)(intptr_t)unit);
+ if ((ctlr->ichannels & (1 << unit)) == 0)
+ device_disable(child);
}
bus_generic_attach(dev);
return 0;
@@ -514,15 +515,11 @@ static int
ahci_detach(device_t dev)
{
struct ahci_controller *ctlr = device_get_softc(dev);
- device_t *children;
- int nchildren, i;
+ int i;
/* Detach & delete all children */
- if (!device_get_children(dev, &children, &nchildren)) {
- for (i = 0; i < nchildren; i++)
- device_delete_child(dev, children[i]);
- free(children, M_TEMP);
- }
+ device_delete_all_children(dev);
+
/* Free interrupts. */
for (i = 0; i < ctlr->numirqs; i++) {
if (ctlr->irqs[i].r_irq) {
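[The detach path above, and the ata-disk and ata-pci detach paths below, replace the open-coded child-deletion loop with device_delete_all_children(). For reference, its presumed declaration, taken as an assumption from sys/sys/bus.h since the prototype itself is not part of this diff:

	int device_delete_all_children(device_t dev);
]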
diff --git a/sys/dev/amr/amr.c b/sys/dev/amr/amr.c
index 5ccf9b8..4cb42e2 100644
--- a/sys/dev/amr/amr.c
+++ b/sys/dev/amr/amr.c
@@ -175,7 +175,7 @@ static void amr_init_sysctl(struct amr_softc *sc);
static int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
int32_t flag, struct thread *td);
-MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
+static MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
/********************************************************************************
********************************************************************************
diff --git a/sys/dev/amr/amr_cam.c b/sys/dev/amr/amr_cam.c
index bcd036e..d03ac6d 100644
--- a/sys/dev/amr/amr_cam.c
+++ b/sys/dev/amr/amr_cam.c
@@ -109,7 +109,7 @@ static driver_t amr_pass_driver = {
DRIVER_MODULE(amrp, amr, amr_pass_driver, amr_pass_devclass, 0, 0);
MODULE_DEPEND(amrp, cam, 1, 1, 1);
-MALLOC_DEFINE(M_AMRCAM, "amrcam", "AMR CAM memory");
+static MALLOC_DEFINE(M_AMRCAM, "amrcam", "AMR CAM memory");
/***********************************************************************
* Enqueue/dequeue functions
diff --git a/sys/dev/an/if_an.c b/sys/dev/an/if_an.c
index 0898961..f52f292 100644
--- a/sys/dev/an/if_an.c
+++ b/sys/dev/an/if_an.c
@@ -203,7 +203,8 @@ static char an_conf_cache[256];
/* sysctl vars */
-SYSCTL_NODE(_hw, OID_AUTO, an, CTLFLAG_RD, 0, "Wireless driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, an, CTLFLAG_RD, 0,
+ "Wireless driver parameters");
/* XXX violate ethernet/netgraph callback hooks */
extern void (*ng_ether_attach_p)(struct ifnet *ifp);
diff --git a/sys/dev/ata/ata-all.c b/sys/dev/ata/ata-all.c
index 34453ad..1e577b6 100644
--- a/sys/dev/ata/ata-all.c
+++ b/sys/dev/ata/ata-all.c
@@ -103,7 +103,7 @@ static int ata_dma = 1;
static int atapi_dma = 1;
/* sysctl vars */
-SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
"ATA disk DMA mode control");
diff --git a/sys/dev/ata/ata-disk.c b/sys/dev/ata/ata-disk.c
index bcaf6c4..c872f62 100644
--- a/sys/dev/ata/ata-disk.c
+++ b/sys/dev/ata/ata-disk.c
@@ -172,8 +172,6 @@ ad_detach(device_t dev)
{
struct ad_softc *adp = device_get_ivars(dev);
struct ata_device *atadev = device_get_softc(dev);
- device_t *children;
- int nchildren, i;
/* check that we have a valid disk to detach */
if (!device_get_ivars(dev))
@@ -183,12 +181,7 @@ ad_detach(device_t dev)
callout_drain(&atadev->spindown_timer);
/* detach & delete all children */
- if (!device_get_children(dev, &children, &nchildren)) {
- for (i = 0; i < nchildren; i++)
- if (children[i])
- device_delete_child(dev, children[i]);
- free(children, M_TEMP);
- }
+ device_delete_all_children(dev);
/* destroy disk from the system so we don't get any further requests */
disk_destroy(adp->disk);
diff --git a/sys/dev/ata/ata-pci.c b/sys/dev/ata/ata-pci.c
index b0359e6..9fcd003 100644
--- a/sys/dev/ata/ata-pci.c
+++ b/sys/dev/ata/ata-pci.c
@@ -136,15 +136,10 @@ int
ata_pci_detach(device_t dev)
{
struct ata_pci_controller *ctlr = device_get_softc(dev);
- device_t *children;
- int nchildren, i;
/* detach & delete all children */
- if (!device_get_children(dev, &children, &nchildren)) {
- for (i = 0; i < nchildren; i++)
- device_delete_child(dev, children[i]);
- free(children, M_TEMP);
- }
+ device_delete_all_children(dev);
+
if (ctlr->r_irq) {
bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle);
bus_release_resource(dev, SYS_RES_IRQ, ctlr->r_irq_rid, ctlr->r_irq);
@@ -153,10 +148,20 @@ ata_pci_detach(device_t dev)
}
if (ctlr->chipdeinit != NULL)
ctlr->chipdeinit(dev);
- if (ctlr->r_res2)
+ if (ctlr->r_res2) {
+#ifdef __sparc64__
+ bus_space_unmap(rman_get_bustag(ctlr->r_res2),
+ rman_get_bushandle(ctlr->r_res2), rman_get_size(ctlr->r_res2));
+#endif
bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2, ctlr->r_res2);
- if (ctlr->r_res1)
+ }
+ if (ctlr->r_res1) {
+#ifdef __sparc64__
+ bus_space_unmap(rman_get_bustag(ctlr->r_res1),
+ rman_get_bushandle(ctlr->r_res1), rman_get_size(ctlr->r_res1));
+#endif
bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1);
+ }
return 0;
}
@@ -775,7 +780,6 @@ driver_t ata_pcichannel_driver = {
DRIVER_MODULE(ata, atapci, ata_pcichannel_driver, ata_devclass, 0, 0);
-
/*
* misc support functions
*/
@@ -936,4 +940,3 @@ ata_mode2idx(int mode)
return (mode & ATA_MODE_MASK) + 5;
return (mode & ATA_MODE_MASK) - ATA_PIO0;
}
-
diff --git a/sys/dev/ata/chipsets/ata-promise.c b/sys/dev/ata/chipsets/ata-promise.c
index 2f8b2a5..6f2f57c 100644
--- a/sys/dev/ata/chipsets/ata-promise.c
+++ b/sys/dev/ata/chipsets/ata-promise.c
@@ -94,7 +94,6 @@ static void ata_promise_next_hpkt(struct ata_pci_controller *ctlr);
#define PR_SATA 0x40
#define PR_SATA2 0x80
-
/*
* Promise chipset support functions
*/
@@ -250,6 +249,14 @@ ata_promise_chipinit(device_t dev)
&ctlr->r_rid1, RF_ACTIVE)))
goto failnfree;
+#ifdef __sparc64__
+ if (ctlr->chip->cfg2 == PR_SX4X &&
+ !bus_space_map(rman_get_bustag(ctlr->r_res1),
+ rman_get_bushandle(ctlr->r_res1), rman_get_size(ctlr->r_res1),
+ BUS_SPACE_MAP_LINEAR, NULL))
+ goto failnfree;
+#endif
+
ctlr->r_type2 = SYS_RES_MEMORY;
ctlr->r_rid2 = PCIR_BAR(3);
if (!(ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2,
diff --git a/sys/dev/ata/chipsets/ata-siliconimage.c b/sys/dev/ata/chipsets/ata-siliconimage.c
index 2882b17..52eae96 100644
--- a/sys/dev/ata/chipsets/ata-siliconimage.c
+++ b/sys/dev/ata/chipsets/ata-siliconimage.c
@@ -80,7 +80,6 @@ static void ata_siiprb_dmainit(device_t dev);
#define SII_BUG 0x04
#define SII_4CH 0x08
-
/*
* Silicon Image Inc. (SiI) (former CMD) chipset support functions
*/
@@ -141,6 +140,17 @@ ata_sii_chipinit(device_t dev)
bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1,ctlr->r_res1);
return ENXIO;
}
+#ifdef __sparc64__
+ if (!bus_space_map(rman_get_bustag(ctlr->r_res2),
+ rman_get_bushandle(ctlr->r_res2), rman_get_size(ctlr->r_res2),
+ BUS_SPACE_MAP_LINEAR, NULL)) {
+ bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1,
+ ctlr->r_res1);
+ bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2,
+ ctlr->r_res2);
+ return (ENXIO);
+ }
+#endif
ctlr->ch_attach = ata_siiprb_ch_attach;
ctlr->ch_detach = ata_siiprb_ch_detach;
ctlr->reset = ata_siiprb_reset;
@@ -432,7 +442,6 @@ ata_sii_setmode(device_t dev, int target, int mode)
return (mode);
}
-
struct ata_siiprb_dma_prdentry {
u_int64_t addr;
u_int32_t count;
diff --git a/sys/dev/ath/ah_osdep.c b/sys/dev/ath/ah_osdep.c
index 54dfb73..37b07c1 100644
--- a/sys/dev/ath/ah_osdep.c
+++ b/sys/dev/ath/ah_osdep.c
@@ -38,6 +38,8 @@
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <machine/stdarg.h>
@@ -59,6 +61,17 @@
#define BUSTAG(ah) ((ah)->ah_st)
#endif
+/*
+ * This lock is used to serialise register access for chips which have
+ * problems w/ SMP CPUs issuing concurrent PCI transactions.
+ *
+ * XXX This is a global lock for now; it should be pushed to
+ * a per-device lock in some platform-independent fashion.
+ */
+struct mtx ah_regser_mtx;
+MTX_SYSINIT(ah_regser, &ah_regser_mtx, "Atheros register access mutex",
+ MTX_SPIN);
+
extern void ath_hal_printf(struct ath_hal *, const char*, ...)
__printflike(2,3);
extern void ath_hal_vprintf(struct ath_hal *, const char*, __va_list)
@@ -76,7 +89,8 @@ extern void DO_HALDEBUG(struct ath_hal *ah, u_int mask, const char* fmt, ...);
/* NB: put this here instead of the driver to avoid circular references */
SYSCTL_NODE(_hw, OID_AUTO, ath, CTLFLAG_RD, 0, "Atheros driver parameters");
-SYSCTL_NODE(_hw_ath, OID_AUTO, hal, CTLFLAG_RD, 0, "Atheros HAL parameters");
+static SYSCTL_NODE(_hw_ath, OID_AUTO, hal, CTLFLAG_RD, 0,
+ "Atheros HAL parameters");
#ifdef AH_DEBUG
int ath_hal_debug = 0;
@@ -85,7 +99,7 @@ SYSCTL_INT(_hw_ath_hal, OID_AUTO, debug, CTLFLAG_RW, &ath_hal_debug,
TUNABLE_INT("hw.ath.hal.debug", &ath_hal_debug);
#endif /* AH_DEBUG */
-MALLOC_DEFINE(M_ATH_HAL, "ath_hal", "ath hal data");
+static MALLOC_DEFINE(M_ATH_HAL, "ath_hal", "ath hal data");
void*
ath_hal_malloc(size_t size)
@@ -249,12 +263,16 @@ ath_hal_reg_write(struct ath_hal *ah, u_int32_t reg, u_int32_t val)
alq_post(ath_hal_alq, ale);
}
}
+ if (ah->ah_config.ah_serialise_reg_war)
+ mtx_lock_spin(&ah_regser_mtx);
#if _BYTE_ORDER == _BIG_ENDIAN
if (OS_REG_UNSWAPPED(reg))
bus_space_write_4(tag, h, reg, val);
else
#endif
bus_space_write_stream_4(tag, h, reg, val);
+ if (ah->ah_config.ah_serialise_reg_war)
+ mtx_unlock_spin(&ah_regser_mtx);
}
u_int32_t
@@ -264,12 +282,16 @@ ath_hal_reg_read(struct ath_hal *ah, u_int32_t reg)
bus_space_handle_t h = ah->ah_sh;
u_int32_t val;
+ if (ah->ah_config.ah_serialise_reg_war)
+ mtx_lock_spin(&ah_regser_mtx);
#if _BYTE_ORDER == _BIG_ENDIAN
if (OS_REG_UNSWAPPED(reg))
val = bus_space_read_4(tag, h, reg);
else
#endif
val = bus_space_read_stream_4(tag, h, reg);
+ if (ah->ah_config.ah_serialise_reg_war)
+ mtx_unlock_spin(&ah_regser_mtx);
if (ath_hal_alq) {
struct ale *ale = ath_hal_alq_get(ah);
if (ale) {
@@ -315,12 +337,16 @@ ath_hal_reg_write(struct ath_hal *ah, u_int32_t reg, u_int32_t val)
bus_space_tag_t tag = BUSTAG(ah);
bus_space_handle_t h = ah->ah_sh;
+ if (ah->ah_config.ah_serialise_reg_war)
+ mtx_lock_spin(&ah_regser_mtx);
#if _BYTE_ORDER == _BIG_ENDIAN
if (OS_REG_UNSWAPPED(reg))
bus_space_write_4(tag, h, reg, val);
else
#endif
bus_space_write_stream_4(tag, h, reg, val);
+ if (ah->ah_config.ah_serialise_reg_war)
+ mtx_unlock_spin(&ah_regser_mtx);
}
u_int32_t
@@ -330,12 +356,16 @@ ath_hal_reg_read(struct ath_hal *ah, u_int32_t reg)
bus_space_handle_t h = ah->ah_sh;
u_int32_t val;
+ if (ah->ah_config.ah_serialise_reg_war)
+ mtx_lock_spin(&ah_regser_mtx);
#if _BYTE_ORDER == _BIG_ENDIAN
if (OS_REG_UNSWAPPED(reg))
val = bus_space_read_4(tag, h, reg);
else
#endif
val = bus_space_read_stream_4(tag, h, reg);
+ if (ah->ah_config.ah_serialise_reg_war)
+ mtx_unlock_spin(&ah_regser_mtx);
return val;
}
#endif /* AH_DEBUG || AH_REGOPS_FUNC */
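[A minimal sketch of how a driver could key off the new serialisation WAR. HAL_CAP_SERIALISE_WAR and ah_serialise_reg_war come from this diff; the attach-time placement and the exact ath_hal_getcapability() argument usage are assumptions:

	/* Enable serialised register access when the HAL reports the WAR. */
	if (ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 0, NULL) == HAL_OK)
		ah->ah_config.ah_serialise_reg_war = 1;
]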
diff --git a/sys/dev/ath/ath_hal/ah.c b/sys/dev/ath/ath_hal/ah.c
index 383ae8f..3af16aa 100644
--- a/sys/dev/ath/ath_hal/ah.c
+++ b/sys/dev/ath/ath_hal/ah.c
@@ -114,11 +114,15 @@ ath_hal_mac_name(struct ath_hal *ah)
case AR_XSREV_VERSION_SOWL:
return "9160";
case AR_XSREV_VERSION_MERLIN:
- return "9280";
+ if (AH_PRIVATE(ah)->ah_ispcie)
+ return "9280";
+ return "9220";
case AR_XSREV_VERSION_KITE:
return "9285";
case AR_XSREV_VERSION_KIWI:
- return "9287";
+ if (AH_PRIVATE(ah)->ah_ispcie)
+ return "9287";
+ return "9227";
}
return "????";
}
@@ -661,6 +665,8 @@ ath_hal_getcapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type,
return pCap->halHasLongRxDescTsf ? HAL_OK : HAL_ENOTSUPP;
case HAL_CAP_BB_READ_WAR: /* Baseband read WAR */
return pCap->halHasBBReadWar? HAL_OK : HAL_ENOTSUPP;
+ case HAL_CAP_SERIALISE_WAR: /* PCI register serialisation */
+ return pCap->halSerialiseRegWar ? HAL_OK : HAL_ENOTSUPP;
default:
return HAL_EINVAL;
}
diff --git a/sys/dev/ath/ath_hal/ah.h b/sys/dev/ath/ath_hal/ah.h
index a5d9156..b195d02 100644
--- a/sys/dev/ath/ath_hal/ah.h
+++ b/sys/dev/ath/ath_hal/ah.h
@@ -150,6 +150,7 @@ typedef enum {
HAL_CAP_RXDESC_SELFLINK = 242, /* support a self-linked tail RX descriptor */
HAL_CAP_LONG_RXDESC_TSF = 243, /* hardware supports 32bit TSF in RX descriptor */
HAL_CAP_BB_READ_WAR = 244, /* baseband read WAR */
+ HAL_CAP_SERIALISE_WAR = 245, /* serialise register access on PCI */
} HAL_CAPABILITY_TYPE;
/*
@@ -780,6 +781,8 @@ typedef struct
int ah_dma_beacon_response_time;/* in TU's */
int ah_sw_beacon_response_time; /* in TU's */
int ah_additional_swba_backoff; /* in TU's */
+ int ah_force_full_reset; /* force full chip reset rather than warm reset */
+ int ah_serialise_reg_war; /* force serialisation of register IO */
} HAL_OPS_CONFIG;
/*
@@ -810,6 +813,8 @@ struct ath_hal {
uint16_t *ah_eepromdata; /* eeprom buffer, if needed */
+ uint32_t ah_intrstate[8]; /* last int state */
+
HAL_OPS_CONFIG ah_config;
const HAL_RATE_TABLE *__ahdecl(*ah_getRateTable)(struct ath_hal *,
u_int mode);
@@ -1023,6 +1028,9 @@ struct ath_hal {
struct ath_desc *);
void __ahdecl(*ah_set11nBurstDuration)(struct ath_hal *,
struct ath_desc *, u_int);
+ uint32_t __ahdecl(*ah_get_mib_cycle_counts_pct) (struct ath_hal *,
+ uint32_t *, uint32_t *, uint32_t *, uint32_t *);
+
uint32_t __ahdecl(*ah_get11nExtBusy)(struct ath_hal *);
void __ahdecl(*ah_set11nMac2040)(struct ath_hal *,
HAL_HT_MACMODE);
diff --git a/sys/dev/ath/ath_hal/ah_devid.h b/sys/dev/ath/ath_hal/ah_devid.h
index c7a98dd..e6bea21 100644
--- a/sys/dev/ath/ath_hal/ah_devid.h
+++ b/sys/dev/ath/ath_hal/ah_devid.h
@@ -73,11 +73,11 @@
/* AR5416 compatible devid's */
#define AR5416_DEVID_PCI 0x0023 /* AR5416 PCI (MB/CB) Owl */
-#define AR5416_DEVID_PCIE 0x0024 /* AR5416 PCI-E (XB) Owl */
+#define AR5416_DEVID_PCIE 0x0024 /* AR5418 PCI-E (XB) Owl */
#define AR5416_AR9130_DEVID 0x000b /* AR9130 SoC WiMAC */
#define AR9160_DEVID_PCI 0x0027 /* AR9160 PCI Sowl */
#define AR9280_DEVID_PCI 0x0029 /* AR9280 PCI Merlin */
-#define AR9280_DEVID_PCIE 0x002a /* AR9280 PCI-E Merlin */
+#define AR9280_DEVID_PCIE 0x002a /* AR9220 PCI-E Merlin */
#define AR9285_DEVID_PCIE 0x002b /* AR9285 PCI-E Kite */
#define AR2427_DEVID_PCIE 0x002c /* AR2427 PCI-E w/ 802.11n bonded out */
#define AR9287_DEVID_PCI 0x002d /* AR9227 PCI Kiwi */
diff --git a/sys/dev/ath/ath_hal/ah_internal.h b/sys/dev/ath/ath_hal/ah_internal.h
index 4378c83..a253b65 100644
--- a/sys/dev/ath/ath_hal/ah_internal.h
+++ b/sys/dev/ath/ath_hal/ah_internal.h
@@ -210,7 +210,8 @@ typedef struct {
halHasRxSelfLinkedTail : 1,
halSupportsFastClock5GHz : 1, /* Hardware supports 5ghz fast clock; check eeprom/channel before using */
halHasLongRxDescTsf : 1,
- halHasBBReadWar : 1;
+ halHasBBReadWar : 1,
+ halSerialiseRegWar : 1;
uint32_t halWirelessModes;
uint16_t halTotalQueues;
uint16_t halKeyCacheSize;
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416.h b/sys/dev/ath/ath_hal/ar5416/ar5416.h
index 35d9a4d..5006c31 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416.h
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416.h
@@ -112,11 +112,13 @@ struct ath_hal_5416 {
int ah_hangs; /* h/w hangs state */
uint8_t ah_keytype[AR5416_KEYTABLE_SIZE];
/*
- * Extension Channel Rx Clear State
+ * Primary/Extension Channel Tx, Rx, Rx Clear State
*/
uint32_t ah_cycleCount;
uint32_t ah_ctlBusy;
uint32_t ah_extBusy;
+ uint32_t ah_rxBusy;
+ uint32_t ah_txBusy;
uint32_t ah_rx_chainmask;
uint32_t ah_tx_chainmask;
@@ -194,6 +196,9 @@ extern uint32_t ar5416GetCurRssi(struct ath_hal *ah);
extern HAL_BOOL ar5416SetAntennaSwitch(struct ath_hal *, HAL_ANT_SETTING);
extern HAL_BOOL ar5416SetDecompMask(struct ath_hal *, uint16_t, int);
extern void ar5416SetCoverageClass(struct ath_hal *, uint8_t, int);
+extern uint32_t ar5416GetMibCycleCountsPct(struct ath_hal *ah,
+ uint32_t *rxc_pcnt, uint32_t *rxextc_pcnt, uint32_t *rxf_pcnt,
+ uint32_t *txf_pcnt);
extern uint32_t ar5416Get11nExtBusy(struct ath_hal *ah);
extern void ar5416Set11nMac2040(struct ath_hal *ah, HAL_HT_MACMODE mode);
extern HAL_HT_RXCLEAR ar5416Get11nRxClear(struct ath_hal *ah);
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_ani.c b/sys/dev/ath/ath_hal/ar5416/ar5416_ani.c
index 5af6b24..deaacd7 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_ani.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_ani.c
@@ -227,7 +227,7 @@ ar5416AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param)
u_int level = param;
HALDEBUG(ah, HAL_DEBUG_ANI, "%s: HAL_ANI_NOISE_IMMUNITY_LEVEL: set level = %d\n", __func__, level);
- if (level >= params->maxNoiseImmunityLevel) {
+ if (level > params->maxNoiseImmunityLevel) {
HALDEBUG(ah, HAL_DEBUG_ANI,
"%s: immunity level out of range (%u > %u)\n",
__func__, level, params->maxNoiseImmunityLevel);
@@ -314,7 +314,7 @@ ar5416AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param)
u_int level = param;
HALDEBUG(ah, HAL_DEBUG_ANI, "%s: HAL_ANI_FIRSTEP_LEVEL: level = %d\n", __func__, level);
- if (level >= params->maxFirstepLevel) {
+ if (level > params->maxFirstepLevel) {
HALDEBUG(ah, HAL_DEBUG_ANI,
"%s: firstep level out of range (%u > %u)\n",
__func__, level, params->maxFirstepLevel);
@@ -333,7 +333,7 @@ ar5416AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param)
u_int level = param;
HALDEBUG(ah, HAL_DEBUG_ANI, "%s: HAL_ANI_SPUR_IMMUNITY_LEVEL: level = %d\n", __func__, level);
- if (level >= params->maxSpurImmunityLevel) {
+ if (level > params->maxSpurImmunityLevel) {
HALDEBUG(ah, HAL_DEBUG_ANI,
"%s: spur immunity level out of range (%u > %u)\n",
__func__, level, params->maxSpurImmunityLevel);
@@ -342,11 +342,6 @@ ar5416AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param)
OS_REG_RMW_FIELD(ah, AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1, params->cycPwrThr1[level]);
- /* Only set the ext channel cycpwr_thr1 field for ht/40 */
- if (IEEE80211_IS_CHAN_HT40(AH_PRIVATE(ah)->ah_curchan))
- OS_REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
- AR_PHY_EXT_TIMING5_CYCPWR_THR1, params->cycPwrThr1[level]);
-
if (level > aniState->spurImmunityLevel)
ahp->ah_stats.ast_ani_spurup++;
else if (level < aniState->spurImmunityLevel)
@@ -384,20 +379,30 @@ ar5416AniOfdmErrTrigger(struct ath_hal *ah)
aniState = ahp->ah_curani;
params = aniState->params;
/* First, raise noise immunity level, up to max */
- if ((AH5416(ah)->ah_ani_function & (1 << HAL_ANI_NOISE_IMMUNITY_LEVEL)) &&
- (aniState->noiseImmunityLevel+1 < params->maxNoiseImmunityLevel)) {
- ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL,
- aniState->noiseImmunityLevel + 1);
- return;
+ if (aniState->noiseImmunityLevel+1 < params->maxNoiseImmunityLevel) {
+ if (ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL,
+ aniState->noiseImmunityLevel + 1))
+ return;
}
/* then, raise spur immunity level, up to max */
- if ((AH5416(ah)->ah_ani_function & (1 << HAL_ANI_SPUR_IMMUNITY_LEVEL)) &&
- (aniState->spurImmunityLevel+1 < params->maxSpurImmunityLevel)) {
- ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL,
- aniState->spurImmunityLevel + 1);
- return;
+ if (aniState->spurImmunityLevel+1 < params->maxSpurImmunityLevel) {
+ if (ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL,
+ aniState->spurImmunityLevel + 1))
+ return;
}
+ /*
+ * In the case of AP mode operation, we cannot bucketize beacons
+ * according to RSSI. Instead, raise Firstep level, up to max, and
+ * simply return.
+ */
+ if (AH_PRIVATE(ah)->ah_opmode == HAL_M_HOSTAP) {
+ if (aniState->firstepLevel < params->maxFirstepLevel) {
+ if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel + 1))
+ return;
+ }
+ }
if (ANI_ENA_RSSI(ah)) {
int32_t rssi = BEACON_RSSI(ahp);
if (rssi > params->rssiThrHigh) {
@@ -418,9 +423,9 @@ ar5416AniOfdmErrTrigger(struct ath_hal *ah)
* raise firstep level
*/
if (aniState->firstepLevel+1 < params->maxFirstepLevel) {
- ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- return;
+ if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel + 1))
+ return;
}
} else if (rssi > params->rssiThrLow) {
/*
@@ -432,9 +437,9 @@ ar5416AniOfdmErrTrigger(struct ath_hal *ah)
HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION,
AH_TRUE);
if (aniState->firstepLevel+1 < params->maxFirstepLevel)
- ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- return;
+ if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel + 1))
+ return;
} else {
/*
* Beacon rssi is low, if in 11b/g mode, turn off ofdm
@@ -447,9 +452,9 @@ ar5416AniOfdmErrTrigger(struct ath_hal *ah)
HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION,
AH_FALSE);
if (aniState->firstepLevel > 0)
- ar5416AniControl(ah,
- HAL_ANI_FIRSTEP_LEVEL, 0);
- return;
+ if (ar5416AniControl(ah,
+ HAL_ANI_FIRSTEP_LEVEL, 0))
+ return;
}
}
}
@@ -582,6 +587,16 @@ ar5416AniReset(struct ath_hal *ah, const struct ieee80211_channel *chan,
goto finish;
}
+ /*
+ * Use a restrictive set of ANI parameters for hostap mode.
+ */
+ if (opmode == HAL_M_HOSTAP) {
+ if (IEEE80211_IS_CHAN_2GHZ(chan))
+ AH5416(ah)->ah_ani_function =
+ HAL_ANI_SPUR_IMMUNITY_LEVEL | HAL_ANI_FIRSTEP_LEVEL;
+ else
+ AH5416(ah)->ah_ani_function = 0;
+ }
/*
* Automatic processing is done only in station mode right now.
@@ -611,7 +626,7 @@ ar5416AniReset(struct ath_hal *ah, const struct ieee80211_channel *chan,
ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL, 0);
ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL, 0);
ar5416AniControl(ah, HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- AH_TRUE);
+ AH_FALSE);
ar5416AniControl(ah, HAL_ANI_CCK_WEAK_SIGNAL_THR, AH_FALSE);
ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL, 0);
ichan->privFlags |= CHANNEL_ANI_SETUP;
@@ -715,6 +730,19 @@ ar5416AniLowerImmunity(struct ath_hal *ah)
aniState = ahp->ah_curani;
params = aniState->params;
+
+ /*
+ * In the case of AP mode operation, we cannot bucketize beacons
+ * according to RSSI. Instead, lower Firstep level, down to min, and
+ * simply return.
+ */
+ if (AH_PRIVATE(ah)->ah_opmode == HAL_M_HOSTAP) {
+ if (aniState->firstepLevel > 0) {
+ if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel - 1))
+ return;
+ }
+ }
if (ANI_ENA_RSSI(ah)) {
int32_t rssi = BEACON_RSSI(ahp);
if (rssi > params->rssiThrHigh) {
@@ -729,41 +757,41 @@ ar5416AniLowerImmunity(struct ath_hal *ah)
* detection or lower firstep level.
*/
if (aniState->ofdmWeakSigDetectOff) {
- ar5416AniControl(ah,
+ if (ar5416AniControl(ah,
HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- AH_TRUE);
- return;
+ AH_TRUE))
+ return;
}
if (aniState->firstepLevel > 0) {
- ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel - 1);
- return;
+ if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel - 1))
+ return;
}
} else {
/*
* Beacon rssi is low, reduce firstep level.
*/
if (aniState->firstepLevel > 0) {
- ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel - 1);
- return;
+ if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel - 1))
+ return;
}
}
}
/* then lower spur immunity level, down to zero */
if (aniState->spurImmunityLevel > 0) {
- ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL,
- aniState->spurImmunityLevel - 1);
- return;
+ if (ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL,
+ aniState->spurImmunityLevel - 1))
+ return;
}
/*
* if all else fails, lower noise immunity level down to a min value
* zero for now
*/
if (aniState->noiseImmunityLevel > 0) {
- ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL,
- aniState->noiseImmunityLevel - 1);
- return;
+ if (ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL,
+ aniState->noiseImmunityLevel - 1))
+ return;
}
}
@@ -782,15 +810,15 @@ ar5416AniGetListenTime(struct ath_hal *ah)
{
struct ath_hal_5212 *ahp = AH5212(ah);
struct ar5212AniState *aniState;
- uint32_t txFrameCount, rxFrameCount, cycleCount;
+ uint32_t rxc_pct, extc_pct, rxf_pct, txf_pct;
int32_t listenTime;
+ int good;
- txFrameCount = OS_REG_READ(ah, AR_TFCNT);
- rxFrameCount = OS_REG_READ(ah, AR_RFCNT);
- cycleCount = OS_REG_READ(ah, AR_CCCNT);
+ good = ar5416GetMibCycleCountsPct(ah,
+ &rxc_pct, &extc_pct, &rxf_pct, &txf_pct);
aniState = ahp->ah_curani;
- if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
+ if (good == 0) {
/*
* Cycle counter wrap (or initial call); it's not possible
* to accurately calculate a value because the registers
@@ -799,14 +827,18 @@ ar5416AniGetListenTime(struct ath_hal *ah)
listenTime = 0;
ahp->ah_stats.ast_ani_lzero++;
} else {
- int32_t ccdelta = cycleCount - aniState->cycleCount;
- int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
- int32_t tfdelta = txFrameCount - aniState->txFrameCount;
+ int32_t ccdelta = AH5416(ah)->ah_cycleCount - aniState->cycleCount;
+ int32_t rfdelta = AH5416(ah)->ah_rxBusy - aniState->rxFrameCount;
+ int32_t tfdelta = AH5416(ah)->ah_txBusy - aniState->txFrameCount;
listenTime = (ccdelta - rfdelta - tfdelta) / CLOCK_RATE;
}
- aniState->cycleCount = cycleCount;
- aniState->txFrameCount = txFrameCount;
- aniState->rxFrameCount = rxFrameCount;
+ aniState->cycleCount = AH5416(ah)->ah_cycleCount;
+ aniState->txFrameCount = AH5416(ah)->ah_txBusy;
+ aniState->rxFrameCount = AH5416(ah)->ah_rxBusy;
+
+ HALDEBUG(ah, HAL_DEBUG_ANI, "rxc=%d, extc=%d, rxf=%d, txf=%d\n",
+ rxc_pct, extc_pct, rxf_pct, txf_pct);
+
return listenTime;
}
@@ -873,10 +905,13 @@ ar5416AniPoll(struct ath_hal *ah, const struct ieee80211_channel *chan)
/* XXX can aniState be null? */
if (aniState == AH_NULL)
return;
+
+ /* Always update from the MIB, for statistics gathering */
+ listenTime = ar5416AniGetListenTime(ah);
+
if (!ANI_ENA(ah))
return;
- listenTime = ar5416AniGetListenTime(ah);
if (listenTime < 0) {
ahp->ah_stats.ast_ani_lneg++;
/* restart ANI period if listenTime is invalid */
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c b/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
index 630efc0..37412fa 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
@@ -908,17 +908,36 @@ ar5416FillCapabilityInfo(struct ath_hal *ah)
pCap->halRfSilentSupport = AH_TRUE;
}
+ /*
+ * The MAC will mark frames as RXed if there's a descriptor
+ * to write them to. So if it hits a self-linked final descriptor,
+ * it'll keep ACKing frames even though they're being silently
+ * dropped. Thus, this particular feature of the driver can't
+ * be used for 802.11n devices.
+ */
ahpriv->ah_rxornIsFatal = AH_FALSE;
+ /*
+ * If it's a PCI NIC, ask the HAL OS layer to serialise
+ * register access, or SMP machines may cause the hardware
+ * to hang. This is applicable to AR5416 and AR9220; I'm not
+ * sure about AR9160 or AR9227.
+ */
+ if (! AH_PRIVATE(ah)->ah_ispcie)
+ pCap->halSerialiseRegWar = 1;
+
return AH_TRUE;
}
static const char*
ar5416Probe(uint16_t vendorid, uint16_t devid)
{
- if (vendorid == ATHEROS_VENDOR_ID &&
- (devid == AR5416_DEVID_PCI || devid == AR5416_DEVID_PCIE))
- return "Atheros 5416";
+ if (vendorid == ATHEROS_VENDOR_ID) {
+ if (devid == AR5416_DEVID_PCI)
+ return "Atheros 5416";
+ if (devid == AR5416_DEVID_PCIE)
+ return "Atheros 5418";
+ }
return AH_NULL;
}
AH_CHIP(AR5416, ar5416Probe, ar5416Attach);
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_cal.c b/sys/dev/ath/ath_hal/ar5416/ar5416_cal.c
index cb57870..d51417f4 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_cal.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_cal.c
@@ -642,15 +642,7 @@ ar5416LoadNF(struct ath_hal *ah, const struct ieee80211_channel *chan)
OS_REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
/* Wait for load to complete, should be fast, a few 10s of us. */
- /*
- * XXX For now, don't be so aggressive in waiting for the NF
- * XXX load to complete. A very busy 11n RX load will cause this
- * XXX to always fail; so just leave it.
- * XXX Later on we may wish to split longcal into two parts - one to do
- * XXX the initial longcal, and one to load in an updated NF value
- * XXX once it's finished - say, by checking it every 500ms.
- */
- if (! ar5212WaitNFCalComplete(ah, 5)) {
+ if (! ar5212WaitNFCalComplete(ah, 1000)) {
/*
* We timed out waiting for the noisefloor to load, probably due to an
* in-progress rx. Simply return here and allow the load plenty of time
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_interrupts.c b/sys/dev/ath/ath_hal/ar5416/ar5416_interrupts.c
index 3bc2cc5..35511ae 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_interrupts.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_interrupts.c
@@ -70,6 +70,13 @@ ar5416GetPendingInterrupts(struct ath_hal *ah, HAL_INT *masked)
uint32_t isr, isr0, isr1, sync_cause = 0;
HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
+#ifdef AH_INTERRUPT_DEBUGGING
+ /*
+ * Blank the interrupt debugging area regardless.
+ */
+ bzero(&ah->ah_intrstate, sizeof(ah->ah_intrstate));
+#endif
+
/*
* Verify there's a mac interrupt and the RTC is on.
*/
@@ -90,6 +97,16 @@ ar5416GetPendingInterrupts(struct ath_hal *ah, HAL_INT *masked)
return AH_FALSE;
}
+#ifdef AH_INTERRUPT_DEBUGGING
+ ah->ah_intrstate[0] = isr;
+ ah->ah_intrstate[1] = OS_REG_READ(ah, AR_ISR_S0);
+ ah->ah_intrstate[2] = OS_REG_READ(ah, AR_ISR_S1);
+ ah->ah_intrstate[3] = OS_REG_READ(ah, AR_ISR_S2);
+ ah->ah_intrstate[4] = OS_REG_READ(ah, AR_ISR_S3);
+ ah->ah_intrstate[5] = OS_REG_READ(ah, AR_ISR_S4);
+ ah->ah_intrstate[6] = OS_REG_READ(ah, AR_ISR_S5);
+#endif
+
if (isr != 0) {
struct ath_hal_5212 *ahp = AH5212(ah);
uint32_t mask2;
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_misc.c b/sys/dev/ath/ath_hal/ar5416/ar5416_misc.c
index 880900f..b1c1b98 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_misc.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_misc.c
@@ -172,6 +172,57 @@ ar5416SetCoverageClass(struct ath_hal *ah, uint8_t coverageclass, int now)
}
/*
+ * Return the busy for rx_frame, rx_clear, and tx_frame
+ */
+uint32_t
+ar5416GetMibCycleCountsPct(struct ath_hal *ah, uint32_t *rxc_pcnt,
+ uint32_t *extc_pcnt, uint32_t *rxf_pcnt, uint32_t *txf_pcnt)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u_int32_t good = 1;
+
+ /* XXX freeze/unfreeze mib counters */
+ uint32_t rc = OS_REG_READ(ah, AR_RCCNT);
+ uint32_t ec = OS_REG_READ(ah, AR_EXTRCCNT);
+ uint32_t rf = OS_REG_READ(ah, AR_RFCNT);
+ uint32_t tf = OS_REG_READ(ah, AR_TFCNT);
+ uint32_t cc = OS_REG_READ(ah, AR_CCCNT); /* read cycles last */
+
+ if (ahp->ah_cycleCount == 0 || ahp->ah_cycleCount > cc) {
+ /*
+ * Cycle counter wrap (or initial call); it's not possible
+ * to accurately calculate a value because the registers
+ * right shift rather than wrap--so punt and return 0.
+ */
+ HALDEBUG(ah, HAL_DEBUG_ANY,
+ "%s: cycle counter wrap. ExtBusy = 0\n", __func__);
+ good = 0;
+ } else {
+ uint32_t cc_d = cc - ahp->ah_cycleCount;
+ uint32_t rc_d = rc - ahp->ah_ctlBusy;
+ uint32_t ec_d = ec - ahp->ah_extBusy;
+ uint32_t rf_d = rf - ahp->ah_rxBusy;
+ uint32_t tf_d = tf - ahp->ah_txBusy;
+
+ if (cc_d != 0) {
+ *rxc_pcnt = rc_d * 100 / cc_d;
+ *rxf_pcnt = rf_d * 100 / cc_d;
+ *txf_pcnt = tf_d * 100 / cc_d;
+ *extc_pcnt = ec_d * 100 / cc_d;
+ } else {
+ good = 0;
+ }
+ }
+ ahp->ah_cycleCount = cc;
+ ahp->ah_rxBusy = rf;
+ ahp->ah_ctlBusy = rc;
+ ahp->ah_txBusy = tf;
+ ahp->ah_extBusy = ec;
+
+ return good;
+}
+
+/*
* Return approximation of extension channel busy over an time interval
* 0% (clear) -> 100% (busy)
*
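[A hedged sketch of a standalone caller of the new MIB helper; the names are from this diff, while the polling context is an assumption. Two calls at least one sample interval apart are needed before the percentages are valid, since the first call only primes the saved counters:

	uint32_t rxc, extc, rxf, txf;

	if (ar5416GetMibCycleCountsPct(ah, &rxc, &extc, &rxf, &txf) == 1)
		ath_hal_printf(ah, "rx_clear=%u%% rx_frame=%u%% tx_frame=%u%%\n",
		    rxc, rxf, txf);
]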
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_reset.c b/sys/dev/ath/ath_hal/ar5416/ar5416_reset.c
index 97d292e..86a3f92 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_reset.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_reset.c
@@ -146,7 +146,9 @@ ar5416Reset(struct ath_hal *ah, HAL_OPMODE opmode,
/* For chips on which the RTC reset is done, save TSF before it gets cleared */
if (AR_SREV_HOWL(ah) ||
- (AR_SREV_MERLIN(ah) && ath_hal_eepromGetFlag(ah, AR_EEP_OL_PWRCTRL)))
+ (AR_SREV_MERLIN(ah) &&
+ ath_hal_eepromGetFlag(ah, AR_EEP_OL_PWRCTRL)) ||
+ (ah->ah_config.ah_force_full_reset))
tsf = ar5416GetTsf64(ah);
/* Mark PHY as inactive; marked active in ar5416InitBB() */
@@ -722,6 +724,20 @@ ar5416SetRfMode(struct ath_hal *ah, const struct ieee80211_channel *chan)
rfMode |= IEEE80211_IS_CHAN_5GHZ(chan) ?
AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
}
+
+ /*
+ * Set half/quarter mode flags if required.
+ *
+ * This doesn't change the IFS timings at all; that needs to
+ * be done as part of the MAC setup. Similarly, the PLL
+ * configuration also needs some changes for the half/quarter
+ * rate clock.
+ */
+ if (IEEE80211_IS_CHAN_HALF(chan))
+ rfMode |= AR_PHY_MODE_HALF;
+ else if (IEEE80211_IS_CHAN_QUARTER(chan))
+ rfMode |= AR_PHY_MODE_QUARTER;
+
OS_REG_WRITE(ah, AR_PHY_MODE, rfMode);
}
@@ -733,12 +749,15 @@ ar5416ChipReset(struct ath_hal *ah, const struct ieee80211_channel *chan)
{
OS_MARK(ah, AH_MARK_CHIPRESET, chan ? chan->ic_freq : 0);
/*
- * Warm reset is optimistic.
+ * Warm reset is optimistic for open-loop TX power control.
*/
if (AR_SREV_MERLIN(ah) &&
ath_hal_eepromGetFlag(ah, AR_EEP_OL_PWRCTRL)) {
if (!ar5416SetResetReg(ah, HAL_RESET_POWER_ON))
return AH_FALSE;
+ } else if (ah->ah_config.ah_force_full_reset) {
+ if (!ar5416SetResetReg(ah, HAL_RESET_POWER_ON))
+ return AH_FALSE;
} else {
if (!ar5416SetResetReg(ah, HAL_RESET_WARM))
return AH_FALSE;
@@ -1178,7 +1197,7 @@ ar5416GetRfgain(struct ath_hal *ah)
HAL_BOOL
ar5416Disable(struct ath_hal *ah)
{
- if (!ar5212SetPowerMode(ah, HAL_PM_AWAKE, AH_TRUE))
+ if (!ar5416SetPowerMode(ah, HAL_PM_AWAKE, AH_TRUE))
return AH_FALSE;
if (! ar5416SetResetReg(ah, HAL_RESET_COLD))
return AH_FALSE;
@@ -1209,6 +1228,12 @@ ar5416PhyDisable(struct ath_hal *ah)
HAL_BOOL
ar5416SetResetReg(struct ath_hal *ah, uint32_t type)
{
+ /*
+ * Set force wake
+ */
+ OS_REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
+
switch (type) {
case HAL_RESET_POWER_ON:
return ar5416SetResetPowerOn(ah);
@@ -1239,10 +1264,15 @@ ar5416SetResetPowerOn(struct ath_hal *ah)
AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
/*
- * RTC reset and clear
+	 * A power-on reset can be used for open-loop power control or failure
+	 * recovery. If we do an RTC reset while DMA is still running, the
+	 * hardware may corrupt memory. Therefore, reset the AHB first to stop DMA.
*/
if (! AR_SREV_HOWL(ah))
OS_REG_WRITE(ah, AR_RC, AR_RC_AHB);
+ /*
+ * RTC reset and clear
+ */
OS_REG_WRITE(ah, AR_RTC_RESET, 0);
OS_DELAY(20);
@@ -1293,6 +1323,11 @@ ar5416SetReset(struct ath_hal *ah, int type)
#endif /* AH_SUPPORT_AR9130 */
/*
* Reset AHB
+ *
+ * (In case the last interrupt source was a bus timeout.)
+ * XXX TODO: this is not the way to do it! It should be recorded
+ * XXX by the interrupt handler and passed _into_ the
+ * XXX reset path routine so this occurs.
*/
tmpReg = OS_REG_READ(ah, AR_INTR_SYNC_CAUSE);
if (tmpReg & (AR_INTR_SYNC_LOCAL_TIMEOUT|AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
@@ -2608,7 +2643,7 @@ ar5416OverrideIni(struct ath_hal *ah, const struct ieee80211_channel *chan)
if (!AR_SREV_9271(ah))
val &= ~AR_PCU_MISC_MODE2_HWWAR1;
- if (AR_SREV_KIWI_11_OR_LATER(ah))
+ if (AR_SREV_KIWI_10_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
OS_REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416phy.h b/sys/dev/ath/ath_hal/ar5416/ar5416phy.h
index 82e3801..a983277 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416phy.h
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416phy.h
@@ -121,12 +121,6 @@
#define AR_PHY_EXT_MINCCA_PWR_S 23
#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
#define AR_PHY_EXT_CCA_THRESH62_S 16
-/*
- * This duplicates AR_PHY_EXT_CCA_CYCPWR_THR1; it reads more like
- * an ANI register this way.
- */
-#define AR_PHY_EXT_TIMING5_CYCPWR_THR1 0x0000FE00
-#define AR_PHY_EXT_TIMING5_CYCPWR_THR1_S 9
#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
#define AR9280_PHY_EXT_MINCCA_PWR_S 16
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416reg.h b/sys/dev/ath/ath_hal/ar5416/ar5416reg.h
index 8644f0d..6f92eb7 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416reg.h
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416reg.h
@@ -612,10 +612,10 @@
#define AR_XSREV_REVISION_KITE_11 1 /* Kite 1.1 */
#define AR_XSREV_REVISION_KITE_12 2 /* Kite 1.2 */
#define AR_XSREV_VERSION_KIWI 0x180 /* Kiwi (AR9287) */
-#define AR_XSREV_REVISION_KIWI_10 0
-#define AR_XSREV_REVISION_KIWI_11 1
-#define AR_XSREV_REVISION_KIWI_12 2
-#define AR_XSREV_REVISION_KIWI_13 3
+#define AR_XSREV_REVISION_KIWI_10 0 /* Kiwi 1.0 */
+#define AR_XSREV_REVISION_KIWI_11 1 /* Kiwi 1.1 */
+#define AR_XSREV_REVISION_KIWI_12 2 /* Kiwi 1.2 */
+#define AR_XSREV_REVISION_KIWI_13 3 /* Kiwi 1.3 */
/* Owl (AR5416) */
#define AR_SREV_OWL(_ah) \
@@ -701,6 +701,10 @@
#define AR_SREV_KIWI(_ah) \
(AH_PRIVATE((_ah))->ah_macVersion == AR_XSREV_VERSION_KIWI)
+#define AR_SREV_KIWI_10_OR_LATER(_ah) \
+ (AH_PRIVATE((_ah))->ah_macVersion >= AR_XSREV_VERSION_KIWI)
+
+/* XXX TODO: make these handle macVersion > Kiwi */
#define AR_SREV_KIWI_11_OR_LATER(_ah) \
(AR_SREV_KIWI(_ah) && \
AH_PRIVATE((_ah))->ah_macRev >= AR_XSREV_REVISION_KIWI_11)
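On the new macro: AR_SREV_KIWI_10_OR_LATER() compares only the MAC version (so, per the XXX above, it also matches any later MAC version), while AR_SREV_KIWI_11_OR_LATER() requires the Kiwi version exactly plus a minimum MAC revision. A standalone sketch of the two checks with the AH_PRIVATE() indirection replaced by plain variables (hypothetical values, for illustration only):

#include <stdio.h>

#define VERSION_KIWI	0x180
#define REV_KIWI_10	0
#define REV_KIWI_11	1

/* Mirrors AR_SREV_KIWI_10_OR_LATER(): version check only. */
static int
kiwi_10_or_later(int mac_version, int mac_rev)
{
	(void)mac_rev;
	return (mac_version >= VERSION_KIWI);
}

/* Mirrors AR_SREV_KIWI_11_OR_LATER(): exact version, rev >= 1.1. */
static int
kiwi_11_or_later(int mac_version, int mac_rev)
{
	return (mac_version == VERSION_KIWI && mac_rev >= REV_KIWI_11);
}

int
main(void)
{
	/* Kiwi 1.0 is 1.0-or-later but not 1.1-or-later. */
	printf("%d %d\n", kiwi_10_or_later(VERSION_KIWI, REV_KIWI_10),
	    kiwi_11_or_later(VERSION_KIWI, REV_KIWI_10));	/* 1 0 */
	return (0);
}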
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c b/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c
index 62864b5..437e8ee 100644
--- a/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c
+++ b/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c
@@ -892,9 +892,12 @@ ar9280SetAntennaSwitch(struct ath_hal *ah, HAL_ANT_SETTING settings)
static const char*
ar9280Probe(uint16_t vendorid, uint16_t devid)
{
- if (vendorid == ATHEROS_VENDOR_ID &&
- (devid == AR9280_DEVID_PCI || devid == AR9280_DEVID_PCIE))
- return "Atheros 9280";
+ if (vendorid == ATHEROS_VENDOR_ID) {
+ if (devid == AR9280_DEVID_PCI)
+ return "Atheros 9220";
+ if (devid == AR9280_DEVID_PCIE)
+ return "Atheros 9280";
+ }
return AH_NULL;
}
AH_CHIP(AR9280, ar9280Probe, ar9280Attach);
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c b/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c
index 5c0de21..78f7493 100644
--- a/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c
@@ -76,7 +76,7 @@ ar9287AniSetup(struct ath_hal *ah)
/*
* These are the parameters from the AR5416 ANI code;
* they likely need quite a bit of adjustment for the
- * AR9280.
+ * AR9287.
*/
static const struct ar5212AniParams aniparams = {
.maxNoiseImmunityLevel = 4, /* levels 0..4 */
@@ -402,13 +402,6 @@ ar9287WriteIni(struct ath_hal *ah, const struct ieee80211_channel *chan)
regWrites = ath_hal_ini_write(ah, &AH5212(ah)->ah_ini_common, 1, regWrites);
}
-#define AR_BASE_FREQ_2GHZ 2300
-#define AR_BASE_FREQ_5GHZ 4900
-#define AR_SPUR_FEEQ_BOUND_HT40 19
-#define AR_SPUR_FEEQ_BOUND_HT20 10
-
-
-
/*
* Fill all software cached or static hardware state information.
* Return failure if capabilities are to come from EEPROM and
@@ -460,7 +453,7 @@ ar9287FillCapabilityInfo(struct ath_hal *ah)
* This has been disabled - having the HAL flip chainmasks on/off
* when attempting to implement 11n disrupts things. For now, just
* leave this flipped off and worry about implementing TX diversity
- * for legacy and MCS0-7 when 11n is fully functioning.
+ * for legacy and MCS0-15 when 11n is fully functioning.
*/
HAL_BOOL
ar9287SetAntennaSwitch(struct ath_hal *ah, HAL_ANT_SETTING settings)
@@ -471,9 +464,12 @@ ar9287SetAntennaSwitch(struct ath_hal *ah, HAL_ANT_SETTING settings)
static const char*
ar9287Probe(uint16_t vendorid, uint16_t devid)
{
- if (vendorid == ATHEROS_VENDOR_ID &&
- (devid == AR9287_DEVID_PCI || devid == AR9287_DEVID_PCIE))
- return "Atheros 9287";
+ if (vendorid == ATHEROS_VENDOR_ID) {
+ if (devid == AR9287_DEVID_PCI)
+ return "Atheros 9227";
+ if (devid == AR9287_DEVID_PCIE)
+ return "Atheros 9287";
+ }
return AH_NULL;
}
AH_CHIP(AR9287, ar9287Probe, ar9287Attach);
diff --git a/sys/dev/ath/ath_rate/amrr/amrr.c b/sys/dev/ath/ath_rate/amrr/amrr.c
index 5fee76e..b10b826 100644
--- a/sys/dev/ath/ath_rate/amrr/amrr.c
+++ b/sys/dev/ath/ath_rate/amrr/amrr.c
@@ -122,19 +122,21 @@ ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
*/
void
ath_rate_getxtxrates(struct ath_softc *sc, struct ath_node *an,
- uint8_t rix0, uint8_t *rix, uint8_t *try)
+ uint8_t rix0, struct ath_rc_series *rc)
{
struct amrr_node *amn = ATH_NODE_AMRR(an);
-/* rix[0] = amn->amn_tx_rate0; */
- rix[1] = amn->amn_tx_rate1;
- rix[2] = amn->amn_tx_rate2;
- rix[3] = amn->amn_tx_rate3;
+ rc[0].flags = rc[1].flags = rc[2].flags = rc[3].flags = 0;
- try[0] = amn->amn_tx_try0;
- try[1] = amn->amn_tx_try1;
- try[2] = amn->amn_tx_try2;
- try[3] = amn->amn_tx_try3;
+ rc[0].rix = amn->amn_tx_rate0;
+ rc[1].rix = amn->amn_tx_rate1;
+ rc[2].rix = amn->amn_tx_rate2;
+ rc[3].rix = amn->amn_tx_rate3;
+
+ rc[0].tries = amn->amn_tx_try0;
+ rc[1].tries = amn->amn_tx_try1;
+ rc[2].tries = amn->amn_tx_try2;
+ rc[3].tries = amn->amn_tx_try3;
}
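The API change above replaces the parallel rix[]/try[] arrays with an array of struct ath_rc_series, so each of the four rate series carries its index, try count, hardware rate code and flags together. The real definition lives elsewhere in this commit (if_athrate.h); based purely on the fields used in these hunks (rix, tries, ratecode, flags), a plausible minimal sketch is:

#include <stdint.h>

/*
 * Approximate shape of ath_rc_series as used by the rate modules in
 * this commit; field order and widths here are guesses based only on
 * the usage visible in these hunks.
 */
struct ath_rc_series {
	uint8_t		rix;		/* rate table index */
	uint8_t		ratecode;	/* h/w rate code */
	uint8_t		tries;		/* tries for this series */
	uint32_t	flags;		/* series flags (e.g. HT bits) */
};

The point of the change is that the series descriptor travels as one unit into the 11n-aware completion path, rather than as loose arrays.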
@@ -153,10 +155,10 @@ ath_rate_setupxtxdesc(struct ath_softc *sc, struct ath_node *an,
void
ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
- const struct ath_buf *bf)
+ const struct ath_rc_series *rc, const struct ath_tx_status *ts,
+ int frame_size, int nframes, int nbad)
{
struct amrr_node *amn = ATH_NODE_AMRR(an);
- const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
int sr = ts->ts_shortretry;
int lr = ts->ts_longretry;
int retry_count = sr + lr;
diff --git a/sys/dev/ath/ath_rate/onoe/onoe.c b/sys/dev/ath/ath_rate/onoe/onoe.c
index 77236ad..b5e2c2d 100644
--- a/sys/dev/ath/ath_rate/onoe/onoe.c
+++ b/sys/dev/ath/ath_rate/onoe/onoe.c
@@ -130,19 +130,21 @@ ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
*/
void
ath_rate_getxtxrates(struct ath_softc *sc, struct ath_node *an,
- uint8_t rix0, uint8_t *rix, uint8_t *try)
+ uint8_t rix0, struct ath_rc_series *rc)
{
struct onoe_node *on = ATH_NODE_ONOE(an);
-/* rix[0] = on->on_tx_rate0; */
- rix[1] = on->on_tx_rate1;
- rix[2] = on->on_tx_rate2;
- rix[3] = on->on_tx_rate3;
+ rc[0].flags = rc[1].flags = rc[2].flags = rc[3].flags = 0;
- try[0] = on->on_tx_try0;
- try[1] = 2;
- try[2] = 2;
- try[3] = 2;
+ rc[0].rix = on->on_tx_rate0;
+ rc[1].rix = on->on_tx_rate1;
+ rc[2].rix = on->on_tx_rate2;
+ rc[3].rix = on->on_tx_rate3;
+
+ rc[0].tries = on->on_tx_try0;
+ rc[1].tries = 2;
+ rc[2].tries = 2;
+ rc[3].tries = 2;
}
void
@@ -160,10 +162,10 @@ ath_rate_setupxtxdesc(struct ath_softc *sc, struct ath_node *an,
void
ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
- const struct ath_buf *bf)
+ const struct ath_rc_series *rc, const struct ath_tx_status *ts,
+ int frame_size, int nframes, int nbad)
{
struct onoe_node *on = ATH_NODE_ONOE(an);
- const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
if (ts->ts_status == 0)
on->on_tx_ok++;
diff --git a/sys/dev/ath/ath_rate/sample/sample.c b/sys/dev/ath/ath_rate/sample/sample.c
index 27bb98c..47ca063 100644
--- a/sys/dev/ath/ath_rate/sample/sample.c
+++ b/sys/dev/ath/ath_rate/sample/sample.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
*/
#include "opt_inet.h"
#include "opt_wlan.h"
+#include "opt_ah.h"
#include <sys/param.h>
#include <sys/systm.h>
@@ -146,6 +147,8 @@ ath_rate_node_cleanup(struct ath_softc *sc, struct ath_node *an)
static int
dot11rate(const HAL_RATE_TABLE *rt, int rix)
{
+ if (rix < 0)
+ return -1;
return rt->info[rix].phy == IEEE80211_T_HT ?
rt->info[rix].dot11Rate : (rt->info[rix].dot11Rate & IEEE80211_RATE_VAL) / 2;
}
@@ -153,6 +156,8 @@ dot11rate(const HAL_RATE_TABLE *rt, int rix)
static const char *
dot11rate_label(const HAL_RATE_TABLE *rt, int rix)
{
+ if (rix < 0)
+ return "";
return rt->info[rix].phy == IEEE80211_T_HT ? "MCS" : "Mb ";
}
@@ -165,12 +170,13 @@ pick_best_rate(struct ath_node *an, const HAL_RATE_TABLE *rt,
int size_bin, int require_acked_before)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
- int best_rate_rix, best_rate_tt;
+ int best_rate_rix, best_rate_tt, best_rate_pct;
uint32_t mask;
- int rix, tt;
+ int rix, tt, pct;
best_rate_rix = 0;
best_rate_tt = 0;
+ best_rate_pct = 0;
for (mask = sn->ratemask, rix = 0; mask != 0; mask >>= 1, rix++) {
if ((mask & 1) == 0) /* not a supported rate */
continue;
@@ -187,13 +193,54 @@ pick_best_rate(struct ath_node *an, const HAL_RATE_TABLE *rt,
!sn->stats[size_bin][rix].packets_acked))
continue;
+ /* Calculate percentage if possible */
+ if (sn->stats[size_bin][rix].total_packets > 0) {
+ pct = sn->stats[size_bin][rix].ewma_pct;
+ } else {
+ /* XXX for now, assume 95% ok */
+ pct = 95;
+ }
+
/* don't use a bit-rate that has been failing */
if (sn->stats[size_bin][rix].successive_failures > 3)
continue;
- if (best_rate_tt == 0 || tt < best_rate_tt) {
- best_rate_tt = tt;
- best_rate_rix = rix;
+ /*
+		 * For HT, don't use a bit rate that is much lossier
+		 * than the best.
+ *
+ * XXX this isn't optimal; it's just designed to
+ * eliminate rates that are going to be obviously
+ * worse.
+ */
+ if (an->an_node.ni_flags & IEEE80211_NODE_HT) {
+ if (best_rate_pct > (pct + 50))
+ continue;
+ }
+
+ /*
+ * For non-MCS rates, use the current average txtime for
+ * comparison.
+ */
+ if (! (an->an_node.ni_flags & IEEE80211_NODE_HT)) {
+ if (best_rate_tt == 0 || tt <= best_rate_tt) {
+ best_rate_tt = tt;
+ best_rate_rix = rix;
+ best_rate_pct = pct;
+ }
+ }
+
+ /*
+ * Since 2 stream rates have slightly higher TX times,
+ * allow a little bit of leeway. This should later
+ * be abstracted out and properly handled.
+ */
+ if (an->an_node.ni_flags & IEEE80211_NODE_HT) {
+ if (best_rate_tt == 0 || (tt * 8 <= best_rate_tt * 10)) {
+ best_rate_tt = tt;
+ best_rate_rix = rix;
+ best_rate_pct = pct;
+ }
}
}
return (best_rate_tt ? best_rate_rix : -1);
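The `tt * 8 <= best_rate_tt * 10` comparison above accepts candidates whose TX time is at most 25% above the best, using only integer math. A quick worked check of the boundary:

#include <stdio.h>

int
main(void)
{
	int best = 400;		/* usec, current best TX time */
	int tt;

	for (tt = 400; tt <= 520; tt += 40)
		printf("tt=%d: %s\n", tt,
		    (tt * 8 <= best * 10) ? "keep" : "reject");
	/* tt=400, 440, 480 keep (<= 500 usec); tt=520 is rejected */
	return (0);
}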
@@ -252,6 +299,28 @@ pick_sample_rate(struct sample_softc *ssc , struct ath_node *an,
goto nextrate;
}
+ /*
+ * When doing aggregation, successive failures don't happen
+ * as often, as sometimes some of the sub-frames get through.
+ *
+ * If the sample rix average tx time is greater than the
+ * average tx time of the current rix, don't immediately use
+ * the rate for sampling.
+ */
+ if (an->an_node.ni_flags & IEEE80211_NODE_HT) {
+ if ((sn->stats[size_bin][rix].average_tx_time * 10 >
+ sn->stats[size_bin][current_rix].average_tx_time * 9) &&
+ (ticks - sn->stats[size_bin][rix].last_tx < ssc->stale_failure_timeout)) {
+ mask &= ~(1<<rix);
+ goto nextrate;
+ }
+ }
+
+ /*
+ * XXX TODO
+ * For HT, limit sample somehow?
+ */
+
	/* For non-HT rates above 11M, don't sample more than 2 rates higher than the current rate */
if (! (an->an_node.ni_flags & IEEE80211_NODE_HT)) {
if (DOT11RATE(rix) > 2*11 && rix > current_rix + 2) {
@@ -315,6 +384,96 @@ ath_rate_update_static_rix(struct ath_softc *sc, struct ieee80211_node *ni)
}
}
+/*
+ * Pick a non-HT rate to begin using.
+ */
+static int
+ath_rate_pick_seed_rate_legacy(struct ath_softc *sc, struct ath_node *an,
+ int frameLen)
+{
+#define DOT11RATE(ix) (rt->info[ix].dot11Rate & IEEE80211_RATE_VAL)
+#define MCS(ix) (rt->info[ix].dot11Rate | IEEE80211_RATE_MCS)
+#define RATE(ix) (DOT11RATE(ix) / 2)
+ int rix = -1;
+ const HAL_RATE_TABLE *rt = sc->sc_currates;
+ struct sample_node *sn = ATH_NODE_SAMPLE(an);
+ const int size_bin = size_to_bin(frameLen);
+
+ /* no packet has been sent successfully yet */
+ for (rix = rt->rateCount-1; rix > 0; rix--) {
+ if ((sn->ratemask & (1<<rix)) == 0)
+ continue;
+
+ /* Skip HT rates */
+ if (rt->info[rix].phy == IEEE80211_T_HT)
+ continue;
+
+ /*
+ * Pick the highest rate <= 36 Mbps
+ * that hasn't failed.
+ */
+ if (DOT11RATE(rix) <= 72 &&
+ sn->stats[size_bin][rix].successive_failures == 0) {
+ break;
+ }
+ }
+ return rix;
+#undef RATE
+#undef MCS
+#undef DOT11RATE
+}
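For reference, DOT11RATE() yields 802.11 rate codes in 500 kb/s units, which is why the `DOT11RATE(rix) <= 72` test matches the "<= 36 Mbps" comment above. A tiny check of the conversion:

#include <stdio.h>

int
main(void)
{
	/* 802.11 rates are encoded in 500 kb/s units. */
	int dot11rates[] = { 12, 48, 72, 96, 108 };
	int i;

	for (i = 0; i < 5; i++)
		printf("dot11 %3d -> %2d Mbps%s\n", dot11rates[i],
		    dot11rates[i] / 2,
		    dot11rates[i] <= 72 ? " (seed candidate)" : "");
	return (0);
}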
+
+/*
+ * Pick a HT rate to begin using.
+ *
+ * Don't use any non-HT rates; only consider HT rates.
+ */
+static int
+ath_rate_pick_seed_rate_ht(struct ath_softc *sc, struct ath_node *an,
+ int frameLen)
+{
+#define DOT11RATE(ix) (rt->info[ix].dot11Rate & IEEE80211_RATE_VAL)
+#define MCS(ix) (rt->info[ix].dot11Rate | IEEE80211_RATE_MCS)
+#define RATE(ix) (DOT11RATE(ix) / 2)
+ int rix = -1, ht_rix = -1;
+ const HAL_RATE_TABLE *rt = sc->sc_currates;
+ struct sample_node *sn = ATH_NODE_SAMPLE(an);
+ const int size_bin = size_to_bin(frameLen);
+
+ /* no packet has been sent successfully yet */
+ for (rix = rt->rateCount-1; rix > 0; rix--) {
+ /* Skip rates we can't use */
+ if ((sn->ratemask & (1<<rix)) == 0)
+ continue;
+
+ /* Keep a copy of the last seen HT rate index */
+ if (rt->info[rix].phy == IEEE80211_T_HT)
+ ht_rix = rix;
+
+ /* Skip non-HT rates */
+ if (rt->info[rix].phy != IEEE80211_T_HT)
+ continue;
+
+ /*
+ * Pick a medium-speed rate regardless of stream count
+ * which has not seen any failures. Higher rates may fail;
+ * we'll try them later.
+ */
+ if (((MCS(rix) & 0x7) <= 4) &&
+ sn->stats[size_bin][rix].successive_failures == 0) {
+ break;
+ }
+ }
+
+ /*
+ * If all the MCS rates have successive failures, rix should be
+	 * > 0; otherwise use the lowest MCS rix (hopefully MCS 0).
+ */
+ return MAX(rix, ht_rix);
+#undef RATE
+#undef MCS
+#undef DOT11RATE
+}
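The `(MCS(rix) & 0x7) <= 4` test above relies on HT MCS indices repeating in groups of eight per spatial stream (MCS 0-7 for one stream, 8-15 for two), so the low three bits give the per-stream modulation regardless of stream count; the IEEE80211_RATE_MCS flag OR'd in by MCS() sits above the masked bits and doesn't disturb the result. A quick enumeration:

#include <stdio.h>

int
main(void)
{
	int mcs;

	/* HT MCS 0..15: & 0x7 gives the per-stream modulation index. */
	for (mcs = 0; mcs <= 15; mcs++)
		if ((mcs & 0x7) <= 4)
			printf("MCS %2d qualifies as a seed rate\n", mcs);
	/* prints MCS 0-4 and MCS 8-12 */
	return (0);
}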
void
@@ -358,9 +517,14 @@ ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
if (sn->sample_tt[size_bin] < average_tx_time * (sn->packets_since_sample[size_bin]*ssc->sample_rate/100)) {
rix = pick_sample_rate(ssc, an, rt, size_bin);
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
- &an->an_node, "size %u sample rate %d current rate %d",
- bin_to_size(size_bin), RATE(rix),
- RATE(sn->current_rix[size_bin]));
+ &an->an_node, "att %d sample_tt %d size %u sample rate %d %s current rate %d %s",
+ average_tx_time,
+ sn->sample_tt[size_bin],
+ bin_to_size(size_bin),
+ dot11rate(rt, rix),
+ dot11rate_label(rt, rix),
+ dot11rate(rt, sn->current_rix[size_bin]),
+ dot11rate_label(rt, sn->current_rix[size_bin]));
if (rix != sn->current_rix[size_bin]) {
sn->current_sample_rix[size_bin] = rix;
} else {
@@ -371,29 +535,58 @@ ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
change_rates = 0;
if (!sn->packets_sent[size_bin] || best_rix == -1) {
/* no packet has been sent successfully yet */
- for (rix = rt->rateCount-1; rix > 0; rix--) {
- if ((sn->ratemask & (1<<rix)) == 0)
- continue;
- /*
- * Pick the highest rate <= 36 Mbps
- * that hasn't failed.
- */
- if (DOT11RATE(rix) <= 72 &&
- sn->stats[size_bin][rix].successive_failures == 0) {
- break;
- }
- }
change_rates = 1;
- best_rix = rix;
+ if (an->an_node.ni_flags & IEEE80211_NODE_HT)
+ best_rix =
+ ath_rate_pick_seed_rate_ht(sc, an, frameLen);
+ else
+ best_rix =
+ ath_rate_pick_seed_rate_legacy(sc, an, frameLen);
} else if (sn->packets_sent[size_bin] < 20) {
/* let the bit-rate switch quickly during the first few packets */
+ IEEE80211_NOTE(an->an_node.ni_vap,
+ IEEE80211_MSG_RATECTL, &an->an_node,
+ "%s: switching quickly..", __func__);
change_rates = 1;
} else if (ticks - ssc->min_switch > sn->ticks_since_switch[size_bin]) {
/* min_switch seconds have gone by */
+ IEEE80211_NOTE(an->an_node.ni_vap,
+ IEEE80211_MSG_RATECTL, &an->an_node,
+ "%s: min_switch %d > ticks_since_switch %d..",
+ __func__, ticks - ssc->min_switch, sn->ticks_since_switch[size_bin]);
change_rates = 1;
- } else if (2*average_tx_time < sn->stats[size_bin][sn->current_rix[size_bin]].average_tx_time) {
+ } else if ((! (an->an_node.ni_flags & IEEE80211_NODE_HT)) &&
+ (2*average_tx_time < sn->stats[size_bin][sn->current_rix[size_bin]].average_tx_time)) {
/* the current bit-rate is twice as slow as the best one */
+ IEEE80211_NOTE(an->an_node.ni_vap,
+ IEEE80211_MSG_RATECTL, &an->an_node,
+ "%s: 2x att (= %d) < cur_rix att %d",
+ __func__,
+ 2 * average_tx_time, sn->stats[size_bin][sn->current_rix[size_bin]].average_tx_time);
change_rates = 1;
+ } else if ((an->an_node.ni_flags & IEEE80211_NODE_HT)) {
+ int cur_rix = sn->current_rix[size_bin];
+ int cur_att = sn->stats[size_bin][cur_rix].average_tx_time;
+ /*
+ * If the node is HT, upgrade it if the MCS rate is
+ * higher and the average tx time is within 20% of
+ * the current rate. It can fail a little.
+ *
+ * This is likely not optimal!
+ */
+#if 0
+ printf("cur rix/att %x/%d, best rix/att %x/%d\n",
+ MCS(cur_rix), cur_att, MCS(best_rix), average_tx_time);
+#endif
+ if ((MCS(best_rix) > MCS(cur_rix)) &&
+ (average_tx_time * 8) <= (cur_att * 10)) {
+ IEEE80211_NOTE(an->an_node.ni_vap,
+ IEEE80211_MSG_RATECTL, &an->an_node,
+ "%s: HT: best_rix 0x%d > cur_rix 0x%x, average_tx_time %d, cur_att %d",
+ __func__,
+ MCS(best_rix), MCS(cur_rix), average_tx_time, cur_att);
+ change_rates = 1;
+ }
}
sn->packets_since_sample[size_bin]++;
@@ -445,22 +638,24 @@ done:
*/
void
ath_rate_getxtxrates(struct ath_softc *sc, struct ath_node *an,
- uint8_t rix0, uint8_t *rix, uint8_t *try)
+ uint8_t rix0, struct ath_rc_series *rc)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const struct txschedule *sched = &sn->sched[rix0];
KASSERT(rix0 == sched->r0, ("rix0 (%x) != sched->r0 (%x)!\n", rix0, sched->r0));
-/* rix[0] = sched->r0; */
- rix[1] = sched->r1;
- rix[2] = sched->r2;
- rix[3] = sched->r3;
+ rc[0].flags = rc[1].flags = rc[2].flags = rc[3].flags = 0;
+
+ rc[0].rix = sched->r0;
+ rc[1].rix = sched->r1;
+ rc[2].rix = sched->r2;
+ rc[3].rix = sched->r3;
- try[0] = sched->t0;
- try[1] = sched->t1;
- try[2] = sched->t2;
- try[3] = sched->t3;
+ rc[0].tries = sched->t0;
+ rc[1].tries = sched->t1;
+ rc[2].tries = sched->t2;
+ rc[3].tries = sched->t3;
}
void
@@ -488,6 +683,71 @@ ath_rate_setupxtxdesc(struct ath_softc *sc, struct ath_node *an,
s3code, sched->t3); /* series 3 */
}
+/*
+ * Update the EWMA percentage.
+ *
+ * This is a simple hack to track an EWMA based on the current
+ * rate scenario. For the rate codes which failed, this will
+ * record 0% against them. For the rate code which succeeded, the
+ * EWMA will record the (nframes - nbad) * 100 / nframes percentage
+ * against it.
+ */
+static void
+update_ewma_stats(struct ath_softc *sc, struct ath_node *an,
+ int frame_size,
+ int rix0, int tries0,
+ int rix1, int tries1,
+ int rix2, int tries2,
+ int rix3, int tries3,
+ int short_tries, int tries, int status,
+ int nframes, int nbad)
+{
+ struct sample_node *sn = ATH_NODE_SAMPLE(an);
+ struct sample_softc *ssc = ATH_SOFTC_SAMPLE(sc);
+ const int size_bin = size_to_bin(frame_size);
+ int tries_so_far;
+ int pct;
+ int rix = rix0;
+
+	/* Calculate the success percentage, in tenths of a percent */
+ if (nframes == 0)
+ nframes = nbad = 1;
+ pct = ((nframes - nbad) * 1000) / nframes;
+
+ /* Figure out which rate index succeeded */
+ tries_so_far = tries0;
+
+ if (tries1 && tries_so_far < tries) {
+ tries_so_far += tries1;
+ rix = rix1;
+ /* XXX bump ewma pct */
+ }
+
+ if (tries2 && tries_so_far < tries) {
+ tries_so_far += tries2;
+ rix = rix2;
+ /* XXX bump ewma pct */
+ }
+
+ if (tries3 && tries_so_far < tries) {
+ rix = rix3;
+ /* XXX bump ewma pct */
+ }
+
+ /* rix is the successful rate, update EWMA for final rix */
+ if (sn->stats[size_bin][rix].total_packets <
+ ssc->smoothing_minpackets) {
+ /* just average the first few packets */
+ int a_pct = (sn->stats[size_bin][rix].packets_acked * 1000) /
+ (sn->stats[size_bin][rix].total_packets);
+ sn->stats[size_bin][rix].ewma_pct = a_pct;
+ } else {
+ /* use a ewma */
+ sn->stats[size_bin][rix].ewma_pct =
+ ((sn->stats[size_bin][rix].ewma_pct * ssc->smoothing_rate) +
+ (pct * (100 - ssc->smoothing_rate))) / 100;
+ }
+}
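The EWMA above is kept in tenths of a percent and blends old and new values with integer weights: the old value keeps ssc->smoothing_rate parts in 100. A standalone sketch of the update under those assumptions:

#include <stdio.h>

/* EWMA in tenths of a percent; 'weight' (0..100) favours the old value. */
static int
ewma_update(int old_pct, int new_pct, int weight)
{
	return ((old_pct * weight) + (new_pct * (100 - weight))) / 100;
}

int
main(void)
{
	int pct = 950;	/* 95.0% */
	int i;

	/* Feed in a run of 50.0% samples with weight 75. */
	for (i = 0; i < 4; i++) {
		pct = ewma_update(pct, 500, 75);
		printf("ewma %d.%d%%\n", pct / 10, pct % 10);
	}
	return (0);
}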
+
static void
update_stats(struct ath_softc *sc, struct ath_node *an,
int frame_size,
@@ -495,10 +755,14 @@ update_stats(struct ath_softc *sc, struct ath_node *an,
int rix1, int tries1,
int rix2, int tries2,
int rix3, int tries3,
- int short_tries, int tries, int status)
+ int short_tries, int tries, int status,
+ int nframes, int nbad)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
struct sample_softc *ssc = ATH_SOFTC_SAMPLE(sc);
+#ifdef IEEE80211_DEBUG
+ const HAL_RATE_TABLE *rt = sc->sc_currates;
+#endif
const int size_bin = size_to_bin(frame_size);
const int size = bin_to_size(size_bin);
int tt, tries_so_far;
@@ -537,7 +801,7 @@ update_stats(struct ath_softc *sc, struct ath_node *an,
/* just average the first few packets */
int avg_tx = sn->stats[size_bin][rix0].average_tx_time;
int packets = sn->stats[size_bin][rix0].total_packets;
- sn->stats[size_bin][rix0].average_tx_time = (tt+(avg_tx*packets))/(packets+1);
+ sn->stats[size_bin][rix0].average_tx_time = (tt+(avg_tx*packets))/(packets+nframes);
} else {
/* use a ewma */
sn->stats[size_bin][rix0].average_tx_time =
@@ -545,38 +809,50 @@ update_stats(struct ath_softc *sc, struct ath_node *an,
(tt * (100 - ssc->smoothing_rate))) / 100;
}
- if (status != 0) {
+ /*
+	 * XXX Don't mark the higher bit rates as also having failed, as this
+	 * unfortunately stops those rates from being sampled when trying to
+ * TX. This happens with 11n aggregation.
+ */
+ if (nframes == nbad) {
+#if 0
int y;
- sn->stats[size_bin][rix0].successive_failures++;
+#endif
+ sn->stats[size_bin][rix0].successive_failures += nbad;
+#if 0
for (y = size_bin+1; y < NUM_PACKET_SIZE_BINS; y++) {
/*
* Also say larger packets failed since we
* assume if a small packet fails at a
* bit-rate then a larger one will also.
*/
- sn->stats[y][rix0].successive_failures++;
+ sn->stats[y][rix0].successive_failures += nbad;
sn->stats[y][rix0].last_tx = ticks;
sn->stats[y][rix0].tries += tries;
- sn->stats[y][rix0].total_packets++;
+ sn->stats[y][rix0].total_packets += nframes;
}
+#endif
} else {
- sn->stats[size_bin][rix0].packets_acked++;
+ sn->stats[size_bin][rix0].packets_acked += (nframes - nbad);
sn->stats[size_bin][rix0].successive_failures = 0;
}
sn->stats[size_bin][rix0].tries += tries;
sn->stats[size_bin][rix0].last_tx = ticks;
- sn->stats[size_bin][rix0].total_packets++;
+ sn->stats[size_bin][rix0].total_packets += nframes;
if (rix0 == sn->current_sample_rix[size_bin]) {
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node,
-"%s: size %d %s sample rate %d tries (%d/%d) tt %d avg_tt (%d/%d)",
+"%s: size %d %s sample rate %d %s tries (%d/%d) tt %d avg_tt (%d/%d) nfrm %d nbad %d",
__func__,
size,
status ? "FAIL" : "OK",
- rix0, short_tries, tries, tt,
+ dot11rate(rt, rix0),
+ dot11rate_label(rt, rix0),
+ short_tries, tries, tt,
sn->stats[size_bin][rix0].average_tx_time,
- sn->stats[size_bin][rix0].perfect_tx_time);
+ sn->stats[size_bin][rix0].perfect_tx_time,
+ nframes, nbad);
sn->sample_tt[size_bin] = tt;
sn->current_sample_rix[size_bin] = -1;
}
@@ -591,21 +867,21 @@ badrate(struct ifnet *ifp, int series, int hwrate, int tries, int status)
void
ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
- const struct ath_buf *bf)
+ const struct ath_rc_series *rc, const struct ath_tx_status *ts,
+ int frame_size, int nframes, int nbad)
{
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
struct sample_node *sn = ATH_NODE_SAMPLE(an);
- const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
- const struct ath_desc *ds0 = &bf->bf_desc[0];
- int final_rix, short_tries, long_tries, frame_size;
+ int final_rix, short_tries, long_tries;
const HAL_RATE_TABLE *rt = sc->sc_currates;
+ int status = ts->ts_status;
int mrr;
final_rix = rt->rateCodeToIndex[ts->ts_rate];
short_tries = ts->ts_shortretry;
long_tries = ts->ts_longretry + 1;
- frame_size = ds0->ds_ctl0 & 0x0fff; /* low-order 12 bits of ds_ctl0 */
+
if (frame_size == 0) /* NB: should not happen */
frame_size = 1500;
@@ -615,63 +891,73 @@ ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
"%s: size %d %s rate/try %d/%d no rates yet",
__func__,
bin_to_size(size_to_bin(frame_size)),
- ts->ts_status ? "FAIL" : "OK",
+ status ? "FAIL" : "OK",
short_tries, long_tries);
return;
}
mrr = sc->sc_mrretry && !(ic->ic_flags & IEEE80211_F_USEPROT);
if (!mrr || ts->ts_finaltsi == 0) {
if (!IS_RATE_DEFINED(sn, final_rix)) {
- badrate(ifp, 0, ts->ts_rate, long_tries, ts->ts_status);
+ badrate(ifp, 0, ts->ts_rate, long_tries, status);
return;
}
/*
* Only one rate was used; optimize work.
*/
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
- &an->an_node, "%s: size %d (%d bytes) %s rate/try %d %s/%d/%d",
+ &an->an_node, "%s: size %d (%d bytes) %s rate/try %d %s/%d/%d nframes/nbad [%d/%d]",
__func__,
bin_to_size(size_to_bin(frame_size)),
frame_size,
- ts->ts_status ? "FAIL" : "OK",
- dot11rate(rt, final_rix), dot11rate_label(rt, final_rix), short_tries, long_tries);
+ status ? "FAIL" : "OK",
+ dot11rate(rt, final_rix), dot11rate_label(rt, final_rix),
+ short_tries, long_tries, nframes, nbad);
update_stats(sc, an, frame_size,
final_rix, long_tries,
0, 0,
0, 0,
0, 0,
- short_tries, long_tries, ts->ts_status);
+ short_tries, long_tries, status,
+ nframes, nbad);
+ update_ewma_stats(sc, an, frame_size,
+ final_rix, long_tries,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ short_tries, long_tries, status,
+ nframes, nbad);
+
} else {
- int hwrates[4], tries[4], rix[4];
int finalTSIdx = ts->ts_finaltsi;
int i;
/*
* Process intermediate rates that failed.
*/
- ath_hal_gettxcompletionrates(sc->sc_ah, ds0, hwrates, tries);
-
- for (i = 0; i < 4; i++) {
- rix[i] = rt->rateCodeToIndex[hwrates[i]];
- }
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node,
-"%s: size %d (%d bytes) finaltsidx %d tries %d %s rate/try [%d %s/%d %d %s/%d %d %s/%d %d %s/%d]",
+"%s: size %d (%d bytes) finaltsidx %d tries %d %s rate/try [%d %s/%d %d %s/%d %d %s/%d %d %s/%d] nframes/nbad [%d/%d]",
__func__,
bin_to_size(size_to_bin(frame_size)),
frame_size,
finalTSIdx,
- long_tries,
- ts->ts_status ? "FAIL" : "OK",
- dot11rate(rt, rix[0]), dot11rate_label(rt, rix[0]), tries[0],
- dot11rate(rt, rix[1]), dot11rate_label(rt, rix[1]), tries[1],
- dot11rate(rt, rix[2]), dot11rate_label(rt, rix[2]), tries[2],
- dot11rate(rt, rix[3]), dot11rate_label(rt, rix[3]), tries[3]);
+ long_tries,
+ status ? "FAIL" : "OK",
+ dot11rate(rt, rc[0].rix),
+ dot11rate_label(rt, rc[0].rix), rc[0].tries,
+ dot11rate(rt, rc[1].rix),
+ dot11rate_label(rt, rc[1].rix), rc[1].tries,
+ dot11rate(rt, rc[2].rix),
+ dot11rate_label(rt, rc[2].rix), rc[2].tries,
+ dot11rate(rt, rc[3].rix),
+ dot11rate_label(rt, rc[3].rix), rc[3].tries,
+ nframes, nbad);
for (i = 0; i < 4; i++) {
- if (tries[i] && !IS_RATE_DEFINED(sn, rix[i]))
- badrate(ifp, 0, hwrates[i], tries[i], ts->ts_status);
+ if (rc[i].tries && !IS_RATE_DEFINED(sn, rc[i].rix))
+ badrate(ifp, 0, rc[i].ratecode, rc[i].tries,
+ status);
}
/*
@@ -681,48 +967,62 @@ ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
* sample higher rates 1 try at a time doing so
* may unfairly penalize them.
*/
- if (tries[0]) {
- update_stats(sc, an, frame_size,
- rix[0], tries[0],
- rix[1], tries[1],
- rix[2], tries[2],
- rix[3], tries[3],
- short_tries, long_tries,
- long_tries > tries[0]);
- long_tries -= tries[0];
+ if (rc[0].tries) {
+ update_stats(sc, an, frame_size,
+ rc[0].rix, rc[0].tries,
+ rc[1].rix, rc[1].tries,
+ rc[2].rix, rc[2].tries,
+ rc[3].rix, rc[3].tries,
+ short_tries, long_tries,
+ long_tries > rc[0].tries,
+ nframes, nbad);
+ long_tries -= rc[0].tries;
}
- if (tries[1] && finalTSIdx > 0) {
- update_stats(sc, an, frame_size,
- rix[1], tries[1],
- rix[2], tries[2],
- rix[3], tries[3],
- 0, 0,
- short_tries, long_tries,
- ts->ts_status);
- long_tries -= tries[1];
+ if (rc[1].tries && finalTSIdx > 0) {
+ update_stats(sc, an, frame_size,
+ rc[1].rix, rc[1].tries,
+ rc[2].rix, rc[2].tries,
+ rc[3].rix, rc[3].tries,
+ 0, 0,
+ short_tries, long_tries,
+ status,
+ nframes, nbad);
+ long_tries -= rc[1].tries;
}
- if (tries[2] && finalTSIdx > 1) {
- update_stats(sc, an, frame_size,
- rix[2], tries[2],
- rix[3], tries[3],
+ if (rc[2].tries && finalTSIdx > 1) {
+ update_stats(sc, an, frame_size,
+ rc[2].rix, rc[2].tries,
+ rc[3].rix, rc[3].tries,
0, 0,
0, 0,
- short_tries, long_tries,
- ts->ts_status);
- long_tries -= tries[2];
+ short_tries, long_tries,
+ status,
+ nframes, nbad);
+ long_tries -= rc[2].tries;
}
- if (tries[3] && finalTSIdx > 2) {
- update_stats(sc, an, frame_size,
- rix[3], tries[3],
+ if (rc[3].tries && finalTSIdx > 2) {
+ update_stats(sc, an, frame_size,
+ rc[3].rix, rc[3].tries,
0, 0,
0, 0,
0, 0,
- short_tries, long_tries,
- ts->ts_status);
+ short_tries, long_tries,
+ status,
+ nframes, nbad);
}
+
+ update_ewma_stats(sc, an, frame_size,
+ rc[0].rix, rc[0].tries,
+ rc[1].rix, rc[1].tries,
+ rc[2].rix, rc[2].tries,
+ rc[3].rix, rc[3].tries,
+ short_tries, long_tries,
+ long_tries > rc[0].tries,
+ nframes, nbad);
+
}
}
@@ -844,6 +1144,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
sn->stats[y][rix].total_packets = 0;
sn->stats[y][rix].packets_acked = 0;
sn->stats[y][rix].last_tx = 0;
+ sn->stats[y][rix].ewma_pct = 0;
sn->stats[y][rix].perfect_tx_time =
calc_usecs_unicast_packet(sc, size, rix, 0, 0,
@@ -881,18 +1182,24 @@ sample_stats(void *arg, struct ieee80211_node *ni)
uint32_t mask;
int rix, y;
- printf("\n[%s] refcnt %d static_rix %d ratemask 0x%x\n",
+ printf("\n[%s] refcnt %d static_rix (%d %s) ratemask 0x%x\n",
ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni),
- sn->static_rix, sn->ratemask);
+ dot11rate(rt, sn->static_rix),
+ dot11rate_label(rt, sn->static_rix),
+ sn->ratemask);
for (y = 0; y < NUM_PACKET_SIZE_BINS; y++) {
printf("[%4u] cur rix %d (%d %s) since switch: packets %d ticks %u\n",
bin_to_size(y), sn->current_rix[y],
dot11rate(rt, sn->current_rix[y]),
dot11rate_label(rt, sn->current_rix[y]),
sn->packets_since_switch[y], sn->ticks_since_switch[y]);
- printf("[%4u] last sample %d cur sample %d packets sent %d\n",
- bin_to_size(y), sn->last_sample_rix[y],
- sn->current_sample_rix[y], sn->packets_sent[y]);
+ printf("[%4u] last sample (%d %s) cur sample (%d %s) packets sent %d\n",
+ bin_to_size(y),
+ dot11rate(rt, sn->last_sample_rix[y]),
+ dot11rate_label(rt, sn->last_sample_rix[y]),
+ dot11rate(rt, sn->current_sample_rix[y]),
+ dot11rate_label(rt, sn->current_sample_rix[y]),
+ sn->packets_sent[y]);
printf("[%4u] packets since sample %d sample tt %u\n",
bin_to_size(y), sn->packets_since_sample[y],
sn->sample_tt[y]);
@@ -903,13 +1210,16 @@ sample_stats(void *arg, struct ieee80211_node *ni)
for (y = 0; y < NUM_PACKET_SIZE_BINS; y++) {
if (sn->stats[y][rix].total_packets == 0)
continue;
- printf("[%2u %s:%4u] %8d:%-8d (%3d%%) T %8d F %4d avg %5u last %u\n",
+ printf("[%2u %s:%4u] %8ju:%-8ju (%3d%%) (EWMA %3d.%1d%%) T %8ju F %4d avg %5u last %u\n",
dot11rate(rt, rix), dot11rate_label(rt, rix),
bin_to_size(y),
- sn->stats[y][rix].total_packets,
- sn->stats[y][rix].packets_acked,
- (100*sn->stats[y][rix].packets_acked)/sn->stats[y][rix].total_packets,
- sn->stats[y][rix].tries,
+ (uintmax_t) sn->stats[y][rix].total_packets,
+ (uintmax_t) sn->stats[y][rix].packets_acked,
+ (int) ((sn->stats[y][rix].packets_acked * 100ULL) /
+ sn->stats[y][rix].total_packets),
+ sn->stats[y][rix].ewma_pct / 10,
+ sn->stats[y][rix].ewma_pct % 10,
+ (uintmax_t) sn->stats[y][rix].tries,
sn->stats[y][rix].successive_failures,
sn->stats[y][rix].average_tx_time,
ticks - sn->stats[y][rix].last_tx);
diff --git a/sys/dev/ath/ath_rate/sample/sample.h b/sys/dev/ath/ath_rate/sample/sample.h
index b39e0be..805ae46 100644
--- a/sys/dev/ath/ath_rate/sample/sample.h
+++ b/sys/dev/ath/ath_rate/sample/sample.h
@@ -51,15 +51,17 @@ struct sample_softc {
int max_successive_failures;
int stale_failure_timeout; /* how long to honor max_successive_failures */
int min_switch; /* min time between rate changes */
+ int min_good_pct; /* min good percentage for a rate to be considered */
};
#define ATH_SOFTC_SAMPLE(sc) ((struct sample_softc *)sc->sc_rc)
struct rate_stats {
unsigned average_tx_time;
int successive_failures;
- int tries;
- int total_packets;
- int packets_acked;
+ uint64_t tries;
+ uint64_t total_packets; /* pkts total since assoc */
+ uint64_t packets_acked; /* pkts acked since assoc */
+ int ewma_pct; /* EWMA percentage */
unsigned perfect_tx_time; /* transmit time for 0 retries */
int last_tx;
};
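With struct rate_stats now carrying uint64_t counters so long-running associations don't wrap, sample_stats() above prints them via %ju after casting to uintmax_t, the portable printf idiom for 64-bit counters. A minimal illustration:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uint64_t total = 5000000000ULL;	/* > 2^32; would wrap a 32-bit int */
	uint64_t acked = 4900000000ULL;

	printf("%ju:%ju (%d%%)\n", (uintmax_t)total, (uintmax_t)acked,
	    (int)(acked * 100ULL / total));
	return (0);
}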
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index e1d2c0f..462bc3f 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -64,6 +64,8 @@ __FBSDID("$FreeBSD$");
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
+#include <sys/ktr.h>
+#include <sys/smp.h> /* for mp_ncpus */
#include <machine/bus.h>
@@ -106,6 +108,8 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
+#define ATH_KTR_INTR KTR_SPARE4
+#define ATH_KTR_ERR KTR_SPARE3
/*
* ATH_BCBUF determines the number of vap's that can transmit
@@ -163,6 +167,7 @@ static int ath_desc_alloc(struct ath_softc *);
static void ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
const uint8_t [IEEE80211_ADDR_LEN]);
+static void ath_node_cleanup(struct ieee80211_node *);
static void ath_node_free(struct ieee80211_node *);
static void ath_node_getsignal(const struct ieee80211_node *,
int8_t *, int8_t *);
@@ -170,7 +175,8 @@ static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
int subtype, int rssi, int nf);
static void ath_setdefantenna(struct ath_softc *, u_int);
-static void ath_rx_proc(void *, int);
+static void ath_rx_proc(struct ath_softc *sc, int);
+static void ath_rx_tasklet(void *, int);
static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int ath_tx_setup(struct ath_softc *, int, int);
@@ -180,10 +186,9 @@ static void ath_tx_cleanup(struct ath_softc *);
static void ath_tx_proc_q0(void *, int);
static void ath_tx_proc_q0123(void *, int);
static void ath_tx_proc(void *, int);
-static void ath_tx_draintxq(struct ath_softc *, struct ath_txq *);
static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
-static void ath_draintxq(struct ath_softc *);
-static void ath_stoprecv(struct ath_softc *);
+static void ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type);
+static void ath_stoprecv(struct ath_softc *, int);
static int ath_startrecv(struct ath_softc *);
static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void ath_scan_start(struct ieee80211com *);
@@ -382,7 +387,7 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
"%s taskq", ifp->if_xname);
- TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
+ TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc);
TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
@@ -671,6 +676,17 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
#endif
/*
+ * Check if the hardware requires PCI register serialisation.
+ * Some of the Owl based MACs require this.
+ */
+ if (mp_ncpus > 1 &&
+ ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
+ 0, NULL) == HAL_OK) {
+ sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
+ device_printf(sc->sc_dev, "Enabling register serialisation\n");
+ }
+
+ /*
* Indicate we need the 802.11 header padded to a
* 32-bit boundary for 4-address and QoS frames.
*/
@@ -712,11 +728,26 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
ic->ic_node_alloc = ath_node_alloc;
sc->sc_node_free = ic->ic_node_free;
ic->ic_node_free = ath_node_free;
+ sc->sc_node_cleanup = ic->ic_node_cleanup;
+ ic->ic_node_cleanup = ath_node_cleanup;
ic->ic_node_getsignal = ath_node_getsignal;
ic->ic_scan_start = ath_scan_start;
ic->ic_scan_end = ath_scan_end;
ic->ic_set_channel = ath_set_channel;
+ /* 802.11n specific - but just override anyway */
+ sc->sc_addba_request = ic->ic_addba_request;
+ sc->sc_addba_response = ic->ic_addba_response;
+ sc->sc_addba_stop = ic->ic_addba_stop;
+ sc->sc_bar_response = ic->ic_bar_response;
+ sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;
+
+ ic->ic_addba_request = ath_addba_request;
+ ic->ic_addba_response = ath_addba_response;
+ ic->ic_addba_response_timeout = ath_addba_response_timeout;
+ ic->ic_addba_stop = ath_addba_stop;
+ ic->ic_bar_response = ath_bar_response;
+
ieee80211_radiotap_attach(ic,
&sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
ATH_TX_RADIOTAP_PRESENT,
@@ -952,7 +983,7 @@ ath_vap_create(struct ieee80211com *ic,
/*
* Check that a beacon buffer is available; the code below assumes it.
*/
- if (needbeacon & STAILQ_EMPTY(&sc->sc_bbuf)) {
+	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
device_printf(sc->sc_dev, "no beacon buffer available\n");
goto bad;
}
@@ -1014,8 +1045,8 @@ ath_vap_create(struct ieee80211com *ic,
* multicast frames. We know a beacon buffer is
* available because we checked above.
*/
- avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
- STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
+ avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
+ TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
/*
* Assign the vap to a beacon xmit slot. As above
@@ -1112,6 +1143,7 @@ ath_vap_delete(struct ieee80211vap *vap)
struct ath_hal *ah = sc->sc_ah;
struct ath_vap *avp = ATH_VAP(vap);
+ DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/*
* Quiesce the hardware while we remove the vap. In
@@ -1119,11 +1151,32 @@ ath_vap_delete(struct ieee80211vap *vap)
* the vap state by any frames pending on the tx queues.
*/
ath_hal_intrset(ah, 0); /* disable interrupts */
- ath_draintxq(sc); /* stop xmit side */
- ath_stoprecv(sc); /* stop recv side */
+ ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */
+ /* XXX Do all frames from all vaps/nodes need draining here? */
+ ath_stoprecv(sc, 1); /* stop recv side */
}
ieee80211_vap_detach(vap);
+
+ /*
+ * XXX Danger Will Robinson! Danger!
+ *
+ * Because ieee80211_vap_detach() can queue a frame (the station
+	 * disassociate message?) after we've drained the TXQ and
+ * flushed the software TXQ, we will end up with a frame queued
+ * to a node whose vap is about to be freed.
+ *
+ * To work around this, flush the hardware/software again.
+ * This may be racy - the ath task may be running and the packet
+ * may be being scheduled between sw->hw txq. Tsk.
+ *
+ * TODO: figure out why a new node gets allocated somewhere around
+ * here (after the ath_tx_swq() call; and after an ath_stop_locked()
+ * call!)
+ */
+
+ ath_draintxq(sc, ATH_RESET_DEFAULT);
+
ATH_LOCK(sc);
/*
* Reclaim beacon state. Note this must be done before
@@ -1171,7 +1224,6 @@ ath_vap_delete(struct ieee80211vap *vap)
sc->sc_swbmiss = 0;
}
#endif
- ATH_UNLOCK(sc);
free(avp, M_80211_VAP);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
@@ -1192,6 +1244,7 @@ ath_vap_delete(struct ieee80211vap *vap)
}
ath_hal_intrset(ah, sc->sc_imask);
}
+ ATH_UNLOCK(sc);
}
void
@@ -1304,6 +1357,23 @@ ath_intr(void *arg)
struct ifnet *ifp = sc->sc_ifp;
struct ath_hal *ah = sc->sc_ah;
HAL_INT status = 0;
+ uint32_t txqs;
+
+ /*
+ * If we're inside a reset path, just print a warning and
+ * clear the ISR. The reset routine will finish it for us.
+ */
+ ATH_PCU_LOCK(sc);
+ if (sc->sc_inreset_cnt) {
+ HAL_INT status;
+ ath_hal_getisr(ah, &status); /* clear ISR */
+ ath_hal_intrset(ah, 0); /* disable further intr's */
+ DPRINTF(sc, ATH_DEBUG_ANY,
+ "%s: in reset, ignoring: status=0x%x\n",
+ __func__, status);
+ ATH_PCU_UNLOCK(sc);
+ return;
+ }
if (sc->sc_invalid) {
/*
@@ -1311,10 +1381,14 @@ ath_intr(void *arg)
* Note this can happen early on if the IRQ is shared.
*/
DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
+ ATH_PCU_UNLOCK(sc);
return;
}
- if (!ath_hal_intrpend(ah)) /* shared irq, not for us */
+ if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
+ ATH_PCU_UNLOCK(sc);
return;
+ }
+
if ((ifp->if_flags & IFF_UP) == 0 ||
(ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
HAL_INT status;
@@ -1323,8 +1397,10 @@ ath_intr(void *arg)
__func__, ifp->if_flags);
ath_hal_getisr(ah, &status); /* clear ISR */
ath_hal_intrset(ah, 0); /* disable further intr's */
+ ATH_PCU_UNLOCK(sc);
return;
}
+
/*
* Figure out the reason(s) for the interrupt. Note
* that the hal returns a pseudo-ISR that may include
@@ -1333,12 +1409,36 @@ ath_intr(void *arg)
*/
ath_hal_getisr(ah, &status); /* NB: clears ISR too */
DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
+ CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status);
+#ifdef ATH_KTR_INTR_DEBUG
+ CTR5(ATH_KTR_INTR,
+ "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
+ ah->ah_intrstate[0],
+ ah->ah_intrstate[1],
+ ah->ah_intrstate[2],
+ ah->ah_intrstate[3],
+ ah->ah_intrstate[6]);
+#endif
status &= sc->sc_imask; /* discard unasked for bits */
/* Short-circuit un-handled interrupts */
- if (status == 0x0)
+ if (status == 0x0) {
+ ATH_PCU_UNLOCK(sc);
return;
+ }
+
+ /*
+ * Take a note that we're inside the interrupt handler, so
+ * the reset routines know to wait.
+ */
+ sc->sc_intr_cnt++;
+ ATH_PCU_UNLOCK(sc);
+ /*
+ * Handle the interrupt. We won't run concurrent with the reset
+ * or channel change routines as they'll wait for sc_intr_cnt
+ * to be 0 before continuing.
+ */
if (status & HAL_INT_FATAL) {
sc->sc_stats.ast_hardware++;
ath_hal_intrset(ah, 0); /* disable intr's until reset */
@@ -1377,7 +1477,9 @@ ath_intr(void *arg)
}
}
if (status & HAL_INT_RXEOL) {
- int imask = sc->sc_imask;
+ int imask;
+ CTR0(ATH_KTR_ERR, "ath_intr: RXEOL");
+ ATH_PCU_LOCK(sc);
/*
* NB: the hardware should re-read the link when
* RXE bit is written, but it doesn't work at
@@ -1393,26 +1495,54 @@ ath_intr(void *arg)
* by a call to ath_reset() somehow, the
* interrupt mask will be correctly reprogrammed.
*/
+ imask = sc->sc_imask;
imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
ath_hal_intrset(ah, imask);
/*
+ * Only blank sc_rxlink if we've not yet kicked
+ * the PCU.
+ *
+ * This isn't entirely correct - the correct solution
+ * would be to have a PCU lock and engage that for
+ * the duration of the PCU fiddling; which would include
+ * running the RX process. Otherwise we could end up
+ * messing up the RX descriptor chain and making the
+ * RX desc list much shorter.
+ */
+ if (! sc->sc_kickpcu)
+ sc->sc_rxlink = NULL;
+ sc->sc_kickpcu = 1;
+ /*
		 * Enqueue an RX proc to handle whatever
* is in the RX queue.
* This will then kick the PCU.
*/
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
- sc->sc_rxlink = NULL;
- sc->sc_kickpcu = 1;
+ ATH_PCU_UNLOCK(sc);
}
if (status & HAL_INT_TXURN) {
sc->sc_stats.ast_txurn++;
/* bump tx trigger level */
ath_hal_updatetxtriglevel(ah, AH_TRUE);
}
- if (status & HAL_INT_RX)
+ if (status & HAL_INT_RX) {
+ sc->sc_stats.ast_rx_intr++;
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
- if (status & HAL_INT_TX)
+ }
+ if (status & HAL_INT_TX) {
+ sc->sc_stats.ast_tx_intr++;
+ /*
+ * Grab all the currently set bits in the HAL txq bitmap
+ * and blank them. This is the only place we should be
+ * doing this.
+ */
+ ATH_PCU_LOCK(sc);
+ txqs = 0xffffffff;
+ ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
+ sc->sc_txq_active |= txqs;
taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
+ ATH_PCU_UNLOCK(sc);
+ }
if (status & HAL_INT_BMISS) {
sc->sc_stats.ast_bmiss++;
taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
@@ -1423,6 +1553,7 @@ ath_intr(void *arg)
sc->sc_stats.ast_tx_cst++;
if (status & HAL_INT_MIB) {
sc->sc_stats.ast_mib++;
+ ATH_PCU_LOCK(sc);
/*
* Disable interrupts until we service the MIB
* interrupt; otherwise it will continue to fire.
@@ -1433,13 +1564,25 @@ ath_intr(void *arg)
* clear whatever condition caused the interrupt.
*/
ath_hal_mibevent(ah, &sc->sc_halstats);
- ath_hal_intrset(ah, sc->sc_imask);
+ /*
+ * Don't reset the interrupt if we've just
+ * kicked the PCU, or we may get a nested
+ * RXEOL before the rxproc has had a chance
+ * to run.
+ */
+ if (sc->sc_kickpcu == 0)
+ ath_hal_intrset(ah, sc->sc_imask);
+ ATH_PCU_UNLOCK(sc);
}
if (status & HAL_INT_RXORN) {
/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
+ CTR0(ATH_KTR_ERR, "ath_intr: RXORN");
sc->sc_stats.ast_rxorn++;
}
}
+ ATH_PCU_LOCK(sc);
+ sc->sc_intr_cnt--;
+ ATH_PCU_UNLOCK(sc);
}
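In the HAL_INT_TX path above, the handler grabs whatever TX-queue bits the HAL reports and ORs them into sc_txq_active under the PCU lock; the TX task later takes and clears that set. A simplified userland sketch of the accumulate-then-drain pattern, with a pthread mutex standing in for ATH_PCU_LOCK (names illustrative, not driver API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t pcu = PTHREAD_MUTEX_INITIALIZER;
static uint32_t txq_active;

/* Interrupt side: note which queues completed something. */
static void
intr_mark(uint32_t txqs)
{
	pthread_mutex_lock(&pcu);
	txq_active |= txqs;
	pthread_mutex_unlock(&pcu);
}

/* Task side: atomically take the pending set and clear it. */
static uint32_t
task_claim(void)
{
	uint32_t txqs;

	pthread_mutex_lock(&pcu);
	txqs = txq_active;
	txq_active = 0;
	pthread_mutex_unlock(&pcu);
	return (txqs);
}

int
main(void)
{
	intr_mark(1U << 0);
	intr_mark(1U << 3);	/* second interrupt before the task ran */
	printf("pending txq bitmap: 0x%x\n", task_claim());	/* 0x9 */
	return (0);
}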
static void
@@ -1464,7 +1607,7 @@ ath_fatal_proc(void *arg, int pending)
state[0], state[1] , state[2], state[3],
state[4], state[5]);
}
- ath_reset(ifp);
+ ath_reset(ifp, ATH_RESET_NOLOSS);
}
static void
@@ -1524,7 +1667,7 @@ ath_bmiss_proc(void *arg, int pending)
if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
- ath_reset(ifp);
+ ath_reset(ifp, ATH_RESET_NOLOSS);
} else
ieee80211_beacon_miss(ifp->if_l2com);
}
@@ -1609,6 +1752,13 @@ ath_init(void *arg)
sc->sc_beacons = 0;
/*
+ * Initial aggregation settings.
+ */
+ sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
+ sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
+ sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
+
+ /*
* Setup the hardware after reset: the key cache
* is filled as needed and the receive engine is
* set going. Frame transmit is handled entirely
@@ -1697,9 +1847,9 @@ ath_stop_locked(struct ifnet *ifp)
}
ath_hal_intrset(ah, 0);
}
- ath_draintxq(sc);
+ ath_draintxq(sc, ATH_RESET_DEFAULT);
if (!sc->sc_invalid) {
- ath_stoprecv(sc);
+ ath_stoprecv(sc, 1);
ath_hal_phydisable(ah);
} else
sc->sc_rxlink = NULL;
@@ -1707,6 +1857,46 @@ ath_stop_locked(struct ifnet *ifp)
}
}
+#define MAX_TXRX_ITERATIONS 1000
+static void
+ath_txrx_stop(struct ath_softc *sc)
+{
+ int i = MAX_TXRX_ITERATIONS;
+
+ ATH_UNLOCK_ASSERT(sc);
+	/* Stop any new TX/RX from occurring */
+ taskqueue_block(sc->sc_tq);
+
+ ATH_PCU_LOCK(sc);
+ /*
+ * Sleep until all the pending operations have completed.
+ *
+ * The caller must ensure that reset has been incremented
+ * or the pending operations may continue being queued.
+ */
+ while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
+ sc->sc_txstart_cnt || sc->sc_intr_cnt) {
+ if (i <= 0)
+ break;
+ msleep(sc, &sc->sc_mtx, 0, "ath_txrx_stop", 1);
+ i--;
+ }
+ ATH_PCU_UNLOCK(sc);
+
+ if (i <= 0)
+ device_printf(sc->sc_dev,
+ "%s: didn't finish after %d iterations\n",
+ __func__, MAX_TXRX_ITERATIONS);
+}
+#undef MAX_TXRX_ITERATIONS
+
+static void
+ath_txrx_start(struct ath_softc *sc)
+{
+
+ taskqueue_unblock(sc->sc_tq);
+}
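ath_txrx_stop() above quiesces the chip-facing paths: with sc_inreset_cnt already bumped so new work bails out, it blocks the taskqueue and then polls, with bounded sleeps, until the in-flight counters (sc_rxproc_cnt, sc_txproc_cnt, sc_txstart_cnt, sc_intr_cnt) drain. A userland sketch of the bounded-poll idea, with msleep() approximated by a timed sleep (the driver's msleep drops and retakes the lock; this sketch unlocks explicitly):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static int inflight;		/* stands in for the sc_*_cnt counters */

/* Returns 1 if everything drained, 0 if we gave up. */
static int
txrx_stop(int max_iterations)
{
	int i, drained = 0;

	for (i = 0; i < max_iterations; i++) {
		pthread_mutex_lock(&lk);
		drained = (inflight == 0);
		pthread_mutex_unlock(&lk);
		if (drained)
			break;
		usleep(1000);	/* ~1 tick; msleep(..., 1) in the driver */
	}
	return (drained);
}

int
main(void)
{
	if (!txrx_stop(1000))
		fprintf(stderr, "didn't finish draining\n");
	return (0);
}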
+
static void
ath_stop(struct ifnet *ifp)
{
@@ -1725,16 +1915,47 @@ ath_stop(struct ifnet *ifp)
* to reset or reload hardware state.
*/
int
-ath_reset(struct ifnet *ifp)
+ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
{
struct ath_softc *sc = ifp->if_softc;
struct ieee80211com *ic = ifp->if_l2com;
struct ath_hal *ah = sc->sc_ah;
HAL_STATUS status;
+ int i;
+
+ DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
+ /* XXX ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
+ ATH_PCU_UNLOCK_ASSERT(sc);
+ ATH_UNLOCK_ASSERT(sc);
+
+ ATH_PCU_LOCK(sc);
+ /* XXX if we're already inside a reset, print out a big warning */
+ if (sc->sc_inreset_cnt > 0) {
+ device_printf(sc->sc_dev,
+ "%s: concurrent ath_reset()! Danger!\n",
+ __func__);
+ }
+ sc->sc_inreset_cnt++;
ath_hal_intrset(ah, 0); /* disable interrupts */
- ath_draintxq(sc); /* stop xmit side */
- ath_stoprecv(sc); /* stop recv side */
+ ATH_PCU_UNLOCK(sc);
+
+ /*
+ * Should now wait for pending TX/RX to complete
+	 * and block future ones from occurring. This needs to be
+ * done before the TX queue is drained.
+ */
+ ath_txrx_stop(sc);
+ ath_draintxq(sc, reset_type); /* stop xmit side */
+
+ /*
+ * Regardless of whether we're doing a no-loss flush or
+ * not, stop the PCU and handle what's in the RX queue.
+ * That way frames aren't dropped which shouldn't be.
+ */
+ ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
+ ath_rx_proc(sc, 0);
+
ath_settkipmic(sc); /* configure TKIP MIC handling */
/* NB: indicate channel change so we do a full reset */
if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
@@ -1761,8 +1982,59 @@ ath_reset(struct ifnet *ifp)
#endif
ath_beacon_config(sc, NULL);
}
+
+ /*
+ * Release the reset lock and re-enable interrupts here.
+ * If an interrupt was being processed in ath_intr(),
+ * it would disable interrupts at this point. So we have
+ * to atomically enable interrupts and decrement the
+ * reset counter - this way ath_intr() doesn't end up
+ * disabling interrupts without a corresponding enable
+	 * in the reset or channel change path.
+ */
+ ATH_PCU_LOCK(sc);
+ sc->sc_inreset_cnt--;
+ /* XXX only do this if sc_inreset_cnt == 0? */
ath_hal_intrset(ah, sc->sc_imask);
+ ATH_PCU_UNLOCK(sc);
+
+ /*
+ * TX and RX can be started here. If it were started with
+ * sc_inreset_cnt > 0, the TX and RX path would abort.
+ * Thus if this is a nested call through the reset or
+ * channel change code, TX completion will occur but
+ * RX completion and ath_start / ath_tx_start will not
+ * run.
+ */
+
+ /* Restart TX/RX as needed */
+ ath_txrx_start(sc);
+
+ /* XXX Restart TX completion and pending TX */
+ if (reset_type == ATH_RESET_NOLOSS) {
+ for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
+ if (ATH_TXQ_SETUP(sc, i)) {
+ ATH_TXQ_LOCK(&sc->sc_txq[i]);
+ ath_txq_restart_dma(sc, &sc->sc_txq[i]);
+ ath_txq_sched(sc, &sc->sc_txq[i]);
+ ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
+ }
+ }
+ }
+
+ /*
+ * This may have been set during an ath_start() call which
+ * set this once it detected a concurrent TX was going on.
+ * So, clear it.
+ */
+ /* XXX do this inside of IF_LOCK? */
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ /* Handle any frames in the TX queue */
+ /*
+ * XXX should this be done by the caller, rather than
+ * ath_reset() ?
+ */
ath_start(ifp); /* restart xmit */
return 0;
}
@@ -1786,7 +2058,8 @@ ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
return 0;
}
- return ath_reset(ifp);
+ /* XXX? Full or NOLOSS? */
+ return ath_reset(ifp, ATH_RESET_FULL);
}
struct ath_buf *
@@ -1796,19 +2069,90 @@ _ath_getbuf_locked(struct ath_softc *sc)
ATH_TXBUF_LOCK_ASSERT(sc);
- bf = STAILQ_FIRST(&sc->sc_txbuf);
+ bf = TAILQ_FIRST(&sc->sc_txbuf);
+ if (bf == NULL) {
+ sc->sc_stats.ast_tx_getnobuf++;
+ } else {
+ if (bf->bf_flags & ATH_BUF_BUSY) {
+ sc->sc_stats.ast_tx_getbusybuf++;
+ bf = NULL;
+ }
+ }
+
if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
- STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
+ TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
else
bf = NULL;
+
if (bf == NULL) {
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
- STAILQ_FIRST(&sc->sc_txbuf) == NULL ?
+ TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
"out of xmit buffers" : "xmit buffer busy");
+ return NULL;
}
+
+ /* Valid bf here; clear some basic fields */
+ bf->bf_next = NULL; /* XXX just to be sure */
+ bf->bf_last = NULL; /* XXX again, just to be sure */
+ bf->bf_comp = NULL; /* XXX again, just to be sure */
+ bzero(&bf->bf_state, sizeof(bf->bf_state));
+
return bf;
}
+/*
+ * When retrying a software frame, buffers marked ATH_BUF_BUSY
+ * can't be thrown back on the queue as they could still be
+ * in use by the hardware.
+ *
+ * This duplicates the buffer, or returns NULL.
+ *
+ * The descriptor is also copied but the link pointers and
+ * the DMA segments aren't copied; this frame should thus
+ * be again passed through the descriptor setup/chain routines
+ * so the link is correct.
+ *
+ * The caller must free the buffer using ath_freebuf().
+ *
+ * XXX TODO: this call shouldn't fail as it'll cause packet loss
+ * XXX in the TX pathway when retries are needed.
+ * XXX Figure out how to keep some buffers free, or factor the
+ * XXX number of busy buffers into the xmit path (ath_start())
+ * XXX so we don't over-commit.
+ */
+struct ath_buf *
+ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
+{
+ struct ath_buf *tbf;
+
+ tbf = ath_getbuf(sc);
+ if (tbf == NULL)
+ return NULL; /* XXX failure? Why? */
+
+ /* Copy basics */
+ tbf->bf_next = NULL;
+ tbf->bf_nseg = bf->bf_nseg;
+ tbf->bf_txflags = bf->bf_txflags;
+ tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY;
+ tbf->bf_status = bf->bf_status;
+ tbf->bf_m = bf->bf_m;
+ tbf->bf_node = bf->bf_node;
+ /* will be setup by the chain/setup function */
+ tbf->bf_lastds = NULL;
+ /* for now, last == self */
+ tbf->bf_last = tbf;
+ tbf->bf_comp = bf->bf_comp;
+
+ /* NOTE: DMA segments will be setup by the setup/chain functions */
+
+ /* The caller has to re-init the descriptor + links */
+
+ /* Copy state */
+ memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
+
+ return tbf;
+}
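A likely use of ath_buf_clone() is the software retry path: when the original buffer is still ATH_BUF_BUSY, its frame state is copied onto a fresh buffer which is then re-run through descriptor setup. A hedged standalone sketch of the same copy-metadata-not-linkage idea on a simplified buffer type (not the driver's structures):

#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for ath_buf; illustrative fields only. */
struct buf {
	struct buf	*next;		/* list linkage: not cloned */
	void		*m;		/* frame payload: shared */
	int		 nseg;
	int		 flags;
#define	BUF_BUSY	0x1
	char		 state[32];	/* per-frame TX state: copied */
};

/* Clone metadata but not linkage; the caller redoes descriptor setup. */
static struct buf *
buf_clone(const struct buf *bf)
{
	struct buf *tbf = calloc(1, sizeof(*tbf));

	if (tbf == NULL)
		return (NULL);
	tbf->next = NULL;			/* re-chained later */
	tbf->m = bf->m;
	tbf->nseg = bf->nseg;
	tbf->flags = bf->flags & ~BUF_BUSY;	/* new buf isn't busy */
	memcpy(tbf->state, bf->state, sizeof(tbf->state));
	return (tbf);
}

int
main(void)
{
	struct buf a = { .nseg = 1, .flags = BUF_BUSY };
	struct buf *b = buf_clone(&a);

	free(b);
	return (0);
}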
+
struct ath_buf *
ath_getbuf(struct ath_softc *sc)
{
@@ -1821,6 +2165,7 @@ ath_getbuf(struct ath_softc *sc)
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
sc->sc_stats.ast_tx_qstop++;
+ /* XXX do this inside of IF_LOCK? */
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}
ATH_TXBUF_UNLOCK(sc);
@@ -1838,6 +2183,20 @@ ath_start(struct ifnet *ifp)
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
return;
+
+ /* XXX is it ok to hold the ATH_LOCK here? */
+ ATH_PCU_LOCK(sc);
+ if (sc->sc_inreset_cnt > 0) {
+ device_printf(sc->sc_dev,
+ "%s: sc_inreset_cnt > 0; bailing\n", __func__);
+ /* XXX do this inside of IF_LOCK? */
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ ATH_PCU_UNLOCK(sc);
+ return;
+ }
+ sc->sc_txstart_cnt++;
+ ATH_PCU_UNLOCK(sc);
+
for (;;) {
/*
* Grab a TX buffer and associated resources.
@@ -1849,7 +2208,7 @@ ath_start(struct ifnet *ifp)
IFQ_DEQUEUE(&ifp->if_snd, m);
if (m == NULL) {
ATH_TXBUF_LOCK(sc);
- STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
+ TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
ATH_TXBUF_UNLOCK(sc);
break;
}
@@ -1860,7 +2219,7 @@ ath_start(struct ifnet *ifp)
* buffers to send all the fragments so all
* go out or none...
*/
- STAILQ_INIT(&frags);
+ TAILQ_INIT(&frags);
if ((m->m_flags & M_FRAG) &&
!ath_txfrag_setup(sc, &frags, m, ni)) {
DPRINTF(sc, ATH_DEBUG_XMIT,
@@ -1892,7 +2251,7 @@ ath_start(struct ifnet *ifp)
bf->bf_m = NULL;
bf->bf_node = NULL;
ATH_TXBUF_LOCK(sc);
- STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
+ TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
ath_txfrag_cleanup(sc, &frags, ni);
ATH_TXBUF_UNLOCK(sc);
if (ni != NULL)
@@ -1913,14 +2272,18 @@ ath_start(struct ifnet *ifp)
goto reclaim;
}
m = next;
- bf = STAILQ_FIRST(&frags);
+ bf = TAILQ_FIRST(&frags);
KASSERT(bf != NULL, ("no buf for txfrag"));
- STAILQ_REMOVE_HEAD(&frags, bf_list);
+ TAILQ_REMOVE(&frags, bf, bf_list);
goto nextfrag;
}
sc->sc_wd_timer = 5;
}
+
+ ATH_PCU_LOCK(sc);
+ sc->sc_txstart_cnt--;
+ ATH_PCU_UNLOCK(sc);
}
static int
@@ -2339,6 +2702,8 @@ ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
/* setup descriptors */
ds = bf->bf_desc;
+ bf->bf_last = bf;
+ bf->bf_lastds = ds;
flags = HAL_TXDESC_NOACK;
if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
@@ -2414,11 +2779,13 @@ ath_beacon_update(struct ieee80211vap *vap, int item)
static void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{
- STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
+ TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
dst->axq_link = src->axq_link;
src->axq_link = NULL;
dst->axq_depth += src->axq_depth;
+ dst->axq_aggr_depth += src->axq_aggr_depth;
src->axq_depth = 0;
+ src->axq_aggr_depth = 0;
}
/*
@@ -2609,7 +2976,7 @@ ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
* Move frames from the s/w mcast q to the h/w cab q.
* XXX MORE_DATA bit
*/
- bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q);
+ bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q);
if (cabq->axq_link != NULL) {
*cabq->axq_link = bfm->bf_daddr;
} else
@@ -2620,7 +2987,7 @@ ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
sc->sc_stats.ast_cabq_xmit += nmcastq;
}
/* NB: gated by beacon so safe to start here */
- if (! STAILQ_EMPTY(&(cabq->axq_q)))
+ if (! TAILQ_EMPTY(&(cabq->axq_q)))
ath_hal_txstart(ah, cabq->axq_qnum);
ATH_TXQ_UNLOCK(&avp->av_mcastq);
ATH_TXQ_UNLOCK(cabq);
@@ -2676,11 +3043,19 @@ ath_bstuck_proc(void *arg, int pending)
{
struct ath_softc *sc = arg;
struct ifnet *ifp = sc->sc_ifp;
+ uint32_t hangs = 0;
+
+ if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
+ if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
sc->sc_bmisscount);
sc->sc_stats.ast_bstuck++;
- ath_reset(ifp);
+ /*
+ * This assumes that there's no simultaneous channel mode change
+ * occurring.
+ */
+ ath_reset(ifp, ATH_RESET_NOLOSS);
}
/*
@@ -2699,7 +3074,7 @@ ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
ieee80211_free_node(bf->bf_node);
bf->bf_node = NULL;
}
- STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
+ TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
}
/*
@@ -2710,7 +3085,7 @@ ath_beacon_free(struct ath_softc *sc)
{
struct ath_buf *bf;
- STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
+ TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
if (bf->bf_m != NULL) {
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
m_freem(bf->bf_m);
@@ -3029,7 +3404,7 @@ ath_descdma_setup(struct ath_softc *sc,
}
dd->dd_bufptr = bf;
- STAILQ_INIT(head);
+ TAILQ_INIT(head);
for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) {
bf->bf_desc = (struct ath_desc *) ds;
bf->bf_daddr = DS2PHYS(dd, ds);
@@ -3055,7 +3430,8 @@ ath_descdma_setup(struct ath_softc *sc,
ath_descdma_cleanup(sc, dd, head);
return error;
}
- STAILQ_INSERT_TAIL(head, bf, bf_list);
+ bf->bf_lastds = bf->bf_desc; /* Just an initial value */
+ TAILQ_INSERT_TAIL(head, bf, bf_list);
}
return 0;
fail3:
@@ -3084,7 +3460,7 @@ ath_descdma_cleanup(struct ath_softc *sc,
bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
bus_dma_tag_destroy(dd->dd_dmat);
- STAILQ_FOREACH(bf, head, bf_list) {
+ TAILQ_FOREACH(bf, head, bf_list) {
if (bf->bf_m) {
m_freem(bf->bf_m);
bf->bf_m = NULL;
@@ -3103,7 +3479,7 @@ ath_descdma_cleanup(struct ath_softc *sc,
}
}
- STAILQ_INIT(head);
+ TAILQ_INIT(head);
free(dd->dd_bufptr, M_ATHDEV);
memset(dd, 0, sizeof(*dd));
}
@@ -3162,19 +3538,38 @@ ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
}
ath_rate_node_init(sc, an);
+ /* Set up the node mutex; there's no associd yet, so name it after the node pointer */
+ snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
+ device_get_nameunit(sc->sc_dev), an);
+ mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
+
+ /* XXX setup ath_tid */
+ ath_tx_tid_init(sc, an);
+
DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
return &an->an_node;
}
static void
+ath_node_cleanup(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ath_softc *sc = ic->ic_ifp->if_softc;
+
+ /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
+ ath_tx_node_flush(sc, ATH_NODE(ni));
+ ath_rate_node_cleanup(sc, ATH_NODE(ni));
+ sc->sc_node_cleanup(ni);
+}
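+
+/*
+ * Note the split: the cleanup method above flushes software TX and
+ * rate control state whilst the node is still valid; the free method
+ * below only tears down the node mutex before chaining to the
+ * original net80211 free method.
+ */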
+
+static void
ath_node_free(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ath_softc *sc = ic->ic_ifp->if_softc;
+ struct ath_softc *sc = ic->ic_ifp->if_softc;
DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
-
- ath_rate_node_cleanup(sc, ATH_NODE(ni));
+ mtx_destroy(&ATH_NODE(ni)->an_mtx);
sc->sc_node_free(ni);
}
@@ -3454,13 +3849,35 @@ ath_handle_micerror(struct ieee80211com *ic,
}
}
+/*
+ * Only run the RX proc if it's not already running.
+ * Since this may be run directly from the reset/flush path,
+ * the task must not clash with an already-running RX path.
+ */
static void
-ath_rx_proc(void *arg, int npending)
+ath_rx_tasklet(void *arg, int npending)
+{
+ struct ath_softc *sc = arg;
+
+ CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending);
+ DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
+ ATH_PCU_LOCK(sc);
+ if (sc->sc_inreset_cnt > 0) {
+ device_printf(sc->sc_dev,
+ "%s: sc_inreset_cnt > 0; skipping\n", __func__);
+ ATH_PCU_UNLOCK(sc);
+ return;
+ }
+ ATH_PCU_UNLOCK(sc);
+ ath_rx_proc(sc, 1);
+}
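+
+/*
+ * For reference: the reset path calls ath_rx_proc(sc, 0) directly
+ * (no rescheduling), whilst the taskqueue runs this wrapper with
+ * resched=1 - see ath_chan_set() below.
+ */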
+
+static void
+ath_rx_proc(struct ath_softc *sc, int resched)
{
#define PA2DESC(_sc, _pa) \
((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
- struct ath_softc *sc = arg;
struct ath_buf *bf;
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
@@ -3473,14 +3890,23 @@ ath_rx_proc(void *arg, int npending)
HAL_STATUS status;
int16_t nf;
u_int64_t tsf;
+ int npkts = 0;
- DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
+ /* XXX we must not hold the ATH_LOCK here */
+ ATH_UNLOCK_ASSERT(sc);
+ ATH_PCU_UNLOCK_ASSERT(sc);
+
+ ATH_PCU_LOCK(sc);
+ sc->sc_rxproc_cnt++;
+ ATH_PCU_UNLOCK(sc);
+
+ DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
ngood = 0;
nf = ath_hal_getchannoise(ah, sc->sc_curchan);
sc->sc_stats.ast_rx_noise = nf;
tsf = ath_hal_gettsf64(ah);
do {
- bf = STAILQ_FIRST(&sc->sc_rxbuf);
+ bf = TAILQ_FIRST(&sc->sc_rxbuf);
if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */
if_printf(ifp, "%s: no buffer!\n", __func__);
break;
@@ -3500,7 +3926,7 @@ ath_rx_proc(void *arg, int npending)
*/
/* XXX make debug msg */
if_printf(ifp, "%s: no mbuf!\n", __func__);
- STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
+ TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
goto rx_next;
}
ds = bf->bf_desc;
@@ -3530,7 +3956,9 @@ ath_rx_proc(void *arg, int npending)
#endif
if (status == HAL_EINPROGRESS)
break;
- STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
+
+ TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
+ npkts++;
/* These aren't specifically errors */
if (rs->rs_flags & HAL_RX_GI)
@@ -3624,8 +4052,10 @@ rx_error:
/* NB: bpf needs the mbuf length setup */
len = rs->rs_datalen;
m->m_pkthdr.len = m->m_len = len;
+ bf->bf_m = NULL;
ath_rx_tap(ifp, m, rs, tsf, nf);
ieee80211_radiotap_rx_all(ic, m);
+ m_freem(m);
}
/* XXX pass MIC errors up for s/w recalculation */
goto rx_next;
@@ -3797,7 +4227,7 @@ rx_accept:
ath_led_event(sc, 0);
}
rx_next:
- STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
+ TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
} while (ath_rxbuf_init(sc, bf) == 0);
/* rx signal state monitoring */
@@ -3805,8 +4235,9 @@ rx_next:
if (ngood)
sc->sc_lastrx = tsf;
+ CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood);
/* Queue DFS tasklet if needed */
- if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
+ if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan))
taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
/*
@@ -3814,21 +4245,30 @@ rx_next:
* need to be handled, kick the PCU if there's
* been an RXEOL condition.
*/
- if (sc->sc_kickpcu) {
- sc->sc_kickpcu = 0;
- ath_stoprecv(sc);
- sc->sc_imask |= (HAL_INT_RXEOL | HAL_INT_RXORN);
- if (ath_startrecv(sc) != 0) {
- if_printf(ifp,
- "%s: couldn't restart RX after RXEOL; resetting\n",
- __func__);
- ath_reset(ifp);
- return;
- }
+ ATH_PCU_LOCK(sc);
+ if (resched && sc->sc_kickpcu) {
+ CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu");
+ device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n",
+ __func__, npkts);
+
+ /* XXX rxslink? */
+ /*
+ * XXX can we hold the PCU lock here?
+ * Are there any net80211 buffer calls involved?
+ */
+ bf = TAILQ_FIRST(&sc->sc_rxbuf);
+ ath_hal_putrxbuf(ah, bf->bf_daddr);
+ ath_hal_rxena(ah); /* enable recv descriptors */
+ ath_mode_init(sc); /* set filters, etc. */
+ ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
+
ath_hal_intrset(ah, sc->sc_imask);
+ sc->sc_kickpcu = 0;
}
+ ATH_PCU_UNLOCK(sc);
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
+ /* XXX check this inside of IF_LOCK? */
+ if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
ieee80211_ff_age_all(ic, 100);
#endif
@@ -3836,6 +4276,10 @@ rx_next:
ath_start(ifp);
}
#undef PA2DESC
+
+ ATH_PCU_LOCK(sc);
+ sc->sc_rxproc_cnt--;
+ ATH_PCU_UNLOCK(sc);
}
static void
@@ -3844,9 +4288,12 @@ ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
txq->axq_qnum = qnum;
txq->axq_ac = 0;
txq->axq_depth = 0;
+ txq->axq_aggr_depth = 0;
txq->axq_intrcnt = 0;
txq->axq_link = NULL;
- STAILQ_INIT(&txq->axq_q);
+ txq->axq_softc = sc;
+ TAILQ_INIT(&txq->axq_q);
+ TAILQ_INIT(&txq->axq_tidq);
ATH_TXQ_LOCK_INIT(sc, txq);
}
@@ -3972,10 +4419,15 @@ ath_txq_update(struct ath_softc *sc, int ac)
qi.tqi_burstTime = qi.tqi_readyTime;
} else {
#endif
+ /*
+ * XXX shouldn't this just use the default flags
+ * used in the previous queue setup?
+ */
qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
| HAL_TXQ_TXERRINT_ENABLE
| HAL_TXQ_TXDESCINT_ENABLE
| HAL_TXQ_TXURNINT_ENABLE
+ | HAL_TXQ_TXEOLINT_ENABLE
;
qi.tqi_aifs = wmep->wmep_aifsn;
qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
@@ -4056,21 +4508,159 @@ ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
return (rix == 0xff ? 0 : rix);
}
+static void
+ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
+ struct ath_buf *bf)
+{
+ struct ieee80211_node *ni = bf->bf_node;
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+ int sr, lr, pri;
+
+ if (ts->ts_status == 0) {
+ u_int8_t txant = ts->ts_antenna;
+ sc->sc_stats.ast_ant_tx[txant]++;
+ sc->sc_ant_tx[txant]++;
+ if (ts->ts_finaltsi != 0)
+ sc->sc_stats.ast_tx_altrate++;
+ pri = M_WME_GETAC(bf->bf_m);
+ if (pri >= WME_AC_VO)
+ ic->ic_wme.wme_hipri_traffic++;
+ if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
+ ni->ni_inact = ni->ni_inact_reload;
+ } else {
+ if (ts->ts_status & HAL_TXERR_XRETRY)
+ sc->sc_stats.ast_tx_xretries++;
+ if (ts->ts_status & HAL_TXERR_FIFO)
+ sc->sc_stats.ast_tx_fifoerr++;
+ if (ts->ts_status & HAL_TXERR_FILT)
+ sc->sc_stats.ast_tx_filtered++;
+ if (ts->ts_status & HAL_TXERR_XTXOP)
+ sc->sc_stats.ast_tx_xtxop++;
+ if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
+ sc->sc_stats.ast_tx_timerexpired++;
+
+ if (ts->ts_status & HAL_TX_DATA_UNDERRUN)
+ sc->sc_stats.ast_tx_data_underrun++;
+ if (ts->ts_status & HAL_TX_DELIM_UNDERRUN)
+ sc->sc_stats.ast_tx_delim_underrun++;
+
+ if (bf->bf_m->m_flags & M_FF)
+ sc->sc_stats.ast_ff_txerr++;
+ }
+ /* XXX when is this valid? */
+ if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
+ sc->sc_stats.ast_tx_desccfgerr++;
+
+ sr = ts->ts_shortretry;
+ lr = ts->ts_longretry;
+ sc->sc_stats.ast_tx_shortretry += sr;
+ sc->sc_stats.ast_tx_longretry += lr;
+}
+
+/*
+ * The default completion. If fail is 1, this means
+ * "please don't retry the frame, and just return -1 status
+ * to the net80211 stack.
+ */
+void
+ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
+{
+ struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
+ int st;
+
+ if (fail == 1)
+ st = -1;
+ else
+ st = ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0) ?
+ ts->ts_status : HAL_TXERR_XRETRY;
+
+ if (bf->bf_state.bfs_dobaw)
+ device_printf(sc->sc_dev,
+ "%s: dobaw should've been cleared!\n", __func__);
+ if (bf->bf_next != NULL)
+ device_printf(sc->sc_dev,
+ "%s: bf_next not NULL!\n", __func__);
+
+ /*
+ * Do any tx complete callback. Note this must
+ * be done before releasing the node reference.
+ * This will free the mbuf, release the net80211
+ * node and recycle the ath_buf.
+ */
+ ath_tx_freebuf(sc, bf, st);
+}
+
+/*
+ * Update rate control with the given completion status.
+ */
+void
+ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
+ struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
+ int nframes, int nbad)
+{
+ struct ath_node *an;
+
+ /* Only for unicast frames */
+ if (ni == NULL)
+ return;
+
+ an = ATH_NODE(ni);
+
+ if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
+ ATH_NODE_LOCK(an);
+ ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
+ ATH_NODE_UNLOCK(an);
+ }
+}
+
+/*
+ * Update the busy status of the last frame on the free list.
+ * When doing TDMA, the busy flag tracks whether the hardware
+ * currently points to this buffer or not, and thus gated DMA
+ * may restart by re-reading the last descriptor in this
+ * buffer.
+ *
+ * This should be called in the completion function once one
+ * of the buffers has been used.
+ */
+static void
+ath_tx_update_busy(struct ath_softc *sc)
+{
+ struct ath_buf *last;
+
+ /*
+ * Since the last frame may still be marked
+ * as ATH_BUF_BUSY, unmark it here before
+ * finishing the frame processing.
+ * Since we've completed a frame (aggregate
+ * or otherwise), the hardware has moved on
+ * and is no longer referencing the previous
+ * descriptor.
+ */
+ ATH_TXBUF_LOCK_ASSERT(sc);
+ last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
+ if (last != NULL)
+ last->bf_flags &= ~ATH_BUF_BUSY;
+}
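+
+/*
+ * The expected calling pattern, as used by ath_freebuf() below:
+ *
+ *	ATH_TXBUF_LOCK(sc);
+ *	ath_tx_update_busy(sc);
+ *	TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
+ *	ATH_TXBUF_UNLOCK(sc);
+ */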
+
/*
* Process completed xmit descriptors from the specified queue.
+ * Kick the packet scheduler if needed; scheduling may happen from
+ * within this completion task.
*/
static int
-ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
+ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
{
struct ath_hal *ah = sc->sc_ah;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ath_buf *bf, *last;
- struct ath_desc *ds, *ds0;
+ struct ath_buf *bf;
+ struct ath_desc *ds;
struct ath_tx_status *ts;
struct ieee80211_node *ni;
struct ath_node *an;
- int sr, lr, pri, nacked;
+ int nacked;
HAL_STATUS status;
DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
@@ -4081,13 +4671,12 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
for (;;) {
ATH_TXQ_LOCK(txq);
txq->axq_intrcnt = 0; /* reset periodic desc intr count */
- bf = STAILQ_FIRST(&txq->axq_q);
+ bf = TAILQ_FIRST(&txq->axq_q);
if (bf == NULL) {
ATH_TXQ_UNLOCK(txq);
break;
}
- ds0 = &bf->bf_desc[0];
- ds = &bf->bf_desc[bf->bf_nseg - 1];
+ ds = bf->bf_lastds; /* XXX must be setup correctly! */
ts = &bf->bf_status.ds_txstat;
status = ath_hal_txprocdesc(ah, ds, ts);
#ifdef ATH_DEBUG
@@ -4099,104 +4688,72 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ATH_TXQ_UNLOCK(txq);
break;
}
- ATH_TXQ_REMOVE_HEAD(txq, bf_list);
+ ATH_TXQ_REMOVE(txq, bf, bf_list);
#ifdef IEEE80211_SUPPORT_TDMA
if (txq->axq_depth > 0) {
/*
* More frames follow. Mark the buffer busy
* so it's not re-used while the hardware may
* still re-read the link field in the descriptor.
+ *
+ * Use the last buffer in an aggregate as that
+ * is where the hardware may be - intermediate
+ * descriptors won't be "busy".
*/
- bf->bf_flags |= ATH_BUF_BUSY;
+ bf->bf_last->bf_flags |= ATH_BUF_BUSY;
} else
#else
if (txq->axq_depth == 0)
#endif
txq->axq_link = NULL;
- ATH_TXQ_UNLOCK(txq);
+ if (bf->bf_state.bfs_aggr)
+ txq->axq_aggr_depth--;
ni = bf->bf_node;
+ /*
+ * If the unicast frame was ack'd, update the RSSI,
+ * including the last rx time used to work around
+ * phantom bmiss interrupts.
+ */
+ if (ni != NULL && ts->ts_status == 0 &&
+ ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)) {
+ nacked++;
+ sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
+ ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
+ ts->ts_rssi);
+ }
+ ATH_TXQ_UNLOCK(txq);
+
+ /* If unicast frame, update general statistics */
if (ni != NULL) {
an = ATH_NODE(ni);
- if (ts->ts_status == 0) {
- u_int8_t txant = ts->ts_antenna;
- sc->sc_stats.ast_ant_tx[txant]++;
- sc->sc_ant_tx[txant]++;
- if (ts->ts_finaltsi != 0)
- sc->sc_stats.ast_tx_altrate++;
- pri = M_WME_GETAC(bf->bf_m);
- if (pri >= WME_AC_VO)
- ic->ic_wme.wme_hipri_traffic++;
- if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
- ni->ni_inact = ni->ni_inact_reload;
- } else {
- if (ts->ts_status & HAL_TXERR_XRETRY)
- sc->sc_stats.ast_tx_xretries++;
- if (ts->ts_status & HAL_TXERR_FIFO)
- sc->sc_stats.ast_tx_fifoerr++;
- if (ts->ts_status & HAL_TXERR_FILT)
- sc->sc_stats.ast_tx_filtered++;
- if (ts->ts_status & HAL_TXERR_XTXOP)
- sc->sc_stats.ast_tx_xtxop++;
- if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
- sc->sc_stats.ast_tx_timerexpired++;
-
- /* XXX HAL_TX_DATA_UNDERRUN */
- /* XXX HAL_TX_DELIM_UNDERRUN */
-
- if (bf->bf_m->m_flags & M_FF)
- sc->sc_stats.ast_ff_txerr++;
- }
- /* XXX when is this valid? */
- if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
- sc->sc_stats.ast_tx_desccfgerr++;
-
- sr = ts->ts_shortretry;
- lr = ts->ts_longretry;
- sc->sc_stats.ast_tx_shortretry += sr;
- sc->sc_stats.ast_tx_longretry += lr;
- /*
- * Hand the descriptor to the rate control algorithm.
- */
+ /* update statistics */
+ ath_tx_update_stats(sc, ts, bf);
+ }
+
+ /*
+ * Call the completion handler.
+ * The completion handler is responsible for
+ * calling the rate control code.
+ *
+ * Frames with no completion handler get the
+ * rate control code called here.
+ */
+ if (bf->bf_comp == NULL) {
if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
(bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
/*
- * If frame was ack'd update statistics,
- * including the last rx time used to
- * workaround phantom bmiss interrupts.
+ * XXX assume this isn't an aggregate
+ * frame.
*/
- if (ts->ts_status == 0) {
- nacked++;
- sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
- ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
- ts->ts_rssi);
- }
- ath_rate_tx_complete(sc, an, bf);
+ ath_tx_update_ratectrl(sc, ni,
+ bf->bf_state.bfs_rc, ts,
+ bf->bf_state.bfs_pktlen, 1,
+ (ts->ts_status == 0 ? 0 : 1));
}
- /*
- * Do any tx complete callback. Note this must
- * be done before releasing the node reference.
- */
- if (bf->bf_m->m_flags & M_TXCB)
- ieee80211_process_callback(ni, bf->bf_m,
- (bf->bf_txflags & HAL_TXDESC_NOACK) == 0 ?
- ts->ts_status : HAL_TXERR_XRETRY);
- ieee80211_free_node(ni);
- }
- bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
-
- m_freem(bf->bf_m);
- bf->bf_m = NULL;
- bf->bf_node = NULL;
-
- ATH_TXBUF_LOCK(sc);
- last = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
- if (last != NULL)
- last->bf_flags &= ~ATH_BUF_BUSY;
- STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
- ATH_TXBUF_UNLOCK(sc);
+ ath_tx_default_comp(sc, bf, 0);
+ } else
+ bf->bf_comp(sc, bf, 0);
}
#ifdef IEEE80211_SUPPORT_SUPERG
/*
@@ -4205,16 +4762,18 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
if (txq->axq_depth <= 1)
ieee80211_ff_flush(ic, txq->axq_ac);
#endif
+
+ /* Kick the TXQ scheduler */
+ if (dosched) {
+ ATH_TXQ_LOCK(txq);
+ ath_txq_sched(sc, txq);
+ ATH_TXQ_UNLOCK(txq);
+ }
+
return nacked;
}
-static __inline int
-txqactive(struct ath_hal *ah, int qnum)
-{
- u_int32_t txqs = 1<<qnum;
- ath_hal_gettxintrtxqs(ah, &txqs);
- return (txqs & (1<<qnum));
-}
+#define TXQACTIVE(t, q) ((t) & (1 << (q)))
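+
+/*
+ * A sketch of the producer side that TXQACTIVE assumes: the interrupt
+ * path is expected to latch the TX-complete queue bits roughly like
+ * this (hypothetical; the actual code is in ath_intr()):
+ *
+ *	uint32_t txqs = 0xffffffff;
+ *
+ *	ath_hal_gettxintrtxqs(ah, &txqs);
+ *	ATH_PCU_LOCK(sc);
+ *	sc->sc_txq_active |= txqs;
+ *	ATH_PCU_UNLOCK(sc);
+ *
+ * The deferred TX tasks then snapshot and clear sc_txq_active under
+ * the same lock.
+ */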
/*
* Deferred processing of transmit interrupt; special-cased
@@ -4225,17 +4784,30 @@ ath_tx_proc_q0(void *arg, int npending)
{
struct ath_softc *sc = arg;
struct ifnet *ifp = sc->sc_ifp;
+ uint32_t txqs;
- if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
+ ATH_PCU_LOCK(sc);
+ sc->sc_txproc_cnt++;
+ txqs = sc->sc_txq_active;
+ sc->sc_txq_active &= ~txqs;
+ ATH_PCU_UNLOCK(sc);
+
+ if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
+ /* XXX why is lastrx updated in tx code? */
sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
- if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
- ath_tx_processq(sc, sc->sc_cabq);
+ if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
+ ath_tx_processq(sc, sc->sc_cabq, 1);
+ /* XXX check this inside of IF_LOCK? */
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->sc_wd_timer = 0;
if (sc->sc_softled)
ath_led_event(sc, sc->sc_txrix);
+ ATH_PCU_LOCK(sc);
+ sc->sc_txproc_cnt--;
+ ATH_PCU_UNLOCK(sc);
+
ath_start(ifp);
}
@@ -4249,30 +4821,42 @@ ath_tx_proc_q0123(void *arg, int npending)
struct ath_softc *sc = arg;
struct ifnet *ifp = sc->sc_ifp;
int nacked;
+ uint32_t txqs;
+
+ ATH_PCU_LOCK(sc);
+ sc->sc_txproc_cnt++;
+ txqs = sc->sc_txq_active;
+ sc->sc_txq_active &= ~txqs;
+ ATH_PCU_UNLOCK(sc);
/*
* Process each active queue.
*/
nacked = 0;
- if (txqactive(sc->sc_ah, 0))
- nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
- if (txqactive(sc->sc_ah, 1))
- nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
- if (txqactive(sc->sc_ah, 2))
- nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
- if (txqactive(sc->sc_ah, 3))
- nacked += ath_tx_processq(sc, &sc->sc_txq[3]);
- if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
- ath_tx_processq(sc, sc->sc_cabq);
+ if (TXQACTIVE(txqs, 0))
+ nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
+ if (TXQACTIVE(txqs, 1))
+ nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
+ if (TXQACTIVE(txqs, 2))
+ nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
+ if (TXQACTIVE(txqs, 3))
+ nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
+ if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
+ ath_tx_processq(sc, sc->sc_cabq, 1);
if (nacked)
sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
+ /* XXX check this inside of IF_LOCK? */
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->sc_wd_timer = 0;
if (sc->sc_softled)
ath_led_event(sc, sc->sc_txrix);
+ ATH_PCU_LOCK(sc);
+ sc->sc_txproc_cnt--;
+ ATH_PCU_UNLOCK(sc);
+
ath_start(ifp);
}
@@ -4285,33 +4869,106 @@ ath_tx_proc(void *arg, int npending)
struct ath_softc *sc = arg;
struct ifnet *ifp = sc->sc_ifp;
int i, nacked;
+ uint32_t txqs;
+
+ ATH_PCU_LOCK(sc);
+ sc->sc_txproc_cnt++;
+ txqs = sc->sc_txq_active;
+ sc->sc_txq_active &= ~txqs;
+ ATH_PCU_UNLOCK(sc);
/*
* Process each active queue.
*/
nacked = 0;
for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
- nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
+ if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
+ nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
if (nacked)
sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
+ /* XXX check this inside of IF_LOCK? */
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->sc_wd_timer = 0;
if (sc->sc_softled)
ath_led_event(sc, sc->sc_txrix);
+ ATH_PCU_LOCK(sc);
+ sc->sc_txproc_cnt--;
+ ATH_PCU_UNLOCK(sc);
+
ath_start(ifp);
}
+#undef TXQACTIVE
-static void
+/*
+ * Return a buffer to the pool and update the 'busy' flag on the
+ * previous 'tail' entry.
+ *
+ * This _must_ only be called once the buffer's TX has completed.
+ * The logic is that since this buffer has completed, the previous
+ * buffer on the list is no longer being referenced by a halted
+ * TX DMA queue waiting for restart (eg for TDMA.)
+ *
+ * The caller must have already freed the mbuf and reclaimed the
+ * node reference.
+ */
+void
+ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
+{
+ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
+
+ KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
+ KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
+
+ ATH_TXBUF_LOCK(sc);
+ ath_tx_update_busy(sc);
+ TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
+ ATH_TXBUF_UNLOCK(sc);
+}
+
+/*
+ * This is currently used by ath_tx_draintxq() and
+ * ath_tx_tid_free_pkts().
+ *
+ * It recycles a single ath_buf.
+ */
+void
+ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
+{
+ struct ieee80211_node *ni = bf->bf_node;
+ struct mbuf *m0 = bf->bf_m;
+
+ bf->bf_node = NULL;
+ bf->bf_m = NULL;
+
+ /* Free the buffer, it's not needed any longer */
+ ath_freebuf(sc, bf);
+
+ if (ni != NULL) {
+ /*
+ * Do any callback and reclaim the node reference.
+ */
+ if (m0->m_flags & M_TXCB)
+ ieee80211_process_callback(ni, m0, status);
+ ieee80211_free_node(ni);
+ }
+ m_freem(m0);
+
+ /*
+ * XXX the buffer used to be freed -after- the callback, and the
+ * DMA map was torn down where ath_freebuf() now is; the effect
+ * of this reordering hasn't been verified.
+ */
+}
+
+void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
struct ath_hal *ah = sc->sc_ah;
#endif
- struct ieee80211_node *ni;
struct ath_buf *bf;
u_int ix;
@@ -4320,50 +4977,55 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
* we do not need to block ath_tx_proc
*/
ATH_TXBUF_LOCK(sc);
- bf = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
+ bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
if (bf != NULL)
bf->bf_flags &= ~ATH_BUF_BUSY;
ATH_TXBUF_UNLOCK(sc);
+
for (ix = 0;; ix++) {
ATH_TXQ_LOCK(txq);
- bf = STAILQ_FIRST(&txq->axq_q);
+ bf = TAILQ_FIRST(&txq->axq_q);
if (bf == NULL) {
txq->axq_link = NULL;
ATH_TXQ_UNLOCK(txq);
break;
}
- ATH_TXQ_REMOVE_HEAD(txq, bf_list);
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_REMOVE(txq, bf, bf_list);
+ if (bf->bf_state.bfs_aggr)
+ txq->axq_aggr_depth--;
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RESET) {
struct ieee80211com *ic = sc->sc_ifp->if_l2com;
ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
- ath_hal_txprocdesc(ah, bf->bf_desc,
+ ath_hal_txprocdesc(ah, bf->bf_lastds,
&bf->bf_status.ds_txstat) == HAL_OK);
ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
bf->bf_m->m_len, 0, -1);
}
#endif /* ATH_DEBUG */
- bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
- ni = bf->bf_node;
- bf->bf_node = NULL;
- if (ni != NULL) {
- /*
- * Do any callback and reclaim the node reference.
- */
- if (bf->bf_m->m_flags & M_TXCB)
- ieee80211_process_callback(ni, bf->bf_m, -1);
- ieee80211_free_node(ni);
- }
- m_freem(bf->bf_m);
- bf->bf_m = NULL;
+ /*
+ * Since the completion functions now handle BAW and
+ * software retry state, we -must- call them for
+ * aggregation destinations or BAW tracking will get upset.
+ */
+ /*
+ * Clear ATH_BUF_BUSY; the completion handler
+ * will free the buffer.
+ */
+ ATH_TXQ_UNLOCK(txq);
bf->bf_flags &= ~ATH_BUF_BUSY;
-
- ATH_TXBUF_LOCK(sc);
- STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
- ATH_TXBUF_UNLOCK(sc);
+ if (bf->bf_comp)
+ bf->bf_comp(sc, bf, 1);
+ else
+ ath_tx_default_comp(sc, bf, 1);
}
+
+ /*
+ * Drain software queued frames which are on
+ * active TIDs.
+ */
+ ath_tx_txq_drain(sc, txq);
}
static void
@@ -4378,17 +5040,16 @@ ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
}
-/*
- * Drain the transmit queues and reclaim resources.
- */
-static void
-ath_draintxq(struct ath_softc *sc)
+static int
+ath_stoptxdma(struct ath_softc *sc)
{
struct ath_hal *ah = sc->sc_ah;
- struct ifnet *ifp = sc->sc_ifp;
int i;
/* XXX return value */
+ if (sc->sc_invalid)
+ return 0;
+
if (!sc->sc_invalid) {
/* don't touch the hardware if marked invalid */
DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
@@ -4400,15 +5061,42 @@ ath_draintxq(struct ath_softc *sc)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_stopdma(sc, &sc->sc_txq[i]);
}
- for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_draintxq(sc, &sc->sc_txq[i]);
+
+ return 1;
+}
+
+/*
+ * Drain the transmit queues and reclaim resources.
+ */
+static void
+ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
+{
+#ifdef ATH_DEBUG
+ struct ath_hal *ah = sc->sc_ah;
+#endif
+ struct ifnet *ifp = sc->sc_ifp;
+ int i;
+
+ (void) ath_stoptxdma(sc);
+
+ for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
+ /*
+ * XXX TODO: should we just handle the completed TX frames
+ * here, whether or not the reset is a full one or not?
+ */
+ if (ATH_TXQ_SETUP(sc, i)) {
+ if (reset_type == ATH_RESET_NOLOSS)
+ ath_tx_processq(sc, &sc->sc_txq[i], 0);
+ else
+ ath_tx_draintxq(sc, &sc->sc_txq[i]);
+ }
+ }
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RESET) {
- struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
+ struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
if (bf != NULL && bf->bf_m != NULL) {
ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
- ath_hal_txprocdesc(ah, bf->bf_desc,
+ ath_hal_txprocdesc(ah, bf->bf_lastds,
&bf->bf_status.ds_txstat) == HAL_OK);
ieee80211_dump_pkt(ifp->if_l2com,
mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
@@ -4416,6 +5104,7 @@ ath_draintxq(struct ath_softc *sc)
}
}
#endif /* ATH_DEBUG */
+ /* XXX check this inside of IF_LOCK? */
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->sc_wd_timer = 0;
}
@@ -4424,7 +5113,7 @@ ath_draintxq(struct ath_softc *sc)
* Disable the receive h/w in preparation for a reset.
*/
static void
-ath_stoprecv(struct ath_softc *sc)
+ath_stoprecv(struct ath_softc *sc, int dodelay)
{
#define PA2DESC(_sc, _pa) \
((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
@@ -4434,7 +5123,8 @@ ath_stoprecv(struct ath_softc *sc)
ath_hal_stoppcurecv(ah); /* disable PCU */
ath_hal_setrxfilter(ah, 0); /* clear recv filter */
ath_hal_stopdmarecv(ah); /* disable DMA engine */
- DELAY(3000); /* 3ms is long enough for 1 frame */
+ if (dodelay)
+ DELAY(3000); /* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
struct ath_buf *bf;
@@ -4443,7 +5133,7 @@ ath_stoprecv(struct ath_softc *sc)
printf("%s: rx queue %p, link %p\n", __func__,
(caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
ix = 0;
- STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
+ TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
struct ath_desc *ds = bf->bf_desc;
struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
@@ -4473,7 +5163,7 @@ ath_startrecv(struct ath_softc *sc)
sc->sc_rxlink = NULL;
sc->sc_rxpending = NULL;
- STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
+ TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
int error = ath_rxbuf_init(sc, bf);
if (error != 0) {
DPRINTF(sc, ATH_DEBUG_RECV,
@@ -4483,7 +5173,7 @@ ath_startrecv(struct ath_softc *sc)
}
}
- bf = STAILQ_FIRST(&sc->sc_rxbuf);
+ bf = TAILQ_FIRST(&sc->sc_rxbuf);
ath_hal_putrxbuf(ah, bf->bf_daddr);
ath_hal_rxena(ah); /* enable recv descriptors */
ath_mode_init(sc); /* set filters, etc. */
@@ -4521,6 +5211,22 @@ ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
struct ath_hal *ah = sc->sc_ah;
+ int ret = 0;
+ int dointr = 0;
+
+ /* Treat this as an interface reset */
+ ATH_PCU_LOCK(sc);
+ if (sc->sc_inreset_cnt > 0)
+ device_printf(sc->sc_dev, "%s: danger! concurrent reset!\n",
+ __func__);
+ sc->sc_inreset_cnt++;
+ if (chan != sc->sc_curchan) {
+ dointr = 1;
+ /* XXX only do this if inreset_cnt is 1? */
+ ath_hal_intrset(ah, 0);
+ }
+ ATH_PCU_UNLOCK(sc);
+ ath_txrx_stop(sc);
DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
__func__, ieee80211_chan2ieee(ic, chan),
@@ -4533,15 +5239,27 @@ ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
* hardware at the new frequency, and then re-enable
* the relevant bits of the h/w.
*/
+#if 0
ath_hal_intrset(ah, 0); /* disable interrupts */
- ath_draintxq(sc); /* clear pending tx frames */
- ath_stoprecv(sc); /* turn off frame recv */
+#endif
+ ath_stoprecv(sc, 1); /* turn off frame recv */
+ /*
+ * First, handle completed TX/RX frames.
+ */
+ ath_rx_proc(sc, 0);
+ ath_draintxq(sc, ATH_RESET_NOLOSS);
+ /*
+ * Next, flush the non-scheduled frames.
+ */
+ ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
+
if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
if_printf(ifp, "%s: unable to reset "
"channel %u (%u MHz, flags 0x%x), hal status %u\n",
__func__, ieee80211_chan2ieee(ic, chan),
chan->ic_freq, chan->ic_flags, status);
- return EIO;
+ ret = EIO;
+ goto finish;
}
sc->sc_diversity = ath_hal_getdiversity(ah);
@@ -4554,7 +5272,8 @@ ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
if (ath_startrecv(sc) != 0) {
if_printf(ifp, "%s: unable to restart recv logic\n",
__func__);
- return EIO;
+ ret = EIO;
+ goto finish;
}
/*
@@ -4576,12 +5295,28 @@ ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
ath_beacon_config(sc, NULL);
}
+#if 0
/*
* Re-enable interrupts.
*/
ath_hal_intrset(ah, sc->sc_imask);
+#endif
}
- return 0;
+
+finish:
+ ATH_PCU_LOCK(sc);
+ sc->sc_inreset_cnt--;
+ /* XXX only do this if sc_inreset_cnt == 0? */
+ if (dointr)
+ ath_hal_intrset(ah, sc->sc_imask);
+ ATH_PCU_UNLOCK(sc);
+
+ /* XXX do this inside of IF_LOCK? */
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ ath_txrx_start(sc);
+ /* XXX ath_start? */
+
+ return ret;
}
/*
@@ -4624,7 +5359,18 @@ ath_calibrate(void *arg)
DPRINTF(sc, ATH_DEBUG_CALIBRATE,
"%s: rfgain change\n", __func__);
sc->sc_stats.ast_per_rfgain++;
- ath_reset(ifp);
+ /*
+ * Drop lock - we can't hold it across the
+ * ath_reset() call. Instead, we'll drop
+ * out here, do a reset, then reschedule
+ * the callout.
+ */
+ callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
+ sc->sc_resetcal = 0;
+ sc->sc_doresetcal = AH_TRUE;
+ ATH_UNLOCK(sc);
+ ath_reset(ifp, ATH_RESET_NOLOSS);
+ return;
}
/*
* If this long cal is after an idle period, then
@@ -5004,7 +5750,7 @@ ath_setup_stationkey(struct ieee80211_node *ni)
ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
/* NB: this will create a pass-thru key entry */
- ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss);
+ ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
}
}
@@ -5279,6 +6025,7 @@ static void
ath_watchdog(void *arg)
{
struct ath_softc *sc = arg;
+ int do_reset = 0;
if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
struct ifnet *ifp = sc->sc_ifp;
@@ -5290,10 +6037,20 @@ ath_watchdog(void *arg)
hangs & 0xff ? "bb" : "mac", hangs);
} else
if_printf(ifp, "device timeout\n");
- ath_reset(ifp);
+ do_reset = 1;
ifp->if_oerrors++;
sc->sc_stats.ast_watchdog++;
}
+
+ /*
+ * We can't hold the lock across the ath_reset() call.
+ */
+ if (do_reset) {
+ ATH_UNLOCK(sc);
+ ath_reset(sc->sc_ifp, ATH_RESET_NOLOSS);
+ ATH_LOCK(sc);
+ }
+
callout_schedule(&sc->sc_wd_ch, hz);
}
diff --git a/sys/dev/ath/if_ath_ahb.c b/sys/dev/ath/if_ath_ahb.c
index a2bca05..21c3529 100644
--- a/sys/dev/ath/if_ath_ahb.c
+++ b/sys/dev/ath/if_ath_ahb.c
@@ -190,11 +190,13 @@ ath_ahb_attach(device_t dev)
}
ATH_LOCK_INIT(sc);
+ ATH_PCU_LOCK_INIT(sc);
error = ath_attach(AR9130_DEVID, sc);
if (error == 0) /* success */
return 0;
+ ATH_PCU_LOCK_DESTROY(sc);
ATH_LOCK_DESTROY(sc);
bus_dma_tag_destroy(sc->sc_dmat);
bad3:
@@ -234,6 +236,7 @@ ath_ahb_detach(device_t dev)
if (sc->sc_eepromdata)
free(sc->sc_eepromdata, M_TEMP);
+ ATH_PCU_LOCK_DESTROY(sc);
ATH_LOCK_DESTROY(sc);
return (0);
diff --git a/sys/dev/ath/if_ath_debug.c b/sys/dev/ath/if_ath_debug.c
index b5691ac..ac08f1f 100644
--- a/sys/dev/ath/if_ath_debug.c
+++ b/sys/dev/ath/if_ath_debug.c
@@ -123,33 +123,44 @@ ath_printrxbuf(struct ath_softc *sc, const struct ath_buf *bf,
}
void
-ath_printtxbuf(struct ath_softc *sc, const struct ath_buf *bf,
+ath_printtxbuf(struct ath_softc *sc, const struct ath_buf *first_bf,
u_int qnum, u_int ix, int done)
{
- const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
+ const struct ath_tx_status *ts = &first_bf->bf_last->bf_status.ds_txstat;
+ const struct ath_buf *bf = first_bf;
struct ath_hal *ah = sc->sc_ah;
const struct ath_desc *ds;
int i;
printf("Q%u[%3u]", qnum, ix);
- for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
- printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:04%x%s\n"
- " %08x %08x %08x %08x %08x %08x\n",
- ds, (const struct ath_desc *)bf->bf_daddr + i,
- ds->ds_link, ds->ds_data, bf->bf_txflags,
- !done ? "" : (ts->ts_status == 0) ? " *" : " !",
- ds->ds_ctl0, ds->ds_ctl1,
- ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
- if (ah->ah_magic == 0x20065416) {
- printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
- ds->ds_hw[4], ds->ds_hw[5], ds->ds_hw[6],
- ds->ds_hw[7], ds->ds_hw[8], ds->ds_hw[9],
- ds->ds_hw[10],ds->ds_hw[11]);
- printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
- ds->ds_hw[12],ds->ds_hw[13],ds->ds_hw[14],
- ds->ds_hw[15],ds->ds_hw[16],ds->ds_hw[17],
- ds->ds_hw[18], ds->ds_hw[19]);
+ while (bf != NULL) {
+ for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
+ printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:%04x%s\n"
+ " TXF: %04x Seq: %d swtry: %d ADDBAW?: %d DOBAW?: %d\n"
+ " %08x %08x %08x %08x %08x %08x\n",
+ ds, (const struct ath_desc *)bf->bf_daddr + i,
+ ds->ds_link, ds->ds_data, bf->bf_txflags,
+ !done ? "" : (ts->ts_status == 0) ? " *" : " !",
+ bf->bf_state.bfs_flags,
+ bf->bf_state.bfs_seqno,
+ bf->bf_state.bfs_retries,
+ bf->bf_state.bfs_addedbaw,
+ bf->bf_state.bfs_dobaw,
+ ds->ds_ctl0, ds->ds_ctl1,
+ ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
+ if (ah->ah_magic == 0x20065416) {
+ printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ ds->ds_hw[4], ds->ds_hw[5], ds->ds_hw[6],
+ ds->ds_hw[7], ds->ds_hw[8], ds->ds_hw[9],
+ ds->ds_hw[10],ds->ds_hw[11]);
+ printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ ds->ds_hw[12],ds->ds_hw[13],ds->ds_hw[14],
+ ds->ds_hw[15],ds->ds_hw[16],ds->ds_hw[17],
+ ds->ds_hw[18], ds->ds_hw[19]);
+ }
}
+ printf(" [end]\n");
+ bf = bf->bf_next;
}
}
diff --git a/sys/dev/ath/if_ath_debug.h b/sys/dev/ath/if_ath_debug.h
index c21914f..58199ff 100644
--- a/sys/dev/ath/if_ath_debug.h
+++ b/sys/dev/ath/if_ath_debug.h
@@ -57,6 +57,11 @@ enum {
ATH_DEBUG_TDMA = 0x00800000, /* TDMA processing */
ATH_DEBUG_TDMA_TIMER = 0x01000000, /* TDMA timer processing */
ATH_DEBUG_REGDOMAIN = 0x02000000, /* regulatory processing */
+ ATH_DEBUG_SW_TX = 0x04000000, /* per-packet software TX */
+ ATH_DEBUG_SW_TX_BAW = 0x08000000, /* BAW handling */
+ ATH_DEBUG_SW_TX_CTRL = 0x10000000, /* queue control */
+ ATH_DEBUG_SW_TX_AGGR = 0x20000000, /* aggregate TX */
+ ATH_DEBUG_SW_TX_RETRIES = 0x40000000, /* software TX retries */
ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */
ATH_DEBUG_ANY = 0xffffffff
};
diff --git a/sys/dev/ath/if_ath_keycache.c b/sys/dev/ath/if_ath_keycache.c
index 842f766..e959c7a 100644
--- a/sys/dev/ath/if_ath_keycache.c
+++ b/sys/dev/ath/if_ath_keycache.c
@@ -178,7 +178,8 @@ ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
* cache slots for TKIP with hardware MIC support.
*/
int
-ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
+ath_keyset(struct ath_softc *sc, struct ieee80211vap *vap,
+ const struct ieee80211_key *k,
struct ieee80211_node *bss)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
@@ -212,7 +213,23 @@ ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
} else
hk.kv_type = HAL_CIPHER_CLR;
- if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
+ /*
+ * XXX TODO: check this:
+ *
+ * Group keys on hardware that supports multicast frame
+ * key search should only be done in adhoc/hostap mode,
+ * not STA mode.
+ *
+ * XXX TODO: what about mesh, tdma?
+ */
+#if 0
+ if ((vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_IBSS) &&
+#else
+ if (
+#endif
+ (k->wk_flags & IEEE80211_KEY_GROUP) &&
+ sc->sc_mcastkey) {
/*
* Group keys on hardware that supports multicast frame
* key search use a MAC that is the sender's address with
@@ -493,5 +510,5 @@ ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
{
struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
- return ath_keyset(sc, k, vap->iv_bss);
+ return ath_keyset(sc, vap, k, vap->iv_bss);
}
diff --git a/sys/dev/ath/if_ath_keycache.h b/sys/dev/ath/if_ath_keycache.h
index f1696cb..0b79e6f 100644
--- a/sys/dev/ath/if_ath_keycache.h
+++ b/sys/dev/ath/if_ath_keycache.h
@@ -37,7 +37,7 @@ extern int ath_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
extern int ath_key_delete(struct ieee80211vap *, const struct ieee80211_key *);
extern int ath_key_set(struct ieee80211vap *, const struct ieee80211_key *,
const u_int8_t mac[IEEE80211_ADDR_LEN]);
-extern int ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
- struct ieee80211_node *bss);
+extern int ath_keyset(struct ath_softc *sc, struct ieee80211vap *vap,
+ const struct ieee80211_key *k, struct ieee80211_node *bss);
#endif
diff --git a/sys/dev/ath/if_ath_misc.h b/sys/dev/ath/if_ath_misc.h
index 35feea2..c48590e 100644
--- a/sys/dev/ath/if_ath_misc.h
+++ b/sys/dev/ath/if_ath_misc.h
@@ -52,7 +52,19 @@ extern int ath_tx_findrix(const struct ath_softc *sc, uint8_t rate);
extern struct ath_buf * ath_getbuf(struct ath_softc *sc);
extern struct ath_buf * _ath_getbuf_locked(struct ath_softc *sc);
+extern struct ath_buf * ath_buf_clone(struct ath_softc *sc,
+ const struct ath_buf *bf);
+extern void ath_freebuf(struct ath_softc *sc, struct ath_buf *bf);
-extern int ath_reset(struct ifnet *);
+extern int ath_reset(struct ifnet *, ATH_RESET_TYPE);
+extern void ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq);
+extern void ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf,
+ int fail);
+extern void ath_tx_update_ratectrl(struct ath_softc *sc,
+ struct ieee80211_node *ni, struct ath_rc_series *rc,
+ struct ath_tx_status *ts, int frmlen, int nframes, int nbad);
+
+extern void ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf,
+ int status);
#endif
diff --git a/sys/dev/ath/if_ath_pci.c b/sys/dev/ath/if_ath_pci.c
index 4824773..536930b 100644
--- a/sys/dev/ath/if_ath_pci.c
+++ b/sys/dev/ath/if_ath_pci.c
@@ -78,8 +78,10 @@ struct ath_pci_softc {
static void
ath_pci_setup(device_t dev)
{
+#ifdef ATH_PCI_LATENCY_WAR
/* Override the system latency timer */
pci_write_config(dev, PCIR_LATTIMER, 0x80, 1);
+#endif
/* If a PCI NIC, force wakeup */
#ifdef ATH_PCI_WAKEUP_WAR
@@ -190,11 +192,13 @@ ath_pci_attach(device_t dev)
}
ATH_LOCK_INIT(sc);
+ ATH_PCU_LOCK_INIT(sc);
error = ath_attach(pci_get_device(dev), sc);
if (error == 0) /* success */
return 0;
+ ATH_PCU_LOCK_DESTROY(sc);
ATH_LOCK_DESTROY(sc);
bus_dma_tag_destroy(sc->sc_dmat);
bad3:
@@ -230,6 +234,7 @@ ath_pci_detach(device_t dev)
bus_dma_tag_destroy(sc->sc_dmat);
bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, psc->sc_sr);
+ ATH_PCU_LOCK_DESTROY(sc);
ATH_LOCK_DESTROY(sc);
return (0);
diff --git a/sys/dev/ath/if_ath_sysctl.c b/sys/dev/ath/if_ath_sysctl.c
index 9e52f79..5323504 100644
--- a/sys/dev/ath/if_ath_sysctl.c
+++ b/sys/dev/ath/if_ath_sysctl.c
@@ -263,7 +263,8 @@ ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
if (error || !req->newptr)
return error;
return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL :
- (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) ?
+ ath_reset(ifp, ATH_RESET_NOLOSS) : 0;
}
static int
@@ -295,7 +296,70 @@ ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
return 0;
if (!ath_hal_setrfkill(ah, rfkill))
return EINVAL;
- return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
+ return (ifp->if_drv_flags & IFF_DRV_RUNNING) ?
+ ath_reset(ifp, ATH_RESET_FULL) : 0;
+}
+
+static int
+ath_sysctl_txagg(SYSCTL_HANDLER_ARGS)
+{
+ struct ath_softc *sc = arg1;
+ int i, t, param = 0;
+ int error;
+ struct ath_buf *bf;
+
+ error = sysctl_handle_int(oidp, &param, 0, req);
+ if (error || !req->newptr)
+ return error;
+
+ if (param != 1)
+ return 0;
+
+ printf("no tx bufs (empty list): %d\n", sc->sc_stats.ast_tx_getnobuf);
+ printf("no tx bufs (was busy): %d\n", sc->sc_stats.ast_tx_getbusybuf);
+
+ printf("aggr single packet: %d\n",
+ sc->sc_aggr_stats.aggr_single_pkt);
+ printf("aggr single packet w/ BAW closed: %d\n",
+ sc->sc_aggr_stats.aggr_baw_closed_single_pkt);
+ printf("aggr non-baw packet: %d\n",
+ sc->sc_aggr_stats.aggr_nonbaw_pkt);
+ printf("aggr aggregate packet: %d\n",
+ sc->sc_aggr_stats.aggr_aggr_pkt);
+ printf("aggr single packet low hwq: %d\n",
+ sc->sc_aggr_stats.aggr_low_hwq_single_pkt);
+ printf("aggr sched, no work: %d\n",
+ sc->sc_aggr_stats.aggr_sched_nopkt);
+ for (i = 0; i < 64; i++) {
+ printf("%2d: %10d ", i, sc->sc_aggr_stats.aggr_pkts[i]);
+ if (i % 4 == 3)
+ printf("\n");
+ }
+ printf("\n");
+
+ for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
+ if (ATH_TXQ_SETUP(sc, i)) {
+ printf("HW TXQ %d: axq_depth=%d, axq_aggr_depth=%d\n",
+ i,
+ sc->sc_txq[i].axq_depth,
+ sc->sc_txq[i].axq_aggr_depth);
+ }
+ }
+
+ i = t = 0;
+ ATH_TXBUF_LOCK(sc);
+ TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) {
+ if (bf->bf_flags & ATH_BUF_BUSY) {
+ printf("Busy: %d\n", t);
+ i++;
+ }
+ t++;
+ }
+ ATH_TXBUF_UNLOCK(sc);
+ printf("Total TX buffers: %d; Total TX buffers busy: %d\n",
+ t, i);
+
+ return 0;
}
static int
@@ -366,7 +430,7 @@ ath_sysctl_intmit(SYSCTL_HANDLER_ARGS)
* things in an inconsistent state.
*/
if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
- ath_reset(sc->sc_ifp);
+ ath_reset(sc->sc_ifp, ATH_RESET_NOLOSS);
return 0;
}
@@ -387,6 +451,24 @@ ath_sysctl_setcca(SYSCTL_HANDLER_ARGS)
}
#endif /* IEEE80211_SUPPORT_TDMA */
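+/*
+ * Usage sketch, assuming device unit 0 (the handler is attached as
+ * the "forcebstuck" node in ath_sysctlattach() below):
+ *
+ *	# sysctl dev.ath.0.forcebstuck=1
+ *
+ * Any non-zero value queues the beacon-stuck task; zero is a no-op.
+ */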
+static int
+ath_sysctl_forcebstuck(SYSCTL_HANDLER_ARGS)
+{
+ struct ath_softc *sc = arg1;
+ int val = 0;
+ int error;
+
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return error;
+ if (val == 0)
+ return 0;
+
+ taskqueue_enqueue_fast(sc->sc_tq, &sc->sc_bstucktask);
+ val = 0;
+ return 0;
+}
+
void
ath_sysctlattach(struct ath_softc *sc)
{
@@ -465,6 +547,15 @@ ath_sysctlattach(struct ath_softc *sc)
"rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_rfkill, "I", "enable/disable RF kill switch");
}
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "txagg", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
+ ath_sysctl_txagg, "I", "");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "forcebstuck", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
+ ath_sysctl_forcebstuck, "I", "");
+
if (ath_hal_hasintmit(ah)) {
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"intmit", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
@@ -474,6 +565,17 @@ ath_sysctlattach(struct ath_softc *sc)
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"monpass", CTLFLAG_RW, &sc->sc_monpass, 0,
"mask of error frames to pass when monitoring");
+
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "hwq_limit", CTLFLAG_RW, &sc->sc_hwq_limit, 0,
+ "");
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "tid_hwq_lo", CTLFLAG_RW, &sc->sc_tid_hwq_lo, 0,
+ "");
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "tid_hwq_hi", CTLFLAG_RW, &sc->sc_tid_hwq_hi, 0,
+ "");
+
#ifdef IEEE80211_SUPPORT_TDMA
if (ath_hal_macversion(ah) > 0x78) {
sc->sc_tdmadbaprep = 2;
@@ -510,6 +612,8 @@ ath_sysctl_clearstats(SYSCTL_HANDLER_ARGS)
if (val == 0)
return 0; /* Not clearing the stats is still valid */
memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
+ memset(&sc->sc_aggr_stats, 0, sizeof(sc->sc_aggr_stats));
+
val = 0;
return 0;
}
@@ -789,4 +893,16 @@ ath_sysctl_hal_attach(struct ath_softc *sc)
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "swba_backoff", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_additional_swba_backoff, 0,
"Atheros HAL additional SWBA backoff time");
+
+ sc->sc_ah->ah_config.ah_force_full_reset = 0;
+ SYSCTL_ADD_INT(ctx, child, OID_AUTO, "force_full_reset", CTLFLAG_RW,
+ &sc->sc_ah->ah_config.ah_force_full_reset, 0,
+ "Force full chip reset rather than a warm reset");
+
+ /*
+ * This is initialised by the driver.
+ */
+ SYSCTL_ADD_INT(ctx, child, OID_AUTO, "serialise_reg_war", CTLFLAG_RW,
+ &sc->sc_ah->ah_config.ah_serialise_reg_war, 0,
+ "Force register access serialisation");
}
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index f4cee6d..5625bf2 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -77,6 +77,7 @@ __FBSDID("$FreeBSD$");
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
+#include <net80211/ieee80211_ht.h>
#include <net/bpf.h>
@@ -100,6 +101,20 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/if_ath_tx_ht.h>
/*
+ * How many retries to perform in software
+ */
+#define SWMAX_RETRIES 10
+
+static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
+ int tid);
+static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
+ int tid);
+static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
+ struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
+static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
+ struct ieee80211_node *ni, struct mbuf *m0, int *tid);
+
+/*
* Whether to use the 11n rate scenario functions or not
*/
static inline int
@@ -108,6 +123,56 @@ ath_tx_is_11n(struct ath_softc *sc)
return (sc->sc_ah->ah_magic == 0x20065416);
}
+/*
+ * Obtain the current TID from the given frame.
+ *
+ * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
+ * This has implications for which AC/priority the packet is placed
+ * in.
+ */
+static int
+ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
+{
+ const struct ieee80211_frame *wh;
+ int pri = M_WME_GETAC(m0);
+
+ wh = mtod(m0, const struct ieee80211_frame *);
+ if (! IEEE80211_QOS_HAS_SEQ(wh))
+ return IEEE80211_NONQOS_TID;
+ else
+ return WME_AC_TO_TID(pri);
+}
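+
+/*
+ * For example: a QoS data frame tagged WME_AC_VI maps to the
+ * corresponding TID via WME_AC_TO_TID(), whilst a non-QoS frame
+ * always yields IEEE80211_NONQOS_TID (16), whatever its WME
+ * classification.
+ */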
+
+/*
+ * Determine what the correct AC queue for the given frame
+ * should be.
+ *
+ * This code assumes that the TIDs map consistently to
+ * the underlying hardware (or software) ath_txq.
+ * Since the sender may try to set an AC which is
+ * arbitrary, non-QoS TIDs may end up being put on
+ * completely different ACs. There's no way to put a
+ * TID into multiple ath_txq's for scheduling, so
+ * for now we override the AC/TXQ selection and set
+ * non-QOS TID frames into the BE queue.
+ *
+ * This may be completely incorrect - specifically,
+ * some management frames may end up out of order
+ * compared to the QoS traffic they're controlling.
+ * I'll look into this later.
+ */
+static int
+ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
+{
+ const struct ieee80211_frame *wh;
+ int pri = M_WME_GETAC(m0);
+ wh = mtod(m0, const struct ieee80211_frame *);
+ if (IEEE80211_QOS_HAS_SEQ(wh))
+ return pri;
+
+ return WME_AC_BE;
+}
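+
+/*
+ * For example: a QoS voice frame keeps WME_AC_VO here, but a
+ * non-QoS frame tagged as VO is forced down to WME_AC_BE, so all
+ * non-QoS traffic shares a single TXQ and stays in order.
+ */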
+
void
ath_txfrag_cleanup(struct ath_softc *sc,
ath_bufhead *frags, struct ieee80211_node *ni)
@@ -116,10 +181,10 @@ ath_txfrag_cleanup(struct ath_softc *sc,
ATH_TXBUF_LOCK_ASSERT(sc);
- STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
+ TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
/* NB: bf assumed clean */
- STAILQ_REMOVE_HEAD(frags, bf_list);
- STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
+ TAILQ_REMOVE(frags, bf, bf_list);
+ TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
ieee80211_node_decref(ni);
}
}
@@ -144,11 +209,11 @@ ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
break;
}
ieee80211_node_incref(ni);
- STAILQ_INSERT_TAIL(frags, bf, bf_list);
+ TAILQ_INSERT_TAIL(frags, bf, bf_list);
}
ATH_TXBUF_UNLOCK(sc);
- return !STAILQ_EMPTY(frags);
+ return !TAILQ_EMPTY(frags);
}
/*
@@ -225,8 +290,11 @@ ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
return 0;
}
+/*
+ * Chain together segments+descriptors for a non-11n frame.
+ */
static void
-ath_tx_chaindesclist(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
+ath_tx_chaindesclist(struct ath_softc *sc, struct ath_buf *bf)
{
struct ath_hal *ah = sc->sc_ah;
struct ath_desc *ds, *ds0;
@@ -252,17 +320,170 @@ ath_tx_chaindesclist(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *
"%s: %d: %08x %08x %08x %08x %08x %08x\n",
__func__, i, ds->ds_link, ds->ds_data,
ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
+ bf->bf_lastds = ds;
}
-
}
+/*
+ * Fill in the descriptor list for an aggregate subframe.
+ *
+ * The subframe is returned with the ds_link field of its last
+ * descriptor set to 0.
+ */
static void
-ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
+ath_tx_chaindesclist_subframe(struct ath_softc *sc, struct ath_buf *bf)
{
struct ath_hal *ah = sc->sc_ah;
+ struct ath_desc *ds, *ds0;
+ int i;
+
+ ds0 = ds = bf->bf_desc;
+
+ /*
+ * There's no need to call ath_hal_setupfirsttxdesc here;
+ * that's only done for the first frame in an aggregate.
+ */
+ for (i = 0; i < bf->bf_nseg; i++, ds++) {
+ ds->ds_data = bf->bf_segs[i].ds_addr;
+ if (i == bf->bf_nseg - 1)
+ ds->ds_link = 0;
+ else
+ ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
+
+ /*
+ * This performs the setup for an aggregate frame.
+ * This includes enabling the aggregate flags if needed.
+ */
+ ath_hal_chaintxdesc(ah, ds,
+ bf->bf_state.bfs_pktlen,
+ bf->bf_state.bfs_hdrlen,
+ HAL_PKT_TYPE_AMPDU, /* forces aggregate bits to be set */
+ bf->bf_state.bfs_keyix,
+ 0, /* cipher, calculated from keyix */
+ bf->bf_state.bfs_ndelim,
+ bf->bf_segs[i].ds_len, /* segment length */
+ i == 0, /* first segment */
+ i == bf->bf_nseg - 1 /* last segment */
+ );
+
+ DPRINTF(sc, ATH_DEBUG_XMIT,
+ "%s: %d: %08x %08x %08x %08x %08x %08x\n",
+ __func__, i, ds->ds_link, ds->ds_data,
+ ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
+ bf->bf_lastds = ds;
+ }
+}
+
+/*
+ * Set up the segments+descriptors for an 11n aggregate.
+ * bf_first is the first buffer in the aggregate.
+ * The descriptor list must already have been linked together using
+ * bf->bf_next.
+ */
+static void
+ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
+{
+ struct ath_buf *bf, *bf_prev = NULL;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
+ __func__, bf_first->bf_state.bfs_nframes,
+ bf_first->bf_state.bfs_al);
+
+ /*
+ * Setup all descriptors of all subframes.
+ */
+ bf = bf_first;
+ while (bf != NULL) {
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
+ "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
+ __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
+ SEQNO(bf->bf_state.bfs_seqno));
+
+ /* Sub-frame setup */
+ ath_tx_chaindesclist_subframe(sc, bf);
+
+ /*
+ * Link the last descriptor of the previous frame
+ * to the beginning descriptor of this frame.
+ */
+ if (bf_prev != NULL)
+ bf_prev->bf_lastds->ds_link = bf->bf_daddr;
+
+ /* Save a copy so we can link the next descriptor in */
+ bf_prev = bf;
+ bf = bf->bf_next;
+ }
+
+ /*
+ * Set up the first descriptor of the first frame.
+ * chaintxdesc() overwrites the descriptor entries;
+ * setupfirsttxdesc() merges fields into what's already there.
+ * Without it, various fields aren't set correctly (eg flags).
+ */
+ ath_hal_setupfirsttxdesc(sc->sc_ah,
+ bf_first->bf_desc,
+ bf_first->bf_state.bfs_al,
+ bf_first->bf_state.bfs_flags | HAL_TXDESC_INTREQ,
+ bf_first->bf_state.bfs_txpower,
+ bf_first->bf_state.bfs_txrate0,
+ bf_first->bf_state.bfs_try0,
+ bf_first->bf_state.bfs_txantenna,
+ bf_first->bf_state.bfs_ctsrate,
+ bf_first->bf_state.bfs_ctsduration);
+
+ /*
+ * Setup the last descriptor in the list.
+ * bf_prev points to the last; bf is NULL here.
+ */
+ ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_desc, bf_first->bf_desc);
- /* Fill in the details in the descriptor list */
- ath_tx_chaindesclist(sc, txq, bf);
+ /*
+ * Set the first descriptor bf_lastds field to point to
+ * the last descriptor in the last subframe, that's where
+ * the status update will occur.
+ */
+ bf_first->bf_lastds = bf_prev->bf_lastds;
+
+ /*
+ * And bf_last in the first descriptor points to the end of
+ * the aggregate list.
+ */
+ bf_first->bf_last = bf_prev;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
+}
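+
+/*
+ * The resulting layout for, say, a three-subframe aggregate is:
+ *
+ *   bf_first --bf_next--> bf --bf_next--> bf_prev --bf_next--> NULL
+ *   [ds..ds]              [ds..ds]        [ds..ds]
+ *       ds_link------------^   ds_link-----^   last ds_link = 0
+ *
+ * bf_first->bf_lastds points at the final descriptor of the final
+ * subframe; bf_first->bf_last points at the final ath_buf.
+ */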
+
+static void
+ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_buf *bf)
+{
+ ATH_TXQ_LOCK_ASSERT(txq);
+ KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
+ ("%s: busy status 0x%x", __func__, bf->bf_flags));
+ if (txq->axq_link != NULL) {
+ struct ath_buf *last = ATH_TXQ_LAST(txq, axq_q_s);
+ struct ieee80211_frame *wh;
+
+ /* mark previous frame */
+ wh = mtod(last->bf_m, struct ieee80211_frame *);
+ wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
+ bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
+ BUS_DMASYNC_PREWRITE);
+
+ /* link descriptor */
+ *txq->axq_link = bf->bf_daddr;
+ }
+ ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
+ txq->axq_link = &bf->bf_lastds->ds_link;
+}
+
+/*
+ * Hand-off packet to a hardware queue.
+ */
+static void
+ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
+{
+ struct ath_hal *ah = sc->sc_ah;
/*
* Insert the frame on the outbound list and pass it on
@@ -272,10 +493,44 @@ ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
* the SWBA handler since frames only go out on DTIM and
* to avoid possible races.
*/
- ATH_TXQ_LOCK(txq);
+ ATH_TXQ_LOCK_ASSERT(txq);
KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
- ("busy status 0x%x", bf->bf_flags));
- if (txq->axq_qnum != ATH_TXQ_SWQ) {
+ ("%s: busy status 0x%x", __func__, bf->bf_flags));
+ KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
+ ("ath_tx_handoff_hw called for mcast queue"));
+
+#if 0
+ /*
+ * This causes a LOR. Find out where the PCU lock is being
+ * held whilst the TXQ lock is grabbed - that shouldn't
+ * be occurring.
+ */
+ ATH_PCU_LOCK(sc);
+ if (sc->sc_inreset_cnt) {
+ ATH_PCU_UNLOCK(sc);
+ DPRINTF(sc, ATH_DEBUG_RESET,
+ "%s: called with sc_in_reset != 0\n",
+ __func__);
+ DPRINTF(sc, ATH_DEBUG_XMIT,
+ "%s: queued: TXDP[%u] = %p (%p) depth %d\n",
+ __func__, txq->axq_qnum,
+ (caddr_t)bf->bf_daddr, bf->bf_desc,
+ txq->axq_depth);
+ ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
+ if (bf->bf_state.bfs_aggr)
+ txq->axq_aggr_depth++;
+ /*
+ * There's no need to update axq_link; the hardware
+ * is in reset and once the reset is complete, any
+ * non-empty queues will simply have DMA restarted.
+ */
+ return;
+ }
+ ATH_PCU_UNLOCK(sc);
+#endif
+
+	/* For now, so as not to generate whitespace diffs */
+ if (1) {
#ifdef IEEE80211_SUPPORT_TDMA
int qbusy;
@@ -323,7 +578,7 @@ ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
* is/was empty.
*/
ath_hal_puttxbuf(ah, txq->axq_qnum,
- STAILQ_FIRST(&txq->axq_q)->bf_daddr);
+ TAILQ_FIRST(&txq->axq_q)->bf_daddr);
txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
"%s: Q%u restarted\n", __func__,
@@ -347,26 +602,52 @@ ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
(caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
}
#endif /* IEEE80211_SUPPORT_TDMA */
- txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
+ if (bf->bf_state.bfs_aggr)
+ txq->axq_aggr_depth++;
+ txq->axq_link = &bf->bf_lastds->ds_link;
ath_hal_txstart(ah, txq->axq_qnum);
- } else {
- if (txq->axq_link != NULL) {
- struct ath_buf *last = ATH_TXQ_LAST(txq);
- struct ieee80211_frame *wh;
+ }
+}
- /* mark previous frame */
- wh = mtod(last->bf_m, struct ieee80211_frame *);
- wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
- bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
- BUS_DMASYNC_PREWRITE);
+/*
+ * Restart TX DMA for the given TXQ.
+ *
+ * This must be called whether the queue is empty or not.
+ */
+void
+ath_txq_restart_dma(struct ath_softc *sc, struct ath_txq *txq)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf;
- /* link descriptor */
- *txq->axq_link = bf->bf_daddr;
- }
- ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
- txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
- }
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_LOCK_ASSERT(txq);
+
+ /* This is always going to be cleared, empty or not */
+ txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
+
+ bf = TAILQ_FIRST(&txq->axq_q);
+ if (bf == NULL)
+ return;
+
+ ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+ txq->axq_link = &bf->bf_lastds->ds_link;
+ ath_hal_txstart(ah, txq->axq_qnum);
+}
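+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of this
+ * change): a post-reset caller could restart DMA on every active
+ * hardware TXQ like this, reusing the driver's existing
+ * ATH_TXQ_SETUP() test and sc->sc_txq[] array.
+ */
+#if 0
+static void
+ath_txq_restart_dma_all(struct ath_softc *sc)
+{
+	int i;
+
+	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
+		if (! ATH_TXQ_SETUP(sc, i))
+			continue;
+		ATH_TXQ_LOCK(&sc->sc_txq[i]);
+		ath_txq_restart_dma(sc, &sc->sc_txq[i]);
+		ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
+	}
+}
+#endif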
+
+/*
+ * Hand off a packet to the hardware (or mcast queue.)
+ *
+ * The relevant hardware txq should be locked.
+ */
+static void
+ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
+{
+ ATH_TXQ_LOCK_ASSERT(txq);
+
+ if (txq->axq_qnum == ATH_TXQ_SWQ)
+ ath_tx_handoff_mcast(sc, txq, bf);
+ else
+ ath_tx_handoff_hw(sc, txq, bf);
}
static int
@@ -422,7 +703,7 @@ ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
- int rix, int cix, int shortPreamble)
+ int cix, int shortPreamble)
{
uint8_t ctsrate;
@@ -442,7 +723,6 @@ ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
return ctsrate;
}
-
/*
* Calculate the RTS/CTS duration for legacy frames.
*/
@@ -488,9 +768,238 @@ ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
return ctsduration;
}
-int
-ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
- struct mbuf *m0)
+/*
+ * Update the given ath_buf with updated rts/cts setup and duration
+ * values.
+ *
+ * To support rate lookups for each software retry, the rts/cts rate
+ * and cts duration must be re-calculated.
+ *
+ * This function assumes the RTS/CTS flags have been set as needed;
+ * mrr has been disabled; and the rate control lookup has been done.
+ *
+ * XXX TODO: MRR need only be disabled for the pre-11n NICs.
+ * XXX The 11n NICs support per-rate RTS/CTS configuration.
+ */
+static void
+ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
+{
+ uint16_t ctsduration = 0;
+ uint8_t ctsrate = 0;
+ uint8_t rix = bf->bf_state.bfs_rc[0].rix;
+ uint8_t cix = 0;
+ const HAL_RATE_TABLE *rt = sc->sc_currates;
+
+ /*
+ * No RTS/CTS enabled? Don't bother.
+ */
+ if ((bf->bf_state.bfs_flags &
+ (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
+ /* XXX is this really needed? */
+ bf->bf_state.bfs_ctsrate = 0;
+ bf->bf_state.bfs_ctsduration = 0;
+ return;
+ }
+
+ /*
+ * If protection is enabled, use the protection rix control
+ * rate. Otherwise use the rate0 control rate.
+ */
+ if (bf->bf_state.bfs_doprot)
+ rix = sc->sc_protrix;
+ else
+ rix = bf->bf_state.bfs_rc[0].rix;
+
+ /*
+ * If the raw path has hard-coded ctsrate0 to something,
+ * use it.
+ */
+ if (bf->bf_state.bfs_ctsrate0 != 0)
+ cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
+ else
+ /* Control rate from above */
+ cix = rt->info[rix].controlRate;
+
+ /* Calculate the rtscts rate for the given cix */
+ ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
+ bf->bf_state.bfs_shpream);
+
+ /* The 11n chipsets do ctsduration calculations for you */
+ if (! ath_tx_is_11n(sc))
+ ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
+ bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
+ rt, bf->bf_state.bfs_flags);
+
+ /* Squirrel away in ath_buf */
+ bf->bf_state.bfs_ctsrate = ctsrate;
+ bf->bf_state.bfs_ctsduration = ctsduration;
+
+ /*
+ * Must disable multi-rate retry when using RTS/CTS.
+ * XXX TODO: only for pre-11n NICs.
+ */
+ bf->bf_state.bfs_ismrr = 0;
+ bf->bf_state.bfs_try0 =
+ bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
+}
+
+/*
+ * Setup the descriptor chain for a normal or fast-frame
+ * frame.
+ */
+static void
+ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ath_desc *ds = bf->bf_desc;
+ struct ath_hal *ah = sc->sc_ah;
+
+ ath_hal_setuptxdesc(ah, ds
+ , bf->bf_state.bfs_pktlen /* packet length */
+ , bf->bf_state.bfs_hdrlen /* header length */
+ , bf->bf_state.bfs_atype /* Atheros packet type */
+ , bf->bf_state.bfs_txpower /* txpower */
+ , bf->bf_state.bfs_txrate0
+ , bf->bf_state.bfs_try0 /* series 0 rate/tries */
+ , bf->bf_state.bfs_keyix /* key cache index */
+ , bf->bf_state.bfs_txantenna /* antenna mode */
+ , bf->bf_state.bfs_flags /* flags */
+ , bf->bf_state.bfs_ctsrate /* rts/cts rate */
+ , bf->bf_state.bfs_ctsduration /* rts/cts duration */
+ );
+
+ /*
+	 * This will be overridden when the descriptor chain is written.
+ */
+ bf->bf_lastds = ds;
+ bf->bf_last = bf;
+
+ /* XXX TODO: Setup descriptor chain */
+}
+
+/*
+ * Do a rate lookup.
+ *
+ * This performs a rate lookup for the given ath_buf only if it's required.
+ * Non-data frames and raw frames don't require it.
+ *
+ * This populates the primary and MRR entries; MRR values are
+ * then disabled later on if something requires it (eg RTS/CTS on
+ * pre-11n chipsets).
+ *
+ * This needs to be done before the RTS/CTS fields are calculated
+ * as they may depend upon the rate chosen.
+ */
+static void
+ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
+{
+ uint8_t rate, rix;
+ int try0;
+
+ if (! bf->bf_state.bfs_doratelookup)
+ return;
+
+ /* Get rid of any previous state */
+ bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
+
+ ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
+ ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
+ bf->bf_state.bfs_pktlen, &rix, &try0, &rate);
+
+ /* In case MRR is disabled, make sure rc[0] is setup correctly */
+ bf->bf_state.bfs_rc[0].rix = rix;
+ bf->bf_state.bfs_rc[0].ratecode = rate;
+ bf->bf_state.bfs_rc[0].tries = try0;
+
+ if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
+ ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
+ bf->bf_state.bfs_rc);
+ ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
+
+ sc->sc_txrix = rix; /* for LED blinking */
+ sc->sc_lastdatarix = rix; /* for fast frames */
+ bf->bf_state.bfs_try0 = try0;
+ bf->bf_state.bfs_txrate0 = rate;
+}
+
+/*
+ * Set the rate control fields in the given descriptor based on
+ * the bf_state fields and node state.
+ *
+ * The bfs fields should already be set with the relevant rate
+ * control information, including whether MRR is to be enabled.
+ *
+ * Since the FreeBSD HAL currently sets up the first TX rate
+ * in ath_hal_setuptxdesc(), this will setup the MRR
+ * conditionally for the pre-11n chips, and call ath_buf_set_rate
+ * unconditionally for 11n chips. These require the 11n rate
+ * scenario to be set if MCS rates are enabled, so it's easier
+ * to just always call it. The caller then only needs to set rates 2, 3
+ * and 4 if multi-rate retry is needed.
+ */
+static void
+ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
+ struct ath_buf *bf)
+{
+ struct ath_rc_series *rc = bf->bf_state.bfs_rc;
+
+ /* If mrr is disabled, blank tries 1, 2, 3 */
+ if (! bf->bf_state.bfs_ismrr)
+ rc[1].tries = rc[2].tries = rc[3].tries = 0;
+
+ /*
+ * Always call - that way a retried descriptor will
+ * have the MRR fields overwritten.
+ *
+ * XXX TODO: see if this is really needed - setting up
+ * the first descriptor should set the MRR fields to 0
+ * for us anyway.
+ */
+ if (ath_tx_is_11n(sc)) {
+ ath_buf_set_rate(sc, ni, bf);
+ } else {
+ ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
+ , rc[1].ratecode, rc[1].tries
+ , rc[2].ratecode, rc[2].tries
+ , rc[3].ratecode, rc[3].tries
+ );
+ }
+}
+
+/*
+ * Transmit the given frame to the hardware.
+ *
+ * The frame must already be setup; rate control must already have
+ * been done.
+ *
+ * XXX since the TXQ lock is being held here (and I dislike holding
+ * it for this long when not doing software aggregation), later on
+ * break this function into "setup_normal" and "xmit_normal". The
+ * lock only needs to be held for the ath_tx_handoff call.
+ */
+static void
+ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_buf *bf)
+{
+
+ ATH_TXQ_LOCK_ASSERT(txq);
+
+ /* Setup the descriptor before handoff */
+ ath_tx_do_ratelookup(sc, bf);
+ ath_tx_rate_fill_rcflags(sc, bf);
+ ath_tx_set_rtscts(sc, bf);
+ ath_tx_setds(sc, bf);
+ ath_tx_set_ratectrl(sc, bf->bf_node, bf);
+ ath_tx_chaindesclist(sc, bf);
+
+ /* Hand off to hardware */
+ ath_tx_handoff(sc, txq, bf);
+}
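+
+/*
+ * The rate lookup / rc flags / RTS-CTS / descriptor setup sequence
+ * above is repeated verbatim in the aggregate TX path; a hypothetical
+ * helper (sketch only, not introduced by this change) would be:
+ */
+#if 0
+static void
+ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf)
+{
+	ath_tx_do_ratelookup(sc, bf);
+	ath_tx_rate_fill_rcflags(sc, bf);
+	ath_tx_set_rtscts(sc, bf);
+	ath_tx_setds(sc, bf);
+	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
+	ath_tx_chaindesclist(sc, bf);
+}
+#endif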
+
+static int
+ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
+ struct ath_buf *bf, struct mbuf *m0)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ath_vap *avp = ATH_VAP(vap);
@@ -499,22 +1008,17 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf
struct ieee80211com *ic = ifp->if_l2com;
const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
int error, iswep, ismcast, isfrag, ismrr;
- int keyix, hdrlen, pktlen, try0;
- u_int8_t rix, txrate, ctsrate;
- u_int8_t cix = 0xff; /* NB: silence compiler */
+ int keyix, hdrlen, pktlen, try0 = 0;
+ u_int8_t rix = 0, txrate = 0;
struct ath_desc *ds;
struct ath_txq *txq;
struct ieee80211_frame *wh;
- u_int subtype, flags, ctsduration;
+ u_int subtype, flags;
HAL_PKT_TYPE atype;
const HAL_RATE_TABLE *rt;
HAL_BOOL shortPreamble;
struct ath_node *an;
u_int pri;
- uint8_t try[4], rate[4];
-
- bzero(try, sizeof(try));
- bzero(rate, sizeof(rate));
wh = mtod(m0, struct ieee80211_frame *);
iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
@@ -528,7 +1032,8 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf
pktlen = m0->m_pkthdr.len - (hdrlen & 3);
/* Handle encryption twiddling if needed */
- if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, &pktlen, &keyix)) {
+ if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
+ &pktlen, &keyix)) {
ath_freetx(m0);
return EIO;
}
@@ -624,12 +1129,12 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf
txrate |= rt->info[rix].shortPreamble;
try0 = ATH_TXMAXTRY; /* XXX?too many? */
} else {
- ath_rate_findrate(sc, an, shortPreamble, pktlen,
- &rix, &try0, &txrate);
- sc->sc_txrix = rix; /* for LED blinking */
- sc->sc_lastdatarix = rix; /* for fast frames */
- if (try0 != ATH_TXMAXTRY)
- ismrr = 1;
+ /*
+ * Do rate lookup on each TX, rather than using
+ * the hard-coded TX information decided here.
+ */
+ ismrr = 1;
+ bf->bf_state.bfs_doratelookup = 1;
}
if (cap->cap_wmeParams[pri].wmep_noackPolicy)
flags |= HAL_TXDESC_NOACK;
@@ -660,7 +1165,6 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf
} else if (pktlen > vap->iv_rtsthreshold &&
(ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
- cix = rt->info[rix].controlRate;
sc->sc_stats.ast_tx_rts++;
}
if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */
@@ -683,22 +1187,20 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf
if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
rt->info[rix].phy == IEEE80211_T_OFDM &&
(flags & HAL_TXDESC_NOACK) == 0) {
+ bf->bf_state.bfs_doprot = 1;
/* XXX fragments must use CCK rates w/ protection */
- if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
+ if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
flags |= HAL_TXDESC_RTSENA;
- else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
+ } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
flags |= HAL_TXDESC_CTSENA;
- if (isfrag) {
- /*
- * For frags it would be desirable to use the
- * highest CCK rate for RTS/CTS. But stations
- * farther away may detect it at a lower CCK rate
- * so use the configured protection rate instead
- * (for now).
- */
- cix = rt->info[sc->sc_protrix].controlRate;
- } else
- cix = rt->info[sc->sc_protrix].controlRate;
+ }
+ /*
+ * For frags it would be desirable to use the
+ * highest CCK rate for RTS/CTS. But stations
+ * farther away may detect it at a lower CCK rate
+ * so use the configured protection rate instead
+ * (for now).
+ */
sc->sc_stats.ast_tx_protect++;
}
@@ -756,23 +1258,28 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf
}
/*
- * Calculate RTS/CTS rate and duration if needed.
+ * Determine if a tx interrupt should be generated for
+ * this descriptor. We take a tx interrupt to reap
+ * descriptors when the h/w hits an EOL condition or
+ * when the descriptor is specifically marked to generate
+ * an interrupt. We periodically mark descriptors in this
+	 * way to ensure timely replenishing of the supply needed
+	 * for sending frames. Deferring interrupts reduces system
+	 * load and potentially allows more concurrent work to be
+	 * done, but if done too aggressively it can cause senders to
+	 * back up.
+ *
+ * NB: use >= to deal with sc_txintrperiod changing
+ * dynamically through sysctl.
*/
- ctsduration = 0;
- if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
- ctsrate = ath_tx_get_rtscts_rate(ah, rt, rix, cix, shortPreamble);
+ if (flags & HAL_TXDESC_INTREQ) {
+ txq->axq_intrcnt = 0;
+ } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
+ flags |= HAL_TXDESC_INTREQ;
+ txq->axq_intrcnt = 0;
+ }
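+
+	/*
+	 * Worked example: with sc_txintrperiod set to 5 via sysctl,
+	 * every fifth frame queued here is marked HAL_TXDESC_INTREQ
+	 * (a frame that already requested an interrupt resets the
+	 * counter), so descriptor reaping runs at least once per
+	 * five frames in the steady state.
+	 */
+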
- /* The 11n chipsets do ctsduration calculations for you */
- if (! ath_tx_is_11n(sc))
- ctsduration = ath_tx_calc_ctsduration(ah, rix, cix, shortPreamble,
- pktlen, rt, flags);
- /*
- * Must disable multi-rate retry when using RTS/CTS.
- */
- ismrr = 0;
- try0 = ATH_TXMGTTRY; /* XXX */
- } else
- ctsrate = 0;
+ /* This point forward is actual TX bits */
/*
* At this point we are committed to sending the frame
@@ -801,71 +1308,187 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf
ieee80211_radiotap_tx(vap, m0);
}
+ /* Blank the legacy rate array */
+ bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
+
/*
- * Determine if a tx interrupt should be generated for
- * this descriptor. We take a tx interrupt to reap
- * descriptors when the h/w hits an EOL condition or
- * when the descriptor is specifically marked to generate
- * an interrupt. We periodically mark descriptors in this
- * way to insure timely replenishing of the supply needed
- * for sending frames. Defering interrupts reduces system
- * load and potentially allows more concurrent work to be
- * done but if done to aggressively can cause senders to
- * backup.
+ * ath_buf_set_rate needs at least one rate/try to setup
+ * the rate scenario.
+ */
+ bf->bf_state.bfs_rc[0].rix = rix;
+ bf->bf_state.bfs_rc[0].tries = try0;
+ bf->bf_state.bfs_rc[0].ratecode = txrate;
+
+ /* Store the decided rate index values away */
+ bf->bf_state.bfs_pktlen = pktlen;
+ bf->bf_state.bfs_hdrlen = hdrlen;
+ bf->bf_state.bfs_atype = atype;
+ bf->bf_state.bfs_txpower = ni->ni_txpower;
+ bf->bf_state.bfs_txrate0 = txrate;
+ bf->bf_state.bfs_try0 = try0;
+ bf->bf_state.bfs_keyix = keyix;
+ bf->bf_state.bfs_txantenna = sc->sc_txantenna;
+ bf->bf_state.bfs_flags = flags;
+ bf->bf_txflags = flags;
+ bf->bf_state.bfs_shpream = shortPreamble;
+
+ /* XXX this should be done in ath_tx_setrate() */
+ bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */
+ bf->bf_state.bfs_ctsrate = 0; /* calculated later */
+ bf->bf_state.bfs_ctsduration = 0;
+ bf->bf_state.bfs_ismrr = ismrr;
+
+ return 0;
+}
+
+/*
+ * Direct-dispatch the current frame to the hardware.
+ *
+ * This can be called by the net80211 code.
+ *
+ * XXX what about locking? Or, push the seqno assign into the
+ * XXX aggregate scheduler so its serialised?
+ */
+int
+ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
+ struct ath_buf *bf, struct mbuf *m0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ath_vap *avp = ATH_VAP(vap);
+ int r;
+ u_int pri;
+ int tid;
+ struct ath_txq *txq;
+ int ismcast;
+ const struct ieee80211_frame *wh;
+ int is_ampdu, is_ampdu_tx, is_ampdu_pending;
+ ieee80211_seq seqno;
+ uint8_t type, subtype;
+
+ /*
+ * Determine the target hardware queue.
*
- * NB: use >= to deal with sc_txintrperiod changing
- * dynamically through sysctl.
+ * For multicast frames, the txq gets overridden to be the
+ * software TXQ and it's done via direct-dispatch.
+ *
+ * For any other frame, we do a TID/QoS lookup inside the frame
+ * to see what the TID should be. If it's a non-QoS frame, the
+ * AC and TID are overridden. The TID/TXQ code assumes the
+ * TID is on a predictable hardware TXQ, so we don't support
+ * having a node TID queued to multiple hardware TXQs.
+ * This may change in the future but would require some locking
+ * fudgery.
*/
- if (flags & HAL_TXDESC_INTREQ) {
- txq->axq_intrcnt = 0;
- } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
- flags |= HAL_TXDESC_INTREQ;
- txq->axq_intrcnt = 0;
- }
+ pri = ath_tx_getac(sc, m0);
+ tid = ath_tx_gettid(sc, m0);
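+
+	/*
+	 * For example, assuming the usual net80211 WME classification,
+	 * a QoS voice data frame yields pri = WME_AC_VO and tid = 6 or 7,
+	 * so it targets sc_ac2q[WME_AC_VO] unless it's overridden onto
+	 * the multicast queue below.
+	 */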
- if (ath_tx_is_11n(sc)) {
- rate[0] = rix;
- try[0] = try0;
+ txq = sc->sc_ac2q[pri];
+ wh = mtod(m0, struct ieee80211_frame *);
+ ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+
+ /* A-MPDU TX */
+ is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
+ is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
+ is_ampdu = is_ampdu_tx | is_ampdu_pending;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
+ __func__, tid, pri, is_ampdu);
+
+ /* Multicast frames go onto the software multicast queue */
+ if (ismcast)
+ txq = &avp->av_mcastq;
+
+ if ((! is_ampdu) && (vap->iv_ps_sta || avp->av_mcastq.axq_depth))
+ txq = &avp->av_mcastq;
+
+ /* Do the generic frame setup */
+ /* XXX should just bzero the bf_state? */
+ bf->bf_state.bfs_dobaw = 0;
+
+ /* A-MPDU TX? Manually set sequence number */
+ /* Don't do it whilst pending; the net80211 layer still assigns them */
+ /* XXX do we need locking here? */
+ if (is_ampdu_tx) {
+ ATH_TXQ_LOCK(txq);
+ /*
+ * Always call; this function will
+ * handle making sure that null data frames
+ * don't get a sequence number from the current
+ * TID and thus mess with the BAW.
+ */
+ seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
+ if (IEEE80211_QOS_HAS_SEQ(wh) &&
+ subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
+ bf->bf_state.bfs_dobaw = 1;
+ }
+ ATH_TXQ_UNLOCK(txq);
}
/*
- * Formulate first tx descriptor with tx controls.
+ * If needed, the sequence number has been assigned.
+ * Squirrel it away somewhere easy to get to.
*/
- /* XXX check return value? */
- /* XXX is this ok to call for 11n descriptors? */
- /* XXX or should it go through the first, next, last 11n calls? */
- ath_hal_setuptxdesc(ah, ds
- , pktlen /* packet length */
- , hdrlen /* header length */
- , atype /* Atheros packet type */
- , ni->ni_txpower /* txpower */
- , txrate, try0 /* series 0 rate/tries */
- , keyix /* key cache index */
- , sc->sc_txantenna /* antenna mode */
- , flags /* flags */
- , ctsrate /* rts/cts rate */
- , ctsduration /* rts/cts duration */
- );
- bf->bf_txflags = flags;
+ bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
+
+ /* Is ampdu pending? fetch the seqno and print it out */
+ if (is_ampdu_pending)
+ DPRINTF(sc, ATH_DEBUG_SW_TX,
+ "%s: tid %d: ampdu pending, seqno %d\n",
+ __func__, tid, M_SEQNO_GET(m0));
+
+ /* This also sets up the DMA map */
+ r = ath_tx_normal_setup(sc, ni, bf, m0);
+
+ if (r != 0)
+ return r;
+
+ /* At this point m0 could have changed! */
+ m0 = bf->bf_m;
+
+#if 1
/*
- * Setup the multi-rate retry state only when we're
- * going to use it. This assumes ath_hal_setuptxdesc
- * initializes the descriptors (so we don't have to)
- * when the hardware supports multi-rate retry and
- * we don't use it.
+ * If it's a multicast frame, do a direct-dispatch to the
+ * destination hardware queue. Don't bother software
+ * queuing it.
*/
- if (ismrr) {
- if (ath_tx_is_11n(sc))
- ath_rate_getxtxrates(sc, an, rix, rate, try);
- else
- ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);
- }
-
- if (ath_tx_is_11n(sc)) {
- ath_buf_set_rate(sc, ni, bf, pktlen, flags, ctsrate, (atype == HAL_PKT_TYPE_PSPOLL), rate, try);
- }
+ /*
+ * If it's a BAR frame, do a direct dispatch to the
+ * destination hardware queue. Don't bother software
+ * queuing it, as the TID will now be paused.
+ * Sending a BAR frame can occur from the net80211 txa timer
+ * (ie, retries) or from the ath txtask (completion call.)
+ * It queues directly to hardware because the TID is paused
+ * at this point (and won't be unpaused until the BAR has
+ * either been TXed successfully or max retries has been
+ * reached.)
+ */
+ if (txq == &avp->av_mcastq) {
+ ATH_TXQ_LOCK(txq);
+ ath_tx_xmit_normal(sc, txq, bf);
+ ATH_TXQ_UNLOCK(txq);
+ } else if (type == IEEE80211_FC0_TYPE_CTL &&
+ subtype == IEEE80211_FC0_SUBTYPE_BAR) {
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: BAR: TX'ing direct\n", __func__);
+ ATH_TXQ_LOCK(txq);
+ ath_tx_xmit_normal(sc, txq, bf);
+ ATH_TXQ_UNLOCK(txq);
+ } else {
+ /* add to software queue */
+ ath_tx_swq(sc, ni, txq, bf);
+ }
+#else
+ /*
+ * For now, since there's no software queue,
+ * direct-dispatch to the hardware.
+ */
+ ATH_TXQ_LOCK(txq);
+ ath_tx_xmit_normal(sc, txq, bf);
+ ATH_TXQ_UNLOCK(txq);
+#endif
- ath_tx_handoff(sc, txq, bf);
return 0;
}
@@ -880,17 +1503,15 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
struct ieee80211vap *vap = ni->ni_vap;
int error, ismcast, ismrr;
int keyix, hdrlen, pktlen, try0, txantenna;
- u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
+ u_int8_t rix, txrate;
struct ieee80211_frame *wh;
- u_int flags, ctsduration;
+ u_int flags;
HAL_PKT_TYPE atype;
const HAL_RATE_TABLE *rt;
struct ath_desc *ds;
u_int pri;
- uint8_t try[4], rate[4];
-
- bzero(try, sizeof(try));
- bzero(rate, sizeof(rate));
+ int o_tid = -1;
+ int do_override;
wh = mtod(m0, struct ieee80211_frame *);
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
@@ -902,14 +1523,24 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
/* XXX honor IEEE80211_BPF_DATAPAD */
pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
+ __func__, ismcast);
+
/* Handle encryption twiddling if needed */
- if (! ath_tx_tag_crypto(sc, ni, m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, &hdrlen, &pktlen, &keyix)) {
+ if (! ath_tx_tag_crypto(sc, ni,
+ m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
+ &hdrlen, &pktlen, &keyix)) {
ath_freetx(m0);
return EIO;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
+ /* Do the generic frame setup */
+ /* XXX should just bzero the bf_state? */
+ bf->bf_state.bfs_dobaw = 0;
+
error = ath_tx_dmasetup(sc, bf, m0);
if (error != 0)
return error;
@@ -921,8 +1552,11 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
flags |= HAL_TXDESC_INTREQ; /* force interrupt */
if (params->ibp_flags & IEEE80211_BPF_RTS)
flags |= HAL_TXDESC_RTSENA;
- else if (params->ibp_flags & IEEE80211_BPF_CTS)
+ else if (params->ibp_flags & IEEE80211_BPF_CTS) {
+ /* XXX assume 11g/11n protection? */
+ bf->bf_state.bfs_doprot = 1;
flags |= HAL_TXDESC_CTSENA;
+ }
/* XXX leave ismcast to injector? */
if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
flags |= HAL_TXDESC_NOACK;
@@ -940,23 +1574,18 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
if (txantenna == 0) /* XXX? */
txantenna = sc->sc_txantenna;
- ctsduration = 0;
- if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
- cix = ath_tx_findrix(sc, params->ibp_ctsrate);
- ctsrate = ath_tx_get_rtscts_rate(ah, rt, rix, cix, params->ibp_flags & IEEE80211_BPF_SHORTPRE);
- /* The 11n chipsets do ctsduration calculations for you */
- if (! ath_tx_is_11n(sc))
- ctsduration = ath_tx_calc_ctsduration(ah, rix, cix,
- params->ibp_flags & IEEE80211_BPF_SHORTPRE, pktlen,
- rt, flags);
- /*
- * Must disable multi-rate retry when using RTS/CTS.
- */
- ismrr = 0; /* XXX */
- } else
- ctsrate = 0;
+ /*
+ * Since ctsrate is fixed, store it away for later
+ * use when the descriptor fields are being set.
+ */
+ if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
+ bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
pri = params->ibp_pri & 3;
+ /* Override pri if the frame isn't a QoS one */
+ if (! IEEE80211_QOS_HAS_SEQ(wh))
+ pri = ath_tx_getac(sc, m0);
+
/*
* NB: we mark all packets as type PSPOLL so the h/w won't
* set the sequence number, duration, etc.
@@ -988,77 +1617,95 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
*/
ds = bf->bf_desc;
/* XXX check return value? */
- ath_hal_setuptxdesc(ah, ds
- , pktlen /* packet length */
- , hdrlen /* header length */
- , atype /* Atheros packet type */
- , params->ibp_power /* txpower */
- , txrate, try0 /* series 0 rate/tries */
- , keyix /* key cache index */
- , txantenna /* antenna mode */
- , flags /* flags */
- , ctsrate /* rts/cts rate */
- , ctsduration /* rts/cts duration */
- );
+
+ /* Store the decided rate index values away */
+ bf->bf_state.bfs_pktlen = pktlen;
+ bf->bf_state.bfs_hdrlen = hdrlen;
+ bf->bf_state.bfs_atype = atype;
+ bf->bf_state.bfs_txpower = params->ibp_power;
+ bf->bf_state.bfs_txrate0 = txrate;
+ bf->bf_state.bfs_try0 = try0;
+ bf->bf_state.bfs_keyix = keyix;
+ bf->bf_state.bfs_txantenna = txantenna;
+ bf->bf_state.bfs_flags = flags;
bf->bf_txflags = flags;
+ bf->bf_state.bfs_shpream =
+ !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
- if (ath_tx_is_11n(sc)) {
- rate[0] = ath_tx_findrix(sc, params->ibp_rate0);
- try[0] = params->ibp_try0;
-
- if (ismrr) {
- /* Remember, rate[] is actually an array of rix's -adrian */
- rate[0] = ath_tx_findrix(sc, params->ibp_rate0);
- rate[1] = ath_tx_findrix(sc, params->ibp_rate1);
- rate[2] = ath_tx_findrix(sc, params->ibp_rate2);
- rate[3] = ath_tx_findrix(sc, params->ibp_rate3);
-
- try[0] = params->ibp_try0;
- try[1] = params->ibp_try1;
- try[2] = params->ibp_try2;
- try[3] = params->ibp_try3;
- }
- } else {
- if (ismrr) {
- rix = ath_tx_findrix(sc, params->ibp_rate1);
- rate1 = rt->info[rix].rateCode;
- if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
- rate1 |= rt->info[rix].shortPreamble;
- if (params->ibp_try2) {
- rix = ath_tx_findrix(sc, params->ibp_rate2);
- rate2 = rt->info[rix].rateCode;
- if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
- rate2 |= rt->info[rix].shortPreamble;
- } else
- rate2 = 0;
- if (params->ibp_try3) {
- rix = ath_tx_findrix(sc, params->ibp_rate3);
- rate3 = rt->info[rix].rateCode;
- if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
- rate3 |= rt->info[rix].shortPreamble;
- } else
- rate3 = 0;
- ath_hal_setupxtxdesc(ah, ds
- , rate1, params->ibp_try1 /* series 1 */
- , rate2, params->ibp_try2 /* series 2 */
- , rate3, params->ibp_try3 /* series 3 */
- );
- }
- }
+ /* XXX this should be done in ath_tx_setrate() */
+ bf->bf_state.bfs_ctsrate = 0;
+ bf->bf_state.bfs_ctsduration = 0;
+ bf->bf_state.bfs_ismrr = ismrr;
- if (ath_tx_is_11n(sc)) {
- /*
- * notice that rix doesn't include any of the "magic" flags txrate
- * does for communicating "other stuff" to the HAL.
- */
- ath_buf_set_rate(sc, ni, bf, pktlen, flags, ctsrate, (atype == HAL_PKT_TYPE_PSPOLL), rate, try);
+ /* Blank the legacy rate array */
+ bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
+
+ bf->bf_state.bfs_rc[0].rix =
+ ath_tx_findrix(sc, params->ibp_rate0);
+ bf->bf_state.bfs_rc[0].tries = try0;
+ bf->bf_state.bfs_rc[0].ratecode = txrate;
+
+ if (ismrr) {
+ int rix;
+
+ rix = ath_tx_findrix(sc, params->ibp_rate1);
+ bf->bf_state.bfs_rc[1].rix = rix;
+ bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
+
+ rix = ath_tx_findrix(sc, params->ibp_rate2);
+ bf->bf_state.bfs_rc[2].rix = rix;
+ bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
+
+ rix = ath_tx_findrix(sc, params->ibp_rate3);
+ bf->bf_state.bfs_rc[3].rix = rix;
+ bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
}
+ /*
+ * All the required rate control decisions have been made;
+ * fill in the rc flags.
+ */
+ ath_tx_rate_fill_rcflags(sc, bf);
/* NB: no buffered multicast in power save support */
- ath_tx_handoff(sc, sc->sc_ac2q[pri], bf);
+
+ /* XXX If it's an ADDBA, override the correct queue */
+ do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
+
+ /* Map ADDBA to the correct priority */
+ if (do_override) {
+#if 0
+ device_printf(sc->sc_dev,
+ "%s: overriding tid %d pri %d -> %d\n",
+ __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
+#endif
+ pri = TID_TO_WME_AC(o_tid);
+ }
+
+ /*
+	 * If we're overriding the ADDBA destination, dump it directly
+	 * into the hardware queue, behind any frames already pending
+	 * for that node.
+ */
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
+ __func__, do_override);
+
+ if (do_override) {
+ ATH_TXQ_LOCK(sc->sc_ac2q[pri]);
+ ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[pri]);
+ } else {
+ /* Queue to software queue */
+ ath_tx_swq(sc, ni, sc->sc_ac2q[pri], bf);
+ }
+
return 0;
}
+/*
+ * Send a raw frame.
+ *
+ * This can be called by net80211.
+ */
int
ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
@@ -1069,6 +1716,17 @@ ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
struct ath_buf *bf;
int error;
+ ATH_PCU_LOCK(sc);
+ if (sc->sc_inreset_cnt > 0) {
+ device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; bailing\n",
+ __func__);
+ error = EIO;
+ ATH_PCU_UNLOCK(sc);
+ goto bad0;
+ }
+ sc->sc_txstart_cnt++;
+ ATH_PCU_UNLOCK(sc);
+
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
(ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
@@ -1111,14 +1769,2168 @@ ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
ifp->if_opackets++;
sc->sc_stats.ast_tx_raw++;
+ ATH_PCU_LOCK(sc);
+ sc->sc_txstart_cnt--;
+ ATH_PCU_UNLOCK(sc);
+
return 0;
bad2:
ATH_TXBUF_LOCK(sc);
- STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
+ TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
ATH_TXBUF_UNLOCK(sc);
bad:
+ ATH_PCU_LOCK(sc);
+ sc->sc_txstart_cnt--;
+ ATH_PCU_UNLOCK(sc);
+bad0:
ifp->if_oerrors++;
sc->sc_stats.ast_tx_raw_fail++;
ieee80211_free_node(ni);
+
return error;
}
+
+/* Some helper functions */
+
+/*
+ * ADDBA (and potentially others) need to be placed in the same
+ * hardware queue as the TID/node it's relating to. This is so
+ * it goes out after any pending non-aggregate frames to the
+ * same node/TID.
+ *
+ * If this isn't done, the ADDBA can go out before the frames
+ * queued in hardware. Even though those frames have sequence
+ * numbers -earlier- than the ADDBA (and no frames whose sequence
+ * numbers are after the ADDBA should be queued ahead of it!),
+ * they'll arrive after the ADDBA - and the receiving end
+ * will simply drop them as being out of the BAW.
+ *
+ * The frames can't be appended to the TID software queue - it'll
+ * never be sent out. So these frames have to be directly
+ * dispatched to the hardware, rather than queued in software.
+ * So if this function returns true, the TXQ has to be
+ * overridden and it has to be directly dispatched.
+ *
+ * It's a dirty hack, but someone's gotta do it.
+ */
+
+/*
+ * XXX doesn't belong here!
+ */
+static int
+ieee80211_is_action(struct ieee80211_frame *wh)
+{
+ /* Type: Management frame? */
+ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
+ IEEE80211_FC0_TYPE_MGT)
+ return 0;
+
+ /* Subtype: Action frame? */
+ if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
+ IEEE80211_FC0_SUBTYPE_ACTION)
+ return 0;
+
+ return 1;
+}
+
+#define MS(_v, _f) (((_v) & _f) >> _f##_S)
+/*
+ * Return an alternate TID for ADDBA request frames.
+ *
+ * Yes, this likely should be done in the net80211 layer.
+ */
+static int
+ath_tx_action_frame_override_queue(struct ath_softc *sc,
+ struct ieee80211_node *ni,
+ struct mbuf *m0, int *tid)
+{
+ struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
+ struct ieee80211_action_ba_addbarequest *ia;
+ uint8_t *frm;
+ uint16_t baparamset;
+
+ /* Not action frame? Bail */
+ if (! ieee80211_is_action(wh))
+ return 0;
+
+ /* XXX Not needed for frames we send? */
+#if 0
+ /* Correct length? */
+	if (! ieee80211_parse_action(ni, m0))
+ return 0;
+#endif
+
+ /* Extract out action frame */
+ frm = (u_int8_t *)&wh[1];
+ ia = (struct ieee80211_action_ba_addbarequest *) frm;
+
+ /* Not ADDBA? Bail */
+ if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
+ return 0;
+ if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
+ return 0;
+
+ /* Extract TID, return it */
+ baparamset = le16toh(ia->rq_baparamset);
+ *tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
+
+ return 1;
+}
+#undef MS
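+
+/*
+ * Worked example of the MS() extraction above, assuming net80211's
+ * IEEE80211_BAPS_TID = 0x003c and IEEE80211_BAPS_TID_S = 2: an ADDBA
+ * request carrying rq_baparamset = 0x1016 yields
+ *
+ *	(0x1016 & 0x003c) >> 2 = 5
+ *
+ * so the frame is dispatched behind any pending traffic for TID 5.
+ */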
+
+/* Per-node software queue operations */
+
+/*
+ * Add the current packet to the given BAW.
+ * It is assumed that the current packet
+ *
+ * + fits inside the BAW;
+ * + already has had a sequence number allocated.
+ *
+ * Since the BAW status may be modified by both the ath task and
+ * the net80211/ifnet contexts, the TID must be locked.
+ */
+void
+ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
+ struct ath_tid *tid, struct ath_buf *bf)
+{
+ int index, cindex;
+ struct ieee80211_tx_ampdu *tap;
+
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+
+ if (bf->bf_state.bfs_isretried)
+ return;
+
+ tap = ath_tx_get_tx_tid(an, tid->tid);
+
+ if (bf->bf_state.bfs_addedbaw)
+ device_printf(sc->sc_dev,
+ "%s: re-added? tid=%d, seqno %d; window %d:%d; baw head=%d tail=%d\n",
+ __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
+ tap->txa_start, tap->txa_wnd, tid->baw_head, tid->baw_tail);
+
+ /*
+ * ni->ni_txseqs[] is the currently allocated seqno.
+	 * The txa state contains the current BAW start.
+ */
+ index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
+ cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
+ DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
+ "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d baw head=%d tail=%d\n",
+ __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
+ tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, tid->baw_tail);
+
+#if 0
+ assert(tid->tx_buf[cindex] == NULL);
+#endif
+ if (tid->tx_buf[cindex] != NULL) {
+ device_printf(sc->sc_dev,
+ "%s: ba packet dup (index=%d, cindex=%d, "
+ "head=%d, tail=%d)\n",
+ __func__, index, cindex, tid->baw_head, tid->baw_tail);
+ device_printf(sc->sc_dev,
+ "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
+ __func__,
+ tid->tx_buf[cindex],
+ SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
+ bf,
+ SEQNO(bf->bf_state.bfs_seqno)
+ );
+ }
+ tid->tx_buf[cindex] = bf;
+
+ if (index >= ((tid->baw_tail - tid->baw_head) & (ATH_TID_MAX_BUFS - 1))) {
+ tid->baw_tail = cindex;
+ INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
+ }
+}
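+
+/*
+ * Worked example of the BAW slot arithmetic above, assuming
+ * ATH_BA_INDEX() returns the modulo-IEEE80211_SEQ_RANGE distance from
+ * the BAW left edge and ATH_TID_MAX_BUFS is 64: with txa_start = 4094,
+ * seqno = 2 and baw_head = 60,
+ *
+ *	index  = (2 - 4094) & 4095 = 4
+ *	cindex = (60 + 4) & 63 = 0
+ *
+ * ie, the sequence space and the tx_buf[] slot ring wrap independently
+ * of each other.
+ */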
+
+/*
+ * Flip the BAW buffer entry over from the existing one to the new one.
+ *
+ * When software retransmitting a (sub-)frame, it is entirely possible that
+ * the frame ath_buf is marked as BUSY and can't be immediately reused.
+ * In that instance the buffer is cloned and the new buffer is used for
+ * retransmit. We thus need to update the ath_buf slot in the BAW buf
+ * tracking array to maintain consistency.
+ */
+static void
+ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
+ struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
+{
+ int index, cindex;
+ struct ieee80211_tx_ampdu *tap;
+ int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
+
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+
+ tap = ath_tx_get_tx_tid(an, tid->tid);
+ index = ATH_BA_INDEX(tap->txa_start, seqno);
+ cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
+
+ /*
+ * Just warn for now; if it happens then we should find out
+ * about it. It's highly likely the aggregation session will
+ * soon hang.
+ */
+ if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
+ device_printf(sc->sc_dev, "%s: retransmitted buffer"
+ " has mismatching seqno's, BA session may hang.\n",
+ __func__);
+ device_printf(sc->sc_dev, "%s: old seqno=%d, new_seqno=%d\n",
+ __func__,
+ old_bf->bf_state.bfs_seqno,
+ new_bf->bf_state.bfs_seqno);
+ }
+
+ if (tid->tx_buf[cindex] != old_bf) {
+ device_printf(sc->sc_dev, "%s: ath_buf pointer incorrect; "
+ " has m BA session may hang.\n",
+ __func__);
+ device_printf(sc->sc_dev, "%s: old bf=%p, new bf=%p\n",
+ __func__,
+ old_bf, new_bf);
+ }
+
+ tid->tx_buf[cindex] = new_bf;
+}
+
+/*
+ * seq_start - left edge of BAW
+ * seq_next - current/next sequence number to allocate
+ *
+ * Since the BAW status may be modified by both the ath task and
+ * the net80211/ifnet contexts, the TID must be locked.
+ */
+static void
+ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
+ struct ath_tid *tid, const struct ath_buf *bf)
+{
+ int index, cindex;
+ struct ieee80211_tx_ampdu *tap;
+ int seqno = SEQNO(bf->bf_state.bfs_seqno);
+
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+
+ tap = ath_tx_get_tx_tid(an, tid->tid);
+ index = ATH_BA_INDEX(tap->txa_start, seqno);
+ cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
+ "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, baw head=%d, tail=%d\n",
+ __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
+ cindex, tid->baw_head, tid->baw_tail);
+
+ /*
+ * If this occurs then we have a big problem - something else
+ * has slid tap->txa_start along without updating the BAW
+ * tracking start/end pointers. Thus the TX BAW state is now
+ * completely busted.
+ *
+ * But for now, since I haven't yet fixed TDMA and buffer cloning,
+ * it's quite possible that a cloned buffer is making its way
+ * here and causing it to fire off. Disable TDMA for now.
+ */
+ if (tid->tx_buf[cindex] != bf) {
+ device_printf(sc->sc_dev,
+ "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
+ __func__,
+ bf, SEQNO(bf->bf_state.bfs_seqno),
+ tid->tx_buf[cindex],
+ SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno));
+ }
+
+ tid->tx_buf[cindex] = NULL;
+
+ while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
+ INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
+ INCR(tid->baw_head, ATH_TID_MAX_BUFS);
+ }
+ DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: baw is now %d:%d, baw head=%d\n",
+ __func__, tap->txa_start, tap->txa_wnd, tid->baw_head);
+}
+
+/*
+ * Mark the current node/TID as ready to TX.
+ *
+ * This is done to make it easy for the software scheduler to
+ * find which nodes have data to send.
+ *
+ * The TXQ lock must be held.
+ */
+static void
+ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
+{
+ struct ath_txq *txq = sc->sc_ac2q[tid->ac];
+
+ ATH_TXQ_LOCK_ASSERT(txq);
+
+ if (tid->paused)
+ return; /* paused, can't schedule yet */
+
+ if (tid->sched)
+ return; /* already scheduled */
+
+ tid->sched = 1;
+
+ TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
+}
+
+/*
+ * Mark the current node as no longer needing to be polled for
+ * TX packets.
+ *
+ * The TXQ lock must be held.
+ */
+static void
+ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
+{
+ struct ath_txq *txq = sc->sc_ac2q[tid->ac];
+
+ ATH_TXQ_LOCK_ASSERT(txq);
+
+ if (tid->sched == 0)
+ return;
+
+ tid->sched = 0;
+ TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
+}
+
+/*
+ * Assign a sequence number manually to the given frame.
+ *
+ * This should only be called for A-MPDU TX frames.
+ */
+static ieee80211_seq
+ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
+ struct ath_buf *bf, struct mbuf *m0)
+{
+ struct ieee80211_frame *wh;
+ int tid, pri;
+ ieee80211_seq seqno;
+ uint8_t subtype;
+
+ /* TID lookup */
+ wh = mtod(m0, struct ieee80211_frame *);
+ pri = M_WME_GETAC(m0); /* honor classification */
+ tid = WME_AC_TO_TID(pri);
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
+ __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
+
+ /* XXX Is it a control frame? Ignore */
+
+ /* Does the packet require a sequence number? */
+ if (! IEEE80211_QOS_HAS_SEQ(wh))
+ return -1;
+
+ /*
+ * Is it a QOS NULL Data frame? Give it a sequence number from
+	 * the default TID (IEEE80211_NONQOS_TID).
+ *
+ * The RX path of everything I've looked at doesn't include the NULL
+ * data frame sequence number in the aggregation state updates, so
+ * assigning it a sequence number there will cause a BAW hole on the
+ * RX side.
+ */
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
+ seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
+ INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
+ } else {
+ /* Manually assign sequence number */
+ seqno = ni->ni_txseqs[tid];
+ INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
+ }
+ *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
+ M_SEQNO_SET(m0, seqno);
+
+ /* Return so caller can do something with it if needed */
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno);
+ return seqno;
+}
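+
+/*
+ * Example of the wraparound above, assuming INCR(v, m) increments v
+ * modulo m: if ni_txseqs[tid] is currently 4095, the frame is assigned
+ * seqno 4095 and the counter wraps to 0, matching the 4096-entry
+ * IEEE80211_SEQ_RANGE sequence space.
+ */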
+
+/*
+ * Attempt to direct dispatch an aggregate frame to hardware.
+ * If the frame is out of BAW, queue.
+ * Otherwise, schedule it as a single frame.
+ */
+static void
+ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_buf *bf)
+{
+ struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
+ struct ath_txq *txq = bf->bf_state.bfs_txq;
+ struct ieee80211_tx_ampdu *tap;
+
+ ATH_TXQ_LOCK_ASSERT(txq);
+
+ tap = ath_tx_get_tx_tid(an, tid->tid);
+
+ /* paused? queue */
+ if (tid->paused) {
+ ATH_TXQ_INSERT_TAIL(tid, bf, bf_list);
+ return;
+ }
+
+ /* outside baw? queue */
+ if (bf->bf_state.bfs_dobaw &&
+ (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
+ SEQNO(bf->bf_state.bfs_seqno)))) {
+ ATH_TXQ_INSERT_TAIL(tid, bf, bf_list);
+ ath_tx_tid_sched(sc, tid);
+ return;
+ }
+
+ /* Direct dispatch to hardware */
+ ath_tx_do_ratelookup(sc, bf);
+ ath_tx_rate_fill_rcflags(sc, bf);
+ ath_tx_set_rtscts(sc, bf);
+ ath_tx_setds(sc, bf);
+ ath_tx_set_ratectrl(sc, bf->bf_node, bf);
+ ath_tx_chaindesclist(sc, bf);
+
+ /* Statistics */
+ sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
+
+ /* Track per-TID hardware queue depth correctly */
+ tid->hwq_depth++;
+
+ /* Add to BAW */
+ if (bf->bf_state.bfs_dobaw) {
+ ath_tx_addto_baw(sc, an, tid, bf);
+ bf->bf_state.bfs_addedbaw = 1;
+ }
+
+ /* Set completion handler, multi-frame aggregate or not */
+ bf->bf_comp = ath_tx_aggr_comp;
+
+ /* Hand off to hardware */
+ ath_tx_handoff(sc, txq, bf);
+}
+
+/*
+ * Attempt to send the packet.
+ * If the queue isn't busy, direct-dispatch.
+ * If the queue is busy enough, queue the given packet on the
+ * relevant software queue.
+ */
+void
+ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq,
+ struct ath_buf *bf)
+{
+ struct ath_node *an = ATH_NODE(ni);
+ struct ieee80211_frame *wh;
+ struct ath_tid *atid;
+ int pri, tid;
+ struct mbuf *m0 = bf->bf_m;
+
+ /* Fetch the TID - non-QoS frames get assigned to TID 16 */
+ wh = mtod(m0, struct ieee80211_frame *);
+ pri = ath_tx_getac(sc, m0);
+ tid = ath_tx_gettid(sc, m0);
+ atid = &an->an_tid[tid];
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
+ __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
+
+ /* Set local packet state, used to queue packets to hardware */
+ bf->bf_state.bfs_tid = tid;
+ bf->bf_state.bfs_txq = txq;
+ bf->bf_state.bfs_pri = pri;
+
+ /*
+	 * If the hardware queue isn't busy, queue it directly.
+	 * If the hardware queue is busy, queue it on the software queue.
+	 * If the TID is paused or the traffic is outside the BAW,
+	 * software queue it.
+ */
+ ATH_TXQ_LOCK(txq);
+ if (atid->paused) {
+ /* TID is paused, queue */
+ ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
+ } else if (ath_tx_ampdu_pending(sc, an, tid)) {
+ /* AMPDU pending; queue */
+ ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
+ /* XXX sched? */
+ } else if (ath_tx_ampdu_running(sc, an, tid)) {
+ /* AMPDU running, attempt direct dispatch if possible */
+ if (txq->axq_depth < sc->sc_hwq_limit)
+ ath_tx_xmit_aggr(sc, an, bf);
+ else {
+ ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
+ ath_tx_tid_sched(sc, atid);
+ }
+ } else if (txq->axq_depth < sc->sc_hwq_limit) {
+ /* AMPDU not running, attempt direct dispatch */
+ ath_tx_xmit_normal(sc, txq, bf);
+ } else {
+ /* Busy; queue */
+ ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
+ ath_tx_tid_sched(sc, atid);
+ }
+ ATH_TXQ_UNLOCK(txq);
+}
+
+/*
+ * Do the basic frame setup stuff that's required before the frame
+ * is added to a software queue.
+ *
+ * All frames get mostly the same treatment and it's done once.
+ * Retransmits fiddle with things like the rate control setup,
+ * setting the retransmit bit in the packet; doing relevant DMA/bus
+ * syncing and relinking it (back) into the hardware TX queue.
+ *
+ * Note that this may cause the mbuf to be reallocated, so
+ * m0 may not be valid.
+ */
+
+/*
+ * Configure the per-TID node state.
+ *
+ * This likely belongs in if_ath_node.c but I can't think of anywhere
+ * else to put it just yet.
+ *
+ * This sets up the TAILQs and per-TID state as appropriate.
+ */
+void
+ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
+{
+ int i, j;
+ struct ath_tid *atid;
+
+ for (i = 0; i < IEEE80211_TID_SIZE; i++) {
+ atid = &an->an_tid[i];
+ TAILQ_INIT(&atid->axq_q);
+ atid->tid = i;
+ atid->an = an;
+ for (j = 0; j < ATH_TID_MAX_BUFS; j++)
+ atid->tx_buf[j] = NULL;
+ atid->baw_head = atid->baw_tail = 0;
+ atid->paused = 0;
+ atid->sched = 0;
+ atid->hwq_depth = 0;
+ atid->cleanup_inprogress = 0;
+ if (i == IEEE80211_NONQOS_TID)
+ atid->ac = WME_AC_BE;
+ else
+ atid->ac = TID_TO_WME_AC(i);
+ }
+}
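+
+/*
+ * For example, with the stock net80211 TID_TO_WME_AC() mapping the
+ * voice TIDs 6 and 7 land on WME_AC_VO, whilst the non-QoS TID
+ * (IEEE80211_NONQOS_TID, ie 16) has no QoS mapping of its own and is
+ * explicitly pinned to WME_AC_BE above.
+ */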
+
+/*
+ * Pause the current TID. This stops packets from being transmitted
+ * on it.
+ *
+ * Since this is also called from upper layers as well as the driver,
+ * it will get the TID lock.
+ */
+static void
+ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
+{
+ ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
+ tid->paused++;
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n",
+ __func__, tid->paused);
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);
+}
+
+/*
+ * Unpause the current TID, and schedule it if needed.
+ */
+static void
+ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
+{
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+
+ tid->paused--;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n",
+ __func__, tid->paused);
+
+ if (tid->paused || tid->axq_depth == 0) {
+ return;
+ }
+
+ ath_tx_tid_sched(sc, tid);
+ /* Punt some frames to the hardware if needed */
+ ath_txq_sched(sc, sc->sc_ac2q[tid->ac]);
+}
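+
+/*
+ * Note that tid->paused is a counter, not a flag: eg, an ADDBA
+ * negotiation and a TID cleanup may each pause the TID, and traffic
+ * only resumes once both have unpaused it and the count drops back
+ * to zero.
+ */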
+
+/*
+ * Free any packets currently pending in the software TX queue.
+ *
+ * This will be called when a node is being deleted.
+ *
+ * It can also be called on an active node during an interface
+ * reset or state transition.
+ *
+ * (From Linux/reference):
+ *
+ * TODO: For frame(s) that are in the retry state, we will reuse the
+ * sequence number(s) without setting the retry bit. The
+ * alternative is to give up on these and BAR the receiver's window
+ * forward.
+ */
+static void
+ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid,
+ ath_bufhead *bf_cq)
+{
+ struct ath_buf *bf;
+ struct ieee80211_tx_ampdu *tap;
+ struct ieee80211_node *ni = &an->an_node;
+ int t = 0;
+ struct ath_txq *txq = sc->sc_ac2q[tid->ac];
+
+ tap = ath_tx_get_tx_tid(an, tid->tid);
+
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+
+ /* Walk the queue, free frames */
+ for (;;) {
+ bf = TAILQ_FIRST(&tid->axq_q);
+ if (bf == NULL) {
+ break;
+ }
+
+ if (t == 0) {
+ device_printf(sc->sc_dev,
+ "%s: node %p: tid %d: txq_depth=%d, "
+ "txq_aggr_depth=%d, sched=%d, paused=%d, "
+ "hwq_depth=%d, incomp=%d, baw_head=%d, baw_tail=%d "
+ "txa_start=%d, ni_txseqs=%d\n",
+ __func__, ni, tid->tid, txq->axq_depth,
+ txq->axq_aggr_depth, tid->sched, tid->paused,
+ tid->hwq_depth, tid->incomp, tid->baw_head,
+ tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
+ ni->ni_txseqs[tid->tid]);
+ t = 1;
+ }
+
+ /*
+ * If the current TID is running AMPDU, update
+ * the BAW.
+ */
+ if (ath_tx_ampdu_running(sc, an, tid->tid) &&
+ bf->bf_state.bfs_dobaw) {
+ /*
+ * Only remove the frame from the BAW if it's
+ * been transmitted at least once; this means
+ * the frame was in the BAW to begin with.
+ */
+ if (bf->bf_state.bfs_retries > 0) {
+ ath_tx_update_baw(sc, an, tid, bf);
+ bf->bf_state.bfs_dobaw = 0;
+ }
+ /*
+ * This has become a non-fatal error now
+ */
+ if (! bf->bf_state.bfs_addedbaw)
+ device_printf(sc->sc_dev,
+ "%s: wasn't added: seqno %d\n",
+ __func__, SEQNO(bf->bf_state.bfs_seqno));
+ }
+ ATH_TXQ_REMOVE(tid, bf, bf_list);
+ TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
+ }
+
+ /*
+	 * Now that it's completed, update the sequence number and
+	 * BAW window (the TID lock is already held here).
+ * Because sequence numbers have been assigned to frames
+ * that haven't been sent yet, it's entirely possible
+ * we'll be called with some pending frames that have not
+ * been transmitted.
+ *
+ * The cleaner solution is to do the sequence number allocation
+ * when the packet is first transmitted - and thus the "retries"
+ * check above would be enough to update the BAW/seqno.
+ */
+
+ /* But don't do it for non-QoS TIDs */
+ if (tap) {
+#if 0
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: node %p: TID %d: sliding BAW left edge to %d\n",
+ __func__, an, tid->tid, tap->txa_start);
+#endif
+ ni->ni_txseqs[tid->tid] = tap->txa_start;
+ tid->baw_tail = tid->baw_head;
+ }
+}
+
+/*
+ * Flush all software queued packets for the given node.
+ *
+ * This occurs when a completion handler frees the last buffer
+ * for a node, and the node is thus freed. This causes the node
+ * to be cleaned up, which ends up calling ath_tx_node_flush.
+ */
+void
+ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
+{
+ int tid;
+ ath_bufhead bf_cq;
+ struct ath_buf *bf;
+
+ TAILQ_INIT(&bf_cq);
+
+ for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
+ struct ath_tid *atid = &an->an_tid[tid];
+ struct ath_txq *txq = sc->sc_ac2q[atid->ac];
+
+ /* Remove this tid from the list of active tids */
+ ATH_TXQ_LOCK(txq);
+ ath_tx_tid_unsched(sc, atid);
+
+ /* Free packets */
+ ath_tx_tid_drain(sc, an, atid, &bf_cq);
+ ATH_TXQ_UNLOCK(txq);
+ }
+
+ /* Handle completed frames */
+ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
+ TAILQ_REMOVE(&bf_cq, bf, bf_list);
+ ath_tx_default_comp(sc, bf, 0);
+ }
+}
+
+/*
+ * Drain all the software TXQs currently with traffic queued.
+ */
+void
+ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
+{
+ struct ath_tid *tid;
+ ath_bufhead bf_cq;
+ struct ath_buf *bf;
+
+ TAILQ_INIT(&bf_cq);
+ ATH_TXQ_LOCK(txq);
+
+ /*
+ * Iterate over all active tids for the given txq,
+ * flushing and unsched'ing them
+ */
+ while (! TAILQ_EMPTY(&txq->axq_tidq)) {
+ tid = TAILQ_FIRST(&txq->axq_tidq);
+ ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
+ ath_tx_tid_unsched(sc, tid);
+ }
+
+ ATH_TXQ_UNLOCK(txq);
+
+ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
+ TAILQ_REMOVE(&bf_cq, bf, bf_list);
+ ath_tx_default_comp(sc, bf, 0);
+ }
+}
+
+/*
+ * Handle completion of non-aggregate session frames.
+ */
+void
+ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
+{
+ struct ieee80211_node *ni = bf->bf_node;
+ struct ath_node *an = ATH_NODE(ni);
+ int tid = bf->bf_state.bfs_tid;
+ struct ath_tid *atid = &an->an_tid[tid];
+ struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
+
+ /* The TID state is protected behind the TXQ lock */
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
+ __func__, bf, fail, atid->hwq_depth - 1);
+
+ atid->hwq_depth--;
+ if (atid->hwq_depth < 0)
+ device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
+ __func__, atid->hwq_depth);
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+
+ /*
+ * punt to rate control if we're not being cleaned up
+ * during a hw queue drain and the frame wanted an ACK.
+ */
+ if (fail == 0 && ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0))
+ ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
+ ts, bf->bf_state.bfs_pktlen,
+ 1, (ts->ts_status == 0) ? 0 : 1);
+
+ ath_tx_default_comp(sc, bf, fail);
+}
+
+/*
+ * Handle cleanup of aggregate session packets that aren't
+ * an A-MPDU.
+ *
+ * There's no need to update the BAW here - the session is being
+ * torn down.
+ */
+static void
+ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ieee80211_node *ni = bf->bf_node;
+ struct ath_node *an = ATH_NODE(ni);
+ int tid = bf->bf_state.bfs_tid;
+ struct ath_tid *atid = &an->an_tid[tid];
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
+ __func__, tid, atid->incomp);
+
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+ atid->incomp--;
+ if (atid->incomp == 0) {
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: TID %d: cleaned up! resume!\n",
+ __func__, tid);
+ atid->cleanup_inprogress = 0;
+ ath_tx_tid_resume(sc, atid);
+ }
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+
+ ath_tx_default_comp(sc, bf, 0);
+}
+
+/*
+ * Performs transmit side cleanup when TID changes from aggregated to
+ * unaggregated.
+ *
+ * - Discard all retry frames from the s/w queue.
+ * - Fix the tx completion function for all buffers in s/w queue.
+ * - Count the number of unacked frames, and let transmit completion
+ * handle it later.
+ *
+ * The caller is responsible for pausing the TID.
+ */
+static void
+ath_tx_cleanup(struct ath_softc *sc, struct ath_node *an, int tid)
+{
+ struct ath_tid *atid = &an->an_tid[tid];
+ struct ieee80211_tx_ampdu *tap;
+ struct ath_buf *bf, *bf_next;
+ ath_bufhead bf_cq;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: TID %d: called\n", __func__, tid);
+
+ TAILQ_INIT(&bf_cq);
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+
+ /*
+ * Update the frames in the software TX queue:
+ *
+ * + Discard retry frames in the queue
+ * + Fix the completion function to be non-aggregate
+ */
+ bf = TAILQ_FIRST(&atid->axq_q);
+ while (bf) {
+ if (bf->bf_state.bfs_isretried) {
+ bf_next = TAILQ_NEXT(bf, bf_list);
+ TAILQ_REMOVE(&atid->axq_q, bf, bf_list);
+ atid->axq_depth--;
+ if (bf->bf_state.bfs_dobaw) {
+ ath_tx_update_baw(sc, an, atid, bf);
+ if (! bf->bf_state.bfs_addedbaw)
+ device_printf(sc->sc_dev,
+ "%s: wasn't added: seqno %d\n",
+ __func__, SEQNO(bf->bf_state.bfs_seqno));
+ }
+ bf->bf_state.bfs_dobaw = 0;
+ /*
+ * Call the default completion handler with "fail" just
+ * so upper levels are suitably notified about this.
+ */
+ TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
+ bf = bf_next;
+ continue;
+ }
+ /* Give these the default completion handler */
+ bf->bf_comp = ath_tx_normal_comp;
+ bf = TAILQ_NEXT(bf, bf_list);
+ }
+
+ /* The caller is required to pause the TID */
+#if 0
+ /* Pause the TID */
+ ath_tx_tid_pause(sc, atid);
+#endif
+
+ /*
+ * Calculate what hardware-queued frames exist based
+ * on the current BAW size. Ie, what frames have been
+ * added to the TX hardware queue for this TID but
+ * not yet ACKed.
+ */
+ tap = ath_tx_get_tx_tid(an, tid);
+ /* Need the lock - fiddling with BAW */
+ while (atid->baw_head != atid->baw_tail) {
+ if (atid->tx_buf[atid->baw_head]) {
+ atid->incomp++;
+ atid->cleanup_inprogress = 1;
+ atid->tx_buf[atid->baw_head] = NULL;
+ }
+ INCR(atid->baw_head, ATH_TID_MAX_BUFS);
+ INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
+ }
+
+ /*
+ * If cleanup is required, defer TID scheduling
+ * until all the HW queued packets have been
+ * sent.
+ */
+ if (! atid->cleanup_inprogress)
+ ath_tx_tid_resume(sc, atid);
+
+ if (atid->cleanup_inprogress)
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: TID %d: cleanup needed: %d packets\n",
+ __func__, tid, atid->incomp);
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+
+ /* Handle completing frames and fail them */
+ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
+ TAILQ_REMOVE(&bf_cq, bf, bf_list);
+ ath_tx_default_comp(sc, bf, 1);
+ }
+}
+
+static void
+ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ieee80211_frame *wh;
+
+ wh = mtod(bf->bf_m, struct ieee80211_frame *);
+ /* Only update/resync if needed */
+ if (bf->bf_state.bfs_isretried == 0) {
+ wh->i_fc[1] |= IEEE80211_FC1_RETRY;
+ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
+ BUS_DMASYNC_PREWRITE);
+ }
+ sc->sc_stats.ast_tx_swretries++;
+ bf->bf_state.bfs_isretried = 1;
+ bf->bf_state.bfs_retries++;
+}
+
+static struct ath_buf *
+ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
+ struct ath_tid *tid, struct ath_buf *bf)
+{
+ struct ath_buf *nbf;
+ int error;
+
+ nbf = ath_buf_clone(sc, bf);
+
+#if 0
+ device_printf(sc->sc_dev, "%s: ATH_BUF_BUSY; cloning\n",
+ __func__);
+#endif
+
+ if (nbf == NULL) {
+ /* Failed to clone */
+ device_printf(sc->sc_dev,
+ "%s: failed to clone a busy buffer\n",
+ __func__);
+ return NULL;
+ }
+
+ /* Setup the dma for the new buffer */
+ error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "%s: failed to setup dma for clone\n",
+ __func__);
+ /*
+ * Put this at the head of the list, not tail;
+ * that way it doesn't interfere with the
+ * busy buffer logic (which uses the tail of
+ * the list.)
+ */
+ ATH_TXBUF_LOCK(sc);
+ TAILQ_INSERT_HEAD(&sc->sc_txbuf, nbf, bf_list);
+ ATH_TXBUF_UNLOCK(sc);
+ return NULL;
+ }
+
+ /* Update BAW if required, before we free the original buf */
+ if (bf->bf_state.bfs_dobaw)
+ ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
+
+ /* Free current buffer; return the older buffer */
+ bf->bf_m = NULL;
+ bf->bf_node = NULL;
+ ath_freebuf(sc, bf);
+ return nbf;
+}
+
+/*
+ * Handle retrying an unaggregated frame in an aggregate
+ * session.
+ *
+ * If too many retries occur, pause the TID, wait for
+ * any further retransmits (there's no requirement that
+ * non-aggregate frames in an aggregate session be
+ * transmitted in order; they just have to be within the BAW)
+ * and then queue a BAR.
+ */
+static void
+ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ieee80211_node *ni = bf->bf_node;
+ struct ath_node *an = ATH_NODE(ni);
+ int tid = bf->bf_state.bfs_tid;
+ struct ath_tid *atid = &an->an_tid[tid];
+ struct ieee80211_tx_ampdu *tap;
+ int txseq;
+
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+
+ tap = ath_tx_get_tx_tid(an, tid);
+
+ /*
+ * If the buffer is marked as busy, we can't directly
+ * reuse it. Instead, try to clone the buffer.
+ * If the clone is successful, recycle the old buffer.
+ * If the clone is unsuccessful, set bfs_retries to max
+ * to force the next bit of code to free the buffer
+ * for us.
+ */
+ if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
+ (bf->bf_flags & ATH_BUF_BUSY)) {
+ struct ath_buf *nbf;
+ nbf = ath_tx_retry_clone(sc, an, atid, bf);
+ if (nbf)
+ /* bf has been freed at this point */
+ bf = nbf;
+ else
+ bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
+ }
+
+ if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
+ DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
+ "%s: exceeded retries; seqno %d\n",
+ __func__, SEQNO(bf->bf_state.bfs_seqno));
+ sc->sc_stats.ast_tx_swretrymax++;
+
+ /* Update BAW anyway */
+ if (bf->bf_state.bfs_dobaw) {
+ ath_tx_update_baw(sc, an, atid, bf);
+ if (! bf->bf_state.bfs_addedbaw)
+ device_printf(sc->sc_dev,
+ "%s: wasn't added: seqno %d\n",
+ __func__, SEQNO(bf->bf_state.bfs_seqno));
+ }
+ bf->bf_state.bfs_dobaw = 0;
+
+ /* Send BAR frame */
+ /*
+ * This'll end up going into net80211 and back out
+ * again, via ic->ic_raw_xmit().
+ */
+ txseq = tap->txa_start;
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+
+ device_printf(sc->sc_dev,
+ "%s: TID %d: send BAR; seq %d\n", __func__, tid, txseq);
+
+ /* XXX TODO: send BAR */
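+#if 0
+ /*
+ * One possible sketch (untested; assumes net80211's
+ * ieee80211_send_bar() is suitable here): let net80211 build
+ * and queue the BAR frame. It transmits via ic_raw_xmit() and
+ * calls ic_bar_response() on completion, which would tie into
+ * ath_bar_response() below.
+ */
+ (void) ieee80211_send_bar(ni, tap, txseq);
+#endif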
+
+ /* Free buffer, bf is free after this call */
+ ath_tx_default_comp(sc, bf, 0);
+ return;
+ }
+
+ /*
+ * This increments the retry counter as well as
+ * sets the retry flag in the ath_buf and packet
+ * body.
+ */
+ ath_tx_set_retry(sc, bf);
+
+ /*
+ * Insert this at the head of the queue, so it's
+ * retried before any current/subsequent frames.
+ */
+ ATH_TXQ_INSERT_HEAD(atid, bf, bf_list);
+ ath_tx_tid_sched(sc, atid);
+
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+}
+
+/*
+ * Common code for aggregate excessive retry/subframe retry.
+ * If retrying, queues buffers to bf_q. If not, frees the
+ * buffers.
+ *
+ * XXX should unify this with ath_tx_aggr_retry_unaggr()
+ */
+static int
+ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
+ ath_bufhead *bf_q)
+{
+ struct ieee80211_node *ni = bf->bf_node;
+ struct ath_node *an = ATH_NODE(ni);
+ int tid = bf->bf_state.bfs_tid;
+ struct ath_tid *atid = &an->an_tid[tid];
+
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);
+
+ ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
+ ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
+ /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
+
+ /*
+ * If the buffer is marked as busy, we can't directly
+ * reuse it. Instead, try to clone the buffer.
+ * If the clone is successful, recycle the old buffer.
+ * If the clone is unsuccessful, set bfs_retries to max
+ * to force the next bit of code to free the buffer
+ * for us.
+ */
+ if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
+ (bf->bf_flags & ATH_BUF_BUSY)) {
+ struct ath_buf *nbf;
+ nbf = ath_tx_retry_clone(sc, an, atid, bf);
+ if (nbf)
+ /* bf has been freed at this point */
+ bf = nbf;
+ else
+ bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
+ }
+
+ if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
+ sc->sc_stats.ast_tx_swretrymax++;
+ DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
+ "%s: max retries: seqno %d\n",
+ __func__, SEQNO(bf->bf_state.bfs_seqno));
+ ath_tx_update_baw(sc, an, atid, bf);
+ if (! bf->bf_state.bfs_addedbaw)
+ device_printf(sc->sc_dev,
+ "%s: wasn't added: seqno %d\n",
+ __func__, SEQNO(bf->bf_state.bfs_seqno));
+ bf->bf_state.bfs_dobaw = 0;
+ return 1;
+ }
+
+ ath_tx_set_retry(sc, bf);
+ bf->bf_next = NULL; /* Just to make sure */
+
+ TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
+ return 0;
+}
+
+/*
+ * error pkt completion for an aggregate destination
+ */
+static void
+ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
+ struct ath_tid *tid)
+{
+ struct ieee80211_node *ni = bf_first->bf_node;
+ struct ath_node *an = ATH_NODE(ni);
+ struct ath_buf *bf_next, *bf;
+ ath_bufhead bf_q;
+ int drops = 0;
+ struct ieee80211_tx_ampdu *tap;
+ ath_bufhead bf_cq;
+
+ TAILQ_INIT(&bf_q);
+ TAILQ_INIT(&bf_cq);
+ sc->sc_stats.ast_tx_aggrfail++;
+
+ /*
+ * Update rate control - all frames have failed.
+ *
+ * XXX use the length in the first frame in the series;
+ * XXX just so things are consistent for now.
+ */
+ ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
+ &bf_first->bf_status.ds_txstat,
+ bf_first->bf_state.bfs_pktlen,
+ bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
+
+ ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
+ tap = ath_tx_get_tx_tid(an, tid->tid);
+
+ /* Retry all subframes */
+ bf = bf_first;
+ while (bf) {
+ bf_next = bf->bf_next;
+ bf->bf_next = NULL; /* Remove it from the aggr list */
+ if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
+ drops++;
+ bf->bf_next = NULL;
+ TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
+ }
+ bf = bf_next;
+ }
+
+ /* Prepend all frames to the beginning of the queue */
+ while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
+ TAILQ_REMOVE(&bf_q, bf, bf_list);
+ ATH_TXQ_INSERT_HEAD(tid, bf, bf_list);
+ }
+
+ ath_tx_tid_sched(sc, tid);
+
+ /*
+ * send bar if we dropped any frames
+ *
+ * Keep the txq lock held for now, as we need to ensure
+ * that ni_txseqs[] is consistent (as it's being updated
+ * in the ifnet TX context or raw TX context.)
+ */
+ if (drops) {
+ int txseq = tap->txa_start;
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);
+ device_printf(sc->sc_dev,
+ "%s: TID %d: send BAR; seq %d\n",
+ __func__, tid->tid, txseq);
+
+ /* XXX TODO: send BAR */
+ } else {
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);
+ }
+
+ /* Complete frames which errored out */
+ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
+ TAILQ_REMOVE(&bf_cq, bf, bf_list);
+ ath_tx_default_comp(sc, bf, 0);
+ }
+}
+
+/*
+ * Handle clean-up of packets from an aggregate list.
+ *
+ * There's no need to update the BAW here - the session is being
+ * torn down.
+ */
+static void
+ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
+{
+ struct ath_buf *bf, *bf_next;
+ struct ieee80211_node *ni = bf_first->bf_node;
+ struct ath_node *an = ATH_NODE(ni);
+ int tid = bf_first->bf_state.bfs_tid;
+ struct ath_tid *atid = &an->an_tid[tid];
+
+ bf = bf_first;
+
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+
+ /* update incomp */
+ while (bf) {
+ atid->incomp--;
+ bf = bf->bf_next;
+ }
+
+ if (atid->incomp == 0) {
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: TID %d: cleaned up! resume!\n",
+ __func__, tid);
+ atid->cleanup_inprogress = 0;
+ ath_tx_tid_resume(sc, atid);
+ }
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+
+ /* Handle frame completion */
+ while (bf) {
+ bf_next = bf->bf_next;
+ ath_tx_default_comp(sc, bf, 1);
+ bf = bf_next;
+ }
+}
+
+/*
+ * Handle completion of a set of aggregate frames.
+ *
+ * XXX for now, simply complete each sub-frame.
+ *
+ * Note: the completion handler is the last descriptor in the aggregate,
+ * not the last descriptor in the first frame.
+ */
+static void
+ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, int fail)
+{
+ //struct ath_desc *ds = bf->bf_lastds;
+ struct ieee80211_node *ni = bf_first->bf_node;
+ struct ath_node *an = ATH_NODE(ni);
+ int tid = bf_first->bf_state.bfs_tid;
+ struct ath_tid *atid = &an->an_tid[tid];
+ struct ath_tx_status ts;
+ struct ieee80211_tx_ampdu *tap;
+ ath_bufhead bf_q;
+ ath_bufhead bf_cq;
+ int seq_st, tx_ok;
+ int hasba, isaggr;
+ uint32_t ba[2];
+ struct ath_buf *bf, *bf_next;
+ int ba_index;
+ int drops = 0;
+ int nframes = 0, nbad = 0, nf;
+ int pktlen;
+ /* XXX there's too much on the stack? */
+ struct ath_rc_series rc[4];
+ int txseq;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
+ __func__, atid->hwq_depth);
+
+ /* The TID state is kept behind the TXQ lock */
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+
+ atid->hwq_depth--;
+ if (atid->hwq_depth < 0)
+ device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
+ __func__, atid->hwq_depth);
+
+ /*
+ * Punt cleanup to the relevant function; it's not our problem now
+ */
+ if (atid->cleanup_inprogress) {
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+ ath_tx_comp_cleanup_aggr(sc, bf_first);
+ return;
+ }
+
+ /*
+ * Take a copy; this may be needed -after- bf_first
+ * has been completed and freed.
+ */
+ ts = bf_first->bf_status.ds_txstat;
+ /*
+ * XXX for now, use the first frame in the aggregate for
+ * XXX rate control completion; it's at least consistent.
+ */
+ pktlen = bf_first->bf_state.bfs_pktlen;
+
+ /*
+ * handle errors first
+ */
+ if (ts.ts_status & HAL_TXERR_XRETRY) {
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+ ath_tx_comp_aggr_error(sc, bf_first, atid);
+ return;
+ }
+
+ TAILQ_INIT(&bf_q);
+ TAILQ_INIT(&bf_cq);
+ tap = ath_tx_get_tx_tid(an, tid);
+
+ /*
+ * extract starting sequence and block-ack bitmap
+ */
+ /* XXX endian-ness of seq_st, ba? */
+ seq_st = ts.ts_seqnum;
+ hasba = !! (ts.ts_flags & HAL_TX_BA);
+ tx_ok = (ts.ts_status == 0);
+ isaggr = bf_first->bf_state.bfs_aggr;
+ ba[0] = ts.ts_ba_low;
+ ba[1] = ts.ts_ba_high;
+
+ /*
+ * Copy the TX completion status and the rate control
+ * series from the first descriptor, as it may be freed
+ * before the rate control code can get its grubby fingers
+ * into things.
+ */
+ memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
+ "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
+ __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
+ isaggr, seq_st, hasba, ba[0], ba[1]);
+
+ /* Occasionally, the MAC sends a tx status for the wrong TID. */
+ if (tid != ts.ts_tid) {
+ device_printf(sc->sc_dev, "%s: tid %d != hw tid %d\n",
+ __func__, tid, ts.ts_tid);
+ tx_ok = 0;
+ }
+
+ /* AR5416 BA bug; this requires an interface reset */
+ if (isaggr && tx_ok && (! hasba)) {
+ device_printf(sc->sc_dev,
+ "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, seq_st=%d\n",
+ __func__, hasba, tx_ok, isaggr, seq_st);
+ /* XXX TODO: schedule an interface reset */
+ }
+
+ /*
+ * Walk the list of frames, figure out which ones were correctly
+ * sent and which weren't.
+ */
+ bf = bf_first;
+ nf = bf_first->bf_state.bfs_nframes;
+
+ /* bf_first is going to be invalid once this list is walked */
+ bf_first = NULL;
+
+ /*
+ * Walk the list of completed frames and determine
+ * which need to be completed and which need to be
+ * retransmitted.
+ *
+ * For completed frames, the completion functions need
+ * to be called at the end of this function as the last
+ * node reference may free the node.
+ *
+ * Finally, since the TXQ lock can't be held during the
+ * completion callback (to avoid lock recursion),
+ * the completion calls have to be done outside of the
+ * lock.
+ */
+ while (bf) {
+ nframes++;
+ ba_index = ATH_BA_INDEX(seq_st, SEQNO(bf->bf_state.bfs_seqno));
+ bf_next = bf->bf_next;
+ bf->bf_next = NULL; /* Remove it from the aggr list */
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
+ "%s: checking bf=%p seqno=%d; ack=%d\n",
+ __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
+ ATH_BA_ISSET(ba, ba_index));
+
+ if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
+ ath_tx_update_baw(sc, an, atid, bf);
+ bf->bf_state.bfs_dobaw = 0;
+ if (! bf->bf_state.bfs_addedbaw)
+ device_printf(sc->sc_dev,
+ "%s: wasn't added: seqno %d\n",
+ __func__, SEQNO(bf->bf_state.bfs_seqno));
+ bf->bf_next = NULL;
+ TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
+ } else {
+ if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
+ drops++;
+ bf->bf_next = NULL;
+ TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
+ }
+ nbad++;
+ }
+ bf = bf_next;
+ }
+
+ /*
+ * Now that the BAW updates have been done, unlock
+ *
+ * txseq is grabbed before the lock is released so we
+ * have a consistent view of what -was- in the BAW.
+ * Anything after this point will not yet have been
+ * TXed.
+ */
+ txseq = tap->txa_start;
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+
+ if (nframes != nf)
+ device_printf(sc->sc_dev,
+ "%s: num frames seen=%d; bf nframes=%d\n",
+ __func__, nframes, nf);
+
+ /*
+ * Now we know how many frames were bad, call the rate
+ * control code.
+ */
+ if (fail == 0)
+ ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, nbad);
+
+ /*
+ * send bar if we dropped any frames
+ */
+ if (drops) {
+ device_printf(sc->sc_dev,
+ "%s: TID %d: send BAR; seq %d\n", __func__, tid, txseq);
+ /* XXX TODO: send BAR */
+ }
+
+ /* Prepend all frames to the beginning of the queue */
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+ while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
+ TAILQ_REMOVE(&bf_q, bf, bf_list);
+ ATH_TXQ_INSERT_HEAD(atid, bf, bf_list);
+ }
+ ath_tx_tid_sched(sc, atid);
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
+ "%s: txa_start now %d\n", __func__, tap->txa_start);
+
+ /* Do deferred completion */
+ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
+ TAILQ_REMOVE(&bf_cq, bf, bf_list);
+ ath_tx_default_comp(sc, bf, 0);
+ }
+}
+
+/*
+ * Handle completion of unaggregated frames in an ADDBA
+ * session.
+ *
+ * Fail is set to 1 if the entry is being freed via a call to
+ * ath_tx_draintxq().
+ */
+static void
+ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
+{
+ struct ieee80211_node *ni = bf->bf_node;
+ struct ath_node *an = ATH_NODE(ni);
+ int tid = bf->bf_state.bfs_tid;
+ struct ath_tid *atid = &an->an_tid[tid];
+ struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
+
+ /*
+ * Update rate control status here, before we possibly
+ * punt to retry or cleanup.
+ *
+ * Do it outside of the TXQ lock.
+ */
+ if (fail == 0 && ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0))
+ ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
+ &bf->bf_status.ds_txstat,
+ bf->bf_state.bfs_pktlen,
+ 1, (ts->ts_status == 0) ? 0 : 1);
+
+ /*
+ * The TXQ lock is grabbed early so atid->hwq_depth can be
+ * tracked. This unfortunately means the lock is released
+ * and re-acquired during retry and cleanup, which is rather
+ * inefficient.
+ */
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+
+ if (tid == IEEE80211_NONQOS_TID)
+ device_printf(sc->sc_dev, "%s: TID=16!\n", __func__);
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: tid=%d, hwq_depth=%d\n",
+ __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth);
+
+ atid->hwq_depth--;
+ if (atid->hwq_depth < 0)
+ device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
+ __func__, atid->hwq_depth);
+
+ /*
+ * If a cleanup is in progress, punt to the cleanup
+ * completion handler rather than handling it here. It's
+ * then that function's responsibility to finish the
+ * cleanup and call the completion function in net80211,
+ * etc.
+ */
+ if (atid->cleanup_inprogress) {
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+ ath_tx_comp_cleanup_unaggr(sc, bf);
+ return;
+ }
+
+ /*
+ * Don't bother with the retry check if all frames
+ * are being failed (eg during queue deletion.)
+ */
+ if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+ ath_tx_aggr_retry_unaggr(sc, bf);
+ return;
+ }
+
+ /* Success? Complete */
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
+ __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
+ if (bf->bf_state.bfs_dobaw) {
+ ath_tx_update_baw(sc, an, atid, bf);
+ bf->bf_state.bfs_dobaw = 0;
+ if (! bf->bf_state.bfs_addedbaw)
+ device_printf(sc->sc_dev,
+ "%s: wasn't added: seqno %d\n",
+ __func__, SEQNO(bf->bf_state.bfs_seqno));
+ }
+
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+
+ ath_tx_default_comp(sc, bf, fail);
+ /* bf is freed at this point */
+}
+
+void
+ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
+{
+ if (bf->bf_state.bfs_aggr)
+ ath_tx_aggr_comp_aggr(sc, bf, fail);
+ else
+ ath_tx_aggr_comp_unaggr(sc, bf, fail);
+}
+
+/*
+ * Schedule some packets from the given node/TID to the hardware.
+ *
+ * This is the aggregate version.
+ */
+void
+ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
+ struct ath_tid *tid)
+{
+ struct ath_buf *bf;
+ struct ath_txq *txq = sc->sc_ac2q[tid->ac];
+ struct ieee80211_tx_ampdu *tap;
+ struct ieee80211_node *ni = &an->an_node;
+ ATH_AGGR_STATUS status;
+ ath_bufhead bf_q;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
+ ATH_TXQ_LOCK_ASSERT(txq);
+
+ tap = ath_tx_get_tx_tid(an, tid->tid);
+
+ if (tid->tid == IEEE80211_NONQOS_TID)
+ device_printf(sc->sc_dev, "%s: called for TID=NONQOS_TID?\n",
+ __func__);
+
+ for (;;) {
+ status = ATH_AGGR_DONE;
+
+ /*
+ * If the upper layer has paused the TID, don't
+ * queue any further packets.
+ *
+ * This can also occur from the completion task because
+ * of packet loss; but as it's serialised with this code,
+ * it won't "appear" halfway through queuing packets.
+ */
+ if (tid->paused)
+ break;
+
+ bf = TAILQ_FIRST(&tid->axq_q);
+ if (bf == NULL) {
+ break;
+ }
+
+ /*
+ * If the packet doesn't fall within the BAW (eg a NULL
+ * data frame), schedule it directly; continue.
+ */
+ if (! bf->bf_state.bfs_dobaw) {
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: non-baw packet\n",
+ __func__);
+ ATH_TXQ_REMOVE(tid, bf, bf_list);
+ bf->bf_state.bfs_aggr = 0;
+ ath_tx_do_ratelookup(sc, bf);
+ ath_tx_rate_fill_rcflags(sc, bf);
+ ath_tx_set_rtscts(sc, bf);
+ ath_tx_setds(sc, bf);
+ ath_tx_chaindesclist(sc, bf);
+ ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
+ ath_tx_set_ratectrl(sc, ni, bf);
+
+ sc->sc_aggr_stats.aggr_nonbaw_pkt++;
+
+ /* Queue the packet; continue */
+ goto queuepkt;
+ }
+
+ TAILQ_INIT(&bf_q);
+
+ /*
+ * Do a rate control lookup on the first frame in the
+ * list. The rate control code needs that to occur
+ * before it can determine whether to TX.
+ * It's inaccurate because the rate control code doesn't
+ * really "do" aggregate lookups, so it only considers
+ * the size of the first frame.
+ */
+ ath_tx_do_ratelookup(sc, bf);
+ bf->bf_state.bfs_rc[3].rix = 0;
+ bf->bf_state.bfs_rc[3].tries = 0;
+ ath_tx_rate_fill_rcflags(sc, bf);
+
+ status = ath_tx_form_aggr(sc, an, tid, &bf_q);
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
+ "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
+
+ /*
+ * No frames to be picked up - out of BAW
+ */
+ if (TAILQ_EMPTY(&bf_q))
+ break;
+
+ /*
+ * This assumes that the descriptor list in the ath_bufhead
+ * are already linked together via bf_next pointers.
+ */
+ bf = TAILQ_FIRST(&bf_q);
+
+ /*
+ * If it's the only frame, send it as a non-aggregate;
+ * assume that ath_tx_form_aggr() has checked whether
+ * it's in the BAW and added it appropriately.
+ */
+ if (bf->bf_state.bfs_nframes == 1) {
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
+ "%s: single-frame aggregate\n", __func__);
+ bf->bf_state.bfs_aggr = 0;
+ ath_tx_set_rtscts(sc, bf);
+ ath_tx_setds(sc, bf);
+ ath_tx_chaindesclist(sc, bf);
+ ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
+ ath_tx_set_ratectrl(sc, ni, bf);
+ if (status == ATH_AGGR_BAW_CLOSED)
+ sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
+ else
+ sc->sc_aggr_stats.aggr_single_pkt++;
+ } else {
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
+ "%s: multi-frame aggregate: %d frames, length %d\n",
+ __func__, bf->bf_state.bfs_nframes,
+ bf->bf_state.bfs_al);
+ bf->bf_state.bfs_aggr = 1;
+ sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
+ sc->sc_aggr_stats.aggr_aggr_pkt++;
+
+ /*
+ * Update the rate and rtscts information based on the
+ * rate decision made by the rate control code;
+ * the first frame in the aggregate needs it.
+ */
+ ath_tx_set_rtscts(sc, bf);
+
+ /*
+ * Setup the relevant descriptor fields
+ * for aggregation. The first descriptor
+ * already points to the rest in the chain.
+ */
+ ath_tx_setds_11n(sc, bf);
+
+ /*
+ * setup first desc with rate and aggr info
+ */
+ ath_tx_set_ratectrl(sc, ni, bf);
+ }
+ queuepkt:
+ //txq = bf->bf_state.bfs_txq;
+
+ /* Set completion handler, multi-frame aggregate or not */
+ bf->bf_comp = ath_tx_aggr_comp;
+
+ if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
+ device_printf(sc->sc_dev, "%s: TID=16?\n", __func__);
+
+ /* Punt to txq */
+ ath_tx_handoff(sc, txq, bf);
+
+ /* Track outstanding buffer count to hardware */
+ /* aggregates are "one" buffer */
+ tid->hwq_depth++;
+
+ /*
+ * Break out if ath_tx_form_aggr() indicated
+ * there can't be any further progress (eg BAW is full.)
+ * Checking for an empty txq is done above.
+ *
+ * XXX locking on txq here?
+ */
+ if (txq->axq_aggr_depth >= sc->sc_hwq_limit ||
+ status == ATH_AGGR_BAW_CLOSED)
+ break;
+ }
+}
+
+/*
+ * Schedule some packets from the given node/TID to the hardware.
+ */
+void
+ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
+ struct ath_tid *tid)
+{
+ struct ath_buf *bf;
+ struct ath_txq *txq = sc->sc_ac2q[tid->ac];
+ struct ieee80211_node *ni = &an->an_node;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
+ __func__, an, tid->tid);
+
+ ATH_TXQ_LOCK_ASSERT(txq);
+
+ /* Check - is AMPDU pending or running? If so, print out something */
+ if (ath_tx_ampdu_pending(sc, an, tid->tid))
+ device_printf(sc->sc_dev, "%s: tid=%d, ampdu pending?\n",
+ __func__, tid->tid);
+ if (ath_tx_ampdu_running(sc, an, tid->tid))
+ device_printf(sc->sc_dev, "%s: tid=%d, ampdu running?\n",
+ __func__, tid->tid);
+
+ for (;;) {
+
+ /*
+ * If the upper layers have paused the TID, don't
+ * queue any further packets.
+ */
+ if (tid->paused)
+ break;
+
+ bf = TAILQ_FIRST(&tid->axq_q);
+ if (bf == NULL) {
+ break;
+ }
+
+ ATH_TXQ_REMOVE(tid, bf, bf_list);
+
+ KASSERT(txq == bf->bf_state.bfs_txq, ("txqs not equal!\n"));
+
+ /* Sanity check! */
+ if (tid->tid != bf->bf_state.bfs_tid) {
+ device_printf(sc->sc_dev, "%s: bfs_tid %d !="
+ " tid %d\n",
+ __func__, bf->bf_state.bfs_tid, tid->tid);
+ }
+ /* Normal completion handler */
+ bf->bf_comp = ath_tx_normal_comp;
+
+ /* Program descriptors + rate control */
+ ath_tx_do_ratelookup(sc, bf);
+ ath_tx_rate_fill_rcflags(sc, bf);
+ ath_tx_set_rtscts(sc, bf);
+ ath_tx_setds(sc, bf);
+ ath_tx_chaindesclist(sc, bf);
+ ath_tx_set_ratectrl(sc, ni, bf);
+
+ /* Track outstanding buffer count to hardware */
+ /* aggregates are "one" buffer */
+ tid->hwq_depth++;
+
+ /* Punt to hardware or software txq */
+ ath_tx_handoff(sc, txq, bf);
+ }
+}
+
+/*
+ * Schedule some packets to the given hardware queue.
+ *
+ * This function walks the list of TIDs (ie, ath_node TIDs
+ * with queued traffic) and attempts to schedule traffic
+ * from them.
+ *
+ * TID scheduling is implemented as a FIFO, with TIDs being
+ * added to the end of the queue after some frames have been
+ * scheduled.
+ */
+void
+ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
+{
+ struct ath_tid *tid, *next, *last;
+
+ ATH_TXQ_LOCK_ASSERT(txq);
+
+ /*
+ * Don't schedule if the hardware queue is busy.
+ * This (hopefully) gives some more time to aggregate
+ * some packets in the aggregation queue.
+ */
+ if (txq->axq_aggr_depth >= sc->sc_hwq_limit) {
+ sc->sc_aggr_stats.aggr_sched_nopkt++;
+ return;
+ }
+
+ last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
+
+ TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
+ /*
+ * Paused TIDs are dequeued but skipped here; they'll
+ * be rescheduled once the addba completes or times out.
+ */
+ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
+ __func__, tid->tid, tid->paused);
+ ath_tx_tid_unsched(sc, tid);
+ if (tid->paused) {
+ continue;
+ }
+ if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
+ ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
+ else
+ ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
+
+ /* Not empty? Re-schedule */
+ if (tid->axq_depth != 0)
+ ath_tx_tid_sched(sc, tid);
+
+ /* Give the software queue time to aggregate more packets */
+ if (txq->axq_aggr_depth >= sc->sc_hwq_limit) {
+ break;
+ }
+
+ /*
+ * If this was the last entry on the original list, stop.
+ * Otherwise nodes that have been rescheduled onto the end
+ * of the TID FIFO list will just keep being rescheduled.
+ */
+ if (tid == last)
+ break;
+ }
+}
+
+/*
+ * TX addba handling
+ */
+
+/*
+ * Return net80211 TID struct pointer, or NULL for none
+ */
+struct ieee80211_tx_ampdu *
+ath_tx_get_tx_tid(struct ath_node *an, int tid)
+{
+ struct ieee80211_node *ni = &an->an_node;
+ struct ieee80211_tx_ampdu *tap;
+ int ac;
+
+ if (tid == IEEE80211_NONQOS_TID)
+ return NULL;
+
+ ac = TID_TO_WME_AC(tid);
+
+ tap = &ni->ni_tx_ampdu[ac];
+ return tap;
+}
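+
+/*
+ * For example (illustrative only): TIDs 4 and 5 both map to
+ * WME_AC_VI, so a single ni_tx_ampdu[] slot tracks the
+ * aggregation state for that pair of TIDs.
+ */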
+
+/*
+ * Is AMPDU-TX running?
+ */
+static int
+ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
+{
+ struct ieee80211_tx_ampdu *tap;
+
+ if (tid == IEEE80211_NONQOS_TID)
+ return 0;
+
+ tap = ath_tx_get_tx_tid(an, tid);
+ if (tap == NULL)
+ return 0; /* Not valid; default to not running */
+
+ return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
+}
+
+/*
+ * Is AMPDU-TX negotiation pending?
+ */
+static int
+ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
+{
+ struct ieee80211_tx_ampdu *tap;
+
+ if (tid == IEEE80211_NONQOS_TID)
+ return 0;
+
+ tap = ath_tx_get_tx_tid(an, tid);
+ if (tap == NULL)
+ return 0; /* Not valid; default to not pending */
+
+ return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
+}
+
+/*
+ * Method to handle sending an ADDBA request.
+ *
+ * We tap this so the relevant flags can be set to pause the TID
+ * whilst waiting for the response.
+ *
+ * XXX there's no timeout handler we can override?
+ */
+int
+ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+ int dialogtoken, int baparamset, int batimeout)
+{
+ struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
+ int tid = WME_AC_TO_TID(tap->txa_ac);
+ struct ath_node *an = ATH_NODE(ni);
+ struct ath_tid *atid = &an->an_tid[tid];
+
+ /*
+ * XXX danger Will Robinson!
+ *
+ * Although the taskqueue may be running and scheduling some more
+ * packets, these should all be _before_ the addba sequence number.
+ * However, net80211 will keep self-assigning sequence numbers
+ * until addba has been negotiated.
+ *
+ * In the past, these packets would be "paused" (which still works
+ * fine, as they're being scheduled to the driver in the same
+ * serialised method which is calling the addba request routine)
+ * and when the aggregation session begins, they'll be dequeued
+ * as aggregate packets and added to the BAW. However, now there's
+ * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
+ * packets. Thus they never get included in the BAW tracking and
+ * this can cause the initial burst of packets after the addba
+ * negotiation to "hang", as they quickly fall outside the BAW.
+ *
+ * The "eventual" solution should be to tag these packets with
+ * dobaw. Although net80211 has given us a sequence number,
+ * it'll be "after" the left edge of the BAW and thus it'll
+ * fall within it.
+ */
+ ath_tx_tid_pause(sc, atid);
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
+ __func__, dialogtoken, baparamset, batimeout);
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: txa_start=%d, ni_txseqs=%d\n",
+ __func__, tap->txa_start, ni->ni_txseqs[tid]);
+
+ return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
+ batimeout);
+}
+
+/*
+ * Handle an ADDBA response.
+ *
+ * We unpause the queue so TX'ing can resume.
+ *
+ * Any packets TX'ed from this point should be "aggregate" (whether
+ * aggregate or not) so the BAW is updated.
+ *
+ * Note! net80211 keeps self-assigning sequence numbers until
+ * ampdu is negotiated. This means the initially-negotiated BAW left
+ * edge won't match the ni->ni_txseq.
+ *
+ * So, being very dirty, the BAW left edge is "slid" here to match
+ * ni->ni_txseq.
+ *
+ * What likely SHOULD happen is that all packets subsequent to the
+ * addba request should be tagged as aggregate and queued as non-aggregate
+ * frames; thus updating the BAW. For now though, I'll just slide the
+ * window.
+ */
+int
+ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+ int status, int code, int batimeout)
+{
+ struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
+ int tid = WME_AC_TO_TID(tap->txa_ac);
+ struct ath_node *an = ATH_NODE(ni);
+ struct ath_tid *atid = &an->an_tid[tid];
+ int r;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: called; status=%d, code=%d, batimeout=%d\n", __func__,
+ status, code, batimeout);
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: txa_start=%d, ni_txseqs=%d\n",
+ __func__, tap->txa_start, ni->ni_txseqs[tid]);
+
+ /*
+ * Call this first, so the interface flags get updated
+ * before the TID is unpaused. Otherwise a race condition
+ * exists where the unpaused TID still doesn't yet have
+ * IEEE80211_AGGR_RUNNING set.
+ */
+ r = sc->sc_addba_response(ni, tap, status, code, batimeout);
+
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+ /*
+ * XXX dirty!
+ * Slide the BAW left edge to wherever net80211 left it for us.
+ * Read above for more information.
+ */
+ tap->txa_start = ni->ni_txseqs[tid];
+ ath_tx_tid_resume(sc, atid);
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+ return r;
+}
+
+
+/*
+ * Stop ADDBA on a queue.
+ */
+void
+ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
+{
+ struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
+ int tid = WME_AC_TO_TID(tap->txa_ac);
+ struct ath_node *an = ATH_NODE(ni);
+ struct ath_tid *atid = &an->an_tid[tid];
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: called\n", __func__);
+
+ /* Pause TID traffic early, so there aren't any races */
+ ath_tx_tid_pause(sc, atid);
+
+ /* There's no need to hold the TXQ lock here */
+ sc->sc_addba_stop(ni, tap);
+
+ /*
+ * ath_tx_cleanup will resume the TID if possible, otherwise
+ * it'll set the cleanup flag, and it'll be unpaused once
+ * things have been cleaned up.
+ */
+ ath_tx_cleanup(sc, an, tid);
+}
+
+/*
+ * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
+ * it simply tears down the aggregation session. Ew.
+ *
+ * It however will call ieee80211_ampdu_stop() which will call
+ * ic->ic_addba_stop().
+ *
+ * XXX This uses a hard-coded max BAR count value; the whole
+ * XXX BAR TX success or failure should be better handled!
+ */
+void
+ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+ int status)
+{
+ struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
+ int tid = WME_AC_TO_TID(tap->txa_ac);
+ struct ath_node *an = ATH_NODE(ni);
+ struct ath_tid *atid = &an->an_tid[tid];
+ int attempts = tap->txa_attempts;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: called; status=%d\n", __func__, status);
+
+ /* Note: This may update the BAW details */
+ sc->sc_bar_response(ni, tap, status);
+
+ /* Unpause the TID */
+ /*
+ * XXX if this is attempt=50, the TID will be downgraded
+ * XXX to a non-aggregate session. So we must unpause the
+ * XXX TID here or it'll never be done.
+ */
+ if (status == 0 || attempts == 50) {
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+ ath_tx_tid_resume(sc, atid);
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+ }
+}
+
+/*
+ * This is called whenever the pending ADDBA request times out.
+ * Unpause and reschedule the TID.
+ */
+void
+ath_addba_response_timeout(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap)
+{
+ struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
+ int tid = WME_AC_TO_TID(tap->txa_ac);
+ struct ath_node *an = ATH_NODE(ni);
+ struct ath_tid *atid = &an->an_tid[tid];
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
+ "%s: called; resuming\n", __func__);
+
+ /* Note: This updates the aggregate state to (again) pending */
+ sc->sc_addba_response_timeout(ni, tap);
+
+ /* Unpause the TID; which reschedules it */
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
+ ath_tx_tid_resume(sc, atid);
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+}
diff --git a/sys/dev/ath/if_ath_tx.h b/sys/dev/ath/if_ath_tx.h
index e181f7a..958acf9 100644
--- a/sys/dev/ath/if_ath_tx.h
+++ b/sys/dev/ath/if_ath_tx.h
@@ -31,7 +31,58 @@
#ifndef __IF_ATH_TX_H__
#define __IF_ATH_TX_H__
+/*
+ * some general macros
+ */
+#define INCR(_l, _sz) do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
+/*
+ * return block-ack bitmap index given sequence and starting sequence
+ */
+#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_RANGE - 1))
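+/*
+ * For example (illustrative only): ATH_BA_INDEX(100, 103) = 3,
+ * and the modular arithmetic survives sequence wrap, eg
+ * ATH_BA_INDEX(4090, 5) = (5 - 4090) & 4095 = 11.
+ */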
+
+#define WME_BA_BMP_SIZE 64
+#define WME_MAX_BA WME_BA_BMP_SIZE
+
+/*
+ * How 'busy' to try and keep the hardware txq
+ */
+#define ATH_AGGR_MIN_QDEPTH 2
+
+/*
+ * Watermark for scheduling TIDs in order to maximise aggregation.
+ *
+ * If hwq_depth is greater than this, don't schedule the TID
+ * for packet scheduling - the hardware is already busy servicing
+ * this TID.
+ *
+ * If hwq_depth is less than this, schedule the TID for packet
+ * scheduling in the completion handler.
+ */
+#define ATH_AGGR_SCHED_HIGH 4
+#define ATH_AGGR_SCHED_LOW 2
+
+/*
+ * return whether a bit at index _n in bitmap _bm is set
+ * _sz is the size of the bitmap
+ */
+#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
+ ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
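+/*
+ * For example (illustrative only): ATH_BA_ISSET(_bm, 37) tests
+ * bit 5 (37 & 31) of word 1 (37 >> 5), ie bit 37 of the 64 bit
+ * block-ack bitmap.
+ */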
+
+
+/* extracting the seqno from buffer seqno */
+#define SEQNO(_a) ((_a) >> IEEE80211_SEQ_SEQ_SHIFT)
+
+/*
+ * Whether the current sequence number is within the
+ * BAW.
+ */
+#define BAW_WITHIN(_start, _bawsz, _seqno) \
+ ((((_seqno) - (_start)) & 4095) < (_bawsz))
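+/*
+ * For example (illustrative only): with _start = 4090 and
+ * _bawsz = 64, seqno 5 is within the BAW (offset 11 < 64)
+ * but seqno 60 is not (offset 66).
+ */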
+
+extern void ath_txq_restart_dma(struct ath_softc *sc, struct ath_txq *txq);
extern void ath_freetx(struct mbuf *m);
+extern void ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an);
+extern void ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq);
extern void ath_txfrag_cleanup(struct ath_softc *sc, ath_bufhead *frags,
struct ieee80211_node *ni);
extern int ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
@@ -41,4 +92,36 @@ extern int ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
extern int ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params);
+/* software queue stuff */
+extern void ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
+ struct ath_txq *txq, struct ath_buf *bf);
+extern void ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an);
+extern void ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
+ struct ath_tid *tid);
+extern void ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
+ struct ath_tid *tid);
+extern void ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq);
+extern void ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf,
+ int fail);
+extern void ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf,
+ int fail);
+extern void ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
+ struct ath_tid *tid, struct ath_buf *bf);
+extern struct ieee80211_tx_ampdu * ath_tx_get_tx_tid(struct ath_node *an,
+ int tid);
+
+/* TX addba handling */
+extern int ath_addba_request(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap, int dialogtoken,
+ int baparamset, int batimeout);
+extern int ath_addba_response(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap, int status,
+ int code, int batimeout);
+extern void ath_addba_stop(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap);
+extern void ath_bar_response(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap, int status);
+extern void ath_addba_response_timeout(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap);
+
#endif
diff --git a/sys/dev/ath/if_ath_tx_ht.c b/sys/dev/ath/if_ath_tx_ht.c
index 348a1499..bec7064 100644
--- a/sys/dev/ath/if_ath_tx_ht.c
+++ b/sys/dev/ath/if_ath_tx_ht.c
@@ -86,17 +86,357 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
+#include <dev/ath/if_ath_tx.h> /* XXX for some support functions */
#include <dev/ath/if_ath_tx_ht.h>
+#include <dev/ath/if_athrate.h>
+#include <dev/ath/if_ath_debug.h>
+
+/*
+ * XXX net80211?
+ */
+#define IEEE80211_AMPDU_SUBFRAME_DEFAULT 32
+
+#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */
+#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
+#define ATH_AGGR_ENCRYPTDELIM 10 /* number of delimiters for encryption padding */
+
+/*
+ * returns delimiter padding required given the packet length
+ */
+#define ATH_AGGR_GET_NDELIM(_len) \
+ (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
+ (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
+
+#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
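+/*
+ * For example (illustrative only): an 80 byte subframe needs
+ * ATH_AGGR_GET_NDELIM(80) = (256 - 80 - 4) >> 2 = 43 pad
+ * delimiters to reach ATH_AGGR_MINPLEN, whilst any subframe of
+ * 252 bytes or more needs none. PADBYTES(1502) = 2, rounding
+ * the subframe up to a 4 byte boundary.
+ */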
+
+int ath_max_4ms_framelen[4][32] = {
+ [MCS_HT20] = {
+ 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
+ 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
+ 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
+ 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
+ },
+ [MCS_HT20_SGI] = {
+ 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
+ 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
+ 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
+ 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
+ },
+ [MCS_HT40] = {
+ 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
+ 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
+ 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
+ 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
+ },
+ [MCS_HT40_SGI] = {
+ 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
+ 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
+ 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
+ 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
+ }
+};
+
+/*
+ * XXX should be in net80211
+ */
+static int ieee80211_mpdudensity_map[] = {
+ 0, /* IEEE80211_HTCAP_MPDUDENSITY_NA */
+ 25, /* IEEE80211_HTCAP_MPDUDENSITY_025 */
+ 50, /* IEEE80211_HTCAP_MPDUDENSITY_05 */
+ 100, /* IEEE80211_HTCAP_MPDUDENSITY_1 */
+ 200, /* IEEE80211_HTCAP_MPDUDENSITY_2 */
+ 400, /* IEEE80211_HTCAP_MPDUDENSITY_4 */
+ 800, /* IEEE80211_HTCAP_MPDUDENSITY_8 */
+ 1600, /* IEEE80211_HTCAP_MPDUDENSITY_16 */
+};
+
+/*
+ * XXX should be in the HAL/net80211 ?
+ */
+#define BITS_PER_BYTE 8
+#define OFDM_PLCP_BITS 22
+#define HT_RC_2_MCS(_rc) ((_rc) & 0x7f)
+#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
+#define L_STF 8
+#define L_LTF 8
+#define L_SIG 4
+#define HT_SIG 8
+#define HT_STF 4
+#define HT_LTF(_ns) (4 * (_ns))
+#define SYMBOL_TIME(_ns) ((_ns) << 2) // ns * 4 us
+#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) // ns * 3.6 us
+#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
+#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
+#define IS_HT_RATE(_rate) ((_rate) & 0x80)
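+/*
+ * For example (illustrative only): SYMBOL_TIME(10) = 40us for
+ * ten full-GI symbols, whilst SYMBOL_TIME_HALFGI(10) = 36us
+ * (3.6us per symbol). Conversely, NUM_SYMBOLS_PER_USEC(8) = 2
+ * and NUM_SYMBOLS_PER_USEC_HALFGI(8) = 2.
+ */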
+
+const uint32_t bits_per_symbol[][2] = {
+ /* 20MHz 40MHz */
+ { 26, 54 }, // 0: BPSK
+ { 52, 108 }, // 1: QPSK 1/2
+ { 78, 162 }, // 2: QPSK 3/4
+ { 104, 216 }, // 3: 16-QAM 1/2
+ { 156, 324 }, // 4: 16-QAM 3/4
+ { 208, 432 }, // 5: 64-QAM 2/3
+ { 234, 486 }, // 6: 64-QAM 3/4
+ { 260, 540 }, // 7: 64-QAM 5/6
+ { 52, 108 }, // 8: BPSK
+ { 104, 216 }, // 9: QPSK 1/2
+ { 156, 324 }, // 10: QPSK 3/4
+ { 208, 432 }, // 11: 16-QAM 1/2
+ { 312, 648 }, // 12: 16-QAM 3/4
+ { 416, 864 }, // 13: 64-QAM 2/3
+ { 468, 972 }, // 14: 64-QAM 3/4
+ { 520, 1080 }, // 15: 64-QAM 5/6
+ { 78, 162 }, // 16: BPSK
+ { 156, 324 }, // 17: QPSK 1/2
+ { 234, 486 }, // 18: QPSK 3/4
+ { 312, 648 }, // 19: 16-QAM 1/2
+ { 468, 972 }, // 20: 16-QAM 3/4
+ { 624, 1296 }, // 21: 64-QAM 2/3
+ { 702, 1458 }, // 22: 64-QAM 3/4
+ { 780, 1620 }, // 23: 64-QAM 5/6
+ { 104, 216 }, // 24: BPSK
+ { 208, 432 }, // 25: QPSK 1/2
+ { 312, 648 }, // 26: QPSK 3/4
+ { 416, 864 }, // 27: 16-QAM 1/2
+ { 624, 1296 }, // 28: 16-QAM 3/4
+ { 832, 1728 }, // 29: 64-QAM 2/3
+ { 936, 1944 }, // 30: 64-QAM 3/4
+ { 1040, 2160 }, // 31: 64-QAM 5/6
+};
+
+/*
+ * Fill in the rate array information based on the current
+ * node configuration and the choices made by the rate
+ * selection code and ath_buf setup code.
+ *
+ * Later on, this may end up also being made by the
+ * rate control code, but for now it can live here.
+ *
+ * This needs to be called just before the packet is
+ * queued to the software queue or hardware queue,
+ * so all of the needed fields in bf_state are setup.
+ */
+void
+ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ieee80211_node *ni = bf->bf_node;
+ struct ieee80211com *ic = ni->ni_ic;
+ const HAL_RATE_TABLE *rt = sc->sc_currates;
+ struct ath_rc_series *rc = bf->bf_state.bfs_rc;
+ uint8_t rate;
+ int i;
+
+ for (i = 0; i < ATH_RC_NUM; i++) {
+ rc[i].flags = 0;
+ if (rc[i].tries == 0)
+ continue;
+
+ rate = rt->info[rc[i].rix].rateCode;
+
+ /*
+ * XXX only do this for legacy rates?
+ */
+ if (bf->bf_state.bfs_shpream)
+ rate |= rt->info[rc[i].rix].shortPreamble;
+
+ /*
+ * Save this, used by the TX and completion code
+ */
+ rc[i].ratecode = rate;
+
+ if (bf->bf_state.bfs_flags &
+ (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA))
+ rc[i].flags |= ATH_RC_RTSCTS_FLAG;
+
+ /* Only enable shortgi, 2040, dual-stream if HT is set */
+ if (IS_HT_RATE(rate)) {
+ rc[i].flags |= ATH_RC_HT_FLAG;
+
+ if (ni->ni_chw == 40)
+ rc[i].flags |= ATH_RC_CW40_FLAG;
+
+ if (ni->ni_chw == 40 &&
+ ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI40 &&
+ ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
+ rc[i].flags |= ATH_RC_SGI_FLAG;
+
+ if (ni->ni_chw == 20 &&
+ ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI20 &&
+ ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20)
+ rc[i].flags |= ATH_RC_SGI_FLAG;
+
+ /* XXX dual stream? and 3-stream? */
+ }
+
+ /*
+ * Calculate the maximum 4ms frame length based
+ * on the MCS rate, SGI and channel width flags.
+ */
+ if ((rc[i].flags & ATH_RC_HT_FLAG) &&
+ (HT_RC_2_MCS(rate) < 32)) {
+ int j;
+ if (rc[i].flags & ATH_RC_CW40_FLAG) {
+ if (rc[i].flags & ATH_RC_SGI_FLAG)
+ j = MCS_HT40_SGI;
+ else
+ j = MCS_HT40;
+ } else {
+ if (rc[i].flags & ATH_RC_SGI_FLAG)
+ j = MCS_HT20_SGI;
+ else
+ j = MCS_HT20;
+ }
+ rc[i].max4msframelen =
+ ath_max_4ms_framelen[j][HT_RC_2_MCS(rate)];
+ } else
+ rc[i].max4msframelen = 0;
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
+ "%s: i=%d, rate=0x%x, flags=0x%x, max4ms=%d\n",
+ __func__, i, rate, rc[i].flags, rc[i].max4msframelen);
+ }
+}
+
+/*
+ * Return the number of delimiters to be added to
+ * meet the minimum required mpdudensity.
+ *
+ * Caller should make sure that the rate is HT.
+ *
+ * TODO: is this delimiter calculation supposed to use the
+ * total frame length, the header length, or the data length
+ * (including delimiters, padding, CRC, etc)?
+ *
+ * TODO: this should ensure that the rate control information
+ * HAS been setup for the first rate.
+ *
+ * TODO: ensure this is only called for MCS rates.
+ *
+ * TODO: enforce MCS < 31
+ */
+static int
+ath_compute_num_delims(struct ath_softc *sc, struct ath_buf *first_bf,
+ uint16_t pktlen)
+{
+ const HAL_RATE_TABLE *rt = sc->sc_currates;
+ struct ieee80211_node *ni = first_bf->bf_node;
+ struct ieee80211vap *vap = ni->ni_vap;
+ int ndelim, mindelim = 0;
+ int mpdudensity; /* in 1/100'th of a microsecond */
+ uint8_t rc, rix, flags;
+ int width, half_gi;
+ uint32_t nsymbits, nsymbols;
+ uint16_t minlen;
+
+ /*
+ * vap->iv_ampdu_density is a value, rather than the actual
+ * density.
+ */
+ if (vap->iv_ampdu_density > IEEE80211_HTCAP_MPDUDENSITY_16)
+ mpdudensity = 1600; /* maximum density */
+ else
+ mpdudensity = ieee80211_mpdudensity_map[vap->iv_ampdu_density];
+
+ /* Select standard number of delimiters based on frame length */
+ ndelim = ATH_AGGR_GET_NDELIM(pktlen);
+
+ /*
+ * If encryption is enabled, add extra delimiters to let the
+ * crypto hardware catch up. This could be tuned per-MAC and
+ * per-rate, but for now we'll simply assume encryption is
+ * always enabled.
+ */
+ ndelim += ATH_AGGR_ENCRYPTDELIM;
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
+ "%s: pktlen=%d, ndelim=%d, mpdudensity=%d\n",
+ __func__, pktlen, ndelim, mpdudensity);
+
+ /*
+ * If the MPDU density is 0, we can return here.
+ * Otherwise, we need to convert the desired mpdudensity
+ * into a byte length, based on the rate in the subframe.
+ */
+ if (mpdudensity == 0)
+ return ndelim;
+
+ /*
+ * Convert the desired mpdu density from microseconds to bytes,
+ * based on the highest rate in the rate series (i.e. the first
+ * rate) to determine the required minimum subframe length.
+ * Take into account whether the highest rate is 20 or 40MHz
+ * and half or full GI.
+ */
+ rix = first_bf->bf_state.bfs_rc[0].rix;
+ rc = rt->info[rix].rateCode;
+ flags = first_bf->bf_state.bfs_rc[0].flags;
+ width = !! (flags & ATH_RC_CW40_FLAG);
+ half_gi = !! (flags & ATH_RC_SGI_FLAG);
+
+ /*
+ * mpdudensity is in 1/100th of a usec, so divide by 100
+ */
+ if (half_gi)
+ nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
+ else
+ nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
+ nsymbols /= 100;
+
+ if (nsymbols == 0)
+ nsymbols = 1;
+
+ nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
+ minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
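+
+ /*
+ * For example (illustrative only): an MPDU density of 8us
+ * (mpdudensity = 800) at MCS 7, HT20, full GI gives
+ * nsymbols = (800 >> 2) / 100 = 2 and nsymbits = 260, so
+ * minlen = (2 * 260) / 8 = 65 bytes per subframe.
+ */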
+
+ /*
+ * Min length is the minimum frame length for the
+ * required MPDU density.
+ */
+ if (pktlen < minlen) {
+ mindelim = (minlen - pktlen) / ATH_AGGR_DELIM_SZ;
+ ndelim = MAX(mindelim, ndelim);
+ }
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
+ "%s: pktlen=%d, minlen=%d, rix=%x, rc=%x, width=%d, hgi=%d, ndelim=%d\n",
+ __func__, pktlen, minlen, rix, rc, width, half_gi, ndelim);
+
+ return ndelim;
+}
+
+/*
+ * Fetch the aggregation limit.
+ *
+ * It's the lowest of the four rate series 4ms frame length.
+ */
+static int
+ath_get_aggr_limit(struct ath_softc *sc, struct ath_buf *bf)
+{
+ int amin = 65530;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (bf->bf_state.bfs_rc[i].tries == 0)
+ continue;
+ amin = MIN(amin, bf->bf_state.bfs_rc[i].max4msframelen);
+ }
+
+ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: max frame len= %d\n",
+ __func__, amin);
+
+ return amin;
+}
/*
 * Setup an 11n rate series structure
*
* This should be called for both legacy and MCS rates.
+ *
+ * It, along with ath_buf_set_rate, must be called -after- a burst
+ * or aggregate is setup.
*/
static void
ath_rateseries_setup(struct ath_softc *sc, struct ieee80211_node *ni,
- HAL_11N_RATE_SERIES *series, unsigned int pktlen, uint8_t *rix,
- uint8_t *try, int flags)
+ struct ath_buf *bf, HAL_11N_RATE_SERIES *series)
{
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
struct ieee80211com *ic = ni->ni_ic;
@@ -104,18 +444,34 @@ ath_rateseries_setup(struct ath_softc *sc, struct ieee80211_node *ni,
HAL_BOOL shortPreamble = AH_FALSE;
const HAL_RATE_TABLE *rt = sc->sc_currates;
int i;
+ int pktlen;
+ int flags = bf->bf_state.bfs_flags;
+ struct ath_rc_series *rc = bf->bf_state.bfs_rc;
if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
(ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE))
shortPreamble = AH_TRUE;
+ /*
+ * If this is the first frame in an aggregate series,
+ * use the aggregate length.
+ */
+ if (bf->bf_state.bfs_aggr)
+ pktlen = bf->bf_state.bfs_al;
+ else
+ pktlen = bf->bf_state.bfs_pktlen;
+
+ /*
+ * XXX TODO: modify this routine to use the bfs_rc[x].flags
+ * XXX fields.
+ */
memset(series, 0, sizeof(HAL_11N_RATE_SERIES) * 4);
for (i = 0; i < 4; i++) {
/* Only set flags for actual TX attempts */
- if (try[i] == 0)
+ if (rc[i].tries == 0)
continue;
- series[i].Tries = try[i];
+ series[i].Tries = rc[i].tries;
/*
* XXX this isn't strictly correct - sc_txchainmask
@@ -154,7 +510,7 @@ ath_rateseries_setup(struct ath_softc *sc, struct ieee80211_node *ni,
ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20)
series[i].RateFlags |= HAL_RATESERIES_HALFGI;
- series[i].Rate = rt->info[rix[i]].rateCode;
+ series[i].Rate = rt->info[rc[i].rix].rateCode;
/* PktDuration doesn't include slot, ACK, RTS, etc timing - it's just the packet duration */
if (series[i].Rate & IEEE80211_RATE_MCS) {
@@ -166,9 +522,10 @@ ath_rateseries_setup(struct ath_softc *sc, struct ieee80211_node *ni,
, series[i].RateFlags & HAL_RATESERIES_HALFGI);
} else {
if (shortPreamble)
- series[i].Rate |= rt->info[rix[i]].shortPreamble;
+ series[i].Rate |=
+ rt->info[rc[i].rix].shortPreamble;
series[i].PktDuration = ath_hal_computetxtime(ah,
- rt, pktlen, rix[i], shortPreamble);
+ rt, pktlen, rc[i].rix, shortPreamble);
}
}
#undef HT_RC_2_STREAMS
@@ -200,25 +557,28 @@ ath_rateseries_print(HAL_11N_RATE_SERIES *series)
*/
void
-ath_buf_set_rate(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
- int pktlen, int flags, uint8_t ctsrate, int is_pspoll, uint8_t *rix, uint8_t *try)
+ath_buf_set_rate(struct ath_softc *sc, struct ieee80211_node *ni,
+ struct ath_buf *bf)
{
HAL_11N_RATE_SERIES series[4];
struct ath_desc *ds = bf->bf_desc;
struct ath_desc *lastds = NULL;
struct ath_hal *ah = sc->sc_ah;
+ int is_pspoll = (bf->bf_state.bfs_atype == HAL_PKT_TYPE_PSPOLL);
+ int ctsrate = bf->bf_state.bfs_ctsrate;
+ int flags = bf->bf_state.bfs_flags;
/* Setup rate scenario */
memset(&series, 0, sizeof(series));
- ath_rateseries_setup(sc, ni, series, pktlen, rix, try, flags);
+ ath_rateseries_setup(sc, ni, bf, series);
/* Enforce AR5416 aggregate limit - can't do RTS w/ an agg frame > 8k */
/* Enforce RTS and CTS are mutually exclusive */
/* Get a pointer to the last tx descriptor in the list */
- lastds = &bf->bf_desc[bf->bf_nseg - 1];
+ lastds = bf->bf_lastds;
#if 0
printf("pktlen: %d; flags 0x%x\n", pktlen, flags);
@@ -238,6 +598,238 @@ ath_buf_set_rate(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf
ath_hal_setuplasttxdesc(ah, lastds, ds);
/* Set burst duration */
- /* This should only be done if aggregate protection is enabled */
+ /*
+ * This is only required when doing an 11n burst, not aggregation;
+ * ie, if there's a second frame in a RIFS or A-MPDU burst
+ * with >1 A-MPDU frame bursting back to back.
+ * Normal A-MPDU doesn't do bursting -between- aggregates.
+ *
+ * .. and it's highly likely this won't ever be implemented
+ */
//ath_hal_set11nburstduration(ah, ds, 8192);
}
+
+/*
+ * Form an aggregate packet list.
+ *
+ * This function enforces the aggregate restrictions/requirements.
+ *
+ * These are:
+ *
+ * + The aggregate size maximum (64k for AR9160 and later, 8K for
+ * AR5416 when doing RTS frame protection.)
+ * + Maximum number of sub-frames for an aggregate
+ * + The aggregate delimiter size, giving MACs time to do whatever is
+ * needed before each frame
+ * + Enforce the BAW limit
+ *
+ * Each descriptor queued should have the DMA setup.
+ * The rate series, descriptor setup, linking, etc is all done
+ * externally. This routine simply chains them together.
+ * ath_tx_setds_11n() will take care of configuring the per-
+ * descriptor setup, and ath_buf_set_rate() will configure the
+ * rate control.
+ *
+ * Note that the TID lock is only grabbed when dequeuing packets from
+ * the TID queue. If some code in another thread adds to the head of this
+ * list, very strange behaviour will occur. Since retransmission is the
+ * only reason this will occur, and this routine is designed to be called
+ * from within the scheduler task, it won't ever clash with the completion
+ * task.
+ *
+ * So if you want to call this from an upper layer context (eg, to direct-
+ * dispatch aggregate frames to the hardware), please keep this in mind.
+ */
+ATH_AGGR_STATUS
+ath_tx_form_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid,
+ ath_bufhead *bf_q)
+{
+ //struct ieee80211_node *ni = &an->an_node;
+ struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
+ int nframes = 0;
+ uint16_t aggr_limit = 0, al = 0, bpad = 0, al_delta, h_baw;
+ struct ieee80211_tx_ampdu *tap;
+ int status = ATH_AGGR_DONE;
+ int prev_frames = 0; /* XXX for AR5416 burst, not done here */
+ int prev_al = 0; /* XXX also for AR5416 burst */
+
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+
+ tap = ath_tx_get_tx_tid(an, tid->tid);
+ if (tap == NULL) {
+ status = ATH_AGGR_ERROR;
+ goto finish;
+ }
+
+ h_baw = tap->txa_wnd / 2;
+
+ for (;;) {
+ bf = TAILQ_FIRST(&tid->axq_q);
+ if (bf_first == NULL)
+ bf_first = bf;
+ if (bf == NULL) {
+ status = ATH_AGGR_DONE;
+ break;
+ } else {
+ /*
+ * Set the aggregation limit based on the rate
+ * control decision made for the first frame
+ * (bf_first) in the aggregate.
+ */
+ aggr_limit = ath_get_aggr_limit(sc, bf_first);
+ }
+
+ /* Set this early just so things don't get confused */
+ bf->bf_next = NULL;
+
+ /*
+ * Don't unlock the tid lock until we're sure we are going
+ * to queue this frame.
+ */
+
+ /*
+ * If the frame doesn't have a sequence number that we're
+ * tracking in the BAW (eg NULL QOS data frame), we can't
+ * aggregate it. Stop the aggregation process; the sender
+ * can then TX what's in the list thus far and then
+ * TX the frame individually.
+ */
+ if (! bf->bf_state.bfs_dobaw) {
+ status = ATH_AGGR_NONAGGR;
+ break;
+ }
+
+ /*
+ * If any of the rates are non-HT, this packet
+ * can't be aggregated.
+ * XXX TODO: add a bf_state flag which gets marked
+ * if any active rate is non-HT.
+ */
+
+ /*
+ * If the packet has a sequence number, do not
+ * step outside of the block-ack window.
+ */
+ if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
+ SEQNO(bf->bf_state.bfs_seqno))) {
+ status = ATH_AGGR_BAW_CLOSED;
+ break;
+ }
+
+ /*
+ * XXX TODO: AR5416 has an 8K aggregation size limit
+ * when RTS is enabled, and RTS is required for dual-stream
+ * rates.
+ *
+ * For now, limit all aggregates for the AR5416 to be 8K.
+ */
+
+ /*
+ * do not exceed aggregation limit
+ */
+ al_delta = ATH_AGGR_DELIM_SZ + bf->bf_state.bfs_pktlen;
+ if (nframes &&
+ (aggr_limit < (al + bpad + al_delta + prev_al))) {
+ status = ATH_AGGR_LIMITED;
+ break;
+ }
+
+ /*
+ * Do not exceed subframe limit.
+ */
+ if ((nframes + prev_frames) >= MIN((h_baw),
+ IEEE80211_AMPDU_SUBFRAME_DEFAULT)) {
+ status = ATH_AGGR_LIMITED;
+ break;
+ }
+
+ /*
+ * this packet is part of an aggregate.
+ */
+ ATH_TXQ_REMOVE(tid, bf, bf_list);
+
+ /* The TID lock is required for the BAW update */
+ ath_tx_addto_baw(sc, an, tid, bf);
+ bf->bf_state.bfs_addedbaw = 1;
+
+ /*
+ * XXX TODO: If any frame in the aggregate requires RTS/CTS,
+ * set the first frame.
+ */
+
+ /*
+ * XXX enforce ACK for aggregate frames (this needs to be
+ * XXX handled more gracefully?)
+ */
+ if (bf->bf_state.bfs_flags & HAL_TXDESC_NOACK) {
+ device_printf(sc->sc_dev,
+ "%s: HAL_TXDESC_NOACK set for an aggregate frame?\n",
+ __func__);
+ bf->bf_state.bfs_flags &= (~HAL_TXDESC_NOACK);
+ }
+
+ /*
+ * Add the now owned buffer (which isn't
+ * on the software TXQ any longer) to our
+ * aggregate frame list.
+ */
+ TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
+ nframes++;
+
+ /* Completion handler */
+ bf->bf_comp = ath_tx_aggr_comp;
+
+ /*
+ * add padding for previous frame to aggregation length
+ */
+ al += bpad + al_delta;
+
+ /*
+ * Calculate delimiters needed for the current frame
+ */
+ bf->bf_state.bfs_ndelim =
+ ath_compute_num_delims(sc, bf_first,
+ bf->bf_state.bfs_pktlen);
+
+ /*
+ * Calculate the padding needed from this set of delimiters,
+ * used when calculating if the next frame will fit in
+ * the aggregate.
+ */
+ bpad = PADBYTES(al_delta) + (bf->bf_state.bfs_ndelim << 2);
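+
+ /*
+ * For example (illustrative only): a 1500 byte subframe
+ * gives al_delta = 1504 and PADBYTES(1504) = 0; with
+ * bfs_ndelim = 10, the next iteration sees
+ * bpad = 0 + (10 << 2) = 40 bytes.
+ */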
+
+ /*
+ * Chain the buffers together
+ */
+ if (bf_prev)
+ bf_prev->bf_next = bf;
+ bf_prev = bf;
+
+ /*
+ * XXX TODO: if any sub-frames have RTS/CTS enabled;
+ * enable it for the entire aggregate.
+ */
+
+#if 0
+ /*
+ * terminate aggregation on a small packet boundary
+ */
+ if (bf->bf_state.bfs_pktlen < ATH_AGGR_MINPLEN) {
+ status = ATH_AGGR_SHORTPKT;
+ break;
+ }
+#endif
+
+ }
+
+finish:
+ /*
+ * Just in case the list was empty when we tried to
+ * dequeue a packet ..
+ */
+ if (bf_first) {
+ bf_first->bf_state.bfs_al = al;
+ bf_first->bf_state.bfs_nframes = nframes;
+ }
+ return status;
+}
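A minimal sketch (not part of this change) of how a TX scheduler might consume ath_tx_form_aggr() and its status codes; the handoff call and the reschedule hint are hypothetical stand-ins for the real dispatch paths in if_ath_tx.c:

/*
 * Hypothetical caller: pull an aggregate off the TID's software
 * queue and react to the returned status.
 */
static void
ath_tx_sched_sketch(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid)
{
	ath_bufhead bf_q;
	ATH_AGGR_STATUS status;

	TAILQ_INIT(&bf_q);
	status = ath_tx_form_aggr(sc, an, tid, &bf_q);

	/* Hand off whatever was formed, even a single frame. */
	if (! TAILQ_EMPTY(&bf_q)) {
		/* ath_tx_handoff(sc, txq, TAILQ_FIRST(&bf_q)); */
	}

	/*
	 * ATH_AGGR_DONE: the software queue is drained.
	 * ATH_AGGR_BAW_CLOSED: wait for completions to move the BAW.
	 * ATH_AGGR_LIMITED/NONAGGR: more traffic remains, reschedule.
	 */
	if (status == ATH_AGGR_LIMITED || status == ATH_AGGR_NONAGGR)
		tid->sched = 1;		/* hypothetical reschedule hint */
}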
diff --git a/sys/dev/ath/if_ath_tx_ht.h b/sys/dev/ath/if_ath_tx_ht.h
index cf16c46..543c56b 100644
--- a/sys/dev/ath/if_ath_tx_ht.h
+++ b/sys/dev/ath/if_ath_tx_ht.h
@@ -31,9 +31,32 @@
#ifndef __IF_ATH_TX_HT_H__
#define __IF_ATH_TX_HT_H__
+enum {
+ MCS_HT20,
+ MCS_HT20_SGI,
+ MCS_HT40,
+ MCS_HT40_SGI,
+};
+
+typedef enum {
+ ATH_AGGR_DONE,
+ ATH_AGGR_BAW_CLOSED,
+ ATH_AGGR_LIMITED,
+ ATH_AGGR_SHORTPKT,
+ ATH_AGGR_8K_LIMITED,
+ ATH_AGGR_ERROR,
+ ATH_AGGR_NONAGGR,
+} ATH_AGGR_STATUS;
+
+extern int ath_max_4ms_framelen[4][32];
+
+extern void ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf);
+
extern void ath_buf_set_rate(struct ath_softc *sc,
- struct ieee80211_node *ni, struct ath_buf *bf,
- int pktlen, int flags, uint8_t ctsrate, int is_pspoll,
- uint8_t *rix, uint8_t *try);
+ struct ieee80211_node *ni, struct ath_buf *bf);
+
+extern ATH_AGGR_STATUS
+ ath_tx_form_aggr(struct ath_softc *sc, struct ath_node *an,
+ struct ath_tid *tid, ath_bufhead *bf_q);
#endif
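The MCS_HT20 .. MCS_HT40_SGI constants index the first dimension of ath_max_4ms_framelen[]; a sketch of the lookup (the function name is hypothetical, and the real table and its consumers live in if_ath_tx_ht.c):

/*
 * Illustrative only: pick the 4ms frame length cap for a given
 * MCS index and channel width/guard interval combination.
 */
static int
max_4ms_framelen_sketch(int mcs, int is_ht40, int is_sgi)
{
	int mode;

	if (is_ht40)
		mode = is_sgi ? MCS_HT40_SGI : MCS_HT40;
	else
		mode = is_sgi ? MCS_HT20_SGI : MCS_HT20;
	return (ath_max_4ms_framelen[mode][mcs & 0x1f]);
}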
diff --git a/sys/dev/ath/if_athioctl.h b/sys/dev/ath/if_athioctl.h
index 065a49c..680694f 100644
--- a/sys/dev/ath/if_athioctl.h
+++ b/sys/dev/ath/if_athioctl.h
@@ -35,6 +35,16 @@
#ifndef _DEV_ATH_ATHIOCTL_H
#define _DEV_ATH_ATHIOCTL_H
+struct ath_tx_aggr_stats {
+ u_int32_t aggr_pkts[64];
+ u_int32_t aggr_single_pkt;
+ u_int32_t aggr_nonbaw_pkt;
+ u_int32_t aggr_aggr_pkt;
+ u_int32_t aggr_baw_closed_single_pkt;
+ u_int32_t aggr_low_hwq_single_pkt;
+ u_int32_t aggr_sched_nopkt;
+};
+
struct ath_stats {
u_int32_t ast_watchdog; /* device reset by watchdog */
u_int32_t ast_hardware; /* fatal hardware error interrupts */
diff --git a/sys/dev/ath/if_athrate.h b/sys/dev/ath/if_athrate.h
index 55b99a2..10f6040 100644
--- a/sys/dev/ath/if_athrate.h
+++ b/sys/dev/ath/if_athrate.h
@@ -77,6 +77,21 @@ struct ath_ratectrl {
struct ath_ratectrl *ath_rate_attach(struct ath_softc *);
void ath_rate_detach(struct ath_ratectrl *);
+#define ATH_RC_NUM 4
+
+#define ATH_RC_DS_FLAG 0x01 /* dual-stream rate */
+#define ATH_RC_CW40_FLAG 0x02 /* use HT40 */
+#define ATH_RC_SGI_FLAG 0x04 /* use short-GI */
+#define ATH_RC_HT_FLAG 0x08 /* use HT */
+#define ATH_RC_RTSCTS_FLAG 0x10 /* enable RTS/CTS protection */
+
+struct ath_rc_series {
+ uint8_t rix; /* ratetable index, not rate code */
+ uint8_t ratecode; /* hardware rate code */
+ uint8_t tries;
+ uint8_t flags;
+ uint32_t max4msframelen;
+};
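A hedged sketch of filling one ath_rc_series entry; the rate index and try count below are made-up values, and in the driver ath_tx_rate_fill_rcflags() derives the flags from the rate table:

/* Example only: a two-try, dual-stream HT40 short-GI series. */
struct ath_rc_series rc0 = {
	.rix = 12,	/* hypothetical rate table index */
	.tries = 2,
	.flags = ATH_RC_HT_FLAG | ATH_RC_DS_FLAG |
	    ATH_RC_CW40_FLAG | ATH_RC_SGI_FLAG,
	/* .ratecode and .max4msframelen are filled in from the
	 * rate table, e.g. by ath_tx_rate_fill_rcflags(). */
};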
/*
* State storage handling.
@@ -105,7 +120,7 @@ void ath_rate_newassoc(struct ath_softc *, struct ath_node *,
* Return the four TX rate index and try counts for the current data packet.
*/
void ath_rate_getxtxrates(struct ath_softc *sc, struct ath_node *an,
- uint8_t rix0, uint8_t *rix, uint8_t *try);
+ uint8_t rix0, struct ath_rc_series *rc);
/*
* Return the transmit info for a data packet. If multi-rate state
@@ -127,8 +142,12 @@ void ath_rate_setupxtxdesc(struct ath_softc *, struct ath_node *,
* supplied transmit descriptor. The routine is invoked both
* for packets that were successfully sent and for those that
* failed (consult the descriptor for details).
+ *
+ * For A-MPDU frames, nframes and nbad indicate how many frames
+ * were in the aggregate, and how many failed.
*/
struct ath_buf;
void ath_rate_tx_complete(struct ath_softc *, struct ath_node *,
- const struct ath_buf *);
+ const struct ath_rc_series *, const struct ath_tx_status *,
+ int pktlen, int nframes, int nbad);
#endif /* _ATH_RATECTRL_H_ */
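A sketch of the new completion call for an A-MPDU, assuming the caller has already walked the block-ack bitmap to count failed subframes (that computation is elided); the bf_state fields come from the ath_buf changes in this commit:

/* Sketch: report an aggregate's fate to the rate control module. */
static void
aggr_rate_complete_sketch(struct ath_softc *sc, struct ath_node *an,
    struct ath_buf *bf_first, const struct ath_tx_status *ts, int nbad)
{
	ath_rate_tx_complete(sc, an, bf_first->bf_state.bfs_rc, ts,
	    bf_first->bf_state.bfs_pktlen,
	    bf_first->bf_state.bfs_nframes, nbad);
}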
diff --git a/sys/dev/ath/if_athvar.h b/sys/dev/ath/if_athvar.h
index 1eaf709..331bea4 100644
--- a/sys/dev/ath/if_athvar.h
+++ b/sys/dev/ath/if_athvar.h
@@ -83,12 +83,73 @@ struct taskqueue;
struct kthread;
struct ath_buf;
+#define ATH_TID_MAX_BUFS (2 * IEEE80211_AGGR_BAWMAX)
+
+/*
+ * Per-TID state
+ *
+ * Note that TID 16 (WME_NUM_TID) is for handling non-QoS frames.
+ */
+struct ath_tid {
+ TAILQ_HEAD(,ath_buf) axq_q; /* pending buffers */
+ u_int axq_depth; /* SW queue depth */
+ char axq_name[48]; /* lock name */
+ struct ath_node *an; /* pointer to parent */
+ int tid; /* tid */
+ int ac; /* which AC gets this traffic */
+ int hwq_depth; /* how many buffers are on HW */
+
+ /*
+ * Entry on the ath_txq; when there's traffic
+ * to send
+ */
+ TAILQ_ENTRY(ath_tid) axq_qelem;
+ int sched;
+ int paused; /* >0 if the TID has been paused */
+
+ /*
+ * Is the TID being cleaned up after a transition
+ * from aggregation to non-aggregation?
+ * When this is set to 1, this TID will be paused
+ * and no further traffic will be queued until all
+ * the hardware packets pending for this TID have been
+ * TXed/completed; at which point (non-aggregation)
+ * traffic will resume being TXed.
+ */
+ int cleanup_inprogress;
+ /*
+ * How many hardware-queued packets are
+ * waiting to be cleaned up.
+ * This is only valid if cleanup_inprogress is 1.
+ */
+ int incomp;
+
+ /*
+ * The following implements a ring representing
+ * the frames in the current BAW.
+ * To avoid copying the array content each time
+ * the BAW is moved, the baw_head/baw_tail point
+ * to the current BAW begin/end; when the BAW is
+ * shifted the head/tail of the array are also
+ * appropriately shifted.
+ */
+ /* active tx buffers, beginning at current BAW */
+ struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
+ /* where the baw head is in the array */
+ int baw_head;
+ /* where the BAW tail is in the array */
+ int baw_tail;
+};
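A sketch of the ring arithmetic implied by the comment above; the driver's actual helper may differ, and the BAW start would be tap->txa_start in practice:

/* Map a sequence number inside the BAW to a tx_buf[] slot. */
static int
baw_slot_sketch(struct ath_tid *tid, int seqno, int baw_start)
{
	int offset;

	/* 802.11 sequence numbers wrap at 4096 (12 bits) */
	offset = (seqno - baw_start) & 4095;
	return ((tid->baw_head + offset) % ATH_TID_MAX_BUFS);
}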
+
/* driver-specific node state */
struct ath_node {
struct ieee80211_node an_node; /* base class */
u_int8_t an_mgmtrix; /* min h/w rate index */
u_int8_t an_mcastrix; /* mcast h/w rate index */
struct ath_buf *an_ff_buf[WME_NUM_AC]; /* ff staging area */
+ struct ath_tid an_tid[IEEE80211_TID_SIZE]; /* per-TID state */
+ char an_name[32]; /* eg "wlan0_a1" */
+ struct mtx an_mtx; /* protecting the ath_node state */
/* variable-length rate control state follows */
};
#define ATH_NODE(ni) ((struct ath_node *)(ni))
@@ -109,7 +170,8 @@ struct ath_node {
#define ATH_RSSI(x) ATH_EP_RND(x, HAL_RSSI_EP_MULTIPLIER)
struct ath_buf {
- STAILQ_ENTRY(ath_buf) bf_list;
+ TAILQ_ENTRY(ath_buf) bf_list;
+ struct ath_buf * bf_next; /* next buffer in the aggregate */
int bf_nseg;
uint16_t bf_txflags; /* tx descriptor flags */
uint16_t bf_flags; /* status flags (below) */
@@ -119,11 +181,65 @@ struct ath_buf {
bus_dmamap_t bf_dmamap; /* DMA map for mbuf chain */
struct mbuf *bf_m; /* mbuf for buf */
struct ieee80211_node *bf_node; /* pointer to the node */
+ struct ath_desc *bf_lastds; /* last descriptor for comp status */
+ struct ath_buf *bf_last; /* last buffer in aggregate, or self for non-aggregate */
bus_size_t bf_mapsize;
#define ATH_MAX_SCATTER ATH_TXDESC /* max(tx,rx,beacon) desc's */
bus_dma_segment_t bf_segs[ATH_MAX_SCATTER];
+
+ /*
+ * Completion function to call on TX complete (fail or not).
+ * "fail" is set to 1 if the queue entries were removed
+ * through a call to ath_tx_draintxq().
+ */
+ void(* bf_comp) (struct ath_softc *sc, struct ath_buf *bf, int fail);
+
+ /* This state is kept to support software retries and aggregation */
+ struct {
+ int bfs_seqno; /* sequence number of this packet */
+ int bfs_retries; /* retry count */
+ uint16_t bfs_tid; /* packet TID (or TID_MAX for no QoS) */
+ uint16_t bfs_pri; /* packet AC priority */
+ struct ath_txq *bfs_txq; /* eventual dest hardware TXQ */
+ uint16_t bfs_pktdur; /* packet duration (at current rate?) */
+ uint16_t bfs_nframes; /* number of frames in aggregate */
+ uint16_t bfs_ndelim; /* number of delims for padding */
+
+ int bfs_aggr:1; /* part of aggregate? */
+ int bfs_aggrburst:1; /* part of aggregate burst? */
+ int bfs_isretried:1; /* retried frame? */
+ int bfs_dobaw:1; /* actually check against BAW? */
+ int bfs_addedbaw:1; /* has been added to the BAW */
+ int bfs_shpream:1; /* use short preamble */
+ int bfs_istxfrag:1; /* is fragmented */
+ int bfs_ismrr:1; /* do multi-rate TX retry */
+ int bfs_doprot:1; /* do RTS/CTS based protection */
+ int bfs_doratelookup:1; /* do rate lookup before each TX */
+ int bfs_nfl; /* next fragment length */
+
+ /*
+ * These fields are passed into the
+ * descriptor setup functions.
+ */
+ HAL_PKT_TYPE bfs_atype; /* packet type */
+ int bfs_pktlen; /* length of this packet */
+ int bfs_hdrlen; /* length of this packet header */
+ uint16_t bfs_al; /* length of aggregate */
+ int bfs_flags; /* HAL descriptor flags */
+ int bfs_txrate0; /* first TX rate */
+ int bfs_try0; /* first try count */
+ uint8_t bfs_ctsrate0; /* Non-zero - use this as ctsrate */
+ int bfs_keyix; /* crypto key index */
+ int bfs_txpower; /* tx power */
+ int bfs_txantenna; /* TX antenna config */
+ enum ieee80211_protmode bfs_protmode;
+ HAL_11N_RATE_SERIES bfs_rc11n[ATH_RC_NUM]; /* 11n TX series */
+ int bfs_ctsrate; /* CTS rate */
+ int bfs_ctsduration; /* CTS duration (pre-11n NICs) */
+ struct ath_rc_series bfs_rc[ATH_RC_NUM]; /* non-11n TX series */
+ } bf_state;
};
-typedef STAILQ_HEAD(, ath_buf) ath_bufhead;
+typedef TAILQ_HEAD(ath_bufhead_s, ath_buf) ath_bufhead;
#define ATH_BUF_BUSY 0x00000002 /* (tx) desc owned by h/w */
@@ -151,19 +267,28 @@ struct ath_descdma {
* hardware queue).
*/
struct ath_txq {
+ struct ath_softc *axq_softc; /* Needed for scheduling */
u_int axq_qnum; /* hardware q number */
#define ATH_TXQ_SWQ (HAL_NUM_TX_QUEUES+1) /* qnum for s/w only queue */
u_int axq_ac; /* WME AC */
u_int axq_flags;
#define ATH_TXQ_PUTPENDING 0x0001 /* ath_hal_puttxbuf pending */
u_int axq_depth; /* queue depth (stat only) */
+ u_int axq_aggr_depth; /* how many aggregates are queued */
u_int axq_intrcnt; /* interrupt count */
u_int32_t *axq_link; /* link ptr in last TX desc */
- STAILQ_HEAD(, ath_buf) axq_q; /* transmit queue */
+ TAILQ_HEAD(axq_q_s, ath_buf) axq_q; /* transmit queue */
struct mtx axq_lock; /* lock on q and link */
char axq_name[12]; /* e.g. "ath0_txq4" */
+
+ /* Per-TID traffic queue for software -> hardware TX */
+ TAILQ_HEAD(axq_t_s, ath_tid) axq_tidq;
};
+#define ATH_NODE_LOCK(_an) mtx_lock(&(_an)->an_mtx)
+#define ATH_NODE_UNLOCK(_an) mtx_unlock(&(_an)->an_mtx)
+#define ATH_NODE_LOCK_ASSERT(_an) mtx_assert(&(_an)->an_mtx, MA_OWNED)
+
#define ATH_TXQ_LOCK_INIT(_sc, _tq) do { \
snprintf((_tq)->axq_name, sizeof((_tq)->axq_name), "%s_txq%u", \
device_get_nameunit((_sc)->sc_dev), (_tq)->axq_qnum); \
@@ -173,19 +298,21 @@ struct ath_txq {
#define ATH_TXQ_LOCK(_tq) mtx_lock(&(_tq)->axq_lock)
#define ATH_TXQ_UNLOCK(_tq) mtx_unlock(&(_tq)->axq_lock)
#define ATH_TXQ_LOCK_ASSERT(_tq) mtx_assert(&(_tq)->axq_lock, MA_OWNED)
+#define ATH_TXQ_IS_LOCKED(_tq) mtx_owned(&(_tq)->axq_lock)
+#define ATH_TXQ_INSERT_HEAD(_tq, _elm, _field) do { \
+ TAILQ_INSERT_HEAD(&(_tq)->axq_q, (_elm), _field); \
+ (_tq)->axq_depth++; \
+} while (0)
#define ATH_TXQ_INSERT_TAIL(_tq, _elm, _field) do { \
- STAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
+ TAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
(_tq)->axq_depth++; \
} while (0)
-#define ATH_TXQ_REMOVE_HEAD(_tq, _field) do { \
- STAILQ_REMOVE_HEAD(&(_tq)->axq_q, _field); \
+#define ATH_TXQ_REMOVE(_tq, _elm, _field) do { \
+ TAILQ_REMOVE(&(_tq)->axq_q, _elm, _field); \
(_tq)->axq_depth--; \
} while (0)
-/* NB: this does not do the "head empty check" that STAILQ_LAST does */
-#define ATH_TXQ_LAST(_tq) \
- ((struct ath_buf *)(void *) \
- ((char *)((_tq)->axq_q.stqh_last) - __offsetof(struct ath_buf, bf_list)))
+#define ATH_TXQ_LAST(_tq, _field) TAILQ_LAST(&(_tq)->axq_q, _field)
struct ath_vap {
struct ieee80211vap av_vap; /* base class */
@@ -205,9 +332,20 @@ struct ath_vap {
struct taskqueue;
struct ath_tx99;
+/*
+ * Whether to reset the TX/RX queue with or without
+ * a queue flush.
+ */
+typedef enum {
+ ATH_RESET_DEFAULT = 0,
+ ATH_RESET_NOLOSS = 1,
+ ATH_RESET_FULL = 2,
+} ATH_RESET_TYPE;
+
struct ath_softc {
struct ifnet *sc_ifp; /* interface common */
struct ath_stats sc_stats; /* interface statistics */
+ struct ath_tx_aggr_stats sc_aggr_stats;
int sc_debug;
int sc_nvaps; /* # vaps */
int sc_nstavaps; /* # station vaps */
@@ -216,12 +354,15 @@ struct ath_softc {
u_int8_t sc_nbssid0; /* # vap's using base mac */
uint32_t sc_bssidmask; /* bssid mask */
+ void (*sc_node_cleanup)(struct ieee80211_node *);
void (*sc_node_free)(struct ieee80211_node *);
device_t sc_dev;
HAL_BUS_TAG sc_st; /* bus space tag */
HAL_BUS_HANDLE sc_sh; /* bus space handle */
bus_dma_tag_t sc_dmat; /* bus DMA tag */
struct mtx sc_mtx; /* master lock (recursive) */
+ struct mtx sc_pcu_mtx; /* PCU access mutex */
+ char sc_pcu_mtx_name[32];
struct taskqueue *sc_tq; /* private task queue */
struct ath_hal *sc_ah; /* Atheros HAL */
struct ath_ratectrl *sc_rc; /* tx rate control support */
@@ -255,7 +396,6 @@ struct ath_softc {
sc_setcca : 1,/* set/clr CCA with TDMA */
sc_resetcal : 1,/* reset cal state next trip */
sc_rxslink : 1,/* do self-linked final descriptor */
- sc_kickpcu : 1,/* kick PCU RX on next RX proc */
sc_rxtsf32 : 1;/* RX dec TSF is 32 bits */
uint32_t sc_eerd; /* regdomain from EEPROM */
uint32_t sc_eecc; /* country code from EEPROM */
@@ -282,7 +422,26 @@ struct ath_softc {
u_int sc_fftxqmin; /* min frames before staging */
u_int sc_fftxqmax; /* max frames before drop */
u_int sc_txantenna; /* tx antenna (fixed or auto) */
+
HAL_INT sc_imask; /* interrupt mask copy */
+
+ /*
+ * These are modified in the interrupt handler as well as
+ * the task queues and other contexts. Thus these must be
+ * protected by a mutex, or they could clash.
+ *
+ * For now, access to these is behind the ATH_LOCK,
+ * just to save time.
+ */
+ uint32_t sc_txq_active; /* bitmap of active TXQs */
+ uint32_t sc_kickpcu; /* whether to kick the PCU */
+ uint32_t sc_rxproc_cnt; /* In RX processing */
+ uint32_t sc_txproc_cnt; /* In TX processing */
+ uint32_t sc_txstart_cnt; /* In TX output (raw/start) */
+ uint32_t sc_inreset_cnt; /* In active reset/chanchange */
+ uint32_t sc_txrx_cnt; /* refcount on stop/start'ing TX */
+ uint32_t sc_intr_cnt; /* refcount on interrupt handling */
+
u_int sc_keymax; /* size of key cache */
u_int8_t sc_keymap[ATH_KEYBYTES];/* key use bit map */
@@ -360,10 +519,38 @@ struct ath_softc {
int sc_txchainmask; /* currently configured TX chainmask */
int sc_rxchainmask; /* currently configured RX chainmask */
+ /*
+ * Aggregation twiddles
+ *
+ * hwq_limit: how deep the hardware queue may get before no
+ * further packets are scheduled to it, regardless of the TID
+ * tid_hwq_lo: how low the per-TID hwq count has to fall before
+ * the TID will be scheduled again
+ * tid_hwq_hi: how many frames may be queued to the HWQ before
+ * the TID stops being scheduled.
+ */
+ int sc_hwq_limit;
+ int sc_tid_hwq_lo;
+ int sc_tid_hwq_hi;
+
/* DFS related state */
void *sc_dfs; /* Used by an optional DFS module */
int sc_dodfs; /* Whether to enable DFS rx filter bits */
struct task sc_dfstask; /* DFS processing task */
+
+ /* TX AMPDU handling */
+ int (*sc_addba_request)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+ int (*sc_addba_response)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+ void (*sc_addba_stop)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *);
+ void (*sc_addba_response_timeout)
+ (struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *);
+ void (*sc_bar_response)(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap,
+ int status);
};
#define ATH_LOCK_INIT(_sc) \
@@ -373,6 +560,40 @@ struct ath_softc {
#define ATH_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define ATH_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define ATH_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
+#define ATH_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
+
+/*
+ * The PCU lock is non-recursive and should be treated as a spinlock.
+ * Although currently the interrupt code is run in netisr context and
+ * doesn't require this, this may change in the future.
+ * Please keep this in mind when protecting certain code paths
+ * with the PCU lock.
+ *
+ * The PCU lock is used to serialise access to the PCU so things such
+ * as TX, RX, state change (eg channel change), channel reset and updates
+ * from interrupt context (eg kickpcu, txqactive bits) do not clash.
+ *
+ * Although the current single-thread taskqueue mechanism protects the
+ * majority of these situations by simply serialising them, there are
+ * a few others which occur at the same time. These include the TX path
+ * (which only acquires ATH_LOCK when recycling buffers to the free list),
+ * ath_set_channel, the channel scanning API and perhaps quite a bit more.
+ */
+#define ATH_PCU_LOCK_INIT(_sc) do {\
+ snprintf((_sc)->sc_pcu_mtx_name, \
+ sizeof((_sc)->sc_pcu_mtx_name), \
+ "%s PCU lock", \
+ device_get_nameunit((_sc)->sc_dev)); \
+ mtx_init(&(_sc)->sc_pcu_mtx, (_sc)->sc_pcu_mtx_name, \
+ NULL, MTX_DEF); \
+ } while (0)
+#define ATH_PCU_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_pcu_mtx)
+#define ATH_PCU_LOCK(_sc) mtx_lock(&(_sc)->sc_pcu_mtx)
+#define ATH_PCU_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_pcu_mtx)
+#define ATH_PCU_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_pcu_mtx, \
+ MA_OWNED)
+#define ATH_PCU_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_pcu_mtx, \
+ MA_NOTOWNED)
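The reset paths pair the PCU lock with the new refcounts roughly like this (a sketch of the pattern, not a copy of the driver code):

/* Sketch of the reset-path pattern (not a copy of if_ath.c). */
static void
ath_reset_pattern_sketch(struct ath_softc *sc)
{
	/* Mark a reset in progress; new TX/RX entry points back off. */
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt++;
	/* ... wait for sc_txproc_cnt/sc_rxproc_cnt to drain ... */
	ATH_PCU_UNLOCK(sc);

	/* ... perform the chip reset with no mutex held ... */

	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	ATH_PCU_UNLOCK(sc);
}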
#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
@@ -686,24 +907,33 @@ void ath_intr(void *);
#define ath_hal_gettxcompletionrates(_ah, _ds, _rates, _tries) \
((*(_ah)->ah_getTxCompletionRates)((_ah), (_ds), (_rates), (_tries)))
-#define ath_hal_chaintxdesc(_ah, _ds, _pktlen, _hdrlen, _type, _keyix, \
- _cipher, _delims, _seglen, _first, _last) \
- ((*(_ah)->ah_chainTxDesc((_ah), (_ds), (_pktlen), (_hdrlen), \
- (_type), (_keyix), (_cipher), (_delims), (_seglen), \
- (_first), (_last))))
#define ath_hal_setupfirsttxdesc(_ah, _ds, _aggrlen, _flags, _txpower, \
_txr0, _txtr0, _antm, _rcr, _rcd) \
((*(_ah)->ah_setupFirstTxDesc)((_ah), (_ds), (_aggrlen), (_flags), \
(_txpower), (_txr0), (_txtr0), (_antm), (_rcr), (_rcd)))
+#define ath_hal_chaintxdesc(_ah, _ds, _pktlen, _hdrlen, _type, _keyix, \
+ _cipher, _delims, _seglen, _first, _last) \
+ ((*(_ah)->ah_chainTxDesc)((_ah), (_ds), (_pktlen), (_hdrlen), \
+ (_type), (_keyix), (_cipher), (_delims), (_seglen), \
+ (_first), (_last)))
#define ath_hal_setuplasttxdesc(_ah, _ds, _ds0) \
((*(_ah)->ah_setupLastTxDesc)((_ah), (_ds), (_ds0)))
+
#define ath_hal_set11nratescenario(_ah, _ds, _dur, _rt, _series, _ns, _flags) \
((*(_ah)->ah_set11nRateScenario)((_ah), (_ds), (_dur), (_rt), \
(_series), (_ns), (_flags)))
+
+#define ath_hal_set11n_aggr_first(_ah, _ds, _len, _num) \
+ ((*(_ah)->ah_set11nAggrFirst)((_ah), (_ds), (_len), (_num)))
#define ath_hal_set11naggrmiddle(_ah, _ds, _num) \
- ((*(_ah)->ah_set11nAggrMiddle((_ah), (_ds), (_num))))
+ ((*(_ah)->ah_set11nAggrMiddle)((_ah), (_ds), (_num)))
+#define ath_hal_set11n_aggr_last(_ah, _ds) \
+ ((*(_ah)->ah_set11nAggrLast)((_ah), (_ds)))
+
#define ath_hal_set11nburstduration(_ah, _ds, _dur) \
((*(_ah)->ah_set11nBurstDuration)((_ah), (_ds), (_dur)))
+#define ath_hal_clr11n_aggr(_ah, _ds) \
+ ((*(_ah)->ah_clr11nAggr)((_ah), (_ds)))
/*
* This is badly-named; you need to set the correct parameters
diff --git a/sys/dev/atkbdc/psm.c b/sys/dev/atkbdc/psm.c
index 0cd17ae..659c780 100644
--- a/sys/dev/atkbdc/psm.c
+++ b/sys/dev/atkbdc/psm.c
@@ -2262,8 +2262,8 @@ psmtimeout(void *arg)
}
/* Add all sysctls under the debug.psm and hw.psm nodes */
-SYSCTL_NODE(_debug, OID_AUTO, psm, CTLFLAG_RD, 0, "ps/2 mouse");
-SYSCTL_NODE(_hw, OID_AUTO, psm, CTLFLAG_RD, 0, "ps/2 mouse");
+static SYSCTL_NODE(_debug, OID_AUTO, psm, CTLFLAG_RD, 0, "ps/2 mouse");
+static SYSCTL_NODE(_hw, OID_AUTO, psm, CTLFLAG_RD, 0, "ps/2 mouse");
SYSCTL_INT(_debug_psm, OID_AUTO, loglevel, CTLFLAG_RW, &verbose, 0,
"Verbosity level");
diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c
index dae8d5f..39a93aa 100644
--- a/sys/dev/bce/if_bce.c
+++ b/sys/dev/bce/if_bce.c
@@ -489,7 +489,7 @@ DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
/****************************************************************************/
/* Tunable device values */
/****************************************************************************/
-SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
/* Allowable values are TRUE or FALSE */
static int bce_verbose = TRUE;
diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c
index 4deecc8..f7d1730 100644
--- a/sys/dev/bge/if_bge.c
+++ b/sys/dev/bge/if_bge.c
@@ -301,6 +301,7 @@ static const struct bge_revision {
{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
+ { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
@@ -349,6 +350,7 @@ static const struct bge_revision const bge_majorrevs[] = {
{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
+ { BGE_ASICREV_BCM5720, "unknown BCM5720" },
{ 0, NULL }
};
@@ -437,6 +439,7 @@ static int bge_init_tx_ring(struct bge_softc *);
static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
+static uint32_t bge_dma_swap_options(struct bge_softc *);
static int bge_has_eaddr(struct bge_softc *);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
@@ -517,7 +520,7 @@ static int bge_allow_asf = 1;
TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
-SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
"Allow ASF mode if available");
@@ -1368,25 +1371,44 @@ bge_stop_fw(struct bge_softc *sc)
int i;
if (sc->bge_asf_mode) {
- bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_PAUSE);
+ bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
- CSR_READ_4(sc, BGE_RX_CPU_EVENT) | (1 << 14));
+ CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
for (i = 0; i < 100; i++ ) {
- if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & (1 << 14)))
+ if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
+ BGE_RX_CPU_DRV_EVENT))
break;
DELAY(10);
}
}
}
+static uint32_t
+bge_dma_swap_options(struct bge_softc *sc)
+{
+ uint32_t dma_options;
+
+ dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
+ BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
+#if BYTE_ORDER == BIG_ENDIAN
+ dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
+#endif
+ if ((sc)->bge_asicrev == BGE_ASICREV_BCM5720)
+ dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
+ BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
+ BGE_MODECTL_HTX2B_ENABLE;
+
+ return (dma_options);
+}
+
/*
* Do endian, PCI and DMA initialization.
*/
static int
bge_chipinit(struct bge_softc *sc)
{
- uint32_t dma_rw_ctl, misc_ctl;
+ uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
uint16_t val;
int i;
@@ -1504,9 +1526,8 @@ bge_chipinit(struct bge_softc *sc)
/*
* Set up general mode register.
*/
- CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
- BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
- BGE_MODECTL_TX_NO_PHDR_CSUM);
+ mode_ctl = bge_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
+ BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
/*
* BCM5701 B5 have a bug causing data corruption when using
@@ -1516,13 +1537,15 @@ bge_chipinit(struct bge_softc *sc)
*/
if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
- BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
+ mode_ctl |= BGE_MODECTL_FORCE_PCI32;
/*
* Tell the firmware the driver is running
*/
if (sc->bge_asf_mode & ASF_STACKUP)
- BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
+ mode_ctl |= BGE_MODECTL_STACKUP;
+
+ CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
/*
* Disable memory write invalidate. Apparently it is not supported
@@ -1582,8 +1605,7 @@ bge_blockinit(struct bge_softc *sc)
}
/* Configure mbuf pool watermarks */
- if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
- sc->bge_asicrev == BGE_ASICREV_BCM57765) {
+ if (BGE_IS_5717_PLUS(sc)) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
if (sc->bge_ifp->if_mtu > ETHERMTU) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
@@ -1718,7 +1740,8 @@ bge_blockinit(struct bge_softc *sc)
BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
}
if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
- sc->bge_asicrev == BGE_ASICREV_BCM5719)
+ sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5720)
rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
else
rcb->bge_nicaddr = BGE_STD_RX_RINGS;
@@ -1751,7 +1774,8 @@ bge_blockinit(struct bge_softc *sc)
rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
- sc->bge_asicrev == BGE_ASICREV_BCM5719)
+ sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5720)
rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
else
rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
@@ -1840,7 +1864,8 @@ bge_blockinit(struct bge_softc *sc)
RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
- sc->bge_asicrev == BGE_ASICREV_BCM5719)
+ sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5720)
RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
else
RCB_WRITE_4(sc, vrcb, bge_nicaddr,
@@ -1854,7 +1879,8 @@ bge_blockinit(struct bge_softc *sc)
* return ring control blocks, located in NIC memory.
*/
if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
- sc->bge_asicrev == BGE_ASICREV_BCM5719) {
+ sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5720) {
/* Should be 17, use 16 until we get an SRAM map. */
limit = 16;
} else if (!BGE_IS_5705_PLUS(sc))
@@ -1898,7 +1924,11 @@ bge_blockinit(struct bge_softc *sc)
BGE_TX_BACKOFF_SEED_MASK);
/* Set inter-packet gap */
- CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
+ val = 0x2620;
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
+ val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
+ (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
+ CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
/*
* Specify which ring to use for packets that don't match
@@ -2053,6 +2083,11 @@ bge_blockinit(struct bge_softc *sc)
sc->bge_asicrev == BGE_ASICREV_BCM57780)
val |= BGE_RDMAMODE_TSO6_ENABLE;
}
+
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
+ val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
+ BGE_RDMAMODE_H2BNC_VLAN_DET;
+
if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
@@ -2063,7 +2098,8 @@ bge_blockinit(struct bge_softc *sc)
* Adjust tx margin to prevent TX data corruption and
* fix internal FIFO overflow.
*/
- if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5720) {
dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
@@ -2080,7 +2116,8 @@ bge_blockinit(struct bge_softc *sc)
BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
}
- if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5720) {
CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
@@ -2239,6 +2276,7 @@ bge_probe(device_t dev)
case BCOM_DEVICEID_BCM5717:
case BCOM_DEVICEID_BCM5718:
case BCOM_DEVICEID_BCM5719:
+ case BCOM_DEVICEID_BCM5720:
id = pci_read_config(dev,
BGE_PCI_GEN2_PRODID_ASICREV, 4);
break;
@@ -2763,6 +2801,7 @@ bge_attach(device_t dev)
case BCOM_DEVICEID_BCM5717:
case BCOM_DEVICEID_BCM5718:
case BCOM_DEVICEID_BCM5719:
+ case BCOM_DEVICEID_BCM5720:
sc->bge_chipid = pci_read_config(dev,
BGE_PCI_GEN2_PRODID_ASICREV, 4);
break;
@@ -2794,12 +2833,14 @@ bge_attach(device_t dev)
* BCM5704 | 1 | X | 1 | X |
* BCM5717 | 1 | 8 | 2 | 9 |
* BCM5719 | 1 | 8 | 2 | 9 |
+ * BCM5720 | 1 | 8 | 2 | 9 |
*
* Other addresses may respond but they are not
* IEEE compliant PHYs and should be ignored.
*/
if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
- sc->bge_asicrev == BGE_ASICREV_BCM5719) {
+ sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5720) {
f = pci_get_function(dev);
if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
if (CSR_READ_4(sc, BGE_SGDIG_STS) &
@@ -2834,6 +2875,7 @@ bge_attach(device_t dev)
switch (sc->bge_asicrev) {
case BGE_ASICREV_BCM5717:
case BGE_ASICREV_BCM5719:
+ case BGE_ASICREV_BCM5720:
case BGE_ASICREV_BCM57765:
sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
@@ -2889,6 +2931,7 @@ bge_attach(device_t dev)
sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
+ sc->bge_asicrev != BGE_ASICREV_BCM5720 &&
sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
sc->bge_asicrev != BGE_ASICREV_BCM57780) {
@@ -3009,7 +3052,8 @@ bge_attach(device_t dev)
*/
sc->bge_flags |= BGE_FLAG_PCIE;
sc->bge_expcap = reg;
- if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5720)
pci_set_max_read_req(dev, 2048);
else if (pci_get_max_read_req(dev) != 4096)
pci_set_max_read_req(dev, 4096);
@@ -3595,8 +3639,7 @@ bge_reset(struct bge_softc *sc)
}
/* Fix up byte swapping. */
- CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
- BGE_MODECTL_BYTESWAP_DATA);
+ CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
/* Tell the ASF firmware we are up */
if (sc->bge_asf_mode & ASF_STACKUP)
@@ -3627,6 +3670,10 @@ bge_reset(struct bge_softc *sc)
}
DELAY(10000);
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
+ BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
+ CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+
return (0);
}
@@ -4107,11 +4154,13 @@ bge_asf_driver_up(struct bge_softc *sc)
else {
sc->bge_asf_count = 2;
bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
- BGE_FW_DRV_ALIVE);
+ BGE_FW_CMD_DRV_ALIVE);
bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
- bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, 3);
+ bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
+ BGE_FW_HB_TIMEOUT_SEC);
CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
- CSR_READ_4(sc, BGE_RX_CPU_EVENT) | (1 << 14));
+ CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
+ BGE_RX_CPU_DRV_EVENT);
}
}
}
@@ -4242,8 +4291,30 @@ bge_stats_update_regs(struct bge_softc *sc)
CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
stats->NoMoreRxBDs +=
CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
- stats->InputDiscards +=
- CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+ /*
+ * XXX
+ * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS
+ * counter on the BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
+ * includes the number of unwanted multicast frames. This comes
+ * from a silicon bug, and the known workaround to get a rough
+ * (not exact) counter is to enable an interrupt on MBUF low
+ * water attention. This can be accomplished by setting the
+ * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, the
+ * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and the
+ * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
+ * However, that change would generate more interrupts, and
+ * there is still the possibility of losing multiple frames
+ * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
+ * Given that the workaround still would not yield a correct
+ * counter, it does not seem worth implementing; so skip
+ * reading the counter on controllers that have the
+ * silicon bug.
+ */
+ if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
+ sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
+ sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
+ stats->InputDiscards +=
+ CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
stats->InputErrors +=
CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
stats->RecvThresholdHit +=
@@ -4839,6 +4910,11 @@ bge_init_locked(struct bge_softc *sc)
mode = CSR_READ_4(sc, BGE_TX_MODE);
if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
+ mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
+ mode |= CSR_READ_4(sc, BGE_TX_MODE) &
+ (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
+ }
/* Turn on transmitter. */
CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
diff --git a/sys/dev/bge/if_bgereg.h b/sys/dev/bge/if_bgereg.h
index 7973c86..3ba4176 100644
--- a/sys/dev/bge/if_bgereg.h
+++ b/sys/dev/bge/if_bgereg.h
@@ -91,8 +91,16 @@
/* Firmware interface */
#define BGE_SRAM_DATA_SIG_MAGIC 0x4B657654 /* 'KevT' */
-#define BGE_FW_DRV_ALIVE 0x00000001
-#define BGE_FW_PAUSE 0x00000002
+
+#define BGE_FW_CMD_DRV_ALIVE 0x00000001
+#define BGE_FW_CMD_PAUSE 0x00000002
+#define BGE_FW_CMD_IPV4_ADDR_CHANGE 0x00000003
+#define BGE_FW_CMD_IPV6_ADDR_CHANGE 0x00000004
+#define BGE_FW_CMD_LINK_UPDATE 0x0000000C
+#define BGE_FW_CMD_DRV_ALIVE2 0x0000000D
+#define BGE_FW_CMD_DRV_ALIVE3 0x0000000E
+
+#define BGE_FW_HB_TIMEOUT_SEC 3
#define BGE_FW_DRV_STATE_START 0x00000001
#define BGE_FW_DRV_STATE_START_DONE 0x80000001
@@ -250,15 +258,6 @@
#define BGE_PCIMISCCTL_ASICREV_SHIFT 16
#define BGE_HIF_SWAP_OPTIONS (BGE_PCIMISCCTL_ENDIAN_WORDSWAP)
-#if BYTE_ORDER == LITTLE_ENDIAN
-#define BGE_DMA_SWAP_OPTIONS \
- BGE_MODECTL_WORDSWAP_NONFRAME| \
- BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA
-#else
-#define BGE_DMA_SWAP_OPTIONS \
- BGE_MODECTL_WORDSWAP_NONFRAME|BGE_MODECTL_BYTESWAP_NONFRAME| \
- BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA
-#endif
#define BGE_INIT \
(BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_CLEAR_INTA| \
@@ -331,6 +330,7 @@
#define BGE_CHIPID_BCM5717_A0 0x05717000
#define BGE_CHIPID_BCM5717_B0 0x05717100
#define BGE_CHIPID_BCM5719_A0 0x05719000
+#define BGE_CHIPID_BCM5720_A0 0x05720000
#define BGE_CHIPID_BCM57765_A0 0x57785000
#define BGE_CHIPID_BCM57765_B0 0x57785100
@@ -355,6 +355,7 @@
/* BGE_PCI_PRODID_ASICREV ASIC rev. identifiers. */
#define BGE_ASICREV_BCM5717 0x5717
#define BGE_ASICREV_BCM5719 0x5719
+#define BGE_ASICREV_BCM5720 0x5720
#define BGE_ASICREV_BCM5761 0x5761
#define BGE_ASICREV_BCM5784 0x5784
#define BGE_ASICREV_BCM5785 0x5785
@@ -799,6 +800,8 @@
#define BGE_TXMODE_BIGBACKOFF_ENABLE 0x00000020
#define BGE_TXMODE_LONGPAUSE_ENABLE 0x00000040
#define BGE_TXMODE_MBUF_LOCKUP_FIX 0x00000100
+#define BGE_TXMODE_JMB_FRM_LEN 0x00400000
+#define BGE_TXMODE_CNT_DN_MODE 0x00800000
/* Transmit MAC status register */
#define BGE_TXSTAT_RX_XOFFED 0x00000001
@@ -812,6 +815,8 @@
#define BGE_TXLEN_SLOTTIME 0x000000FF
#define BGE_TXLEN_IPG 0x00000F00
#define BGE_TXLEN_CRS 0x00003000
+#define BGE_TXLEN_JMB_FRM_LEN_MSK 0x00FF0000
+#define BGE_TXLEN_CNT_DN_VAL_MSK 0xFF000000
/* Receive MAC mode register */
#define BGE_RXMODE_RESET 0x00000001
@@ -1269,6 +1274,7 @@
#define BGE_CPMU_LSPD_1000MB_CLK 0x360C
#define BGE_CPMU_LNK_AWARE_PWRMD 0x3610
#define BGE_CPMU_HST_ACC 0x361C
+#define BGE_CPMU_CLCK_ORIDE 0x3624
#define BGE_CPMU_CLCK_STAT 0x3630
#define BGE_CPMU_MUTEX_REQ 0x365C
#define BGE_CPMU_MUTEX_GNT 0x3660
@@ -1296,6 +1302,9 @@
#define BGE_CPMU_HST_ACC_MACCLK_MASK 0x001F0000
#define BGE_CPMU_HST_ACC_MACCLK_6_25 0x00130000
+/* Clock Speed Override Policy register */
+#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
+
/* CPMU Clock Status register */
#define BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001F0000
#define BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
@@ -1543,6 +1552,7 @@
#define BGE_RDMAMODE_MULT_DMA_RD_DIS 0x01000000
#define BGE_RDMAMODE_TSO4_ENABLE 0x08000000
#define BGE_RDMAMODE_TSO6_ENABLE 0x10000000
+#define BGE_RDMAMODE_H2BNC_VLAN_DET 0x20000000
/* Read DMA status register */
#define BGE_RDMASTAT_PCI_TGT_ABRT_ATTN 0x00000004
@@ -1895,6 +1905,8 @@
#define BGE_EE_DELAY 0x6848
#define BGE_FASTBOOT_PC 0x6894
+#define BGE_RX_CPU_DRV_EVENT 0x00004000
+
/*
* NVRAM Control registers
*/
@@ -1951,14 +1963,18 @@
#define BGE_MODECTL_WORDSWAP_NONFRAME 0x00000004
#define BGE_MODECTL_BYTESWAP_DATA 0x00000010
#define BGE_MODECTL_WORDSWAP_DATA 0x00000020
+#define BGE_MODECTL_BYTESWAP_B2HRX_DATA 0x00000040
+#define BGE_MODECTL_WORDSWAP_B2HRX_DATA 0x00000080
#define BGE_MODECTL_NO_FRAME_CRACKING 0x00000200
#define BGE_MODECTL_NO_RX_CRC 0x00000400
#define BGE_MODECTL_RX_BADFRAMES 0x00000800
#define BGE_MODECTL_NO_TX_INTR 0x00002000
#define BGE_MODECTL_NO_RX_INTR 0x00004000
#define BGE_MODECTL_FORCE_PCI32 0x00008000
+#define BGE_MODECTL_B2HRX_ENABLE 0x00008000
#define BGE_MODECTL_STACKUP 0x00010000
#define BGE_MODECTL_HOST_SEND_BDS 0x00020000
+#define BGE_MODECTL_HTX2B_ENABLE 0x00040000
#define BGE_MODECTL_TX_NO_PHDR_CSUM 0x00100000
#define BGE_MODECTL_RX_NO_PHDR_CSUM 0x00800000
#define BGE_MODECTL_TX_ATTN_INTR 0x01000000
@@ -2288,7 +2304,8 @@ struct bge_status_block {
#define BCOM_DEVICEID_BCM5717 0x1655
#define BCOM_DEVICEID_BCM5718 0x1656
#define BCOM_DEVICEID_BCM5719 0x1657
-#define BCOM_DEVICEID_BCM5720 0x1658
+#define BCOM_DEVICEID_BCM5720_PP 0x1658 /* Not released to public. */
+#define BCOM_DEVICEID_BCM5720 0x165F
#define BCOM_DEVICEID_BCM5721 0x1659
#define BCOM_DEVICEID_BCM5722 0x165A
#define BCOM_DEVICEID_BCM5723 0x165B
diff --git a/sys/dev/bktr/bktr_os.c b/sys/dev/bktr/bktr_os.c
index b667700..4eb39ae 100644
--- a/sys/dev/bktr/bktr_os.c
+++ b/sys/dev/bktr/bktr_os.c
@@ -108,7 +108,7 @@ int bt848_amsound = 0; /* hard-wire AM sound at 6.5 MHz (France),
int bt848_dolby = 0;
#endif
-SYSCTL_NODE(_hw, OID_AUTO, bt848, CTLFLAG_RW, 0, "Bt848 Driver mgmt");
+static SYSCTL_NODE(_hw, OID_AUTO, bt848, CTLFLAG_RW, 0, "Bt848 Driver mgmt");
SYSCTL_INT(_hw_bt848, OID_AUTO, card, CTLFLAG_RW, &bt848_card, -1, "");
SYSCTL_INT(_hw_bt848, OID_AUTO, tuner, CTLFLAG_RW, &bt848_tuner, -1, "");
SYSCTL_INT(_hw_bt848, OID_AUTO, reverse_mute, CTLFLAG_RW, &bt848_reverse_mute, -1, "");
diff --git a/sys/dev/bktr/bktr_reg.h b/sys/dev/bktr/bktr_reg.h
index 982fe2c..4a3af1a 100644
--- a/sys/dev/bktr/bktr_reg.h
+++ b/sys/dev/bktr/bktr_reg.h
@@ -717,10 +717,6 @@ struct bt848_card_sig {
/* ioctl_cmd_t int on old versions, u_long on new versions */
/***********************************************************/
-#if (__FreeBSD__ == 2)
-typedef int ioctl_cmd_t;
-#endif
-
#if defined(__FreeBSD__)
typedef u_long ioctl_cmd_t;
#endif
diff --git a/sys/dev/bm/if_bm.c b/sys/dev/bm/if_bm.c
index 1977f92..c478e7b 100644
--- a/sys/dev/bm/if_bm.c
+++ b/sys/dev/bm/if_bm.c
@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/ofw/ofw_bus.h>
@@ -105,17 +106,28 @@ static void bm_tick (void *xsc);
static int bm_ifmedia_upd (struct ifnet *);
static void bm_ifmedia_sts (struct ifnet *, struct ifmediareq *);
-static void bm_miicsr_dwrite (struct bm_softc *, u_int16_t);
-static void bm_mii_writebit (struct bm_softc *, int);
-static int bm_mii_readbit (struct bm_softc *);
-static void bm_mii_sync (struct bm_softc *);
-static void bm_mii_send (struct bm_softc *, u_int32_t, int);
-static int bm_mii_readreg (struct bm_softc *, struct bm_mii_frame *);
-static int bm_mii_writereg (struct bm_softc *, struct bm_mii_frame *);
static int bm_miibus_readreg (device_t, int, int);
static int bm_miibus_writereg (device_t, int, int, int);
static void bm_miibus_statchg (device_t);
+/*
+ * MII bit-bang glue
+ */
+static uint32_t bm_mii_bitbang_read(device_t);
+static void bm_mii_bitbang_write(device_t, uint32_t);
+
+static const struct mii_bitbang_ops bm_mii_bitbang_ops = {
+ bm_mii_bitbang_read,
+ bm_mii_bitbang_write,
+ {
+ BM_MII_DATAOUT, /* MII_BIT_MDO */
+ BM_MII_DATAIN, /* MII_BIT_MDI */
+ BM_MII_CLK, /* MII_BIT_MDC */
+ BM_MII_OENABLE, /* MII_BIT_DIR_HOST_PHY */
+ 0, /* MII_BIT_DIR_PHY_HOST */
+ }
+};
+
static device_method_t bm_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, bm_probe),
@@ -150,171 +162,36 @@ DRIVER_MODULE(miibus, bm, miibus_driver, miibus_devclass, 0, 0);
*/
/*
- * Write to the MII csr, introducing a delay to allow valid
- * MII clock pulses to be formed
+ * Write the MII serial port for the MII bit-bang module.
*/
static void
-bm_miicsr_dwrite(struct bm_softc *sc, u_int16_t val)
-{
- CSR_WRITE_2(sc, BM_MII_CSR, val);
- /*
- * Assume this is a clock toggle and generate a 1us delay
- * to cover both MII's 160ns high/low minimum and 400ns
- * cycle miniumum
- */
- DELAY(1);
-}
-
-/*
- * Write a bit to the MII bus.
- */
-static void
-bm_mii_writebit(struct bm_softc *sc, int bit)
-{
- u_int16_t regval;
-
- regval = BM_MII_OENABLE;
- if (bit)
- regval |= BM_MII_DATAOUT;
-
- bm_miicsr_dwrite(sc, regval);
- bm_miicsr_dwrite(sc, regval | BM_MII_CLK);
- bm_miicsr_dwrite(sc, regval);
-}
-
-/*
- * Read a bit from the MII bus.
- */
-static int
-bm_mii_readbit(struct bm_softc *sc)
-{
- u_int16_t regval, bitin;
-
- /* ~BM_MII_OENABLE */
- regval = 0;
-
- bm_miicsr_dwrite(sc, regval);
- bm_miicsr_dwrite(sc, regval | BM_MII_CLK);
- bm_miicsr_dwrite(sc, regval);
- bitin = CSR_READ_2(sc, BM_MII_CSR) & BM_MII_DATAIN;
-
- return (bitin == BM_MII_DATAIN);
-}
-
-/*
- * Sync the PHYs by setting data bit and strobing the clock 32 times.
- */
-static void
-bm_mii_sync(struct bm_softc *sc)
-{
- int i;
- u_int16_t regval;
-
- regval = BM_MII_OENABLE | BM_MII_DATAOUT;
-
- bm_miicsr_dwrite(sc, regval);
- for (i = 0; i < 32; i++) {
- bm_miicsr_dwrite(sc, regval | BM_MII_CLK);
- bm_miicsr_dwrite(sc, regval);
- }
-}
-
-/*
- * Clock a series of bits through the MII.
- */
-static void
-bm_mii_send(struct bm_softc *sc, u_int32_t bits, int cnt)
-{
- int i;
-
- for (i = (0x1 << (cnt - 1)); i; i >>= 1)
- bm_mii_writebit(sc, bits & i);
-}
-
-/*
- * Read a PHY register through the MII.
- */
-static int
-bm_mii_readreg(struct bm_softc *sc, struct bm_mii_frame *frame)
+bm_mii_bitbang_write(device_t dev, uint32_t val)
{
- int i, ack, bit;
-
- /*
- * Set up frame for RX.
- */
- frame->mii_stdelim = BM_MII_STARTDELIM;
- frame->mii_opcode = BM_MII_READOP;
- frame->mii_turnaround = 0;
- frame->mii_data = 0;
-
- /*
- * Sync the PHYs
- */
- bm_mii_sync(sc);
-
- /*
- * Send command/address info
- */
- bm_mii_send(sc, frame->mii_stdelim, 2);
- bm_mii_send(sc, frame->mii_opcode, 2);
- bm_mii_send(sc, frame->mii_phyaddr, 5);
- bm_mii_send(sc, frame->mii_regaddr, 5);
-
- /*
- * Check for ack.
- */
- ack = bm_mii_readbit(sc);
-
- /*
- * Now try reading data bits. If the ack failed, we still
- * need to clock through 16 cycles to keep the PHY(s) in sync.
- */
- for (i = 0x8000; i; i >>= 1) {
- bit = bm_mii_readbit(sc);
- if (!ack && bit)
- frame->mii_data |= i;
- }
+ struct bm_softc *sc;
- /*
- * Skip through idle bit-times
- */
- bm_mii_writebit(sc, 0);
- bm_mii_writebit(sc, 0);
+ sc = device_get_softc(dev);
- return ((ack) ? 1 : 0);
+ CSR_WRITE_2(sc, BM_MII_CSR, val);
+ CSR_BARRIER(sc, BM_MII_CSR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
- * Write to a PHY register through the MII.
+ * Read the MII serial port for the MII bit-bang module.
*/
-static int
-bm_mii_writereg(struct bm_softc *sc, struct bm_mii_frame *frame)
+static uint32_t
+bm_mii_bitbang_read(device_t dev)
{
- /*
- * Set up frame for tx
- */
- frame->mii_stdelim = BM_MII_STARTDELIM;
- frame->mii_opcode = BM_MII_WRITEOP;
- frame->mii_turnaround = BM_MII_TURNAROUND;
-
- /*
- * Sync the phy and start the bitbang write sequence
- */
- bm_mii_sync(sc);
+ struct bm_softc *sc;
+ uint32_t reg;
- bm_mii_send(sc, frame->mii_stdelim, 2);
- bm_mii_send(sc, frame->mii_opcode, 2);
- bm_mii_send(sc, frame->mii_phyaddr, 5);
- bm_mii_send(sc, frame->mii_regaddr, 5);
- bm_mii_send(sc, frame->mii_turnaround, 2);
- bm_mii_send(sc, frame->mii_data, 16);
+ sc = device_get_softc(dev);
- /*
- * Idle bit.
- */
- bm_mii_writebit(sc, 0);
+ reg = CSR_READ_2(sc, BM_MII_CSR);
+ CSR_BARRIER(sc, BM_MII_CSR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- return (0);
+ return (reg);
}
/*
@@ -323,34 +200,15 @@ bm_mii_writereg(struct bm_softc *sc, struct bm_mii_frame *frame)
static int
bm_miibus_readreg(device_t dev, int phy, int reg)
{
- struct bm_softc *sc;
- struct bm_mii_frame frame;
-
- sc = device_get_softc(dev);
- bzero(&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- bm_mii_readreg(sc, &frame);
-
- return (frame.mii_data);
+ return (mii_bitbang_readreg(dev, &bm_mii_bitbang_ops, phy, reg));
}
static int
bm_miibus_writereg(device_t dev, int phy, int reg, int data)
{
- struct bm_softc *sc;
- struct bm_mii_frame frame;
-
- sc = device_get_softc(dev);
- bzero(&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- frame.mii_data = data;
- bm_mii_writereg(sc, &frame);
+ mii_bitbang_writereg(dev, &bm_mii_bitbang_ops, phy, reg, data);
return (0);
}
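With the conversion above, mii_bitbang(4) generates the MDIO framing (preamble, start/opcode bits, turnaround) itself and only calls back into the driver's two pin-toggle routines; a usage sketch, with MII_BMSR coming from dev/mii/mii.h:

/* Sketch: one full PHY status register read via the shared module. */
static int
bm_read_bmsr_sketch(device_t dev, int phy)
{
	return (mii_bitbang_readreg(dev, &bm_mii_bitbang_ops, phy,
	    MII_BMSR));
}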
diff --git a/sys/dev/bm/if_bmreg.h b/sys/dev/bm/if_bmreg.h
index 15ee1fe..39d9b48 100644
--- a/sys/dev/bm/if_bmreg.h
+++ b/sys/dev/bm/if_bmreg.h
@@ -129,14 +129,6 @@
#define BM_MII_DATAIN 0x0008 /* MDIO data in */
/*
- * MII constants
- */
-#define BM_MII_STARTDELIM 0x01
-#define BM_MII_READOP 0x02
-#define BM_MII_WRITEOP 0x01
-#define BM_MII_TURNAROUND 0x02
-
-/*
* Various flags
*/
@@ -174,3 +166,5 @@
#define CSR_READ_1(sc, reg) \
bus_read_1(sc->sc_memr, reg)
+#define CSR_BARRIER(sc, reg, length, flags) \
+ bus_barrier(sc->sc_memr, reg, length, flags)
diff --git a/sys/dev/bm/if_bmvar.h b/sys/dev/bm/if_bmvar.h
index 7d31489..b50d65b 100644
--- a/sys/dev/bm/if_bmvar.h
+++ b/sys/dev/bm/if_bmvar.h
@@ -46,7 +46,6 @@
/*
* software state for transmit job mbufs (may be elements of mbuf chains)
*/
-
struct bm_txsoft {
struct mbuf *txs_mbuf; /* head of our mbuf chain */
bus_dmamap_t txs_dmamap; /* our DMA map */
@@ -71,7 +70,6 @@ struct bm_rxsoft {
bus_dma_segment_t segment;
};
-
struct bm_softc {
struct ifnet *sc_ifp;
struct mtx sc_mtx;
@@ -113,13 +111,3 @@ struct bm_softc {
dbdma_channel_t *sc_txdma, *sc_rxdma;
};
-
-struct bm_mii_frame {
- u_int8_t mii_stdelim;
- u_int8_t mii_opcode;
- u_int8_t mii_phyaddr;
- u_int8_t mii_regaddr;
- u_int8_t mii_turnaround;
- u_int16_t mii_data;
-};
-
diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c
index c924a34..af10ace 100644
--- a/sys/dev/bwn/if_bwn.c
+++ b/sys/dev/bwn/if_bwn.c
@@ -73,7 +73,8 @@ __FBSDID("$FreeBSD$");
#include <dev/bwn/if_bwnreg.h>
#include <dev/bwn/if_bwnvar.h>
-SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0, "Broadcom driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0,
+ "Broadcom driver parameters");
/*
* Tunable & sysctl variables.
diff --git a/sys/dev/bxe/if_bxe.c b/sys/dev/bxe/if_bxe.c
index 930a4f0..9957359 100644
--- a/sys/dev/bxe/if_bxe.c
+++ b/sys/dev/bxe/if_bxe.c
@@ -423,7 +423,7 @@ DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
/*
* Tunable device values
*/
-SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
/* Allowable values are TRUE (1) or FALSE (0). */
static int bxe_dcc_enable = FALSE;
diff --git a/sys/dev/cardbus/cardbus.c b/sys/dev/cardbus/cardbus.c
index 39d20b5..ea75770 100644
--- a/sys/dev/cardbus/cardbus.c
+++ b/sys/dev/cardbus/cardbus.c
@@ -54,7 +54,7 @@ __FBSDID("$FreeBSD$");
#include "pcib_if.h"
/* sysctl vars */
-SYSCTL_NODE(_hw, OID_AUTO, cardbus, CTLFLAG_RD, 0, "CardBus parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, cardbus, CTLFLAG_RD, 0, "CardBus parameters");
int cardbus_debug = 0;
TUNABLE_INT("hw.cardbus.debug", &cardbus_debug);
diff --git a/sys/dev/cesa/cesa.c b/sys/dev/cesa/cesa.c
new file mode 100644
index 0000000..54bb8e3
--- /dev/null
+++ b/sys/dev/cesa/cesa.c
@@ -0,0 +1,1614 @@
+/*-
+ * Copyright (C) 2009-2011 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * CESA SRAM Memory Map:
+ *
+ * +------------------------+ <= sc->sc_sram_base + CESA_SRAM_SIZE
+ * | |
+ * | DATA |
+ * | |
+ * +------------------------+ <= sc->sc_sram_base + CESA_DATA(0)
+ * | struct cesa_sa_data |
+ * +------------------------+
+ * | struct cesa_sa_hdesc |
+ * +------------------------+ <= sc->sc_sram_base
+ */
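+
+/*
+ * A sketch of the offsets implied by the map above, assuming the
+ * CESA_DATA() macro (defined in cesa.h, not shown in this hunk)
+ * expands to an offset past the two fixed structures:
+ *
+ *   hdesc   at sc_sram_base + 0
+ *   sa_data at sc_sram_base + sizeof(struct cesa_sa_hdesc)
+ *   data    at sc_sram_base + sizeof(struct cesa_sa_hdesc) +
+ *           sizeof(struct cesa_sa_data) == sc_sram_base + CESA_DATA(0)
+ */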
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+#include <machine/resource.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <sys/md5.h>
+#include <crypto/sha1.h>
+#include <crypto/rijndael/rijndael.h>
+#include <opencrypto/cryptodev.h>
+#include "cryptodev_if.h"
+
+#include <arm/mv/mvreg.h>
+#include <arm/mv/mvwin.h>
+#include <arm/mv/mvvar.h>
+#include "cesa.h"
+
+#undef DEBUG
+
+static int cesa_probe(device_t);
+static int cesa_attach(device_t);
+static int cesa_detach(device_t);
+static void cesa_intr(void *);
+static int cesa_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int cesa_freesession(device_t, u_int64_t);
+static int cesa_process(device_t, struct cryptop *, int);
+
+static struct resource_spec cesa_res_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
+ { -1, 0 }
+};
+
+static device_method_t cesa_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, cesa_probe),
+ DEVMETHOD(device_attach, cesa_attach),
+ DEVMETHOD(device_detach, cesa_detach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+
+ /* Crypto device methods */
+ DEVMETHOD(cryptodev_newsession, cesa_newsession),
+ DEVMETHOD(cryptodev_freesession,cesa_freesession),
+ DEVMETHOD(cryptodev_process, cesa_process),
+
+ { 0, 0 }
+};
+
+static driver_t cesa_driver = {
+ "cesa",
+ cesa_methods,
+ sizeof (struct cesa_softc)
+};
+static devclass_t cesa_devclass;
+
+DRIVER_MODULE(cesa, simplebus, cesa_driver, cesa_devclass, 0, 0);
+MODULE_DEPEND(cesa, crypto, 1, 1, 1);
+
+static void
+cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd)
+{
+#ifdef DEBUG
+ device_t dev;
+
+ dev = sc->sc_dev;
+ device_printf(dev, "CESA SA Hardware Descriptor:\n");
+ device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config);
+ device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src);
+ device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst);
+ device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen);
+ device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key);
+ device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv);
+ device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf);
+ device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src);
+ device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst);
+ device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen);
+ device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen);
+ device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in);
+ device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out);
+#endif
+}
+
+static void
+cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ struct cesa_dma_mem *cdm;
+
+ if (error)
+ return;
+
+ KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1."));
+ cdm = arg;
+ cdm->cdm_paddr = segs->ds_addr;
+}
+
+static int
+cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm,
+ bus_size_t size)
+{
+ int error;
+
+ KASSERT(cdm->cdm_vaddr == NULL,
+ ("%s(): DMA memory descriptor in use.", __func__));
+
+ error = bus_dma_tag_create(NULL, /* parent */
+ PAGE_SIZE, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filtfunc, filtfuncarg */
+ size, 1, /* maxsize, nsegments */
+ size, 0, /* maxsegsz, flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &cdm->cdm_tag); /* dmat */
+ if (error) {
+ device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
+ " %i!\n", error);
+
+ goto err1;
+ }
+
+ error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map);
+ if (error) {
+ device_printf(sc->sc_dev, "failed to allocate DMA safe"
+ " memory, error %i!\n", error);
+
+ goto err2;
+ }
+
+ error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr,
+ size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT);
+ if (error) {
+ device_printf(sc->sc_dev, "cannot get address of the DMA"
+ " memory, error %i\n", error);
+
+ goto err3;
+ }
+
+ return (0);
+err3:
+ bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
+err2:
+ bus_dma_tag_destroy(cdm->cdm_tag);
+err1:
+ cdm->cdm_vaddr = NULL;
+ return (error);
+}
+
+static void
+cesa_free_dma_mem(struct cesa_dma_mem *cdm)
+{
+
+ bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map);
+ bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
+ bus_dma_tag_destroy(cdm->cdm_tag);
+ cdm->cdm_vaddr = NULL;
+}
+
+static void
+cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op)
+{
+
+ /* Sync only if dma memory is valid */
+ if (cdm->cdm_vaddr != NULL)
+ bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op);
+}
+
+static void
+cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op)
+{
+
+ cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op);
+ cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op);
+ cesa_sync_dma_mem(&sc->sc_requests_cdm, op);
+}
+
+static struct cesa_session *
+cesa_alloc_session(struct cesa_softc *sc)
+{
+ struct cesa_session *cs;
+
+ CESA_GENERIC_ALLOC_LOCKED(sc, cs, sessions);
+
+ return (cs);
+}
+
+static struct cesa_session *
+cesa_get_session(struct cesa_softc *sc, uint32_t sid)
+{
+
+ if (sid >= CESA_SESSIONS)
+ return (NULL);
+
+ return (&sc->sc_sessions[sid]);
+}
+
+static void
+cesa_free_session(struct cesa_softc *sc, struct cesa_session *cs)
+{
+
+ CESA_GENERIC_FREE_LOCKED(sc, cs, sessions);
+}
+
+static struct cesa_request *
+cesa_alloc_request(struct cesa_softc *sc)
+{
+ struct cesa_request *cr;
+
+ CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests);
+ if (!cr)
+ return (NULL);
+
+ STAILQ_INIT(&cr->cr_tdesc);
+ STAILQ_INIT(&cr->cr_sdesc);
+
+ return (cr);
+}
+
+static void
+cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr)
+{
+
+ /* Free TDMA descriptors assigned to this request */
+ CESA_LOCK(sc, tdesc);
+ STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc);
+ CESA_UNLOCK(sc, tdesc);
+
+ /* Free SA descriptors assigned to this request */
+ CESA_LOCK(sc, sdesc);
+ STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc);
+ CESA_UNLOCK(sc, sdesc);
+
+ /* Unload DMA memory associated with the request */
+ if (cr->cr_dmap_loaded) {
+ bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap);
+ cr->cr_dmap_loaded = 0;
+ }
+
+ CESA_GENERIC_FREE_LOCKED(sc, cr, requests);
+}
+
+static void
+cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr)
+{
+
+ CESA_LOCK(sc, requests);
+ STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq);
+ CESA_UNLOCK(sc, requests);
+}
+
+static struct cesa_tdma_desc *
+cesa_alloc_tdesc(struct cesa_softc *sc)
+{
+ struct cesa_tdma_desc *ctd;
+
+ CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc);
+
+ if (!ctd)
+ device_printf(sc->sc_dev, "TDMA descriptors pool exhaused. "
+ "Consider increasing CESA_TDMA_DESCRIPTORS.\n");
+
+ return (ctd);
+}
+
+static struct cesa_sa_desc *
+cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr)
+{
+ struct cesa_sa_desc *csd;
+
+ CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc);
+ if (!csd) {
+ device_printf(sc->sc_dev, "SA descriptors pool exhaused. "
+ "Consider increasing CESA_SA_DESCRIPTORS.\n");
+ return (NULL);
+ }
+
+ STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq);
+
+ /* Fill-in SA descriptor with default values */
+ csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key);
+ csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv);
+ csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv);
+ csd->csd_cshd->cshd_enc_src = 0;
+ csd->csd_cshd->cshd_enc_dst = 0;
+ csd->csd_cshd->cshd_enc_dlen = 0;
+ csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash);
+ csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in);
+ csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out);
+ csd->csd_cshd->cshd_mac_src = 0;
+ csd->csd_cshd->cshd_mac_dlen = 0;
+
+ return (csd);
+}
+
+static struct cesa_tdma_desc *
+cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src,
+ bus_size_t size)
+{
+ struct cesa_tdma_desc *ctd;
+
+ ctd = cesa_alloc_tdesc(sc);
+ if (!ctd)
+ return (NULL);
+
+ ctd->ctd_cthd->cthd_dst = dst;
+ ctd->ctd_cthd->cthd_src = src;
+ ctd->ctd_cthd->cthd_byte_count = size;
+
+ /* Handle the special zero-sized control packet */
+ if (size != 0)
+ ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED;
+ else
+ ctd->ctd_cthd->cthd_flags = 0;
+
+ return (ctd);
+}
+
+static struct cesa_tdma_desc *
+cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
+{
+
+ return (cesa_tdma_copy(sc, sc->sc_sram_base +
+ sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr,
+ sizeof(struct cesa_sa_data)));
+}
+
+static struct cesa_tdma_desc *
+cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
+{
+
+ return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base +
+ sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data)));
+}
+
+static struct cesa_tdma_desc *
+cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd)
+{
+
+ return (cesa_tdma_copy(sc, sc->sc_sram_base, csd->csd_cshd_paddr,
+ sizeof(struct cesa_sa_hdesc)));
+}
+
+static void
+cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd)
+{
+ struct cesa_tdma_desc *ctd_prev;
+
+ if (!STAILQ_EMPTY(&cr->cr_tdesc)) {
+ ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq);
+ ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr;
+ }
+
+ ctd->ctd_cthd->cthd_next = 0;
+ STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq);
+}
+
+static int
+cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr,
+ struct cesa_packet *cp, struct cesa_sa_desc *csd)
+{
+ struct cesa_tdma_desc *ctd, *tmp;
+
+ /* Copy SA descriptor for this packet */
+ ctd = cesa_tdma_copy_sdesc(sc, csd);
+ if (!ctd)
+ return (ENOMEM);
+
+ cesa_append_tdesc(cr, ctd);
+
+ /* Copy data to be processed */
+ STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp)
+ cesa_append_tdesc(cr, ctd);
+ STAILQ_INIT(&cp->cp_copyin);
+
+ /* Insert control descriptor */
+ ctd = cesa_tdma_copy(sc, 0, 0, 0);
+ if (!ctd)
+ return (ENOMEM);
+
+ cesa_append_tdesc(cr, ctd);
+
+ /* Copy back results */
+ STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp)
+ cesa_append_tdesc(cr, ctd);
+ STAILQ_INIT(&cp->cp_copyout);
+
+ return (0);
+}
+
+static int
+cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen)
+{
+ uint8_t ipad[CESA_MAX_HMAC_BLOCK_LEN];
+ uint8_t opad[CESA_MAX_HMAC_BLOCK_LEN];
+ SHA1_CTX sha1ctx;
+ MD5_CTX md5ctx;
+ uint32_t *hout;
+ uint32_t *hin;
+ int i;
+
+ memset(ipad, HMAC_IPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN);
+ memset(opad, HMAC_OPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN);
+ for (i = 0; i < mklen; i++) {
+ ipad[i] ^= mkey[i];
+ opad[i] ^= mkey[i];
+ }
+
+ hin = (uint32_t *)cs->cs_hiv_in;
+ hout = (uint32_t *)cs->cs_hiv_out;
+
+ switch (alg) {
+ case CRYPTO_MD5_HMAC:
+ MD5Init(&md5ctx);
+ MD5Update(&md5ctx, ipad, MD5_HMAC_BLOCK_LEN);
+ memcpy(hin, md5ctx.state, sizeof(md5ctx.state));
+ MD5Init(&md5ctx);
+ MD5Update(&md5ctx, opad, MD5_HMAC_BLOCK_LEN);
+ memcpy(hout, md5ctx.state, sizeof(md5ctx.state));
+ break;
+ case CRYPTO_SHA1_HMAC:
+ SHA1Init(&sha1ctx);
+ SHA1Update(&sha1ctx, ipad, SHA1_HMAC_BLOCK_LEN);
+ memcpy(hin, sha1ctx.h.b32, sizeof(sha1ctx.h.b32));
+ SHA1Init(&sha1ctx);
+ SHA1Update(&sha1ctx, opad, SHA1_HMAC_BLOCK_LEN);
+ memcpy(hout, sha1ctx.h.b32, sizeof(sha1ctx.h.b32));
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) {
+ hin[i] = htobe32(hin[i]);
+ hout[i] = htobe32(hout[i]);
+ }
+
+ return (0);
+}
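+
+/*
+ * Illustrative note: cs_hiv_in/cs_hiv_out above are the digest states
+ * after absorbing one block of (key ^ ipad) and (key ^ opad); the
+ * hardware resumes hashing from these states. The equivalent software
+ * construction (hypothetical sketch, SHA1 case) would be:
+ *
+ *	SHA1_CTX ctx;
+ *	uint8_t digest[SHA1_HASH_LEN];
+ *
+ *	SHA1Init(&ctx);
+ *	SHA1Update(&ctx, ipad, SHA1_HMAC_BLOCK_LEN);	// state == hin
+ *	SHA1Update(&ctx, msg, msglen);
+ *	SHA1Final(digest, &ctx);
+ *	SHA1Init(&ctx);
+ *	SHA1Update(&ctx, opad, SHA1_HMAC_BLOCK_LEN);	// state == hout
+ *	SHA1Update(&ctx, digest, sizeof(digest));
+ *	SHA1Final(digest, &ctx);	// digest == HMAC-SHA1(key, msg)
+ */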
+
+static int
+cesa_prep_aes_key(struct cesa_session *cs)
+{
+ uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
+ uint32_t *dkey;
+ int i;
+
+ rijndaelKeySetupEnc(ek, cs->cs_key, cs->cs_klen * 8);
+
+ cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK;
+ dkey = (uint32_t *)cs->cs_aes_dkey;
+
+ switch (cs->cs_klen) {
+ case 16:
+ cs->cs_config |= CESA_CSH_AES_KLEN_128;
+ for (i = 0; i < 4; i++)
+ *dkey++ = htobe32(ek[4 * 10 + i]);
+ break;
+ case 24:
+ cs->cs_config |= CESA_CSH_AES_KLEN_192;
+ for (i = 0; i < 4; i++)
+ *dkey++ = htobe32(ek[4 * 12 + i]);
+ for (i = 0; i < 2; i++)
+ *dkey++ = htobe32(ek[4 * 11 + 2 + i]);
+ break;
+ case 32:
+ cs->cs_config |= CESA_CSH_AES_KLEN_256;
+ for (i = 0; i < 4; i++)
+ *dkey++ = htobe32(ek[4 * 14 + i]);
+ for (i = 0; i < 4; i++)
+ *dkey++ = htobe32(ek[4 * 13 + i]);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (0);
+}
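+
+/*
+ * Illustrative note: the engine derives AES decryption round keys on
+ * its own, so only the final round keys of the encryption schedule are
+ * stored above. A standalone sketch for AES-128 (hypothetical, with a
+ * caller-supplied key buffer, using the same rijndael API) would be:
+ *
+ *	uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)], dk[4];
+ *	int i, rounds;
+ *
+ *	rounds = rijndaelKeySetupEnc(ek, key, 128);	// rounds == 10
+ *	for (i = 0; i < 4; i++)
+ *		dk[i] = htobe32(ek[4 * rounds + i]);
+ */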
+
+static int
+cesa_is_hash(int alg)
+{
+
+ switch (alg) {
+ case CRYPTO_MD5:
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA1_HMAC:
+ return (1);
+ default:
+ return (0);
+ }
+}
+
+static void
+cesa_start_packet(struct cesa_packet *cp, unsigned int size)
+{
+
+ cp->cp_size = size;
+ cp->cp_offset = 0;
+ STAILQ_INIT(&cp->cp_copyin);
+ STAILQ_INIT(&cp->cp_copyout);
+}
+
+static int
+cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp,
+ bus_dma_segment_t *seg)
+{
+ struct cesa_tdma_desc *ctd;
+ unsigned int bsize;
+
+ /* Calculate size of block copy */
+ bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset);
+
+ if (bsize > 0) {
+ ctd = cesa_tdma_copy(sc, sc->sc_sram_base +
+ CESA_DATA(cp->cp_offset), seg->ds_addr, bsize);
+ if (!ctd)
+ return (-ENOMEM);
+
+ STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq);
+
+ ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base +
+ CESA_DATA(cp->cp_offset), bsize);
+ if (!ctd)
+ return (-ENOMEM);
+
+ STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq);
+
+ seg->ds_len -= bsize;
+ seg->ds_addr += bsize;
+ cp->cp_offset += bsize;
+ }
+
+ return (bsize);
+}
+
+static void
+cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ unsigned int mpsize, fragmented;
+ unsigned int mlen, mskip, tmlen;
+ struct cesa_chain_info *cci;
+ unsigned int elen, eskip;
+ unsigned int skip, len;
+ struct cesa_sa_desc *csd;
+ struct cesa_request *cr;
+ struct cesa_softc *sc;
+ struct cesa_packet cp;
+ bus_dma_segment_t seg;
+ uint32_t config;
+ int size;
+
+ cci = arg;
+ sc = cci->cci_sc;
+ cr = cci->cci_cr;
+
+ if (error) {
+ cci->cci_error = error;
+ return;
+ }
+
+ elen = cci->cci_enc ? cci->cci_enc->crd_len : 0;
+ eskip = cci->cci_enc ? cci->cci_enc->crd_skip : 0;
+ mlen = cci->cci_mac ? cci->cci_mac->crd_len : 0;
+ mskip = cci->cci_mac ? cci->cci_mac->crd_skip : 0;
+
+ if (elen && mlen &&
+ ((eskip > mskip && ((eskip - mskip) & (cr->cr_cs->cs_ivlen - 1))) ||
+ (mskip > eskip && ((mskip - eskip) & (cr->cr_cs->cs_mblen - 1))) ||
+ (eskip > (mskip + mlen)) || (mskip > (eskip + elen)))) {
+ /*
+ * Data alignment in the request does not meet CESA requirements
+ * for combined encryption/decryption and hashing. We have to
+ * split the request into separate operations and process them
+ * one by one.
+ */
+ config = cci->cci_config;
+ if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) {
+ config &= ~CESA_CSHD_OP_MASK;
+
+ cci->cci_config = config | CESA_CSHD_MAC;
+ cci->cci_enc = NULL;
+ cci->cci_mac = cr->cr_mac;
+ cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
+
+ cci->cci_config = config | CESA_CSHD_ENC;
+ cci->cci_enc = cr->cr_enc;
+ cci->cci_mac = NULL;
+ cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
+ } else {
+ config &= ~CESA_CSHD_OP_MASK;
+
+ cci->cci_config = config | CESA_CSHD_ENC;
+ cci->cci_enc = cr->cr_enc;
+ cci->cci_mac = NULL;
+ cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
+
+ cci->cci_config = config | CESA_CSHD_MAC;
+ cci->cci_enc = NULL;
+ cci->cci_mac = cr->cr_mac;
+ cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
+ }
+
+ return;
+ }
+
+ tmlen = mlen;
+ fragmented = 0;
+ mpsize = CESA_MAX_PACKET_SIZE;
+ mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1));
+
+ if (elen && mlen) {
+ skip = MIN(eskip, mskip);
+ len = MAX(elen + eskip, mlen + mskip) - skip;
+ } else if (elen) {
+ skip = eskip;
+ len = elen;
+ } else {
+ skip = mskip;
+ len = mlen;
+ }
+
+ /* Start first packet in chain */
+ cesa_start_packet(&cp, MIN(mpsize, len));
+
+ while (nseg-- && len > 0) {
+ seg = *(segs++);
+
+ /*
+ * Skip data in the buffer on which neither an ENC nor a MAC
+ * operation is requested.
+ */
+ if (skip > 0) {
+ size = MIN(skip, seg.ds_len);
+ skip -= size;
+
+ seg.ds_addr += size;
+ seg.ds_len -= size;
+
+ if (eskip > 0)
+ eskip -= size;
+
+ if (mskip > 0)
+ mskip -= size;
+
+ if (seg.ds_len == 0)
+ continue;
+ }
+
+ while (1) {
+ /*
+ * Fill the current packet with data. Break if there is
+ * no more data in the current DMA segment or an error
+ * occurred.
+ */
+ size = cesa_fill_packet(sc, &cp, &seg);
+ if (size <= 0) {
+ error = -size;
+ break;
+ }
+
+ len -= size;
+
+ /* If packet is full, append it to the chain */
+ if (cp.cp_size == cp.cp_offset) {
+ csd = cesa_alloc_sdesc(sc, cr);
+ if (!csd) {
+ error = ENOMEM;
+ break;
+ }
+
+ /* Create SA descriptor for this packet */
+ csd->csd_cshd->cshd_config = cci->cci_config;
+ csd->csd_cshd->cshd_mac_total_dlen = tmlen;
+
+ /*
+ * Enable fragmentation if the request will not fit
+ * into one packet.
+ */
+ if (len > 0) {
+ if (!fragmented) {
+ fragmented = 1;
+ csd->csd_cshd->cshd_config |=
+ CESA_CSHD_FRAG_FIRST;
+ } else
+ csd->csd_cshd->cshd_config |=
+ CESA_CSHD_FRAG_MIDDLE;
+ } else if (fragmented)
+ csd->csd_cshd->cshd_config |=
+ CESA_CSHD_FRAG_LAST;
+
+ if (eskip < cp.cp_size && elen > 0) {
+ csd->csd_cshd->cshd_enc_src =
+ CESA_DATA(eskip);
+ csd->csd_cshd->cshd_enc_dst =
+ CESA_DATA(eskip);
+ csd->csd_cshd->cshd_enc_dlen =
+ MIN(elen, cp.cp_size - eskip);
+ }
+
+ if (mskip < cp.cp_size && mlen > 0) {
+ csd->csd_cshd->cshd_mac_src =
+ CESA_DATA(mskip);
+ csd->csd_cshd->cshd_mac_dlen =
+ MIN(mlen, cp.cp_size - mskip);
+ }
+
+ elen -= csd->csd_cshd->cshd_enc_dlen;
+ eskip -= MIN(eskip, cp.cp_size);
+ mlen -= csd->csd_cshd->cshd_mac_dlen;
+ mskip -= MIN(mskip, cp.cp_size);
+
+ cesa_dump_cshd(sc, csd->csd_cshd);
+
+ /* Append packet to the request */
+ error = cesa_append_packet(sc, cr, &cp, csd);
+ if (error)
+ break;
+
+ /* Start a new packet, as current is full */
+ cesa_start_packet(&cp, MIN(mpsize, len));
+ }
+ }
+
+ if (error)
+ break;
+ }
+
+ if (error) {
+ /*
+ * Move all allocated resources to the request. They will be
+ * freed later.
+ */
+ STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin);
+ STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout);
+ cci->cci_error = error;
+ }
+}
+
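+/*
+ * Wrapper adapting the bus_dmamap_load_mbuf()/_uio() callback signature
+ * (bus_dmamap_callback2_t, which also carries the mapped size) to
+ * cesa_create_chain_cb().
+ */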
+static void
+cesa_create_chain_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
+ bus_size_t size, int error)
+{
+
+ cesa_create_chain_cb(arg, segs, nseg, error);
+}
+
+static int
+cesa_create_chain(struct cesa_softc *sc, struct cesa_request *cr)
+{
+ struct cesa_chain_info cci;
+ struct cesa_tdma_desc *ctd;
+ uint32_t config;
+ int error;
+
+ error = 0;
+ CESA_LOCK_ASSERT(sc, sessions);
+
+ /* Create request metadata */
+ if (cr->cr_enc) {
+ if (cr->cr_enc->crd_alg == CRYPTO_AES_CBC &&
+ (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
+ memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey,
+ cr->cr_cs->cs_klen);
+ else
+ memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key,
+ cr->cr_cs->cs_klen);
+ }
+
+ if (cr->cr_mac) {
+ memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in,
+ CESA_MAX_HASH_LEN);
+ memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out,
+ CESA_MAX_HASH_LEN);
+ }
+
+ ctd = cesa_tdma_copyin_sa_data(sc, cr);
+ if (!ctd)
+ return (ENOMEM);
+
+ cesa_append_tdesc(cr, ctd);
+
+ /* Prepare SA configuration */
+ config = cr->cr_cs->cs_config;
+
+ if (cr->cr_enc && (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
+ config |= CESA_CSHD_DECRYPT;
+ if (cr->cr_enc && !cr->cr_mac)
+ config |= CESA_CSHD_ENC;
+ if (!cr->cr_enc && cr->cr_mac)
+ config |= CESA_CSHD_MAC;
+ if (cr->cr_enc && cr->cr_mac)
+ config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC :
+ CESA_CSHD_ENC_AND_MAC;
+
+ /* Create data packets */
+ cci.cci_sc = sc;
+ cci.cci_cr = cr;
+ cci.cci_enc = cr->cr_enc;
+ cci.cci_mac = cr->cr_mac;
+ cci.cci_config = config;
+ cci.cci_error = 0;
+
+ if (cr->cr_crp->crp_flags & CRYPTO_F_IOV)
+ error = bus_dmamap_load_uio(sc->sc_data_dtag,
+ cr->cr_dmap, (struct uio *)cr->cr_crp->crp_buf,
+ cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
+ else if (cr->cr_crp->crp_flags & CRYPTO_F_IMBUF)
+ error = bus_dmamap_load_mbuf(sc->sc_data_dtag,
+ cr->cr_dmap, (struct mbuf *)cr->cr_crp->crp_buf,
+ cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
+ else
+ error = bus_dmamap_load(sc->sc_data_dtag,
+ cr->cr_dmap, cr->cr_crp->crp_buf,
+ cr->cr_crp->crp_ilen, cesa_create_chain_cb, &cci,
+ BUS_DMA_NOWAIT);
+
+ if (!error)
+ cr->cr_dmap_loaded = 1;
+
+ if (cci.cci_error)
+ error = cci.cci_error;
+
+ if (error)
+ return (error);
+
+ /* Read back request metadata */
+ ctd = cesa_tdma_copyout_sa_data(sc, cr);
+ if (!ctd)
+ return (ENOMEM);
+
+ cesa_append_tdesc(cr, ctd);
+
+ return (0);
+}
+
+static void
+cesa_execute(struct cesa_softc *sc)
+{
+ struct cesa_tdma_desc *prev_ctd, *ctd;
+ struct cesa_request *prev_cr, *cr;
+
+ CESA_LOCK(sc, requests);
+
+ /*
+ * If the ready list is empty, there is nothing to execute. If the
+ * queued list is not empty, the hardware is busy and we cannot start
+ * another execution.
+ */
+ if (STAILQ_EMPTY(&sc->sc_ready_requests) ||
+ !STAILQ_EMPTY(&sc->sc_queued_requests)) {
+ CESA_UNLOCK(sc, requests);
+ return;
+ }
+
+ /* Move all ready requests to queued list */
+ STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests);
+ STAILQ_INIT(&sc->sc_ready_requests);
+
+ /* Create one execution chain from all requests on the list */
+ if (STAILQ_FIRST(&sc->sc_queued_requests) !=
+ STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) {
+ prev_cr = NULL;
+ cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD |
+ BUS_DMASYNC_POSTWRITE);
+
+ STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) {
+ if (prev_cr) {
+ ctd = STAILQ_FIRST(&cr->cr_tdesc);
+ prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc,
+ cesa_tdma_desc, ctd_stq);
+
+ prev_ctd->ctd_cthd->cthd_next =
+ ctd->ctd_cthd_paddr;
+ }
+
+ prev_cr = cr;
+ }
+
+ cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD |
+ BUS_DMASYNC_PREWRITE);
+ }
+
+ /* Start chain execution in hardware */
+ cr = STAILQ_FIRST(&sc->sc_queued_requests);
+ ctd = STAILQ_FIRST(&cr->cr_tdesc);
+
+ CESA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr);
+ CESA_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE);
+
+ CESA_UNLOCK(sc, requests);
+}
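+
+/*
+ * Illustrative note: after cesa_execute() the TDMA descriptors of all
+ * queued requests form one physically linked chain, e.g. for two
+ * requests (hypothetical):
+ *
+ *	CESA_TDMA_ND -> reqA.ctd[0] -> ... -> reqA.ctd[n] ->
+ *	                reqB.ctd[0] -> ... -> reqB.ctd[m] -> 0
+ *
+ * The engine fetches descriptors starting at CESA_TDMA_ND and stops at
+ * the terminating null next-pointer.
+ */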
+
+static int
+cesa_setup_sram(struct cesa_softc *sc)
+{
+ phandle_t sram_node;
+ ihandle_t sram_ihandle;
+ pcell_t sram_handle, sram_reg;
+
+ if (OF_getprop(ofw_bus_get_node(sc->sc_dev), "sram-handle",
+ (void *)&sram_handle, sizeof(sram_handle)) <= 0)
+ return (ENXIO);
+
+ sram_ihandle = (ihandle_t)sram_handle;
+ sram_ihandle = fdt32_to_cpu(sram_ihandle);
+ sram_node = OF_instance_to_package(sram_ihandle);
+
+ if (OF_getprop(sram_node, "reg", (void *)&sram_reg,
+ sizeof(sram_reg)) <= 0)
+ return (ENXIO);
+
+ sc->sc_sram_base = fdt32_to_cpu(sram_reg);
+
+ return (0);
+}
+
+static int
+cesa_probe(device_t dev)
+{
+ if (!ofw_bus_is_compatible(dev, "mrvl,cesa"))
+ return (ENXIO);
+
+ device_set_desc(dev, "Marvell Cryptographic Engine and Security "
+ "Accelerator");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+cesa_attach(device_t dev)
+{
+ struct cesa_softc *sc;
+ uint32_t d, r;
+ int error;
+ int i;
+
+ sc = device_get_softc(dev);
+ sc->sc_blocked = 0;
+ sc->sc_error = 0;
+ sc->sc_dev = dev;
+
+ error = cesa_setup_sram(sc);
+ if (error) {
+ device_printf(dev, "could not setup SRAM\n");
+ return (error);
+ }
+
+ soc_id(&d, &r);
+
+ switch (d) {
+ case MV_DEV_88F6281:
+ sc->sc_tperr = 0;
+ break;
+ case MV_DEV_MV78100:
+ case MV_DEV_MV78100_Z0:
+ sc->sc_tperr = CESA_ICR_TPERR;
+ break;
+ default:
+ return (ENXIO);
+ }
+
+ /* Initialize mutexes */
+ mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev),
+ "CESA Shared Data", MTX_DEF);
+ mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev),
+ "CESA TDMA Descriptors Pool", MTX_DEF);
+ mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev),
+ "CESA SA Descriptors Pool", MTX_DEF);
+ mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev),
+ "CESA Requests Pool", MTX_DEF);
+ mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
+ "CESA Sessions Pool", MTX_DEF);
+
+ /* Allocate I/O and IRQ resources */
+ error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res);
+ if (error) {
+ device_printf(dev, "could not allocate resources\n");
+ goto err0;
+ }
+
+ sc->sc_bsh = rman_get_bushandle(*(sc->sc_res));
+ sc->sc_bst = rman_get_bustag(*(sc->sc_res));
+
+ /* Setup interrupt handler */
+ error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, cesa_intr, sc, &(sc->sc_icookie));
+ if (error) {
+ device_printf(dev, "could not setup engine completion irq\n");
+ goto err1;
+ }
+
+ /* Create DMA tag for processed data */
+ error = bus_dma_tag_create(NULL, /* parent */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filtfunc, filtfuncarg */
+ CESA_MAX_REQUEST_SIZE, /* maxsize */
+ CESA_MAX_FRAGMENTS, /* nsegments */
+ CESA_MAX_REQUEST_SIZE, 0, /* maxsegsz, flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &sc->sc_data_dtag); /* dmat */
+ if (error)
+ goto err2;
+
+ /* Initialize data structures: TDMA Descriptors Pool */
+ error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm,
+ CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc));
+ if (error)
+ goto err3;
+
+ STAILQ_INIT(&sc->sc_free_tdesc);
+ for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) {
+ sc->sc_tdesc[i].ctd_cthd =
+ (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i;
+ sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr +
+ (i * sizeof(struct cesa_tdma_hdesc));
+ STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i],
+ ctd_stq);
+ }
+
+ /* Initialize data structures: SA Descriptors Pool */
+ error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm,
+ CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc));
+ if (error)
+ goto err4;
+
+ STAILQ_INIT(&sc->sc_free_sdesc);
+ for (i = 0; i < CESA_SA_DESCRIPTORS; i++) {
+ sc->sc_sdesc[i].csd_cshd =
+ (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i;
+ sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr +
+ (i * sizeof(struct cesa_sa_hdesc));
+ STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i],
+ csd_stq);
+ }
+
+ /* Initialize data structures: Requests Pool */
+ error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm,
+ CESA_REQUESTS * sizeof(struct cesa_sa_data));
+ if (error)
+ goto err5;
+
+ STAILQ_INIT(&sc->sc_free_requests);
+ STAILQ_INIT(&sc->sc_ready_requests);
+ STAILQ_INIT(&sc->sc_queued_requests);
+ for (i = 0; i < CESA_REQUESTS; i++) {
+ sc->sc_requests[i].cr_csd =
+ (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i;
+ sc->sc_requests[i].cr_csd_paddr =
+ sc->sc_requests_cdm.cdm_paddr +
+ (i * sizeof(struct cesa_sa_data));
+
+ /* Preallocate DMA maps */
+ error = bus_dmamap_create(sc->sc_data_dtag, 0,
+ &sc->sc_requests[i].cr_dmap);
+ if (error) {
+ while (i-- > 0)
+ bus_dmamap_destroy(sc->sc_data_dtag,
+ sc->sc_requests[i].cr_dmap);
+
+ goto err6;
+ }
+
+ STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i],
+ cr_stq);
+ }
+
+ /* Initialize data structures: Sessions Pool */
+ STAILQ_INIT(&sc->sc_free_sessions);
+ for (i = 0; i < CESA_SESSIONS; i++) {
+ sc->sc_sessions[i].cs_sid = i;
+ STAILQ_INSERT_TAIL(&sc->sc_free_sessions, &sc->sc_sessions[i],
+ cs_stq);
+ }
+
+ /*
+ * Initialize TDMA:
+ * - Burst limit: 128 bytes,
+ * - Outstanding reads enabled,
+ * - No byte-swap.
+ */
+ CESA_WRITE(sc, CESA_TDMA_CR, CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 |
+ CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE);
+
+ /*
+ * Initialize SA:
+ * - SA descriptor is present at beginning of CESA SRAM,
+ * - Multi-packet chain mode,
+ * - Cooperation with TDMA enabled.
+ */
+ CESA_WRITE(sc, CESA_SA_DPR, 0);
+ CESA_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA |
+ CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE);
+
+ /* Unmask interrupts */
+ CESA_WRITE(sc, CESA_ICR, 0);
+ CESA_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr);
+ CESA_WRITE(sc, CESA_TDMA_ECR, 0);
+ CESA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS |
+ CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT |
+ CESA_TDMA_EMR_DATA_ERROR);
+
+ /* Register in OCF */
+ sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
+ if (sc->sc_cid < 0) {
+ device_printf(dev, "could not get crypto driver id\n");
+ goto err7;
+ }
+
+ crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+
+ return (0);
+err7:
+ for (i = 0; i < CESA_REQUESTS; i++)
+ bus_dmamap_destroy(sc->sc_data_dtag,
+ sc->sc_requests[i].cr_dmap);
+err6:
+ cesa_free_dma_mem(&sc->sc_requests_cdm);
+err5:
+ cesa_free_dma_mem(&sc->sc_sdesc_cdm);
+err4:
+ cesa_free_dma_mem(&sc->sc_tdesc_cdm);
+err3:
+ bus_dma_tag_destroy(sc->sc_data_dtag);
+err2:
+ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_icookie);
+err1:
+ bus_release_resources(dev, cesa_res_spec, sc->sc_res);
+err0:
+ mtx_destroy(&sc->sc_sessions_lock);
+ mtx_destroy(&sc->sc_requests_lock);
+ mtx_destroy(&sc->sc_sdesc_lock);
+ mtx_destroy(&sc->sc_tdesc_lock);
+ mtx_destroy(&sc->sc_sc_lock);
+ return (ENXIO);
+}
+
+static int
+cesa_detach(device_t dev)
+{
+ struct cesa_softc *sc;
+ int i;
+
+ sc = device_get_softc(dev);
+
+ /* TODO: Wait for completion of queued requests before shutdown. */
+
+ /* Mask interrupts */
+ CESA_WRITE(sc, CESA_ICM, 0);
+ CESA_WRITE(sc, CESA_TDMA_EMR, 0);
+
+ /* Unregister from OCF */
+ crypto_unregister_all(sc->sc_cid);
+
+ /* Free DMA Maps */
+ for (i = 0; i < CESA_REQUESTS; i++)
+ bus_dmamap_destroy(sc->sc_data_dtag,
+ sc->sc_requests[i].cr_dmap);
+
+ /* Free DMA Memory */
+ cesa_free_dma_mem(&sc->sc_requests_cdm);
+ cesa_free_dma_mem(&sc->sc_sdesc_cdm);
+ cesa_free_dma_mem(&sc->sc_tdesc_cdm);
+
+ /* Free DMA Tag */
+ bus_dma_tag_destroy(sc->sc_data_dtag);
+
+ /* Stop interrupt */
+ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_icookie);
+
+ /* Release I/O and IRQ resources */
+ bus_release_resources(dev, cesa_res_spec, sc->sc_res);
+
+ /* Destroy mutexes */
+ mtx_destroy(&sc->sc_sessions_lock);
+ mtx_destroy(&sc->sc_requests_lock);
+ mtx_destroy(&sc->sc_sdesc_lock);
+ mtx_destroy(&sc->sc_tdesc_lock);
+ mtx_destroy(&sc->sc_sc_lock);
+
+ return (0);
+}
+
+static void
+cesa_intr(void *arg)
+{
+ STAILQ_HEAD(, cesa_request) requests;
+ struct cesa_request *cr, *tmp;
+ struct cesa_softc *sc;
+ uint32_t ecr, icr;
+ int blocked;
+
+ sc = arg;
+
+ /* Ack interrupt */
+ ecr = CESA_READ(sc, CESA_TDMA_ECR);
+ CESA_WRITE(sc, CESA_TDMA_ECR, 0);
+ icr = CESA_READ(sc, CESA_ICR);
+ CESA_WRITE(sc, CESA_ICR, 0);
+
+ /* Check for TDMA errors */
+ if (ecr & CESA_TDMA_ECR_MISS) {
+ device_printf(sc->sc_dev, "TDMA Miss error detected!\n");
+ sc->sc_error = EIO;
+ }
+
+ if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) {
+ device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n");
+ sc->sc_error = EIO;
+ }
+
+ if (ecr & CESA_TDMA_ECR_BOTH_HIT) {
+ device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n");
+ sc->sc_error = EIO;
+ }
+
+ if (ecr & CESA_TDMA_ECR_DATA_ERROR) {
+ device_printf(sc->sc_dev, "TDMA Data error detected!\n");
+ sc->sc_error = EIO;
+ }
+
+ /* Check for CESA errors */
+ if (icr & sc->sc_tperr) {
+ device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n");
+ sc->sc_error = EIO;
+ }
+
+ /* If there is nothing more to do, return */
+ if ((icr & CESA_ICR_ACCTDMA) == 0)
+ return;
+
+ /* Get all finished requests */
+ CESA_LOCK(sc, requests);
+ STAILQ_INIT(&requests);
+ STAILQ_CONCAT(&requests, &sc->sc_queued_requests);
+ STAILQ_INIT(&sc->sc_queued_requests);
+ CESA_UNLOCK(sc, requests);
+
+ /* Execute all ready requests */
+ cesa_execute(sc);
+
+ /* Process completed requests */
+ cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD |
+ BUS_DMASYNC_POSTWRITE);
+
+ STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) {
+ bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ cr->cr_crp->crp_etype = sc->sc_error;
+ if (cr->cr_mac)
+ crypto_copyback(cr->cr_crp->crp_flags,
+ cr->cr_crp->crp_buf, cr->cr_mac->crd_inject,
+ cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash);
+
+ crypto_done(cr->cr_crp);
+ cesa_free_request(sc, cr);
+ }
+
+ cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD |
+ BUS_DMASYNC_PREWRITE);
+
+ sc->sc_error = 0;
+
+ /* Unblock the driver if it ran out of resources */
+ CESA_LOCK(sc, sc);
+ blocked = sc->sc_blocked;
+ sc->sc_blocked = 0;
+ CESA_UNLOCK(sc, sc);
+
+ if (blocked)
+ crypto_unblock(sc->sc_cid, blocked);
+}
+
+static int
+cesa_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
+{
+ struct cesa_session *cs;
+ struct cesa_softc *sc;
+ struct cryptoini *enc;
+ struct cryptoini *mac;
+ int error;
+
+ sc = device_get_softc(dev);
+ enc = NULL;
+ mac = NULL;
+ error = 0;
+
+ /* Check and parse input */
+ if (cesa_is_hash(cri->cri_alg))
+ mac = cri;
+ else
+ enc = cri;
+
+ cri = cri->cri_next;
+
+ if (cri) {
+ if (!enc && !cesa_is_hash(cri->cri_alg))
+ enc = cri;
+
+ if (!mac && cesa_is_hash(cri->cri_alg))
+ mac = cri;
+
+ if (cri->cri_next || !(enc && mac))
+ return (EINVAL);
+ }
+
+ if ((enc && (enc->cri_klen / 8) > CESA_MAX_KEY_LEN) ||
+ (mac && (mac->cri_klen / 8) > CESA_MAX_MKEY_LEN))
+ return (E2BIG);
+
+ /* Allocate session */
+ cs = cesa_alloc_session(sc);
+ if (!cs)
+ return (ENOMEM);
+
+ /* Prepare CESA configuration */
+ cs->cs_config = 0;
+ cs->cs_ivlen = 1;
+ cs->cs_mblen = 1;
+
+ if (enc) {
+ switch (enc->cri_alg) {
+ case CRYPTO_AES_CBC:
+ cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC;
+ cs->cs_ivlen = AES_BLOCK_LEN;
+ break;
+ case CRYPTO_DES_CBC:
+ cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC;
+ cs->cs_ivlen = DES_BLOCK_LEN;
+ break;
+ case CRYPTO_3DES_CBC:
+ cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE |
+ CESA_CSHD_CBC;
+ cs->cs_ivlen = DES3_BLOCK_LEN;
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ }
+
+ if (!error && mac) {
+ switch (mac->cri_alg) {
+ case CRYPTO_MD5:
+ cs->cs_config |= CESA_CSHD_MD5;
+ cs->cs_mblen = 1;
+ cs->cs_hlen = MD5_HASH_LEN;
+ break;
+ case CRYPTO_MD5_HMAC:
+ cs->cs_config |= CESA_CSHD_MD5_HMAC;
+ cs->cs_mblen = MD5_HMAC_BLOCK_LEN;
+ cs->cs_hlen = CESA_HMAC_HASH_LENGTH;
+ break;
+ case CRYPTO_SHA1:
+ cs->cs_config |= CESA_CSHD_SHA1;
+ cs->cs_mblen = 1;
+ cs->cs_hlen = SHA1_HASH_LEN;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ cs->cs_config |= CESA_CSHD_SHA1_HMAC;
+ cs->cs_mblen = SHA1_HMAC_BLOCK_LEN;
+ cs->cs_hlen = CESA_HMAC_HASH_LENGTH;
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ }
+
+ /* Save cipher key */
+ if (!error && enc && enc->cri_key) {
+ cs->cs_klen = enc->cri_klen / 8;
+ memcpy(cs->cs_key, enc->cri_key, cs->cs_klen);
+ if (enc->cri_alg == CRYPTO_AES_CBC)
+ error = cesa_prep_aes_key(cs);
+ }
+
+ /* Save digest key */
+ if (!error && mac && mac->cri_key)
+ error = cesa_set_mkey(cs, mac->cri_alg, mac->cri_key,
+ mac->cri_klen / 8);
+
+ if (error) {
+ cesa_free_session(sc, cs);
+ return (EINVAL);
+ }
+
+ *sidp = cs->cs_sid;
+
+ return (0);
+}
+
+static int
+cesa_freesession(device_t dev, uint64_t tid)
+{
+ struct cesa_session *cs;
+ struct cesa_softc *sc;
+
+ sc = device_get_softc(dev);
+ cs = cesa_get_session(sc, CRYPTO_SESID2LID(tid));
+ if (!cs)
+ return (EINVAL);
+
+ /* Free session */
+ cesa_free_session(sc, cs);
+
+ return (0);
+}
+
+static int
+cesa_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct cesa_request *cr;
+ struct cesa_session *cs;
+ struct cryptodesc *crd;
+ struct cryptodesc *enc;
+ struct cryptodesc *mac;
+ struct cesa_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+ crd = crp->crp_desc;
+ enc = NULL;
+ mac = NULL;
+ error = 0;
+
+ /* Check session ID */
+ cs = cesa_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
+ if (!cs) {
+ crp->crp_etype = EINVAL;
+ crypto_done(crp);
+ return (0);
+ }
+
+ /* Check and parse input */
+ if (crp->crp_ilen > CESA_MAX_REQUEST_SIZE) {
+ crp->crp_etype = E2BIG;
+ crypto_done(crp);
+ return (0);
+ }
+
+ if (cesa_is_hash(crd->crd_alg))
+ mac = crd;
+ else
+ enc = crd;
+
+ crd = crd->crd_next;
+
+ if (crd) {
+ if (!enc && !cesa_is_hash(crd->crd_alg))
+ enc = crd;
+
+ if (!mac && cesa_is_hash(crd->crd_alg))
+ mac = crd;
+
+ if (crd->crd_next || !(enc && mac)) {
+ crp->crp_etype = EINVAL;
+ crypto_done(crp);
+ return (0);
+ }
+ }
+
+ /*
+ * Get a request descriptor. Block the driver if there are no
+ * free descriptors in the pool.
+ */
+ cr = cesa_alloc_request(sc);
+ if (!cr) {
+ CESA_LOCK(sc, sc);
+ sc->sc_blocked = CRYPTO_SYMQ;
+ CESA_UNLOCK(sc, sc);
+ return (ERESTART);
+ }
+
+ /* Prepare request */
+ cr->cr_crp = crp;
+ cr->cr_enc = enc;
+ cr->cr_mac = mac;
+ cr->cr_cs = cs;
+
+ CESA_LOCK(sc, sessions);
+ cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
+ if (enc->crd_flags & CRD_F_IV_EXPLICIT)
+ memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
+ else
+ arc4rand(cr->cr_csd->csd_iv, cs->cs_ivlen, 0);
+
+ if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
+ } else if (enc) {
+ if (enc->crd_flags & CRD_F_IV_EXPLICIT)
+ memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
+ else
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
+ }
+
+ if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
+ if ((enc->crd_klen / 8) <= CESA_MAX_KEY_LEN) {
+ cs->cs_klen = enc->crd_klen / 8;
+ memcpy(cs->cs_key, enc->crd_key, cs->cs_klen);
+ if (enc->crd_alg == CRYPTO_AES_CBC)
+ error = cesa_prep_aes_key(cs);
+ } else
+ error = E2BIG;
+ }
+
+ if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
+ if ((mac->crd_klen / 8) <= CESA_MAX_MKEY_LEN)
+ error = cesa_set_mkey(cs, mac->crd_alg, mac->crd_key,
+ mac->crd_klen / 8);
+ else
+ error = E2BIG;
+ }
+
+ /* Convert request to chain of TDMA and SA descriptors */
+ if (!error)
+ error = cesa_create_chain(sc, cr);
+
+ cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ CESA_UNLOCK(sc, sessions);
+
+ if (error) {
+ cesa_free_request(sc, cr);
+ crp->crp_etype = error;
+ crypto_done(crp);
+ return (0);
+ }
+
+ bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD |
+ BUS_DMASYNC_PREWRITE);
+
+ /* Enqueue request to execution */
+ cesa_enqueue_request(sc, cr);
+
+ /* Start execution if no more requests are expected to follow */
+ if ((hint & CRYPTO_HINT_MORE) == 0)
+ cesa_execute(sc);
+
+ return (0);
+}
diff --git a/sys/dev/cesa/cesa.h b/sys/dev/cesa/cesa.h
new file mode 100644
index 0000000..2c953f0
--- /dev/null
+++ b/sys/dev/cesa/cesa.h
@@ -0,0 +1,350 @@
+/*-
+ * Copyright (C) 2009-2011 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_CESA_H_
+#define _DEV_CESA_H_
+
+/* Maximum number of allocated sessions */
+#define CESA_SESSIONS 64
+
+/* Maximum number of queued requests */
+#define CESA_REQUESTS 256
+
+/*
+ * CESA is able to process data only in CESA SRAM, which is quite small (2 kB).
+ * We have to fit a packet there, which contains the SA descriptor, keys, IV
+ * and the data to be processed. Every request must be converted into a chain
+ * of packets, and each packet can hold about 1.75 kB of data.
+ *
+ * To process each packet we need at least 1 SA descriptor and at least 4 TDMA
+ * descriptors. However, there are cases when we use 2 SA and 8 TDMA
+ * descriptors per packet. The number of TDMA descriptors used can grow beyond
+ * these values if the data in the request is fragmented in physical memory.
+ *
+ * The driver uses preallocated SA and TDMA descriptor pools to get the best
+ * performance. The size of these pools should match the expected request
+ * size. Example:
+ *
+ * Expected average request size: 1.5 kB (Ethernet MTU)
+ * Packets per average request: (1.5 kB / 1.75 kB) = 1
+ * SA descriptors per average request (worst case): 1 * 2 = 2
+ * TDMA descriptors per average request (worst case): 1 * 8 = 8
+ *
+ * More TDMA descriptors should be allocated if data fragmentation is expected
+ * (for example, when processing mbufs larger than MCLBYTES). The driver may
+ * use 2 additional TDMA descriptors for each discontinuity in the physical
+ * data layout.
+ */
+
+/* Values below are optimized for requests containing about 1.5 kB of data */
+#define CESA_SA_DESC_PER_REQ 2
+#define CESA_TDMA_DESC_PER_REQ 8
+
+#define CESA_SA_DESCRIPTORS (CESA_SA_DESC_PER_REQ * CESA_REQUESTS)
+#define CESA_TDMA_DESCRIPTORS (CESA_TDMA_DESC_PER_REQ * CESA_REQUESTS)
+
+/* Useful constants */
+#define CESA_HMAC_HASH_LENGTH 12
+#define CESA_MAX_FRAGMENTS 64
+#define CESA_SRAM_SIZE 2048
+
+/*
+ * CESA_MAX_HASH_LEN is the maximum length of a hash generated by CESA.
+ * As CESA supports only MD5 and SHA1, this equals 20 bytes.
+ * However, we increase the value to 24 bytes to meet the alignment
+ * requirements of the cesa_sa_data structure.
+ */
+#define CESA_MAX_HASH_LEN 24
+#define CESA_MAX_KEY_LEN 32
+#define CESA_MAX_IV_LEN 16
+#define CESA_MAX_HMAC_BLOCK_LEN 64
+#define CESA_MAX_MKEY_LEN CESA_MAX_HMAC_BLOCK_LEN
+#define CESA_MAX_PACKET_SIZE (CESA_SRAM_SIZE - CESA_DATA(0))
+#define CESA_MAX_REQUEST_SIZE 65535
+
+/* Locking macros */
+#define CESA_LOCK(sc, what) mtx_lock(&(sc)->sc_ ## what ## _lock)
+#define CESA_UNLOCK(sc, what) mtx_unlock(&(sc)->sc_ ## what ## _lock)
+#define CESA_LOCK_ASSERT(sc, what) \
+ mtx_assert(&(sc)->sc_ ## what ## _lock, MA_OWNED)
+
+/* Registers read/write macros */
+#define CESA_READ(sc, reg) \
+ bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
+#define CESA_WRITE(sc, reg, val) \
+ bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
+
+/* Generic allocator for objects */
+#define CESA_GENERIC_ALLOC_LOCKED(sc, obj, pool) do { \
+ CESA_LOCK(sc, pool); \
+ \
+ if (STAILQ_EMPTY(&(sc)->sc_free_ ## pool)) \
+ obj = NULL; \
+ else { \
+ obj = STAILQ_FIRST(&(sc)->sc_free_ ## pool); \
+ STAILQ_REMOVE_HEAD(&(sc)->sc_free_ ## pool, \
+ obj ## _stq); \
+ } \
+ \
+ CESA_UNLOCK(sc, pool); \
+} while (0)
+
+#define CESA_GENERIC_FREE_LOCKED(sc, obj, pool) do { \
+ CESA_LOCK(sc, pool); \
+ STAILQ_INSERT_TAIL(&(sc)->sc_free_ ## pool, obj, \
+ obj ## _stq); \
+ CESA_UNLOCK(sc, pool); \
+} while (0)
+
+/* CESA SRAM offset calculation macros */
+#define CESA_SA_DATA(member) \
+ (sizeof(struct cesa_sa_hdesc) + offsetof(struct cesa_sa_data, member))
+#define CESA_DATA(offset) \
+ (sizeof(struct cesa_sa_hdesc) + sizeof(struct cesa_sa_data) + (offset))
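+
+/*
+ * Illustrative note: these macros encode the CESA SRAM layout: the SA
+ * header first, then the cesa_sa_data block, then the packet payload.
+ * A hypothetical compile-time check that the headers leave room for
+ * payload in the 2 kB SRAM could read:
+ *
+ *	CTASSERT(CESA_DATA(0) < CESA_SRAM_SIZE);
+ */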
+
+struct cesa_tdma_hdesc {
+ uint16_t cthd_byte_count;
+ uint16_t cthd_flags;
+ uint32_t cthd_src;
+ uint32_t cthd_dst;
+ uint32_t cthd_next;
+};
+
+struct cesa_sa_hdesc {
+ uint32_t cshd_config;
+ uint16_t cshd_enc_src;
+ uint16_t cshd_enc_dst;
+ uint32_t cshd_enc_dlen;
+ uint32_t cshd_enc_key;
+ uint16_t cshd_enc_iv;
+ uint16_t cshd_enc_iv_buf;
+ uint16_t cshd_mac_src;
+ uint16_t cshd_mac_total_dlen;
+ uint16_t cshd_mac_dst;
+ uint16_t cshd_mac_dlen;
+ uint16_t cshd_mac_iv_in;
+ uint16_t cshd_mac_iv_out;
+};
+
+struct cesa_sa_data {
+ uint8_t csd_key[CESA_MAX_KEY_LEN];
+ uint8_t csd_iv[CESA_MAX_IV_LEN];
+ uint8_t csd_hiv_in[CESA_MAX_HASH_LEN];
+ uint8_t csd_hiv_out[CESA_MAX_HASH_LEN];
+ uint8_t csd_hash[CESA_MAX_HASH_LEN];
+};
+
+struct cesa_dma_mem {
+ void *cdm_vaddr;
+ bus_addr_t cdm_paddr;
+ bus_dma_tag_t cdm_tag;
+ bus_dmamap_t cdm_map;
+};
+
+struct cesa_tdma_desc {
+ struct cesa_tdma_hdesc *ctd_cthd;
+ bus_addr_t ctd_cthd_paddr;
+
+ STAILQ_ENTRY(cesa_tdma_desc) ctd_stq;
+};
+
+struct cesa_sa_desc {
+ struct cesa_sa_hdesc *csd_cshd;
+ bus_addr_t csd_cshd_paddr;
+
+ STAILQ_ENTRY(cesa_sa_desc) csd_stq;
+};
+
+struct cesa_session {
+ uint32_t cs_sid;
+ uint32_t cs_config;
+ unsigned int cs_klen;
+ unsigned int cs_ivlen;
+ unsigned int cs_hlen;
+ unsigned int cs_mblen;
+ uint8_t cs_key[CESA_MAX_KEY_LEN];
+ uint8_t cs_aes_dkey[CESA_MAX_KEY_LEN];
+ uint8_t cs_hiv_in[CESA_MAX_HASH_LEN];
+ uint8_t cs_hiv_out[CESA_MAX_HASH_LEN];
+
+ STAILQ_ENTRY(cesa_session) cs_stq;
+};
+
+struct cesa_request {
+ struct cesa_sa_data *cr_csd;
+ bus_addr_t cr_csd_paddr;
+ struct cryptop *cr_crp;
+ struct cryptodesc *cr_enc;
+ struct cryptodesc *cr_mac;
+ struct cesa_session *cr_cs;
+ bus_dmamap_t cr_dmap;
+ int cr_dmap_loaded;
+
+ STAILQ_HEAD(, cesa_tdma_desc) cr_tdesc;
+ STAILQ_HEAD(, cesa_sa_desc) cr_sdesc;
+
+ STAILQ_ENTRY(cesa_request) cr_stq;
+};
+
+struct cesa_packet {
+ STAILQ_HEAD(, cesa_tdma_desc) cp_copyin;
+ STAILQ_HEAD(, cesa_tdma_desc) cp_copyout;
+ unsigned int cp_size;
+ unsigned int cp_offset;
+};
+
+struct cesa_softc {
+ device_t sc_dev;
+ int32_t sc_cid;
+ struct resource *sc_res[2];
+ void *sc_icookie;
+ bus_dma_tag_t sc_data_dtag;
+ bus_space_tag_t sc_bst;
+ bus_space_handle_t sc_bsh;
+ int sc_error;
+ int sc_tperr;
+
+ struct mtx sc_sc_lock;
+ int sc_blocked;
+
+ /* TDMA descriptors pool */
+ struct mtx sc_tdesc_lock;
+ struct cesa_tdma_desc sc_tdesc[CESA_TDMA_DESCRIPTORS];
+ struct cesa_dma_mem sc_tdesc_cdm;
+ STAILQ_HEAD(, cesa_tdma_desc) sc_free_tdesc;
+
+ /* SA descriptors pool */
+ struct mtx sc_sdesc_lock;
+ struct cesa_sa_desc sc_sdesc[CESA_SA_DESCRIPTORS];
+ struct cesa_dma_mem sc_sdesc_cdm;
+ STAILQ_HEAD(, cesa_sa_desc) sc_free_sdesc;
+
+ /* Requests pool */
+ struct mtx sc_requests_lock;
+ struct cesa_request sc_requests[CESA_REQUESTS];
+ struct cesa_dma_mem sc_requests_cdm;
+ STAILQ_HEAD(, cesa_request) sc_free_requests;
+ STAILQ_HEAD(, cesa_request) sc_ready_requests;
+ STAILQ_HEAD(, cesa_request) sc_queued_requests;
+
+ /* Sessions pool */
+ struct mtx sc_sessions_lock;
+ struct cesa_session sc_sessions[CESA_SESSIONS];
+ STAILQ_HEAD(, cesa_session) sc_free_sessions;
+
+ /* CESA SRAM Address */
+ bus_addr_t sc_sram_base;
+};
+
+struct cesa_chain_info {
+ struct cesa_softc *cci_sc;
+ struct cesa_request *cci_cr;
+ struct cryptodesc *cci_enc;
+ struct cryptodesc *cci_mac;
+ uint32_t cci_config;
+ int cci_error;
+};
+
+/* CESA descriptors flags definitions */
+#define CESA_CTHD_OWNED (1 << 15)
+
+#define CESA_CSHD_MAC (0 << 0)
+#define CESA_CSHD_ENC (1 << 0)
+#define CESA_CSHD_MAC_AND_ENC (2 << 0)
+#define CESA_CSHD_ENC_AND_MAC (3 << 0)
+#define CESA_CSHD_OP_MASK (3 << 0)
+
+#define CESA_CSHD_MD5 (4 << 4)
+#define CESA_CSHD_SHA1 (5 << 4)
+#define CESA_CSHD_MD5_HMAC ((6 << 4) | (1 << 7))
+#define CESA_CSHD_SHA1_HMAC ((7 << 4) | (1 << 7))
+
+#define CESA_CSHD_DES (1 << 8)
+#define CESA_CSHD_3DES (2 << 8)
+#define CESA_CSHD_AES (3 << 8)
+
+#define CESA_CSHD_DECRYPT (1 << 12)
+#define CESA_CSHD_CBC (1 << 16)
+#define CESA_CSHD_3DES_EDE (1 << 20)
+
+#define CESA_CSH_AES_KLEN_128 (0 << 24)
+#define CESA_CSH_AES_KLEN_192 (1 << 24)
+#define CESA_CSH_AES_KLEN_256 (2 << 24)
+#define CESA_CSH_AES_KLEN_MASK (3 << 24)
+
+#define CESA_CSHD_FRAG_FIRST (1 << 30)
+#define CESA_CSHD_FRAG_LAST (2 << 30)
+#define CESA_CSHD_FRAG_MIDDLE (3 << 30)
+
+/* CESA registers definitions */
+#define CESA_ICR 0xDE20
+#define CESA_ICR_ACCTDMA (1 << 7)
+#define CESA_ICR_TPERR (1 << 12)
+
+#define CESA_ICM 0xDE24
+#define CESA_ICM_ACCTDMA CESA_ICR_ACCTDMA
+#define CESA_ICM_TPERR CESA_ICR_TPERR
+
+/* CESA TDMA registers definitions */
+#define CESA_TDMA_ND 0x0830
+
+#define CESA_TDMA_CR 0x0840
+#define CESA_TDMA_CR_DBL128 (4 << 0)
+#define CESA_TDMA_CR_ORDEN (1 << 4)
+#define CESA_TDMA_CR_SBL128 (4 << 6)
+#define CESA_TDMA_CR_NBS (1 << 11)
+#define CESA_TDMA_CR_ENABLE (1 << 12)
+#define CESA_TDMA_CR_FETCHND (1 << 13)
+#define CESA_TDMA_CR_ACTIVE (1 << 14)
+
+#define CESA_TDMA_ECR 0x08C8
+#define CESA_TDMA_ECR_MISS (1 << 0)
+#define CESA_TDMA_ECR_DOUBLE_HIT (1 << 1)
+#define CESA_TDMA_ECR_BOTH_HIT (1 << 2)
+#define CESA_TDMA_ECR_DATA_ERROR (1 << 3)
+
+#define CESA_TDMA_EMR 0x08CC
+#define CESA_TDMA_EMR_MISS CESA_TDMA_ECR_MISS
+#define CESA_TDMA_EMR_DOUBLE_HIT CESA_TDMA_ECR_DOUBLE_HIT
+#define CESA_TDMA_EMR_BOTH_HIT CESA_TDMA_ECR_BOTH_HIT
+#define CESA_TDMA_EMR_DATA_ERROR CESA_TDMA_ECR_DATA_ERROR
+
+/* CESA SA registers definitions */
+#define CESA_SA_CMD 0xDE00
+#define CESA_SA_CMD_ACTVATE (1 << 0)
+
+#define CESA_SA_DPR 0xDE04
+
+#define CESA_SA_CR 0xDE08
+#define CESA_SA_CR_WAIT_FOR_TDMA (1 << 7)
+#define CESA_SA_CR_ACTIVATE_TDMA (1 << 9)
+#define CESA_SA_CR_MULTI_MODE (1 << 11)
+
+#define CESA_SA_SR 0xDE0C
+#define CESA_SA_SR_ACTIVE (1 << 0)
+
+#endif
diff --git a/sys/dev/ciss/ciss.c b/sys/dev/ciss/ciss.c
index 016d5f3..b530de1 100644
--- a/sys/dev/ciss/ciss.c
+++ b/sys/dev/ciss/ciss.c
@@ -105,7 +105,8 @@
#include <dev/ciss/cissio.h>
#include <dev/ciss/cissvar.h>
-MALLOC_DEFINE(CISS_MALLOC_CLASS, "ciss_data", "ciss internal data buffers");
+static MALLOC_DEFINE(CISS_MALLOC_CLASS, "ciss_data",
+ "ciss internal data buffers");
/* pci interface */
static int ciss_lookup(device_t dev);
diff --git a/sys/dev/cs/if_cs.c b/sys/dev/cs/if_cs.c
index 3b75efc..3789ea4 100644
--- a/sys/dev/cs/if_cs.c
+++ b/sys/dev/cs/if_cs.c
@@ -103,7 +103,7 @@ devclass_t cs_devclass;
driver_intr_t csintr;
/* sysctl vars */
-SYSCTL_NODE(_hw, OID_AUTO, cs, CTLFLAG_RD, 0, "cs device parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, cs, CTLFLAG_RD, 0, "cs device parameters");
int cs_ignore_cksum_failure = 0;
TUNABLE_INT("hw.cs.ignore_checksum_failure", &cs_ignore_cksum_failure);
diff --git a/sys/dev/cxgb/ulp/tom/cxgb_tom_sysctl.c b/sys/dev/cxgb/ulp/tom/cxgb_tom_sysctl.c
index be22676..926b445 100644
--- a/sys/dev/cxgb/ulp/tom/cxgb_tom_sysctl.c
+++ b/sys/dev/cxgb/ulp/tom/cxgb_tom_sysctl.c
@@ -78,7 +78,7 @@ __FBSDID("$FreeBSD$");
/* Avoid clutter in the hw.* space, keep all toe tunables within hw.cxgb */
SYSCTL_DECL(_hw_cxgb);
-SYSCTL_NODE(_hw_cxgb, OID_AUTO, toe, CTLFLAG_RD, 0, "TOE parameters");
+static SYSCTL_NODE(_hw_cxgb, OID_AUTO, toe, CTLFLAG_RD, 0, "TOE parameters");
static struct tom_tunables default_tunable_vals = {
.max_host_sndbuf = 32 * 1024,
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 18b813d..b7cb09a 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -129,7 +129,8 @@ MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
/*
* Tunables.
*/
-SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0,
+ "cxgbe driver parameters");
static int force_firmware_install = 0;
TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install);
diff --git a/sys/dev/dc/dcphy.c b/sys/dev/dc/dcphy.c
index 5c60ad5..ab44993 100644
--- a/sys/dev/dc/dcphy.c
+++ b/sys/dev/dc/dcphy.c
@@ -294,7 +294,7 @@ static void
dcphy_status(struct mii_softc *sc)
{
struct mii_data *mii = sc->mii_pdata;
- int reg, anlpar, tstat = 0;
+ int anlpar, tstat;
struct dc_softc *dc_sc;
dc_sc = mii->mii_ifp->if_softc;
@@ -305,13 +305,12 @@ dcphy_status(struct mii_softc *sc)
if ((mii->mii_ifp->if_flags & IFF_UP) == 0)
return;
- reg = CSR_READ_4(dc_sc, DC_10BTSTAT);
- if (!(reg & DC_TSTAT_LS10) || !(reg & DC_TSTAT_LS100))
+ tstat = CSR_READ_4(dc_sc, DC_10BTSTAT);
+ if (!(tstat & DC_TSTAT_LS10) || !(tstat & DC_TSTAT_LS100))
mii->mii_media_status |= IFM_ACTIVE;
if (CSR_READ_4(dc_sc, DC_10BTCTRL) & DC_TCTL_AUTONEGENBL) {
/* Erg, still trying, I guess... */
- tstat = CSR_READ_4(dc_sc, DC_10BTSTAT);
if ((tstat & DC_TSTAT_ANEGSTAT) != DC_ASTAT_AUTONEGCMP) {
if ((DC_IS_MACRONIX(dc_sc) || DC_IS_PNICII(dc_sc)) &&
(tstat & DC_TSTAT_ANEGSTAT) == DC_ASTAT_DISABLE)
@@ -351,9 +350,9 @@ dcphy_status(struct mii_softc *sc)
* and hope that the user is clever enough to manually
* change the media settings if we're wrong.
*/
- if (!(reg & DC_TSTAT_LS100))
+ if (!(tstat & DC_TSTAT_LS100))
mii->mii_media_active |= IFM_100_TX | IFM_HDX;
- else if (!(reg & DC_TSTAT_LS10))
+ else if (!(tstat & DC_TSTAT_LS10))
mii->mii_media_active |= IFM_10_T | IFM_HDX;
else
mii->mii_media_active |= IFM_NONE;
diff --git a/sys/dev/dc/if_dc.c b/sys/dev/dc/if_dc.c
index 184fa8f..b9ea19a 100644
--- a/sys/dev/dc/if_dc.c
+++ b/sys/dev/dc/if_dc.c
@@ -122,6 +122,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
@@ -149,7 +150,7 @@ MODULE_DEPEND(dc, miibus, 1, 1, 1);
/*
* Various supported device vendors/types and their names.
*/
-static const struct dc_type dc_devs[] = {
+static const struct dc_type dc_devs[] = {
{ DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143), 0,
"Intel 21143 10/100BaseTX" },
{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009), 0,
@@ -272,12 +273,6 @@ static void dc_eeprom_getword_xircom(struct dc_softc *, int, uint16_t *);
static void dc_eeprom_width(struct dc_softc *);
static void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
-static void dc_mii_writebit(struct dc_softc *, int);
-static int dc_mii_readbit(struct dc_softc *);
-static void dc_mii_sync(struct dc_softc *);
-static void dc_mii_send(struct dc_softc *, uint32_t, int);
-static int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
-static int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
static int dc_miibus_readreg(device_t, int, int);
static int dc_miibus_writereg(device_t, int, int, int);
static void dc_miibus_statchg(device_t);
@@ -307,6 +302,24 @@ static int dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *);
static void dc_apply_fixup(struct dc_softc *, int);
static int dc_check_multiport(struct dc_softc *);
+/*
+ * MII bit-bang glue
+ */
+static uint32_t dc_mii_bitbang_read(device_t);
+static void dc_mii_bitbang_write(device_t, uint32_t);
+
+static const struct mii_bitbang_ops dc_mii_bitbang_ops = {
+ dc_mii_bitbang_read,
+ dc_mii_bitbang_write,
+ {
+ DC_SIO_MII_DATAOUT, /* MII_BIT_MDO */
+ DC_SIO_MII_DATAIN, /* MII_BIT_MDI */
+ DC_SIO_MII_CLK, /* MII_BIT_MDC */
+ 0, /* MII_BIT_DIR_HOST_PHY */
+ DC_SIO_MII_DIR, /* MII_BIT_DIR_PHY_HOST */
+ }
+};
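+
+/*
+ * Illustrative note: with this ops table the generic helpers from
+ * mii_bitbang.c drive the DC_SIO register, replacing the hand-rolled
+ * dc_mii_* routines removed below; the accessors are used as:
+ *
+ *	rval = mii_bitbang_readreg(dev, &dc_mii_bitbang_ops, phy, reg);
+ *	mii_bitbang_writereg(dev, &dc_mii_bitbang_ops, phy, reg, data);
+ */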
+
#ifdef DC_USEIOSPACE
#define DC_RES SYS_RES_IOPORT
#define DC_RID DC_PCI_CFBIO
@@ -611,185 +624,45 @@ dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int be)
}
/*
- * The following two routines are taken from the Macronix 98713
- * Application Notes pp.19-21.
- */
-/*
- * Write a bit to the MII bus.
+ * Write the MII serial port for the MII bit-bang module.
*/
static void
-dc_mii_writebit(struct dc_softc *sc, int bit)
+dc_mii_bitbang_write(device_t dev, uint32_t val)
{
- uint32_t reg;
+ struct dc_softc *sc;
- reg = DC_SIO_ROMCTL_WRITE | (bit != 0 ? DC_SIO_MII_DATAOUT : 0);
- CSR_WRITE_4(sc, DC_SIO, reg);
- CSR_BARRIER_4(sc, DC_SIO,
- BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- DELAY(1);
+ sc = device_get_softc(dev);
- CSR_WRITE_4(sc, DC_SIO, reg | DC_SIO_MII_CLK);
- CSR_BARRIER_4(sc, DC_SIO,
- BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- DELAY(1);
- CSR_WRITE_4(sc, DC_SIO, reg);
+ CSR_WRITE_4(sc, DC_SIO, val);
CSR_BARRIER_4(sc, DC_SIO,
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- DELAY(1);
}
/*
- * Read a bit from the MII bus.
+ * Read the MII serial port for the MII bit-bang module.
*/
-static int
-dc_mii_readbit(struct dc_softc *sc)
+static uint32_t
+dc_mii_bitbang_read(device_t dev)
{
- uint32_t reg;
-
- reg = DC_SIO_ROMCTL_READ | DC_SIO_MII_DIR;
- CSR_WRITE_4(sc, DC_SIO, reg);
- CSR_BARRIER_4(sc, DC_SIO,
- BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- DELAY(1);
- (void)CSR_READ_4(sc, DC_SIO);
- CSR_WRITE_4(sc, DC_SIO, reg | DC_SIO_MII_CLK);
- CSR_BARRIER_4(sc, DC_SIO,
- BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- DELAY(1);
- CSR_WRITE_4(sc, DC_SIO, reg);
- CSR_BARRIER_4(sc, DC_SIO,
- BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- DELAY(1);
- if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
- return (1);
-
- return (0);
-}
+ struct dc_softc *sc;
+ uint32_t val;
-/*
- * Sync the PHYs by setting data bit and strobing the clock 32 times.
- */
-static void
-dc_mii_sync(struct dc_softc *sc)
-{
- int i;
+ sc = device_get_softc(dev);
- CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
+ val = CSR_READ_4(sc, DC_SIO);
CSR_BARRIER_4(sc, DC_SIO,
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- DELAY(1);
-
- for (i = 0; i < 32; i++)
- dc_mii_writebit(sc, 1);
-}
-
-/*
- * Clock a series of bits through the MII.
- */
-static void
-dc_mii_send(struct dc_softc *sc, uint32_t bits, int cnt)
-{
- int i;
-
- for (i = (0x1 << (cnt - 1)); i; i >>= 1)
- dc_mii_writebit(sc, bits & i);
-}
-
-/*
- * Read an PHY register through the MII.
- */
-static int
-dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
-{
- int i;
-
- /*
- * Set up frame for RX.
- */
- frame->mii_stdelim = DC_MII_STARTDELIM;
- frame->mii_opcode = DC_MII_READOP;
-
- /*
- * Sync the PHYs.
- */
- dc_mii_sync(sc);
-
- /*
- * Send command/address info.
- */
- dc_mii_send(sc, frame->mii_stdelim, 2);
- dc_mii_send(sc, frame->mii_opcode, 2);
- dc_mii_send(sc, frame->mii_phyaddr, 5);
- dc_mii_send(sc, frame->mii_regaddr, 5);
-
- /*
- * Now try reading data bits. If the turnaround failed, we still
- * need to clock through 16 cycles to keep the PHY(s) in sync.
- */
- frame->mii_turnaround = dc_mii_readbit(sc);
- if (frame->mii_turnaround != 0) {
- for (i = 0; i < 16; i++)
- dc_mii_readbit(sc);
- goto fail;
- }
- for (i = 0x8000; i; i >>= 1) {
- if (dc_mii_readbit(sc))
- frame->mii_data |= i;
- }
-
-fail:
- /* Clock the idle bits. */
- dc_mii_writebit(sc, 0);
- dc_mii_writebit(sc, 0);
-
- if (frame->mii_turnaround != 0)
- return (1);
- return (0);
-}
-
-/*
- * Write to a PHY register through the MII.
- */
-static int
-dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
-{
-
- /*
- * Set up frame for TX.
- */
- frame->mii_stdelim = DC_MII_STARTDELIM;
- frame->mii_opcode = DC_MII_WRITEOP;
- frame->mii_turnaround = DC_MII_TURNAROUND;
-
- /*
- * Sync the PHYs.
- */
- dc_mii_sync(sc);
-
- dc_mii_send(sc, frame->mii_stdelim, 2);
- dc_mii_send(sc, frame->mii_opcode, 2);
- dc_mii_send(sc, frame->mii_phyaddr, 5);
- dc_mii_send(sc, frame->mii_regaddr, 5);
- dc_mii_send(sc, frame->mii_turnaround, 2);
- dc_mii_send(sc, frame->mii_data, 16);
-
- /* Clock the idle bits. */
- dc_mii_writebit(sc, 0);
- dc_mii_writebit(sc, 0);
-
- return (0);
+ return (val);
}
static int
dc_miibus_readreg(device_t dev, int phy, int reg)
{
- struct dc_mii_frame frame;
- struct dc_softc *sc;
+ struct dc_softc *sc;
int i, rval, phy_reg = 0;
sc = device_get_softc(dev);
- bzero(&frame, sizeof(frame));
if (sc->dc_pmode != DC_PMODE_MII) {
if (phy == (MII_NPHY - 1)) {
@@ -881,34 +754,29 @@ dc_miibus_readreg(device_t dev, int phy, int reg)
}
rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;
-
if (rval == 0xFFFF)
return (0);
return (rval);
}
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
if (sc->dc_type == DC_TYPE_98713) {
phy_reg = CSR_READ_4(sc, DC_NETCFG);
CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
}
- dc_mii_readreg(sc, &frame);
+ rval = mii_bitbang_readreg(dev, &dc_mii_bitbang_ops, phy, reg);
if (sc->dc_type == DC_TYPE_98713)
CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
- return (frame.mii_data);
+ return (rval);
}
static int
dc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct dc_softc *sc;
- struct dc_mii_frame frame;
int i, phy_reg = 0;
sc = device_get_softc(dev);
- bzero(&frame, sizeof(frame));
if (DC_IS_PNIC(sc)) {
CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
@@ -964,15 +832,11 @@ dc_miibus_writereg(device_t dev, int phy, int reg, int data)
return (0);
}
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- frame.mii_data = data;
-
if (sc->dc_type == DC_TYPE_98713) {
phy_reg = CSR_READ_4(sc, DC_NETCFG);
CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
}
- dc_mii_writereg(sc, &frame);
+ mii_bitbang_writereg(dev, &dc_mii_bitbang_ops, phy, reg, data);
if (sc->dc_type == DC_TYPE_98713)
CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
@@ -996,12 +860,11 @@ dc_miibus_statchg(device_t dev)
return;
ifm = &mii->mii_media;
- if (DC_IS_DAVICOM(sc) &&
- IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
+ if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
dc_setcfg(sc, ifm->ifm_media);
- sc->dc_if_media = ifm->ifm_media;
return;
- }
+ } else if (!DC_IS_ADMTEK(sc))
+ dc_setcfg(sc, mii->mii_media_active);
sc->dc_link = 0;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
@@ -1011,17 +874,8 @@ dc_miibus_statchg(device_t dev)
case IFM_100_TX:
sc->dc_link = 1;
break;
- default:
- break;
}
}
- if (sc->dc_link == 0)
- return;
-
- sc->dc_if_media = mii->mii_media_active;
- if (DC_IS_ADMTEK(sc))
- return;
- dc_setcfg(sc, mii->mii_media_active);
}
/*
@@ -2602,9 +2456,6 @@ dc_attach(device_t dev)
if (sc->dc_pmode != DC_PMODE_SIA)
sc->dc_pmode = DC_PMODE_SYM;
sc->dc_flags |= DC_21143_NWAY;
- mii_attach(dev, &sc->dc_miibus, ifp, dc_ifmedia_upd,
- dc_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY,
- MII_OFFSET_ANY, 0);
/*
* For non-MII cards, we need to have the 21143
* drive the LEDs. Except there are some systems
@@ -2615,7 +2466,9 @@ dc_attach(device_t dev)
if (!(pci_get_subvendor(dev) == 0x1033 &&
pci_get_subdevice(dev) == 0x8028))
sc->dc_flags |= DC_TULIP_LEDS;
- error = 0;
+ error = mii_attach(dev, &sc->dc_miibus, ifp, dc_ifmedia_upd,
+ dc_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY,
+ MII_OFFSET_ANY, 0);
}
if (error) {
@@ -2684,7 +2537,7 @@ dc_detach(device_t dev)
ifp = sc->dc_ifp;
#ifdef DEVICE_POLLING
- if (ifp->if_capenable & IFCAP_POLLING)
+ if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
@@ -2708,7 +2561,7 @@ dc_detach(device_t dev)
if (sc->dc_res)
bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
- if (ifp)
+ if (ifp != NULL)
if_free(ifp);
dc_dma_free(sc);
@@ -2758,7 +2611,6 @@ dc_list_tx_init(struct dc_softc *sc)
return (0);
}
-
/*
* Initialize the RX descriptors and allocate mbufs for them. Note that
* we arrange the descriptors in a closed ring, so that the last descriptor
diff --git a/sys/dev/dc/if_dcreg.h b/sys/dev/dc/if_dcreg.h
index acd80c0..e290881 100644
--- a/sys/dev/dc/if_dcreg.h
+++ b/sys/dev/dc/if_dcreg.h
@@ -531,27 +531,9 @@ struct dc_mediainfo {
struct dc_type {
uint32_t dc_devid;
uint8_t dc_minrev;
- char *dc_name;
+ const char *dc_name;
};
-struct dc_mii_frame {
- uint8_t mii_stdelim;
- uint8_t mii_opcode;
- uint8_t mii_phyaddr;
- uint8_t mii_regaddr;
- uint8_t mii_turnaround;
- uint16_t mii_data;
-};
-
-/*
- * MII constants
- */
-#define DC_MII_STARTDELIM 0x01
-#define DC_MII_READOP 0x02
-#define DC_MII_WRITEOP 0x01
-#define DC_MII_TURNAROUND 0x02
-
-
/*
* Registers specific to clone devices.
* This mainly relates to RX filter programming: not all 21x4x clones
@@ -778,7 +760,6 @@ struct dc_softc {
int dc_pnic_rx_bug_save;
unsigned char *dc_pnic_rx_buf;
int dc_if_flags;
- int dc_if_media;
uint32_t dc_flags;
uint32_t dc_txthresh;
uint32_t dc_eaddr[2];
@@ -827,7 +808,7 @@ struct dc_softc {
#define CSR_READ_4(sc, reg) \
bus_space_read_4(sc->dc_btag, sc->dc_bhandle, reg)
-#define CSR_BARRIER_4(sc, reg, flags) \
+#define CSR_BARRIER_4(sc, reg, flags) \
bus_space_barrier(sc->dc_btag, sc->dc_bhandle, reg, 4, flags)
#define DC_TIMEOUT 1000
diff --git a/sys/dev/dcons/dcons_os.c b/sys/dev/dcons/dcons_os.c
index b7de414..d638d41 100644
--- a/sys/dev/dcons/dcons_os.c
+++ b/sys/dev/dcons/dcons_os.c
@@ -93,7 +93,7 @@ static int poll_hz = DCONS_POLL_HZ;
static struct dcons_softc sc[DCONS_NPORT];
-SYSCTL_NODE(_kern, OID_AUTO, dcons, CTLFLAG_RD, 0, "Dumb Console");
+static SYSCTL_NODE(_kern, OID_AUTO, dcons, CTLFLAG_RD, 0, "Dumb Console");
SYSCTL_INT(_kern_dcons, OID_AUTO, poll_hz, CTLFLAG_RW, &poll_hz, 0,
"dcons polling rate");
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 4d8ba65..10d556e 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -330,7 +330,7 @@ MODULE_DEPEND(em, ether, 1, 1, 1);
#define CSUM_TSO 0
#endif
-SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters");
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
diff --git a/sys/dev/e1000/if_igb.c b/sys/dev/e1000/if_igb.c
index ff2e424..4ae4204 100644
--- a/sys/dev/e1000/if_igb.c
+++ b/sys/dev/e1000/if_igb.c
@@ -300,7 +300,7 @@ MODULE_DEPEND(igb, ether, 1, 1, 1);
* Tunable default values.
*********************************************************************/
-SYSCTL_NODE(_hw, OID_AUTO, igb, CTLFLAG_RD, 0, "IGB driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, igb, CTLFLAG_RD, 0, "IGB driver parameters");
/* Descriptor defaults */
static int igb_rxd = IGB_DEFAULT_RXD;
diff --git a/sys/dev/esp/am53c974reg.h b/sys/dev/esp/am53c974reg.h
new file mode 100644
index 0000000..86260fd
--- /dev/null
+++ b/sys/dev/esp/am53c974reg.h
@@ -0,0 +1,72 @@
+/* $NetBSD: pcscpreg.h,v 1.2 2008/04/28 20:23:55 martin Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Izumi Tsutsui.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _AM53C974_H_
+#define _AM53C974_H_
+
+/*
+ * Am53c974 DMA engine registers
+ */
+
+#define DMA_CMD 0x40 /* Command */
+#define DMACMD_RSVD 0xFFFFFF28 /* reserved */
+#define DMACMD_DIR 0x00000080 /* Transfer Direction (read:1) */
+#define DMACMD_INTE 0x00000040 /* DMA Interrupt Enable */
+#define DMACMD_MDL 0x00000010 /* Map to Memory Description List */
+#define DMACMD_DIAG 0x00000004 /* Diagnostic */
+#define DMACMD_CMD 0x00000003 /* Command Code Bit */
+#define DMACMD_IDLE 0x00000000 /* Idle */
+#define DMACMD_BLAST 0x00000001 /* Blast */
+#define DMACMD_ABORT 0x00000002 /* Abort */
+#define DMACMD_START 0x00000003 /* Start */
+
+#define DMA_STC 0x44 /* Start Transfer Count */
+#define DMA_SPA 0x48 /* Start Physical Address */
+#define DMA_WBC 0x4C /* Working Byte Counter */
+#define DMA_WAC 0x50 /* Working Address Counter */
+
+#define DMA_STAT 0x54 /* Status Register */
+#define DMASTAT_RSVD 0xFFFFFF80 /* reserved */
+#define DMASTAT_PABT 0x00000040 /* PCI master/target Abort */
+#define DMASTAT_BCMP 0x00000020 /* BLAST Complete */
+#define DMASTAT_SINT 0x00000010 /* SCSI Interrupt */
+#define DMASTAT_DONE 0x00000008 /* DMA Transfer Terminated */
+#define DMASTAT_ABT 0x00000004 /* DMA Transfer Aborted */
+#define DMASTAT_ERR 0x00000002 /* DMA Transfer Error */
+#define DMASTAT_PWDN 0x00000001 /* Power Down Indicator */
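+
+/*
+ * Usage sketch (illustrative only, not part of the original sources):
+ * assuming the READ_DMAREG()/WRITE_DMAREG() accessors that esp_pci.c
+ * below defines over these offsets, a polled start/complete/idle
+ * cycle for a data-in transfer could look like:
+ *
+ *	WRITE_DMAREG(esc, DMA_CMD, DMACMD_START | DMACMD_DIR);
+ *	while ((READ_DMAREG(esc, DMA_STAT) & DMASTAT_DONE) == 0)
+ *		;
+ *	WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | DMACMD_DIR);
+ */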
+
+#define DMA_SMDLA 0x58 /* Starting Memory Descriptor List Address */
+#define DMA_WMAC 0x5C /* Working MDL Counter */
+#define DMA_SBAC 0x70 /* SCSI Bus and Control */
+
+#endif /* _AM53C974_H_ */
diff --git a/sys/dev/esp/esp_pci.c b/sys/dev/esp/esp_pci.c
new file mode 100644
index 0000000..2fa2030
--- /dev/null
+++ b/sys/dev/esp/esp_pci.c
@@ -0,0 +1,654 @@
+/*-
+ * Copyright (c) 2011 Marius Strobl <marius@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $NetBSD: pcscp.c,v 1.45 2010/11/13 13:52:08 uebayasi Exp $ */
+
+/*-
+ * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center; Izumi Tsutsui.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * esp_pci.c: device dependent code for AMD Am53c974 (PCscsi-PCI)
+ * written by Izumi Tsutsui <tsutsui@NetBSD.org>
+ *
+ * Technical manual available at
+ * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/19113.pdf
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/resource.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/scsi/scsi_all.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <dev/esp/ncr53c9xreg.h>
+#include <dev/esp/ncr53c9xvar.h>
+
+#include <dev/esp/am53c974reg.h>
+
+#define PCI_DEVICE_ID_AMD53C974 0x20201022
+
+struct esp_pci_softc {
+ struct ncr53c9x_softc sc_ncr53c9x; /* glue to MI code */
+ struct device *sc_dev;
+
+ struct resource *sc_res[2];
+#define ESP_PCI_RES_INTR 0
+#define ESP_PCI_RES_IO 1
+
+ bus_dma_tag_t sc_pdmat;
+
+ bus_dma_tag_t sc_xferdmat; /* DMA tag for transfers */
+ bus_dmamap_t sc_xferdmam; /* DMA map for transfers */
+
+ void *sc_ih; /* interrupt handler */
+
+ size_t sc_dmasize; /* DMA size */
+ void **sc_dmaaddr; /* DMA address */
+ size_t *sc_dmalen; /* DMA length */
+ int sc_active; /* DMA state */
+ int sc_datain; /* DMA Data Direction */
+};
+
+static struct resource_spec esp_pci_res_spec[] = {
+ { SYS_RES_IRQ, 0, RF_SHAREABLE | RF_ACTIVE }, /* ESP_PCI_RES_INTR */
+ { SYS_RES_IOPORT, PCIR_BAR(0), RF_ACTIVE }, /* ESP_PCI_RES_IO */
+ { -1, 0 }
+};
+
+#define READ_DMAREG(sc, reg) \
+ bus_read_4((sc)->sc_res[ESP_PCI_RES_IO], (reg))
+#define WRITE_DMAREG(sc, reg, var) \
+ bus_write_4((sc)->sc_res[ESP_PCI_RES_IO], (reg), (var))
+
+#define READ_ESPREG(sc, reg) \
+ bus_read_1((sc)->sc_res[ESP_PCI_RES_IO], (reg) << 2)
+#define WRITE_ESPREG(sc, reg, val) \
+ bus_write_1((sc)->sc_res[ESP_PCI_RES_IO], (reg) << 2, (val))
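+
+/*
+ * Worked example (illustrative): the Am53c974 spaces the byte-wide
+ * SCSI core registers on 32-bit boundaries, hence the "<< 2" above;
+ * e.g. ESP register NCR_STAT (0x04 in ncr53c9xreg.h) is accessed at
+ * I/O offset 0x04 << 2 == 0x10:
+ *
+ *	uint8_t stat = READ_ESPREG(esc, NCR_STAT);
+ */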
+
+static int esp_pci_probe(device_t);
+static int esp_pci_attach(device_t);
+static int esp_pci_detach(device_t);
+static int esp_pci_suspend(device_t);
+static int esp_pci_resume(device_t);
+
+static device_method_t esp_pci_methods[] = {
+ DEVMETHOD(device_probe, esp_pci_probe),
+ DEVMETHOD(device_attach, esp_pci_attach),
+ DEVMETHOD(device_detach, esp_pci_detach),
+ DEVMETHOD(device_suspend, esp_pci_suspend),
+ DEVMETHOD(device_resume, esp_pci_resume),
+
+ KOBJMETHOD_END
+};
+
+static driver_t esp_pci_driver = {
+ "esp",
+ esp_pci_methods,
+ sizeof(struct esp_pci_softc)
+};
+
+DRIVER_MODULE(esp, pci, esp_pci_driver, esp_devclass, 0, 0);
+MODULE_DEPEND(esp, pci, 1, 1, 1);
+
+/*
+ * Functions and the switch for the MI code
+ */
+static void esp_pci_dma_go(struct ncr53c9x_softc *);
+static int esp_pci_dma_intr(struct ncr53c9x_softc *);
+static int esp_pci_dma_isactive(struct ncr53c9x_softc *);
+
+static int esp_pci_dma_isintr(struct ncr53c9x_softc *);
+static void esp_pci_dma_reset(struct ncr53c9x_softc *);
+static int esp_pci_dma_setup(struct ncr53c9x_softc *, void **, size_t *,
+ int, size_t *);
+static void esp_pci_dma_stop(struct ncr53c9x_softc *);
+static void esp_pci_write_reg(struct ncr53c9x_softc *, int, uint8_t);
+static uint8_t esp_pci_read_reg(struct ncr53c9x_softc *, int);
+static void esp_pci_xfermap(void *arg, bus_dma_segment_t *segs, int nseg,
+ int error);
+
+static struct ncr53c9x_glue esp_pci_glue = {
+ esp_pci_read_reg,
+ esp_pci_write_reg,
+ esp_pci_dma_isintr,
+ esp_pci_dma_reset,
+ esp_pci_dma_intr,
+ esp_pci_dma_setup,
+ esp_pci_dma_go,
+ esp_pci_dma_stop,
+ esp_pci_dma_isactive,
+};
+
+static int
+esp_pci_probe(device_t dev)
+{
+
+ if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
+ device_set_desc(dev, "AMD Am53C974 Fast-SCSI");
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+/*
+ * Attach this instance, and then all the sub-devices
+ */
+static int
+esp_pci_attach(device_t dev)
+{
+ struct esp_pci_softc *esc;
+ struct ncr53c9x_softc *sc;
+ int error;
+
+ esc = device_get_softc(dev);
+ sc = &esc->sc_ncr53c9x;
+
+ NCR_LOCK_INIT(sc);
+
+ esc->sc_dev = dev;
+ sc->sc_glue = &esp_pci_glue;
+
+ pci_enable_busmaster(dev);
+
+ error = bus_alloc_resources(dev, esp_pci_res_spec, esc->sc_res);
+ if (error != 0) {
+ device_printf(dev, "failed to allocate resources\n");
+ bus_release_resources(dev, esp_pci_res_spec, esc->sc_res);
+ return (error);
+ }
+
+ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ BUS_SPACE_MAXSIZE_32BIT, BUS_SPACE_UNRESTRICTED,
+ BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &esc->sc_pdmat);
+ if (error != 0) {
+ device_printf(dev, "cannot create parent DMA tag\n");
+ goto fail_res;
+ }
+
+ /*
+ * XXX More of this should be in ncr53c9x_attach(), but
+ * XXX should we really poke around the chip that much in
+ * XXX the MI code? Think about this more...
+ */
+
+ /*
+ * Set up static configuration info.
+ *
+ * XXX we should read the configuration from the EEPROM.
+ */
+ sc->sc_id = 7;
+ sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB;
+ sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_FE;
+ sc->sc_cfg3 = NCRAMDCFG3_IDM | NCRAMDCFG3_FCLK;
+ sc->sc_cfg4 = NCRAMDCFG4_GE12NS | NCRAMDCFG4_RADE;
+ sc->sc_rev = NCR_VARIANT_AM53C974;
+ sc->sc_features = NCR_F_FASTSCSI | NCR_F_DMASELECT;
+ sc->sc_cfg3_fscsi = NCRAMDCFG3_FSCSI;
+ sc->sc_freq = 40; /* MHz */
+
+ /*
+ * This is the value used to start sync negotiations
+ * Note that the NCR register "SYNCTP" is programmed
+ * in "clocks per byte", and has a minimum value of 4.
+ * The SCSI period used in negotiation is one-fourth
+ * of the time (in nanoseconds) needed to transfer one byte.
+ * Since the chip's clock is given in MHz, we have the following
+ * formula: 4 * period = (1000 / freq) * 4
+ */
+ sc->sc_minsync = 1000 / sc->sc_freq;
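+	/*
+	 * Worked example (illustrative, not from the original sources):
+	 * with sc_freq = 40 (MHz) as set above, sc_minsync = 1000 / 40
+	 * = 25, i.e. a minimum sync period of 25 ns (four 25 ns clocks
+	 * per byte, divided by four).
+	 */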
+
+ sc->sc_maxxfer = DFLTPHYS; /* see below */
+ sc->sc_maxoffset = 15;
+ sc->sc_extended_geom = 1;
+
+#define MDL_SEG_SIZE 0x1000 /* 4kbyte per segment */
+
+ /*
+ * Create the DMA tag and map for the data transfers.
+ *
+ * Note: given that bus_dma(9) only adheres to the requested alignment
+ * for the first segment (and that also only for bus_dmamem_alloc()ed
+ * DMA maps) we can't use the Memory Descriptor List. However, also
+ * when not using the MDL, the maximum transfer size apparently is
+ * limited to 4k so we have to split transfers up, which plain sucks.
+ */
+ error = bus_dma_tag_create(esc->sc_pdmat, PAGE_SIZE, 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ MDL_SEG_SIZE, 1, MDL_SEG_SIZE, BUS_DMA_ALLOCNOW,
+ busdma_lock_mutex, &sc->sc_lock, &esc->sc_xferdmat);
+ if (error != 0) {
+ device_printf(dev, "cannot create transfer DMA tag\n");
+ goto fail_pdmat;
+ }
+ error = bus_dmamap_create(esc->sc_xferdmat, 0, &esc->sc_xferdmam);
+ if (error != 0) {
+ device_printf(dev, "cannnot create transfer DMA map\n");
+ goto fail_xferdmat;
+ }
+
+ error = bus_setup_intr(dev, esc->sc_res[ESP_PCI_RES_INTR],
+ INTR_MPSAFE | INTR_TYPE_CAM, NULL, ncr53c9x_intr, sc,
+ &esc->sc_ih);
+ if (error != 0) {
+ device_printf(dev, "cannot set up interrupt\n");
+ goto fail_xferdmam;
+ }
+
+ /* Do the common parts of attachment. */
+ sc->sc_dev = esc->sc_dev;
+ error = ncr53c9x_attach(sc);
+ if (error != 0) {
+ device_printf(esc->sc_dev, "ncr53c9x_attach failed\n");
+ goto fail_intr;
+ }
+
+ return (0);
+
+ fail_intr:
+ bus_teardown_intr(esc->sc_dev, esc->sc_res[ESP_PCI_RES_INTR],
+ esc->sc_ih);
+ fail_xferdmam:
+ bus_dmamap_destroy(esc->sc_xferdmat, esc->sc_xferdmam);
+ fail_xferdmat:
+ bus_dma_tag_destroy(esc->sc_xferdmat);
+ fail_pdmat:
+ bus_dma_tag_destroy(esc->sc_pdmat);
+ fail_res:
+ bus_release_resources(dev, esp_pci_res_spec, esc->sc_res);
+ NCR_LOCK_DESTROY(sc);
+
+ return (error);
+}
+
+static int
+esp_pci_detach(device_t dev)
+{
+ struct ncr53c9x_softc *sc;
+ struct esp_pci_softc *esc;
+ int error;
+
+ esc = device_get_softc(dev);
+ sc = &esc->sc_ncr53c9x;
+
+ bus_teardown_intr(esc->sc_dev, esc->sc_res[ESP_PCI_RES_INTR],
+ esc->sc_ih);
+ error = ncr53c9x_detach(sc);
+ if (error != 0)
+ return (error);
+ bus_dmamap_destroy(esc->sc_xferdmat, esc->sc_xferdmam);
+ bus_dma_tag_destroy(esc->sc_xferdmat);
+ bus_dma_tag_destroy(esc->sc_pdmat);
+ bus_release_resources(dev, esp_pci_res_spec, esc->sc_res);
+ NCR_LOCK_DESTROY(sc);
+
+ return (0);
+}
+
+static int
+esp_pci_suspend(device_t dev)
+{
+
+ return (ENXIO);
+}
+
+static int
+esp_pci_resume(device_t dev)
+{
+
+ return (ENXIO);
+}
+
+static void
+esp_pci_xfermap(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct esp_pci_softc *esc = (struct esp_pci_softc *)arg;
+
+ if (error != 0)
+ return;
+
+ KASSERT(nsegs == 1, ("%s: bad transfer segment count %d", __func__,
+ nsegs));
+ KASSERT(segs[0].ds_len <= MDL_SEG_SIZE,
+ ("%s: bad transfer segment length %ld", __func__,
+ (long)segs[0].ds_len));
+
+ /* Program the DMA Starting Physical Address. */
+ WRITE_DMAREG(esc, DMA_SPA, segs[0].ds_addr);
+}
+
+/*
+ * Glue functions
+ */
+
+static uint8_t
+esp_pci_read_reg(struct ncr53c9x_softc *sc, int reg)
+{
+ struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;
+
+ return (READ_ESPREG(esc, reg));
+}
+
+static void
+esp_pci_write_reg(struct ncr53c9x_softc *sc, int reg, uint8_t v)
+{
+ struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;
+
+ WRITE_ESPREG(esc, reg, v);
+}
+
+static int
+esp_pci_dma_isintr(struct ncr53c9x_softc *sc)
+{
+ struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;
+
+ return (READ_ESPREG(esc, NCR_STAT) & NCRSTAT_INT) != 0;
+}
+
+static void
+esp_pci_dma_reset(struct ncr53c9x_softc *sc)
+{
+ struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;
+
+ WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE);
+
+ esc->sc_active = 0;
+}
+
+static int
+esp_pci_dma_intr(struct ncr53c9x_softc *sc)
+{
+ struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;
+ bus_dma_tag_t xferdmat;
+ bus_dmamap_t xferdmam;
+ size_t dmasize;
+ int datain, i, resid, trans;
+ uint32_t dmastat;
+ char *p = NULL;
+
+ xferdmat = esc->sc_xferdmat;
+ xferdmam = esc->sc_xferdmam;
+ datain = esc->sc_datain;
+
+ dmastat = READ_DMAREG(esc, DMA_STAT);
+
+ if ((dmastat & DMASTAT_ERR) != 0) {
+ /* XXX not tested... */
+ WRITE_DMAREG(esc, DMA_CMD, DMACMD_ABORT | (datain != 0 ?
+ DMACMD_DIR : 0));
+
+ device_printf(esc->sc_dev, "DMA error detected; Aborting.\n");
+ bus_dmamap_sync(xferdmat, xferdmam, datain != 0 ?
+ BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(xferdmat, xferdmam);
+ return (-1);
+ }
+
+ if ((dmastat & DMASTAT_ABT) != 0) {
+ /* XXX what should be done? */
+ device_printf(esc->sc_dev, "DMA aborted.\n");
+ WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain != 0 ?
+ DMACMD_DIR : 0));
+ esc->sc_active = 0;
+ return (0);
+ }
+
+ KASSERT(esc->sc_active != 0, ("%s: DMA wasn't active", __func__));
+
+ /* DMA has stopped. */
+
+ esc->sc_active = 0;
+
+ dmasize = esc->sc_dmasize;
+ if (dmasize == 0) {
+ /* A "Transfer Pad" operation completed. */
+ NCR_DMA(("%s: discarded %d bytes (tcl=%d, tcm=%d)\n",
+ __func__, READ_ESPREG(esc, NCR_TCL) |
+ (READ_ESPREG(esc, NCR_TCM) << 8),
+ READ_ESPREG(esc, NCR_TCL), READ_ESPREG(esc, NCR_TCM)));
+ return (0);
+ }
+
+ resid = 0;
+ /*
+ * If a transfer onto the SCSI bus gets interrupted by the device
+ * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
+ * as residual since the ESP counter registers get decremented as
+ * bytes are clocked into the FIFO.
+ */
+ if (datain == 0 &&
+ (resid = (READ_ESPREG(esc, NCR_FFLAG) & NCRFIFO_FF)) != 0)
+ NCR_DMA(("%s: empty esp FIFO of %d ", __func__, resid));
+
+ if ((sc->sc_espstat & NCRSTAT_TC) == 0) {
+ /*
+ * "Terminal count" is off, so read the residue
+ * out of the ESP counter registers.
+ */
+ if (datain != 0) {
+ resid = READ_ESPREG(esc, NCR_FFLAG) & NCRFIFO_FF;
+ while (resid > 1)
+ resid =
+ READ_ESPREG(esc, NCR_FFLAG) & NCRFIFO_FF;
+ WRITE_DMAREG(esc, DMA_CMD, DMACMD_BLAST | DMACMD_DIR);
+
+ for (i = 0; i < 0x8000; i++) /* XXX 0x8000 ? */
+ if ((READ_DMAREG(esc, DMA_STAT) &
+ DMASTAT_BCMP) != 0)
+ break;
+
+ /* See the below comments... */
+ if (resid != 0)
+ p = *esc->sc_dmaaddr;
+ }
+
+ resid += READ_ESPREG(esc, NCR_TCL) |
+ (READ_ESPREG(esc, NCR_TCM) << 8) |
+ (READ_ESPREG(esc, NCR_TCH) << 16);
+ } else
+ while ((dmastat & DMASTAT_DONE) == 0)
+ dmastat = READ_DMAREG(esc, DMA_STAT);
+
+ WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain != 0 ?
+ DMACMD_DIR : 0));
+
+ /* Sync the transfer buffer. */
+ bus_dmamap_sync(xferdmat, xferdmam, datain != 0 ?
+ BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(xferdmat, xferdmam);
+
+ trans = dmasize - resid;
+
+ /*
+ * From the technical manual notes:
+ *
+ * "In some odd byte conditions, one residual byte will be left
+ * in the SCSI FIFO, and the FIFO flags will never count to 0.
+ * When this happens, the residual byte should be retrieved
+ * via PIO following completion of the BLAST operation."
+ */
+ if (p != NULL) {
+ p += trans;
+ *p = READ_ESPREG(esc, NCR_FIFO);
+ trans++;
+ }
+
+ if (trans < 0) { /* transferred < 0 ? */
+#if 0
+ /*
+ * This situation can happen in perfectly normal operation
+ * if the ESP is reselected while using DMA to select
+ * another target. As such, don't print the warning.
+ */
+ device_printf(dev, "xfer (%d) > req (%d)\n", trans, dmasize);
+#endif
+ trans = dmasize;
+ }
+
+ NCR_DMA(("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n", __func__,
+ READ_ESPREG(esc, NCR_TCL), READ_ESPREG(esc, NCR_TCM),
+ READ_ESPREG(esc, NCR_TCH), trans, resid));
+
+ *esc->sc_dmalen -= trans;
+ *esc->sc_dmaaddr = (char *)*esc->sc_dmaaddr + trans;
+
+ return (0);
+}
+
+static int
+esp_pci_dma_setup(struct ncr53c9x_softc *sc, void **addr, size_t *len,
+ int datain, size_t *dmasize)
+{
+ struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;
+ int error;
+
+ WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain != 0 ? DMACMD_DIR :
+ 0));
+
+ *dmasize = esc->sc_dmasize = ulmin(*dmasize, MDL_SEG_SIZE);
+ esc->sc_dmaaddr = addr;
+ esc->sc_dmalen = len;
+ esc->sc_datain = datain;
+
+ /*
+ * There's no need to set up DMA for a "Transfer Pad" operation.
+ */
+ if (*dmasize == 0)
+ return (0);
+
+ /* Set the transfer length. */
+ WRITE_DMAREG(esc, DMA_STC, *dmasize);
+
+ /*
+ * Load the transfer buffer and program the DMA address.
+ * Note that the NCR53C9x core can't handle EINPROGRESS so we set
+ * BUS_DMA_NOWAIT.
+ */
+ error = bus_dmamap_load(esc->sc_xferdmat, esc->sc_xferdmam,
+ *esc->sc_dmaaddr, *dmasize, esp_pci_xfermap, sc, BUS_DMA_NOWAIT);
+
+ return (error);
+}
+
+static void
+esp_pci_dma_go(struct ncr53c9x_softc *sc)
+{
+ struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;
+ int datain;
+
+ datain = esc->sc_datain;
+
+ /* No DMA transfer for a "Transfer Pad" operation */
+ if (esc->sc_dmasize == 0)
+ return;
+
+ /* Sync the transfer buffer. */
+ bus_dmamap_sync(esc->sc_xferdmat, esc->sc_xferdmam, datain != 0 ?
+ BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
+
+ /* Set the DMA engine to the IDLE state. */
+ /* XXX DMA Transfer Interrupt Enable bit is broken? */
+ WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | /* DMACMD_INTE | */
+ (datain != 0 ? DMACMD_DIR : 0));
+
+ /* Issue a DMA start command. */
+ WRITE_DMAREG(esc, DMA_CMD, DMACMD_START | /* DMACMD_INTE | */
+ (datain != 0 ? DMACMD_DIR : 0));
+
+ esc->sc_active = 1;
+}
+
+static void
+esp_pci_dma_stop(struct ncr53c9x_softc *sc)
+{
+ struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;
+
+ /* DMA stop */
+ /* XXX what should we do here ? */
+ WRITE_DMAREG(esc, DMA_CMD,
+ DMACMD_ABORT | (esc->sc_datain != 0 ? DMACMD_DIR : 0));
+ bus_dmamap_unload(esc->sc_xferdmat, esc->sc_xferdmam);
+
+ esc->sc_active = 0;
+}
+
+static int
+esp_pci_dma_isactive(struct ncr53c9x_softc *sc)
+{
+ struct esp_pci_softc *esc = (struct esp_pci_softc *)sc;
+
+ /* XXX should we check esc->sc_active? */
+ if ((READ_DMAREG(esc, DMA_CMD) & DMACMD_CMD) != DMACMD_IDLE)
+ return (1);
+
+ return (0);
+}
diff --git a/sys/dev/esp/esp_sbus.c b/sys/dev/esp/esp_sbus.c
index 62a4592..c1ff021 100644
--- a/sys/dev/esp/esp_sbus.c
+++ b/sys/dev/esp/esp_sbus.c
@@ -68,13 +68,13 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
+#include <sys/rman.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>
#include <machine/bus.h>
#include <machine/ofw_machdep.h>
#include <machine/resource.h>
-#include <sys/rman.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
@@ -92,7 +92,7 @@ __FBSDID("$FreeBSD$");
struct esp_softc {
struct ncr53c9x_softc sc_ncr53c9x; /* glue to MI code */
- struct device *sc_dev;
+ device_t sc_dev;
struct resource *sc_res;
@@ -102,8 +102,6 @@ struct esp_softc {
struct lsi64854_softc *sc_dma; /* pointer to my DMA */
};
-static devclass_t esp_devclass;
-
static int esp_probe(device_t);
static int esp_dma_attach(device_t);
static int esp_dma_detach(device_t);
@@ -118,7 +116,8 @@ static device_method_t esp_dma_methods[] = {
DEVMETHOD(device_detach, esp_dma_detach),
DEVMETHOD(device_suspend, esp_suspend),
DEVMETHOD(device_resume, esp_resume),
- {0, 0}
+
+ KOBJMETHOD_END
};
static driver_t esp_dma_driver = {
@@ -136,7 +135,8 @@ static device_method_t esp_sbus_methods[] = {
DEVMETHOD(device_detach, esp_sbus_detach),
DEVMETHOD(device_suspend, esp_suspend),
DEVMETHOD(device_resume, esp_resume),
- {0, 0}
+
+ KOBJMETHOD_END
};
static driver_t esp_sbus_driver = {
@@ -175,7 +175,6 @@ static const struct ncr53c9x_glue const esp_sbus_glue = {
esp_dma_go,
esp_dma_stop,
esp_dma_isactive,
- NULL, /* gl_clear_latched_intr */
};
static int
@@ -245,9 +244,9 @@ esp_sbus_attach(device_t dev)
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
- 0, /* nsegments */
- BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ BUS_SPACE_MAXSIZE, /* maxsize */
+ BUS_SPACE_UNRESTRICTED, /* nsegments */
+ BUS_SPACE_MAXSIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* no locking */
&lsc->sc_parent_dmat);
@@ -292,8 +291,10 @@ esp_sbus_attach(device_t dev)
}
for (i = 0; i < nchildren; i++) {
if (device_is_attached(children[i]) &&
- sbus_get_slot(children[i]) == sbus_get_slot(dev) &&
- strcmp(ofw_bus_get_name(children[i]), "dma") == 0) {
+ sbus_get_slot(children[i]) ==
+ sbus_get_slot(dev) &&
+ strcmp(ofw_bus_get_name(children[i]),
+ "dma") == 0) {
/* XXX hackery */
esc->sc_dma = (struct lsi64854_softc *)
device_get_softc(children[i]);
@@ -453,13 +454,6 @@ espattach(struct esp_softc *esc, const struct ncr53c9x_glue *gluep)
NCR_LOCK_INIT(sc);
- /* Attach the DMA engine. */
- error = lsi64854_attach(esc->sc_dma);
- if (error != 0) {
- device_printf(esc->sc_dev, "lsi64854_attach failed\n");
- goto fail_lock;
- }
-
sc->sc_id = OF_getscsinitid(esc->sc_dev);
#ifdef ESP_SBUS_DEBUG
@@ -516,9 +510,9 @@ espattach(struct esp_softc *esc, const struct ncr53c9x_glue *gluep)
NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);
if ((NCR_READ_REG(sc, NCR_CFG2) & ~NCRCFG2_RSVD) !=
- (NCRCFG2_SCSI2 | NCRCFG2_RPE)) {
+ (NCRCFG2_SCSI2 | NCRCFG2_RPE))
sc->sc_rev = NCR_VARIANT_ESP100;
- } else {
+ else {
sc->sc_cfg2 = NCRCFG2_SCSI2;
NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);
sc->sc_cfg3 = 0;
@@ -526,9 +520,9 @@ espattach(struct esp_softc *esc, const struct ncr53c9x_glue *gluep)
sc->sc_cfg3 = (NCRCFG3_CDB | NCRCFG3_FCLK);
NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3);
if (NCR_READ_REG(sc, NCR_CFG3) !=
- (NCRCFG3_CDB | NCRCFG3_FCLK)) {
+ (NCRCFG3_CDB | NCRCFG3_FCLK))
sc->sc_rev = NCR_VARIANT_ESP100A;
- } else {
+ else {
/* NCRCFG2_FE enables > 64K transfers. */
sc->sc_cfg2 |= NCRCFG2_FE;
sc->sc_cfg3 = 0;
@@ -543,9 +537,11 @@ espattach(struct esp_softc *esc, const struct ncr53c9x_glue *gluep)
case 0x02:
if ((uid & 0x07) == 0x02)
- sc->sc_rev = NCR_VARIANT_FAS216;
+ sc->sc_rev =
+ NCR_VARIANT_FAS216;
else
- sc->sc_rev = NCR_VARIANT_FAS236;
+ sc->sc_rev =
+ NCR_VARIANT_FAS236;
break;
case 0x0a:
@@ -560,7 +556,8 @@ espattach(struct esp_softc *esc, const struct ncr53c9x_glue *gluep)
*/
device_printf(esc->sc_dev,
"Unknown chip\n");
- goto fail_lsi;
+ error = ENXIO;
+ goto fail_lock;
}
}
}
@@ -571,12 +568,6 @@ espattach(struct esp_softc *esc, const struct ncr53c9x_glue *gluep)
#endif
/*
- * XXX minsync and maxxfer _should_ be set up in MI code,
- * XXX but it appears to have some dependency on what sort
- * XXX of DMA we're hooked up to, etc.
- */
-
- /*
* This is the value used to start sync negotiations
* Note that the NCR register "SYNCTP" is programmed
* in "clocks per byte", and has a minimum value of 4.
@@ -587,31 +578,27 @@ espattach(struct esp_softc *esc, const struct ncr53c9x_glue *gluep)
*/
sc->sc_minsync = 1000 / sc->sc_freq;
+ /*
+ * Except for some variants the maximum transfer size is 64k.
+ */
+ sc->sc_maxxfer = 64 * 1024;
sc->sc_maxoffset = 15;
sc->sc_extended_geom = 1;
/*
* Alas, we must now modify the value a bit, because it's
- * only valid when can switch on FASTCLK and FASTSCSI bits
- * in config register 3...
+ * only valid when we can switch on FASTCLK and FASTSCSI bits
+ * in the config register 3...
*/
switch (sc->sc_rev) {
case NCR_VARIANT_ESP100:
sc->sc_maxwidth = MSG_EXT_WDTR_BUS_8_BIT;
- sc->sc_maxxfer = 64 * 1024;
sc->sc_minsync = 0; /* No synch on old chip? */
break;
case NCR_VARIANT_ESP100A:
- sc->sc_maxwidth = MSG_EXT_WDTR_BUS_8_BIT;
- sc->sc_maxxfer = 64 * 1024;
- /* Min clocks/byte is 5 */
- sc->sc_minsync = ncr53c9x_cpb2stp(sc, 5);
- break;
-
case NCR_VARIANT_ESP200:
sc->sc_maxwidth = MSG_EXT_WDTR_BUS_8_BIT;
- sc->sc_maxxfer = 16 * 1024 * 1024;
/* Min clocks/byte is 5 */
sc->sc_minsync = ncr53c9x_cpb2stp(sc, 5);
break;
@@ -642,6 +629,19 @@ espattach(struct esp_softc *esc, const struct ncr53c9x_glue *gluep)
break;
}
+ /*
+ * Given that we allocate resources based on sc->sc_maxxfer it doesn't
+ * make sense to supply a value higher than the maximum actually used.
+ */
+ sc->sc_maxxfer = min(sc->sc_maxxfer, MAXPHYS);
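+	/*
+	 * Illustrative cross-reference (editorial sketch, not in the
+	 * original sources): ncr53c9x_action() advertises this bound
+	 * to CAM, e.g.
+	 *
+	 *	cpi->maxio = sc->sc_maxxfer;
+	 *
+	 * so the upper layers never submit a transfer the DMA setup
+	 * would have to reject.
+	 */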
+
+ /* Attach the DMA engine. */
+ error = lsi64854_attach(esc->sc_dma);
+ if (error != 0) {
+ device_printf(esc->sc_dev, "lsi64854_attach failed\n");
+ goto fail_lock;
+ }
+
/* Establish interrupt channel. */
i = 0;
if ((esc->sc_irqres = bus_alloc_resource_any(esc->sc_dev, SYS_RES_IRQ,
diff --git a/sys/dev/esp/ncr53c9x.c b/sys/dev/esp/ncr53c9x.c
index 1d46318..b9d4990 100644
--- a/sys/dev/esp/ncr53c9x.c
+++ b/sys/dev/esp/ncr53c9x.c
@@ -123,6 +123,8 @@ __FBSDID("$FreeBSD$");
#include <dev/esp/ncr53c9xreg.h>
#include <dev/esp/ncr53c9xvar.h>
+devclass_t esp_devclass;
+
MODULE_DEPEND(esp, cam, 1, 1, 1);
#ifdef NCR53C9X_DEBUG
@@ -179,8 +181,7 @@ static inline int ncr53c9x_stp2cpb(struct ncr53c9x_softc *sc,
#define NCR_SET_COUNT(sc, size) do { \
NCR_WRITE_REG((sc), NCR_TCL, (size)); \
NCR_WRITE_REG((sc), NCR_TCM, (size) >> 8); \
- if ((sc->sc_cfg2 & NCRCFG2_FE) || \
- (sc->sc_rev == NCR_VARIANT_FAS366)) \
+ if ((sc->sc_features & NCR_F_LARGEXFER) != 0) \
NCR_WRITE_REG((sc), NCR_TCH, (size) >> 16); \
if (sc->sc_rev == NCR_VARIANT_FAS366) \
NCR_WRITE_REG(sc, NCR_RCH, 0); \
@@ -391,6 +392,7 @@ ncr53c9x_attach(struct ncr53c9x_softc *sc)
ecb = &sc->ecb_array[i];
ecb->sc = sc;
ecb->tag_id = i;
+ callout_init_mtx(&ecb->ch, &sc->sc_lock, 0);
TAILQ_INSERT_HEAD(&sc->free_list, ecb, free_links);
}
@@ -449,10 +451,10 @@ ncr53c9x_detach(struct ncr53c9x_softc *sc)
xpt_register_async(0, ncr53c9x_async, sc->sc_sim, sc->sc_path);
xpt_free_path(sc->sc_path);
xpt_bus_deregister(cam_sim_path(sc->sc_sim));
+ cam_sim_free(sc->sc_sim, TRUE);
NCR_UNLOCK(sc);
- cam_sim_free(sc->sc_sim, TRUE);
free(sc->ecb_array, M_DEVBUF);
free(sc->sc_tinfo, M_DEVBUF);
if (sc->sc_imess_self)
@@ -504,6 +506,8 @@ ncr53c9x_reset(struct ncr53c9x_softc *sc)
/* FALLTHROUGH */
case NCR_VARIANT_ESP100A:
sc->sc_features |= NCR_F_SELATN3;
+ if ((sc->sc_cfg2 & NCRCFG2_FE) != 0)
+ sc->sc_features |= NCR_F_LARGEXFER;
NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2);
/* FALLTHROUGH */
case NCR_VARIANT_ESP100:
@@ -514,8 +518,8 @@ ncr53c9x_reset(struct ncr53c9x_softc *sc)
break;
case NCR_VARIANT_FAS366:
- sc->sc_features |=
- NCR_F_HASCFG3 | NCR_F_FASTSCSI | NCR_F_SELATN3;
+ sc->sc_features |= NCR_F_HASCFG3 | NCR_F_FASTSCSI |
+ NCR_F_SELATN3 | NCR_F_LARGEXFER;
sc->sc_cfg3 = NCRFASCFG3_FASTCLK | NCRFASCFG3_OBAUTO;
if (sc->sc_id > 7)
sc->sc_cfg3 |= NCRFASCFG3_IDBIT3;
@@ -711,9 +715,6 @@ ncr53c9x_readregs(struct ncr53c9x_softc *sc)
sc->sc_espintr = NCR_READ_REG(sc, NCR_INTR);
- if (sc->sc_glue->gl_clear_latched_intr != NULL)
- (*sc->sc_glue->gl_clear_latched_intr)(sc);
-
/*
* Determine the SCSI bus phase, return either a real SCSI bus phase
* or some pseudo phase we use to detect certain exceptions.
@@ -806,7 +807,7 @@ ncr53c9x_select(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
struct ncr53c9x_tinfo *ti;
uint8_t *cmd;
size_t dmasize;
- int clen, selatn3, selatns;
+ int clen, error, selatn3, selatns;
int lun = ecb->ccb->ccb_h.target_lun;
int target = ecb->ccb->ccb_h.target_id;
@@ -887,13 +888,19 @@ ncr53c9x_select(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
dmasize = clen;
sc->sc_cmdlen = clen;
sc->sc_cmdp = cmd;
- NCRDMA_SETUP(sc, &sc->sc_cmdp, &sc->sc_cmdlen, 0, &dmasize);
+ error = NCRDMA_SETUP(sc, &sc->sc_cmdp, &sc->sc_cmdlen, 0,
+ &dmasize);
+ if (error != 0) {
+ sc->sc_cmdlen = 0;
+ sc->sc_cmdp = NULL;
+ goto cmd;
+ }
+
/* Program the SCSI counter. */
NCR_SET_COUNT(sc, dmasize);
/* Load the count in. */
- /* if (sc->sc_rev != NCR_VARIANT_FAS366) */
- NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA);
+ NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA);
/* And get the target's attention. */
if (selatn3) {
@@ -906,6 +913,7 @@ ncr53c9x_select(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
return;
}
+cmd:
/*
* Who am I? This is where we tell the target that we are
* happy for it to disconnect etc.
@@ -989,13 +997,11 @@ ncr53c9x_action(struct cam_sim *sim, union ccb *ccb)
case XPT_RESET_BUS:
ncr53c9x_init(sc, 1);
ccb->ccb_h.status = CAM_REQ_CMP;
- xpt_done(ccb);
- return;
+ break;
case XPT_CALC_GEOMETRY:
cam_calc_geometry(&ccb->ccg, sc->sc_extended_geom);
- xpt_done(ccb);
- return;
+ break;
case XPT_PATH_INQ:
cpi = &ccb->cpi;
@@ -1009,19 +1015,19 @@ ncr53c9x_action(struct cam_sim *sim, union ccb *ccb)
cpi->max_target = sc->sc_ntarg - 1;
cpi->max_lun = 7;
cpi->initiator_id = sc->sc_id;
- cpi->bus_id = 0;
- cpi->base_transfer_speed = 3300;
strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
- strncpy(cpi->hba_vid, "Sun", HBA_IDLEN);
+ strncpy(cpi->hba_vid, "NCR", HBA_IDLEN);
strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
- cpi->transport = XPORT_SPI;
- cpi->transport_version = 2;
+ cpi->bus_id = 0;
+ cpi->base_transfer_speed = 3300;
cpi->protocol = PROTO_SCSI;
cpi->protocol_version = SCSI_REV_2;
+ cpi->transport = XPORT_SPI;
+ cpi->transport_version = 2;
+ cpi->maxio = sc->sc_maxxfer;
ccb->ccb_h.status = CAM_REQ_CMP;
- xpt_done(ccb);
- return;
+ break;
case XPT_GET_TRAN_SETTINGS:
cts = &ccb->cts;
@@ -1064,28 +1070,24 @@ ncr53c9x_action(struct cam_sim *sim, union ccb *ccb)
CTS_SPI_VALID_DISC;
scsi->valid = CTS_SCSI_VALID_TQ;
ccb->ccb_h.status = CAM_REQ_CMP;
- xpt_done(ccb);
- return;
+ break;
case XPT_ABORT:
device_printf(sc->sc_dev, "XPT_ABORT called\n");
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
- xpt_done(ccb);
- return;
+ break;
case XPT_TERM_IO:
device_printf(sc->sc_dev, "XPT_TERM_IO called\n");
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
- xpt_done(ccb);
- return;
+ break;
case XPT_RESET_DEV:
case XPT_SCSI_IO:
if (ccb->ccb_h.target_id < 0 ||
ccb->ccb_h.target_id >= sc->sc_ntarg) {
ccb->ccb_h.status = CAM_PATH_INVALID;
- xpt_done(ccb);
- return;
+ goto done;
}
/* Get an ECB to use. */
ecb = ncr53c9x_get_ecb(sc);
@@ -1097,8 +1099,7 @@ ncr53c9x_action(struct cam_sim *sim, union ccb *ccb)
xpt_freeze_simq(sim, 1);
ccb->ccb_h.status = CAM_REQUEUE_REQ;
device_printf(sc->sc_dev, "unable to allocate ecb\n");
- xpt_done(ccb);
- return;
+ goto done;
}
/* Initialize ecb. */
@@ -1127,7 +1128,7 @@ ncr53c9x_action(struct cam_sim *sim, union ccb *ccb)
ecb->flags |= ECB_READY;
if (sc->sc_state == NCR_IDLE)
ncr53c9x_sched(sc);
- break;
+ return;
case XPT_SET_TRAN_SETTINGS:
cts = &ccb->cts;
@@ -1165,16 +1166,16 @@ ncr53c9x_action(struct cam_sim *sim, union ccb *ccb)
}
ccb->ccb_h.status = CAM_REQ_CMP;
- xpt_done(ccb);
- return;
+ break;
default:
device_printf(sc->sc_dev, "Unhandled function code %d\n",
ccb->ccb_h.func_code);
ccb->ccb_h.status = CAM_PROVIDE_FAIL;
- xpt_done(ccb);
- return;
}
+
+done:
+ xpt_done(ccb);
}
/*
@@ -1329,11 +1330,10 @@ ncr53c9x_sched(struct ncr53c9x_softc *sc)
sc->sc_nexus = ecb;
ncr53c9x_select(sc, ecb);
break;
- } else {
+ } else
NCR_TRACE(("[%s %d:%d busy] \n", __func__,
ecb->ccb->ccb_h.target_id,
ecb->ccb->ccb_h.target_lun));
- }
}
}
@@ -1412,10 +1412,10 @@ ncr53c9x_done(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
*/
if (ccb->ccb_h.status == CAM_REQ_CMP) {
ccb->csio.scsi_status = ecb->stat;
- if ((ecb->flags & ECB_ABORT) != 0) {
+ if ((ecb->flags & ECB_ABORT) != 0)
ccb->ccb_h.status = CAM_CMD_TIMEOUT;
- } else if ((ecb->flags & ECB_SENSE) != 0 &&
- (ecb->stat != SCSI_STATUS_CHECK_COND)) {
+ else if ((ecb->flags & ECB_SENSE) != 0 &&
+ (ecb->stat != SCSI_STATUS_CHECK_COND)) {
ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR |
CAM_AUTOSNS_VALID;
@@ -1439,13 +1439,15 @@ ncr53c9x_done(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
}
ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
}
- } else {
+ } else
ccb->csio.resid = ecb->dleft;
- }
if (ecb->stat == SCSI_STATUS_QUEUE_FULL)
ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
else if (ecb->stat == SCSI_STATUS_BUSY)
ccb->ccb_h.status = CAM_SCSI_BUSY;
+ } else if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
+ ccb->ccb_h.status |= CAM_DEV_QFRZN;
+ xpt_freeze_devq(ccb->ccb_h.path, 1);
}
#ifdef NCR53C9X_DEBUG
@@ -1473,7 +1475,7 @@ ncr53c9x_done(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb)
}
}
- if (ccb->ccb_h.status == CAM_SEL_TIMEOUT) {
+ if ((ccb->ccb_h.status & CAM_SEL_TIMEOUT) != 0) {
/* Selection timeout -- discard this LUN if empty. */
if (li->untagged == NULL && li->used == 0) {
if (lun < NCR_NLUN)
@@ -2030,8 +2032,8 @@ gotit:
default:
xpt_print_path(ecb->ccb->ccb_h.path);
- printf("unrecognized MESSAGE EXTENDED;"
- " sending REJECT\n");
+ printf("unrecognized MESSAGE EXTENDED 0x%x;"
+ " sending REJECT\n", sc->sc_imess[2]);
goto reject;
}
break;
@@ -2039,7 +2041,8 @@ gotit:
default:
NCR_MSGS(("ident "));
xpt_print_path(ecb->ccb->ccb_h.path);
- printf("unrecognized MESSAGE; sending REJECT\n");
+ printf("unrecognized MESSAGE 0x%x; sending REJECT\n",
+ sc->sc_imess[0]);
/* FALLTHROUGH */
reject:
ncr53c9x_sched_msgout(SEND_REJECT);
@@ -2109,6 +2112,7 @@ ncr53c9x_msgout(struct ncr53c9x_softc *sc)
struct ncr53c9x_tinfo *ti;
struct ncr53c9x_ecb *ecb;
size_t size;
+ int error;
#ifdef NCR53C9X_DEBUG
int i;
#endif
@@ -2246,17 +2250,14 @@ ncr53c9x_msgout(struct ncr53c9x_softc *sc)
NCR_MSGS(("> "));
}
#endif
- if (sc->sc_rev == NCR_VARIANT_FAS366) {
- /*
- * XXX FIFO size
- */
- ncr53c9x_flushfifo(sc);
- ncr53c9x_wrfifo(sc, sc->sc_omp, sc->sc_omlen);
- NCRCMD(sc, NCRCMD_TRANS);
- } else {
+
+ if (sc->sc_rev != NCR_VARIANT_FAS366) {
/* (Re)send the message. */
size = ulmin(sc->sc_omlen, sc->sc_maxxfer);
- NCRDMA_SETUP(sc, &sc->sc_omp, &sc->sc_omlen, 0, &size);
+ error = NCRDMA_SETUP(sc, &sc->sc_omp, &sc->sc_omlen, 0, &size);
+ if (error != 0)
+ goto cmd;
+
/* Program the SCSI counter. */
NCR_SET_COUNT(sc, size);
@@ -2264,7 +2265,16 @@ ncr53c9x_msgout(struct ncr53c9x_softc *sc)
NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA);
NCRCMD(sc, NCRCMD_TRANS | NCRCMD_DMA);
NCRDMA_GO(sc);
+ return;
}
+
+cmd:
+ /*
+ * XXX FIFO size
+ */
+ ncr53c9x_flushfifo(sc);
+ ncr53c9x_wrfifo(sc, sc->sc_omp, sc->sc_omlen);
+ NCRCMD(sc, NCRCMD_TRANS);
}
void
@@ -2299,7 +2309,7 @@ ncr53c9x_intr1(struct ncr53c9x_softc *sc)
struct ncr53c9x_tinfo *ti;
struct timeval cur, wait;
size_t size;
- int i, nfifo;
+ int error, i, nfifo;
uint8_t msg;
NCR_LOCK_ASSERT(sc, MA_OWNED);
@@ -2974,8 +2984,14 @@ msgin:
size = ecb->clen;
sc->sc_cmdlen = size;
sc->sc_cmdp = (void *)&ecb->cmd.cmd;
- NCRDMA_SETUP(sc, &sc->sc_cmdp, &sc->sc_cmdlen,
+ error = NCRDMA_SETUP(sc, &sc->sc_cmdp, &sc->sc_cmdlen,
0, &size);
+ if (error != 0) {
+ sc->sc_cmdlen = 0;
+ sc->sc_cmdp = NULL;
+ goto cmd;
+ }
+
/* Program the SCSI counter. */
NCR_SET_COUNT(sc, size);
@@ -2985,30 +3001,51 @@ msgin:
/* Start the command transfer. */
NCRCMD(sc, NCRCMD_TRANS | NCRCMD_DMA);
NCRDMA_GO(sc);
- } else {
- ncr53c9x_wrfifo(sc, (uint8_t *)&ecb->cmd.cmd,
- ecb->clen);
- NCRCMD(sc, NCRCMD_TRANS);
+ sc->sc_prevphase = COMMAND_PHASE;
+ break;
}
+cmd:
+ ncr53c9x_wrfifo(sc, (uint8_t *)&ecb->cmd.cmd, ecb->clen);
+ NCRCMD(sc, NCRCMD_TRANS);
sc->sc_prevphase = COMMAND_PHASE;
break;
case DATA_OUT_PHASE:
NCR_PHASE(("DATA_OUT_PHASE [%ld] ", (long)sc->sc_dleft));
+ sc->sc_prevphase = DATA_OUT_PHASE;
NCRCMD(sc, NCRCMD_FLUSH);
size = ulmin(sc->sc_dleft, sc->sc_maxxfer);
- NCRDMA_SETUP(sc, &sc->sc_dp, &sc->sc_dleft, 0, &size);
- sc->sc_prevphase = DATA_OUT_PHASE;
+ error = NCRDMA_SETUP(sc, &sc->sc_dp, &sc->sc_dleft, 0, &size);
goto setup_xfer;
case DATA_IN_PHASE:
NCR_PHASE(("DATA_IN_PHASE "));
+ sc->sc_prevphase = DATA_IN_PHASE;
if (sc->sc_rev == NCR_VARIANT_ESP100)
NCRCMD(sc, NCRCMD_FLUSH);
size = ulmin(sc->sc_dleft, sc->sc_maxxfer);
- NCRDMA_SETUP(sc, &sc->sc_dp, &sc->sc_dleft, 1, &size);
- sc->sc_prevphase = DATA_IN_PHASE;
- setup_xfer:
+ error = NCRDMA_SETUP(sc, &sc->sc_dp, &sc->sc_dleft, 1, &size);
+setup_xfer:
+ if (error != 0) {
+ switch (error) {
+ case EFBIG:
+ ecb->ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
+ break;
+ case EINPROGRESS:
+ panic("%s: cannot deal with deferred DMA",
+ __func__);
+ case EINVAL:
+ ecb->ccb->ccb_h.status |= CAM_REQ_INVALID;
+ break;
+ case ENOMEM:
+ ecb->ccb->ccb_h.status |= CAM_REQUEUE_REQ;
+ break;
+ default:
+ ecb->ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
+ }
+ goto finish;
+ }
+
/* Target returned to data phase: wipe "done" memory */
ecb->flags &= ~ECB_TENTATIVE_DONE;
diff --git a/sys/dev/esp/ncr53c9xreg.h b/sys/dev/esp/ncr53c9xreg.h
index ab03421..dffbf7c 100644
--- a/sys/dev/esp/ncr53c9xreg.h
+++ b/sys/dev/esp/ncr53c9xreg.h
@@ -31,6 +31,9 @@
/* $FreeBSD$ */
+#ifndef _NCR53C9XREG_H_
+#define _NCR53C9XREG_H_
+
/*
* Register addresses, relative to some base address
*/
@@ -288,3 +291,4 @@
#define NCRFAS_STAT2_OSHUTTLE 0x40 /* next byte from FIFO is MSB */
#define NCRFAS_STAT2_EMPTY 0x80 /* FIFO is empty */
+#endif /* _NCR53C9XREG_H_ */
diff --git a/sys/dev/esp/ncr53c9xvar.h b/sys/dev/esp/ncr53c9xvar.h
index 38285fc..88ae049 100644
--- a/sys/dev/esp/ncr53c9xvar.h
+++ b/sys/dev/esp/ncr53c9xvar.h
@@ -68,8 +68,8 @@
/* $FreeBSD$ */
-#ifndef _DEV_IC_NCR53C9XVAR_H_
-#define _DEV_IC_NCR53C9XVAR_H_
+#ifndef _NCR53C9XVAR_H_
+#define _NCR53C9XVAR_H_
#include <sys/lock.h>
@@ -115,7 +115,8 @@
* scsi_status,sense_data}.
*/
struct ncr53c9x_ecb {
- /* These fields are preserved between alloc and free */
+ /* These fields are preserved between alloc and free. */
+ struct callout ch;
struct ncr53c9x_softc *sc;
int tag_id;
int flags;
@@ -130,7 +131,6 @@ struct ncr53c9x_ecb {
#define ECB_RESET 0x80
#define ECB_TENTATIVE_DONE 0x100
int timeout;
- struct callout ch;
struct {
uint8_t msg[3]; /* Selection Id msg and tags */
@@ -290,7 +290,7 @@ extern int ncr53c9x_debug;
struct ncr53c9x_softc;
/*
- * Function switch used as glue to MD code.
+ * Function switch used as glue to MD code
*/
struct ncr53c9x_glue {
/* Mandatory entry points. */
@@ -304,9 +304,6 @@ struct ncr53c9x_glue {
void (*gl_dma_go)(struct ncr53c9x_softc *);
void (*gl_dma_stop)(struct ncr53c9x_softc *);
int (*gl_dma_isactive)(struct ncr53c9x_softc *);
-
- /* Optional entry points. */
- void (*gl_clear_latched_intr)(struct ncr53c9x_softc *);
};
struct ncr53c9x_softc {
@@ -330,7 +327,7 @@ struct ncr53c9x_softc {
uint8_t sc_ccf; /* Clock Conversion */
uint8_t sc_timeout;
- /* register copies, see espreadregs() */
+ /* register copies, see ncr53c9x_readregs() */
uint8_t sc_espintr;
uint8_t sc_espstat;
uint8_t sc_espstep;
@@ -415,6 +412,7 @@ struct ncr53c9x_softc {
#define NCR_F_FASTSCSI 0x02 /* chip supports Fast mode */
#define NCR_F_DMASELECT 0x04 /* can do dmaselect */
#define NCR_F_SELATN3 0x08 /* chip supports SELATN3 command */
+#define NCR_F_LARGEXFER 0x10 /* chip supports transfers > 64k */
/* values for sc_msgout */
#define SEND_DEV_RESET 0x0001
@@ -499,8 +497,10 @@ struct ncr53c9x_softc {
#define ncr53c9x_cpb2stp(sc, cpb) \
((250 * (cpb)) / (sc)->sc_freq)
+extern devclass_t esp_devclass;
+
int ncr53c9x_attach(struct ncr53c9x_softc *sc);
int ncr53c9x_detach(struct ncr53c9x_softc *sc);
void ncr53c9x_intr(void *arg);
-#endif /* _DEV_IC_NCR53C9XVAR_H_ */
+#endif /* _NCR53C9XVAR_H_ */
diff --git a/sys/dev/fb/vesa.c b/sys/dev/fb/vesa.c
index 0c05699..ee97ff1 100644
--- a/sys/dev/fb/vesa.c
+++ b/sys/dev/fb/vesa.c
@@ -95,7 +95,7 @@ static size_t vesa_bios_size = 0;
/* VESA video adapter */
static video_adapter_t *vesa_adp = NULL;
-SYSCTL_NODE(_debug, OID_AUTO, vesa, CTLFLAG_RD, NULL, "VESA debugging");
+static SYSCTL_NODE(_debug, OID_AUTO, vesa, CTLFLAG_RD, NULL, "VESA debugging");
static int vesa_shadow_rom = 0;
TUNABLE_INT("debug.vesa.shadow_rom", &vesa_shadow_rom);
SYSCTL_INT(_debug_vesa, OID_AUTO, shadow_rom, CTLFLAG_RDTUN, &vesa_shadow_rom,
diff --git a/sys/dev/fdc/fdc.c b/sys/dev/fdc/fdc.c
index 01998ec..00cdcd6 100644
--- a/sys/dev/fdc/fdc.c
+++ b/sys/dev/fdc/fdc.c
@@ -267,7 +267,7 @@ static driver_filter_t fdc_intr_fast;
static void fdc_reset(struct fdc_data *);
static int fd_probe_disk(struct fd_data *, int *);
-SYSCTL_NODE(_debug, OID_AUTO, fdc, CTLFLAG_RW, 0, "fdc driver");
+static SYSCTL_NODE(_debug, OID_AUTO, fdc, CTLFLAG_RW, 0, "fdc driver");
static int fifo_threshold = 8;
SYSCTL_INT(_debug_fdc, OID_AUTO, fifo, CTLFLAG_RW, &fifo_threshold, 0,
diff --git a/sys/dev/firewire/fwmem.c b/sys/dev/firewire/fwmem.c
index 1799682..a342d14 100644
--- a/sys/dev/firewire/fwmem.c
+++ b/sys/dev/firewire/fwmem.c
@@ -73,7 +73,7 @@ __FBSDID("$FreeBSD$");
static int fwmem_speed=2, fwmem_debug=0;
static struct fw_eui64 fwmem_eui64;
SYSCTL_DECL(_hw_firewire);
-SYSCTL_NODE(_hw_firewire, OID_AUTO, fwmem, CTLFLAG_RD, 0,
+static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwmem, CTLFLAG_RD, 0,
"FireWire Memory Access");
SYSCTL_UINT(_hw_firewire_fwmem, OID_AUTO, eui64_hi, CTLFLAG_RW,
&fwmem_eui64.hi, 0, "Fwmem target EUI64 high");
@@ -84,7 +84,7 @@ SYSCTL_INT(_hw_firewire_fwmem, OID_AUTO, speed, CTLFLAG_RW, &fwmem_speed, 0,
SYSCTL_INT(_debug, OID_AUTO, fwmem_debug, CTLFLAG_RW, &fwmem_debug, 0,
"Fwmem driver debug flag");
-MALLOC_DEFINE(M_FWMEM, "fwmem", "fwmem/FireWire");
+static MALLOC_DEFINE(M_FWMEM, "fwmem", "fwmem/FireWire");
#define MAXLEN (512 << fwmem_speed)
diff --git a/sys/dev/firewire/if_fwe.c b/sys/dev/firewire/if_fwe.c
index 62616cf..52a9c8d 100644
--- a/sys/dev/firewire/if_fwe.c
+++ b/sys/dev/firewire/if_fwe.c
@@ -86,10 +86,10 @@ static int stream_ch = 1;
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;
-MALLOC_DEFINE(M_FWE, "if_fwe", "Ethernet over FireWire interface");
+static MALLOC_DEFINE(M_FWE, "if_fwe", "Ethernet over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwe_debug, CTLFLAG_RW, &fwedebug, 0, "");
SYSCTL_DECL(_hw_firewire);
-SYSCTL_NODE(_hw_firewire, OID_AUTO, fwe, CTLFLAG_RD, 0,
+static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwe, CTLFLAG_RD, 0,
"Ethernet emulation subsystem");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, stream_ch, CTLFLAG_RW, &stream_ch, 0,
"Stream channel to use");
diff --git a/sys/dev/firewire/if_fwip.c b/sys/dev/firewire/if_fwip.c
index 66cdb4a..e7c4a66 100644
--- a/sys/dev/firewire/if_fwip.c
+++ b/sys/dev/firewire/if_fwip.c
@@ -99,10 +99,10 @@ static int broadcast_channel = 0xc0 | 0x1f; /* tag | channel(XXX) */
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;
-MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface");
+static MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
SYSCTL_DECL(_hw_firewire);
-SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0,
+static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0,
"Firewire ip subsystem");
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len,
0, "Length of the receive queue");
diff --git a/sys/dev/firewire/sbp.c b/sys/dev/firewire/sbp.c
index 2387bf9..9204449 100644
--- a/sys/dev/firewire/sbp.c
+++ b/sys/dev/firewire/sbp.c
@@ -132,7 +132,8 @@ static int use_doorbell = 0;
static int sbp_tags = 0;
SYSCTL_DECL(_hw_firewire);
-SYSCTL_NODE(_hw_firewire, OID_AUTO, sbp, CTLFLAG_RD, 0, "SBP-II Subsystem");
+static SYSCTL_NODE(_hw_firewire, OID_AUTO, sbp, CTLFLAG_RD, 0,
+ "SBP-II Subsystem");
SYSCTL_INT(_debug, OID_AUTO, sbp_debug, CTLFLAG_RW, &debug, 0,
"SBP debug flag");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, auto_login, CTLFLAG_RW, &auto_login, 0,
@@ -276,7 +277,7 @@ static void sbp_mgm_timeout (void *arg);
static void sbp_timeout (void *arg);
static void sbp_mgm_orb (struct sbp_dev *, int, struct sbp_ocb *);
-MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/FireWire");
+static MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/FireWire");
/* cam related functions */
static void sbp_action(struct cam_sim *sim, union ccb *ccb);
diff --git a/sys/dev/firewire/sbp_targ.c b/sys/dev/firewire/sbp_targ.c
index f04b968..9be31bc4 100644
--- a/sys/dev/firewire/sbp_targ.c
+++ b/sys/dev/firewire/sbp_targ.c
@@ -95,7 +95,7 @@
#define F_HOLD (1 << 3)
#define F_FREEZED (1 << 4)
-MALLOC_DEFINE(M_SBP_TARG, "sbp_targ", "SBP-II/FireWire target mode");
+static MALLOC_DEFINE(M_SBP_TARG, "sbp_targ", "SBP-II/FireWire target mode");
static int debug = 0;
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index d973694..54644a2 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -219,8 +219,7 @@ static int
gpiobus_detach(device_t dev)
{
struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
- int err, ndevs, i;
- device_t *devlist;
+ int err;
KASSERT(mtx_initialized(&sc->sc_mtx),
("gpiobus mutex not initialized"));
@@ -228,16 +227,14 @@ gpiobus_detach(device_t dev)
if ((err = bus_generic_detach(dev)) != 0)
return (err);
- if ((err = device_get_children(dev, &devlist, &ndevs)) != 0)
- return (err);
- for (i = 0; i < ndevs; i++)
- device_delete_child(dev, devlist[i]);
+
+ /* detach and delete all children */
+ device_delete_all_children(dev);
if (sc->sc_pins_mapped) {
free(sc->sc_pins_mapped, M_DEVBUF);
sc->sc_pins_mapped = NULL;
}
- free(devlist, M_TEMP);
return (0);
}
diff --git a/sys/dev/hifn/hifn7751.c b/sys/dev/hifn/hifn7751.c
index 2416f2b..2da5a9e 100644
--- a/sys/dev/hifn/hifn7751.c
+++ b/sys/dev/hifn/hifn7751.c
@@ -184,7 +184,8 @@ READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
}
#define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
-SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0,
+ "Hifn driver parameters");
#ifdef HIFN_DEBUG
static int hifn_debug = 0;
diff --git a/sys/dev/hwpmc/hwpmc_mips24k.c b/sys/dev/hwpmc/hwpmc_mips24k.c
index 0b2a117..4970171 100644
--- a/sys/dev/hwpmc/hwpmc_mips24k.c
+++ b/sys/dev/hwpmc/hwpmc_mips24k.c
@@ -254,6 +254,8 @@ mips24k_allocate_pmc(int cpu, int ri, struct pmc *pm,
config |= MIPS24K_PMC_USER_ENABLE;
if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
config |= MIPS24K_PMC_ENABLE;
+ if (caps & PMC_CAP_INTERRUPT)
+ config |= MIPS24K_PMC_INTERRUPT_ENABLE;
pm->pm_md.pm_mips24k.pm_mips24k_evsel = config;
@@ -404,7 +406,65 @@ mips24k_release_pmc(int cpu, int ri, struct pmc *pmc)
static int
mips24k_intr(int cpu, struct trapframe *tf)
{
- return 0;
+ int error;
+ int retval, ri;
+ struct pmc *pm;
+ struct mips24k_cpu *pc;
+ uint32_t r, r0, r2;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[mips24k,%d] CPU %d out of range", __LINE__, cpu));
+
+ retval = 0;
+ pc = mips24k_pcpu[cpu];
+
+ /* Stop PMCs without clearing the counter */
+ r0 = mips_rd_perfcnt0();
+ mips_wr_perfcnt0(r0 & ~(0x1f));
+ r2 = mips_rd_perfcnt2();
+ mips_wr_perfcnt2(r2 & ~(0x1f));
+
+ for (ri = 0; ri < mips24k_npmcs; ri++) {
+ pm = mips24k_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;
+ if (pm == NULL)
+ continue;
+ if (! PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ continue;
+
+ r = mips24k_pmcn_read(ri);
+
+ /* If bit 31 is set, the counter has overflowed */
+ if ((r & 0x80000000) == 0)
+ continue;
+
+ retval = 1;
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+ error = pmc_process_interrupt(cpu, pm, tf,
+ TRAPF_USERMODE(tf));
+ if (error) {
+ /* Clear/disable the relevant counter */
+ if (ri == 0)
+ r0 = 0;
+ else if (ri == 1)
+ r2 = 0;
+ mips24k_stop_pmc(cpu, ri);
+ }
+
+ /* Reload sampling count */
+ mips24k_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
+ }
+
+ /*
+ * Re-enable the PMC counters where they left off.
+ *
+ * Any counter which overflowed will have its sample count
+ * reloaded in the loop above.
+ */
+ mips_wr_perfcnt0(r0);
+ mips_wr_perfcnt2(r2);
+
+ return retval;
}
static int
diff --git a/sys/dev/iicbus/ad7417.c b/sys/dev/iicbus/ad7417.c
index 6ae16be..acd2d89 100644
--- a/sys/dev/iicbus/ad7417.c
+++ b/sys/dev/iicbus/ad7417.c
@@ -121,7 +121,7 @@ static driver_t ad7417_driver = {
static devclass_t ad7417_devclass;
DRIVER_MODULE(ad7417, iicbus, ad7417_driver, ad7417_devclass, 0, 0);
-MALLOC_DEFINE(M_AD7417, "ad7417", "Supply-Monitor AD7417");
+static MALLOC_DEFINE(M_AD7417, "ad7417", "Supply-Monitor AD7417");
static int
diff --git a/sys/dev/iicbus/max6690.c b/sys/dev/iicbus/max6690.c
index 83f3b50..ab312e7 100644
--- a/sys/dev/iicbus/max6690.c
+++ b/sys/dev/iicbus/max6690.c
@@ -101,7 +101,7 @@ static driver_t max6690_driver = {
static devclass_t max6690_devclass;
DRIVER_MODULE(max6690, iicbus, max6690_driver, max6690_devclass, 0, 0);
-MALLOC_DEFINE(M_MAX6690, "max6690", "Temp-Monitor MAX6690");
+static MALLOC_DEFINE(M_MAX6690, "max6690", "Temp-Monitor MAX6690");
static int
max6690_read(device_t dev, uint32_t addr, uint8_t reg, uint8_t *data)
diff --git a/sys/dev/iir/iir.c b/sys/dev/iir/iir.c
index 2be7c39..057194a 100644
--- a/sys/dev/iir/iir.c
+++ b/sys/dev/iir/iir.c
@@ -69,7 +69,7 @@ __FBSDID("$FreeBSD$");
#include <dev/iir/iir.h>
-MALLOC_DEFINE(M_GDTBUF, "iirbuf", "iir driver buffer");
+static MALLOC_DEFINE(M_GDTBUF, "iirbuf", "iir driver buffer");
struct gdt_softc *gdt_wait_gdt;
int gdt_wait_index;
diff --git a/sys/dev/ipmi/ipmi.c b/sys/dev/ipmi/ipmi.c
index 74a7d03..6b7d464 100644
--- a/sys/dev/ipmi/ipmi.c
+++ b/sys/dev/ipmi/ipmi.c
@@ -63,7 +63,8 @@ static void ipmi_dtor(void *arg);
int ipmi_attached = 0;
static int on = 1;
-SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD, 0, "IPMI driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD, 0,
+ "IPMI driver parameters");
SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RW,
&on, 0, "");
@@ -75,7 +76,7 @@ static struct cdevsw ipmi_cdevsw = {
.d_name = "ipmi",
};
-MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");
+static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");
static int
ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
diff --git a/sys/dev/iscsi/initiator/isc_subr.c b/sys/dev/iscsi/initiator/isc_subr.c
index 7a8138b..4d75a87 100644
--- a/sys/dev/iscsi/initiator/isc_subr.c
+++ b/sys/dev/iscsi/initiator/isc_subr.c
@@ -58,7 +58,7 @@ __FBSDID("$FreeBSD$");
#include <dev/iscsi/initiator/iscsi.h>
#include <dev/iscsi/initiator/iscsivar.h>
-MALLOC_DEFINE(M_ISC, "iSC", "iSCSI driver options");
+static MALLOC_DEFINE(M_ISC, "iSC", "iSCSI driver options");
static char *
i_strdupin(char *s, size_t maxlen)
diff --git a/sys/dev/iscsi/initiator/iscsi.c b/sys/dev/iscsi/initiator/iscsi.c
index 292ce8f..4c06b98 100644
--- a/sys/dev/iscsi/initiator/iscsi.c
+++ b/sys/dev/iscsi/initiator/iscsi.c
@@ -66,7 +66,7 @@ static struct isc_softc *isc;
MALLOC_DEFINE(M_ISCSI, "iSCSI", "iSCSI driver");
MALLOC_DEFINE(M_ISCSIBUF, "iSCbuf", "iSCSI buffers");
-MALLOC_DEFINE(M_TMP, "iSCtmp", "iSCSI tmp");
+static MALLOC_DEFINE(M_TMP, "iSCtmp", "iSCSI tmp");
#ifdef ISCSI_INITIATOR_DEBUG
int iscsi_debug = ISCSI_INITIATOR_DEBUG;
diff --git a/sys/dev/iscsi/initiator/iscsivar.h b/sys/dev/iscsi/initiator/iscsivar.h
index 0a56f80..875d7ad 100644
--- a/sys/dev/iscsi/initiator/iscsivar.h
+++ b/sys/dev/iscsi/initiator/iscsivar.h
@@ -64,7 +64,6 @@ typedef uint32_t digest_t(const void *, int len, uint32_t ocrc);
MALLOC_DECLARE(M_ISCSI);
MALLOC_DECLARE(M_ISCSIBUF);
-MALLOC_DECLARE(M_PDU);
#define ISOK2DIG(dig, pp) ((dig != NULL) && ((pp->ipdu.bhs.opcode & 0x1f) != ISCSI_LOGIN_CMD))
diff --git a/sys/dev/isp/DriverManual.txt b/sys/dev/isp/DriverManual.txt
index 0ed5a1b..3d2f0fc 100644
--- a/sys/dev/isp/DriverManual.txt
+++ b/sys/dev/isp/DriverManual.txt
@@ -327,7 +327,7 @@ here in clarifying some of this.
A successful execution of isp_init will lead to the driver 'registering'
itself with this platform's SCSI subsystem. One assumed action for this
-is the registry of a function the SCSI subsystem for this platform
+is the registry of a function that the SCSI subsystem for this platform
will call when it has a SCSI command to run.
The platform specific module function that receives this will do whatever
diff --git a/sys/dev/isp/isp.c b/sys/dev/isp/isp.c
index dfba62e..00a04b6 100644
--- a/sys/dev/isp/isp.c
+++ b/sys/dev/isp/isp.c
@@ -748,11 +748,13 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
if (dodnld && IS_24XX(isp)) {
const uint32_t *ptr = isp->isp_mdvec->dv_ispfw;
+ int wordload;
/*
* Keep loading until we run out of f/w.
*/
code_org = ptr[2]; /* 1st load address is our start addr */
+ wordload = 0;
for (;;) {
uint32_t la, wi, wl;
@@ -777,6 +779,7 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
wl--;
}
MEMORYBARRIER(isp, SYNC_REQUEST, 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), -1);
+ again:
ISP_MEMZERO(&mbs, sizeof (mbs));
if (la < 0x10000 && nw < 0x10000) {
mbs.param[0] = MBOX_LOAD_RISC_RAM_2100;
@@ -786,6 +789,23 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
mbs.param[4] = nw;
mbs.param[6] = DMA_WD3(isp->isp_rquest_dma);
mbs.param[7] = DMA_WD2(isp->isp_rquest_dma);
+ isp_prt(isp, ISP_LOGDEBUG0, "LOAD RISC RAM 2100 %u words at load address 0x%x", nw, la);
+ } else if (wordload) {
+ union {
+ const uint32_t *cp;
+ uint32_t *np;
+ } ucd;
+ ucd.cp = (const uint32_t *)cp;
+ mbs.param[0] = MBOX_WRITE_RAM_WORD_EXTENDED;
+ mbs.param[1] = la;
+ mbs.param[2] = (*ucd.np);
+ mbs.param[3] = (*ucd.np) >> 16;
+ mbs.param[8] = la >> 16;
+ isp->isp_mbxwrk0 = nw - 1;
+ isp->isp_mbxworkp = ucd.np+1;
+ isp->isp_mbxwrk1 = (la + 1);
+ isp->isp_mbxwrk8 = (la + 1) >> 16;
+ isp_prt(isp, ISP_LOGDEBUG0, "WRITE RAM WORD EXTENDED %u words at load address 0x%x", nw, la);
} else {
mbs.param[0] = MBOX_LOAD_RISC_RAM;
mbs.param[1] = la;
@@ -796,10 +816,16 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
mbs.param[6] = DMA_WD3(isp->isp_rquest_dma);
mbs.param[7] = DMA_WD2(isp->isp_rquest_dma);
mbs.param[8] = la >> 16;
+ isp_prt(isp, ISP_LOGDEBUG0, "LOAD RISC RAM %u words at load address 0x%x", nw, la);
}
mbs.logval = MBLOGALL;
isp_mboxcmd(isp, &mbs);
if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
+ if (mbs.param[0] == MBOX_HOST_INTERFACE_ERROR) {
+ isp_prt(isp, ISP_LOGERR, "switching to word load");
+ wordload = 1;
+ goto again;
+ }
isp_prt(isp, ISP_LOGERR, "F/W Risc Ram Load Failed");
ISP_RESET0(isp);
return;
@@ -855,6 +881,7 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
mbs.param[4] = nw;
mbs.param[6] = DMA_WD3(isp->isp_rquest_dma);
mbs.param[7] = DMA_WD2(isp->isp_rquest_dma);
+ isp_prt(isp, ISP_LOGDEBUG1, "LOAD RISC RAM 2100 %u words at load address 0x%x\n", nw, la);
} else {
mbs.param[0] = MBOX_LOAD_RISC_RAM;
mbs.param[1] = la;
@@ -864,6 +891,7 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
mbs.param[6] = DMA_WD3(isp->isp_rquest_dma);
mbs.param[7] = DMA_WD2(isp->isp_rquest_dma);
mbs.param[8] = la >> 16;
+ isp_prt(isp, ISP_LOGDEBUG1, "LOAD RISC RAM %u words at load address 0x%x\n", nw, la);
}
mbs.logval = MBLOGALL;
isp_mboxcmd(isp, &mbs);
@@ -910,6 +938,7 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
mbs.param[1] = code_org;
mbs.param[2] = ucd.np[0];
mbs.logval = MBLOGNONE;
+ isp_prt(isp, ISP_LOGDEBUG1, "WRITE RAM %u words at load address 0x%x\n", ucd.np[3], code_org);
isp_mboxcmd(isp, &mbs);
if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
isp_prt(isp, ISP_LOGERR, "F/W download failed at word %d", isp->isp_mbxwrk1 - code_org);
@@ -6589,23 +6618,39 @@ isp_mbox_continue(ispsoftc_t *isp)
mbs.param[1] = isp->isp_mbxwrk1++;
break;
case MBOX_WRITE_RAM_WORD_EXTENDED:
+ if (IS_24XX(isp)) {
+ uint32_t *lptr = (uint32_t *)ptr;
+ mbs.param[2] = lptr[0];
+ mbs.param[3] = lptr[0] >> 16;
+ lptr++;
+ ptr = (uint16_t *)lptr;
+ } else {
+ mbs.param[2] = *ptr++;
+ }
offset = isp->isp_mbxwrk1;
offset |= isp->isp_mbxwrk8 << 16;
-
- mbs.param[2] = *ptr++;
mbs.param[1] = offset;
mbs.param[8] = offset >> 16;
- isp->isp_mbxwrk1 = ++offset;
+ offset++;
+ isp->isp_mbxwrk1 = offset;
isp->isp_mbxwrk8 = offset >> 16;
break;
case MBOX_READ_RAM_WORD_EXTENDED:
+ if (IS_24XX(isp)) {
+ uint32_t *lptr = (uint32_t *)ptr;
+ uint32_t val = isp->isp_mboxtmp[2];
+ val |= (isp->isp_mboxtmp[3]) << 16;
+ *lptr++ = val;
+ ptr = (uint16_t *)lptr;
+ } else {
+ *ptr++ = isp->isp_mboxtmp[2];
+ }
offset = isp->isp_mbxwrk1;
offset |= isp->isp_mbxwrk8 << 16;
-
- *ptr++ = isp->isp_mboxtmp[2];
mbs.param[1] = offset;
mbs.param[8] = offset >> 16;
- isp->isp_mbxwrk1 = ++offset;
+ offset++;
+ isp->isp_mbxwrk1 = offset;
isp->isp_mbxwrk8 = offset >> 16;
break;
}
@@ -6830,7 +6875,7 @@ static const uint32_t mbpfc[] = {
ISPOPMAP(0x00, 0x00), /* 0x0c: */
ISPOPMAP(0x10f, 0x01), /* 0x0d: MBOX_WRITE_RAM_WORD_EXTENDED */
ISPOPMAP(0x01, 0x05), /* 0x0e: MBOX_CHECK_FIRMWARE */
- ISPOPMAP(0x10f, 0x05), /* 0x0f: MBOX_READ_RAM_WORD_EXTENDED */
+ ISPOPMAP(0x103, 0x0d), /* 0x0f: MBOX_READ_RAM_WORD_EXTENDED */
ISPOPMAP(0x1f, 0x11), /* 0x10: MBOX_INIT_REQ_QUEUE */
ISPOPMAP(0x2f, 0x21), /* 0x11: MBOX_INIT_RES_QUEUE */
ISPOPMAP(0x0f, 0x01), /* 0x12: MBOX_EXECUTE_IOCB */
@@ -6962,13 +7007,13 @@ static const char *fc_mbcmd_names[] = {
"MAILBOX REG TEST",
"VERIFY CHECKSUM",
"ABOUT FIRMWARE",
- "LOAD RAM",
+ "LOAD RAM (2100)",
"DUMP RAM",
- "WRITE RAM WORD EXTENDED",
+ "LOAD RISC RAM",
NULL,
- "READ RAM WORD EXTENDED",
+ "WRITE RAM WORD EXTENDED",
"CHECK FIRMWARE",
- NULL,
+ "READ RAM WORD EXTENDED",
"INIT REQUEST QUEUE",
"INIT RESULT QUEUE",
"EXECUTE IOCB",
diff --git a/sys/dev/isp/isp_freebsd.c b/sys/dev/isp/isp_freebsd.c
index 8d62f5e..3d676ab 100644
--- a/sys/dev/isp/isp_freebsd.c
+++ b/sys/dev/isp/isp_freebsd.c
@@ -175,6 +175,14 @@ isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan)
isp_prt(isp, ISP_LOGERR, "cannot create test target thread");
}
#endif
+ if (chan == 0) {
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev);
+ SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwnn", CTLFLAG_RD, &FCPARAM(isp, 0)->isp_wwnn, "World Wide Node Name");
+ SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwpn", CTLFLAG_RD, &FCPARAM(isp, 0)->isp_wwpn, "World Wide Port Name");
+ SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "loop_down_limit", CTLFLAG_RW, &ISP_FC_PC(isp, 0)->loop_down_limit, 0, "Loop Down Limit");
+ SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "gone_device_time", CTLFLAG_RW, &ISP_FC_PC(isp, 0)->gone_device_time, 0, "Gone Device Time");
+ }
}
return (0);
}
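The new block in isp_attach_chan() exports per-device state through the device's own sysctl tree (dev.isp.N.wwnn and so on): the OIDs hang off the context and tree that newbus created for the device, so they are torn down automatically on detach. The same pattern, sketched for a hypothetical foo(4) driver with one knob in its softc:

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/sysctl.h>

struct foo_softc {
	u_int	frobs;		/* hypothetical knob exported below */
};

void
foo_sysctl_attach(device_t dev, struct foo_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);

	/* Appears as dev.foo.<unit>.frobs; freed with the device's ctx. */
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "frobs",
	    CTLFLAG_RW, &sc->frobs, 0, "Frob count");
}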
diff --git a/sys/dev/isp/isp_freebsd.h b/sys/dev/isp/isp_freebsd.h
index 5d1b500..0875e64 100644
--- a/sys/dev/isp/isp_freebsd.h
+++ b/sys/dev/isp/isp_freebsd.h
@@ -38,6 +38,7 @@
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
+#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/bus.h>
diff --git a/sys/dev/isp/isp_pci.c b/sys/dev/isp/isp_pci.c
index b6b23fa..9a0bc4a 100644
--- a/sys/dev/isp/isp_pci.c
+++ b/sys/dev/isp/isp_pci.c
@@ -1458,6 +1458,7 @@ imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
imushp->error = EINVAL;
return;
}
+ isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
imushp->isp->isp_rquest = imushp->vbase;
imushp->isp->isp_rquest_dma = segs->ds_addr;
segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
@@ -1487,6 +1488,7 @@ imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error)
imushp->error = EINVAL;
return;
}
+ isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr;
FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase;
}
diff --git a/sys/dev/kbd/kbd.c b/sys/dev/kbd/kbd.c
index 01e262e..8036762 100644
--- a/sys/dev/kbd/kbd.c
+++ b/sys/dev/kbd/kbd.c
@@ -81,7 +81,7 @@ static keyboard_switch_t *kbdsw_ini;
keyboard_switch_t **kbdsw = &kbdsw_ini;
static int keymap_restrict_change;
-SYSCTL_NODE(_hw, OID_AUTO, kbd, CTLFLAG_RD, 0, "kbd");
+static SYSCTL_NODE(_hw, OID_AUTO, kbd, CTLFLAG_RD, 0, "kbd");
SYSCTL_INT(_hw_kbd, OID_AUTO, keymap_restrict_change, CTLFLAG_RW,
&keymap_restrict_change, 0, "restrict ability to change keymap");
diff --git a/sys/dev/lmc/if_lmc.c b/sys/dev/lmc/if_lmc.c
index d5fb9ba..40dc1e5 100644
--- a/sys/dev/lmc/if_lmc.c
+++ b/sys/dev/lmc/if_lmc.c
@@ -4945,7 +4945,9 @@ lmc_ifnet_detach(softc_t *sc)
/* Detach from the ifnet kernel interface. */
if_detach(sc->ifp);
-# if (__FreeBSD_version >= 600000)
+# if (defined(__FreeBSD__) && __FreeBSD_version >= 800082)
+ if_free(sc->ifp);
+# elif (defined(__FreeBSD__) && __FreeBSD_version >= 600000)
if_free_type(sc->ifp, NSPPP ? IFT_PPP : IFT_OTHER);
# endif
}
diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c
index ae6ef88..6a7a786 100644
--- a/sys/dev/malo/if_malo.c
+++ b/sys/dev/malo/if_malo.c
@@ -123,7 +123,7 @@ enum {
} while (0)
#endif
-MALLOC_DEFINE(M_MALODEV, "malodev", "malo driver dma buffers");
+static MALLOC_DEFINE(M_MALODEV, "malodev", "malo driver dma buffers");
static struct ieee80211vap *malo_vap_create(struct ieee80211com *ic,
const char name[IFNAMSIZ], int unit, int opmode, int flags,
diff --git a/sys/dev/malo/if_malo_pci.c b/sys/dev/malo/if_malo_pci.c
index 9c2027b..de9c39d 100644
--- a/sys/dev/malo/if_malo_pci.c
+++ b/sys/dev/malo/if_malo_pci.c
@@ -79,7 +79,7 @@ struct malo_pci_softc {
* Tunable variables.
*/
SYSCTL_DECL(_hw_malo);
-SYSCTL_NODE(_hw_malo, OID_AUTO, pci, CTLFLAG_RD, 0,
+static SYSCTL_NODE(_hw_malo, OID_AUTO, pci, CTLFLAG_RD, 0,
"Marvell 88W8335 driver PCI parameters");
static int msi_disable = 0; /* MSI disabled */
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index 078eca8..35d5832 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -1370,6 +1370,11 @@ g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
indent, (uintmax_t) mp->fwsectors);
sbuf_printf(sb, "%s<length>%ju</length>\n",
indent, (uintmax_t) mp->mediasize);
+ sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
+ (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
+ sbuf_printf(sb, "%s<access>%s</access>\n", indent,
+ (mp->flags & MD_READONLY) == 0 ? "read-write":
+ "read-only");
sbuf_printf(sb, "%s<type>%s</type>\n", indent,
type);
if (mp->type == MD_VNODE && mp->vnode != NULL)
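With the two sbuf_printf() calls added above, the GEOM confxml that an md(4) device reports (visible via sysctl kern.geom.confxml) gains <compression> and <access> elements next to the existing ones. For a read-write malloc-backed device, the relevant fragment would read roughly as follows (values illustrative, surrounding element nesting elided):

  <length>1073741824</length>
  <compression>off</compression>
  <access>read-write</access>
  <type>malloc</type>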
diff --git a/sys/dev/mfi/mfi.c b/sys/dev/mfi/mfi.c
index 8a3d277..e55d7da 100644
--- a/sys/dev/mfi/mfi.c
+++ b/sys/dev/mfi/mfi.c
@@ -484,15 +484,8 @@ mfi_attach(struct mfi_softc *sc)
mtx_unlock(&sc->mfi_io_lock);
/*
- * Set up the interrupt handler. XXX This should happen in
- * mfi_pci.c
+ * Set up the interrupt handler.
*/
- sc->mfi_irq_rid = 0;
- if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
- &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
- device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
- return (EINVAL);
- }
if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
NULL, mfi_intr, sc, &sc->mfi_intr)) {
device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
@@ -932,6 +925,12 @@ mfi_intr(void *arg)
if (sc->mfi_check_clear_intr(sc))
return;
+ /*
+ * Do a dummy read to flush the interrupt ACK that we just performed,
+ * ensuring that everything is really, truly consistent.
+ */
+ (void)sc->mfi_read_fw_status(sc);
+
pi = sc->mfi_comms->hw_pi;
ci = sc->mfi_comms->hw_ci;
mtx_lock(&sc->mfi_io_lock);
diff --git a/sys/dev/mfi/mfi_cam.c b/sys/dev/mfi/mfi_cam.c
index c49daff..aa76a10 100644
--- a/sys/dev/mfi/mfi_cam.c
+++ b/sys/dev/mfi/mfi_cam.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
+#include <sys/sysctl.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
diff --git a/sys/dev/mfi/mfi_debug.c b/sys/dev/mfi/mfi_debug.c
index 515e6ed..8fe7778 100644
--- a/sys/dev/mfi/mfi_debug.c
+++ b/sys/dev/mfi/mfi_debug.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/selinfo.h>
+#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <machine/resource.h>
diff --git a/sys/dev/mfi/mfi_disk.c b/sys/dev/mfi/mfi_disk.c
index 4594ca2..ad656d3c 100644
--- a/sys/dev/mfi/mfi_disk.c
+++ b/sys/dev/mfi/mfi_disk.c
@@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
#include <sys/selinfo.h>
#include <sys/module.h>
#include <sys/malloc.h>
+#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/bio.h>
diff --git a/sys/dev/mfi/mfi_pci.c b/sys/dev/mfi/mfi_pci.c
index 685aa0b..8de2e12 100644
--- a/sys/dev/mfi/mfi_pci.c
+++ b/sys/dev/mfi/mfi_pci.c
@@ -66,6 +66,7 @@ __FBSDID("$FreeBSD$");
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
+#include <sys/sysctl.h>
#include <sys/uio.h>
#include <machine/bus.h>
@@ -107,6 +108,11 @@ static devclass_t mfi_devclass;
DRIVER_MODULE(mfi, pci, mfi_pci_driver, mfi_devclass, 0, 0);
MODULE_VERSION(mfi, 1);
+static int mfi_msi = 0;
+TUNABLE_INT("hw.mfi.msi", &mfi_msi);
+SYSCTL_INT(_hw_mfi, OID_AUTO, msi, CTLFLAG_RDTUN, &mfi_msi, 0,
+ "Enable use of MSI interrupts");
+
struct mfi_ident {
uint16_t vendor;
uint16_t device;
@@ -169,7 +175,7 @@ mfi_pci_attach(device_t dev)
struct mfi_softc *sc;
struct mfi_ident *m;
uint32_t command;
- int error;
+ int count, error;
sc = device_get_softc(dev);
bzero(sc, sizeof(*sc));
@@ -226,6 +232,20 @@ mfi_pci_attach(device_t dev)
goto out;
}
+ /* Allocate IRQ resource. */
+ sc->mfi_irq_rid = 0;
+ count = 1;
+ if (mfi_msi && pci_alloc_msi(sc->mfi_dev, &count) == 0) {
+ device_printf(sc->mfi_dev, "Using MSI\n");
+ sc->mfi_irq_rid = 1;
+ }
+ if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
+ &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
+ device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
+ error = EINVAL;
+ goto out;
+ }
+
error = mfi_attach(sc);
out:
if (error) {
@@ -280,6 +300,8 @@ mfi_pci_free(struct mfi_softc *sc)
bus_release_resource(sc->mfi_dev, SYS_RES_MEMORY,
sc->mfi_regs_rid, sc->mfi_regs_resource);
}
+ if (sc->mfi_irq_rid != 0)
+ pci_release_msi(sc->mfi_dev);
return;
}
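Taken together, the mfi hunks move IRQ allocation from mfi_attach() into the PCI front end and gate MSI behind the hw.mfi.msi loader tunable. The idiom — try a single MSI message, fall back to the shared INTx line at rid 0, release the message again on failure or teardown — reduced to a hedged sketch for a hypothetical driver softc:

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <dev/pci/pcivar.h>

struct foo_softc {
	device_t	 dev;
	int		 irq_rid;
	struct resource	*irq;
};

static int foo_msi = 0;
TUNABLE_INT("hw.foo.msi", &foo_msi);

int
foo_alloc_irq(struct foo_softc *sc)
{
	int count = 1;

	sc->irq_rid = 0;		/* rid 0 is the legacy INTx line */
	if (foo_msi && pci_alloc_msi(sc->dev, &count) == 0)
		sc->irq_rid = 1;	/* MSI messages start at rid 1 */
	sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq == NULL) {
		if (sc->irq_rid != 0)
			pci_release_msi(sc->dev);
		return (ENXIO);
	}
	return (0);
}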
diff --git a/sys/dev/mfi/mfivar.h b/sys/dev/mfi/mfivar.h
index 14ea884..9fcd0d9 100644
--- a/sys/dev/mfi/mfivar.h
+++ b/sys/dev/mfi/mfivar.h
@@ -352,12 +352,29 @@ mfi_dequeue_bio(struct mfi_softc *sc)
return (bp);
}
+/*
+ * This is from the original scsi_extract_sense() in CAM. It's copied
+ * here because CAM now uses a non-inline version that follows more complex
+ * additions to the SPC spec, and we don't want to force a dependency on
+ * the CAM module for such a trivial action.
+ */
+static __inline void
+mfi_extract_sense(struct scsi_sense_data_fixed *sense,
+ int *error_code, int *sense_key, int *asc, int *ascq)
+{
+
+ *error_code = sense->error_code & SSD_ERRCODE;
+ *sense_key = sense->flags & SSD_KEY;
+ *asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
+ *ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
+}
+
static __inline void
mfi_print_sense(struct mfi_softc *sc, void *sense)
{
int error, key, asc, ascq;
- scsi_extract_sense((struct scsi_sense_data *)sense,
+ mfi_extract_sense((struct scsi_sense_data_fixed *)sense,
&error, &key, &asc, &ascq);
device_printf(sc->mfi_dev, "sense error %d, sense_key %d, "
"asc %d, ascq %d\n", error, key, asc, ascq);
@@ -378,6 +395,7 @@ mfi_print_sense(struct mfi_softc *sc, void *sense)
(sc)->mfi_bhandle, (reg))
MALLOC_DECLARE(M_MFIBUF);
+SYSCTL_DECL(_hw_mfi);
#define MFI_CMD_TIMEOUT 30
#define MFI_MAXPHYS (128 * 1024)
diff --git a/sys/dev/mii/brgphy.c b/sys/dev/mii/brgphy.c
index 88090b7..2c14ea5 100644
--- a/sys/dev/mii/brgphy.c
+++ b/sys/dev/mii/brgphy.c
@@ -141,6 +141,7 @@ static const struct mii_phydesc brgphys[] = {
MII_PHY_DESC(BROADCOM2, BCM5784),
MII_PHY_DESC(BROADCOM3, BCM5717C),
MII_PHY_DESC(BROADCOM3, BCM5719C),
+ MII_PHY_DESC(BROADCOM3, BCM5720C),
MII_PHY_DESC(BROADCOM3, BCM57765),
MII_PHY_DESC(xxBROADCOM_ALT1, BCM5906),
MII_PHY_END
diff --git a/sys/dev/mii/mii.c b/sys/dev/mii/mii.c
index 224f85d..2c99bc0 100644
--- a/sys/dev/mii/mii.c
+++ b/sys/dev/mii/mii.c
@@ -48,7 +48,6 @@ __FBSDID("$FreeBSD$");
#include <net/if.h>
#include <net/if_media.h>
-#include <net/route.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
@@ -57,18 +56,20 @@ MODULE_VERSION(miibus, 1);
#include "miibus_if.h"
-static int miibus_print_child(device_t dev, device_t child);
-static int miibus_read_ivar(device_t dev, device_t child, int which,
- uintptr_t *result);
-static int miibus_child_location_str(device_t bus, device_t child, char *buf,
- size_t buflen);
-static int miibus_child_pnpinfo_str(device_t bus, device_t child, char *buf,
- size_t buflen);
-static int miibus_readreg(device_t, int, int);
-static int miibus_writereg(device_t, int, int, int);
-static void miibus_statchg(device_t);
-static void miibus_linkchg(device_t);
-static void miibus_mediainit(device_t);
+static device_attach_t miibus_attach;
+static bus_child_location_str_t miibus_child_location_str;
+static bus_child_pnpinfo_str_t miibus_child_pnpinfo_str;
+static device_detach_t miibus_detach;
+static bus_hinted_child_t miibus_hinted_child;
+static bus_print_child_t miibus_print_child;
+static device_probe_t miibus_probe;
+static bus_read_ivar_t miibus_read_ivar;
+static miibus_readreg_t miibus_readreg;
+static miibus_statchg_t miibus_statchg;
+static miibus_writereg_t miibus_writereg;
+static miibus_linkchg_t miibus_linkchg;
+static miibus_mediainit_t miibus_mediainit;
+
static unsigned char mii_bitreverse(unsigned char x);
static device_method_t miibus_methods[] = {
@@ -84,6 +85,7 @@ static device_method_t miibus_methods[] = {
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
DEVMETHOD(bus_child_pnpinfo_str, miibus_child_pnpinfo_str),
DEVMETHOD(bus_child_location_str, miibus_child_location_str),
+ DEVMETHOD(bus_hinted_child, miibus_hinted_child),
/* MII interface */
DEVMETHOD(miibus_readreg, miibus_readreg),
@@ -107,10 +109,11 @@ struct miibus_ivars {
struct ifnet *ifp;
ifm_change_cb_t ifmedia_upd;
ifm_stat_cb_t ifmedia_sts;
- int mii_flags;
+ u_int mii_flags;
+ u_int mii_offset;
};
-int
+static int
miibus_probe(device_t dev)
{
@@ -119,7 +122,7 @@ miibus_probe(device_t dev)
return (BUS_PROBE_SPECIFIC);
}
-int
+static int
miibus_attach(device_t dev)
{
struct miibus_ivars *ivars;
@@ -129,7 +132,6 @@ miibus_attach(device_t dev)
int i, nchildren;
mii = device_get_softc(dev);
- nchildren = 0;
if (device_get_children(dev, &children, &nchildren) == 0) {
for (i = 0; i < nchildren; i++) {
ma = device_get_ivars(children[i]);
@@ -152,7 +154,7 @@ miibus_attach(device_t dev)
return (bus_generic_attach(dev));
}
-int
+static int
miibus_detach(device_t dev)
{
struct mii_data *mii;
@@ -201,7 +203,7 @@ miibus_read_ivar(device_t dev, device_t child __unused, int which,
}
static int
-miibus_child_pnpinfo_str(device_t bus __unused, device_t child, char *buf,
+miibus_child_pnpinfo_str(device_t dev __unused, device_t child, char *buf,
size_t buflen)
{
struct mii_attach_args *ma;
@@ -214,7 +216,7 @@ miibus_child_pnpinfo_str(device_t bus __unused, device_t child, char *buf,
}
static int
-miibus_child_location_str(device_t bus __unused, device_t child, char *buf,
+miibus_child_location_str(device_t dev __unused, device_t child, char *buf,
size_t buflen)
{
struct mii_attach_args *ma;
@@ -224,6 +226,60 @@ miibus_child_location_str(device_t bus __unused, device_t child, char *buf,
return (0);
}
+static void
+miibus_hinted_child(device_t dev, const char *name, int unit)
+{
+ struct miibus_ivars *ivars;
+ struct mii_attach_args *args, *ma;
+ device_t *children, phy;
+ int i, nchildren;
+ u_int val;
+
+ if (resource_int_value(name, unit, "phyno", &val) != 0)
+ return;
+ if (device_get_children(dev, &children, &nchildren) != 0)
+ return;
+ ma = NULL;
+ for (i = 0; i < nchildren; i++) {
+ args = device_get_ivars(children[i]);
+ if (args->mii_phyno == val) {
+ ma = args;
+ break;
+ }
+ }
+ free(children, M_TEMP);
+
+ /*
+	 * Don't add a second instance of a PHY that was already identified
+	 * automatically by having media in its BMSR; only allow its attach
+	 * arguments to be altered.
+ */
+ if (ma == NULL) {
+ ma = malloc(sizeof(struct mii_attach_args), M_DEVBUF,
+ M_NOWAIT);
+ if (ma == NULL)
+ return;
+ phy = device_add_child(dev, name, unit);
+ if (phy == NULL) {
+ free(ma, M_DEVBUF);
+ return;
+ }
+ ivars = device_get_ivars(dev);
+ ma->mii_phyno = val;
+ ma->mii_offset = ivars->mii_offset++;
+ ma->mii_id1 = 0;
+ ma->mii_id2 = 0;
+ ma->mii_capmask = BMSR_DEFCAPMASK;
+ device_set_ivars(phy, ma);
+ }
+
+ if (resource_int_value(name, unit, "id1", &val) == 0)
+ ma->mii_id1 = val;
+ if (resource_int_value(name, unit, "id2", &val) == 0)
+ ma->mii_id2 = val;
+ if (resource_int_value(name, unit, "capmask", &val) == 0)
+ ma->mii_capmask = val;
+}
+
static int
miibus_readreg(device_t dev, int phy, int reg)
{
@@ -307,9 +363,10 @@ mii_attach(device_t dev, device_t *miibus, struct ifnet *ifp,
int phyloc, int offloc, int flags)
{
struct miibus_ivars *ivars;
- struct mii_attach_args ma, *args;
+ struct mii_attach_args *args, ma;
device_t *children, phy;
- int bmsr, first, i, nchildren, offset, phymax, phymin, rv;
+ int bmsr, first, i, nchildren, phymax, phymin, rv;
+ uint32_t phymask;
if (phyloc != MII_PHY_ANY && offloc != MII_OFFSET_ANY) {
printf("%s: phyloc and offloc specified\n", __func__);
@@ -366,27 +423,30 @@ mii_attach(device_t dev, device_t *miibus, struct ifnet *ifp,
ma.mii_capmask = capmask;
- phy = NULL;
- offset = 0;
+ if (resource_int_value(device_get_name(*miibus),
+ device_get_unit(*miibus), "phymask", &phymask) != 0)
+ phymask = 0xffffffff;
+
+ if (device_get_children(*miibus, &children, &nchildren) != 0) {
+ children = NULL;
+ nchildren = 0;
+ }
+ ivars->mii_offset = 0;
for (ma.mii_phyno = phymin; ma.mii_phyno <= phymax; ma.mii_phyno++) {
/*
* Make sure we haven't already configured a PHY at this
* address. This allows mii_attach() to be called
* multiple times.
*/
- if (device_get_children(*miibus, &children, &nchildren) == 0) {
- for (i = 0; i < nchildren; i++) {
- args = device_get_ivars(children[i]);
- if (args->mii_phyno == ma.mii_phyno) {
- /*
- * Yes, there is already something
- * configured at this address.
- */
- free(children, M_TEMP);
- goto skip;
- }
+ for (i = 0; i < nchildren; i++) {
+ args = device_get_ivars(children[i]);
+ if (args->mii_phyno == ma.mii_phyno) {
+ /*
+ * Yes, there is already something
+ * configured at this address.
+ */
+ goto skip;
}
- free(children, M_TEMP);
}
/*
@@ -405,18 +465,24 @@ mii_attach(device_t dev, device_t *miibus, struct ifnet *ifp,
* There is a PHY at this address. If we were given an
* `offset' locator, skip this PHY if it doesn't match.
*/
- if (offloc != MII_OFFSET_ANY && offloc != offset)
+ if (offloc != MII_OFFSET_ANY && offloc != ivars->mii_offset)
+ goto skip;
+
+ /*
+ * Skip this PHY if it's not included in the phymask hint.
+ */
+ if ((phymask & (1 << ma.mii_phyno)) == 0)
goto skip;
/*
- * Extract the IDs. Braindead PHYs will be handled by
+ * Extract the IDs. Braindead PHYs will be handled by
* the `ukphy' driver, as we have no ID information to
* match on.
*/
ma.mii_id1 = MIIBUS_READREG(dev, ma.mii_phyno, MII_PHYIDR1);
ma.mii_id2 = MIIBUS_READREG(dev, ma.mii_phyno, MII_PHYIDR2);
- ma.mii_offset = offset;
+ ma.mii_offset = ivars->mii_offset;
args = malloc(sizeof(struct mii_attach_args), M_DEVBUF,
M_NOWAIT);
if (args == NULL)
@@ -429,15 +495,24 @@ mii_attach(device_t dev, device_t *miibus, struct ifnet *ifp,
}
device_set_ivars(phy, args);
skip:
- offset++;
+ ivars->mii_offset++;
}
+ free(children, M_TEMP);
if (first != 0) {
- if (phy == NULL) {
+ rv = device_probe(*miibus);
+ if (rv != 0)
+ goto fail;
+ bus_enumerate_hinted_children(*miibus);
+ rv = device_get_children(*miibus, &children, &nchildren);
+ if (rv != 0)
+ goto fail;
+ free(children, M_TEMP);
+ if (nchildren == 0) {
rv = ENXIO;
goto fail;
}
- rv = bus_generic_attach(dev);
+ rv = device_attach(*miibus);
if (rv != 0)
goto fail;
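The mii.c rework above lets PHYs be seeded or filtered from static hints: miibus_hinted_child() consumes per-PHY hints through the normal hinted-children mechanism, and mii_attach() now honors a phymask hint on the miibus instance. Assuming the usual hint plumbing, a board configuration could pin things down like this (PHY driver name, unit numbers, and ID values are illustrative; the keys phymask, phyno, id1, id2, and capmask are exactly those read via resource_int_value() in the code above):

hint.miibus.0.phymask="0x00000002"	# probe only PHY address 1
hint.ukphy.0.at="miibus0"		# pre-seed a PHY the BMSR scan misses
hint.ukphy.0.phyno="7"
hint.ukphy.0.id1="0x0040"		# supply IDs for a PHY that hides them
hint.ukphy.0.id2="0x6212"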
diff --git a/sys/dev/mii/mii_bitbang.c b/sys/dev/mii/mii_bitbang.c
new file mode 100644
index 0000000..6a5f5dc
--- /dev/null
+++ b/sys/dev/mii/mii_bitbang.c
@@ -0,0 +1,180 @@
+/* $NetBSD: mii_bitbang.c,v 1.12 2008/05/04 17:06:09 xtraeme Exp $ */
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Common module for bit-bang'ing the MII.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/module.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
+
+MODULE_VERSION(mii_bitbang, 1);
+
+static void mii_bitbang_sendbits(device_t dev, mii_bitbang_ops_t ops,
+ uint32_t data, int nbits);
+
+#define MWRITE(x) \
+do { \
+ ops->mbo_write(dev, (x)); \
+ DELAY(1); \
+} while (/* CONSTCOND */ 0)
+
+#define MREAD ops->mbo_read(dev)
+
+#define MDO ops->mbo_bits[MII_BIT_MDO]
+#define MDI ops->mbo_bits[MII_BIT_MDI]
+#define MDC ops->mbo_bits[MII_BIT_MDC]
+#define MDIRPHY ops->mbo_bits[MII_BIT_DIR_HOST_PHY]
+#define MDIRHOST ops->mbo_bits[MII_BIT_DIR_PHY_HOST]
+
+/*
+ * mii_bitbang_sync:
+ *
+ * Synchronize the MII.
+ */
+void
+mii_bitbang_sync(device_t dev, mii_bitbang_ops_t ops)
+{
+ int i;
+ uint32_t v;
+
+ v = MDIRPHY | MDO;
+
+ MWRITE(v);
+ for (i = 0; i < 32; i++) {
+ MWRITE(v | MDC);
+ MWRITE(v);
+ }
+}
+
+/*
+ * mii_bitbang_sendbits:
+ *
+ * Send a series of bits to the MII.
+ */
+static void
+mii_bitbang_sendbits(device_t dev, mii_bitbang_ops_t ops, uint32_t data,
+ int nbits)
+{
+ int i;
+ uint32_t v;
+
+ v = MDIRPHY;
+ MWRITE(v);
+
+ for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
+ if (data & i)
+ v |= MDO;
+ else
+ v &= ~MDO;
+ MWRITE(v);
+ MWRITE(v | MDC);
+ MWRITE(v);
+ }
+}
+
+/*
+ * mii_bitbang_readreg:
+ *
+ * Read a PHY register by bit-bang'ing the MII.
+ */
+int
+mii_bitbang_readreg(device_t dev, mii_bitbang_ops_t ops, int phy, int reg)
+{
+ int i, error, val;
+
+ mii_bitbang_sync(dev, ops);
+
+ mii_bitbang_sendbits(dev, ops, MII_COMMAND_START, 2);
+ mii_bitbang_sendbits(dev, ops, MII_COMMAND_READ, 2);
+ mii_bitbang_sendbits(dev, ops, phy, 5);
+ mii_bitbang_sendbits(dev, ops, reg, 5);
+
+ /* Switch direction to PHY->host, without a clock transition. */
+ MWRITE(MDIRHOST);
+
+ /* Turnaround clock. */
+ MWRITE(MDIRHOST | MDC);
+ MWRITE(MDIRHOST);
+
+ /* Check for error. */
+ error = MREAD & MDI;
+
+ /* Idle clock. */
+ MWRITE(MDIRHOST | MDC);
+ MWRITE(MDIRHOST);
+
+ val = 0;
+ for (i = 0; i < 16; i++) {
+ val <<= 1;
+ /* Read data prior to clock low-high transition. */
+ if (error == 0 && (MREAD & MDI) != 0)
+ val |= 1;
+
+ MWRITE(MDIRHOST | MDC);
+ MWRITE(MDIRHOST);
+ }
+
+ /* Set direction to host->PHY, without a clock transition. */
+ MWRITE(MDIRPHY);
+
+ return (error != 0 ? 0 : val);
+}
+
+/*
+ * mii_bitbang_writereg:
+ *
+ * Write a PHY register by bit-bang'ing the MII.
+ */
+void
+mii_bitbang_writereg(device_t dev, mii_bitbang_ops_t ops, int phy, int reg,
+ int val)
+{
+
+ mii_bitbang_sync(dev, ops);
+
+ mii_bitbang_sendbits(dev, ops, MII_COMMAND_START, 2);
+ mii_bitbang_sendbits(dev, ops, MII_COMMAND_WRITE, 2);
+ mii_bitbang_sendbits(dev, ops, phy, 5);
+ mii_bitbang_sendbits(dev, ops, reg, 5);
+ mii_bitbang_sendbits(dev, ops, MII_COMMAND_ACK, 2);
+ mii_bitbang_sendbits(dev, ops, val, 16);
+
+ MWRITE(MDIRPHY);
+}
diff --git a/sys/dev/mii/mii_bitbang.h b/sys/dev/mii/mii_bitbang.h
new file mode 100644
index 0000000..2bc7427
--- /dev/null
+++ b/sys/dev/mii/mii_bitbang.h
@@ -0,0 +1,54 @@
+/* $NetBSD: mii_bitbang.h,v 1.6 2009/05/12 14:31:27 cegger Exp $ */
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#define MII_BIT_MDO 0 /* data out (host->PHY) */
+#define MII_BIT_MDI 1 /* data in (PHY->host) */
+#define MII_BIT_MDC 2 /* clock */
+#define MII_BIT_DIR_HOST_PHY 3 /* set direction: host->PHY */
+#define MII_BIT_DIR_PHY_HOST 4 /* set direction: PHY->host */
+#define MII_NBITS 5
+
+struct mii_bitbang_ops {
+ uint32_t (*mbo_read)(device_t);
+ void (*mbo_write)(device_t, uint32_t);
+ uint32_t mbo_bits[MII_NBITS];
+};
+
+typedef const struct mii_bitbang_ops *mii_bitbang_ops_t;
+
+int mii_bitbang_readreg(device_t dev, mii_bitbang_ops_t ops,
+ int phy, int reg);
+void mii_bitbang_sync(device_t dev, mii_bitbang_ops_t ops);
+void mii_bitbang_writereg(device_t dev, mii_bitbang_ops_t ops,
+ int phy, int reg, int val);
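A MAC driver adopting this API supplies a const ops table mapping the five abstract signals onto its MDIO control register bits, plus raw accessors for that register; its miibus read/write methods then collapse to one-line wrappers. A sketch for a hypothetical foo(4) device — the FOO_MIICTL_* bits and the two accessors are invented for illustration:

#include <sys/param.h>
#include <sys/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>

/* Invented register bits for the sketch. */
#define	FOO_MIICTL_MDO		0x01	/* data out (host->PHY) */
#define	FOO_MIICTL_MDI		0x02	/* data in (PHY->host) */
#define	FOO_MIICTL_MDC		0x04	/* clock */
#define	FOO_MIICTL_MDODIR	0x08	/* drive MDO onto the wire */

/* Raw accessors for the MDIO control register, defined by the driver. */
uint32_t foo_mii_bitbang_read(device_t);
void	 foo_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops foo_mii_bitbang_ops = {
	foo_mii_bitbang_read,
	foo_mii_bitbang_write,
	{
		FOO_MIICTL_MDO,		/* MII_BIT_MDO */
		FOO_MIICTL_MDI,		/* MII_BIT_MDI */
		FOO_MIICTL_MDC,		/* MII_BIT_MDC */
		FOO_MIICTL_MDODIR,	/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

int
foo_miibus_readreg(device_t dev, int phy, int reg)
{

	return (mii_bitbang_readreg(dev, &foo_mii_bitbang_ops, phy, reg));
}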
diff --git a/sys/dev/mii/miidevs b/sys/dev/mii/miidevs
index 1b98542..d069c68 100644
--- a/sys/dev/mii/miidevs
+++ b/sys/dev/mii/miidevs
@@ -179,6 +179,7 @@ model BROADCOM2 BCM5709S 0x003f BCM5709S 1000/2500baseSX PHY
model BROADCOM3 BCM5717C 0x0020 BCM5717C 1000BASE-T media interface
model BROADCOM3 BCM5719C 0x0022 BCM5719C 1000BASE-T media interface
model BROADCOM3 BCM57765 0x0024 BCM57765 1000BASE-T media interface
+model BROADCOM3 BCM5720C 0x0036 BCM5720C 1000BASE-T media interface
model xxBROADCOM_ALT1 BCM5906 0x0004 BCM5906 10/100baseTX media interface
/* Cicada Semiconductor PHYs (now owned by Vitesse?) */
diff --git a/sys/dev/mii/miivar.h b/sys/dev/mii/miivar.h
index 9af5b4e..34b0e9ed 100644
--- a/sys/dev/mii/miivar.h
+++ b/sys/dev/mii/miivar.h
@@ -246,10 +246,6 @@ MIIBUS_ACCESSOR(flags, FLAGS, u_int)
extern devclass_t miibus_devclass;
extern driver_t miibus_driver;
-int miibus_probe(device_t);
-int miibus_attach(device_t);
-int miibus_detach(device_t);
-
int mii_attach(device_t, device_t *, struct ifnet *, ifm_change_cb_t,
ifm_stat_cb_t, int, int, int, int);
void mii_down(struct mii_data *);
diff --git a/sys/dev/mmc/mmc.c b/sys/dev/mmc/mmc.c
index 6ff47c3..a5dfe92 100644
--- a/sys/dev/mmc/mmc.c
+++ b/sys/dev/mmc/mmc.c
@@ -105,7 +105,7 @@ struct mmc_ivars {
#define CMD_RETRIES 3
-SYSCTL_NODE(_hw, OID_AUTO, mmc, CTLFLAG_RD, NULL, "mmc driver");
+static SYSCTL_NODE(_hw, OID_AUTO, mmc, CTLFLAG_RD, NULL, "mmc driver");
static int mmc_debug;
SYSCTL_INT(_hw_mmc, OID_AUTO, debug, CTLFLAG_RW, &mmc_debug, 0, "Debug level");
diff --git a/sys/dev/mps/mps_sas.c b/sys/dev/mps/mps_sas.c
index 28ddca1..cf4757a 100644
--- a/sys/dev/mps/mps_sas.c
+++ b/sys/dev/mps/mps_sas.c
@@ -119,7 +119,7 @@ struct mpssas_devprobe {
#define MPSSAS_DISCOVERY_TIMEOUT 20
#define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
-MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
+static MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
static __inline int mpssas_set_lun(uint8_t *lun, u_int ccblun);
static struct mpssas_target * mpssas_alloc_target(struct mpssas_softc *,
diff --git a/sys/dev/mpt/mpt.c b/sys/dev/mpt/mpt.c
index 9dfd73b..5f0e0cf 100644
--- a/sys/dev/mpt/mpt.c
+++ b/sys/dev/mpt/mpt.c
@@ -2084,7 +2084,7 @@ mpt_send_port_enable(struct mpt_softc *mpt, int port)
mpt_send_cmd(mpt, req);
error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
- FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000);
+ FALSE, (mpt->is_sas || mpt->is_fc)? 300000 : 30000);
if (error != 0) {
mpt_prt(mpt, "port %d enable timed out\n", port);
return (-1);
diff --git a/sys/dev/msk/if_msk.c b/sys/dev/msk/if_msk.c
index b060949..36407a2 100644
--- a/sys/dev/msk/if_msk.c
+++ b/sys/dev/msk/if_msk.c
@@ -700,7 +700,7 @@ msk_init_rx_ring(struct msk_if_softc *sc_if)
{
struct msk_ring_data *rd;
struct msk_rxdesc *rxd;
- int i, prod;
+ int i, nbuf, prod;
MSK_IF_LOCK_ASSERT(sc_if);
@@ -710,11 +710,18 @@ msk_init_rx_ring(struct msk_if_softc *sc_if)
rd = &sc_if->msk_rdata;
bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
- prod = sc_if->msk_cdata.msk_rx_prod;
- i = 0;
+ for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
+ rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
+ rxd->rx_m = NULL;
+ rxd->rx_le = &rd->msk_rx_ring[prod];
+ MSK_INC(prod, MSK_RX_RING_CNT);
+ }
+ nbuf = MSK_RX_BUF_CNT;
+ prod = 0;
/* Have controller know how to compute Rx checksum. */
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
(sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
+#ifdef MSK_64BIT_DMA
rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
rxd->rx_m = NULL;
rxd->rx_le = &rd->msk_rx_ring[prod];
@@ -723,15 +730,21 @@ msk_init_rx_ring(struct msk_if_softc *sc_if)
rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
MSK_INC(prod, MSK_RX_RING_CNT);
MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
- i++;
- }
- for (; i < MSK_RX_RING_CNT; i++) {
+#endif
rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
rxd->rx_m = NULL;
rxd->rx_le = &rd->msk_rx_ring[prod];
+ rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
+ ETHER_HDR_LEN);
+ rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
+ MSK_INC(prod, MSK_RX_RING_CNT);
+ MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
+ nbuf--;
+ }
+ for (i = 0; i < nbuf; i++) {
if (msk_newbuf(sc_if, prod) != 0)
return (ENOBUFS);
- MSK_INC(prod, MSK_RX_RING_CNT);
+ MSK_RX_INC(prod, MSK_RX_RING_CNT);
}
bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
@@ -739,10 +752,11 @@ msk_init_rx_ring(struct msk_if_softc *sc_if)
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Update prefetch unit. */
- sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
+ sc_if->msk_cdata.msk_rx_prod = prod;
CSR_WRITE_2(sc_if->msk_softc,
Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
- sc_if->msk_cdata.msk_rx_prod);
+ (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) %
+ MSK_RX_RING_CNT);
if (msk_rx_fill(sc_if, 0) != 0)
return (ENOBUFS);
return (0);
@@ -753,7 +767,7 @@ msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
struct msk_ring_data *rd;
struct msk_rxdesc *rxd;
- int i, prod;
+ int i, nbuf, prod;
MSK_IF_LOCK_ASSERT(sc_if);
@@ -764,11 +778,18 @@ msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
rd = &sc_if->msk_rdata;
bzero(rd->msk_jumbo_rx_ring,
sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
- prod = sc_if->msk_cdata.msk_rx_prod;
- i = 0;
+ for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
+ rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
+ rxd->rx_m = NULL;
+ rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
+ MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
+ }
+ nbuf = MSK_RX_BUF_CNT;
+ prod = 0;
/* Have controller know how to compute Rx checksum. */
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
(sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
+#ifdef MSK_64BIT_DMA
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
rxd->rx_m = NULL;
rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
@@ -777,25 +798,33 @@ msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
- i++;
- }
- for (; i < MSK_JUMBO_RX_RING_CNT; i++) {
+#endif
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
rxd->rx_m = NULL;
rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
+ rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
+ ETHER_HDR_LEN);
+ rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
+ MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
+ MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
+ nbuf--;
+ }
+ for (i = 0; i < nbuf; i++) {
if (msk_jumbo_newbuf(sc_if, prod) != 0)
return (ENOBUFS);
- MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
+ MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT);
}
bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
sc_if->msk_cdata.msk_jumbo_rx_ring_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
+ /* Update prefetch unit. */
+ sc_if->msk_cdata.msk_rx_prod = prod;
CSR_WRITE_2(sc_if->msk_softc,
Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
- sc_if->msk_cdata.msk_rx_prod);
+ (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) %
+ MSK_JUMBO_RX_RING_CNT);
if (msk_rx_fill(sc_if, 1) != 0)
return (ENOBUFS);
return (0);
@@ -813,6 +842,7 @@ msk_init_tx_ring(struct msk_if_softc *sc_if)
sc_if->msk_cdata.msk_tx_prod = 0;
sc_if->msk_cdata.msk_tx_cons = 0;
sc_if->msk_cdata.msk_tx_cnt = 0;
+ sc_if->msk_cdata.msk_tx_high_addr = 0;
rd = &sc_if->msk_rdata;
bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
@@ -834,6 +864,12 @@ msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
struct msk_rxdesc *rxd;
struct mbuf *m;
+#ifdef MSK_64BIT_DMA
+ rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
+ rx_le = rxd->rx_le;
+ rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
+ MSK_INC(idx, MSK_RX_RING_CNT);
+#endif
rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
m = rxd->rx_m;
rx_le = rxd->rx_le;
@@ -847,6 +883,12 @@ msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
struct msk_rxdesc *rxd;
struct mbuf *m;
+#ifdef MSK_64BIT_DMA
+ rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
+ rx_le = rxd->rx_le;
+ rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
+ MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
+#endif
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
m = rxd->rx_m;
rx_le = rxd->rx_le;
@@ -884,10 +926,18 @@ msk_newbuf(struct msk_if_softc *sc_if, int idx)
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
+#ifdef MSK_64BIT_DMA
+ rx_le = rxd->rx_le;
+ rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
+ rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
+ MSK_INC(idx, MSK_RX_RING_CNT);
+ rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
+#endif
if (rxd->rx_m != NULL) {
bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
+ rxd->rx_m = NULL;
}
map = rxd->rx_dmamap;
rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
@@ -937,11 +987,19 @@ msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
+#ifdef MSK_64BIT_DMA
+ rx_le = rxd->rx_le;
+ rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
+ rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
+ MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
+ rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
+#endif
if (rxd->rx_m != NULL) {
bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
rxd->rx_dmamap);
+ rxd->rx_m = NULL;
}
map = rxd->rx_dmamap;
rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
@@ -1472,7 +1530,7 @@ mskc_reset(struct msk_softc *sc)
/* Clear status list. */
bzero(sc->msk_stat_ring,
- sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
+ sizeof(struct msk_stat_desc) * sc->msk_stat_count);
sc->msk_stat_cons = 0;
bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -1483,7 +1541,7 @@ mskc_reset(struct msk_softc *sc)
CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
/* Set the status list last index. */
- CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
+ CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1);
if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
/* WA for dev. #4.3 */
@@ -2083,17 +2141,29 @@ static int
msk_status_dma_alloc(struct msk_softc *sc)
{
struct msk_dmamap_arg ctx;
- int error;
+ bus_size_t stat_sz;
+ int count, error;
+ /*
+	 * The controller appears to require that the number of status
+	 * LE entries be a power of 2, with a maximum of 4096 entries.
+	 * For dual-port controllers, the number of status LE entries
+	 * should be large enough to hold both ports' status updates.
+ */
+ count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT;
+ count = imin(4096, roundup2(count, 1024));
+ sc->msk_stat_count = count;
+ stat_sz = count * sizeof(struct msk_stat_desc);
error = bus_dma_tag_create(
bus_get_dma_tag(sc->msk_dev), /* parent */
MSK_STAT_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- MSK_STAT_RING_SZ, /* maxsize */
+ stat_sz, /* maxsize */
1, /* nsegments */
- MSK_STAT_RING_SZ, /* maxsegsize */
+ stat_sz, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->msk_stat_tag);
@@ -2114,9 +2184,8 @@ msk_status_dma_alloc(struct msk_softc *sc)
}
ctx.msk_busaddr = 0;
- error = bus_dmamap_load(sc->msk_stat_tag,
- sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
- msk_dmamap_cb, &ctx, 0);
+ error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
+ sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->msk_dev,
"failed to load DMA'able memory for status ring\n");
@@ -2157,27 +2226,10 @@ msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
int error, i;
/* Create parent DMA tag. */
- /*
- * XXX
- * It seems that Yukon II supports full 64bits DMA operations. But
- * it needs two descriptors(list elements) for 64bits DMA operations.
- * Since we don't know what DMA address mappings(32bits or 64bits)
- * would be used in advance for each mbufs, we limits its DMA space
- * to be in range of 32bits address space. Otherwise, we should check
- * what DMA address is used and chain another descriptor for the
- * 64bits DMA operation. This also means descriptor ring size is
- * variable. Limiting DMA address to be in 32bit address space greatly
- * simplifies descriptor handling and possibly would increase
- * performance a bit due to efficient handling of descriptors.
- * Apart from harassing checksum offloading mechanisms, it seems
- * it's really bad idea to use a separate descriptor for 64bit
- * DMA operation to save small descriptor memory. Anyway, I've
- * never seen these exotic scheme on ethernet interface hardware.
- */
error = bus_dma_tag_create(
bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
1, 0, /* alignment, boundary */
- BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
@@ -2283,7 +2335,7 @@ msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
ctx.msk_busaddr = 0;
error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
- MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
+ MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc_if->msk_if_dev,
"failed to load DMA'able memory for Tx ring\n");
@@ -2304,7 +2356,7 @@ msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
ctx.msk_busaddr = 0;
error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
- MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
+ MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc_if->msk_if_dev,
"failed to load DMA'able memory for Rx ring\n");
@@ -2421,7 +2473,7 @@ msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
sc_if->msk_cdata.msk_jumbo_rx_ring_map,
sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
- msk_dmamap_cb, &ctx, 0);
+ msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc_if->msk_if_dev,
"failed to load DMA'able memory for jumbo Rx ring\n");
@@ -2781,6 +2833,18 @@ msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
}
}
+#ifdef MSK_64BIT_DMA
+ if (MSK_ADDR_HI(txsegs[0].ds_addr) !=
+ sc_if->msk_cdata.msk_tx_high_addr) {
+ sc_if->msk_cdata.msk_tx_high_addr =
+ MSK_ADDR_HI(txsegs[0].ds_addr);
+ tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
+ tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr));
+ tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
+ sc_if->msk_cdata.msk_tx_cnt++;
+ MSK_INC(prod, MSK_TX_RING_CNT);
+ }
+#endif
si = prod;
tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
@@ -2795,6 +2859,20 @@ msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
for (i = 1; i < nseg; i++) {
tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
+#ifdef MSK_64BIT_DMA
+ if (MSK_ADDR_HI(txsegs[i].ds_addr) !=
+ sc_if->msk_cdata.msk_tx_high_addr) {
+ sc_if->msk_cdata.msk_tx_high_addr =
+ MSK_ADDR_HI(txsegs[i].ds_addr);
+ tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
+ tx_le->msk_addr =
+ htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
+ tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
+ sc_if->msk_cdata.msk_tx_cnt++;
+ MSK_INC(prod, MSK_TX_RING_CNT);
+ tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
+ }
+#endif
tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
tx_le->msk_control = htole32(txsegs[i].ds_len | control |
OP_BUFFER | HW_OWNER);
@@ -3147,7 +3225,12 @@ msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
msk_discard_rxbuf(sc_if, cons);
break;
}
+#ifdef MSK_64BIT_DMA
+ rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) %
+ MSK_RX_RING_CNT];
+#else
rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
+#endif
m = rxd->rx_m;
if (msk_newbuf(sc_if, cons) != 0) {
ifp->if_iqdrops++;
@@ -3175,8 +3258,8 @@ msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
MSK_IF_LOCK(sc_if);
} while (0);
- MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
- MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
+ MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
+ MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
}
static void
@@ -3207,7 +3290,12 @@ msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
msk_discard_jumbo_rxbuf(sc_if, cons);
break;
}
+#ifdef MSK_64BIT_DMA
+ jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) %
+ MSK_JUMBO_RX_RING_CNT];
+#else
jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
+#endif
m = jrxd->rx_m;
if (msk_jumbo_newbuf(sc_if, cons) != 0) {
ifp->if_iqdrops++;
@@ -3235,8 +3323,8 @@ msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
MSK_IF_LOCK(sc_if);
} while (0);
- MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
- MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
+ MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
+ MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
}
static void
@@ -3581,7 +3669,7 @@ msk_handle_events(struct msk_softc *sc)
control & STLE_OP_MASK);
break;
}
- MSK_INC(cons, MSK_STAT_RING_CNT);
+ MSK_INC(cons, sc->msk_stat_count);
if (rxprog > sc->msk_process_limit)
break;
}
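The new sizing logic in msk_status_dma_alloc() above derives the status ring size from the data ring sizes instead of the old fixed MSK_STAT_RING_CNT. Working the arithmetic with the ring counts defined in if_mskreg.h below: with 64bit DMA (RX=512, TX=384), count = 3*512 + 384 = 1920, roundup2(1920, 1024) = 2048, and imin(4096, 2048) = 2048 entries; without 64bit DMA (RX=256, TX=256), count = 3*256 + 256 = 1024, already a multiple of 1024, giving 1024 entries. For these inputs the multiple-of-1024 roundup happens to land on powers of two, which satisfies the hardware constraint noted in the comment.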
diff --git a/sys/dev/msk/if_mskreg.h b/sys/dev/msk/if_mskreg.h
index 583f5e8..5465deb 100644
--- a/sys/dev/msk/if_mskreg.h
+++ b/sys/dev/msk/if_mskreg.h
@@ -2315,35 +2315,48 @@ struct msk_stat_desc {
#define BMU_UDP_CHECK (0x57<<16) /* Descr with UDP ext (YUKON only) */
#define BMU_BBC 0xffff /* Bit 15.. 0: Buffer Byte Counter */
+/*
+ * The controller requires an additional LE op code for 64bit DMA
+ * operation.  Since the driver uses a fixed number of RX buffers,
+ * this limitation would halve the number of usable RX buffers under
+ * 64bit DMA, so the number of RX buffers is doubled on platforms
+ * that support 64bit DMA.  On the TX side, the controller requires
+ * an additional OP_ADDR64 op code whenever a TX buffer uses a
+ * different high address value than the previously used one.  The
+ * driver monitors high DMA address changes in TX, inserts an
+ * OP_ADDR64 op code when the high DMA address changes, and
+ * allocates 50% more total TX buffers on platforms that support
+ * 64bit DMA.
+ */
+#if (BUS_SPACE_MAXADDR > 0xFFFFFFFF)
+#define MSK_64BIT_DMA
+#define MSK_TX_RING_CNT 384
+#define MSK_RX_RING_CNT 512
+#else
+#undef MSK_64BIT_DMA
#define MSK_TX_RING_CNT 256
#define MSK_RX_RING_CNT 256
+#endif
#define MSK_RX_BUF_ALIGN 8
#define MSK_JUMBO_RX_RING_CNT MSK_RX_RING_CNT
-#define MSK_STAT_RING_CNT ((1 + 3) * (MSK_TX_RING_CNT + MSK_RX_RING_CNT))
#define MSK_MAXTXSEGS 32
#define MSK_TSO_MAXSGSIZE 4096
#define MSK_TSO_MAXSIZE (65535 + sizeof(struct ether_vlan_header))
/*
- * It seems that the hardware requires extra decriptors(LEs) to offload
- * TCP/UDP checksum, VLAN hardware tag inserstion and TSO.
+ * It seems that the hardware requires extra descriptors(LEs) to offload
+ * TCP/UDP checksum, VLAN hardware tag insertion and TSO.
*
* 1 descriptor for TCP/UDP checksum offload.
* 1 descriptor VLAN hardware tag insertion.
* 1 descriptor for TSO(TCP Segmentation Offload)
- * 1 descriptor for 64bits DMA : Not applicatable due to the use of
- * BUS_SPACE_MAXADDR_32BIT in parent DMA tag creation.
+ * 1 descriptor for each 64bit DMA transfer
*/
+#ifdef MSK_64BIT_DMA
+#define MSK_RESERVED_TX_DESC_CNT (MSK_MAXTXSEGS + 3)
+#else
#define MSK_RESERVED_TX_DESC_CNT 3
+#endif
-/*
- * Jumbo buffer stuff. Note that we must allocate more jumbo
- * buffers than there are descriptors in the receive ring. This
- * is because we don't know how long it will take for a packet
- * to be released after we hand it off to the upper protocol
- * layers. To be safe, we allocate 1.5 times the number of
- * receive descriptors.
- */
#define MSK_JUMBO_FRAMELEN 9022
#define MSK_JUMBO_MTU (MSK_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
#define MSK_MAX_FRAMELEN \
@@ -2380,6 +2393,7 @@ struct msk_chain_data {
bus_dmamap_t msk_jumbo_rx_sparemap;
uint16_t msk_tso_mtu;
uint32_t msk_last_csum;
+ uint32_t msk_tx_high_addr;
int msk_tx_prod;
int msk_tx_cons;
int msk_tx_cnt;
@@ -2411,10 +2425,17 @@ struct msk_ring_data {
(sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT)
#define MSK_JUMBO_RX_RING_SZ \
(sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT)
-#define MSK_STAT_RING_SZ \
- (sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT)
#define MSK_INC(x, y) (x) = (x + 1) % y
+#ifdef MSK_64BIT_DMA
+#define MSK_RX_INC(x, y) (x) = (x + 2) % y
+#define MSK_RX_BUF_CNT (MSK_RX_RING_CNT / 2)
+#define MSK_JUMBO_RX_BUF_CNT (MSK_JUMBO_RX_RING_CNT / 2)
+#else
+#define MSK_RX_INC(x, y) (x) = (x + 1) % y
+#define MSK_RX_BUF_CNT MSK_RX_RING_CNT
+#define MSK_JUMBO_RX_BUF_CNT MSK_JUMBO_RX_RING_CNT
+#endif
#define MSK_PCI_BUS 0
#define MSK_PCIX_BUS 1
@@ -2519,6 +2540,7 @@ struct msk_softc {
int msk_int_holdoff;
int msk_process_limit;
int msk_stat_cons;
+ int msk_stat_count;
struct mtx msk_mtx;
};
diff --git a/sys/dev/mvs/mvs.c b/sys/dev/mvs/mvs.c
index 54808c5..e128616 100644
--- a/sys/dev/mvs/mvs.c
+++ b/sys/dev/mvs/mvs.c
@@ -94,7 +94,7 @@ static void mvs_process_request_sense(device_t dev, union ccb *ccb);
static void mvsaction(struct cam_sim *sim, union ccb *ccb);
static void mvspoll(struct cam_sim *sim);
-MALLOC_DEFINE(M_MVS, "MVS driver", "MVS driver data buffers");
+static MALLOC_DEFINE(M_MVS, "MVS driver", "MVS driver data buffers");
#define recovery_type spriv_field0
#define RECOVERY_NONE 0
diff --git a/sys/dev/mvs/mvs_pci.c b/sys/dev/mvs/mvs_pci.c
index e2e37da..36dd93d 100644
--- a/sys/dev/mvs/mvs_pci.c
+++ b/sys/dev/mvs/mvs_pci.c
@@ -177,15 +177,10 @@ static int
mvs_detach(device_t dev)
{
struct mvs_controller *ctlr = device_get_softc(dev);
- device_t *children;
- int nchildren, i;
/* Detach & delete all children */
- if (!device_get_children(dev, &children, &nchildren)) {
- for (i = 0; i < nchildren; i++)
- device_delete_child(dev, children[i]);
- free(children, M_TEMP);
- }
+ device_delete_all_children(dev);
+
/* Free interrupt. */
if (ctlr->irq.r_irq) {
bus_teardown_intr(dev, ctlr->irq.r_irq,
diff --git a/sys/dev/mvs/mvs_soc.c b/sys/dev/mvs/mvs_soc.c
index 5c1116c..670bfec 100644
--- a/sys/dev/mvs/mvs_soc.c
+++ b/sys/dev/mvs/mvs_soc.c
@@ -173,15 +173,10 @@ static int
mvs_detach(device_t dev)
{
struct mvs_controller *ctlr = device_get_softc(dev);
- device_t *children;
- int nchildren, i;
/* Detach & delete all children */
- if (!device_get_children(dev, &children, &nchildren)) {
- for (i = 0; i < nchildren; i++)
- device_delete_child(dev, children[i]);
- free(children, M_TEMP);
- }
+ device_delete_all_children(dev);
+
/* Free interrupt. */
if (ctlr->irq.r_irq) {
bus_teardown_intr(dev, ctlr->irq.r_irq,
diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c
index 662f201..5af96ba 100644
--- a/sys/dev/mwl/if_mwl.c
+++ b/sys/dev/mwl/if_mwl.c
@@ -262,7 +262,7 @@ static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
} while (0)
#endif
-MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
+static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
/*
* Each packet has fixed front matter: a 2-byte length
diff --git a/sys/dev/mwl/mwlhal.c b/sys/dev/mwl/mwlhal.c
index ffc956f..4547fe5 100644
--- a/sys/dev/mwl/mwlhal.c
+++ b/sys/dev/mwl/mwlhal.c
@@ -190,7 +190,8 @@ static void dumpresult(struct mwl_hal_priv *, int showresult);
#endif /* MWLHAL_DEBUG */
SYSCTL_DECL(_hw_mwl);
-SYSCTL_NODE(_hw_mwl, OID_AUTO, hal, CTLFLAG_RD, 0, "Marvell HAL parameters");
+static SYSCTL_NODE(_hw_mwl, OID_AUTO, hal, CTLFLAG_RD, 0,
+ "Marvell HAL parameters");
static __inline void
MWL_HAL_LOCK(struct mwl_hal_priv *mh)
diff --git a/sys/dev/netmap/head.diff b/sys/dev/netmap/head.diff
new file mode 100644
index 0000000..51a8e34
--- /dev/null
+++ b/sys/dev/netmap/head.diff
@@ -0,0 +1,654 @@
+Index: conf/NOTES
+===================================================================
+--- conf/NOTES (revision 227552)
++++ conf/NOTES (working copy)
+@@ -799,6 +799,12 @@
+ # option. DHCP requires bpf.
+ device bpf
+
++# The `netmap' device implements memory-mapped access to network
++# devices from userspace, enabling wire-speed packet capture and
++# generation even at 10Gbit/s. Requires support in the device
++# driver. Supported drivers are ixgbe, e1000, re.
++device netmap
++
+ # The `disc' device implements a minimal network interface,
+ # which throws away all packets sent and never receives any. It is
+ # included for testing and benchmarking purposes.
+Index: conf/files
+===================================================================
+--- conf/files (revision 227552)
++++ conf/files (working copy)
+@@ -1507,6 +1507,7 @@
+ dev/my/if_my.c optional my
+ dev/ncv/ncr53c500.c optional ncv
+ dev/ncv/ncr53c500_pccard.c optional ncv pccard
++dev/netmap/netmap.c optional netmap
+ dev/nge/if_nge.c optional nge
+ dev/nxge/if_nxge.c optional nxge
+ dev/nxge/xgehal/xgehal-device.c optional nxge
+Index: conf/options
+===================================================================
+--- conf/options (revision 227552)
++++ conf/options (working copy)
+@@ -689,6 +689,7 @@
+
+ # various 'device presence' options.
+ DEV_BPF opt_bpf.h
++DEV_NETMAP opt_global.h
+ DEV_MCA opt_mca.h
+ DEV_CARP opt_carp.h
+ DEV_SPLASH opt_splash.h
+Index: dev/e1000/if_igb.c
+===================================================================
+--- dev/e1000/if_igb.c (revision 227552)
++++ dev/e1000/if_igb.c (working copy)
+@@ -369,6 +369,9 @@
+ &igb_rx_process_limit, 0,
+ "Maximum number of received packets to process at a time, -1 means unlimited");
+
++#ifdef DEV_NETMAP
++#include <dev/netmap/if_igb_netmap.h>
++#endif /* DEV_NETMAP */
+ /*********************************************************************
+ * Device identification routine
+ *
+@@ -664,6 +667,9 @@
+ adapter->led_dev = led_create(igb_led_func, adapter,
+ device_get_nameunit(dev));
+
++#ifdef DEV_NETMAP
++ igb_netmap_attach(adapter);
++#endif /* DEV_NETMAP */
+ INIT_DEBUGOUT("igb_attach: end");
+
+ return (0);
+@@ -742,6 +748,9 @@
+
+ callout_drain(&adapter->timer);
+
++#ifdef DEV_NETMAP
++ netmap_detach(adapter->ifp);
++#endif /* DEV_NETMAP */
+ igb_free_pci_resources(adapter);
+ bus_generic_detach(dev);
+ if_free(ifp);
+@@ -3212,6 +3221,10 @@
+ struct adapter *adapter = txr->adapter;
+ struct igb_tx_buffer *txbuf;
+ int i;
++#ifdef DEV_NETMAP
++ struct netmap_slot *slot = netmap_reset(NA(adapter->ifp),
++ NR_TX, txr->me, 0);
++#endif
+
+ /* Clear the old descriptor contents */
+ IGB_TX_LOCK(txr);
+@@ -3231,6 +3244,13 @@
+ m_freem(txbuf->m_head);
+ txbuf->m_head = NULL;
+ }
++#ifdef DEV_NETMAP
++ if (slot) {
++ netmap_load_map(txr->txtag, txbuf->map,
++ NMB(slot), adapter->rx_mbuf_sz);
++ slot++;
++ }
++#endif /* DEV_NETMAP */
+ /* clear the watch index */
+ txbuf->next_eop = -1;
+ }
+@@ -3626,6 +3646,19 @@
+
+ IGB_TX_LOCK_ASSERT(txr);
+
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ struct netmap_adapter *na = NA(ifp);
++
++ selwakeuppri(&na->tx_rings[txr->me].si, PI_NET);
++ IGB_TX_UNLOCK(txr);
++ IGB_CORE_LOCK(adapter);
++ selwakeuppri(&na->tx_rings[na->num_queues + 1].si, PI_NET);
++ IGB_CORE_UNLOCK(adapter);
++ IGB_TX_LOCK(txr); // the caller is supposed to own the lock
++ return FALSE;
++ }
++#endif /* DEV_NETMAP */
+ if (txr->tx_avail == adapter->num_tx_desc) {
+ txr->queue_status = IGB_QUEUE_IDLE;
+ return FALSE;
+@@ -3949,6 +3982,10 @@
+ bus_dma_segment_t pseg[1], hseg[1];
+ struct lro_ctrl *lro = &rxr->lro;
+ int rsize, nsegs, error = 0;
++#ifdef DEV_NETMAP
++ struct netmap_slot *slot = netmap_reset(NA(rxr->adapter->ifp),
++ NR_RX, rxr->me, 0);
++#endif
+
+ adapter = rxr->adapter;
+ dev = adapter->dev;
+@@ -3974,6 +4011,18 @@
+ struct mbuf *mh, *mp;
+
+ rxbuf = &rxr->rx_buffers[j];
++#ifdef DEV_NETMAP
++ if (slot) {
++ netmap_load_map(rxr->ptag,
++ rxbuf->pmap, NMB(slot),
++ adapter->rx_mbuf_sz);
++ /* Update descriptor */
++ rxr->rx_base[j].read.pkt_addr =
++ htole64(vtophys(NMB(slot)));
++ slot++;
++ continue;
++ }
++#endif /* DEV_NETMAP */
+ if (rxr->hdr_split == FALSE)
+ goto skip_head;
+
+@@ -4436,6 +4485,19 @@
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ struct netmap_adapter *na = NA(ifp);
++
++ selwakeuppri(&na->rx_rings[rxr->me].si, PI_NET);
++ IGB_RX_UNLOCK(rxr);
++ IGB_CORE_LOCK(adapter);
++ selwakeuppri(&na->rx_rings[na->num_queues + 1].si, PI_NET);
++ IGB_CORE_UNLOCK(adapter);
++ return (0);
++ }
++#endif /* DEV_NETMAP */
++
+ /* Main clean loop */
+ for (i = rxr->next_to_check; count != 0;) {
+ struct mbuf *sendmp, *mh, *mp;
+Index: dev/e1000/if_lem.c
+===================================================================
+--- dev/e1000/if_lem.c (revision 227552)
++++ dev/e1000/if_lem.c (working copy)
+@@ -316,6 +316,10 @@
+ /* Global used in WOL setup with multiport cards */
+ static int global_quad_port_a = 0;
+
++#ifdef DEV_NETMAP
++#include <dev/netmap/if_lem_netmap.h>
++#endif /* DEV_NETMAP */
++
+ /*********************************************************************
+ * Device identification routine
+ *
+@@ -646,6 +650,9 @@
+ adapter->led_dev = led_create(lem_led_func, adapter,
+ device_get_nameunit(dev));
+
++#ifdef DEV_NETMAP
++ lem_netmap_attach(adapter);
++#endif /* DEV_NETMAP */
+ INIT_DEBUGOUT("lem_attach: end");
+
+ return (0);
+@@ -724,6 +731,9 @@
+ callout_drain(&adapter->timer);
+ callout_drain(&adapter->tx_fifo_timer);
+
++#ifdef DEV_NETMAP
++ netmap_detach(ifp);
++#endif /* DEV_NETMAP */
+ lem_free_pci_resources(adapter);
+ bus_generic_detach(dev);
+ if_free(ifp);
+@@ -2637,6 +2647,9 @@
+ lem_setup_transmit_structures(struct adapter *adapter)
+ {
+ struct em_buffer *tx_buffer;
++#ifdef DEV_NETMAP
++ struct netmap_slot *slot = netmap_reset(NA(adapter->ifp), NR_TX, 0, 0);
++#endif
+
+ /* Clear the old ring contents */
+ bzero(adapter->tx_desc_base,
+@@ -2650,6 +2663,15 @@
+ bus_dmamap_unload(adapter->txtag, tx_buffer->map);
+ m_freem(tx_buffer->m_head);
+ tx_buffer->m_head = NULL;
++#ifdef DEV_NETMAP
++ if (slot) {
++ /* reload the map for netmap mode */
++ netmap_load_map(adapter->txtag,
++ tx_buffer->map, NMB(slot),
++ NA(adapter->ifp)->buff_size);
++ slot++;
++ }
++#endif /* DEV_NETMAP */
+ tx_buffer->next_eop = -1;
+ }
+
+@@ -2951,6 +2973,12 @@
+
+ EM_TX_LOCK_ASSERT(adapter);
+
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ selwakeuppri(&NA(ifp)->tx_rings[0].si, PI_NET);
++ return;
++ }
++#endif /* DEV_NETMAP */
+ if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
+ return;
+
+@@ -3181,6 +3209,9 @@
+ {
+ struct em_buffer *rx_buffer;
+ int i, error;
++#ifdef DEV_NETMAP
++ struct netmap_slot *slot = netmap_reset(NA(adapter->ifp), NR_RX, 0, 0);
++#endif
+
+ /* Reset descriptor ring */
+ bzero(adapter->rx_desc_base,
+@@ -3200,6 +3231,18 @@
+
+ /* Allocate new ones. */
+ for (i = 0; i < adapter->num_rx_desc; i++) {
++#ifdef DEV_NETMAP
++ if (slot) {
++ netmap_load_map(adapter->rxtag,
++ rx_buffer->map, NMB(slot),
++ NA(adapter->ifp)->buff_size);
++ /* Update descriptor */
++ adapter->rx_desc_base[i].buffer_addr =
++ htole64(vtophys(NMB(slot)));
++ slot++;
++ continue;
++ }
++#endif /* DEV_NETMAP */
+ error = lem_get_buf(adapter, i);
+ if (error)
+ return (error);
+@@ -3407,6 +3450,14 @@
+ bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ selwakeuppri(&NA(ifp)->rx_rings[0].si, PI_NET);
++ EM_RX_UNLOCK(adapter);
++ return (0);
++ }
++#endif /* DEV_NETMAP */
++
+ if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
+ if (done != NULL)
+ *done = rx_sent;
+Index: dev/e1000/if_em.c
+===================================================================
+--- dev/e1000/if_em.c (revision 227552)
++++ dev/e1000/if_em.c (working copy)
+@@ -399,6 +399,10 @@
+ /* Global used in WOL setup with multiport cards */
+ static int global_quad_port_a = 0;
+
++#ifdef DEV_NETMAP
++#include <dev/netmap/if_em_netmap.h>
++#endif /* DEV_NETMAP */
++
+ /*********************************************************************
+ * Device identification routine
+ *
+@@ -714,6 +718,9 @@
+
+ adapter->led_dev = led_create(em_led_func, adapter,
+ device_get_nameunit(dev));
++#ifdef DEV_NETMAP
++ em_netmap_attach(adapter);
++#endif /* DEV_NETMAP */
+
+ INIT_DEBUGOUT("em_attach: end");
+
+@@ -785,6 +792,10 @@
+ ether_ifdetach(adapter->ifp);
+ callout_drain(&adapter->timer);
+
++#ifdef DEV_NETMAP
++ netmap_detach(ifp);
++#endif /* DEV_NETMAP */
++
+ em_free_pci_resources(adapter);
+ bus_generic_detach(dev);
+ if_free(ifp);
+@@ -3213,6 +3224,10 @@
+ struct adapter *adapter = txr->adapter;
+ struct em_buffer *txbuf;
+ int i;
++#ifdef DEV_NETMAP
++ struct netmap_slot *slot = netmap_reset(NA(adapter->ifp),
++ NR_TX, txr->me, 0);
++#endif
+
+ /* Clear the old descriptor contents */
+ EM_TX_LOCK(txr);
+@@ -3232,6 +3247,16 @@
+ m_freem(txbuf->m_head);
+ txbuf->m_head = NULL;
+ }
++#ifdef DEV_NETMAP
++ if (slot) {
++ /* reload the map for netmap mode */
++ netmap_load_map(txr->txtag,
++ txbuf->map, NMB(slot),
++ adapter->rx_mbuf_sz);
++ slot++;
++ }
++#endif /* DEV_NETMAP */
++
+ /* clear the watch index */
+ txbuf->next_eop = -1;
+ }
+@@ -3682,6 +3707,12 @@
+ struct ifnet *ifp = adapter->ifp;
+
+ EM_TX_LOCK_ASSERT(txr);
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ selwakeuppri(&NA(ifp)->tx_rings[txr->me].si, PI_NET);
++ return (FALSE);
++ }
++#endif /* DEV_NETMAP */
+
+ /* No work, make sure watchdog is off */
+ if (txr->tx_avail == adapter->num_tx_desc) {
+@@ -3978,6 +4009,33 @@
+ if (++j == adapter->num_rx_desc)
+ j = 0;
+ }
++#ifdef DEV_NETMAP
++ {
++ /* slot is NULL if we are not in netmap mode */
++ struct netmap_slot *slot = netmap_reset(NA(adapter->ifp),
++ NR_RX, rxr->me, rxr->next_to_check);
++ /*
++ * we need to restore all buffer addresses in the ring as they might
++ * be in the wrong state if we are exiting from netmap mode.
++ */
++ for (j = 0; j != adapter->num_rx_desc; ++j) {
++ void *addr;
++ rxbuf = &rxr->rx_buffers[j];
++ if (rxbuf->m_head == NULL && !slot)
++ continue;
++ addr = slot ? NMB(slot) : rxbuf->m_head->m_data;
++ // XXX load or reload ?
++ netmap_load_map(rxr->rxtag, rxbuf->map, addr, adapter->rx_mbuf_sz);
++ /* Update descriptor */
++ rxr->rx_base[j].buffer_addr = htole64(vtophys(addr));
++ bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD);
++ if (slot)
++ slot++;
++ }
++ /* Setup our descriptor indices */
++ NA(adapter->ifp)->rx_rings[rxr->me].nr_hwcur = rxr->next_to_check;
++ }
++#endif /* DEV_NETMAP */
+
+ fail:
+ rxr->next_to_refresh = i;
+@@ -4247,6 +4305,14 @@
+
+ EM_RX_LOCK(rxr);
+
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ selwakeuppri(&NA(ifp)->rx_rings[rxr->me].si, PI_NET);
++ EM_RX_UNLOCK(rxr);
++ return (0);
++ }
++#endif /* DEV_NETMAP */
++
+ for (i = rxr->next_to_check, processed = 0; count != 0;) {
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+Index: dev/re/if_re.c
+===================================================================
+--- dev/re/if_re.c (revision 227552)
++++ dev/re/if_re.c (working copy)
+@@ -291,6 +291,10 @@
+ static void re_setwol (struct rl_softc *);
+ static void re_clrwol (struct rl_softc *);
+
++#ifdef DEV_NETMAP
++#include <dev/netmap/if_re_netmap.h>
++#endif /* DEV_NETMAP */
++
+ #ifdef RE_DIAG
+ static int re_diag (struct rl_softc *);
+ #endif
+@@ -1583,6 +1587,9 @@
+ */
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
++#ifdef DEV_NETMAP
++ re_netmap_attach(sc);
++#endif /* DEV_NETMAP */
+ #ifdef RE_DIAG
+ /*
+ * Perform hardware diagnostic on the original RTL8169.
+@@ -1778,6 +1785,9 @@
+ bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
+ }
+
++#ifdef DEV_NETMAP
++ netmap_detach(ifp);
++#endif /* DEV_NETMAP */
+ if (sc->rl_parent_tag)
+ bus_dma_tag_destroy(sc->rl_parent_tag);
+
+@@ -1952,6 +1962,9 @@
+ sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
+ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
+ sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
++#ifdef DEV_NETMAP
++ re_netmap_tx_init(sc);
++#endif /* DEV_NETMAP */
+ /* Set EOR. */
+ desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
+ desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
+@@ -1979,6 +1992,9 @@
+ if ((error = re_newbuf(sc, i)) != 0)
+ return (error);
+ }
++#ifdef DEV_NETMAP
++ re_netmap_rx_init(sc);
++#endif /* DEV_NETMAP */
+
+ /* Flush the RX descriptors */
+
+@@ -2035,6 +2051,12 @@
+ RL_LOCK_ASSERT(sc);
+
+ ifp = sc->rl_ifp;
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ selwakeuppri(&NA(ifp)->rx_rings->si, PI_NET);
++ return 0;
++ }
++#endif /* DEV_NETMAP */
+ if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
+ jumbo = 1;
+ else
+@@ -2276,6 +2298,12 @@
+ return;
+
+ ifp = sc->rl_ifp;
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ selwakeuppri(&NA(ifp)->tx_rings[0].si, PI_NET);
++ return;
++ }
++#endif /* DEV_NETMAP */
+ /* Invalidate the TX descriptor list */
+ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
+ sc->rl_ldata.rl_tx_list_map,
+@@ -2794,6 +2822,20 @@
+
+ sc = ifp->if_softc;
+
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ struct netmap_kring *kring = &NA(ifp)->tx_rings[0];
++ if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) {
++ /* kick the tx unit */
++ CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
++#ifdef RE_TX_MODERATION
++ CSR_WRITE_4(sc, RL_TIMERCNT, 1);
++#endif
++ sc->rl_watchdog_timer = 5;
++ }
++ return;
++ }
++#endif /* DEV_NETMAP */
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
+ return;
+Index: dev/ixgbe/ixgbe.c
+===================================================================
+--- dev/ixgbe/ixgbe.c (revision 227552)
++++ dev/ixgbe/ixgbe.c (working copy)
+@@ -313,6 +313,10 @@
+ static int fdir_pballoc = 1;
+ #endif
+
++#ifdef DEV_NETMAP
++#include <dev/netmap/ixgbe_netmap.h>
++#endif /* DEV_NETMAP */
++
+ /*********************************************************************
+ * Device identification routine
+ *
+@@ -578,6 +582,9 @@
+
+ ixgbe_add_hw_stats(adapter);
+
++#ifdef DEV_NETMAP
++ ixgbe_netmap_attach(adapter);
++#endif /* DEV_NETMAP */
+ INIT_DEBUGOUT("ixgbe_attach: end");
+ return (0);
+ err_late:
+@@ -652,6 +659,9 @@
+
+ ether_ifdetach(adapter->ifp);
+ callout_drain(&adapter->timer);
++#ifdef DEV_NETMAP
++ netmap_detach(adapter->ifp);
++#endif /* DEV_NETMAP */
+ ixgbe_free_pci_resources(adapter);
+ bus_generic_detach(dev);
+ if_free(adapter->ifp);
+@@ -1719,6 +1729,7 @@
+ if (++i == adapter->num_tx_desc)
+ i = 0;
+
++ // XXX should we sync each buffer ?
+ txbuf->m_head = NULL;
+ txbuf->eop_index = -1;
+ }
+@@ -2813,6 +2824,10 @@
+ struct adapter *adapter = txr->adapter;
+ struct ixgbe_tx_buf *txbuf;
+ int i;
++#ifdef DEV_NETMAP
++ struct netmap_slot *slot = netmap_reset(NA(adapter->ifp),
++ NR_TX, txr->me, 0);
++#endif
+
+ /* Clear the old ring contents */
+ IXGBE_TX_LOCK(txr);
+@@ -2832,6 +2847,13 @@
+ m_freem(txbuf->m_head);
+ txbuf->m_head = NULL;
+ }
++#ifdef DEV_NETMAP
++ if (slot) {
++ netmap_load_map(txr->txtag, txbuf->map,
++ NMB(slot), adapter->rx_mbuf_sz);
++ slot++;
++ }
++#endif /* DEV_NETMAP */
+ /* Clear the EOP index */
+ txbuf->eop_index = -1;
+ }
+@@ -3310,6 +3332,20 @@
+
+ mtx_assert(&txr->tx_mtx, MA_OWNED);
+
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ struct netmap_adapter *na = NA(ifp);
++
++ selwakeuppri(&na->tx_rings[txr->me].si, PI_NET);
++ IXGBE_TX_UNLOCK(txr);
++ IXGBE_CORE_LOCK(adapter);
++ selwakeuppri(&na->tx_rings[na->num_queues + 1].si, PI_NET);
++ IXGBE_CORE_UNLOCK(adapter);
++ IXGBE_TX_LOCK(txr); // the caller is supposed to own the lock
++ return (FALSE);
++ }
++#endif /* DEV_NETMAP */
++
+ if (txr->tx_avail == adapter->num_tx_desc) {
+ txr->queue_status = IXGBE_QUEUE_IDLE;
+ return FALSE;
+@@ -3698,6 +3734,10 @@
+ bus_dma_segment_t pseg[1], hseg[1];
+ struct lro_ctrl *lro = &rxr->lro;
+ int rsize, nsegs, error = 0;
++#ifdef DEV_NETMAP
++ struct netmap_slot *slot = netmap_reset(NA(rxr->adapter->ifp),
++ NR_RX, rxr->me, 0);
++#endif /* DEV_NETMAP */
+
+ adapter = rxr->adapter;
+ ifp = adapter->ifp;
+@@ -3721,6 +3761,18 @@
+ struct mbuf *mh, *mp;
+
+ rxbuf = &rxr->rx_buffers[j];
++#ifdef DEV_NETMAP
++ if (slot) {
++ netmap_load_map(rxr->ptag,
++ rxbuf->pmap, NMB(slot),
++ adapter->rx_mbuf_sz);
++ /* Update descriptor */
++ rxr->rx_base[j].read.pkt_addr =
++ htole64(vtophys(NMB(slot)));
++ slot++;
++ continue;
++ }
++#endif /* DEV_NETMAP */
+ /*
+ ** Don't allocate mbufs if not
+ ** doing header split, its wasteful
+@@ -4148,6 +4200,18 @@
+
+ IXGBE_RX_LOCK(rxr);
+
++#ifdef DEV_NETMAP
++ if (ifp->if_capenable & IFCAP_NETMAP) {
++ struct netmap_adapter *na = NA(ifp);
++
++ selwakeuppri(&na->rx_rings[rxr->me].si, PI_NET);
++ IXGBE_RX_UNLOCK(rxr);
++ IXGBE_CORE_LOCK(adapter);
++ selwakeuppri(&na->rx_rings[na->num_queues + 1].si, PI_NET);
++ IXGBE_CORE_UNLOCK(adapter);
++ return (0);
++ }
++#endif /* DEV_NETMAP */
+ for (i = rxr->next_to_check; count != 0;) {
+ struct mbuf *sendmp, *mh, *mp;
+ u32 rsc, ptype;
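head.diff above collects every change needed to run netmap on a stock tree: build glue in conf/NOTES, conf/files and conf/options, plus per-driver hooks in igb, lem, em, re and ixgbe (netmap_attach()/netmap_detach() at attach/detach time, netmap_reset() in the ring-init paths, and an IFCAP_NETMAP shortcut in the interrupt handlers). The shortcut repeated in each txeof/rxeof routine has this generic shape (a fragment; ring_id stands in for the per-driver queue index):

#ifdef DEV_NETMAP
	if (ifp->if_capenable & IFCAP_NETMAP) {
		/*
		 * In netmap mode the interrupt path does no ring
		 * processing itself: it only wakes up threads blocked
		 * in poll()/select(), and the txsync/rxsync methods
		 * perform the actual reconciliation.
		 */
		selwakeuppri(&NA(ifp)->rx_rings[ring_id].si, PI_NET);
		return;
	}
#endif /* DEV_NETMAP */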
diff --git a/sys/dev/netmap/if_em_netmap.h b/sys/dev/netmap/if_em_netmap.h
new file mode 100644
index 0000000..0e220e7
--- /dev/null
+++ b/sys/dev/netmap/if_em_netmap.h
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: if_em_netmap.h 9662 2011-11-16 13:18:06Z luigi $
+ *
+ * netmap changes for if_em.
+ */
+
+#include <net/netmap.h>
+#include <sys/selinfo.h>
+#include <vm/vm.h>
+#include <vm/pmap.h> /* vtophys ? */
+#include <dev/netmap/netmap_kern.h>
+
+static void em_netmap_block_tasks(struct adapter *);
+static void em_netmap_unblock_tasks(struct adapter *);
+static int em_netmap_reg(struct ifnet *, int onoff);
+static int em_netmap_txsync(void *, u_int, int);
+static int em_netmap_rxsync(void *, u_int, int);
+static void em_netmap_lock_wrapper(void *, int, u_int);
+
+static void
+em_netmap_attach(struct adapter *adapter)
+{
+ struct netmap_adapter na;
+
+ bzero(&na, sizeof(na));
+
+ na.ifp = adapter->ifp;
+ na.separate_locks = 1;
+ na.num_tx_desc = adapter->num_tx_desc;
+ na.num_rx_desc = adapter->num_rx_desc;
+ na.nm_txsync = em_netmap_txsync;
+ na.nm_rxsync = em_netmap_rxsync;
+ na.nm_lock = em_netmap_lock_wrapper;
+ na.nm_register = em_netmap_reg;
+ /*
+	 * adapter->rx_mbuf_sz is set by SIOCSIFMTU, but in netmap mode
+	 * we allocate the buffers on the first register. So we must
+	 * disallow a SIOCSIFMTU when if_capenable & IFCAP_NETMAP is set.
+ */
+ na.buff_size = MCLBYTES;
+ netmap_attach(&na, adapter->num_queues);
+}
+
+
+/*
+ * wrapper to export locks to the generic code
+ */
+static void
+em_netmap_lock_wrapper(void *_a, int what, u_int queueid)
+{
+ struct adapter *adapter = _a;
+
+ ASSERT(queueid < adapter->num_queues);
+ switch (what) {
+ case NETMAP_CORE_LOCK:
+ EM_CORE_LOCK(adapter);
+ break;
+ case NETMAP_CORE_UNLOCK:
+ EM_CORE_UNLOCK(adapter);
+ break;
+ case NETMAP_TX_LOCK:
+ EM_TX_LOCK(&adapter->tx_rings[queueid]);
+ break;
+ case NETMAP_TX_UNLOCK:
+ EM_TX_UNLOCK(&adapter->tx_rings[queueid]);
+ break;
+ case NETMAP_RX_LOCK:
+ EM_RX_LOCK(&adapter->rx_rings[queueid]);
+ break;
+ case NETMAP_RX_UNLOCK:
+ EM_RX_UNLOCK(&adapter->rx_rings[queueid]);
+ break;
+ }
+}
+
+
+static void
+em_netmap_block_tasks(struct adapter *adapter)
+{
+ if (adapter->msix > 1) { /* MSIX */
+ int i;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
+
+ for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
+ taskqueue_block(txr->tq);
+ taskqueue_drain(txr->tq, &txr->tx_task);
+ taskqueue_block(rxr->tq);
+ taskqueue_drain(rxr->tq, &rxr->rx_task);
+ }
+ } else { /* legacy */
+ taskqueue_block(adapter->tq);
+ taskqueue_drain(adapter->tq, &adapter->link_task);
+ taskqueue_drain(adapter->tq, &adapter->que_task);
+ }
+}
+
+
+static void
+em_netmap_unblock_tasks(struct adapter *adapter)
+{
+ if (adapter->msix > 1) {
+ struct tx_ring *txr = adapter->tx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
+ int i;
+
+		for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
+ taskqueue_unblock(txr->tq);
+ taskqueue_unblock(rxr->tq);
+ }
+ } else { /* legacy */
+ taskqueue_unblock(adapter->tq);
+ }
+}
+
+/*
+ * Register/unregister routine
+ */
+static int
+em_netmap_reg(struct ifnet *ifp, int onoff)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct netmap_adapter *na = NA(ifp);
+ int error = 0;
+
+ if (na == NULL)
+ return EINVAL; /* no netmap support here */
+
+ em_disable_intr(adapter);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ em_netmap_block_tasks(adapter);
+
+ if (onoff) {
+ ifp->if_capenable |= IFCAP_NETMAP;
+
+ /* save if_transmit for later restore.
+ * XXX also if_start and if_qflush ?
+ */
+ na->if_transmit = ifp->if_transmit;
+ ifp->if_transmit = netmap_start;
+
+ em_init_locked(adapter);
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
+ error = ENOMEM;
+ goto fail;
+ }
+ } else {
+fail:
+ /* restore if_transmit */
+ ifp->if_transmit = na->if_transmit;
+ ifp->if_capenable &= ~IFCAP_NETMAP;
+		em_init_locked(adapter);	/* also enables intr */
+
+ }
+ em_netmap_unblock_tasks(adapter);
+ return (error);
+}
+
+/*
+ * Reconcile hardware and user view of the transmit ring, see
+ * ixgbe.c for details.
+ */
+static int
+em_netmap_txsync(void *a, u_int ring_nr, int do_lock)
+{
+ struct adapter *adapter = a;
+ struct tx_ring *txr = &adapter->tx_rings[ring_nr];
+ struct netmap_adapter *na = NA(adapter->ifp);
+ struct netmap_kring *kring = &na->tx_rings[ring_nr];
+ struct netmap_ring *ring = kring->ring;
+ int j, k, n, lim = kring->nkr_num_slots - 1;
+
+ /* generate an interrupt approximately every half ring */
+ int report_frequency = kring->nkr_num_slots >> 1;
+
+ k = ring->cur;
+ if ( (kring->nr_kflags & NR_REINIT) || k > lim)
+ return netmap_ring_reinit(kring);
+
+ if (do_lock)
+ EM_TX_LOCK(txr);
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+	/* record completed transmissions. TODO:
+ *
+ * instead of using TDH, we could read the transmitted status bit.
+ */
+ j = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr));
+ if (j >= kring->nkr_num_slots) { /* XXX can happen */
+ D("TDH wrap %d", j);
+ j -= kring->nkr_num_slots;
+ }
+ int delta = j - txr->next_to_clean;
+ if (delta) {
+ /* new transmissions were completed, increment
+ ring->nr_hwavail. */
+ if (delta < 0)
+ delta += kring->nkr_num_slots;
+ txr->next_to_clean = j;
+ kring->nr_hwavail += delta;
+ }
+
+ /* update avail to what the hardware knows */
+ ring->avail = kring->nr_hwavail;
+
+ j = kring->nr_hwcur;
+ if (j != k) { /* we have packets to send */
+ n = 0;
+ while (j != k) {
+ struct netmap_slot *slot = &ring->slot[j];
+ struct e1000_tx_desc *curr = &txr->tx_base[j];
+ struct em_buffer *txbuf = &txr->tx_buffers[j];
+ int flags = ((slot->flags & NS_REPORT) ||
+ j == 0 || j == report_frequency) ?
+ E1000_TXD_CMD_RS : 0;
+ void *addr = NMB(slot);
+ int len = slot->len;
+ if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
+ if (do_lock)
+ EM_TX_UNLOCK(txr);
+ return netmap_ring_reinit(kring);
+ }
+
+ slot->flags &= ~NS_REPORT;
+ curr->upper.data = 0;
+ curr->lower.data =
+ htole32(
+ adapter->txd_cmd |
+ (E1000_TXD_CMD_EOP | flags) |
+ slot->len);
+ if (slot->flags & NS_BUF_CHANGED) {
+ curr->buffer_addr = htole64(vtophys(addr));
+ /* buffer has changed, unload and reload map */
+ netmap_reload_map(txr->txtag, txbuf->map,
+ addr, na->buff_size);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+
+ bus_dmamap_sync(txr->txtag, txbuf->map,
+ BUS_DMASYNC_PREWRITE);
+ j = (j == lim) ? 0 : j + 1;
+ n++;
+ }
+ kring->nr_hwcur = ring->cur;
+
+ /* decrease avail by number of sent packets */
+ ring->avail -= n;
+ kring->nr_hwavail = ring->avail;
+
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me),
+ ring->cur);
+ }
+ if (do_lock)
+ EM_TX_UNLOCK(txr);
+ return 0;
+}
+
+/*
+ * Reconcile kernel and user view of the receive ring, see ixgbe.c
+ */
+static int
+em_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
+{
+ struct adapter *adapter = a;
+ struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
+ struct netmap_adapter *na = NA(adapter->ifp);
+ struct netmap_kring *kring = &na->rx_rings[ring_nr];
+ struct netmap_ring *ring = kring->ring;
+ int j, k, n, lim = kring->nkr_num_slots - 1;
+
+ k = ring->cur;
+ if ( (kring->nr_kflags & NR_REINIT) || k > lim)
+ return netmap_ring_reinit(kring);
+
+ if (do_lock)
+ EM_RX_LOCK(rxr);
+ /* XXX check sync modes */
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ /* acknowledge all the received packets. */
+ j = rxr->next_to_check;
+ for (n = 0; ; n++) {
+ struct e1000_rx_desc *curr = &rxr->rx_base[j];
+
+ if ((curr->status & E1000_RXD_STAT_DD) == 0)
+ break;
+ ring->slot[j].len = le16toh(curr->length);
+ bus_dmamap_sync(rxr->tag, rxr->rx_buffers[j].map,
+ BUS_DMASYNC_POSTREAD);
+ j = (j == lim) ? 0 : j + 1;
+ }
+ if (n) {
+ rxr->next_to_check = j;
+ kring->nr_hwavail += n;
+ }
+
+	/* skip past packets that userspace has already processed,
+ * making them available for reception.
+ * advance nr_hwcur and issue a bus_dmamap_sync on the
+ * buffers so it is safe to write to them.
+	 * Also decrease nr_hwavail accordingly.
+ */
+ j = kring->nr_hwcur;
+ if (j != k) { /* userspace has read some packets. */
+ n = 0;
+ while (j != k) {
+ struct netmap_slot *slot = &ring->slot[j];
+ struct e1000_rx_desc *curr = &rxr->rx_base[j];
+ struct em_buffer *rxbuf = &rxr->rx_buffers[j];
+ void *addr = NMB(slot);
+
+ if (addr == netmap_buffer_base) { /* bad buf */
+ if (do_lock)
+ EM_RX_UNLOCK(rxr);
+ return netmap_ring_reinit(kring);
+ }
+
+ curr->status = 0;
+ if (slot->flags & NS_BUF_CHANGED) {
+ curr->buffer_addr = htole64(vtophys(addr));
+ /* buffer has changed, unload and reload map */
+ netmap_reload_map(rxr->rxtag, rxbuf->map,
+ addr, na->buff_size);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+
+ bus_dmamap_sync(rxr->rxtag, rxbuf->map,
+ BUS_DMASYNC_PREREAD);
+
+ j = (j == lim) ? 0 : j + 1;
+ n++;
+ }
+ kring->nr_hwavail -= n;
+ kring->nr_hwcur = ring->cur;
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ /*
+ * IMPORTANT: we must leave one free slot in the ring,
+ * so move j back by one unit
+ */
+ j = (j == 0) ? lim : j - 1;
+ E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), j);
+ }
+ /* tell userspace that there are new packets */
+	ring->avail = kring->nr_hwavail;
+ if (do_lock)
+ EM_RX_UNLOCK(rxr);
+ return 0;
+}
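The completed-transmission accounting in em_netmap_txsync() above is modular ring arithmetic: the distance from the last slot the kernel reconciled to the hardware head, wrapped at the ring size. The same step isolated as a helper (a sketch; the drivers inline it):

/* Slots the NIC has consumed since the last sync. 'hw_head' is the
 * value read from the TDH register, 'num_slots' the ring size. */
static int
tx_completed_delta(int hw_head, int next_to_clean, int num_slots)
{
	int delta = hw_head - next_to_clean;

	if (delta < 0)		/* hardware head wrapped past slot 0 */
		delta += num_slots;
	return (delta);
}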
diff --git a/sys/dev/netmap/if_igb_netmap.h b/sys/dev/netmap/if_igb_netmap.h
new file mode 100644
index 0000000..0c14706
--- /dev/null
+++ b/sys/dev/netmap/if_igb_netmap.h
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2011 Universita` di Pisa. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: if_igb_netmap.h 9662 2011-11-16 13:18:06Z luigi $
+ *
+ * netmap modifications for igb
+ * contributed by Ahmed Kooli
+ */
+
+#include <net/netmap.h>
+#include <sys/selinfo.h>
+#include <vm/vm.h>
+#include <vm/pmap.h> /* vtophys ? */
+#include <dev/netmap/netmap_kern.h>
+
+static int igb_netmap_reg(struct ifnet *, int onoff);
+static int igb_netmap_txsync(void *, u_int, int);
+static int igb_netmap_rxsync(void *, u_int, int);
+static void igb_netmap_lock_wrapper(void *, int, u_int);
+
+
+static void
+igb_netmap_attach(struct adapter *adapter)
+{
+ struct netmap_adapter na;
+
+ bzero(&na, sizeof(na));
+
+ na.ifp = adapter->ifp;
+ na.separate_locks = 1;
+ na.num_tx_desc = adapter->num_tx_desc;
+ na.num_rx_desc = adapter->num_rx_desc;
+ na.nm_txsync = igb_netmap_txsync;
+ na.nm_rxsync = igb_netmap_rxsync;
+ na.nm_lock = igb_netmap_lock_wrapper;
+ na.nm_register = igb_netmap_reg;
+ /*
+	 * adapter->rx_mbuf_sz is set by SIOCSIFMTU, but in netmap mode
+	 * we allocate the buffers on the first register. So we must
+	 * disallow a SIOCSIFMTU when if_capenable & IFCAP_NETMAP is set.
+ */
+ na.buff_size = MCLBYTES;
+ netmap_attach(&na, adapter->num_queues);
+}
+
+
+/*
+ * wrapper to export locks to the generic code
+ */
+static void
+igb_netmap_lock_wrapper(void *_a, int what, u_int queueid)
+{
+ struct adapter *adapter = _a;
+
+ ASSERT(queueid < adapter->num_queues);
+ switch (what) {
+ case NETMAP_CORE_LOCK:
+ IGB_CORE_LOCK(adapter);
+ break;
+ case NETMAP_CORE_UNLOCK:
+ IGB_CORE_UNLOCK(adapter);
+ break;
+ case NETMAP_TX_LOCK:
+ IGB_TX_LOCK(&adapter->tx_rings[queueid]);
+ break;
+ case NETMAP_TX_UNLOCK:
+ IGB_TX_UNLOCK(&adapter->tx_rings[queueid]);
+ break;
+ case NETMAP_RX_LOCK:
+ IGB_RX_LOCK(&adapter->rx_rings[queueid]);
+ break;
+ case NETMAP_RX_UNLOCK:
+ IGB_RX_UNLOCK(&adapter->rx_rings[queueid]);
+ break;
+ }
+}
+
+
+/*
+ * Support for netmap register/unregister. We are already under core lock;
+ * only called on the first register or the last unregister.
+ */
+static int
+igb_netmap_reg(struct ifnet *ifp, int onoff)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct netmap_adapter *na = NA(ifp);
+ int error = 0;
+
+ if (!na)
+ return EINVAL;
+
+ igb_disable_intr(adapter);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ if (onoff) {
+ ifp->if_capenable |= IFCAP_NETMAP;
+
+ /* save if_transmit to restore it later */
+ na->if_transmit = ifp->if_transmit;
+ ifp->if_transmit = netmap_start;
+
+ igb_init_locked(adapter);
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
+ error = ENOMEM;
+ goto fail;
+ }
+ } else {
+fail:
+ /* restore if_transmit */
+ ifp->if_transmit = na->if_transmit;
+ ifp->if_capenable &= ~IFCAP_NETMAP;
+ igb_init_locked(adapter); /* also enables intr */
+ }
+ return (error);
+}
+
+
+/*
+ * Reconcile kernel and user view of the transmit ring.
+ *
+ * Userspace has filled tx slots up to cur (excluded).
+ * The last unused slot previously known to the kernel was nr_hwcur,
+ * and the last interrupt reported nr_hwavail slots available
+ * (using the special value -1 to indicate idle transmit ring).
+ * The function must first update avail to what the kernel
+ * knows, subtract the newly used slots (cur - nr_hwcur)
+ * from both avail and nr_hwavail, and set nr_hwcur = cur
+ * issuing a dmamap_sync on all slots.
+ *
+ * Check parameters in the struct netmap_ring.
+ * We don't use avail, only check for bogus values.
+ * Make sure cur is valid, and same goes for buffer indexes and lengths.
+ * To avoid races, read the values once, and never use those from
+ * the ring afterwards.
+ */
+static int
+igb_netmap_txsync(void *a, u_int ring_nr, int do_lock)
+{
+ struct adapter *adapter = a;
+ struct tx_ring *txr = &adapter->tx_rings[ring_nr];
+ struct netmap_adapter *na = NA(adapter->ifp);
+ struct netmap_kring *kring = &na->tx_rings[ring_nr];
+ struct netmap_ring *ring = kring->ring;
+ int j, k, n, lim = kring->nkr_num_slots - 1;
+
+ /* generate an interrupt approximately every half ring */
+ int report_frequency = kring->nkr_num_slots >> 1;
+
+ k = ring->cur; /* ring is not protected by any lock */
+ if ( (kring->nr_kflags & NR_REINIT) || k > lim)
+ return netmap_ring_reinit(kring);
+
+ if (do_lock)
+ IGB_TX_LOCK(txr);
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+ /* record completed transmissions. TODO
+ *
+	 * Instead of reading from the TDH register, we could check
+	 * the status bit of the transmitted descriptors.
+ */
+ j = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr));
+ if (j >= kring->nkr_num_slots) /* XXX can it happen ? */
+ j -= kring->nkr_num_slots;
+ int delta = j - txr->next_to_clean;
+ if (delta) {
+ /* new tx were completed */
+ if (delta < 0)
+ delta += kring->nkr_num_slots;
+ txr->next_to_clean = j;
+ kring->nr_hwavail += delta;
+ }
+
+ /* update avail to what the hardware knows */
+ ring->avail = kring->nr_hwavail;
+
+ j = kring->nr_hwcur;
+ if (j != k) { /* we have new packets to send */
+ u32 olinfo_status = 0;
+ n = 0;
+
+ /* 82575 needs the queue index added */
+ if (adapter->hw.mac.type == e1000_82575)
+ olinfo_status |= txr->me << 4;
+
+ while (j != k) {
+ struct netmap_slot *slot = &ring->slot[j];
+ struct igb_tx_buffer *txbuf = &txr->tx_buffers[j];
+ union e1000_adv_tx_desc *curr =
+ (union e1000_adv_tx_desc *)&txr->tx_base[j];
+ void *addr = NMB(slot);
+ int flags = ((slot->flags & NS_REPORT) ||
+ j == 0 || j == report_frequency) ?
+ E1000_ADVTXD_DCMD_RS : 0;
+ int len = slot->len;
+
+ if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
+ if (do_lock)
+ IGB_TX_UNLOCK(txr);
+ return netmap_ring_reinit(kring);
+ }
+
+ slot->flags &= ~NS_REPORT;
+ curr->read.buffer_addr = htole64(vtophys(addr));
+ curr->read.olinfo_status =
+ htole32(olinfo_status |
+			    (len << E1000_ADVTXD_PAYLEN_SHIFT));
+ curr->read.cmd_type_len =
+ htole32(len | E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_IFCS |
+ E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_EOP | flags);
+ if (slot->flags & NS_BUF_CHANGED) {
+ /* buffer has changed, unload and reload map */
+ netmap_reload_map(txr->txtag, txbuf->map,
+ addr, na->buff_size);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+
+ bus_dmamap_sync(txr->txtag, txbuf->map,
+ BUS_DMASYNC_PREWRITE);
+ j = (j == lim) ? 0 : j + 1;
+ n++;
+ }
+ kring->nr_hwcur = k;
+
+ /* decrease avail by number of sent packets */
+ ring->avail -= n;
+ kring->nr_hwavail = ring->avail;
+
+ /* Set the watchdog */
+ txr->queue_status = IGB_QUEUE_WORKING;
+ txr->watchdog_time = ticks;
+
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), k);
+ }
+ if (do_lock)
+ IGB_TX_UNLOCK(txr);
+ return 0;
+}
+
+
+/*
+ * Reconcile kernel and user view of the receive ring.
+ *
+ * Userspace has read rx slots up to cur (excluded).
+ * The last unread slot previously known to the kernel was nr_hwcur,
+ * and the last interrupt reported nr_hwavail slots available.
+ * We must subtract the newly consumed slots (cur - nr_hwcur)
+ * from nr_hwavail, clearing the descriptors for the next
+ * read, tell the hardware that they are available,
+ * and set nr_hwcur = cur and avail = nr_hwavail.
+ * issuing a dmamap_sync on all slots.
+ */
+static int
+igb_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
+{
+ struct adapter *adapter = a;
+ struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
+ struct netmap_adapter *na = NA(adapter->ifp);
+ struct netmap_kring *kring = &na->rx_rings[ring_nr];
+ struct netmap_ring *ring = kring->ring;
+ int j, k, n, lim = kring->nkr_num_slots - 1;
+
+ k = ring->cur; /* ring is not protected by any lock */
+ if ( (kring->nr_kflags & NR_REINIT) || k > lim)
+ return netmap_ring_reinit(kring);
+
+ if (do_lock)
+ IGB_RX_LOCK(rxr);
+
+ /* Sync the ring. */
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ j = rxr->next_to_check;
+ for (n = 0; ; n++) {
+ union e1000_adv_rx_desc *curr = &rxr->rx_base[j];
+ uint32_t staterr = le32toh(curr->wb.upper.status_error);
+
+ if ((staterr & E1000_RXD_STAT_DD) == 0)
+ break;
+ ring->slot[j].len = le16toh(curr->wb.upper.length);
+
+ bus_dmamap_sync(rxr->ptag,
+ rxr->rx_buffers[j].pmap, BUS_DMASYNC_POSTREAD);
+ j = (j == lim) ? 0 : j + 1;
+ }
+ if (n) {
+ rxr->next_to_check = j;
+ kring->nr_hwavail += n;
+ if (kring->nr_hwavail >= lim - 10) {
+ ND("rx ring %d almost full %d", ring_nr, kring->nr_hwavail);
+ }
+ }
+
+ /* skip past packets that userspace has already processed,
+ * making them available for reception.
+ * advance nr_hwcur and issue a bus_dmamap_sync on the
+ * buffers so it is safe to write to them.
+	 * Also decrease nr_hwavail accordingly.
+ */
+ j = kring->nr_hwcur;
+ if (j != k) { /* userspace has read some packets. */
+ n = 0;
+ while (j != k) {
+ struct netmap_slot *slot = ring->slot + j;
+ union e1000_adv_rx_desc *curr = &rxr->rx_base[j];
+ struct igb_rx_buf *rxbuf = rxr->rx_buffers + j;
+ void *addr = NMB(slot);
+
+ if (addr == netmap_buffer_base) { /* bad buf */
+ if (do_lock)
+ IGB_RX_UNLOCK(rxr);
+ return netmap_ring_reinit(kring);
+ }
+
+ curr->wb.upper.status_error = 0;
+ curr->read.pkt_addr = htole64(vtophys(addr));
+ if (slot->flags & NS_BUF_CHANGED) {
+ netmap_reload_map(rxr->ptag, rxbuf->pmap,
+ addr, na->buff_size);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_PREREAD);
+
+ j = (j == lim) ? 0 : j + 1;
+ n++;
+ }
+ kring->nr_hwavail -= n;
+ kring->nr_hwcur = ring->cur;
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ /* IMPORTANT: we must leave one free slot in the ring,
+ * so move j back by one unit
+ */
+ j = (j == 0) ? lim : j - 1;
+ E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), j);
+ }
+ /* tell userspace that there are new packets */
+	ring->avail = kring->nr_hwavail;
+ if (do_lock)
+ IGB_RX_UNLOCK(rxr);
+ return 0;
+}
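Both txsync routines above moderate completion interrupts by requesting a report only on selected descriptors: whenever userspace set NS_REPORT on the slot, and otherwise roughly twice per ring traversal (at slot 0 and at the half-ring mark). The predicate in isolation (a sketch of the expression the drivers inline; NS_REPORT comes from net/netmap.h):

static int
want_txd_report(uint16_t slot_flags, int j, int report_frequency)
{
	/* report_frequency is nkr_num_slots >> 1, i.e. half the ring */
	return ((slot_flags & NS_REPORT) ||
	    j == 0 || j == report_frequency);
}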
diff --git a/sys/dev/netmap/if_lem_netmap.h b/sys/dev/netmap/if_lem_netmap.h
new file mode 100644
index 0000000..a8f3498
--- /dev/null
+++ b/sys/dev/netmap/if_lem_netmap.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: if_lem_netmap.h 9662 2011-11-16 13:18:06Z luigi $
+ *
+ * netmap support for if_lem.c
+ */
+
+#include <net/netmap.h>
+#include <sys/selinfo.h>
+#include <vm/vm.h>
+#include <vm/pmap.h> /* vtophys ? */
+#include <dev/netmap/netmap_kern.h>
+
+static int lem_netmap_reg(struct ifnet *, int onoff);
+static int lem_netmap_txsync(void *, u_int, int);
+static int lem_netmap_rxsync(void *, u_int, int);
+static void lem_netmap_lock_wrapper(void *, int, u_int);
+
+
+SYSCTL_NODE(_dev, OID_AUTO, lem, CTLFLAG_RW, 0, "lem card");
+
+static void
+lem_netmap_attach(struct adapter *adapter)
+{
+ struct netmap_adapter na;
+
+ bzero(&na, sizeof(na));
+
+ na.ifp = adapter->ifp;
+ na.separate_locks = 1;
+ na.num_tx_desc = adapter->num_tx_desc;
+ na.num_rx_desc = adapter->num_rx_desc;
+ na.nm_txsync = lem_netmap_txsync;
+ na.nm_rxsync = lem_netmap_rxsync;
+ na.nm_lock = lem_netmap_lock_wrapper;
+ na.nm_register = lem_netmap_reg;
+ na.buff_size = MCLBYTES;
+ netmap_attach(&na, 1);
+}
+
+
+static void
+lem_netmap_lock_wrapper(void *_a, int what, u_int ringid)
+{
+ struct adapter *adapter = _a;
+
+ /* only one ring here so ignore the ringid */
+ switch (what) {
+ case NETMAP_CORE_LOCK:
+ EM_CORE_LOCK(adapter);
+ break;
+ case NETMAP_CORE_UNLOCK:
+ EM_CORE_UNLOCK(adapter);
+ break;
+ case NETMAP_TX_LOCK:
+ EM_TX_LOCK(adapter);
+ break;
+ case NETMAP_TX_UNLOCK:
+ EM_TX_UNLOCK(adapter);
+ break;
+ case NETMAP_RX_LOCK:
+ EM_RX_LOCK(adapter);
+ break;
+ case NETMAP_RX_UNLOCK:
+ EM_RX_UNLOCK(adapter);
+ break;
+ }
+}
+
+
+/*
+ * Reconcile kernel and user view of the transmit ring, see ixgbe.c
+ */
+static int
+lem_netmap_txsync(void *a, u_int ring_nr, int do_lock)
+{
+ struct adapter *adapter = a;
+ struct netmap_adapter *na = NA(adapter->ifp);
+ struct netmap_kring *kring = &na->tx_rings[0];
+ struct netmap_ring *ring = kring->ring;
+ int j, k, n, lim = kring->nkr_num_slots - 1;
+
+ /* generate an interrupt approximately every half ring */
+ int report_frequency = kring->nkr_num_slots >> 1;
+
+ k = ring->cur;
+ if ( (kring->nr_kflags & NR_REINIT) || k > lim)
+ return netmap_ring_reinit(kring);
+
+ if (do_lock)
+ EM_TX_LOCK(adapter);
+ bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+	/* record completed transmissions. TODO:
+ *
+ * instead of using TDH, we could read the transmitted status bit.
+ */
+ j = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
+ if (j >= kring->nkr_num_slots) { /* can it happen ? */
+ D("bad TDH %d", j);
+ j -= kring->nkr_num_slots;
+ }
+ int delta = j - adapter->next_tx_to_clean;
+ if (delta) {
+ if (delta < 0)
+ delta += kring->nkr_num_slots;
+ adapter->next_tx_to_clean = j;
+ kring->nr_hwavail += delta;
+ }
+
+ /* update avail to what the hardware knows */
+ ring->avail = kring->nr_hwavail;
+
+ j = kring->nr_hwcur;
+ if (j != k) { /* we have new packets to send */
+ n = 0;
+ while (j != k) {
+ struct netmap_slot *slot = &ring->slot[j];
+ struct e1000_tx_desc *curr = &adapter->tx_desc_base[j];
+ struct em_buffer *txbuf = &adapter->tx_buffer_area[j];
+ void *addr = NMB(slot);
+ int flags = ((slot->flags & NS_REPORT) ||
+ j == 0 || j == report_frequency) ?
+ E1000_TXD_CMD_RS : 0;
+ int len = slot->len;
+
+ if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
+ if (do_lock)
+ EM_TX_UNLOCK(adapter);
+ return netmap_ring_reinit(kring);
+ }
+
+ curr->upper.data = 0;
+ /* always interrupt. XXX make it conditional */
+ curr->lower.data =
+ htole32( adapter->txd_cmd | len |
+ (E1000_TXD_CMD_EOP | flags) );
+ if (slot->flags & NS_BUF_CHANGED) {
+ curr->buffer_addr = htole64(vtophys(addr));
+ /* buffer has changed, unload and reload map */
+ netmap_reload_map(adapter->txtag, txbuf->map,
+ addr, na->buff_size);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+
+ bus_dmamap_sync(adapter->txtag, txbuf->map,
+ BUS_DMASYNC_PREWRITE);
+ j = (j == lim) ? 0 : j + 1;
+ n++;
+ }
+ kring->nr_hwcur = ring->cur;
+
+ /* decrease avail by number of sent packets */
+ ring->avail -= n;
+ kring->nr_hwavail = ring->avail;
+
+ bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), ring->cur);
+ }
+ if (do_lock)
+ EM_TX_UNLOCK(adapter);
+ return 0;
+}
+
+
+/*
+ * Reconcile kernel and user view of the receive ring, see ixgbe.c
+ */
+static int
+lem_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
+{
+ struct adapter *adapter = a;
+ struct netmap_adapter *na = NA(adapter->ifp);
+ struct netmap_kring *kring = &na->rx_rings[0];
+ struct netmap_ring *ring = kring->ring;
+ int j, k, n, lim = kring->nkr_num_slots - 1;
+
+ k = ring->cur;
+ if ( (kring->nr_kflags & NR_REINIT) || k > lim)
+ return netmap_ring_reinit(kring);
+
+ if (do_lock)
+ EM_RX_LOCK(adapter);
+ /* XXX check sync modes */
+ bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+	/* acknowledge all the received packets. */
+ j = adapter->next_rx_desc_to_check;
+ for (n = 0; ; n++) {
+ struct e1000_rx_desc *curr = &adapter->rx_desc_base[j];
+ int len = le16toh(adapter->rx_desc_base[j].length) - 4; // CRC
+
+ if ((curr->status & E1000_RXD_STAT_DD) == 0)
+ break;
+
+ if (len < 0) {
+ D("bogus pkt size at %d", j);
+ len = 0;
+ }
+ ring->slot[j].len = len;
+ bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[j].map,
+ BUS_DMASYNC_POSTREAD);
+ j = (j == lim) ? 0 : j + 1;
+ }
+ if (n) {
+ adapter->next_rx_desc_to_check = j;
+ kring->nr_hwavail += n;
+ }
+
+ /* skip past packets that userspace has already processed,
+ * making them available for reception. We don't need to set
+ * the length as it is the same for all slots.
+ */
+ j = kring->nr_hwcur;
+ if (j != k) { /* userspace has read some packets. */
+ n = 0;
+ while (j != k) {
+ struct netmap_slot *slot = &ring->slot[j];
+ struct e1000_rx_desc *curr = &adapter->rx_desc_base[j];
+ struct em_buffer *rxbuf = &adapter->rx_buffer_area[j];
+ void *addr = NMB(slot);
+
+ if (addr == netmap_buffer_base) { /* bad buf */
+ if (do_lock)
+ EM_RX_UNLOCK(adapter);
+ return netmap_ring_reinit(kring);
+ }
+ curr = &adapter->rx_desc_base[j];
+ curr->status = 0;
+ if (slot->flags & NS_BUF_CHANGED) {
+ curr->buffer_addr = htole64(vtophys(addr));
+ /* buffer has changed, unload and reload map */
+ netmap_reload_map(adapter->rxtag, rxbuf->map,
+ addr, na->buff_size);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+
+ bus_dmamap_sync(adapter->rxtag, rxbuf->map,
+ BUS_DMASYNC_PREREAD);
+
+ j = (j == lim) ? 0 : j + 1;
+ n++;
+ }
+ kring->nr_hwavail -= n;
+ kring->nr_hwcur = ring->cur;
+ bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ /*
+ * IMPORTANT: we must leave one free slot in the ring,
+ * so move j back by one unit
+ */
+ j = (j == 0) ? lim : j - 1;
+ E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), j);
+ }
+
+ /* tell userspace that there are new packets */
+	ring->avail = kring->nr_hwavail;
+ if (do_lock)
+ EM_RX_UNLOCK(adapter);
+ return 0;
+}
+
+
+/*
+ * Register/unregister routine
+ */
+static int
+lem_netmap_reg(struct ifnet *ifp, int onoff)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct netmap_adapter *na = NA(ifp);
+ int error = 0;
+
+ if (!na)
+ return EINVAL;
+
+ lem_disable_intr(adapter);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ /* lem_netmap_block_tasks(adapter); */
+#ifndef EM_LEGACY_IRQ
+ taskqueue_block(adapter->tq);
+ taskqueue_drain(adapter->tq, &adapter->rxtx_task);
+ taskqueue_drain(adapter->tq, &adapter->link_task);
+#endif /* !EM_LEGACY_IRQ */
+ if (onoff) {
+ ifp->if_capenable |= IFCAP_NETMAP;
+
+ /* save if_transmit to restore it when exiting.
+ * XXX what about if_start and if_qflush ?
+ */
+ na->if_transmit = ifp->if_transmit;
+ ifp->if_transmit = netmap_start;
+
+ lem_init_locked(adapter);
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
+ error = ENOMEM;
+ goto fail;
+ }
+ } else {
+fail:
+ /* restore non-netmap mode */
+ ifp->if_transmit = na->if_transmit;
+ ifp->if_capenable &= ~IFCAP_NETMAP;
+ lem_init_locked(adapter); /* also enables intr */
+ }
+
+#ifndef EM_LEGACY_IRQ
+ taskqueue_unblock(adapter->tq);
+#endif /* !EM_LEGACY_IRQ */
+
+ return (error);
+}
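All four drivers handle buffer swaps identically: when userspace attaches a different netmap buffer to a slot it sets NS_BUF_CHANGED, and the driver must rewrite the descriptor's physical address and reload the DMA map before handing the slot back to the NIC. The shared fragment (dma_tag, dma_map and buffer_addr stand in for the per-driver names above):

	if (slot->flags & NS_BUF_CHANGED) {
		/* new backing buffer: refresh address and DMA mapping */
		curr->buffer_addr = htole64(vtophys(addr));
		netmap_reload_map(dma_tag, dma_map, addr, na->buff_size);
		slot->flags &= ~NS_BUF_CHANGED;
	}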
diff --git a/sys/dev/netmap/if_re_netmap.h b/sys/dev/netmap/if_re_netmap.h
new file mode 100644
index 0000000..efccf3a
--- /dev/null
+++ b/sys/dev/netmap/if_re_netmap.h
@@ -0,0 +1,415 @@
+/*
+ * Copyright (C) 2011 Luigi Rizzo. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: if_re_netmap.h 9662 2011-11-16 13:18:06Z luigi $
+ *
+ * netmap support for if_re
+ */
+
+#include <net/netmap.h>
+#include <sys/selinfo.h>
+#include <vm/vm.h>
+#include <vm/pmap.h> /* vtophys ? */
+#include <dev/netmap/netmap_kern.h>
+
+static int re_netmap_reg(struct ifnet *, int onoff);
+static int re_netmap_txsync(void *, u_int, int);
+static int re_netmap_rxsync(void *, u_int, int);
+static void re_netmap_lock_wrapper(void *, int, u_int);
+
+static void
+re_netmap_attach(struct rl_softc *sc)
+{
+ struct netmap_adapter na;
+
+ bzero(&na, sizeof(na));
+
+ na.ifp = sc->rl_ifp;
+ na.separate_locks = 0;
+ na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
+ na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
+ na.nm_txsync = re_netmap_txsync;
+ na.nm_rxsync = re_netmap_rxsync;
+ na.nm_lock = re_netmap_lock_wrapper;
+ na.nm_register = re_netmap_reg;
+ na.buff_size = MCLBYTES;
+ netmap_attach(&na, 1);
+}
+
+
+/*
+ * wrapper to export locks to the generic code
+ * We should not use the tx/rx locks
+ */
+static void
+re_netmap_lock_wrapper(void *_a, int what, u_int queueid)
+{
+ struct rl_softc *adapter = _a;
+
+ switch (what) {
+ case NETMAP_CORE_LOCK:
+ RL_LOCK(adapter);
+ break;
+ case NETMAP_CORE_UNLOCK:
+ RL_UNLOCK(adapter);
+ break;
+
+ case NETMAP_TX_LOCK:
+ case NETMAP_RX_LOCK:
+ case NETMAP_TX_UNLOCK:
+ case NETMAP_RX_UNLOCK:
+ D("invalid lock call %d, no tx/rx locks here", what);
+ break;
+ }
+}
+
+
+/*
+ * Support for netmap register/unregister. We are already under core lock;
+ * only called on the first register or the last unregister.
+ */
+static int
+re_netmap_reg(struct ifnet *ifp, int onoff)
+{
+ struct rl_softc *adapter = ifp->if_softc;
+ struct netmap_adapter *na = NA(ifp);
+ int error = 0;
+
+ if (!na)
+ return EINVAL;
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ re_stop(adapter);
+
+ if (onoff) {
+ ifp->if_capenable |= IFCAP_NETMAP;
+
+ /* save if_transmit and restore it */
+ na->if_transmit = ifp->if_transmit;
+ /* XXX if_start and if_qflush ??? */
+ ifp->if_transmit = netmap_start;
+
+ re_init_locked(adapter);
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
+ error = ENOMEM;
+ goto fail;
+ }
+ } else {
+fail:
+ /* restore if_transmit */
+ ifp->if_transmit = na->if_transmit;
+ ifp->if_capenable &= ~IFCAP_NETMAP;
+ re_init_locked(adapter); /* also enables intr */
+ }
+ return (error);
+
+}
+
+
+/*
+ * Reconcile kernel and user view of the transmit ring.
+ *
+ * Userspace has filled tx slots up to cur (excluded).
+ * The last unused slot previously known to the kernel was nr_hwcur,
+ * and the last interrupt reported nr_hwavail slots available
+ * (using the special value -1 to indicate idle transmit ring).
+ * The function must first update avail to what the kernel
+ * knows (translating the -1 to nkr_num_slots - 1),
+ * subtract the newly used slots (cur - nr_hwcur)
+ * from both avail and nr_hwavail, and set nr_hwcur = cur
+ * issuing a dmamap_sync on all slots.
+ */
+static int
+re_netmap_txsync(void *a, u_int ring_nr, int do_lock)
+{
+ struct rl_softc *sc = a;
+ struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
+ struct netmap_adapter *na = NA(sc->rl_ifp);
+ struct netmap_kring *kring = &na->tx_rings[ring_nr];
+ struct netmap_ring *ring = kring->ring;
+ int j, k, n, lim = kring->nkr_num_slots - 1;
+
+ k = ring->cur;
+ if ( (kring->nr_kflags & NR_REINIT) || k > lim)
+ return netmap_ring_reinit(kring);
+
+ if (do_lock)
+ RL_LOCK(sc);
+
+ /* Sync the TX descriptor list */
+ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
+ sc->rl_ldata.rl_tx_list_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ /* record completed transmissions */
+ for (n = 0, j = sc->rl_ldata.rl_tx_considx;
+ j != sc->rl_ldata.rl_tx_prodidx;
+ n++, j = RL_TX_DESC_NXT(sc, j)) {
+ uint32_t cmdstat =
+ le32toh(sc->rl_ldata.rl_tx_list[j].rl_cmdstat);
+ if (cmdstat & RL_TDESC_STAT_OWN)
+ break;
+ }
+ if (n > 0) {
+ sc->rl_ldata.rl_tx_considx = j;
+ sc->rl_ldata.rl_tx_free += n;
+ kring->nr_hwavail += n;
+ }
+
+ /* update avail to what the hardware knows */
+ ring->avail = kring->nr_hwavail;
+
+ /* we trust prodidx, not hwcur */
+ j = kring->nr_hwcur = sc->rl_ldata.rl_tx_prodidx;
+ if (j != k) { /* we have new packets to send */
+ n = 0;
+ while (j != k) {
+ struct netmap_slot *slot = &ring->slot[j];
+ struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[j];
+ int cmd = slot->len | RL_TDESC_CMD_EOF |
+				RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF;
+ void *addr = NMB(slot);
+ int len = slot->len;
+
+ if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
+ if (do_lock)
+ RL_UNLOCK(sc);
+ return netmap_ring_reinit(kring);
+ }
+
+ if (j == lim) /* mark end of ring */
+ cmd |= RL_TDESC_CMD_EOR;
+
+ if (slot->flags & NS_BUF_CHANGED) {
+ uint64_t paddr = vtophys(addr);
+ desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
+ desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
+ /* buffer has changed, unload and reload map */
+ netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
+ txd[j].tx_dmamap, addr, na->buff_size);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+ slot->flags &= ~NS_REPORT;
+ desc->rl_cmdstat = htole32(cmd);
+ bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
+ txd[j].tx_dmamap, BUS_DMASYNC_PREWRITE);
+ j = (j == lim) ? 0 : j + 1;
+ n++;
+ }
+ sc->rl_ldata.rl_tx_prodidx = kring->nr_hwcur = ring->cur;
+
+ /* decrease avail by number of sent packets */
+ ring->avail -= n;
+ kring->nr_hwavail = ring->avail;
+
+ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
+ sc->rl_ldata.rl_tx_list_map,
+ BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+
+ /* start ? */
+ CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
+ }
+ if (do_lock)
+ RL_UNLOCK(sc);
+ return 0;
+}
+
+
+/*
+ * Reconcile kernel and user view of the receive ring.
+ *
+ * Userspace has read rx slots up to cur (excluded).
+ * The last unread slot previously known to the kernel was nr_hwcur,
+ * and the last interrupt reported nr_hwavail slots available.
+ * We must subtract the newly consumed slots (cur - nr_hwcur)
+ * from nr_hwavail, clearing the descriptors for the next
+ * read, tell the hardware that they are available,
+ * and set nr_hwcur = cur and avail = nr_hwavail.
+ * issuing a dmamap_sync on all slots.
+ */
+static int
+re_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
+{
+ struct rl_softc *sc = a;
+ struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
+ struct netmap_adapter *na = NA(sc->rl_ifp);
+ struct netmap_kring *kring = &na->rx_rings[ring_nr];
+ struct netmap_ring *ring = kring->ring;
+ int j, k, n, lim = kring->nkr_num_slots - 1;
+
+ k = ring->cur;
+ if ( (kring->nr_kflags & NR_REINIT) || k > lim)
+ return netmap_ring_reinit(kring);
+
+ if (do_lock)
+ RL_LOCK(sc);
+ /* XXX check sync modes */
+ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
+ sc->rl_ldata.rl_rx_list_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ /*
+ * The device uses all the buffers in the ring, so we need
+ * another termination condition in addition to RL_RDESC_STAT_OWN
+	 * cleared (all buffers could have it cleared). The easiest one
+	 * is to limit the amount of data reported up to 'lim'.
+ */
+ j = sc->rl_ldata.rl_rx_prodidx;
+	for (n = kring->nr_hwavail; n < lim; n++) {
+ struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[j];
+ uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
+ uint32_t total_len;
+
+ if ((rxstat & RL_RDESC_STAT_OWN) != 0)
+ break;
+ total_len = rxstat & sc->rl_rxlenmask;
+ /* XXX subtract crc */
+ total_len = (total_len < 4) ? 0 : total_len - 4;
+ kring->ring->slot[j].len = total_len;
+ /* sync was in re_newbuf() */
+ bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
+ rxd[j].rx_dmamap, BUS_DMASYNC_POSTREAD);
+ j = RL_RX_DESC_NXT(sc, j);
+ }
+ if (n != kring->nr_hwavail) {
+ sc->rl_ldata.rl_rx_prodidx = j;
+ sc->rl_ifp->if_ipackets += n - kring->nr_hwavail;
+ kring->nr_hwavail = n;
+ }
+
+	/* skip past packets that userspace has already processed,
+	 * making them available for reception again.
+	 * Advance nr_hwcur and issue a bus_dmamap_sync on the
+	 * buffers so it is safe to write to them.
+	 * Also decrease nr_hwavail accordingly.
+ */
+ j = kring->nr_hwcur;
+ if (j != k) { /* userspace has read some packets. */
+ n = 0;
+ while (j != k) {
+ struct netmap_slot *slot = ring->slot + j;
+ struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[j];
+ int cmd = na->buff_size | RL_RDESC_CMD_OWN;
+ void *addr = NMB(slot);
+
+ if (addr == netmap_buffer_base) { /* bad buf */
+ if (do_lock)
+ RL_UNLOCK(sc);
+ return netmap_ring_reinit(kring);
+ }
+
+ if (j == lim) /* mark end of ring */
+ cmd |= RL_RDESC_CMD_EOR;
+
+ desc->rl_cmdstat = htole32(cmd);
+ slot->flags &= ~NS_REPORT;
+ if (slot->flags & NS_BUF_CHANGED) {
+ uint64_t paddr = vtophys(addr);
+ desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
+ desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
+ netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
+ rxd[j].rx_dmamap, addr, na->buff_size);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+ bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
+ rxd[j].rx_dmamap, BUS_DMASYNC_PREREAD);
+ j = (j == lim) ? 0 : j + 1;
+ n++;
+ }
+ kring->nr_hwavail -= n;
+ kring->nr_hwcur = k;
+ /* Flush the RX DMA ring */
+
+ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
+ sc->rl_ldata.rl_rx_list_map,
+ BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+ }
+ /* tell userspace that there are new packets */
+	ring->avail = kring->nr_hwavail;
+ if (do_lock)
+ RL_UNLOCK(sc);
+ return 0;
+}
+
+static void
+re_netmap_tx_init(struct rl_softc *sc)
+{
+ struct rl_txdesc *txd;
+ struct rl_desc *desc;
+ int i;
+ struct netmap_adapter *na = NA(sc->rl_ifp);
+ struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
+
+ /* slot is NULL if we are not in netmap mode */
+ if (!slot)
+ return;
+ /* in netmap mode, overwrite addresses and maps */
+ txd = sc->rl_ldata.rl_tx_desc;
+ desc = sc->rl_ldata.rl_tx_list;
+
+ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
+ void *addr = NMB(slot+i);
+ uint64_t paddr = vtophys(addr);
+
+ desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
+ desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
+ netmap_load_map(sc->rl_ldata.rl_tx_mtag,
+ txd[i].tx_dmamap, addr, na->buff_size);
+ }
+}
+
+static void
+re_netmap_rx_init(struct rl_softc *sc)
+{
+ /* slot is NULL if we are not in netmap mode */
+ struct netmap_adapter *na = NA(sc->rl_ifp);
+ struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
+ struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
+ uint32_t cmdstat;
+ int i;
+
+ if (!slot)
+ return;
+
+ for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
+ void *addr = NMB(slot+i);
+ uint64_t paddr = vtophys(addr);
+
+ desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
+ desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
+ cmdstat = slot[i].len = na->buff_size; // XXX
+ if (i == sc->rl_ldata.rl_rx_desc_cnt - 1)
+ cmdstat |= RL_RDESC_CMD_EOR;
+ desc[i].rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
+
+ netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
+ sc->rl_ldata.rl_rx_desc[i].rx_dmamap,
+ addr, na->buff_size);
+ }
+}
diff --git a/sys/dev/netmap/ixgbe_netmap.h b/sys/dev/netmap/ixgbe_netmap.h
new file mode 100644
index 0000000..a4d5491
--- /dev/null
+++ b/sys/dev/netmap/ixgbe_netmap.h
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: ixgbe_netmap.h 9662 2011-11-16 13:18:06Z luigi $
+ *
+ * netmap modifications for ixgbe
+ */
+
+#include <net/netmap.h>
+#include <sys/selinfo.h>
+// #include <vm/vm.h>
+// #include <vm/pmap.h> /* vtophys ? */
+#include <dev/netmap/netmap_kern.h>
+
+static int ixgbe_netmap_reg(struct ifnet *, int onoff);
+static int ixgbe_netmap_txsync(void *, u_int, int);
+static int ixgbe_netmap_rxsync(void *, u_int, int);
+static void ixgbe_netmap_lock_wrapper(void *, int, u_int);
+
+
+SYSCTL_NODE(_dev, OID_AUTO, ixgbe, CTLFLAG_RW, 0, "ixgbe card");
+
+static void
+ixgbe_netmap_attach(struct adapter *adapter)
+{
+ struct netmap_adapter na;
+
+ bzero(&na, sizeof(na));
+
+ na.ifp = adapter->ifp;
+ na.separate_locks = 1;
+ na.num_tx_desc = adapter->num_tx_desc;
+ na.num_rx_desc = adapter->num_rx_desc;
+ na.nm_txsync = ixgbe_netmap_txsync;
+ na.nm_rxsync = ixgbe_netmap_rxsync;
+ na.nm_lock = ixgbe_netmap_lock_wrapper;
+ na.nm_register = ixgbe_netmap_reg;
+ /*
+	 * adapter->rx_mbuf_sz is set by SIOCSIFMTU, but in netmap mode
+	 * we allocate the buffers on the first register. So we must
+	 * disallow a SIOCSIFMTU when if_capenable & IFCAP_NETMAP is set.
+ */
+ na.buff_size = MCLBYTES;
+ netmap_attach(&na, adapter->num_queues);
+}
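+
+/*
+ * A minimal sketch of how a driver ioctl handler could enforce the
+ * SIOCSIFMTU restriction mentioned above. The case label and the
+ * error code are assumptions for illustration, not code from this
+ * change:
+ *
+ *	case SIOCSIFMTU:
+ *		if (ifp->if_capenable & IFCAP_NETMAP)
+ *			return (EBUSY);
+ *		... regular MTU handling ...
+ */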
+
+
+/*
+ * wrapper to export locks to the generic code
+ */
+static void
+ixgbe_netmap_lock_wrapper(void *_a, int what, u_int queueid)
+{
+ struct adapter *adapter = _a;
+
+ ASSERT(queueid < adapter->num_queues);
+ switch (what) {
+ case NETMAP_CORE_LOCK:
+ IXGBE_CORE_LOCK(adapter);
+ break;
+ case NETMAP_CORE_UNLOCK:
+ IXGBE_CORE_UNLOCK(adapter);
+ break;
+ case NETMAP_TX_LOCK:
+ IXGBE_TX_LOCK(&adapter->tx_rings[queueid]);
+ break;
+ case NETMAP_TX_UNLOCK:
+ IXGBE_TX_UNLOCK(&adapter->tx_rings[queueid]);
+ break;
+ case NETMAP_RX_LOCK:
+ IXGBE_RX_LOCK(&adapter->rx_rings[queueid]);
+ break;
+ case NETMAP_RX_UNLOCK:
+ IXGBE_RX_UNLOCK(&adapter->rx_rings[queueid]);
+ break;
+ }
+}
+
+
+/*
+ * Support for netmap register/unregister. We are already under the
+ * core lock. Only called on the first register or the last unregister.
+ */
+static int
+ixgbe_netmap_reg(struct ifnet *ifp, int onoff)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct netmap_adapter *na = NA(ifp);
+ int error = 0;
+
+ if (!na)
+ return EINVAL;
+
+ ixgbe_disable_intr(adapter);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ if (onoff) {
+ ifp->if_capenable |= IFCAP_NETMAP;
+
+ /* save if_transmit to restore it later */
+ na->if_transmit = ifp->if_transmit;
+ ifp->if_transmit = netmap_start;
+
+ ixgbe_init_locked(adapter);
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
+ error = ENOMEM;
+ goto fail;
+ }
+ } else {
+fail:
+ /* restore if_transmit */
+ ifp->if_transmit = na->if_transmit;
+ ifp->if_capenable &= ~IFCAP_NETMAP;
+ ixgbe_init_locked(adapter); /* also enables intr */
+ }
+ return (error);
+}
+
+
+/*
+ * Reconcile kernel and user view of the transmit ring.
+ *
+ * Userspace has filled tx slots up to cur (excluded).
+ * The last unused slot previously known to the kernel was nr_hwcur,
+ * and the last interrupt reported nr_hwavail slots available
+ * (using the special value -1 to indicate idle transmit ring).
+ * The function must first update avail to what the kernel
+ * knows, subtract the newly used slots (cur - nr_hwcur)
+ * from both avail and nr_hwavail, and set nr_hwcur = cur
+ * issuing a dmamap_sync on all slots.
+ *
+ * Check parameters in the struct netmap_ring.
+ * We don't use avail, only check for bogus values.
+ * Make sure cur is valid, and the same goes for buffer indexes and lengths.
+ * To avoid races, read the values once, and never use those from
+ * the ring afterwards.
+ */
+static int
+ixgbe_netmap_txsync(void *a, u_int ring_nr, int do_lock)
+{
+ struct adapter *adapter = a;
+ struct tx_ring *txr = &adapter->tx_rings[ring_nr];
+ struct netmap_adapter *na = NA(adapter->ifp);
+ struct netmap_kring *kring = &na->tx_rings[ring_nr];
+ struct netmap_ring *ring = kring->ring;
+ int j, k, n = 0, lim = kring->nkr_num_slots - 1;
+
+ /* generate an interrupt approximately every half ring */
+ int report_frequency = kring->nkr_num_slots >> 1;
+
+ k = ring->cur; /* ring is not protected by any lock */
+ if ( (kring->nr_kflags & NR_REINIT) || k > lim)
+ return netmap_ring_reinit(kring);
+
+ if (do_lock)
+ IXGBE_TX_LOCK(txr);
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+ /* update avail to what the hardware knows */
+ ring->avail = kring->nr_hwavail;
+
+ j = kring->nr_hwcur;
+ if (j != k) { /* we have new packets to send */
+ while (j != k) {
+ struct netmap_slot *slot = &ring->slot[j];
+ struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[j];
+ union ixgbe_adv_tx_desc *curr = &txr->tx_base[j];
+ void *addr = NMB(slot);
+ int flags = ((slot->flags & NS_REPORT) ||
+ j == 0 || j == report_frequency) ?
+ IXGBE_TXD_CMD_RS : 0;
+ int len = slot->len;
+
+ if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
+ if (do_lock)
+ IXGBE_TX_UNLOCK(txr);
+ return netmap_ring_reinit(kring);
+ }
+
+ slot->flags &= ~NS_REPORT;
+ curr->read.buffer_addr = htole64(vtophys(addr));
+ curr->read.olinfo_status = 0;
+ curr->read.cmd_type_len =
+ htole32(txr->txd_cmd | len |
+ (IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS |
+ IXGBE_TXD_CMD_EOP | flags) );
+ if (slot->flags & NS_BUF_CHANGED) {
+ /* buffer has changed, unload and reload map */
+ netmap_reload_map(txr->txtag, txbuf->map,
+ addr, na->buff_size);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+
+ bus_dmamap_sync(txr->txtag, txbuf->map,
+ BUS_DMASYNC_PREWRITE);
+ j = (j == lim) ? 0 : j + 1;
+ n++;
+ }
+ kring->nr_hwcur = k;
+
+ /* decrease avail by number of sent packets */
+ ring->avail -= n;
+ kring->nr_hwavail = ring->avail;
+
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), k);
+ }
+
+ if (n == 0 || kring->nr_hwavail < 1) {
+ /* record completed transmissions. TODO
+ *
+ * The datasheet discourages the use of TDH to find out the
+		 * number of sent packets; the right way to do so is to check
+		 * the DD bit inside the status of a packet descriptor. On the
+		 * other hand, we avoid setting the `report status' bit for
+		 * *all* outgoing packets (a kind of interrupt mitigation),
+		 * so the DD bit is not guaranteed to be set for all the
+		 * packets: that's why, for the moment, we continue to use
+		 * TDH.
+ */
+ j = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(ring_nr));
+ if (j >= kring->nkr_num_slots) { /* XXX can happen */
+ D("TDH wrap %d", j);
+ j -= kring->nkr_num_slots;
+ }
+ int delta = j - txr->next_to_clean;
+ if (delta) {
+ /* new transmissions were completed, increment
+ ring->nr_hwavail. */
+ if (delta < 0)
+ delta += kring->nkr_num_slots;
+ txr->next_to_clean = j;
+ kring->nr_hwavail += delta;
+ ring->avail = kring->nr_hwavail;
+ }
+ }
+
+ if (do_lock)
+ IXGBE_TX_UNLOCK(txr);
+ return 0;
+}
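+
+/*
+ * For reference, a sketch of the DD-bit based cleanup that the TODO
+ * comment above describes as the preferred alternative to reading
+ * TDH. It assumes every descriptor had the `report status' bit set,
+ * which is not the case here (see the comment), so it is only an
+ * illustration:
+ *
+ *	j = txr->next_to_clean;
+ *	while (le32toh(txr->tx_base[j].wb.status) & IXGBE_TXD_STAT_DD) {
+ *		kring->nr_hwavail++;
+ *		j = (j == lim) ? 0 : j + 1;
+ *	}
+ *	txr->next_to_clean = j;
+ */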
+
+
+/*
+ * Reconcile kernel and user view of the receive ring.
+ *
+ * Userspace has read rx slots up to cur (excluded).
+ * The last unread slot previously known to the kernel was nr_hwcur,
+ * and the last interrupt reported nr_hwavail slots available.
+ * We must subtract the newly consumed slots (cur - nr_hwcur)
+ * from nr_hwavail, clear the descriptors for the next
+ * read, tell the hardware that they are available,
+ * and set nr_hwcur = cur and avail = nr_hwavail,
+ * issuing a dmamap_sync on all slots.
+ */
+static int
+ixgbe_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
+{
+ struct adapter *adapter = a;
+ struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
+ struct netmap_adapter *na = NA(adapter->ifp);
+ struct netmap_kring *kring = &na->rx_rings[ring_nr];
+ struct netmap_ring *ring = kring->ring;
+ int j, k, n, lim = kring->nkr_num_slots - 1;
+
+ k = ring->cur; /* ring is not protected by any lock */
+ if ( (kring->nr_kflags & NR_REINIT) || k > lim)
+ return netmap_ring_reinit(kring);
+
+ if (do_lock)
+ IXGBE_RX_LOCK(rxr);
+ /* XXX check sync modes */
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ j = rxr->next_to_check;
+ for (n = 0; ; n++) {
+ union ixgbe_adv_rx_desc *curr = &rxr->rx_base[j];
+ uint32_t staterr = le32toh(curr->wb.upper.status_error);
+
+ if ((staterr & IXGBE_RXD_STAT_DD) == 0)
+ break;
+ ring->slot[j].len = le16toh(curr->wb.upper.length);
+ bus_dmamap_sync(rxr->ptag,
+ rxr->rx_buffers[j].pmap, BUS_DMASYNC_POSTREAD);
+ j = (j == lim) ? 0 : j + 1;
+ }
+ if (n) {
+ rxr->next_to_check = j;
+ kring->nr_hwavail += n;
+ if (kring->nr_hwavail >= lim - 10) {
+ ND("rx ring %d almost full %d", ring_nr, kring->nr_hwavail);
+ }
+ }
+
+	/* skip past packets that userspace has already processed,
+	 * making them available for reception again.
+	 * Advance nr_hwcur and issue a bus_dmamap_sync on the
+	 * buffers so it is safe to write to them.
+	 * Also decrease nr_hwavail accordingly.
+ */
+ j = kring->nr_hwcur;
+ if (j != k) { /* userspace has read some packets. */
+ n = 0;
+ while (j != k) {
+ struct netmap_slot *slot = ring->slot + j;
+ union ixgbe_adv_rx_desc *curr = &rxr->rx_base[j];
+ struct ixgbe_rx_buf *rxbuf = rxr->rx_buffers + j;
+ void *addr = NMB(slot);
+
+ if (addr == netmap_buffer_base) { /* bad buf */
+ if (do_lock)
+ IXGBE_RX_UNLOCK(rxr);
+ return netmap_ring_reinit(kring);
+ }
+
+ curr->wb.upper.status_error = 0;
+ curr->read.pkt_addr = htole64(vtophys(addr));
+ if (slot->flags & NS_BUF_CHANGED) {
+ netmap_reload_map(rxr->ptag, rxbuf->pmap,
+ addr, na->buff_size);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_PREREAD);
+
+ j = (j == lim) ? 0 : j + 1;
+ n++;
+ }
+ kring->nr_hwavail -= n;
+ kring->nr_hwcur = ring->cur;
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ /* IMPORTANT: we must leave one free slot in the ring,
+ * so move j back by one unit
+ */
+ j = (j == 0) ? lim : j - 1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), j);
+ }
+ /* tell userspace that there are new packets */
+	ring->avail = kring->nr_hwavail;
+ if (do_lock)
+ IXGBE_RX_UNLOCK(rxr);
+ return 0;
+}
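+
+/*
+ * Worked example of the RDT rule above: with a 512-slot ring
+ * (lim = 511), if the update loop stops at j = 0 the driver writes
+ * RDT = 511, one slot before the next position the hardware will
+ * fill, so a completely full ring is never confused with an empty one.
+ */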
diff --git a/sys/dev/netmap/netmap.c b/sys/dev/netmap/netmap.c
new file mode 100644
index 0000000..7645a4e
--- /dev/null
+++ b/sys/dev/netmap/netmap.c
@@ -0,0 +1,1762 @@
+/*
+ * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: netmap.c 9662 2011-11-16 13:18:06Z luigi $
+ *
+ * This module supports memory mapped access to network devices,
+ * see netmap(4).
+ *
+ * The module uses a large, memory pool allocated by the kernel
+ * and accessible as mmapped memory by multiple userspace threads/processes.
+ * The memory pool contains packet buffers and "netmap rings",
+ * i.e. user-accessible copies of the interface's queues.
+ *
+ * Access to the network card works like this:
+ * 1. a process/thread issues one or more open() on /dev/netmap, to create
+ * select()able file descriptor on which events are reported.
+ * 2. on each descriptor, the process issues an ioctl() to identify
+ * the interface that should report events to the file descriptor.
+ * 3. on each descriptor, the process issues an mmap() request to
+ * map the shared memory region within the process' address space.
+ * The list of interesting queues is indicated by a location in
+ * the shared memory region.
+ * 4. using the functions in the netmap(4) userspace API, a process
+ * can look up the occupation state of a queue, access memory buffers,
+ * and retrieve received packets or enqueue packets to transmit.
+ * 5. using some ioctl()s the process can synchronize the userspace view
+ * of the queue with the actual status in the kernel. This includes both
+ * receiving the notification of new packets, and transmitting new
+ * packets on the output interface.
+ * 6. select() or poll() can be used to wait for events on individual
+ * transmit or receive queues (or all queues for a given interface).
+ */
+
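+/*
+ * A minimal sketch of the userspace sequence above (error handling
+ * omitted). The NETMAP_IF()/NETMAP_TXRING() helpers are assumed to
+ * come from the userspace header net/netmap_user.h:
+ *
+ *	int fd = open("/dev/netmap", O_RDWR);
+ *	struct nmreq req;
+ *	bzero(&req, sizeof(req));
+ *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
+ *	ioctl(fd, NIOCREGIF, &req);	// attach to the interface
+ *	void *mem = mmap(0, req.nr_memsize,
+ *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
+ *	struct netmap_ring *txring = NETMAP_TXRING(nifp, 0);
+ *	// fill slots and advance txring->cur, then:
+ *	ioctl(fd, NIOCTXSYNC, NULL);	// or poll() with POLLOUT
+ */
+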
+#include <sys/cdefs.h> /* prerequisite */
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/module.h>
+#include <sys/errno.h>
+#include <sys/param.h> /* defines used in kernel.h */
+#include <sys/kernel.h> /* types used in module initialization */
+#include <sys/conf.h> /* cdevsw struct */
+#include <sys/uio.h> /* uio struct */
+#include <sys/sockio.h>
+#include <sys/socketvar.h> /* struct socket */
+#include <sys/malloc.h>
+#include <sys/mman.h> /* PROT_EXEC */
+#include <sys/poll.h>
+#include <vm/vm.h> /* vtophys */
+#include <vm/pmap.h> /* vtophys */
+#include <sys/socket.h> /* sockaddrs */
+#include <machine/bus.h>
+#include <sys/selinfo.h>
+#include <sys/sysctl.h>
+#include <net/if.h>
+#include <net/bpf.h> /* BIOCIMMEDIATE */
+#include <net/netmap.h>
+#include <dev/netmap/netmap_kern.h>
+#include <machine/bus.h> /* bus_dmamap_* */
+
+MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
+
+/*
+ * lock and unlock for the netmap memory allocator
+ */
+#define NMA_LOCK() mtx_lock(&netmap_mem_d->nm_mtx);
+#define NMA_UNLOCK() mtx_unlock(&netmap_mem_d->nm_mtx);
+
+/*
+ * Default amount of memory pre-allocated by the module.
+ * We start with a large size and then shrink our demand
+ * according to what is available when the module is loaded.
+ * At the moment the block is contiguous, but we can easily
+ * restrict our demand to smaller units (16..64k)
+ */
+#define NETMAP_MEMORY_SIZE (64 * 1024 * PAGE_SIZE)
+static void * netmap_malloc(size_t size, const char *msg);
+static void netmap_free(void *addr, const char *msg);
+
+/*
+ * Allocator for a pool of packet buffers. For each buffer we have
+ * one entry in the bitmap to signal the state. Allocation scans
+ * the bitmap, but since this is done only on attach, we are not
+ * too worried about performance.
+ * XXX if we need to allocate small blocks, a translation
+ * table is used both for kernel virtual address and physical
+ * addresses.
+ */
+struct netmap_buf_pool {
+ u_int total_buffers; /* total buffers. */
+ u_int free;
+ u_int bufsize;
+ char *base; /* buffer base address */
+ uint32_t *bitmap; /* one bit per buffer, 1 means free */
+};
+struct netmap_buf_pool nm_buf_pool;
+/* XXX move these two vars back into netmap_buf_pool */
+u_int netmap_total_buffers;
+char *netmap_buffer_base;
+
+/* user-controlled variables */
+int netmap_verbose;
+
+static int no_timestamp; /* don't timestamp on rxsync */
+
+SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
+SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
+ CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
+SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
+ CTLFLAG_RW, &no_timestamp, 0, "no_timestamp");
+SYSCTL_INT(_dev_netmap, OID_AUTO, total_buffers,
+ CTLFLAG_RD, &nm_buf_pool.total_buffers, 0, "total_buffers");
+SYSCTL_INT(_dev_netmap, OID_AUTO, free_buffers,
+ CTLFLAG_RD, &nm_buf_pool.free, 0, "free_buffers");
+
+/*
+ * Allocate n buffers from the pool, and fill the slots.
+ * Buffer 0 is the 'junk' buffer.
+ */
+static void
+netmap_new_bufs(struct netmap_buf_pool *p, struct netmap_slot *slot, u_int n)
+{
+ uint32_t bi = 0; /* index in the bitmap */
+ uint32_t mask, j, i = 0; /* slot counter */
+
+ if (n > p->free) {
+		D("only %d out of %d buffers available", p->free, n);
+ return;
+ }
+ /* termination is guaranteed by p->free */
+ while (i < n && p->free > 0) {
+ uint32_t cur = p->bitmap[bi];
+ if (cur == 0) { /* bitmask is fully used */
+ bi++;
+ continue;
+ }
+ /* locate a slot */
+ for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) ;
+ p->bitmap[bi] &= ~mask; /* slot in use */
+ p->free--;
+ slot[i].buf_idx = bi*32+j;
+ slot[i].len = p->bufsize;
+ slot[i].flags = NS_BUF_CHANGED;
+ i++;
+ }
+ ND("allocated %d buffers, %d available", n, p->free);
+}
+
+
+static void
+netmap_free_buf(struct netmap_buf_pool *p, uint32_t i)
+{
+ uint32_t pos, mask;
+ if (i >= p->total_buffers) {
+ D("invalid free index %d", i);
+ return;
+ }
+ pos = i / 32;
+ mask = 1 << (i % 32);
+ if (p->bitmap[pos] & mask) {
+ D("slot %d already free", i);
+ return;
+ }
+ p->bitmap[pos] |= mask;
+ p->free++;
+}
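+
+/*
+ * Worked example of the bitmap arithmetic above: buffer index 77
+ * lives in word 77 / 32 = 2 at bit 77 % 32 = 13, so freeing it sets
+ * bit (1 << 13) in p->bitmap[2] and netmap_new_bufs() would hand it
+ * out again as buf_idx = 2 * 32 + 13.
+ */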
+
+
+/* Descriptor of the memory objects handled by our memory allocator. */
+struct netmap_mem_obj {
+ TAILQ_ENTRY(netmap_mem_obj) nmo_next; /* next object in the
+ chain. */
+ int nmo_used; /* flag set on used memory objects. */
+ size_t nmo_size; /* size of the memory area reserved for the
+ object. */
+ void *nmo_data; /* pointer to the memory area. */
+};
+
+/* Wrap our memory objects to make them ``chainable``. */
+TAILQ_HEAD(netmap_mem_obj_h, netmap_mem_obj);
+
+
+/* Descriptor of our custom memory allocator. */
+struct netmap_mem_d {
+ struct mtx nm_mtx; /* lock used to handle the chain of memory
+ objects. */
+ struct netmap_mem_obj_h nm_molist; /* list of memory objects */
+ size_t nm_size; /* total amount of memory used for rings etc. */
+ size_t nm_totalsize; /* total amount of allocated memory
+ (the difference is used for buffers) */
+ size_t nm_buf_start; /* offset of packet buffers.
+ This is page-aligned. */
+ size_t nm_buf_len; /* total memory for buffers */
+ void *nm_buffer; /* pointer to the whole pre-allocated memory
+ area. */
+};
+
+
+/* Structure associated to each thread which registered an interface. */
+struct netmap_priv_d {
+ struct netmap_if *np_nifp; /* netmap interface descriptor. */
+
+ struct ifnet *np_ifp; /* device for which we hold a reference */
+ int np_ringid; /* from the ioctl */
+ u_int np_qfirst, np_qlast; /* range of rings to scan */
+ uint16_t np_txpoll;
+};
+
+
+static struct cdev *netmap_dev; /* /dev/netmap character device. */
+static struct netmap_mem_d *netmap_mem_d; /* Our memory allocator. */
+
+
+static d_mmap_t netmap_mmap;
+static d_ioctl_t netmap_ioctl;
+static d_poll_t netmap_poll;
+
+#ifdef NETMAP_KEVENT
+static d_kqfilter_t netmap_kqfilter;
+#endif
+
+static struct cdevsw netmap_cdevsw = {
+ .d_version = D_VERSION,
+ .d_name = "netmap",
+ .d_mmap = netmap_mmap,
+ .d_ioctl = netmap_ioctl,
+ .d_poll = netmap_poll,
+#ifdef NETMAP_KEVENT
+ .d_kqfilter = netmap_kqfilter,
+#endif
+};
+
+#ifdef NETMAP_KEVENT
+static int netmap_kqread(struct knote *, long);
+static int netmap_kqwrite(struct knote *, long);
+static void netmap_kqdetach(struct knote *);
+
+static struct filterops netmap_read_filterops = {
+ .f_isfd = 1,
+ .f_attach = NULL,
+ .f_detach = netmap_kqdetach,
+ .f_event = netmap_kqread,
+};
+
+static struct filterops netmap_write_filterops = {
+ .f_isfd = 1,
+ .f_attach = NULL,
+ .f_detach = netmap_kqdetach,
+ .f_event = netmap_kqwrite,
+};
+
+/*
+ * support for the kevent() system call.
+ *
+ * This is the kevent filter, and is executed each time a new event
+ * is triggered on the device. This function executes some operations
+ * depending on the received filter.
+ *
+ * The implementation should test the filters and should implement
+ * filter operations we are interested in (a full list is in /sys/event.h).
+ *
+ * On a match we should:
+ * - set kn->kn_fop
+ * - set kn->kn_hook
+ * - call knlist_add() to deliver the event to the application.
+ *
+ * Return 0 if the event should be delivered to the application.
+ */
+static int
+netmap_kqfilter(struct cdev *dev, struct knote *kn)
+{
+ /* declare variables needed to read/write */
+
+ switch(kn->kn_filter) {
+ case EVFILT_READ:
+ if (netmap_verbose)
+			D("%s kqfilter: EVFILT_READ", ifp->if_xname);
+
+ /* read operations */
+ kn->kn_fop = &netmap_read_filterops;
+ break;
+
+ case EVFILT_WRITE:
+ if (netmap_verbose)
+			D("%s kqfilter: EVFILT_WRITE", ifp->if_xname);
+
+ /* write operations */
+ kn->kn_fop = &netmap_write_filterops;
+ break;
+
+ default:
+ if (netmap_verbose)
+			D("%s kqfilter: invalid filter", ifp->if_xname);
+ return(EINVAL);
+ }
+
+	kn->kn_hook = 0;
+ knlist_add(&netmap_sc->tun_rsel.si_note, kn, 0);
+
+ return (0);
+}
+#endif /* NETMAP_KEVENT */
+
+/*
+ * File descriptor's private data destructor.
+ *
+ * Call nm_register(ifp,0) to stop netmap mode on the interface and
+ * revert to normal operation. We expect that np_ifp has not gone away.
+ */
+static void
+netmap_dtor(void *data)
+{
+ struct netmap_priv_d *priv = data;
+ struct ifnet *ifp = priv->np_ifp;
+ struct netmap_adapter *na = NA(ifp);
+ struct netmap_if *nifp = priv->np_nifp;
+
+ if (0)
+ printf("%s starting for %p ifp %p\n", __FUNCTION__, priv,
+ priv ? priv->np_ifp : NULL);
+
+ na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
+
+ na->refcount--;
+ if (na->refcount <= 0) { /* last instance */
+ u_int i;
+
+ D("deleting last netmap instance for %s", ifp->if_xname);
+ /*
+ * there is a race here with *_netmap_task() and
+ * netmap_poll(), which don't run under NETMAP_CORE_LOCK.
+ * na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP
+ * (aka NETMAP_DELETING(na)) are a unique marker that the
+ * device is dying.
+ * Before destroying stuff we sleep a bit, and then complete
+		 * the job. NIOCREGIF should recognize the condition and
+		 * loop until it can continue; the other routines
+ * should check the condition at entry and quit if
+ * they cannot run.
+ */
+ na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
+ tsleep(na, 0, "NIOCUNREG", 4);
+ na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
+ na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */
+ /* Wake up any sleeping threads. netmap_poll will
+ * then return POLLERR
+ */
+ for (i = 0; i < na->num_queues + 2; i++) {
+ selwakeuppri(&na->tx_rings[i].si, PI_NET);
+ selwakeuppri(&na->rx_rings[i].si, PI_NET);
+ }
+ /* release all buffers */
+ NMA_LOCK();
+ for (i = 0; i < na->num_queues + 1; i++) {
+ int j, lim;
+ struct netmap_ring *ring;
+
+ ND("tx queue %d", i);
+ ring = na->tx_rings[i].ring;
+ lim = na->tx_rings[i].nkr_num_slots;
+ for (j = 0; j < lim; j++)
+ netmap_free_buf(&nm_buf_pool,
+ ring->slot[j].buf_idx);
+
+ ND("rx queue %d", i);
+ ring = na->rx_rings[i].ring;
+ lim = na->rx_rings[i].nkr_num_slots;
+ for (j = 0; j < lim; j++)
+ netmap_free_buf(&nm_buf_pool,
+ ring->slot[j].buf_idx);
+ }
+ NMA_UNLOCK();
+ netmap_free(na->tx_rings[0].ring, "shadow rings");
+ wakeup(na);
+ }
+ netmap_free(nifp, "nifp");
+
+ na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
+
+ if_rele(ifp);
+
+ bzero(priv, sizeof(*priv)); /* XXX for safety */
+ free(priv, M_DEVBUF);
+}
+
+
+
+/*
+ * Create and return a new ``netmap_if`` object, and possibly also
+ * rings and packet buffers.
+ *
+ * Return NULL on failure.
+ */
+static void *
+netmap_if_new(const char *ifname, struct netmap_adapter *na)
+{
+ struct netmap_if *nifp;
+ struct netmap_ring *ring;
+ char *buff;
+ u_int i, len, ofs;
+ u_int n = na->num_queues + 1; /* shorthand, include stack queue */
+
+ /*
+ * the descriptor is followed inline by an array of offsets
+ * to the tx and rx rings in the shared memory region.
+ */
+ len = sizeof(struct netmap_if) + 2 * n * sizeof(ssize_t);
+ nifp = netmap_malloc(len, "nifp");
+ if (nifp == NULL)
+ return (NULL);
+
+ /* initialize base fields */
+ *(int *)(uintptr_t)&nifp->ni_num_queues = na->num_queues;
+ strncpy(nifp->ni_name, ifname, IFNAMSIZ);
+
+ (na->refcount)++; /* XXX atomic ? we are under lock */
+ if (na->refcount > 1)
+ goto final;
+
+ /*
+ * If this is the first instance, allocate the shadow rings and
+ * buffers for this card (one for each hw queue, one for the host).
+ * The rings are contiguous, but have variable size.
+ * The entire block is reachable at
+ * na->tx_rings[0].ring
+ */
+
+ len = n * (2 * sizeof(struct netmap_ring) +
+ (na->num_tx_desc + na->num_rx_desc) *
+ sizeof(struct netmap_slot) );
+ buff = netmap_malloc(len, "shadow rings");
+ if (buff == NULL) {
+ D("failed to allocate %d bytes for %s shadow ring",
+ len, ifname);
+error:
+ (na->refcount)--;
+ netmap_free(nifp, "nifp, rings failed");
+ return (NULL);
+ }
+	/* do we have the buffers? we need num_tx_desc buffers for
+	 * each tx ring and num_rx_desc buffers for each rx ring. */
+ len = n * (na->num_tx_desc + na->num_rx_desc);
+ NMA_LOCK();
+ if (nm_buf_pool.free < len) {
+ NMA_UNLOCK();
+ netmap_free(buff, "not enough bufs");
+ goto error;
+ }
+ /*
+ * in the kring, store the pointers to the shared rings
+ * and initialize the rings. We are under NMA_LOCK().
+ */
+ ofs = 0;
+ for (i = 0; i < n; i++) {
+ struct netmap_kring *kring;
+ int numdesc;
+
+ /* Transmit rings */
+ kring = &na->tx_rings[i];
+ numdesc = na->num_tx_desc;
+ bzero(kring, sizeof(*kring));
+ kring->na = na;
+
+ ring = kring->ring = (struct netmap_ring *)(buff + ofs);
+ *(ssize_t *)(uintptr_t)&ring->buf_ofs =
+ nm_buf_pool.base - (char *)ring;
+ ND("txring[%d] at %p ofs %d", i, ring, ring->buf_ofs);
+		*(int *)(uintptr_t)&ring->num_slots =
+ kring->nkr_num_slots = numdesc;
+
+ /*
+ * IMPORTANT:
+ * Always keep one slot empty, so we can detect new
+ * transmissions comparing cur and nr_hwcur (they are
+ * the same only if there are no new transmissions).
+ */
+ ring->avail = kring->nr_hwavail = numdesc - 1;
+ ring->cur = kring->nr_hwcur = 0;
+ netmap_new_bufs(&nm_buf_pool, ring->slot, numdesc);
+
+ ofs += sizeof(struct netmap_ring) +
+ numdesc * sizeof(struct netmap_slot);
+
+ /* Receive rings */
+ kring = &na->rx_rings[i];
+ numdesc = na->num_rx_desc;
+ bzero(kring, sizeof(*kring));
+ kring->na = na;
+
+ ring = kring->ring = (struct netmap_ring *)(buff + ofs);
+ *(ssize_t *)(uintptr_t)&ring->buf_ofs =
+ nm_buf_pool.base - (char *)ring;
+ ND("rxring[%d] at %p offset %d", i, ring, ring->buf_ofs);
+		*(int *)(uintptr_t)&ring->num_slots =
+ kring->nkr_num_slots = numdesc;
+ ring->cur = kring->nr_hwcur = 0;
+ ring->avail = kring->nr_hwavail = 0; /* empty */
+ netmap_new_bufs(&nm_buf_pool, ring->slot, numdesc);
+ ofs += sizeof(struct netmap_ring) +
+ numdesc * sizeof(struct netmap_slot);
+ }
+ NMA_UNLOCK();
+ for (i = 0; i < n+1; i++) {
+ // XXX initialize the selrecord structs.
+ }
+final:
+ /*
+ * fill the slots for the rx and tx queues. They contain the offset
+ * between the ring and nifp, so the information is usable in
+ * userspace to reach the ring from the nifp.
+ */
+ for (i = 0; i < n; i++) {
+ char *base = (char *)nifp;
+ *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
+ (char *)na->tx_rings[i].ring - base;
+ *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n] =
+ (char *)na->rx_rings[i].ring - base;
+ }
+ return (nifp);
+}
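+
+/*
+ * Given the ring_ofs[] array filled in above, userspace can reach
+ * ring i with pointer arithmetic alone; a sketch equivalent to what
+ * the NETMAP_TXRING() macro in the userspace header is expected to do:
+ *
+ *	struct netmap_ring *
+ *	tx_ring(struct netmap_if *nifp, int i)
+ *	{
+ *		return (struct netmap_ring *)
+ *		    ((char *)nifp + nifp->ring_ofs[i]);
+ *	}
+ *
+ * rx ring i is at nifp->ring_ofs[i + n], with n = ni_num_queues + 1,
+ * matching the i+n indexing used in the loop above.
+ */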
+
+
+/*
+ * mmap(2) support for the "netmap" device.
+ *
+ * Expose all the memory previously allocated by our custom memory
+ * allocator: this way the user has only to issue a single mmap(2), and
+ * can work on all the data structures flawlessly.
+ *
+ * Return 0 on success, -1 otherwise.
+ */
+static int
+#if __FreeBSD_version < 900000
+netmap_mmap(__unused struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr,
+ int nprot)
+#else
+netmap_mmap(__unused struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
+ int nprot, __unused vm_memattr_t *memattr)
+#endif
+{
+ if (nprot & PROT_EXEC)
+ return (-1); // XXX -1 or EINVAL ?
+ ND("request for offset 0x%x", (uint32_t)offset);
+ *paddr = vtophys(netmap_mem_d->nm_buffer) + offset;
+
+ return (0);
+}
+
+
+/*
+ * handler for synchronization of the queues from/to the host
+ */
+static void
+netmap_sync_to_host(struct netmap_adapter *na)
+{
+ struct netmap_kring *kring = &na->tx_rings[na->num_queues];
+ struct netmap_ring *ring = kring->ring;
+ struct mbuf *head = NULL, *tail = NULL, *m;
+ u_int n, lim = kring->nkr_num_slots - 1;
+
+ na->nm_lock(na->ifp->if_softc, NETMAP_CORE_LOCK, 0);
+
+ /* Take packets from hwcur to cur and pass them up.
+ * In case of no buffers we give up. At the end of the loop,
+ * the queue is drained in all cases.
+ */
+ for (n = kring->nr_hwcur; n != ring->cur;) {
+ struct netmap_slot *slot = &ring->slot[n];
+
+ n = (n == lim) ? 0 : n + 1;
+ if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) {
+ D("bad pkt at %d len %d", n, slot->len);
+ continue;
+ }
+ m = m_devget(NMB(slot), slot->len, 0, na->ifp, NULL);
+
+ if (m == NULL)
+ break;
+ if (tail)
+ tail->m_nextpkt = m;
+ else
+ head = m;
+ tail = m;
+ m->m_nextpkt = NULL;
+ }
+ kring->nr_hwcur = ring->cur;
+ kring->nr_hwavail = ring->avail = lim;
+ na->nm_lock(na->ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
+
+ /* send packets up, outside the lock */
+ while ((m = head) != NULL) {
+ head = head->m_nextpkt;
+ m->m_nextpkt = NULL;
+ m->m_pkthdr.rcvif = na->ifp;
+ if (netmap_verbose & NM_VERB_HOST)
+ D("sending up pkt %p size %d", m, m->m_pkthdr.len);
+ (na->ifp->if_input)(na->ifp, m);
+ }
+}
+
+/*
+ * This routine also does the selrecord if called from the poll handler
+ * (we know because td != NULL).
+ */
+static void
+netmap_sync_from_host(struct netmap_adapter *na, struct thread *td)
+{
+ struct netmap_kring *kring = &na->rx_rings[na->num_queues];
+ struct netmap_ring *ring = kring->ring;
+ int delta;
+
+ na->nm_lock(na->ifp->if_softc, NETMAP_CORE_LOCK, 0);
+
+ /* skip past packets processed by userspace,
+ * and then sync cur/avail with hwcur/hwavail
+ */
+ delta = ring->cur - kring->nr_hwcur;
+ if (delta < 0)
+ delta += kring->nkr_num_slots;
+ kring->nr_hwavail -= delta;
+ kring->nr_hwcur = ring->cur;
+ ring->avail = kring->nr_hwavail;
+ if (ring->avail == 0 && td)
+ selrecord(td, &kring->si);
+ if (ring->avail && (netmap_verbose & NM_VERB_HOST))
+ D("%d pkts from stack", ring->avail);
+ na->nm_lock(na->ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
+}
+
+
+/*
+ * get a refcounted reference to an interface.
+ * Return ENXIO if the interface does not exist, EINVAL if netmap
+ * is not supported by the interface.
+ * If successful, hold a reference.
+ */
+static int
+get_ifp(const char *name, struct ifnet **ifp)
+{
+ *ifp = ifunit_ref(name);
+ if (*ifp == NULL)
+ return (ENXIO);
+ /* can do this if the capability exists and if_pspare[0]
+ * points to the netmap descriptor.
+ */
+ if ((*ifp)->if_capabilities & IFCAP_NETMAP && NA(*ifp))
+ return 0; /* valid pointer, we hold the refcount */
+ if_rele(*ifp);
+ return EINVAL; // not NETMAP capable
+}
+
+
+/*
+ * Error routine called when txsync/rxsync detects an error.
+ * Can't do much more than resetting cur = hwcur, avail = hwavail.
+ * Return 1 on reinit.
+ */
+int
+netmap_ring_reinit(struct netmap_kring *kring)
+{
+ struct netmap_ring *ring = kring->ring;
+ u_int i, lim = kring->nkr_num_slots - 1;
+ int errors = 0;
+
+ D("called for %s", kring->na->ifp->if_xname);
+ if (ring->cur > lim)
+ errors++;
+ for (i = 0; i <= lim; i++) {
+ u_int idx = ring->slot[i].buf_idx;
+ u_int len = ring->slot[i].len;
+ if (idx < 2 || idx >= netmap_total_buffers) {
+ if (!errors++)
+ D("bad buffer at slot %d idx %d len %d ", i, idx, len);
+ ring->slot[i].buf_idx = 0;
+ ring->slot[i].len = 0;
+ } else if (len > NETMAP_BUF_SIZE) {
+ ring->slot[i].len = 0;
+ if (!errors++)
+ D("bad len %d at slot %d idx %d",
+ len, i, idx);
+ }
+ }
+ if (errors) {
+ int pos = kring - kring->na->tx_rings;
+ int n = kring->na->num_queues + 2;
+
+ D("total %d errors", errors);
+ errors++;
+ D("%s %s[%d] reinit, cur %d -> %d avail %d -> %d",
+ kring->na->ifp->if_xname,
+ pos < n ? "TX" : "RX", pos < n ? pos : pos - n,
+ ring->cur, kring->nr_hwcur,
+ ring->avail, kring->nr_hwavail);
+ ring->cur = kring->nr_hwcur;
+ ring->avail = kring->nr_hwavail;
+ ring->flags |= NR_REINIT;
+ kring->na->flags |= NR_REINIT;
+ }
+ return (errors ? 1 : 0);
+}
+
+/*
+ * Clean the reinit flag for our rings.
+ * XXX at the moment, clear for all rings
+ */
+static void
+netmap_clean_reinit(struct netmap_adapter *na)
+{
+ //struct netmap_kring *kring;
+ u_int i;
+
+ na->flags &= ~NR_REINIT;
+ D("--- NR_REINIT reset on %s", na->ifp->if_xname);
+ for (i = 0; i < na->num_queues + 1; i++) {
+ na->tx_rings[i].ring->flags &= ~NR_REINIT;
+ na->rx_rings[i].ring->flags &= ~NR_REINIT;
+ }
+}
+
+/*
+ * Set the ring ID. For devices with a single queue, a request
+ * for all rings is the same as a single ring.
+ */
+static int
+netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
+{
+ struct ifnet *ifp = priv->np_ifp;
+ struct netmap_adapter *na = NA(ifp);
+ void *adapter = na->ifp->if_softc; /* shorthand */
+ u_int i = ringid & NETMAP_RING_MASK;
+ /* first time we don't lock */
+ int need_lock = (priv->np_qfirst != priv->np_qlast);
+
+ if ( (ringid & NETMAP_HW_RING) && i >= na->num_queues) {
+ D("invalid ring id %d", i);
+ return (EINVAL);
+ }
+ if (need_lock)
+ na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
+ priv->np_ringid = ringid;
+ if (ringid & NETMAP_SW_RING) {
+ priv->np_qfirst = na->num_queues;
+ priv->np_qlast = na->num_queues + 1;
+ } else if (ringid & NETMAP_HW_RING) {
+ priv->np_qfirst = i;
+ priv->np_qlast = i + 1;
+ } else {
+ priv->np_qfirst = 0;
+ priv->np_qlast = na->num_queues;
+ }
+ priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1;
+ if (need_lock)
+ na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
+ if (ringid & NETMAP_SW_RING)
+ D("ringid %s set to SW RING", ifp->if_xname);
+ else if (ringid & NETMAP_HW_RING)
+ D("ringid %s set to HW RING %d", ifp->if_xname,
+ priv->np_qfirst);
+ else
+ D("ringid %s set to all %d HW RINGS", ifp->if_xname,
+ priv->np_qlast);
+ return 0;
+}
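+
+/*
+ * Examples of how a client composes nr_ringid for the cases handled
+ * above (the flag names are those tested in netmap_set_ringid()):
+ *
+ *	req.nr_ringid = 0;			// all hardware rings
+ *	req.nr_ringid = NETMAP_HW_RING | 2;	// hardware ring 2 only
+ *	req.nr_ringid = NETMAP_SW_RING;		// host (stack) ring
+ *	req.nr_ringid |= NETMAP_NO_TX_POLL;	// no tx sync on poll()
+ */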
+
+/*
+ * ioctl(2) support for the "netmap" device.
+ *
+ * The following commands are accepted:
+ * - NIOCGINFO
+ * - SIOCGIFADDR just for convenience
+ * - NIOCREGIF
+ * - NIOCUNREGIF
+ * - NIOCTXSYNC
+ * - NIOCRXSYNC
+ *
+ * Return 0 on success, errno otherwise.
+ */
+static int
+netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
+	__unused int fflag, struct thread *td)
+{
+ struct netmap_priv_d *priv = NULL;
+ struct ifnet *ifp;
+ struct nmreq *nmr = (struct nmreq *) data;
+ struct netmap_adapter *na;
+ void *adapter;
+ int error;
+ u_int i;
+ struct netmap_if *nifp;
+
+ error = devfs_get_cdevpriv((void **)&priv);
+ if (error != ENOENT && error != 0)
+ return (error);
+
+ error = 0; /* Could be ENOENT */
+ switch (cmd) {
+ case NIOCGINFO: /* return capabilities etc */
+ /* memsize is always valid */
+ nmr->nr_memsize = netmap_mem_d->nm_totalsize;
+ nmr->nr_offset = 0;
+ nmr->nr_numrings = 0;
+ nmr->nr_numslots = 0;
+ if (nmr->nr_name[0] == '\0') /* just get memory info */
+ break;
+ error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */
+ if (error)
+ break;
+ na = NA(ifp); /* retrieve netmap_adapter */
+ nmr->nr_numrings = na->num_queues;
+ nmr->nr_numslots = na->num_tx_desc;
+ if_rele(ifp); /* return the refcount */
+ break;
+
+ case NIOCREGIF:
+ if (priv != NULL) /* thread already registered */
+ return netmap_set_ringid(priv, nmr->nr_ringid);
+ /* find the interface and a reference */
+ error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
+ if (error)
+ break;
+ na = NA(ifp); /* retrieve netmap adapter */
+ adapter = na->ifp->if_softc; /* shorthand */
+ /*
+ * Allocate the private per-thread structure.
+ * XXX perhaps we can use a blocking malloc ?
+ */
+ priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (priv == NULL) {
+ error = ENOMEM;
+ if_rele(ifp); /* return the refcount */
+ break;
+ }
+
+
+ for (i = 10; i > 0; i--) {
+ na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
+ if (!NETMAP_DELETING(na))
+ break;
+ na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
+ tsleep(na, 0, "NIOCREGIF", hz/10);
+ }
+ if (i == 0) {
+ D("too many NIOCREGIF attempts, give up");
+ error = EINVAL;
+ free(priv, M_DEVBUF);
+ if_rele(ifp); /* return the refcount */
+ break;
+ }
+
+ priv->np_ifp = ifp; /* store the reference */
+ error = netmap_set_ringid(priv, nmr->nr_ringid);
+ if (error)
+ goto error;
+ priv->np_nifp = nifp = netmap_if_new(nmr->nr_name, na);
+ if (nifp == NULL) { /* allocation failed */
+ error = ENOMEM;
+ } else if (ifp->if_capenable & IFCAP_NETMAP) {
+ /* was already set */
+ } else {
+ /* Otherwise set the card in netmap mode
+ * and make it use the shared buffers.
+ */
+ error = na->nm_register(ifp, 1); /* mode on */
+ if (error) {
+ /*
+ * do something similar to netmap_dtor().
+ */
+ netmap_free(na->tx_rings[0].ring, "rings, reg.failed");
+ free(na->tx_rings, M_DEVBUF);
+ na->tx_rings = na->rx_rings = NULL;
+ na->refcount--;
+ netmap_free(nifp, "nifp, rings failed");
+ nifp = NULL;
+ }
+ }
+
+ if (error) { /* reg. failed, release priv and ref */
+error:
+ na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
+ free(priv, M_DEVBUF);
+ if_rele(ifp); /* return the refcount */
+ break;
+ }
+
+ na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
+ error = devfs_set_cdevpriv(priv, netmap_dtor);
+
+ if (error != 0) {
+ /* could not assign the private storage for the
+ * thread, call the destructor explicitly.
+ */
+ netmap_dtor(priv);
+ break;
+ }
+
+ /* return the offset of the netmap_if object */
+ nmr->nr_numrings = na->num_queues;
+ nmr->nr_numslots = na->num_tx_desc;
+ nmr->nr_memsize = netmap_mem_d->nm_totalsize;
+ nmr->nr_offset =
+ ((char *) nifp - (char *) netmap_mem_d->nm_buffer);
+ break;
+
+ case NIOCUNREGIF:
+ if (priv == NULL)
+ return (ENXIO);
+
+ /* the interface is unregistered inside the
+ destructor of the private data. */
+ devfs_clear_cdevpriv();
+ break;
+
+ case NIOCTXSYNC:
+ case NIOCRXSYNC:
+ if (priv == NULL)
+ return (ENXIO);
+ ifp = priv->np_ifp; /* we have a reference */
+ na = NA(ifp); /* retrieve netmap adapter */
+ adapter = ifp->if_softc; /* shorthand */
+
+ if (na->flags & NR_REINIT)
+ netmap_clean_reinit(na);
+
+ if (priv->np_qfirst == na->num_queues) {
+ /* queues to/from host */
+ if (cmd == NIOCTXSYNC)
+ netmap_sync_to_host(na);
+ else
+ netmap_sync_from_host(na, NULL);
+ return error;
+ }
+
+ for (i = priv->np_qfirst; i < priv->np_qlast; i++) {
+ if (cmd == NIOCTXSYNC) {
+ struct netmap_kring *kring = &na->tx_rings[i];
+ if (netmap_verbose & NM_VERB_TXSYNC)
+ D("sync tx ring %d cur %d hwcur %d",
+ i, kring->ring->cur,
+ kring->nr_hwcur);
+ na->nm_txsync(adapter, i, 1 /* do lock */);
+ if (netmap_verbose & NM_VERB_TXSYNC)
+ D("after sync tx ring %d cur %d hwcur %d",
+ i, kring->ring->cur,
+ kring->nr_hwcur);
+ } else {
+ na->nm_rxsync(adapter, i, 1 /* do lock */);
+ microtime(&na->rx_rings[i].ring->ts);
+ }
+ }
+
+ break;
+
+ case BIOCIMMEDIATE:
+ case BIOCGHDRCMPLT:
+ case BIOCSHDRCMPLT:
+ case BIOCSSEESENT:
+		D("ignore BIOCIMMEDIATE/BIOCGHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
+ break;
+
+ default:
+ {
+ /*
+ * allow device calls
+ */
+ struct socket so;
+ bzero(&so, sizeof(so));
+ error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
+ if (error)
+ break;
+ so.so_vnet = ifp->if_vnet;
+ // so->so_proto not null.
+ error = ifioctl(&so, cmd, data, td);
+ if_rele(ifp);
+ }
+ }
+
+ return (error);
+}
+
+
+/*
+ * select(2) and poll(2) handlers for the "netmap" device.
+ *
+ * Can be called for one or more queues.
+ * Return the event mask corresponding to ready events.
+ * If there are no ready events, do a selrecord on either individual
+ * selfd or on the global one.
+ * Device-dependent parts (locking and sync of tx/rx rings)
+ * are done through callbacks.
+ */
+static int
+netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
+{
+ struct netmap_priv_d *priv = NULL;
+ struct netmap_adapter *na;
+ struct ifnet *ifp;
+ struct netmap_kring *kring;
+ u_int i, check_all, want_tx, want_rx, revents = 0;
+ void *adapter;
+
+ if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL)
+ return POLLERR;
+
+ ifp = priv->np_ifp;
+ // XXX check for deleting() ?
+ if ( (ifp->if_capenable & IFCAP_NETMAP) == 0)
+ return POLLERR;
+
+ if (netmap_verbose & 0x8000)
+ D("device %s events 0x%x", ifp->if_xname, events);
+ want_tx = events & (POLLOUT | POLLWRNORM);
+ want_rx = events & (POLLIN | POLLRDNORM);
+
+ adapter = ifp->if_softc;
+ na = NA(ifp); /* retrieve netmap adapter */
+
+ /* pending reinit, report up as a poll error. Pending
+ * reads and writes are lost.
+ */
+ if (na->flags & NR_REINIT) {
+ netmap_clean_reinit(na);
+ revents |= POLLERR;
+ }
+ /* how many queues we are scanning */
+ i = priv->np_qfirst;
+ if (i == na->num_queues) { /* from/to host */
+ if (priv->np_txpoll || want_tx) {
+ /* push any packets up, then we are always ready */
+ kring = &na->tx_rings[i];
+ netmap_sync_to_host(na);
+ revents |= want_tx;
+ }
+ if (want_rx) {
+ kring = &na->rx_rings[i];
+ if (kring->ring->avail == 0)
+ netmap_sync_from_host(na, td);
+ if (kring->ring->avail > 0) {
+ revents |= want_rx;
+ }
+ }
+ return (revents);
+ }
+
+ /*
+ * check_all is set if the card has more than one queue and
+ * the client is polling all of them. If true, we sleep on
+ * the "global" selfd, otherwise we sleep on individual selfd
+ * (we can only sleep on one of them per direction).
+ * The interrupt routine in the driver should always wake on
+ * the individual selfd, and also on the global one if the card
+ * has more than one ring.
+ *
+ * If the card has only one lock, we just use that.
+ * If the card has separate ring locks, we just use those
+ * unless we are doing check_all, in which case the whole
+ * loop is wrapped by the global lock.
+ * We acquire locks only when necessary: if poll is called
+ * when buffers are available, we can just return without locks.
+ *
+ * rxsync() is only called if we run out of buffers on a POLLIN.
+ * txsync() is called if we run out of buffers on POLLOUT, or
+ * there are pending packets to send. The latter can be disabled
+ * passing NETMAP_NO_TX_POLL in the NIOCREG call.
+ */
+ check_all = (i + 1 != priv->np_qlast);
+
+ /*
+ * core_lock indicates what to do with the core lock.
+ * The core lock is used when either the card has no individual
+	 * locks, or it has individual locks but we are checking all
+ * rings so we need the core lock to avoid missing wakeup events.
+ *
+ * It has three possible states:
+ * NO_CL we don't need to use the core lock, e.g.
+ * because we are protected by individual locks.
+ * NEED_CL we need the core lock. In this case, when we
+ * call the lock routine, move to LOCKED_CL
+ * to remember to release the lock once done.
+ * LOCKED_CL core lock is set, so we need to release it.
+ */
+ enum {NO_CL, NEED_CL, LOCKED_CL };
+ int core_lock = (check_all || !na->separate_locks) ?
+ NEED_CL:NO_CL;
+ /*
+ * We start with a lock free round which is good if we have
+ * data available. If this fails, then lock and call the sync
+ * routines.
+ */
+ for (i = priv->np_qfirst; want_rx && i < priv->np_qlast; i++) {
+ kring = &na->rx_rings[i];
+ if (kring->ring->avail > 0) {
+ revents |= want_rx;
+ want_rx = 0; /* also breaks the loop */
+ }
+ }
+ for (i = priv->np_qfirst; want_tx && i < priv->np_qlast; i++) {
+ kring = &na->tx_rings[i];
+ if (kring->ring->avail > 0) {
+ revents |= want_tx;
+ want_tx = 0; /* also breaks the loop */
+ }
+ }
+
+ /*
+	 * If we need to push packets out (priv->np_txpoll) or want_tx is
+ * still set, we do need to run the txsync calls (on all rings,
+ * to avoid that the tx rings stall).
+ */
+ if (priv->np_txpoll || want_tx) {
+ for (i = priv->np_qfirst; i < priv->np_qlast; i++) {
+ kring = &na->tx_rings[i];
+ if (!want_tx && kring->ring->cur == kring->nr_hwcur)
+ continue;
+ if (core_lock == NEED_CL) {
+ na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
+ core_lock = LOCKED_CL;
+ }
+ if (na->separate_locks)
+ na->nm_lock(adapter, NETMAP_TX_LOCK, i);
+ if (netmap_verbose & NM_VERB_TXSYNC)
+ D("send %d on %s %d",
+ kring->ring->cur,
+ ifp->if_xname, i);
+ if (na->nm_txsync(adapter, i, 0 /* no lock */))
+ revents |= POLLERR;
+
+ if (want_tx) {
+ if (kring->ring->avail > 0) {
+ /* stop at the first ring. We don't risk
+ * starvation.
+ */
+ revents |= want_tx;
+ want_tx = 0;
+ } else if (!check_all)
+ selrecord(td, &kring->si);
+ }
+ if (na->separate_locks)
+ na->nm_lock(adapter, NETMAP_TX_UNLOCK, i);
+ }
+ }
+
+ /*
+ * now if want_rx is still set we need to lock and rxsync.
+ * Do it on all rings because otherwise we starve.
+ */
+ if (want_rx) {
+ for (i = priv->np_qfirst; i < priv->np_qlast; i++) {
+ kring = &na->rx_rings[i];
+ if (core_lock == NEED_CL) {
+ na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
+ core_lock = LOCKED_CL;
+ }
+ if (na->separate_locks)
+ na->nm_lock(adapter, NETMAP_RX_LOCK, i);
+
+ if (na->nm_rxsync(adapter, i, 0 /* no lock */))
+ revents |= POLLERR;
+ if (no_timestamp == 0 ||
+ kring->ring->flags & NR_TIMESTAMP)
+ microtime(&kring->ring->ts);
+
+ if (kring->ring->avail > 0)
+ revents |= want_rx;
+ else if (!check_all)
+ selrecord(td, &kring->si);
+ if (na->separate_locks)
+ na->nm_lock(adapter, NETMAP_RX_UNLOCK, i);
+ }
+ }
+ if (check_all && revents == 0) {
+ i = na->num_queues + 1; /* the global queue */
+ if (want_tx)
+ selrecord(td, &na->tx_rings[i].si);
+ if (want_rx)
+ selrecord(td, &na->rx_rings[i].si);
+ }
+ if (core_lock == LOCKED_CL)
+ na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
+
+ return (revents);
+}
+
+/*------- driver support routines ------*/
+
+/*
+ * Initialize a ``netmap_adapter`` object created by the driver on attach.
+ * We allocate a block of memory with room for a struct netmap_adapter
+ * plus two sets of N+2 struct netmap_kring (where N is the number
+ * of hardware rings):
+ * krings 0..N-1 are for the hardware queues.
+ * kring N is for the host stack queue
+ * kring N+1 is only used for the selinfo for all queues.
+ * Return 0 on success, ENOMEM otherwise.
+ */
+int
+netmap_attach(struct netmap_adapter *na, int num_queues)
+{
+ int n = num_queues + 2;
+ int size = sizeof(*na) + 2 * n * sizeof(struct netmap_kring);
+ void *buf;
+ struct ifnet *ifp = na->ifp;
+
+ if (ifp == NULL) {
+ D("ifp not set, giving up");
+ return EINVAL;
+ }
+ na->refcount = 0;
+ na->num_queues = num_queues;
+
+ buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (buf) {
+ ifp->if_pspare[0] = buf;
+ na->tx_rings = (void *)((char *)buf + sizeof(*na));
+ na->rx_rings = na->tx_rings + n;
+ bcopy(na, buf, sizeof(*na));
+ ifp->if_capabilities |= IFCAP_NETMAP;
+ }
+ D("%s for %s", buf ? "ok" : "failed", ifp->if_xname);
+
+ return (buf ? 0 : ENOMEM);
+}
+
+
+/*
+ * Free the allocated memory linked to the given ``netmap_adapter``
+ * object.
+ */
+void
+netmap_detach(struct ifnet *ifp)
+{
+ u_int i;
+ struct netmap_adapter *na = NA(ifp);
+
+ if (!na)
+ return;
+
+ for (i = 0; i < na->num_queues + 2; i++) {
+ knlist_destroy(&na->tx_rings[i].si.si_note);
+ knlist_destroy(&na->rx_rings[i].si.si_note);
+ }
+ bzero(na, sizeof(*na));
+ ifp->if_pspare[0] = NULL;
+ free(na, M_DEVBUF);
+}
+
+
+/*
+ * intercept packets coming from the network stack and present
+ * them to netmap as incoming packets on a separate ring.
+ * We are not locked when called.
+ */
+int
+netmap_start(struct ifnet *ifp, struct mbuf *m)
+{
+ struct netmap_adapter *na = NA(ifp);
+ u_int i, len, n = na->num_queues;
+ int error = EBUSY;
+ struct netmap_kring *kring = &na->rx_rings[n];
+ struct netmap_slot *slot;
+
+ len = m->m_pkthdr.len;
+ if (netmap_verbose & NM_VERB_HOST)
+ D("%s packet %d len %d from the stack", ifp->if_xname,
+ kring->nr_hwcur + kring->nr_hwavail, len);
+ na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
+ if (kring->nr_hwavail >= (int)kring->nkr_num_slots - 1) {
+ D("stack ring %s full\n", ifp->if_xname);
+ goto done; /* no space */
+ }
+ if (len > na->buff_size) {
+ D("drop packet size %d > %d", len, na->buff_size);
+ goto done; /* too long for us */
+ }
+
+ /* compute the insert position */
+ i = kring->nr_hwcur + kring->nr_hwavail;
+ if (i >= kring->nkr_num_slots)
+ i -= kring->nkr_num_slots;
+ slot = &kring->ring->slot[i];
+ m_copydata(m, 0, len, NMB(slot));
+ slot->len = len;
+ kring->nr_hwavail++;
+ if (netmap_verbose & NM_VERB_HOST)
+ D("wake up host ring %s %d", na->ifp->if_xname, na->num_queues);
+ selwakeuppri(&kring->si, PI_NET);
+ error = 0;
+done:
+ na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
+
+	/* release the mbuf in both the success and failure cases. As an
+ * alternative, put the mbuf in a free list and free the list
+ * only when really necessary.
+ */
+ m_freem(m);
+
+ return (error);
+}
+
+
+/*
+ * netmap_reset() is called by the driver routines when reinitializing
+ * a ring. The driver is in charge of locking to protect the kring.
+ * If netmap mode is not set just return NULL.
+ * Otherwise set NR_REINIT (in the ring and in na) to signal
+ * that a ring has been reinitialized,
+ * set cur = hwcur = 0 and avail = hwavail = num_slots - 1.
+ * IT IS IMPORTANT to leave one slot free even in the tx ring because
+ * we rely on cur=hwcur only for empty rings.
+ * These are good defaults but can be overridden later in the device
+ * specific code if, after a reinit, the ring does not start from 0
+ * (e.g. if_em.c does this).
+ *
+ * XXX we shouldn't be touching the ring, but there is a
+ * race anyways and this is our best option.
+ *
+ * XXX setting na->flags makes the syscall code faster, as there is
+ * only one place to check. On the other hand, we will need a better
+ * way to notify multiple threads that rings have been reset.
+ * One way is to increment na->rst_count at each ring reset.
+ * Each thread in its own priv structure will keep a matching counter,
+ * and on a reset will acknowledge and clean its own rings.
+ */
+struct netmap_slot *
+netmap_reset(struct netmap_adapter *na, enum txrx tx, int n,
+ u_int new_cur)
+{
+ struct netmap_kring *kring;
+ struct netmap_ring *ring;
+ struct netmap_slot *slot;
+ u_int i;
+
+ if (na == NULL)
+ return NULL; /* no netmap support here */
+ if (!(na->ifp->if_capenable & IFCAP_NETMAP))
+ return NULL; /* nothing to reinitialize */
+ kring = tx == NR_TX ? na->tx_rings + n : na->rx_rings + n;
+ ring = kring->ring;
+ if (tx == NR_TX) {
+ /*
+ * The last argument is the new value of next_to_clean.
+ *
+ * In the TX ring, we have P pending transmissions (from
+ * next_to_clean to nr_hwcur) followed by nr_hwavail free slots.
+ * Generally we can use all the slots in the ring so
+ * P = ring_size - nr_hwavail hence (modulo ring_size):
+ * next_to_clean == nr_hwcur + nr_hwavail
+ *
+		 * If, upon a reset, nr_hwavail == ring_size - 1 and
+		 * next_to_clean does not change, we have nothing to report.
+		 * Otherwise some pending packets may be lost, or newly
+		 * injected packets will be.
+ */
+ /* if hwcur does not change, nothing to report.
+ * otherwise remember the change so perhaps we can
+ * shift the block at the next reinit
+ */
+ if (new_cur == kring->nr_hwcur &&
+ kring->nr_hwavail == kring->nkr_num_slots - 1) {
+ /* all ok */
+ D("+++ NR_REINIT ok on %s TX[%d]", na->ifp->if_xname, n);
+ } else {
+ D("+++ NR_REINIT set on %s TX[%d]", na->ifp->if_xname, n);
+ }
+ ring->flags |= NR_REINIT;
+ na->flags |= NR_REINIT;
+ ring->avail = kring->nr_hwavail = kring->nkr_num_slots - 1;
+ ring->cur = kring->nr_hwcur = new_cur;
+ } else {
+ /*
+ * The last argument is the next free slot.
+ * In the RX ring we have nr_hwavail full buffers starting
+ * from nr_hwcur.
+ * If nr_hwavail == 0 and nr_hwcur does not change we are ok
+ * otherwise we might be in trouble as the buffers are
+ * changing.
+ */
+ if (new_cur == kring->nr_hwcur && kring->nr_hwavail == 0) {
+ /* all ok */
+ D("+++ NR_REINIT ok on %s RX[%d]", na->ifp->if_xname, n);
+ } else {
+ D("+++ NR_REINIT set on %s RX[%d]", na->ifp->if_xname, n);
+ }
+ ring->flags |= NR_REINIT;
+ na->flags |= NR_REINIT;
+ ring->avail = kring->nr_hwavail = 0; /* no data */
+ ring->cur = kring->nr_hwcur = new_cur;
+ }
+
+ slot = ring->slot;
+ /*
+ * Check that buffer indexes are correct. If we find a
+ * bogus value we are a bit in trouble because we cannot
+ * recover easily. Best we can do is (probably) persistently
+ * reset the ring.
+ */
+ for (i = 0; i < kring->nkr_num_slots; i++) {
+ if (slot[i].buf_idx >= netmap_total_buffers) {
+ D("invalid buf_idx %d at slot %d", slot[i].buf_idx, i);
+ slot[i].buf_idx = 0; /* XXX reset */
+ }
+ /* XXX we don't really need to set the length */
+ slot[i].len = 0;
+ }
+ /* wakeup possible waiters, both on the ring and on the global
+ * selfd. Perhaps a bit early now but the device specific
+ * routine is locked so hopefully we won't have a race.
+ */
+ selwakeuppri(&kring->si, PI_NET);
+ selwakeuppri(&kring[na->num_queues + 1 - n].si, PI_NET);
+ return kring->ring->slot;
+}
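+
+/*
+ * Example (a sketch only; sc->tx_tag and sc->tx_buf are hypothetical
+ * softc fields, not part of this API): a driver's reinit path calls
+ * netmap_reset() right after rebuilding a TX ring and, if netmap mode
+ * is active, reloads the DMA maps from the returned slot array:
+ *
+ *	slot = netmap_reset(NA(ifp), NR_TX, ring_nr, 0);
+ *	if (slot != NULL) {
+ *		for (j = 0; j < na->num_tx_desc; j++)
+ *			netmap_load_map(sc->tx_tag, sc->tx_buf[j].map,
+ *			    NMB(slot + j), na->buff_size);
+ *	}
+ */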
+
+static void
+ns_dmamap_cb(__unused void *arg, __unused bus_dma_segment_t *segs,
+ __unused int nseg, __unused int error)
+{
+}
+
+/* unload a bus_dmamap and create a new one. Used when the
+ * buffer in the slot is changed.
+ * XXX buflen is probably not needed, buffers have constant size.
+ */
+void
+netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map,
+ void *buf, bus_size_t buflen)
+{
+ bus_addr_t paddr;
+ bus_dmamap_unload(tag, map);
+ bus_dmamap_load(tag, map, buf, buflen, ns_dmamap_cb, &paddr,
+ BUS_DMA_NOWAIT);
+}
+
+void
+netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map,
+ void *buf, bus_size_t buflen)
+{
+ bus_addr_t paddr;
+ bus_dmamap_load(tag, map, buf, buflen, ns_dmamap_cb, &paddr,
+ BUS_DMA_NOWAIT);
+}
+
+/*------ netmap memory allocator -------*/
+/*
+ * Request for a chunk of memory.
+ *
+ * Memory objects are arranged into a list, hence we need to walk this
+ * list until we find an object with the needed amount of data free.
+ * This linear scan sounds inefficient, but data allocation is done
+ * only once, at setup time, so the cost is negligible.
+ *
+ * Return NULL on failure.
+ */
+static void *
+netmap_malloc(size_t size, __unused const char *msg)
+{
+ struct netmap_mem_obj *mem_obj, *new_mem_obj;
+ void *ret = NULL;
+
+ NMA_LOCK();
+ TAILQ_FOREACH(mem_obj, &netmap_mem_d->nm_molist, nmo_next) {
+ if (mem_obj->nmo_used != 0 || mem_obj->nmo_size < size)
+ continue;
+
+ new_mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP,
+ M_WAITOK | M_ZERO);
+ TAILQ_INSERT_BEFORE(mem_obj, new_mem_obj, nmo_next);
+
+ new_mem_obj->nmo_used = 1;
+ new_mem_obj->nmo_size = size;
+ new_mem_obj->nmo_data = mem_obj->nmo_data;
+ memset(new_mem_obj->nmo_data, 0, new_mem_obj->nmo_size);
+
+ mem_obj->nmo_size -= size;
+ mem_obj->nmo_data = (char *) mem_obj->nmo_data + size;
+ if (mem_obj->nmo_size == 0) {
+ TAILQ_REMOVE(&netmap_mem_d->nm_molist, mem_obj,
+ nmo_next);
+ free(mem_obj, M_NETMAP);
+ }
+
+ ret = new_mem_obj->nmo_data;
+
+ break;
+ }
+ NMA_UNLOCK();
+ ND("%s: %d bytes at %p", msg, size, ret);
+
+ return (ret);
+}
+
+/*
+ * Return the memory to the allocator.
+ *
+ * While freeing a memory object, we try to merge adjacent chunks in
+ * order to reduce memory fragmentation.
+ */
+static void
+netmap_free(void *addr, const char *msg)
+{
+ size_t size;
+ struct netmap_mem_obj *cur, *prev, *next;
+
+ if (addr == NULL) {
+ D("NULL addr for %s", msg);
+ return;
+ }
+
+ NMA_LOCK();
+ TAILQ_FOREACH(cur, &netmap_mem_d->nm_molist, nmo_next) {
+ if (cur->nmo_data == addr && cur->nmo_used)
+ break;
+ }
+ if (cur == NULL) {
+ NMA_UNLOCK();
+ D("invalid addr %s %p", msg, addr);
+ return;
+ }
+
+ size = cur->nmo_size;
+ cur->nmo_used = 0;
+
+ /* merge current chunk of memory with the previous one,
+ if present. */
+ prev = TAILQ_PREV(cur, netmap_mem_obj_h, nmo_next);
+ if (prev && prev->nmo_used == 0) {
+ TAILQ_REMOVE(&netmap_mem_d->nm_molist, cur, nmo_next);
+ prev->nmo_size += cur->nmo_size;
+ free(cur, M_NETMAP);
+ cur = prev;
+ }
+
+ /* merge with the next one */
+ next = TAILQ_NEXT(cur, nmo_next);
+ if (next && next->nmo_used == 0) {
+ TAILQ_REMOVE(&netmap_mem_d->nm_molist, next, nmo_next);
+ cur->nmo_size += next->nmo_size;
+ free(next, M_NETMAP);
+ }
+ NMA_UNLOCK();
+ ND("freed %s %d bytes at %p", msg, size, addr);
+}
+
+
+/*
+ * Initialize the memory allocator.
+ *
+ * Create the descriptor for the memory, allocate the pool of memory
+ * and initialize the list of memory objects with a single chunk
+ * containing the whole pre-allocated memory marked as free.
+ *
+ * Start with a large size, then halve as needed if we fail to
+ * allocate the block. While halving, always add one extra page
+ * because buffers 0 and 1 are used for special purposes.
+ * Return 0 on success, errno otherwise.
+ */
+static int
+netmap_memory_init(void)
+{
+ struct netmap_mem_obj *mem_obj;
+ void *buf = NULL;
+ int i, n, sz = NETMAP_MEMORY_SIZE;
+ int extra_sz = 0; // space for rings and two spare buffers
+
+	for (; !buf && sz >= 1<<20; sz >>= 1) {
+ extra_sz = sz/200;
+ extra_sz = (extra_sz + 2*PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
+ buf = contigmalloc(sz + extra_sz,
+ M_NETMAP,
+ M_WAITOK | M_ZERO,
+ 0, /* low address */
+ -1UL, /* high address */
+ PAGE_SIZE, /* alignment */
+ 0 /* boundary */
+ );
+ }
+ if (buf == NULL)
+ return (ENOMEM);
+ sz += extra_sz;
+ netmap_mem_d = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
+ M_WAITOK | M_ZERO);
+ mtx_init(&netmap_mem_d->nm_mtx, "netmap memory allocator lock", NULL,
+ MTX_DEF);
+ TAILQ_INIT(&netmap_mem_d->nm_molist);
+ netmap_mem_d->nm_buffer = buf;
+ netmap_mem_d->nm_totalsize = sz;
+
+	/*
+	 * A buffer takes 2k, a slot takes 8 bytes + ring overhead,
+	 * so one slot per buffer needs at most 1/256 of the space
+	 * (2048/8 = 256); reserving 1/200 of the memory for the rings
+	 * and the rest for the buffers is thus slightly conservative,
+	 * and we can be sure we never run out.
+	 */
+ netmap_mem_d->nm_size = sz/200;
+ netmap_mem_d->nm_buf_start =
+ (netmap_mem_d->nm_size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
+ netmap_mem_d->nm_buf_len = sz - netmap_mem_d->nm_buf_start;
+
+ nm_buf_pool.base = netmap_mem_d->nm_buffer;
+ nm_buf_pool.base += netmap_mem_d->nm_buf_start;
+ netmap_buffer_base = nm_buf_pool.base;
+ D("netmap_buffer_base %p (offset %d)",
+ netmap_buffer_base, netmap_mem_d->nm_buf_start);
+ /* number of buffers, they all start as free */
+
+ netmap_total_buffers = nm_buf_pool.total_buffers =
+ netmap_mem_d->nm_buf_len / NETMAP_BUF_SIZE;
+ nm_buf_pool.bufsize = NETMAP_BUF_SIZE;
+
+ D("Have %d MB, use %dKB for rings, %d buffers at %p",
+ (sz >> 20), (netmap_mem_d->nm_size >> 10),
+ nm_buf_pool.total_buffers, nm_buf_pool.base);
+
+	/* allocate and initialize the bitmap. Buffers 0 and 1 are
+	 * considered always busy (used as defaults when there are no
+	 * buffers left).
+	 */
+ n = (nm_buf_pool.total_buffers + 31) / 32;
+ nm_buf_pool.bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP,
+ M_WAITOK | M_ZERO);
+ nm_buf_pool.bitmap[0] = ~3; /* slot 0 and 1 always busy */
+ for (i = 1; i < n; i++)
+ nm_buf_pool.bitmap[i] = ~0;
+ nm_buf_pool.free = nm_buf_pool.total_buffers - 2;
+
+ mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP,
+ M_WAITOK | M_ZERO);
+ TAILQ_INSERT_HEAD(&netmap_mem_d->nm_molist, mem_obj, nmo_next);
+ mem_obj->nmo_used = 0;
+ mem_obj->nmo_size = netmap_mem_d->nm_size;
+ mem_obj->nmo_data = netmap_mem_d->nm_buffer;
+
+ return (0);
+}
+
+
+/*
+ * Finalize the memory allocator.
+ *
+ * Free all the memory objects contained inside the list, and deallocate
+ * the pool of memory; finally free the memory allocator descriptor.
+ */
+static void
+netmap_memory_fini(void)
+{
+ struct netmap_mem_obj *mem_obj;
+
+ while (!TAILQ_EMPTY(&netmap_mem_d->nm_molist)) {
+ mem_obj = TAILQ_FIRST(&netmap_mem_d->nm_molist);
+ TAILQ_REMOVE(&netmap_mem_d->nm_molist, mem_obj, nmo_next);
+ if (mem_obj->nmo_used == 1) {
+ printf("netmap: leaked %d bytes at %p\n",
+ mem_obj->nmo_size,
+ mem_obj->nmo_data);
+ }
+ free(mem_obj, M_NETMAP);
+ }
+ contigfree(netmap_mem_d->nm_buffer, netmap_mem_d->nm_totalsize, M_NETMAP);
+ // XXX mutex_destroy(nm_mtx);
+ free(netmap_mem_d, M_NETMAP);
+}
+
+
+/*
+ * Module loader.
+ *
+ * Create the /dev/netmap device and initialize all global
+ * variables.
+ *
+ * Return 0 on success, errno on failure.
+ */
+static int
+netmap_init(void)
+{
+ int error;
+
+ error = netmap_memory_init();
+ if (error != 0) {
+ printf("netmap: unable to initialize the memory allocator.");
+ return (error);
+ }
+ printf("netmap: loaded module with %d Mbytes\n",
+ netmap_mem_d->nm_totalsize >> 20);
+
+ netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
+ "netmap");
+
+ return (0);
+}
+
+
+/*
+ * Module unloader.
+ *
+ * Free all the memory, and destroy the ``/dev/netmap`` device.
+ */
+static void
+netmap_fini(void)
+{
+ destroy_dev(netmap_dev);
+
+ netmap_memory_fini();
+
+ printf("netmap: unloaded module.\n");
+}
+
+
+/*
+ * Kernel entry point.
+ *
+ * Initialize/finalize the module and return.
+ *
+ * Return 0 on success, errno on failure.
+ */
+static int
+netmap_loader(__unused struct module *module, int event, __unused void *arg)
+{
+ int error = 0;
+
+ switch (event) {
+ case MOD_LOAD:
+ error = netmap_init();
+ break;
+
+ case MOD_UNLOAD:
+ netmap_fini();
+ break;
+
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+
+ return (error);
+}
+
+
+DEV_MODULE(netmap, netmap_loader, NULL);
diff --git a/sys/dev/netmap/netmap_kern.h b/sys/dev/netmap/netmap_kern.h
new file mode 100644
index 0000000..5434609
--- /dev/null
+++ b/sys/dev/netmap/netmap_kern.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: netmap_kern.h 9662 2011-11-16 13:18:06Z luigi $
+ *
+ * The header contains the definitions of constants and function
+ * prototypes used only in kernelspace.
+ */
+
+#ifndef _NET_NETMAP_KERN_H_
+#define _NET_NETMAP_KERN_H_
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_NETMAP);
+#endif
+
+#define ND(format, ...)
+#define D(format, ...) \
+ do { \
+ struct timeval __xxts; \
+ microtime(&__xxts); \
+ printf("%03d.%06d %s [%d] " format "\n",\
+ (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \
+ __FUNCTION__, __LINE__, ##__VA_ARGS__); \
+ } while (0)
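+
+/*
+ * Usage sketch: D("len %d too short", len) emits one timestamped log
+ * line tagged with the function name and line number; ND() expands to
+ * nothing, so an individual debug statement can be disabled by renaming
+ * D -> ND without deleting it.
+ */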
+
+struct netmap_adapter;
+
+/*
+ * private, kernel view of a ring.
+ *
+ * XXX 20110627-todo
+ * The index in the NIC and netmap ring is offset by nkr_hwofs slots.
+ * This is so that, on a reset, buffers owned by userspace are not
+ * modified by the kernel. In particular:
+ * RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides with
+ * the next empty buffer as known by the hardware (next_to_check or so).
+ * TX rings: hwcur + hwofs coincides with next_to_send
+ */
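+/*
+ * Index translation sketch (one plausible reading of the offset above;
+ * the variable names are placeholders, not part of this header):
+ *
+ *	nic_i = (netmap_i + kring->nkr_hwofs) % kring->nkr_num_slots;
+ */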
+struct netmap_kring {
+ struct netmap_ring *ring;
+ u_int nr_hwcur;
+ int nr_hwavail;
+ u_int nr_kflags;
+ u_int nkr_num_slots;
+
+ u_int nkr_hwofs; /* offset between NIC and netmap ring */
+ struct netmap_adapter *na; // debugging
+ struct selinfo si; /* poll/select wait queue */
+};
+
+/*
+ * This struct is part of and extends the 'struct adapter' (or
+ * equivalent) device descriptor. It contains all fields needed to
+ * support netmap operation.
+ */
+struct netmap_adapter {
+ int refcount; /* number of user-space descriptors using this
+ interface, which is equal to the number of
+ struct netmap_if objs in the mapped region. */
+
+	int separate_locks; /* set if the interface supports different
+ locks for rx, tx and core. */
+
+ u_int num_queues; /* number of tx/rx queue pairs: this is
+ a duplicate field needed to simplify the
+ signature of ``netmap_detach``. */
+
+	u_int num_tx_desc; /* number of descriptors in each queue */
+ u_int num_rx_desc;
+ u_int buff_size;
+
+ u_int flags; /* NR_REINIT */
+ /* tx_rings and rx_rings are private but allocated
+ * as a contiguous chunk of memory. Each array has
+ * N+1 entries, for the adapter queues and for the host queue.
+ */
+ struct netmap_kring *tx_rings; /* array of TX rings. */
+ struct netmap_kring *rx_rings; /* array of RX rings. */
+
+ /* copy of if_qflush and if_transmit pointers, to intercept
+ * packets from the network stack when netmap is active.
+ * XXX probably if_qflush is not necessary.
+ */
+ void (*if_qflush)(struct ifnet *);
+ int (*if_transmit)(struct ifnet *, struct mbuf *);
+
+ /* references to the ifnet and device routines, used by
+ * the generic netmap functions.
+ */
+ struct ifnet *ifp; /* adapter is ifp->if_softc */
+
+ int (*nm_register)(struct ifnet *, int onoff);
+ void (*nm_lock)(void *, int what, u_int ringid);
+ int (*nm_txsync)(void *, u_int ring, int lock);
+ int (*nm_rxsync)(void *, u_int ring, int lock);
+};
+
+/*
+ * The combination of "enable" (ifp->if_capabilities &IFCAP_NETMAP)
+ * and refcount gives the status of the interface, namely:
+ *
+ * enable refcount Status
+ *
+ * FALSE 0 normal operation
+ * FALSE != 0 -- (impossible)
+ * TRUE 1 netmap mode
+ * TRUE 0 being deleted.
+ */
+
+#define NETMAP_DELETING(_na) ( ((_na)->refcount == 0) && \
+ ( (_na)->ifp->if_capenable & IFCAP_NETMAP) )
+
+/*
+ * parameters for (*nm_lock)(adapter, what, index)
+ */
+enum {
+ NETMAP_NO_LOCK = 0,
+ NETMAP_CORE_LOCK, NETMAP_CORE_UNLOCK,
+ NETMAP_TX_LOCK, NETMAP_TX_UNLOCK,
+ NETMAP_RX_LOCK, NETMAP_RX_UNLOCK,
+};
+
+/*
+ * The following are support routines used by individual drivers to
+ * support netmap operation.
+ *
+ * netmap_attach() initializes a struct netmap_adapter, allocating the
+ * struct netmap_ring's and the struct selinfo.
+ *
+ * netmap_detach() frees the memory allocated by netmap_attach().
+ *
+ * netmap_start() replaces the if_transmit routine of the interface,
+ * and is used to intercept packets coming from the stack.
+ *
+ * netmap_load_map/netmap_reload_map are helper routines to set/reset
+ * the dmamap for a packet buffer
+ *
+ * netmap_reset() is a helper routine to be called in the driver
+ * when reinitializing a ring.
+ */
+int netmap_attach(struct netmap_adapter *, int);
+void netmap_detach(struct ifnet *);
+int netmap_start(struct ifnet *, struct mbuf *);
+enum txrx { NR_RX = 0, NR_TX = 1 };
+struct netmap_slot *netmap_reset(struct netmap_adapter *na,
+ enum txrx tx, int n, u_int new_cur);
+void netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map,
+ void *buf, bus_size_t buflen);
+void netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map,
+ void *buf, bus_size_t buflen);
+int netmap_ring_reinit(struct netmap_kring *);
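+
+/*
+ * Attach sketch (hypothetical driver "foo"; every field shown exists
+ * in struct netmap_adapter above, but the foo_* callbacks and sc are
+ * placeholders): a driver typically fills a template adapter in its
+ * attach routine and registers it:
+ *
+ *	struct netmap_adapter na;
+ *
+ *	bzero(&na, sizeof(na));
+ *	na.ifp = sc->ifp;
+ *	na.separate_locks = 0;
+ *	na.num_tx_desc = sc->num_tx_desc;
+ *	na.num_rx_desc = sc->num_rx_desc;
+ *	na.nm_register = foo_netmap_reg;
+ *	na.nm_lock = foo_netmap_lock;
+ *	na.nm_txsync = foo_netmap_txsync;
+ *	na.nm_rxsync = foo_netmap_rxsync;
+ *	netmap_attach(&na, 1);		(one tx/rx queue pair)
+ */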
+
+/*
+ * XXX eventually, get rid of netmap_total_buffers and netmap_buffer_base
+ * in favour of the structure
+ */
+// struct netmap_buf_pool;
+// extern struct netmap_buf_pool nm_buf_pool;
+extern u_int netmap_total_buffers;
+extern char *netmap_buffer_base;
+extern int netmap_verbose; // XXX debugging
+enum { /* verbose flags */
+ NM_VERB_ON = 1, /* generic verbose */
+ NM_VERB_HOST = 0x2, /* verbose host stack */
+ NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */
+ NM_VERB_TXSYNC = 0x20,
+ NM_VERB_RXINTR = 0x100, /* verbose on rx/tx intr (driver) */
+ NM_VERB_TXINTR = 0x200,
+	NM_VERB_NIC_RXSYNC = 0x1000, /* verbose on NIC rxsync/txsync */
+ NM_VERB_NIC_TXSYNC = 0x2000,
+};
+
+/*
+ * return a pointer to the struct netmap adapter from the ifp
+ */
+#define NA(_ifp) ((struct netmap_adapter *)(_ifp)->if_pspare[0])
+
+
+/*
+ * return the address of a buffer.
+ * XXX this is a special version with hardwired 2k bufs
+ * On error return netmap_buffer_base which is detected as a bad pointer.
+ */
+static inline char *
+NMB(struct netmap_slot *slot)
+{
+ uint32_t i = slot->buf_idx;
+ return (i >= netmap_total_buffers) ? netmap_buffer_base :
+#if NETMAP_BUF_SIZE == 2048
+ netmap_buffer_base + (i << 11);
+#else
+	netmap_buffer_base + (i * NETMAP_BUF_SIZE);
+#endif
+}
+
+#endif /* _NET_NETMAP_KERN_H_ */
diff --git a/sys/dev/nge/if_nge.c b/sys/dev/nge/if_nge.c
index db20ad2..3d7ad63 100644
--- a/sys/dev/nge/if_nge.c
+++ b/sys/dev/nge/if_nge.c
@@ -117,6 +117,7 @@ __FBSDID("$FreeBSD$");
#include <net/if_vlan_var.h>
#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
@@ -138,7 +139,7 @@ MODULE_DEPEND(nge, miibus, 1, 1, 1);
/*
* Various supported device vendors/types and their names.
*/
-static struct nge_type nge_devs[] = {
+static const struct nge_type nge_devs[] = {
{ NGE_VENDORID, NGE_DEVICEID,
"National Semiconductor Gigabit Ethernet" },
{ 0, 0, NULL }
@@ -180,11 +181,6 @@ static void nge_eeprom_putbyte(struct nge_softc *, int);
static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *);
static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int);
-static void nge_mii_sync(struct nge_softc *);
-static void nge_mii_send(struct nge_softc *, uint32_t, int);
-static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
-static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
-
static int nge_miibus_readreg(device_t, int, int);
static int nge_miibus_writereg(device_t, int, int, int);
static void nge_miibus_statchg(device_t);
@@ -200,6 +196,24 @@ static void nge_sysctl_node(struct nge_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS);
+/*
+ * MII bit-bang glue
+ */
+static uint32_t nge_mii_bitbang_read(device_t);
+static void nge_mii_bitbang_write(device_t, uint32_t);
+
+static const struct mii_bitbang_ops nge_mii_bitbang_ops = {
+ nge_mii_bitbang_read,
+ nge_mii_bitbang_write,
+ {
+ NGE_MEAR_MII_DATA, /* MII_BIT_MDO */
+ NGE_MEAR_MII_DATA, /* MII_BIT_MDI */
+ NGE_MEAR_MII_CLK, /* MII_BIT_MDC */
+ NGE_MEAR_MII_DIR, /* MII_BIT_DIR_HOST_PHY */
+ 0, /* MII_BIT_DIR_PHY_HOST */
+ }
+};
+
static device_method_t nge_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, nge_probe),
@@ -366,180 +380,42 @@ nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt)
}
/*
- * Sync the PHYs by setting data bit and strobing the clock 32 times.
- */
-static void
-nge_mii_sync(struct nge_softc *sc)
-{
- int i;
-
- SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
-
- for (i = 0; i < 32; i++) {
- SIO_SET(NGE_MEAR_MII_CLK);
- DELAY(1);
- SIO_CLR(NGE_MEAR_MII_CLK);
- DELAY(1);
- }
-}
-
-/*
- * Clock a series of bits through the MII.
- */
-static void
-nge_mii_send(struct nge_softc *sc, uint32_t bits, int cnt)
-{
- int i;
-
- SIO_CLR(NGE_MEAR_MII_CLK);
-
- for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
- if (bits & i) {
- SIO_SET(NGE_MEAR_MII_DATA);
- } else {
- SIO_CLR(NGE_MEAR_MII_DATA);
- }
- DELAY(1);
- SIO_CLR(NGE_MEAR_MII_CLK);
- DELAY(1);
- SIO_SET(NGE_MEAR_MII_CLK);
- }
-}
-
-/*
- * Read an PHY register through the MII.
+ * Read the MII serial port for the MII bit-bang module.
*/
-static int
-nge_mii_readreg(struct nge_softc *sc, struct nge_mii_frame *frame)
+static uint32_t
+nge_mii_bitbang_read(device_t dev)
{
- int i, ack;
-
- /*
- * Set up frame for RX.
- */
- frame->mii_stdelim = NGE_MII_STARTDELIM;
- frame->mii_opcode = NGE_MII_READOP;
- frame->mii_turnaround = 0;
- frame->mii_data = 0;
-
- CSR_WRITE_4(sc, NGE_MEAR, 0);
-
- /*
- * Turn on data xmit.
- */
- SIO_SET(NGE_MEAR_MII_DIR);
-
- nge_mii_sync(sc);
-
- /*
- * Send command/address info.
- */
- nge_mii_send(sc, frame->mii_stdelim, 2);
- nge_mii_send(sc, frame->mii_opcode, 2);
- nge_mii_send(sc, frame->mii_phyaddr, 5);
- nge_mii_send(sc, frame->mii_regaddr, 5);
-
- /* Idle bit */
- SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
- DELAY(1);
- SIO_SET(NGE_MEAR_MII_CLK);
- DELAY(1);
-
- /* Turn off xmit. */
- SIO_CLR(NGE_MEAR_MII_DIR);
- /* Check for ack */
- SIO_CLR(NGE_MEAR_MII_CLK);
- DELAY(1);
- ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
- SIO_SET(NGE_MEAR_MII_CLK);
- DELAY(1);
-
- /*
- * Now try reading data bits. If the ack failed, we still
- * need to clock through 16 cycles to keep the PHY(s) in sync.
- */
- if (ack) {
- for (i = 0; i < 16; i++) {
- SIO_CLR(NGE_MEAR_MII_CLK);
- DELAY(1);
- SIO_SET(NGE_MEAR_MII_CLK);
- DELAY(1);
- }
- goto fail;
- }
-
- for (i = 0x8000; i; i >>= 1) {
- SIO_CLR(NGE_MEAR_MII_CLK);
- DELAY(1);
- if (!ack) {
- if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
- frame->mii_data |= i;
- DELAY(1);
- }
- SIO_SET(NGE_MEAR_MII_CLK);
- DELAY(1);
- }
+ struct nge_softc *sc;
+ uint32_t val;
-fail:
+ sc = device_get_softc(dev);
- SIO_CLR(NGE_MEAR_MII_CLK);
- DELAY(1);
- SIO_SET(NGE_MEAR_MII_CLK);
- DELAY(1);
+ val = CSR_READ_4(sc, NGE_MEAR);
+ CSR_BARRIER_4(sc, NGE_MEAR,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- if (ack)
- return (1);
- return (0);
+ return (val);
}
/*
- * Write to a PHY register through the MII.
+ * Write the MII serial port for the MII bit-bang module.
*/
-static int
-nge_mii_writereg(struct nge_softc *sc, struct nge_mii_frame *frame)
+static void
+nge_mii_bitbang_write(device_t dev, uint32_t val)
{
+ struct nge_softc *sc;
- /*
- * Set up frame for TX.
- */
-
- frame->mii_stdelim = NGE_MII_STARTDELIM;
- frame->mii_opcode = NGE_MII_WRITEOP;
- frame->mii_turnaround = NGE_MII_TURNAROUND;
-
- /*
- * Turn on data output.
- */
- SIO_SET(NGE_MEAR_MII_DIR);
-
- nge_mii_sync(sc);
-
- nge_mii_send(sc, frame->mii_stdelim, 2);
- nge_mii_send(sc, frame->mii_opcode, 2);
- nge_mii_send(sc, frame->mii_phyaddr, 5);
- nge_mii_send(sc, frame->mii_regaddr, 5);
- nge_mii_send(sc, frame->mii_turnaround, 2);
- nge_mii_send(sc, frame->mii_data, 16);
-
- /* Idle bit. */
- SIO_SET(NGE_MEAR_MII_CLK);
- DELAY(1);
- SIO_CLR(NGE_MEAR_MII_CLK);
- DELAY(1);
-
- /*
- * Turn off xmit.
- */
- SIO_CLR(NGE_MEAR_MII_DIR);
+ sc = device_get_softc(dev);
- return (0);
+ CSR_WRITE_4(sc, NGE_MEAR, val);
+ CSR_BARRIER_4(sc, NGE_MEAR,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
static int
nge_miibus_readreg(device_t dev, int phy, int reg)
{
struct nge_softc *sc;
- struct nge_mii_frame frame;
int rv;
sc = device_get_softc(dev);
@@ -583,20 +459,13 @@ nge_miibus_readreg(device_t dev, int phy, int reg)
return (CSR_READ_4(sc, reg));
}
- bzero((char *)&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- nge_mii_readreg(sc, &frame);
-
- return (frame.mii_data);
+ return (mii_bitbang_readreg(dev, &nge_mii_bitbang_ops, phy, reg));
}
static int
nge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct nge_softc *sc;
- struct nge_mii_frame frame;
sc = device_get_softc(dev);
if ((sc->nge_flags & NGE_FLAG_TBI) != 0) {
@@ -633,12 +502,7 @@ nge_miibus_writereg(device_t dev, int phy, int reg, int data)
return (0);
}
- bzero((char *)&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- frame.mii_data = data;
- nge_mii_writereg(sc, &frame);
+ mii_bitbang_writereg(dev, &nge_mii_bitbang_ops, phy, reg, data);
return (0);
}
@@ -813,7 +677,7 @@ nge_rxfilter(struct nge_softc *sc)
rxfilt = CSR_READ_4(sc, NGE_RXFILT_CTL);
rxfilt &= ~NGE_RXFILTCTL_ENABLE;
CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
- CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL);
+ CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);
rxfilt &= ~(NGE_RXFILTCTL_ALLMULTI | NGE_RXFILTCTL_ALLPHYS);
rxfilt &= ~NGE_RXFILTCTL_BROAD;
@@ -882,7 +746,7 @@ done:
/* Turn the receive filter on. */
rxfilt |= NGE_RXFILTCTL_ENABLE;
CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
- CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL);
+ CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);
}
static void
@@ -932,7 +796,7 @@ nge_reset(struct nge_softc *sc)
static int
nge_probe(device_t dev)
{
- struct nge_type *t;
+ const struct nge_type *t;
t = nge_devs;
@@ -2216,7 +2080,7 @@ nge_init_locked(struct nge_softc *sc)
/* Disable Rx filter prior to programming Rx filter. */
CSR_WRITE_4(sc, NGE_RXFILT_CTL, 0);
- CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL);
+ CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);
mii = device_get_softc(sc->nge_miibus);
@@ -2704,12 +2568,12 @@ nge_wol(struct nge_softc *sc)
* (i.e. Silent Rx mode.)
*/
CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0);
- CSR_BARRIER_WRITE_4(sc, NGE_RX_LISTPTR_HI);
+ CSR_BARRIER_4(sc, NGE_RX_LISTPTR_HI, BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0);
- CSR_BARRIER_WRITE_4(sc, NGE_RX_LISTPTR_LO);
+ CSR_BARRIER_4(sc, NGE_RX_LISTPTR_LO, BUS_SPACE_BARRIER_WRITE);
/* Enable Rx again. */
NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
- CSR_BARRIER_WRITE_4(sc, NGE_CSR);
+ CSR_BARRIER_4(sc, NGE_CSR, BUS_SPACE_BARRIER_WRITE);
/* Configure WOL events. */
reg = 0;
diff --git a/sys/dev/nge/if_ngereg.h b/sys/dev/nge/if_ngereg.h
index 4ad1bf9..9df0d92 100644
--- a/sys/dev/nge/if_ngereg.h
+++ b/sys/dev/nge/if_ngereg.h
@@ -611,26 +611,9 @@ struct nge_ring_data {
struct nge_type {
uint16_t nge_vid;
uint16_t nge_did;
- char *nge_name;
+ const char *nge_name;
};
-struct nge_mii_frame {
- uint8_t mii_stdelim;
- uint8_t mii_opcode;
- uint8_t mii_phyaddr;
- uint8_t mii_regaddr;
- uint8_t mii_turnaround;
- uint16_t mii_data;
-};
-
-/*
- * MII constants
- */
-#define NGE_MII_STARTDELIM 0x01
-#define NGE_MII_READOP 0x02
-#define NGE_MII_WRITEOP 0x01
-#define NGE_MII_TURNAROUND 0x02
-
#define NGE_JUMBO_FRAMELEN 9022
#define NGE_JUMBO_MTU \
(NGE_JUMBO_FRAMELEN - sizeof(struct ether_vlan_header) - ETHER_CRC_LEN)
@@ -691,8 +674,9 @@ struct nge_softc {
*/
#define CSR_WRITE_4(sc, reg, val) \
bus_write_4((sc)->nge_res, reg, val)
-#define CSR_BARRIER_WRITE_4(sc, reg) \
- bus_barrier((sc)->nge_res, reg, 4, BUS_SPACE_BARRIER_WRITE)
+
+#define CSR_BARRIER_4(sc, reg, flags) \
+ bus_barrier((sc)->nge_res, reg, 4, flags)
#define CSR_READ_4(sc, reg) \
bus_read_4((sc)->nge_res, reg)
diff --git a/sys/dev/nmdm/nmdm.c b/sys/dev/nmdm/nmdm.c
index 2476eb2..ceee762 100644
--- a/sys/dev/nmdm/nmdm.c
+++ b/sys/dev/nmdm/nmdm.c
@@ -52,7 +52,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/taskqueue.h>
-MALLOC_DEFINE(M_NMDM, "nullmodem", "nullmodem data structures");
+static MALLOC_DEFINE(M_NMDM, "nullmodem", "nullmodem data structures");
static tsw_inwakeup_t nmdm_outwakeup;
static tsw_outwakeup_t nmdm_inwakeup;
diff --git a/sys/dev/ofw/openfirm.c b/sys/dev/ofw/openfirm.c
index a8cb8f7..9ff72df 100644
--- a/sys/dev/ofw/openfirm.c
+++ b/sys/dev/ofw/openfirm.c
@@ -127,7 +127,7 @@ OF_init(void *cookie)
* then statically initialize the OFW object.
*/
kobj_class_compile_static(ofw_def_impl, &ofw_kernel_kops);
- kobj_init((kobj_t)ofw_obj, ofw_def_impl);
+ kobj_init_static((kobj_t)ofw_obj, ofw_def_impl);
rv = OFW_INIT(ofw_obj, cookie);
diff --git a/sys/dev/pccard/pccard.c b/sys/dev/pccard/pccard.c
index 1de571c..43bc4b3 100644
--- a/sys/dev/pccard/pccard.c
+++ b/sys/dev/pccard/pccard.c
@@ -59,7 +59,7 @@ __FBSDID("$FreeBSD$");
#define PCCARDDEBUG
/* sysctl vars */
-SYSCTL_NODE(_hw, OID_AUTO, pccard, CTLFLAG_RD, 0, "PCCARD parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, pccard, CTLFLAG_RD, 0, "PCCARD parameters");
int pccard_debug = 0;
TUNABLE_INT("hw.pccard.debug", &pccard_debug);
diff --git a/sys/dev/pccbb/pccbb.c b/sys/dev/pccbb/pccbb.c
index 3c60f37..b116b54 100644
--- a/sys/dev/pccbb/pccbb.c
+++ b/sys/dev/pccbb/pccbb.c
@@ -129,7 +129,7 @@ __FBSDID("$FreeBSD$");
devclass_t cbb_devclass;
/* sysctl vars */
-SYSCTL_NODE(_hw, OID_AUTO, cbb, CTLFLAG_RD, 0, "CBB parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, cbb, CTLFLAG_RD, 0, "CBB parameters");
/* There's no way to say TUNEABLE_LONG to get the right types */
u_long cbb_start_mem = CBB_START_MEM;
diff --git a/sys/dev/pccbb/pccbb_isa.c b/sys/dev/pccbb/pccbb_isa.c
index 677967d..fb45e64 100644
--- a/sys/dev/pccbb/pccbb_isa.c
+++ b/sys/dev/pccbb/pccbb_isa.c
@@ -68,7 +68,7 @@ __FBSDID("$FreeBSD$");
*****************************************************************************/
/* sysctl vars */
-SYSCTL_NODE(_hw, OID_AUTO, pcic, CTLFLAG_RD, 0, "PCIC parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, pcic, CTLFLAG_RD, 0, "PCIC parameters");
static int isa_intr_mask = EXCA_INT_MASK_ALLOWED;
TUNABLE_INT("hw.cbb.intr_mask", &isa_intr_mask);
diff --git a/sys/dev/ppbus/ppb_base.c b/sys/dev/ppbus/ppb_base.c
index 30c42a4..62769b0 100644
--- a/sys/dev/ppbus/ppb_base.c
+++ b/sys/dev/ppbus/ppb_base.c
@@ -236,11 +236,8 @@ ppb_unlock(device_t bus)
void
_ppb_assert_locked(device_t bus, const char *file, int line)
{
-#ifdef INVARIANTS
- struct ppb_data *ppb = DEVTOSOFTC(bus);
- _mtx_assert(ppb->ppc_lock, MA_OWNED, file, line);
-#endif
+ mtx_assert_(DEVTOSOFTC(bus)->ppc_lock, MA_OWNED, file, line);
}
void
diff --git a/sys/dev/ppbus/ppbconf.c b/sys/dev/ppbus/ppbconf.c
index 9e21c67..858e5b2 100644
--- a/sys/dev/ppbus/ppbconf.c
+++ b/sys/dev/ppbus/ppbconf.c
@@ -422,20 +422,14 @@ ppbus_attach(device_t dev)
static int
ppbus_detach(device_t dev)
{
- device_t *children;
- int error, nchildren, i;
+ int error;
error = bus_generic_detach(dev);
if (error)
return (error);
/* detach & delete all children */
- if (!device_get_children(dev, &children, &nchildren)) {
- for (i = 0; i < nchildren; i++)
- if (children[i])
- device_delete_child(dev, children[i]);
- free(children, M_TEMP);
- }
+ device_delete_all_children(dev);
return (0);
}
diff --git a/sys/dev/ppc/ppc.c b/sys/dev/ppc/ppc.c
index 1322a33..ef505d8 100644
--- a/sys/dev/ppc/ppc.c
+++ b/sys/dev/ppc/ppc.c
@@ -1851,20 +1851,13 @@ int
ppc_detach(device_t dev)
{
struct ppc_data *ppc = DEVTOSOFTC(dev);
- device_t *children;
- int nchildren, i;
if (ppc->res_irq == 0) {
return (ENXIO);
}
/* detach & delete all children */
- if (!device_get_children(dev, &children, &nchildren)) {
- for (i = 0; i < nchildren; i++)
- if (children[i])
- device_delete_child(dev, children[i]);
- free(children, M_TEMP);
- }
+ device_delete_all_children(dev);
if (ppc->res_irq != 0) {
bus_teardown_intr(dev, ppc->res_irq, ppc->intr_cookie);
diff --git a/sys/dev/puc/puc.c b/sys/dev/puc/puc.c
index 9bb3ceb..7b93306 100644
--- a/sys/dev/puc/puc.c
+++ b/sys/dev/puc/puc.c
@@ -68,7 +68,7 @@ struct puc_port {
devclass_t puc_devclass;
const char puc_driver_name[] = "puc";
-MALLOC_DEFINE(M_PUC, "PUC", "PUC driver");
+static MALLOC_DEFINE(M_PUC, "PUC", "PUC driver");
struct puc_bar *
puc_get_bar(struct puc_softc *sc, int rid)
diff --git a/sys/dev/puc/pucdata.c b/sys/dev/puc/pucdata.c
index 80dff6e..94d1c2c 100644
--- a/sys/dev/puc/pucdata.c
+++ b/sys/dev/puc/pucdata.c
@@ -51,6 +51,7 @@ static puc_config_f puc_config_amc;
static puc_config_f puc_config_diva;
static puc_config_f puc_config_exar;
static puc_config_f puc_config_icbook;
+static puc_config_f puc_config_moxa;
static puc_config_f puc_config_oxford_pcie;
static puc_config_f puc_config_quatech;
static puc_config_f puc_config_syba;
@@ -518,12 +519,25 @@ const struct puc_cfg puc_pci_devices[] = {
PUC_PORT_4S, 0x18, 0, 8,
},
+ { 0x1393, 0x1042, 0xffff, 0,
+ "Moxa Technologies, Smartio CP-104JU/PCI",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_4S, 0x18, 0, 8,
+ },
+
{ 0x1393, 0x1043, 0xffff, 0,
"Moxa Technologies, Smartio CP-104EL/PCIe",
DEFAULT_RCLK * 8,
PUC_PORT_4S, 0x18, 0, 8,
},
+ { 0x1393, 0x1045, 0xffff, 0,
+ "Moxa Technologies, Smartio CP-104EL-A/PCIe",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_4S, 0x14, 0, -1,
+ .config_function = puc_config_moxa
+ },
+
{ 0x1393, 0x1120, 0xffff, 0,
"Moxa Technologies, CP-112UL",
DEFAULT_RCLK * 8,
@@ -850,6 +864,18 @@ const struct puc_cfg puc_pci_devices[] = {
PUC_PORT_2S, 0x10, 4, 0,
},
+ { 0x14d2, 0xa007, 0xffff, 0,
+ "Titan VScom PCIex-800H",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_4S, 0x10, 0, 8,
+ },
+
+ { 0x14d2, 0xa008, 0xffff, 0,
+ "Titan VScom PCIex-800H",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_4S, 0x10, 0, 8,
+ },
+
{ 0x14db, 0x2130, 0xffff, 0,
"Avlab Technology, PCI IO 2S",
DEFAULT_RCLK,
@@ -1086,6 +1112,19 @@ puc_config_icbook(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
}
static int
+puc_config_moxa(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
+ intptr_t *res)
+{
+ const struct puc_cfg *cfg = sc->sc_cfg;
+
+ if (cmd == PUC_CFG_GET_OFS && cfg->device == 0x1045) {
+ *res = ((port == 3) ? 7 : port) * 0x200;
+		return (0);
+ }
+ return (ENXIO);
+}
+
+static int
puc_config_quatech(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
intptr_t *res)
{
diff --git a/sys/dev/qlxgb/README.txt b/sys/dev/qlxgb/README.txt
new file mode 100644
index 0000000..d9773cc
--- /dev/null
+++ b/sys/dev/qlxgb/README.txt
@@ -0,0 +1,99 @@
+# $FreeBSD$
+
+ README File
+ QLogic 3200 and 8200 series Single/Dual Port
+10 Gigabit Ethernet & CNA Adapter Driver for FreeBSD 7.x/8.x/9.x
+
+ QLogic Corporation.
+ All rights reserved.
+
+
+Table of Contents
+1. Package Contents
+2. OS Support
+3. Supported Features
+4. Using the Driver
+ 4.1 Installing the driver
+ 4.2 Removing the driver
+5. Driver Parameters
+6. Contacting Support
+
+1. Package Contents
+ * Documentation
+	- README (this document) version: 1.0
+	- Release Notes version: 1.0
+ * Driver (if_qlxgb.ko)
+ - FreeBSD 7.x/8.x/9.x
+ * Firmware: pre-flashed on the QLogic adapter.
+
+2. OS Support
+
+The QLogic 10 Gigabit Ethernet/CNA driver is compatible with the
+following OS platforms:
+ * FreeBSD 7.x/8.x/9.x (64-bit) [Intel EM64T, AMD64]
+
+3. Supported Features
+The 10 Gigabit Ethernet NIC/CNA driver supports the following features:
+
+* Large Segment Offload over TCP IPv4
+* Large Segment Offload over TCP IPv6
+* Receive Side Scaling
+* TCP over IPv4 checksum offload
+* UDP over IPv4 checksum offload
+* IPv4 checksum offload
+* TCP over IPv6 checksum offload
+* UDP over IPv6 checksum offload
+* Jumbo frames
+* VLAN Tag
+
+
+4. Using the driver
+
+ 4.1 Installing the driver
+
+ - copy the driver file (if_qlxgb.ko) into some directory (say qla_driver)
+ - cd <to qla_driver>
+ - kldload -v ./if_qlxgb.ko
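+
+	- to load the driver automatically at boot, the standard FreeBSD
+	  convention (assuming if_qlxgb.ko has been copied into
+	  /boot/modules) is to add the following line to /boot/loader.conf:
+
+	  if_qlxgb_load="YES"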
+
+ 4.2 Removing the driver
+
+ - kldunload if_qlxgb
+
+5. Driver Parameters
+
+ - Add the following lines to /etc/sysctl.conf and reboot the machine
+   prior to installing the driver:
+
+ kern.ipc.nmbjumbo9=262144
+ net.inet.tcp.recvbuf_max=262144
+ net.inet.tcp.recvbuf_inc=16384
+ kern.ipc.nmbclusters=1000000
+ kern.ipc.maxsockbuf=2097152
+ net.inet.tcp.recvspace=131072
+ net.inet.tcp.sendbuf_max=262144
+ net.inet.tcp.sendspace=65536
+
+ - If you do not want to reboot the system, please run the following commands
+
+   log in or su to root
+
+ sysctl kern.ipc.nmbjumbo9=262144
+ sysctl net.inet.tcp.recvbuf_max=262144
+ sysctl net.inet.tcp.recvbuf_inc=16384
+ sysctl kern.ipc.nmbclusters=1000000
+ sysctl kern.ipc.maxsockbuf=2097152
+ sysctl net.inet.tcp.recvspace=131072
+ sysctl net.inet.tcp.sendbuf_max=262144
+ sysctl net.inet.tcp.sendspace=65536
+
+6. Contacting Support
+Please feel free to contact your QLogic approved reseller or QLogic
+Technical Support at any phase of integration for assistance. QLogic
+Technical Support can be reached by the following methods:
+Web: http://support.qlogic.com
+E-mail: support@qlogic.com
+(c) Copyright 2011. All rights reserved worldwide. QLogic, the QLogic
+logo, and the Powered by QLogic logo are registered trademarks of
+QLogic Corporation. All other brand and product names are trademarks
+or registered trademarks of their respective owners.
diff --git a/sys/dev/qlxgb/qla_dbg.c b/sys/dev/qlxgb/qla_dbg.c
new file mode 100644
index 0000000..5fc6f46
--- /dev/null
+++ b/sys/dev/qlxgb/qla_dbg.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * File : qla_dbg.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_inline.h"
+#include "qla_ver.h"
+#include "qla_glbl.h"
+#include "qla_dbg.h"
+
+
+uint32_t dbg_level = 0;
+/*
+ * Name: qla_dump_buf32
+ * Function: dumps a buffer as 32 bit words
+ */
+void qla_dump_buf32(qla_host_t *ha, char *msg, void *dbuf32, uint32_t len32)
+{
+ device_t dev;
+ uint32_t i = 0;
+ uint32_t *buf;
+
+ dev = ha->pci_dev;
+ buf = dbuf32;
+
+ device_printf(dev, "%s: %s dump start\n", __func__, msg);
+
+ while (len32 >= 4) {
+ device_printf(dev,"0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, buf[0], buf[1], buf[2], buf[3]);
+ i += 4 * 4;
+ len32 -= 4;
+ buf += 4;
+ }
+ switch (len32) {
+ case 1:
+ device_printf(dev,"0x%08x: 0x%08x\n", i, buf[0]);
+ break;
+ case 2:
+ device_printf(dev,"0x%08x: 0x%08x 0x%08x\n", i, buf[0], buf[1]);
+ break;
+ case 3:
+ device_printf(dev,"0x%08x: 0x%08x 0x%08x 0x%08x\n",
+ i, buf[0], buf[1], buf[2]);
+ break;
+ default:
+ break;
+ }
+ device_printf(dev, "%s: %s dump end\n", __func__, msg);
+}
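+
+/*
+ * Usage sketch (via the wrapper in qla_dbg.h; "ctx" and "size" are
+ * placeholders):
+ *
+ *	QL_DUMP_BUFFER32(ha, "rx context", ctx, size / 4);
+ *
+ * dumps "size" bytes as 32-bit words when bit 0x08000000 is set in
+ * dbg_level.
+ */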
+
+/*
+ * Name: qla_dump_buf16
+ * Function: dumps a buffer as 16 bit words
+ */
+void qla_dump_buf16(qla_host_t *ha, char *msg, void *dbuf16, uint32_t len16)
+{
+ device_t dev;
+ uint32_t i = 0;
+ uint16_t *buf;
+
+ dev = ha->pci_dev;
+ buf = dbuf16;
+
+ device_printf(dev, "%s: %s dump start\n", __func__, msg);
+
+ while (len16 >= 8) {
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x 0x%04x 0x%04x"
+ " 0x%04x 0x%04x 0x%04x 0x%04x\n", i, buf[0],
+ buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+ i += 16;
+ len16 -= 8;
+ buf += 8;
+ }
+ switch (len16) {
+ case 1:
+ device_printf(dev,"0x%08x: 0x%04x\n", i, buf[0]);
+ break;
+ case 2:
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x\n", i, buf[0], buf[1]);
+ break;
+ case 3:
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x 0x%04x\n",
+ i, buf[0], buf[1], buf[2]);
+ break;
+ case 4:
+ device_printf(dev,"0x%08x: 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
+ buf[0], buf[1], buf[2], buf[3]);
+ break;
+ case 5:
+ device_printf(dev,"0x%08x:"
+ " 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4]);
+ break;
+ case 6:
+ device_printf(dev,"0x%08x:"
+ " 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
+ break;
+ case 7:
+ device_printf(dev,"0x%04x: 0x%04x 0x%04x 0x%04x 0x%04x"
+ " 0x%04x 0x%04x 0x%04x\n", i, buf[0], buf[1],
+ buf[2], buf[3], buf[4], buf[5], buf[6]);
+ break;
+ default:
+ break;
+ }
+ device_printf(dev, "%s: %s dump end\n", __func__, msg);
+}
+
+/*
+ * Name: qla_dump_buf8
+ * Function: dumps a buffer as bytes
+ */
+void qla_dump_buf8(qla_host_t *ha, char *msg, void *dbuf, uint32_t len)
+{
+ device_t dev;
+ uint32_t i = 0;
+ uint8_t *buf;
+
+ dev = ha->pci_dev;
+ buf = dbuf;
+
+ device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
+
+ while (len >= 16) {
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], buf[15]);
+ i += 16;
+ len -= 16;
+ buf += 16;
+ }
+ switch (len) {
+ case 1:
+ device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
+ break;
+ case 2:
+ device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
+ break;
+ case 3:
+ device_printf(dev,"0x%08x: %02x %02x %02x\n",
+ i, buf[0], buf[1], buf[2]);
+ break;
+ case 4:
+ device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3]);
+ break;
+ case 5:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4]);
+ break;
+ case 6:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
+ break;
+ case 7:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+ break;
+ case 8:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7]);
+ break;
+ case 9:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8]);
+ break;
+ case 10:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9]);
+ break;
+ case 11:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10]);
+ break;
+ case 12:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11]);
+ break;
+ case 13:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
+ break;
+ case 14:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
+ buf[13]);
+ break;
+ case 15:
+ device_printf(dev,"0x%08x:"
+ " %02x %02x %02x %02x %02x %02x %02x %02x"
+ " %02x %02x %02x %02x %02x %02x %02x\n", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
+ buf[13], buf[14]);
+ break;
+ default:
+ break;
+ }
+
+ device_printf(dev, "%s: %s dump end\n", __func__, msg);
+}
diff --git a/sys/dev/qlxgb/qla_dbg.h b/sys/dev/qlxgb/qla_dbg.h
new file mode 100644
index 0000000..1f0d184
--- /dev/null
+++ b/sys/dev/qlxgb/qla_dbg.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * File : qla_dbg.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QL_DBG_H_
+#define _QL_DBG_H_
+
+extern uint32_t dbg_level;
+
+extern void qla_dump_buf8(qla_host_t *ha, char *str, void *dbuf,
+ uint32_t len);
+extern void qla_dump_buf16(qla_host_t *ha, char *str, void *dbuf,
+ uint32_t len16);
+extern void qla_dump_buf32(qla_host_t *ha, char *str, void *dbuf,
+ uint32_t len32);
+
+
+#define DBG 1
+
+#if DBG
+
+#define QL_DPRINT1(x)	do { if (dbg_level & 0x0001) device_printf x; } while (0)
+#define QL_DPRINT2(x)	do { if (dbg_level & 0x0002) device_printf x; } while (0)
+#define QL_DPRINT4(x)	do { if (dbg_level & 0x0004) device_printf x; } while (0)
+#define QL_DPRINT8(x)	do { if (dbg_level & 0x0008) device_printf x; } while (0)
+#define QL_DPRINT10(x)	do { if (dbg_level & 0x0010) device_printf x; } while (0)
+#define QL_DPRINT20(x)	do { if (dbg_level & 0x0020) device_printf x; } while (0)
+#define QL_DPRINT40(x)	do { if (dbg_level & 0x0040) device_printf x; } while (0)
+#define QL_DPRINT80(x)	do { if (dbg_level & 0x0080) device_printf x; } while (0)
+
+#define QL_DUMP_BUFFER8(h, s, b, n)	do { if (dbg_level & 0x08000000)\
+					qla_dump_buf8(h, s, b, n); } while (0)
+#define QL_DUMP_BUFFER16(h, s, b, n)	do { if (dbg_level & 0x08000000)\
+					qla_dump_buf16(h, s, b, n); } while (0)
+#define QL_DUMP_BUFFER32(h, s, b, n)	do { if (dbg_level & 0x08000000)\
+					qla_dump_buf32(h, s, b, n); } while (0)
+
+#else
+
+#define QL_DPRINT1(x)
+#define QL_DPRINT2(x)
+#define QL_DPRINT4(x)
+#define QL_DPRINT8(x)
+#define QL_DPRINT10(x)
+#define QL_DPRINT20(x)
+#define QL_DPRINT40(x)
+#define QL_DPRINT80(x)
+
+#define QL_DUMP_BUFFER8(h, s, b, n)
+#define QL_DUMP_BUFFER16(h, s, b, n)
+#define QL_DUMP_BUFFER32(h, s, b, n)
+
+#endif
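+
+/*
+ * Usage sketch: the QL_DPRINTx macros take the whole device_printf()
+ * argument list in one extra set of parentheses, e.g.
+ *
+ *	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+ *
+ * With DBG set to 0, the print and dump macros expand to nothing.
+ */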
+
+#endif /* #ifndef _QL_DBG_H_ */
diff --git a/sys/dev/qlxgb/qla_def.h b/sys/dev/qlxgb/qla_def.h
new file mode 100644
index 0000000..d40d5e2
--- /dev/null
+++ b/sys/dev/qlxgb/qla_def.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * File: qla_def.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLA_DEF_H_
+#define _QLA_DEF_H_
+
+#define BIT_0 (0x1 << 0)
+#define BIT_1 (0x1 << 1)
+#define BIT_2 (0x1 << 2)
+#define BIT_3 (0x1 << 3)
+#define BIT_4 (0x1 << 4)
+#define BIT_5 (0x1 << 5)
+#define BIT_6 (0x1 << 6)
+#define BIT_7 (0x1 << 7)
+#define BIT_8 (0x1 << 8)
+#define BIT_9 (0x1 << 9)
+#define BIT_10 (0x1 << 10)
+#define BIT_11 (0x1 << 11)
+#define BIT_12 (0x1 << 12)
+#define BIT_13 (0x1 << 13)
+#define BIT_14 (0x1 << 14)
+#define BIT_15 (0x1 << 15)
+#define BIT_16 (0x1 << 16)
+#define BIT_17 (0x1 << 17)
+#define BIT_18 (0x1 << 18)
+#define BIT_19 (0x1 << 19)
+#define BIT_20 (0x1 << 20)
+#define BIT_21 (0x1 << 21)
+#define BIT_22 (0x1 << 22)
+#define BIT_23 (0x1 << 23)
+#define BIT_24 (0x1 << 24)
+#define BIT_25 (0x1 << 25)
+#define BIT_26 (0x1 << 26)
+#define BIT_27 (0x1 << 27)
+#define BIT_28 (0x1 << 28)
+#define BIT_29 (0x1 << 29)
+#define BIT_30 (0x1 << 30)
+#define BIT_31 (0x1 << 31)
+
+struct qla_rx_buf {
+ struct mbuf *m_head;
+ bus_dmamap_t map;
+ bus_addr_t paddr;
+ uint32_t handle;
+ void *next;
+};
+typedef struct qla_rx_buf qla_rx_buf_t;
+
+struct qla_tx_buf {
+ struct mbuf *m_head;
+ bus_dmamap_t map;
+};
+typedef struct qla_tx_buf qla_tx_buf_t;
+
+#define QLA_MAX_SEGMENTS 63 /* maximum # of segs in a sg list */
+#define QLA_MAX_FRAME_SIZE MJUM9BYTES
+#define QLA_STD_FRAME_SIZE 1514
+#define QLA_MAX_TSO_FRAME_SIZE ((64 * 1024 - 1) + 22)
+
+/* Number of MSIX/MSI Vectors required */
+#define Q8_MSI_COUNT 4
+
+struct qla_ivec {
+ struct resource *irq;
+ void *handle;
+ int irq_rid;
+ void *ha;
+ struct task rcv_task;
+ struct taskqueue *rcv_tq;
+};
+
+typedef struct qla_ivec qla_ivec_t;
+
+#define QLA_WATCHDOG_CALLOUT_TICKS 1
+
+/*
+ * The adapter structure contains the hardware-independent information of
+ * the PCI function.
+ */
+struct qla_host {
+ volatile struct {
+ volatile uint32_t
+ qla_watchdog_active :1,
+ qla_watchdog_exit :1,
+ qla_watchdog_pause :1,
+ lro_init :1,
+ stop_rcv :1,
+ link_up :1,
+ parent_tag :1,
+ lock_init :1;
+ } flags;
+
+ device_t pci_dev;
+
+ uint8_t pci_func;
+ uint16_t watchdog_ticks;
+ uint8_t resvd;
+
+ /* ioctl related */
+ struct cdev *ioctl_dev;
+
+ /* register mapping */
+ struct resource *pci_reg;
+ int reg_rid;
+
+ /* interrupts */
+ struct resource *irq;
+ int msix_count;
+ void *intr_handle;
+ qla_ivec_t irq_vec[Q8_MSI_COUNT];
+
+ /* parent dma tag */
+ bus_dma_tag_t parent_tag;
+
+	/* interface to the OS */
+ struct ifnet *ifp;
+
+ struct ifmedia media;
+ uint16_t max_frame_size;
+ uint16_t rsrvd0;
+ int if_flags;
+
+ /* hardware access lock */
+ struct mtx hw_lock;
+ volatile uint32_t hw_lock_held;
+
+ /* transmit and receive buffers */
+ qla_tx_buf_t tx_buf[NUM_TX_DESCRIPTORS];
+ bus_dma_tag_t tx_tag;
+ struct mtx tx_lock;
+ struct task tx_task;
+ struct taskqueue *tx_tq;
+ struct callout tx_callout;
+
+ qla_rx_buf_t rx_buf[NUM_RX_DESCRIPTORS];
+ qla_rx_buf_t rx_jbuf[NUM_RX_JUMBO_DESCRIPTORS];
+ bus_dma_tag_t rx_tag;
+
+ struct mtx rx_lock;
+ struct mtx rxj_lock;
+
+ /* stats */
+ uint32_t err_m_getcl;
+ uint32_t err_m_getjcl;
+ uint32_t err_tx_dmamap_create;
+ uint32_t err_tx_dmamap_load;
+ uint32_t err_tx_defrag;
+
+ uint64_t rx_frames;
+ uint64_t rx_bytes;
+
+ uint64_t tx_frames;
+ uint64_t tx_bytes;
+
+ uint32_t fw_ver_major;
+ uint32_t fw_ver_minor;
+ uint32_t fw_ver_sub;
+ uint32_t fw_ver_build;
+
+ /* hardware specific */
+ qla_hw_t hw;
+
+ /* debug stuff */
+ volatile const char *qla_lock;
+ volatile const char *qla_unlock;
+};
+typedef struct qla_host qla_host_t;
+
+/* note that align has to be a power of 2 */
+#define QL_ALIGN(size, align)	(((size) + ((align) - 1)) & ~((align) - 1))
+#define QL_MIN(x, y)	(((x) < (y)) ? (x) : (y))
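+/*
+ * Quick illustration (values only): QL_ALIGN(1500, 64) == 1536,
+ * QL_MIN(3, 7) == 3.
+ */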
+
+#define QL_RUNNING(ifp) \
+ ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
+ IFF_DRV_RUNNING)
+
+#endif /* #ifndef _QLA_DEF_H_ */
diff --git a/sys/dev/qlxgb/qla_glbl.h b/sys/dev/qlxgb/qla_glbl.h
new file mode 100644
index 0000000..21ee99c
--- /dev/null
+++ b/sys/dev/qlxgb/qla_glbl.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_glbl.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ * Content: Contains prototypes of the exported functions from each file.
+ */
+#ifndef _QLA_GLBL_H_
+#define _QLA_GLBL_H_
+
+/*
+ * from qla_isr.c
+ */
+extern void qla_isr(void *arg);
+extern void qla_rcv(void *context, int pending);
+
+/*
+ * from qla_os.c
+ */
+extern uint32_t std_replenish;
+extern uint32_t jumbo_replenish;
+extern uint32_t rcv_pkt_thres;
+extern uint32_t rcv_pkt_thres_d;
+extern uint32_t snd_pkt_thres;
+extern uint32_t free_pkt_thres;
+
+extern int qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf);
+extern void qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf);
+extern void qla_start(struct ifnet *ifp);
+extern int qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
+ uint32_t jumbo);
+
+
+/*
+ * from qla_flash.c
+ */
+extern int qla_flash_rd32_words(qla_host_t *ha, uint32_t addr,
+ uint32_t *val, uint32_t num);
+extern int qla_flash_rd32(qla_host_t *ha, uint32_t addr, uint32_t *val);
+
+/*
+ * from qla_hw.c
+ */
+extern int qla_get_msix_count(qla_host_t *ha);
+extern int qla_alloc_dma(qla_host_t *ha);
+extern void qla_free_dma(qla_host_t *ha);
+extern void qla_hw_add_sysctls(qla_host_t *ha);
+extern int qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
+ uint32_t *tx_idx, struct mbuf *mp);
+extern int qla_init_hw_if(qla_host_t *ha);
+extern void qla_get_hw_caps(qla_host_t *ha);
+extern void qla_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
+ uint32_t add_multi);
+extern void qla_del_hw_if(qla_host_t *ha);
+extern void qla_set_promisc(qla_host_t *ha);
+extern void qla_set_allmulti(qla_host_t *ha);
+extern void qla_reset_promisc_allmulti(qla_host_t *ha);
+extern void qla_config_ipv4_addr(qla_host_t *ha, uint32_t ipv4_addr);
+extern int qla_hw_tx_compl(qla_host_t *ha);
+extern void qla_update_link_state(qla_host_t *ha);
+extern void qla_hw_tx_done(qla_host_t *ha);
+extern int qla_config_lro(qla_host_t *ha);
+extern void qla_free_lro(qla_host_t *ha);
+extern int qla_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id);
+extern void qla_hw_stop_rcv(qla_host_t *ha);
+
+/*
+ * from qla_misc.c
+ */
+extern int qla_init_hw(qla_host_t *ha);
+extern int qla_rdwr_indreg32(qla_host_t *ha, uint32_t addr, uint32_t *val,
+ uint32_t rd);
+extern int qla_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data);
+
+/*
+ * from qla_ioctl.c
+ */
+extern int qla_make_cdev(qla_host_t *ha);
+extern void qla_del_cdev(qla_host_t *ha);
+extern int qla_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td);
+
+#endif /* #ifndef _QLA_GLBL_H_ */
diff --git a/sys/dev/qlxgb/qla_hw.c b/sys/dev/qlxgb/qla_hw.c
new file mode 100644
index 0000000..477eb57
--- /dev/null
+++ b/sys/dev/qlxgb/qla_hw.c
@@ -0,0 +1,1776 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File: qla_hw.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ * Content: Contains hardware-dependent functions
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_inline.h"
+#include "qla_ver.h"
+#include "qla_glbl.h"
+#include "qla_dbg.h"
+
+static uint32_t sysctl_num_rds_rings = 2;
+static uint32_t sysctl_num_sds_rings = 4;
+
+/*
+ * Static Functions
+ */
+
+static void qla_init_cntxt_regions(qla_host_t *ha);
+static int qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp);
+static int qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size);
+static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
+ uint16_t cntxt_id, uint32_t add_multi);
+static void qla_del_rcv_cntxt(qla_host_t *ha);
+static int qla_init_rcv_cntxt(qla_host_t *ha);
+static void qla_del_xmt_cntxt(qla_host_t *ha);
+static int qla_init_xmt_cntxt(qla_host_t *ha);
+static int qla_get_max_rds(qla_host_t *ha);
+static int qla_get_max_sds(qla_host_t *ha);
+static int qla_get_max_rules(qla_host_t *ha);
+static int qla_get_max_rcv_cntxts(qla_host_t *ha);
+static int qla_get_max_tx_cntxts(qla_host_t *ha);
+static int qla_get_max_mtu(qla_host_t *ha);
+static int qla_get_max_lro(qla_host_t *ha);
+static int qla_get_flow_control(qla_host_t *ha);
+static void qla_hw_tx_done_locked(qla_host_t *ha);
+
+int
+qla_get_msix_count(qla_host_t *ha)
+{
+ return (sysctl_num_sds_rings);
+}
+
+/*
+ * Name: qla_hw_add_sysctls
+ * Function: Add P3Plus specific sysctls
+ */
+void
+qla_hw_add_sysctls(qla_host_t *ha)
+{
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "num_rds_rings", CTLFLAG_RD, &sysctl_num_rds_rings,
+ sysctl_num_rds_rings, "Number of Rcv Descriptor Rings");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "num_sds_rings", CTLFLAG_RD, &sysctl_num_sds_rings,
+ sysctl_num_sds_rings, "Number of Status Descriptor Rings");
+}
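+/*
+ * Both sysctls are exported read-only; qla_alloc_dma() snapshots them
+ * into ha->hw.num_rds_rings/num_sds_rings, so in this version of the
+ * driver the ring counts are effectively fixed at compile time.
+ */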
+
+/*
+ * Name: qla_free_dma
+ * Function: Frees the DMA'able memory allocated in qla_alloc_dma()
+ */
+void
+qla_free_dma(qla_host_t *ha)
+{
+ uint32_t i;
+
+ if (ha->hw.dma_buf.flags.context) {
+ qla_free_dmabuf(ha, &ha->hw.dma_buf.context);
+ ha->hw.dma_buf.flags.context = 0;
+ }
+
+ if (ha->hw.dma_buf.flags.sds_ring) {
+ for (i = 0; i < ha->hw.num_sds_rings; i++)
+ qla_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
+ ha->hw.dma_buf.flags.sds_ring = 0;
+ }
+
+ if (ha->hw.dma_buf.flags.rds_ring) {
+ for (i = 0; i < ha->hw.num_rds_rings; i++)
+ qla_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
+ ha->hw.dma_buf.flags.rds_ring = 0;
+ }
+
+ if (ha->hw.dma_buf.flags.tx_ring) {
+ qla_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
+ ha->hw.dma_buf.flags.tx_ring = 0;
+ }
+}
+
+/*
+ * Name: qla_alloc_dma
+ * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
+ */
+int
+qla_alloc_dma(qla_host_t *ha)
+{
+ device_t dev;
+ uint32_t i, j, size;
+
+ dev = ha->pci_dev;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ ha->hw.num_rds_rings = (uint16_t)sysctl_num_rds_rings;
+ ha->hw.num_sds_rings = (uint16_t)sysctl_num_sds_rings;
+
+ /*
+ * Allocate Transmit Ring
+ */
+
+ ha->hw.dma_buf.tx_ring.alignment = 8;
+ ha->hw.dma_buf.tx_ring.size =
+ (sizeof(q80_tx_cmd_t)) * NUM_TX_DESCRIPTORS;
+
+ if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) {
+ device_printf(dev, "%s: tx ring alloc failed\n", __func__);
+ goto qla_alloc_dma_exit;
+ }
+ ha->hw.dma_buf.flags.tx_ring = 1;
+
+ QL_DPRINT2((dev, "%s: tx_ring phys %p virt %p\n",
+ __func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr),
+ ha->hw.dma_buf.tx_ring.dma_b));
+ /*
+ * Allocate Receive Descriptor Rings
+ */
+
+ for (i = 0; i < ha->hw.num_rds_rings; i++) {
+ ha->hw.dma_buf.rds_ring[i].alignment = 8;
+
+ if (i == RDS_RING_INDEX_NORMAL) {
+ ha->hw.dma_buf.rds_ring[i].size =
+ (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
+ } else if (i == RDS_RING_INDEX_JUMBO) {
+ ha->hw.dma_buf.rds_ring[i].size =
+ (sizeof(q80_recv_desc_t)) *
+ NUM_RX_JUMBO_DESCRIPTORS;
+ } else
+ break;
+
+ if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i])) {
+ QL_DPRINT4((dev, "%s: rds ring alloc failed\n",
+ __func__));
+
+ for (j = 0; j < i; j++)
+ qla_free_dmabuf(ha,
+ &ha->hw.dma_buf.rds_ring[j]);
+
+ goto qla_alloc_dma_exit;
+ }
+ QL_DPRINT4((dev, "%s: rx_ring[%d] phys %p virt %p\n",
+ __func__, i,
+ (void *)(ha->hw.dma_buf.rds_ring[i].dma_addr),
+ ha->hw.dma_buf.rds_ring[i].dma_b));
+ }
+ ha->hw.dma_buf.flags.rds_ring = 1;
+
+ /*
+ * Allocate Status Descriptor Rings
+ */
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ ha->hw.dma_buf.sds_ring[i].alignment = 8;
+ ha->hw.dma_buf.sds_ring[i].size =
+ (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
+
+ if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i])) {
+ device_printf(dev, "%s: sds ring alloc failed\n",
+ __func__);
+
+ for (j = 0; j < i; j++)
+ qla_free_dmabuf(ha,
+ &ha->hw.dma_buf.sds_ring[j]);
+
+ goto qla_alloc_dma_exit;
+ }
+ QL_DPRINT4((dev, "%s: sds_ring[%d] phys %p virt %p\n",
+ __func__, i,
+ (void *)(ha->hw.dma_buf.sds_ring[i].dma_addr),
+ ha->hw.dma_buf.sds_ring[i].dma_b));
+ }
+ ha->hw.dma_buf.flags.sds_ring = 1;
+
+ /*
+ * Allocate Context Area
+ */
+ size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);
+
+ size += QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);
+
+ size += QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);
+
+ size += QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);
+
+ size += sizeof (uint32_t); /* for tx consumer index */
+
+ size = QL_ALIGN(size, PAGE_SIZE);
+
+ ha->hw.dma_buf.context.alignment = 8;
+ ha->hw.dma_buf.context.size = size;
+
+ if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.context)) {
+ device_printf(dev, "%s: context alloc failed\n", __func__);
+ goto qla_alloc_dma_exit;
+ }
+ ha->hw.dma_buf.flags.context = 1;
+ QL_DPRINT2((dev, "%s: context phys %p virt %p\n",
+ __func__, (void *)(ha->hw.dma_buf.context.dma_addr),
+ ha->hw.dma_buf.context.dma_b));
+
+ qla_init_cntxt_regions(ha);
+
+ return 0;
+
+qla_alloc_dma_exit:
+ qla_free_dma(ha);
+ return -1;
+}
+
+/*
+ * Name: qla_init_cntxt_regions
+ * Function: Initializes Tx/Rx Contexts.
+ */
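+/*
+ * A sketch of how the single context page allocated in qla_alloc_dma()
+ * is carved up below (each region is rounded up to QL_BUFFER_ALIGN):
+ *
+ *	+------------------------+ <- dma_buf.context.dma_addr
+ *	| q80_tx_cntxt_req_t     |
+ *	+------------------------+
+ *	| q80_tx_cntxt_rsp_t     |
+ *	+------------------------+
+ *	| q80_rcv_cntxt_req_t    |
+ *	+------------------------+
+ *	| q80_rcv_cntxt_rsp_t    |
+ *	+------------------------+
+ *	| uint32_t tx consumer   | <- shadow of the hw consumer index
+ *	+------------------------+
+ */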
+static void
+qla_init_cntxt_regions(qla_host_t *ha)
+{
+ qla_hw_t *hw;
+ q80_tx_cntxt_req_t *tx_cntxt_req;
+ q80_rcv_cntxt_req_t *rx_cntxt_req;
+ bus_addr_t phys_addr;
+ uint32_t i;
+ device_t dev;
+ uint32_t size;
+
+ dev = ha->pci_dev;
+
+ hw = &ha->hw;
+
+ hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b;
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++)
+ hw->sds[i].sds_ring_base =
+ (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
+
+
+ phys_addr = hw->dma_buf.context.dma_addr;
+
+ memset((void *)hw->dma_buf.context.dma_b, 0,
+ ha->hw.dma_buf.context.size);
+
+ hw->tx_cntxt_req =
+ (q80_tx_cntxt_req_t *)hw->dma_buf.context.dma_b;
+ hw->tx_cntxt_req_paddr = phys_addr;
+
+ size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);
+
+ hw->tx_cntxt_rsp =
+ (q80_tx_cntxt_rsp_t *)((uint8_t *)hw->tx_cntxt_req + size);
+ hw->tx_cntxt_rsp_paddr = hw->tx_cntxt_req_paddr + size;
+
+ size = QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);
+
+ hw->rx_cntxt_req =
+ (q80_rcv_cntxt_req_t *)((uint8_t *)hw->tx_cntxt_rsp + size);
+ hw->rx_cntxt_req_paddr = hw->tx_cntxt_rsp_paddr + size;
+
+ size = QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);
+
+ hw->rx_cntxt_rsp =
+ (q80_rcv_cntxt_rsp_t *)((uint8_t *)hw->rx_cntxt_req + size);
+ hw->rx_cntxt_rsp_paddr = hw->rx_cntxt_req_paddr + size;
+
+ size = QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);
+
+ hw->tx_cons = (uint32_t *)((uint8_t *)hw->rx_cntxt_rsp + size);
+ hw->tx_cons_paddr = hw->rx_cntxt_rsp_paddr + size;
+
+ /*
+ * Initialize the Transmit Context Request so that we don't need to
+	 * do it every time we need to create a context
+ */
+ tx_cntxt_req = hw->tx_cntxt_req;
+
+ tx_cntxt_req->rsp_dma_addr = qla_host_to_le64(hw->tx_cntxt_rsp_paddr);
+
+ tx_cntxt_req->cmd_cons_dma_addr = qla_host_to_le64(hw->tx_cons_paddr);
+
+ tx_cntxt_req->caps[0] = qla_host_to_le32((CNTXT_CAP0_BASEFW |
+ CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_LSO));
+
+ tx_cntxt_req->intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED);
+
+ tx_cntxt_req->phys_addr =
+ qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr);
+
+ tx_cntxt_req->num_entries = qla_host_to_le32(NUM_TX_DESCRIPTORS);
+
+ /*
+ * Initialize the Receive Context Request
+ */
+
+ rx_cntxt_req = hw->rx_cntxt_req;
+
+ rx_cntxt_req->rx_req.rsp_dma_addr =
+ qla_host_to_le64(hw->rx_cntxt_rsp_paddr);
+
+ rx_cntxt_req->rx_req.caps[0] = qla_host_to_le32(CNTXT_CAP0_BASEFW |
+ CNTXT_CAP0_LEGACY_MN |
+ CNTXT_CAP0_JUMBO |
+					CNTXT_CAP0_LRO |
+ CNTXT_CAP0_HW_LRO);
+
+ rx_cntxt_req->rx_req.intr_mode =
+ qla_host_to_le32(CNTXT_INTR_MODE_SHARED);
+
+ rx_cntxt_req->rx_req.rds_intr_mode =
+ qla_host_to_le32(CNTXT_INTR_MODE_UNIQUE);
+
+ rx_cntxt_req->rx_req.rds_ring_offset = 0;
+ rx_cntxt_req->rx_req.sds_ring_offset = qla_host_to_le32(
+ (hw->num_rds_rings * sizeof(q80_rq_rds_ring_t)));
+ rx_cntxt_req->rx_req.num_rds_rings =
+ qla_host_to_le16(hw->num_rds_rings);
+ rx_cntxt_req->rx_req.num_sds_rings =
+ qla_host_to_le16(hw->num_sds_rings);
+
+ for (i = 0; i < hw->num_rds_rings; i++) {
+ rx_cntxt_req->rds_req[i].phys_addr =
+ qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
+
+ if (i == RDS_RING_INDEX_NORMAL) {
+ rx_cntxt_req->rds_req[i].buf_size =
+ qla_host_to_le64(MCLBYTES);
+ rx_cntxt_req->rds_req[i].size =
+ qla_host_to_le32(NUM_RX_DESCRIPTORS);
+ } else {
+ rx_cntxt_req->rds_req[i].buf_size =
+ qla_host_to_le64(MJUM9BYTES);
+ rx_cntxt_req->rds_req[i].size =
+ qla_host_to_le32(NUM_RX_JUMBO_DESCRIPTORS);
+ }
+ }
+
+ for (i = 0; i < hw->num_sds_rings; i++) {
+ rx_cntxt_req->sds_req[i].phys_addr =
+ qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
+ rx_cntxt_req->sds_req[i].size =
+ qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
+ rx_cntxt_req->sds_req[i].msi_index = qla_host_to_le16(i);
+ }
+
+ QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_req = %p paddr %p\n",
+ __func__, hw->tx_cntxt_req, (void *)hw->tx_cntxt_req_paddr));
+ QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_rsp = %p paddr %p\n",
+ __func__, hw->tx_cntxt_rsp, (void *)hw->tx_cntxt_rsp_paddr));
+ QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_req = %p paddr %p\n",
+ __func__, hw->rx_cntxt_req, (void *)hw->rx_cntxt_req_paddr));
+ QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_rsp = %p paddr %p\n",
+ __func__, hw->rx_cntxt_rsp, (void *)hw->rx_cntxt_rsp_paddr));
+ QL_DPRINT2((ha->pci_dev, "%s: tx_cons = %p paddr %p\n",
+ __func__, hw->tx_cons, (void *)hw->tx_cons_paddr));
+}
+
+/*
+ * Name: qla_issue_cmd
+ * Function: Issues commands on the CDRP interface and returns responses.
+ */
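+/*
+ * The handshake implemented below: acquire the SEM5 hardware semaphore,
+ * write the three arguments and a per-function signature, then write the
+ * command (bit 31 set) to the CMD_RSP register.  Firmware clears bit 31
+ * when done and leaves 1 in CMD_RSP on success; we poll in 10ms steps
+ * for up to 4 seconds before declaring failure.
+ */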
+static int
+qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp)
+{
+ int ret = 0;
+ uint32_t signature;
+ uint32_t count = 400; /* 4 seconds or 400 10ms intervals */
+ uint32_t data;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ signature = 0xcafe0000 | 0x0100 | ha->pci_func;
+
+ ret = qla_sem_lock(ha, Q8_SEM5_LOCK, 0, (uint32_t)ha->pci_func);
+
+ if (ret) {
+ device_printf(dev, "%s: SEM5_LOCK lock failed\n", __func__);
+ return (ret);
+ }
+
+ WRITE_OFFSET32(ha, Q8_NX_CDRP_SIGNATURE, signature);
+
+ WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG1, (cdrp->cmd_arg1));
+ WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG2, (cdrp->cmd_arg2));
+ WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG3, (cdrp->cmd_arg3));
+
+ WRITE_OFFSET32(ha, Q8_NX_CDRP_CMD_RSP, cdrp->cmd);
+
+ while (count) {
+ qla_mdelay(__func__, 10);
+
+ data = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
+
+		if (!(data & 0x80000000))
+ break;
+ count--;
+ }
+ if ((!count) || (data != 1))
+ ret = -1;
+
+ cdrp->rsp = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
+ cdrp->rsp_arg1 = READ_REG32(ha, Q8_NX_CDRP_ARG1);
+ cdrp->rsp_arg2 = READ_REG32(ha, Q8_NX_CDRP_ARG2);
+ cdrp->rsp_arg3 = READ_REG32(ha, Q8_NX_CDRP_ARG3);
+
+ qla_sem_unlock(ha, Q8_SEM5_UNLOCK);
+
+ if (ret) {
+ device_printf(dev, "%s: "
+ "cmd[0x%08x] = 0x%08x\n"
+ "\tsig[0x%08x] = 0x%08x\n"
+ "\targ1[0x%08x] = 0x%08x\n"
+ "\targ2[0x%08x] = 0x%08x\n"
+ "\targ3[0x%08x] = 0x%08x\n",
+ __func__, Q8_NX_CDRP_CMD_RSP, cdrp->cmd,
+ Q8_NX_CDRP_SIGNATURE, signature,
+ Q8_NX_CDRP_ARG1, cdrp->cmd_arg1,
+ Q8_NX_CDRP_ARG2, cdrp->cmd_arg2,
+ Q8_NX_CDRP_ARG3, cdrp->cmd_arg3);
+
+ device_printf(dev, "%s: exit (ret = 0x%x)\n"
+ "\t\t rsp = 0x%08x\n"
+ "\t\t arg1 = 0x%08x\n"
+ "\t\t arg2 = 0x%08x\n"
+ "\t\t arg3 = 0x%08x\n",
+ __func__, ret, cdrp->rsp,
+ cdrp->rsp_arg1, cdrp->rsp_arg2, cdrp->rsp_arg3);
+ }
+
+ return (ret);
+}
+
+#define QLA_TX_MIN_FREE 2
+
+/*
+ * Name: qla_fw_cmd
+ * Function: Issues firmware control commands on the Tx Ring.
+ */
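+/*
+ * Note: each control request is copied into a single transmit ring slot,
+ * so callers must pass a structure no larger than sizeof(q80_tx_cmd_t)
+ * (64 bytes).
+ */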
+static int
+qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size)
+{
+ device_t dev;
+ q80_tx_cmd_t *tx_cmd;
+ qla_hw_t *hw = &ha->hw;
+ int count = 100;
+
+ dev = ha->pci_dev;
+
+ QLA_TX_LOCK(ha);
+
+ if (hw->txr_free <= QLA_TX_MIN_FREE) {
+ while (count--) {
+ qla_hw_tx_done_locked(ha);
+ if (hw->txr_free > QLA_TX_MIN_FREE)
+ break;
+
+ QLA_TX_UNLOCK(ha);
+ qla_mdelay(__func__, 10);
+ QLA_TX_LOCK(ha);
+ }
+ if (hw->txr_free <= QLA_TX_MIN_FREE) {
+ QLA_TX_UNLOCK(ha);
+ device_printf(dev, "%s: xmit queue full\n", __func__);
+ return (-1);
+ }
+ }
+ tx_cmd = &hw->tx_ring_base[hw->txr_next];
+
+ bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
+
+ bcopy(fw_cmd, tx_cmd, size);
+
+ hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
+ hw->txr_free--;
+
+ QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);
+
+ QLA_TX_UNLOCK(ha);
+
+ return (0);
+}
+
+/*
+ * Name: qla_config_rss
+ * Function: Configure RSS for the context/interface.
+ */
+static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+static int
+qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
+{
+ qla_fw_cds_config_rss_t rss_config;
+ int ret, i;
+
+ bzero(&rss_config, sizeof(qla_fw_cds_config_rss_t));
+
+ rss_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
+ rss_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_RSS;
+ rss_config.hdr.cntxt_id = cntxt_id;
+
+ rss_config.hash_type = (Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP_IP |
+ Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP_IP);
+ rss_config.flags = Q8_FWCD_RSS_FLAGS_ENABLE_RSS;
+
+ rss_config.ind_tbl_mask = 0x7;
+
+ for (i = 0; i < 5; i++)
+ rss_config.rss_key[i] = rss_key[i];
+
+ ret = qla_fw_cmd(ha, &rss_config, sizeof(qla_fw_cds_config_rss_t));
+
+ return ret;
+}
+
+/*
+ * Name: qla_config_intr_coalesce
+ * Function: Configure Interrupt Coalescing.
+ */
+static int
+qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable)
+{
+ qla_fw_cds_config_intr_coalesc_t intr_coalesce;
+ int ret;
+
+ bzero(&intr_coalesce, sizeof(qla_fw_cds_config_intr_coalesc_t));
+
+ intr_coalesce.hdr.cmd = Q8_FWCD_CNTRL_REQ;
+ intr_coalesce.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_INTR_COALESCING;
+ intr_coalesce.hdr.cntxt_id = cntxt_id;
+
+ intr_coalesce.flags = 0x04;
+ intr_coalesce.max_rcv_pkts = 256;
+ intr_coalesce.max_rcv_usecs = 3;
+ intr_coalesce.max_snd_pkts = 64;
+ intr_coalesce.max_snd_usecs = 4;
+
+ if (tenable) {
+ intr_coalesce.usecs_to = 1000; /* 1 millisecond */
+ intr_coalesce.timer_type = Q8_FWCMD_INTR_COALESC_TIMER_PERIODIC;
+ intr_coalesce.sds_ring_bitmask =
+ Q8_FWCMD_INTR_COALESC_SDS_RING_0;
+ }
+
+ ret = qla_fw_cmd(ha, &intr_coalesce,
+ sizeof(qla_fw_cds_config_intr_coalesc_t));
+
+ return ret;
+}
+
+
+/*
+ * Name: qla_config_mac_addr
+ * Function: binds a MAC address to the context/interface.
+ * Can be unicast, multicast or broadcast.
+ */
+static int
+qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint16_t cntxt_id,
+ uint32_t add_multi)
+{
+ qla_fw_cds_config_mac_addr_t mac_config;
+ int ret;
+
+// device_printf(ha->pci_dev,
+// "%s: mac_addr %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
+// mac_addr[0], mac_addr[1], mac_addr[2],
+// mac_addr[3], mac_addr[4], mac_addr[5]);
+
+ bzero(&mac_config, sizeof(qla_fw_cds_config_mac_addr_t));
+
+ mac_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
+ mac_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_ADDR;
+ mac_config.hdr.cntxt_id = cntxt_id;
+
+ if (add_multi)
+ mac_config.cmd = Q8_FWCD_ADD_MAC_ADDR;
+ else
+ mac_config.cmd = Q8_FWCD_DEL_MAC_ADDR;
+	bcopy(mac_addr, mac_config.mac_addr, 6);
+
+ ret = qla_fw_cmd(ha, &mac_config, sizeof(qla_fw_cds_config_mac_addr_t));
+
+ return ret;
+}
+
+
+/*
+ * Name: qla_set_mac_rcv_mode
+ * Function: Enable/Disable AllMulticast and Promiscuous Modes.
+ */
+static int
+qla_set_mac_rcv_mode(qla_host_t *ha, uint16_t cntxt_id, uint32_t mode)
+{
+ qla_set_mac_rcv_mode_t rcv_mode;
+ int ret;
+
+ bzero(&rcv_mode, sizeof(qla_set_mac_rcv_mode_t));
+
+ rcv_mode.hdr.cmd = Q8_FWCD_CNTRL_REQ;
+ rcv_mode.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_RCV_MODE;
+ rcv_mode.hdr.cntxt_id = cntxt_id;
+
+ rcv_mode.mode = mode;
+
+ ret = qla_fw_cmd(ha, &rcv_mode, sizeof(qla_set_mac_rcv_mode_t));
+
+ return ret;
+}
+
+void
+qla_set_promisc(qla_host_t *ha)
+{
+ (void)qla_set_mac_rcv_mode(ha,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
+ Q8_MAC_RCV_ENABLE_PROMISCUOUS);
+}
+
+void
+qla_set_allmulti(qla_host_t *ha)
+{
+ (void)qla_set_mac_rcv_mode(ha,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
+ Q8_MAC_RCV_ENABLE_ALLMULTI);
+}
+
+void
+qla_reset_promisc_allmulti(qla_host_t *ha)
+{
+ (void)qla_set_mac_rcv_mode(ha,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
+ Q8_MAC_RCV_RESET_PROMISC_ALLMULTI);
+}
+
+/*
+ * Name: qla_config_ipv4_addr
+ * Function: Configures the Destination IP Addr for LRO.
+ */
+void
+qla_config_ipv4_addr(qla_host_t *ha, uint32_t ipv4_addr)
+{
+ qla_config_ipv4_t ip_conf;
+
+ bzero(&ip_conf, sizeof(qla_config_ipv4_t));
+
+ ip_conf.hdr.cmd = Q8_FWCD_CNTRL_REQ;
+ ip_conf.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_IPADDR;
+ ip_conf.hdr.cntxt_id = (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;
+
+ ip_conf.cmd = (uint64_t)Q8_CONFIG_CMD_IP_ENABLE;
+ ip_conf.ipv4_addr = (uint64_t)ipv4_addr;
+
+ (void)qla_fw_cmd(ha, &ip_conf, sizeof(qla_config_ipv4_t));
+
+ return;
+}
+
+/*
+ * Name: qla_tx_tso
+ * Function: Checks if the packet to be transmitted is a candidate for
+ * Large TCP Segment Offload. If yes, the appropriate fields in the Tx
+ * Ring Structure are plugged in.
+ */
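+/*
+ * Note: the complete MAC+IP+TCP header must reside in the first mbuf of
+ * the chain; otherwise the function returns -1 and the frame falls back
+ * to the non-TSO transmit path in qla_hw_send().
+ */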
+static int
+qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd)
+{
+ struct ether_vlan_header *eh;
+ struct ip *ip = NULL;
+ struct tcphdr *th = NULL;
+ uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen;
+ uint16_t etype, opcode, offload = 1;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ if (mp->m_pkthdr.len <= ha->max_frame_size)
+ return (-1);
+
+ eh = mtod(mp, struct ether_vlan_header *);
+
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ etype = ntohs(eh->evl_proto);
+ } else {
+ ehdrlen = ETHER_HDR_LEN;
+ etype = ntohs(eh->evl_encap_proto);
+ }
+
+ switch (etype) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+ ip_hlen = ip->ip_hl << 2;
+ opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
+
+ if (ip->ip_p != IPPROTO_TCP) {
+ offload = 0;
+ } else
+ th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+ break;
+
+ default:
+ QL_DPRINT8((dev, "%s: type!=ip\n", __func__));
+ offload = 0;
+ break;
+ }
+
+ if (!offload)
+ return (-1);
+
+ tcp_hlen = th->th_off << 2;
+
+ hdrlen = ehdrlen + ip_hlen + tcp_hlen;
+
+ if (mp->m_len < hdrlen) {
+ device_printf(dev, "%s: (mp->m_len < hdrlen)\n", __func__);
+ return (-1);
+ }
+
+	tx_cmd->flags_opcode = opcode;
+ tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
+ tx_cmd->ip_hdr_off = ehdrlen;
+ tx_cmd->mss = mp->m_pkthdr.tso_segsz;
+ tx_cmd->total_hdr_len = hdrlen;
+
+	/* Multicast check: least significant bit of the first destination
+	 * address byte is set; OR the flag in so the LSO opcode set above
+	 * is preserved */
+	if (eh->evl_dhost[0] & 0x01) {
+		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
+	}
+
+ return (0);
+}
+
+/*
+ * Name: qla_tx_chksum
+ * Function: Checks if the packet to be transmitted is a candidate for
+ * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
+ * Ring Structure are plugged in.
+ */
+static int
+qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd)
+{
+ struct ether_vlan_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ uint32_t ehdrlen, ip_hlen;
+ uint16_t etype, opcode, offload = 1;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
+ return (-1);
+
+ eh = mtod(mp, struct ether_vlan_header *);
+
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ etype = ntohs(eh->evl_proto);
+ } else {
+ ehdrlen = ETHER_HDR_LEN;
+ etype = ntohs(eh->evl_encap_proto);
+ }
+
+
+ switch (etype) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+
+ ip_hlen = ip->ip_hl << 2;
+
+ if (mp->m_len < (ehdrlen + ip_hlen)) {
+ device_printf(dev, "%s: ipv4 mlen\n", __func__);
+ offload = 0;
+ break;
+ }
+
+ if (ip->ip_p == IPPROTO_TCP)
+ opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
+ else if (ip->ip_p == IPPROTO_UDP)
+ opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
+ else {
+ device_printf(dev, "%s: ipv4\n", __func__);
+ offload = 0;
+ }
+ break;
+
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+
+ ip_hlen = sizeof(struct ip6_hdr);
+
+ if (mp->m_len < (ehdrlen + ip_hlen)) {
+ device_printf(dev, "%s: ipv6 mlen\n", __func__);
+ offload = 0;
+ break;
+ }
+
+ if (ip6->ip6_nxt == IPPROTO_TCP)
+ opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
+ else if (ip6->ip6_nxt == IPPROTO_UDP)
+ opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
+ else {
+ device_printf(dev, "%s: ipv6\n", __func__);
+ offload = 0;
+ }
+ break;
+
+ default:
+ offload = 0;
+ break;
+ }
+ if (!offload)
+ return (-1);
+
+ tx_cmd->flags_opcode = opcode;
+
+ tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
+
+ return (0);
+}
+
+/*
+ * Name: qla_hw_send
+ * Function: Transmits a packet. It first checks if the packet is a
+ * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
+ * offload. If neither criterion is met, it is transmitted
+ * as a regular Ethernet frame.
+ */
+int
+qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
+ uint32_t *tx_idx, struct mbuf *mp)
+{
+ struct ether_vlan_header *eh;
+ qla_hw_t *hw = &ha->hw;
+ q80_tx_cmd_t *tx_cmd, tso_cmd;
+ bus_dma_segment_t *c_seg;
+ uint32_t num_tx_cmds, hdr_len = 0;
+ uint32_t total_length = 0, bytes, tx_cmd_count = 0;
+ device_t dev;
+ int i;
+
+ dev = ha->pci_dev;
+
+ /*
+	 * Always make sure there is at least one empty slot in the tx_ring;
+	 * the tx_ring is considered full when only one entry is available.
+ */
+ num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
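+	/* i.e. howmany(nsegs, Q8_TX_CMD_MAX_SEGMENTS): each 64-byte
+	 * command descriptor can point at up to 4 buffer segments */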
+
+ total_length = mp->m_pkthdr.len;
+ if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
+ device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
+ __func__, total_length);
+ return (-1);
+ }
+
+ bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
+
+ if (qla_tx_tso(ha, mp, &tso_cmd) == 0) {
+ /* find the additional tx_cmd descriptors required */
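+		/*
+		 * The first extra descriptor carries (sizeof(q80_tx_cmd_t) -
+		 * Q8_TX_CMD_TSO_ALIGN) header bytes; each subsequent one
+		 * carries a full sizeof(q80_tx_cmd_t) bytes, mirroring the
+		 * header copy loop near the end of this function.
+		 */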
+
+ hdr_len = tso_cmd.total_hdr_len;
+
+ bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
+ bytes = QL_MIN(bytes, hdr_len);
+
+ num_tx_cmds++;
+ hdr_len -= bytes;
+
+ while (hdr_len) {
+ bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
+ hdr_len -= bytes;
+ num_tx_cmds++;
+ }
+ hdr_len = tso_cmd.total_hdr_len;
+ }
+
+ if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
+ qla_hw_tx_done_locked(ha);
+ if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
+ QL_DPRINT8((dev, "%s: (hw->txr_free <= "
+ "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
+ __func__));
+ return (-1);
+ }
+ }
+
+ *tx_idx = hw->txr_next;
+
+ tx_cmd = &hw->tx_ring_base[hw->txr_next];
+
+ if (hdr_len == 0) {
+ if ((nsegs > Q8_TX_MAX_SEGMENTS) ||
+ (mp->m_pkthdr.len > ha->max_frame_size)){
+ /* TBD: copy into private buffer and send it */
+ device_printf(dev,
+ "%s: (nsegs[%d, %d, 0x%x] > Q8_TX_MAX_SEGMENTS)\n",
+ __func__, nsegs, mp->m_pkthdr.len,
+ mp->m_pkthdr.csum_flags);
+ qla_dump_buf8(ha, "qla_hw_send: wrong pkt",
+ mtod(mp, char *), mp->m_len);
+ return (EINVAL);
+ }
+ bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
+ if (qla_tx_chksum(ha, mp, tx_cmd) != 0)
+ tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
+ } else {
+ bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
+ }
+
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
+ tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
+ else if (mp->m_flags & M_VLANTAG) {
+ tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
+ Q8_TX_CMD_FLAGS_HW_VLAN_ID);
+ tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
+ }
+
+
+ tx_cmd->n_bufs = (uint8_t)nsegs;
+ tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
+ tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
+ tx_cmd->port_cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
+
+ c_seg = segs;
+
+ while (1) {
+ for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
+
+ switch (i) {
+ case 0:
+ tx_cmd->buf1_addr = c_seg->ds_addr;
+ tx_cmd->buf1_len = c_seg->ds_len;
+ break;
+
+ case 1:
+ tx_cmd->buf2_addr = c_seg->ds_addr;
+ tx_cmd->buf2_len = c_seg->ds_len;
+ break;
+
+ case 2:
+ tx_cmd->buf3_addr = c_seg->ds_addr;
+ tx_cmd->buf3_len = c_seg->ds_len;
+ break;
+
+ case 3:
+ tx_cmd->buf4_addr = c_seg->ds_addr;
+ tx_cmd->buf4_len = c_seg->ds_len;
+ break;
+ }
+
+ c_seg++;
+ nsegs--;
+ }
+
+ hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
+ tx_cmd_count++;
+
+ if (!nsegs)
+ break;
+
+ tx_cmd = &hw->tx_ring_base[hw->txr_next];
+ bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
+ }
+
+ if (hdr_len) {
+ /* TSO : Copy the header in the following tx cmd descriptors */
+ uint8_t *src, *dst;
+
+ src = (uint8_t *)eh;
+
+ tx_cmd = &hw->tx_ring_base[hw->txr_next];
+ bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
+
+ bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
+ bytes = QL_MIN(bytes, hdr_len);
+
+ dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
+
+ if (mp->m_flags & M_VLANTAG) {
+ /* first copy the src/dst MAC addresses */
+ bcopy(src, dst, (ETHER_ADDR_LEN * 2));
+ dst += (ETHER_ADDR_LEN * 2);
+ src += (ETHER_ADDR_LEN * 2);
+
+ hdr_len -= (ETHER_ADDR_LEN * 2);
+
+ *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
+ dst += 2;
+			/* the tag is kept in host order; the replicated
+			 * header must be in wire (big-endian) order */
+			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
+ dst += 2;
+
+ bytes -= ((ETHER_ADDR_LEN * 2) + 4);
+
+ bcopy(src, dst, bytes);
+ src += bytes;
+ hdr_len -= bytes;
+ } else {
+ bcopy(src, dst, bytes);
+ src += bytes;
+ hdr_len -= bytes;
+ }
+
+ hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
+ tx_cmd_count++;
+
+ while (hdr_len) {
+ tx_cmd = &hw->tx_ring_base[hw->txr_next];
+ bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
+
+ bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
+
+ bcopy(src, tx_cmd, bytes);
+ src += bytes;
+ hdr_len -= bytes;
+ hw->txr_next =
+ (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
+ tx_cmd_count++;
+ }
+ }
+
+ hw->txr_free = hw->txr_free - tx_cmd_count;
+
+ QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);
+ QL_DPRINT8((dev, "%s: return\n", __func__));
+ return (0);
+}
+
+/*
+ * Name: qla_del_hw_if
+ * Function: Destroys the hardware specific entities corresponding to an
+ * Ethernet Interface
+ */
+void
+qla_del_hw_if(qla_host_t *ha)
+{
+ int i;
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++)
+ QL_DISABLE_INTERRUPTS(ha, i);
+
+ qla_del_rcv_cntxt(ha);
+ qla_del_xmt_cntxt(ha);
+
+ ha->hw.flags.lro = 0;
+}
+
+/*
+ * Name: qla_init_hw_if
+ * Function: Creates the hardware specific entities corresponding to an
+ * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
+ * corresponding to the interface. Enables LRO if allowed.
+ */
+int
+qla_init_hw_if(qla_host_t *ha)
+{
+ device_t dev;
+ int i;
+ uint8_t bcast_mac[6];
+
+ qla_get_hw_caps(ha);
+
+ dev = ha->pci_dev;
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
+ ha->hw.dma_buf.sds_ring[i].size);
+ }
+ /*
+ * Create Receive Context
+ */
+ if (qla_init_rcv_cntxt(ha)) {
+ return (-1);
+ }
+
+ ha->hw.rx_next = NUM_RX_DESCRIPTORS - 2;
+ ha->hw.rxj_next = NUM_RX_JUMBO_DESCRIPTORS - 2;
+ ha->hw.rx_in = ha->hw.rxj_in = 0;
+
+ /* Update the RDS Producer Indices */
+ QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
+ QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);
+
+ /*
+ * Create Transmit Context
+ */
+ if (qla_init_xmt_cntxt(ha)) {
+ qla_del_rcv_cntxt(ha);
+ return (-1);
+ }
+
+ qla_config_mac_addr(ha, ha->hw.mac_addr,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);
+
+ bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
+ bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
+ qla_config_mac_addr(ha, bcast_mac,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);
+
+ qla_config_rss(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
+
+ qla_config_intr_coalesce(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 0);
+
+ for (i = 0; i < ha->hw.num_sds_rings; i++)
+ QL_ENABLE_INTERRUPTS(ha, i);
+
+ return (0);
+}
+
+/*
+ * Name: qla_init_rcv_cntxt
+ * Function: Creates the Receive Context.
+ */
+static int
+qla_init_rcv_cntxt(qla_host_t *ha)
+{
+ device_t dev;
+ qla_cdrp_t cdrp;
+ q80_rcv_cntxt_rsp_t *rsp;
+ q80_stat_desc_t *sdesc;
+ bus_addr_t phys_addr;
+ int i, j;
+ qla_hw_t *hw = &ha->hw;
+
+ dev = ha->pci_dev;
+
+ /*
+ * Create Receive Context
+ */
+
+ for (i = 0; i < hw->num_sds_rings; i++) {
+		/* hand every status descriptor to the firmware,
+		 * not just the first */
+		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
+			sdesc = (q80_stat_desc_t *)
+				&hw->sds[i].sds_ring_base[j];
+			sdesc->data[0] =
+				Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
+		}
+ }
+
+ phys_addr = ha->hw.rx_cntxt_req_paddr;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_CREATE_RX_CNTXT;
+ cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
+ cdrp.cmd_arg2 = (uint32_t)(phys_addr);
+ cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_rcv_cntxt_req_t));
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_CREATE_RX_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ rsp = ha->hw.rx_cntxt_rsp;
+
+ QL_DPRINT2((dev, "%s: rcv cntxt successful"
+ " rds_ring_offset = 0x%08x"
+ " sds_ring_offset = 0x%08x"
+ " cntxt_state = 0x%08x"
+ " funcs_per_port = 0x%08x"
+ " num_rds_rings = 0x%04x"
+ " num_sds_rings = 0x%04x"
+ " cntxt_id = 0x%04x"
+ " phys_port = 0x%02x"
+ " virt_port = 0x%02x\n",
+ __func__,
+ rsp->rx_rsp.rds_ring_offset,
+ rsp->rx_rsp.sds_ring_offset,
+ rsp->rx_rsp.cntxt_state,
+ rsp->rx_rsp.funcs_per_port,
+ rsp->rx_rsp.num_rds_rings,
+ rsp->rx_rsp.num_sds_rings,
+ rsp->rx_rsp.cntxt_id,
+ rsp->rx_rsp.phys_port,
+ rsp->rx_rsp.virt_port));
+
+ for (i = 0; i < ha->hw.num_rds_rings; i++) {
+ QL_DPRINT2((dev,
+ "%s: rcv cntxt rds[%i].producer_reg = 0x%08x\n",
+ __func__, i, rsp->rds_rsp[i].producer_reg));
+ }
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ QL_DPRINT2((dev,
+ "%s: rcv cntxt sds[%i].consumer_reg = 0x%08x"
+ " sds[%i].intr_mask_reg = 0x%08x\n",
+ __func__, i, rsp->sds_rsp[i].consumer_reg,
+ i, rsp->sds_rsp[i].intr_mask_reg));
+ }
+ }
+ ha->hw.flags.init_rx_cnxt = 1;
+ return (0);
+}
+
+/*
+ * Name: qla_del_rcv_cntxt
+ * Function: Destroys the Receive Context.
+ */
+static void
+qla_del_rcv_cntxt(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev = ha->pci_dev;
+
+ if (!ha->hw.flags.init_rx_cnxt)
+ return;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_DESTROY_RX_CNTXT;
+ cdrp.cmd_arg1 = (uint32_t) (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_DESTROY_RX_CNTXT failed\n",
+ __func__);
+ }
+ ha->hw.flags.init_rx_cnxt = 0;
+}
+
+/*
+ * Name: qla_init_xmt_cntxt
+ * Function: Creates the Transmit Context.
+ */
+static int
+qla_init_xmt_cntxt(qla_host_t *ha)
+{
+ bus_addr_t phys_addr;
+ device_t dev;
+ q80_tx_cntxt_rsp_t *tx_rsp;
+ qla_cdrp_t cdrp;
+ qla_hw_t *hw = &ha->hw;
+
+ dev = ha->pci_dev;
+
+ /*
+ * Create Transmit Context
+ */
+ phys_addr = ha->hw.tx_cntxt_req_paddr;
+ tx_rsp = ha->hw.tx_cntxt_rsp;
+
+ hw->txr_comp = hw->txr_next = 0;
+ *(hw->tx_cons) = 0;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_CREATE_TX_CNTXT;
+ cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
+ cdrp.cmd_arg2 = (uint32_t)(phys_addr);
+ cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_tx_cntxt_req_t));
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_CREATE_TX_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.tx_prod_reg = tx_rsp->producer_reg;
+
+ QL_DPRINT2((dev, "%s: tx cntxt successful"
+ " cntxt_state = 0x%08x "
+ " cntxt_id = 0x%04x "
+ " phys_port_id = 0x%02x "
+ " virt_port_id = 0x%02x "
+ " producer_reg = 0x%08x "
+ " intr_mask_reg = 0x%08x\n",
+ __func__, tx_rsp->cntxt_state, tx_rsp->cntxt_id,
+ tx_rsp->phys_port_id, tx_rsp->virt_port_id,
+ tx_rsp->producer_reg, tx_rsp->intr_mask_reg));
+ }
+ ha->hw.txr_free = NUM_TX_DESCRIPTORS;
+
+ ha->hw.flags.init_tx_cnxt = 1;
+ return (0);
+}
+
+/*
+ * Name: qla_del_xmt_cntxt
+ * Function: Destroys the Transmit Context.
+ */
+static void
+qla_del_xmt_cntxt(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev = ha->pci_dev;
+
+ if (!ha->hw.flags.init_tx_cnxt)
+ return;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_DESTROY_TX_CNTXT;
+ cdrp.cmd_arg1 = (uint32_t) (ha->hw.tx_cntxt_rsp)->cntxt_id;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_DESTROY_TX_CNTXT failed\n",
+ __func__);
+ }
+ ha->hw.flags.init_tx_cnxt = 0;
+}
+
+/*
+ * Name: qla_get_max_rds
+ * Function: Returns the maximum number of Receive Descriptor Rings per context.
+ */
+static int
+qla_get_max_rds(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_RDS_PER_CNTXT;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_RDS_PER_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.max_rds_per_cntxt = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_rds_per_context 0x%08x\n",
+ __func__, ha->hw.max_rds_per_cntxt));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_sds
+ * Function: Returns the maximum number of Status Descriptor Rings per context.
+ */
+static int
+qla_get_max_sds(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_SDS_PER_CNTXT;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_RDS_PER_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.max_sds_per_cntxt = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_sds_per_context 0x%08x\n",
+ __func__, ha->hw.max_sds_per_cntxt));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_rules
+ * Function: Returns the maximum number of Rules per context.
+ */
+static int
+qla_get_max_rules(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_RULES_PER_CNTXT;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_RULES_PER_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.max_rules_per_cntxt = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_rules_per_cntxt 0x%08x\n",
+ __func__, ha->hw.max_rules_per_cntxt));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_rcv_cntxts
+ * Function: Returns the maximum number of Receive Contexts supported.
+ */
+static int
+qla_get_max_rcv_cntxts(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_RX_CNTXT;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_RX_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.max_rcv_cntxts = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_rcv_cntxts 0x%08x\n",
+ __func__, ha->hw.max_rcv_cntxts));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_tx_cntxts
+ * Function: Returns the maximum number of Transmit Contexts supported.
+ */
+static int
+qla_get_max_tx_cntxts(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_TX_CNTXT;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_TX_CNTXT failed\n",
+ __func__);
+ return (-1);
+ } else {
+ ha->hw.max_xmt_cntxts = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_xmt_cntxts 0x%08x\n",
+ __func__, ha->hw.max_xmt_cntxts));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_mtu
+ * Function: Returns the MTU supported for a context.
+ */
+static int
+qla_get_max_mtu(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_MTU;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_MTU failed\n", __func__);
+ return (-1);
+ } else {
+ ha->hw.max_mtu = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_mtu 0x%08x\n", __func__,
+ ha->hw.max_mtu));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_set_max_mtu
+ * Function:
+ * Sets the maximum transfer unit size for the specified rcv context.
+ */
+int
+qla_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_SET_MTU;
+ cdrp.cmd_arg1 = (uint32_t)cntxt_id;
+ cdrp.cmd_arg2 = mtu;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_MTU failed\n", __func__);
+ return (-1);
+ } else {
+ ha->hw.max_mtu = cdrp.rsp_arg1;
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_max_lro
+ * Function: Returns the maximum number of TCP connections that can be
+ * supported with LRO.
+ */
+static int
+qla_get_max_lro(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_RD_MAX_LRO;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_RD_MAX_LRO failed\n", __func__);
+ return (-1);
+ } else {
+ ha->hw.max_lro = cdrp.rsp_arg1;
+ QL_DPRINT2((dev, "%s: max_lro 0x%08x\n", __func__,
+ ha->hw.max_lro));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_flow_control
+ * Function: Returns the Receive/Transmit Flow Control (PAUSE) settings for
+ * the PCI function.
+ */
+static int
+qla_get_flow_control(qla_host_t *ha)
+{
+ qla_cdrp_t cdrp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ bzero(&cdrp, sizeof(qla_cdrp_t));
+
+ cdrp.cmd = Q8_CMD_GET_FLOW_CNTRL;
+
+ if (qla_issue_cmd(ha, &cdrp)) {
+ device_printf(dev, "%s: Q8_CMD_GET_FLOW_CNTRL failed\n",
+ __func__);
+ return (-1);
+ } else {
+ QL_DPRINT2((dev, "%s: flow control 0x%08x\n", __func__,
+ cdrp.rsp_arg1));
+ }
+ return 0;
+}
+
+/*
+ * Name: qla_get_hw_caps
+ * Function: Retrieves hardware capabilities
+ */
+void
+qla_get_hw_caps(qla_host_t *ha)
+{
+ //qla_read_mac_addr(ha);
+ qla_get_max_rds(ha);
+ qla_get_max_sds(ha);
+ qla_get_max_rules(ha);
+ qla_get_max_rcv_cntxts(ha);
+ qla_get_max_tx_cntxts(ha);
+ qla_get_max_mtu(ha);
+ qla_get_max_lro(ha);
+ qla_get_flow_control(ha);
+ return;
+}
+
+/*
+ * Name: qla_hw_set_multi
+ * Function: Sets the multicast addresses provided by the host O.S. into the
+ * hardware (for the given interface)
+ */
+void
+qla_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
+ uint32_t add_multi)
+{
+ q80_rcv_cntxt_rsp_t *rsp;
+ int i;
+
+ rsp = ha->hw.rx_cntxt_rsp;
+ for (i = 0; i < mcnt; i++) {
+ qla_config_mac_addr(ha, mta, rsp->rx_rsp.cntxt_id, add_multi);
+ mta += Q8_MAC_ADDR_LEN;
+ }
+ return;
+}
+
+/*
+ * Name: qla_hw_tx_done_locked
+ * Function: Handle Transmit Completions
+ */
+static void
+qla_hw_tx_done_locked(qla_host_t *ha)
+{
+ qla_tx_buf_t *txb;
+ qla_hw_t *hw = &ha->hw;
+ uint32_t comp_idx, comp_count = 0;
+
+ /* retrieve index of last entry in tx ring completed */
+ comp_idx = qla_le32_to_host(*(hw->tx_cons));
+
+ while (comp_idx != hw->txr_comp) {
+
+ txb = &ha->tx_buf[hw->txr_comp];
+
+ hw->txr_comp++;
+ if (hw->txr_comp == NUM_TX_DESCRIPTORS)
+ hw->txr_comp = 0;
+
+ comp_count++;
+
+ if (txb->m_head) {
+ bus_dmamap_sync(ha->tx_tag, txb->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ha->tx_tag, txb->map);
+ bus_dmamap_destroy(ha->tx_tag, txb->map);
+ m_freem(txb->m_head);
+
+ txb->map = (bus_dmamap_t)0;
+ txb->m_head = NULL;
+ }
+ }
+
+ hw->txr_free += comp_count;
+
+	QL_DPRINT8((ha->pci_dev,
+	    "%s: return [c, f, p, pn] [%d, %d, %d, %d]\n", __func__,
+	    hw->txr_comp, hw->txr_free, hw->txr_next,
+	    READ_REG32(ha, (ha->hw.tx_prod_reg + 0x1b2000))));
+
+ return;
+}
+
+/*
+ * Name: qla_hw_tx_done
+ * Function: Handle Transmit Completions
+ */
+void
+qla_hw_tx_done(qla_host_t *ha)
+{
+ if (!mtx_trylock(&ha->tx_lock)) {
+ QL_DPRINT8((ha->pci_dev,
+ "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
+ return;
+ }
+ qla_hw_tx_done_locked(ha);
+
+ if (ha->hw.txr_free > free_pkt_thres)
+ ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ mtx_unlock(&ha->tx_lock);
+ return;
+}
+
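+/*
+ * Q8_LINK_STATE packs one 4-bit link state per PCI function; a nibble
+ * value of 1 means link up.  As decoded below, function 0 reads bits
+ * 3:0 and any other function reads bits 7:4.
+ */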
+void
+qla_update_link_state(qla_host_t *ha)
+{
+ uint32_t link_state;
+
+ if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ ha->hw.flags.link_up = 0;
+ return;
+ }
+ link_state = READ_REG32(ha, Q8_LINK_STATE);
+
+	if (ha->pci_func == 0)
+		ha->hw.flags.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
+	else
+		ha->hw.flags.link_up =
+		    ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
+}
+
+int
+qla_config_lro(qla_host_t *ha)
+{
+ int i;
+ qla_hw_t *hw = &ha->hw;
+ struct lro_ctrl *lro;
+
+ for (i = 0; i < hw->num_sds_rings; i++) {
+ lro = &hw->sds[i].lro;
+ if (tcp_lro_init(lro)) {
+ device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
+ __func__);
+ return (-1);
+ }
+ lro->ifp = ha->ifp;
+ }
+ ha->flags.lro_init = 1;
+
+ QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
+ return (0);
+}
+
+void
+qla_free_lro(qla_host_t *ha)
+{
+ int i;
+ qla_hw_t *hw = &ha->hw;
+ struct lro_ctrl *lro;
+
+ if (!ha->flags.lro_init)
+ return;
+
+ for (i = 0; i < hw->num_sds_rings; i++) {
+ lro = &hw->sds[i].lro;
+ tcp_lro_free(lro);
+ }
+ ha->flags.lro_init = 0;
+}
+
+void
+qla_hw_stop_rcv(qla_host_t *ha)
+{
+ int i, done, count = 100;
+
+ while (count--) {
+ done = 1;
+ for (i = 0; i < ha->hw.num_sds_rings; i++) {
+ if (ha->hw.sds[i].rcv_active)
+ done = 0;
+ }
+ if (done)
+ break;
+ else
+ qla_mdelay(__func__, 10);
+ }
+}
+
diff --git a/sys/dev/qlxgb/qla_hw.h b/sys/dev/qlxgb/qla_hw.h
new file mode 100644
index 0000000..46780be
--- /dev/null
+++ b/sys/dev/qlxgb/qla_hw.h
@@ -0,0 +1,831 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_hw.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+#ifndef _QLA_HW_H_
+#define _QLA_HW_H_
+
+#define Q8_MAX_NUM_MULTICAST_ADDRS 128
+#define Q8_MAC_ADDR_LEN 6
+
+/*
+ * Firmware Interface
+ */
+
+/*
+ * Command Response Interface - Commands
+ */
+typedef struct qla_cdrp {
+ uint32_t cmd;
+ uint32_t cmd_arg1;
+ uint32_t cmd_arg2;
+ uint32_t cmd_arg3;
+ uint32_t rsp;
+ uint32_t rsp_arg1;
+ uint32_t rsp_arg2;
+ uint32_t rsp_arg3;
+} qla_cdrp_t;
+
+#define Q8_CMD_RD_MAX_RDS_PER_CNTXT 0x80000002
+#define Q8_CMD_RD_MAX_SDS_PER_CNTXT 0x80000003
+#define Q8_CMD_RD_MAX_RULES_PER_CNTXT 0x80000004
+#define Q8_CMD_RD_MAX_RX_CNTXT 0x80000005
+#define Q8_CMD_RD_MAX_TX_CNTXT 0x80000006
+#define Q8_CMD_CREATE_RX_CNTXT 0x80000007
+#define Q8_CMD_DESTROY_RX_CNTXT 0x80000008
+#define Q8_CMD_CREATE_TX_CNTXT 0x80000009
+#define Q8_CMD_DESTROY_TX_CNTXT 0x8000000A
+#define Q8_CMD_SETUP_STATS 0x8000000E
+#define Q8_CMD_GET_STATS 0x8000000F
+#define Q8_CMD_DELETE_STATS 0x80000010
+#define Q8_CMD_GEN_INT 0x80000011
+#define Q8_CMD_SET_MTU 0x80000012
+#define Q8_CMD_GET_FLOW_CNTRL 0x80000016
+#define Q8_CMD_SET_FLOW_CNTRL 0x80000017
+#define Q8_CMD_RD_MAX_MTU 0x80000018
+#define Q8_CMD_RD_MAX_LRO 0x80000019
+
+/*
+ * Command Response Interface - Response
+ */
+#define Q8_RSP_SUCCESS 0x00000000
+#define Q8_RSP_NO_HOST_MEM 0x00000001
+#define Q8_RSP_NO_HOST_RSRC 0x00000002
+#define Q8_RSP_NO_CARD_CRB 0x00000003
+#define Q8_RSP_NO_CARD_MEM 0x00000004
+#define Q8_RSP_NO_CARD_RSRC 0x00000005
+#define Q8_RSP_INVALID_ARGS 0x00000006
+#define Q8_RSP_INVALID_ACTION 0x00000007
+#define Q8_RSP_INVALID_STATE 0x00000008
+#define Q8_RSP_NOT_SUPPORTED 0x00000009
+#define Q8_RSP_NOT_PERMITTED 0x0000000A
+#define Q8_RSP_NOT_READY 0x0000000B
+#define Q8_RSP_DOES_NOT_EXIST 0x0000000C
+#define Q8_RSP_ALREADY_EXISTS 0x0000000D
+#define Q8_RSP_BAD_SIGNATURE 0x0000000E
+#define Q8_RSP_CMD_NOT_IMPLEMENTED 0x0000000F
+#define Q8_RSP_CMD_INVALID 0x00000010
+#define Q8_RSP_TIMEOUT 0x00000011
+
+
+/*
+ * Transmit Related Definitions
+ */
+
+/*
+ * Transmit Context - Q8_CMD_CREATE_TX_CNTXT Command Configuration Data
+ */
+
+typedef struct _q80_tx_cntxt_req {
+ uint64_t rsp_dma_addr; /* rsp from firmware is DMA'ed here */
+ uint64_t cmd_cons_dma_addr;
+ uint64_t rsrvd0;
+
+ uint32_t caps[4]; /* capabilities - bit vector*/
+#define CNTXT_CAP0_BASEFW 0x0001
+#define CNTXT_CAP0_LEGACY_MN 0x0004
+#define CNTXT_CAP0_LSO 0x0040
+
+ uint32_t intr_mode; /* Interrupt Mode */
+#define CNTXT_INTR_MODE_UNIQUE 0x0000
+#define CNTXT_INTR_MODE_SHARED 0x0001
+
+ uint64_t rsrvd1;
+ uint16_t msi_index;
+ uint16_t rsrvd2;
+ uint64_t phys_addr; /* physical address of transmit ring
+ * in system memory */
+ uint32_t num_entries; /* number of entries in transmit ring */
+ uint8_t rsrvd3[128];
+} __packed q80_tx_cntxt_req_t; /* 188 bytes total */
+
+
+/*
+ * Transmit Context - Response from Firmware to Q8_CMD_CREATE_TX_CNTXT
+ */
+
+typedef struct _q80_tx_cntxt_rsp {
+ uint32_t cntxt_state; /* starting state */
+#define CNTXT_STATE_ALLOCATED_NOT_ACTIVE 0x0001
+#define CNTXT_STATE_ACTIVE 0x0002
+#define CNTXT_STATE_QUIESCED 0x0004
+
+ uint16_t cntxt_id; /* handle for context */
+ uint8_t phys_port_id; /* physical id of port */
+ uint8_t virt_port_id; /* virtual or logical id of port */
+ uint32_t producer_reg; /* producer register for transmit ring */
+ uint32_t intr_mask_reg; /* interrupt mask register */
+ uint8_t rsrvd[128];
+} __packed q80_tx_cntxt_rsp_t; /* 144 bytes */
+
+/*
+ * Transmit Command Descriptor
+ * These commands are issued on the Transmit Ring associated with a Transmit
+ * context
+ */
+typedef struct _q80_tx_cmd {
+ uint8_t tcp_hdr_off; /* TCP Header Offset */
+ uint8_t ip_hdr_off; /* IP Header Offset */
+ uint16_t flags_opcode; /* Bits 0-6: flags; 7-12: opcode */
+
+ /* flags field */
+#define Q8_TX_CMD_FLAGS_MULTICAST 0x01
+#define Q8_TX_CMD_FLAGS_LSO_TSO 0x02
+#define Q8_TX_CMD_FLAGS_VLAN_TAGGED 0x10
+#define Q8_TX_CMD_FLAGS_HW_VLAN_ID 0x40
+
+ /* opcode field */
+#define Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6 (0xC << 7)
+#define Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6 (0xB << 7)
+#define Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6 (0x6 << 7)
+#define Q8_TX_CMD_OP_XMT_TCP_LSO (0x5 << 7)
+#define Q8_TX_CMD_OP_XMT_UDP_CHKSUM (0x3 << 7)
+#define Q8_TX_CMD_OP_XMT_TCP_CHKSUM (0x2 << 7)
+#define Q8_TX_CMD_OP_XMT_ETHER (0x1 << 7)
+
+ uint8_t n_bufs; /* # of data segs in data buffer */
+ uint8_t data_len_lo; /* data length lower 8 bits */
+ uint16_t data_len_hi; /* data length upper 16 bits */
+
+ uint64_t buf2_addr; /* buffer 2 address */
+
+ uint16_t rsrvd0;
+ uint16_t mss; /* MSS for this packet */
+ uint8_t port_cntxtid; /* Bits 7-4: ContextId; 3-0: reserved */
+
+#define Q8_TX_CMD_PORT_CNXTID(c_id) ((c_id & 0xF) << 4)
+
+ uint8_t total_hdr_len; /* MAC+IP+TCP Header Length for LSO */
+ uint16_t rsrvd1;
+
+ uint64_t buf3_addr; /* buffer 3 address */
+ uint64_t buf1_addr; /* buffer 1 address */
+
+ uint16_t buf1_len; /* length of buffer 1 */
+ uint16_t buf2_len; /* length of buffer 2 */
+ uint16_t buf3_len; /* length of buffer 3 */
+ uint16_t buf4_len; /* length of buffer 4 */
+
+ uint64_t buf4_addr; /* buffer 4 address */
+
+ uint32_t rsrvd2;
+ uint16_t rsrvd3;
+ uint16_t vlan_tci; /* VLAN TCI when hw tagging is enabled*/
+
+} __packed q80_tx_cmd_t; /* 64 bytes */
+
+#define Q8_TX_CMD_MAX_SEGMENTS 4
+#define Q8_TX_CMD_TSO_ALIGN 2
+#define Q8_TX_MAX_SEGMENTS 14
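+/*
+ * Note the split length encoding above: qla_hw_send() stores the low 8
+ * bits of the frame length in data_len_lo and the remaining high bits
+ * in data_len_hi, so frames larger than 64KB (e.g. TSO bursts) can be
+ * described.
+ */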
+
+
+/*
+ * Receive Related Definitions
+ */
+/*
+ * Receive Context - Q8_CMD_CREATE_RX_CNTXT Command Configuration Data
+ */
+
+typedef struct _q80_rq_sds_ring {
+ uint64_t phys_addr; /* physical addr of status ring in system memory */
+ uint32_t size; /* number of entries in status ring */
+ uint16_t msi_index;
+ uint16_t rsrvd;
+} __packed q80_rq_sds_ring_t; /* 16 bytes */
+
+typedef struct _q80_rq_rds_ring {
+ uint64_t phys_addr; /* physical addr of rcv ring in system memory */
+ uint64_t buf_size; /* packet buffer size */
+ uint32_t size; /* number of entries in ring */
+ uint32_t rsrvd;
+} __packed q80_rq_rds_ring_t; /* 24 bytes */
+
+typedef struct _q80_rq_rcv_cntxt {
+ uint64_t rsp_dma_addr; /* rsp from firmware is DMA'ed here */
+ uint32_t caps[4]; /* bit vector */
+#define CNTXT_CAP0_JUMBO	0x0080	/* Contiguous Jumbo buffers */
+#define CNTXT_CAP0_LRO 0x0100
+#define CNTXT_CAP0_HW_LRO 0x0800 /* HW LRO */
+
+ uint32_t intr_mode; /* same as q80_tx_cntxt_req_t */
+ uint32_t rds_intr_mode; /* same as q80_tx_cntxt_req_t */
+
+ uint32_t rds_ring_offset; /* rds configuration relative to data[0] */
+ uint32_t sds_ring_offset; /* sds configuration relative to data[0] */
+
+ uint16_t num_rds_rings;
+ uint16_t num_sds_rings;
+
+ uint8_t rsrvd1[132];
+} __packed q80_rq_rcv_cntxt_t; /* 176 bytes header + rds + sds ring rqsts */
+
+/*
+ * Receive Context - Response from Firmware to Q8_CMD_CREATE_RX_CNTXT
+ */
+
+typedef struct _q80_rsp_rds_ring {
+ uint32_t producer_reg;
+ uint32_t rsrvd;
+} __packed q80_rsp_rds_ring_t; /* 8 bytes */
+
+typedef struct _q80_rsp_sds_ring {
+ uint32_t consumer_reg;
+ uint32_t intr_mask_reg;
+} __packed q80_rsp_sds_ring_t; /* 8 bytes */
+
+typedef struct _q80_rsp_rcv_cntxt {
+ uint32_t rds_ring_offset; /* rds configuration relative to data[0] */
+ uint32_t sds_ring_offset; /* sds configuration relative to data[0] */
+
+ uint32_t cntxt_state; /* starting state */
+ uint32_t funcs_per_port; /* number of PCI functions sharing each port */
+
+ uint16_t num_rds_rings;
+ uint16_t num_sds_rings;
+
+ uint16_t cntxt_id; /* handle for context */
+
+ uint8_t phys_port; /* physical id of port */
+ uint8_t virt_port; /* virtual or logical id of port */
+
+ uint8_t rsrvd[128];
+ uint8_t data[0];
+} __packed q80_rsp_rcv_cntxt_t; /* 152 bytes header + rds + sds ring rspncs */
+
+
+/*
+ * Note:
+ * Transmit Context
+ * 188 (rq) + 144 (rsp) = 332 bytes are required
+ *
+ * Receive Context
+ * 1 RDS and 1 SDS rings: (16+24+176)+(8+8+152) = 384 bytes
+ *
+ * 3 RDS and 4 SDS rings: (((16+24)*3)+176) + (((8+8)*4)+152) =
+ * = 296 + 216 = 512 bytes
+ * Clearly this is within the minimum PAGE size of most O.S. platforms
+ * (typically 4 Kbytes). Hence it is simpler to allocate one PAGE
+ * and then carve out space for each context. It is also a good idea
+ * to throw in the shadow register for the consumer index of the
+ * transmit ring in this PAGE.
+ */
+
+/*
+ * Receive Descriptor corresponding to each entry in the receive ring
+ */
+typedef struct _q80_rcv_desc {
+ uint16_t handle;
+ uint16_t rsrvd;
+ uint32_t buf_size; /* buffer size in bytes */
+ uint64_t buf_addr; /* physical address of buffer */
+} __packed q80_recv_desc_t;
+
+/*
+ * Status Descriptor corresponding to each entry in the Status ring
+ */
+typedef struct _q80_stat_desc {
+ uint64_t data[2];
+} __packed q80_stat_desc_t;
+
+/*
+ * definitions for data[0] field of Status Descriptor
+ */
+#define Q8_STAT_DESC_OWNER(data) ((data >> 56) & 0x3)
+#define Q8_STAT_DESC_OWNER_HOST 0x1
+#define Q8_STAT_DESC_OWNER_FW 0x2
+
+#define Q8_STAT_DESC_OWNER_MASK (((uint64_t)0x3) << 56)
+#define Q8_STAT_DESC_SET_OWNER(owner) (uint64_t)(((uint64_t)owner) << 56)
+
+#define Q8_STAT_DESC_OPCODE(data) ((data >> 58) & 0x003F)
+#define Q8_STAT_DESC_OPCODE_SYN_OFFLOAD 0x03
+#define Q8_STAT_DESC_OPCODE_RCV_PKT 0x04
+#define Q8_STAT_DESC_OPCODE_CTRL_MSG 0x05
+#define Q8_STAT_DESC_OPCODE_LRO_PKT 0x12
+
+/*
+ * definitions for data[0] field of Status Descriptor for standard frames
+ * status descriptor opcode equals 0x04
+ */
+#define Q8_STAT_DESC_PORT(data) ((data) & 0x000F)
+#define Q8_STAT_DESC_STATUS(data) ((data >> 4) & 0x000F)
+#define Q8_STAT_DESC_STATUS_NO_CHKSUM 0x01
+#define Q8_STAT_DESC_STATUS_CHKSUM_OK 0x02
+#define Q8_STAT_DESC_STATUS_CHKSUM_ERR 0x03
+
+#define Q8_STAT_DESC_TYPE(data) ((data >> 8) & 0x000F)
+#define Q8_STAT_DESC_TOTAL_LENGTH(data) ((data >> 12) & 0xFFFF)
+#define Q8_STAT_DESC_HANDLE(data) ((data >> 28) & 0xFFFF)
+#define Q8_STAT_DESC_PROTOCOL(data) ((data >> 44) & 0x000F)
+#define Q8_STAT_DESC_L2_OFFSET(data) ((data >> 48) & 0x001F)
+#define Q8_STAT_DESC_COUNT(data) ((data >> 53) & 0x0007)
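+/*
+ * Illustrative decode of a standard receive completion (a sketch only,
+ * not part of this file; the actual receive processing is in qla_isr.c):
+ *
+ *	if (Q8_STAT_DESC_OPCODE(sdesc->data[0]) ==
+ *	    Q8_STAT_DESC_OPCODE_RCV_PKT) {
+ *		length = Q8_STAT_DESC_TOTAL_LENGTH(sdesc->data[0]);
+ *		handle = Q8_STAT_DESC_HANDLE(sdesc->data[0]);
+ *		... handle indexes the rx buffer previously posted on
+ *		    the RDS ring (q80_recv_desc_t.handle) ...
+ *	}
+ */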
+
+/*
+ * definitions for data[0-1] fields of Status Descriptor for LRO
+ * status descriptor opcode equals 0x05
+ */
+/* definitions for data[0] field */
+#define Q8_LRO_STAT_DESC_HANDLE(data) ((data) & 0xFFFF)
+#define Q8_LRO_STAT_DESC_PAYLOAD_LENGTH(data) ((data >> 16) & 0xFFFF)
+#define Q8_LRO_STAT_DESC_L2_OFFSET(data) ((data >> 32) & 0xFF)
+#define Q8_LRO_STAT_DESC_L4_OFFSET(data) ((data >> 40) & 0xFF)
+#define Q8_LRO_STAT_DESC_TS_PRESENT(data) ((data >> 48) & 0x1)
+#define Q8_LRO_STAT_DESC_TYPE(data) ((data >> 49) & 0x7)
+#define Q8_LRO_STAT_DESC_PUSH_BIT(data) ((data >> 52) & 0x1)
+
+/* definitions for data[1] field */
+#define Q8_LRO_STAT_DESC_SEQ_NUM(data) (uint32_t)(data)
+
+/** Driver Related Definitions Begin **/
+
+#define MAX_RDS_RINGS 2 /* Max# of Receive Descriptor Rings */
+#define MAX_SDS_RINGS 4 /* Max# of Status Descriptor Rings */
+#define TX_SMALL_PKT_SIZE 128 /* size in bytes of small packets */
+
+/* The number of descriptors should be a power of 2 */
+#define NUM_TX_DESCRIPTORS 2048
+#define NUM_RX_DESCRIPTORS 8192
+//#define NUM_RX_JUMBO_DESCRIPTORS 1024
+#define NUM_RX_JUMBO_DESCRIPTORS 2048
+//#define NUM_STATUS_DESCRIPTORS 8192
+#define NUM_STATUS_DESCRIPTORS 2048
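+/*
+ * The power-of-2 requirement above is load-bearing: ring indices are
+ * advanced with "(idx + 1) & (NUM_xxx_DESCRIPTORS - 1)" instead of a
+ * modulo operation (see e.g. qla_fw_cmd() and qla_hw_send()).
+ */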
+
+typedef struct _q80_rcv_cntxt_req {
+ q80_rq_rcv_cntxt_t rx_req;
+ q80_rq_rds_ring_t rds_req[MAX_RDS_RINGS];
+ q80_rq_sds_ring_t sds_req[MAX_SDS_RINGS];
+} __packed q80_rcv_cntxt_req_t;
+
+typedef struct _q80_rcv_cntxt_rsp {
+ q80_rsp_rcv_cntxt_t rx_rsp;
+ q80_rsp_rds_ring_t rds_rsp[MAX_RDS_RINGS];
+ q80_rsp_sds_ring_t sds_rsp[MAX_SDS_RINGS];
+} __packed q80_rcv_cntxt_rsp_t;
+
+/*
+ * structure describing various dma buffers
+ */
+#define RDS_RING_INDEX_NORMAL 0
+#define RDS_RING_INDEX_JUMBO 1
+
+typedef struct qla_dmabuf {
+ volatile struct {
+ uint32_t tx_ring :1,
+ rds_ring :1,
+ sds_ring :1,
+ context :1;
+ } flags;
+
+ qla_dma_t tx_ring;
+ qla_dma_t rds_ring[MAX_RDS_RINGS];
+ qla_dma_t sds_ring[MAX_SDS_RINGS];
+ qla_dma_t context;
+} qla_dmabuf_t;
+
+/** Driver Related Definitions End **/
+
+/*
+ * Firmware Control Descriptor
+ */
+typedef struct _qla_fw_cds_hdr {
+ uint64_t cmd;
+#define Q8_FWCD_CNTRL_REQ (0x13 << 23)
+ uint8_t opcode;
+ uint8_t cookie;
+ uint16_t cntxt_id;
+ uint8_t response;
+#define Q8_FW_CDS_HDR_COMPLETION 0x1
+ uint16_t rsrvd;
+ uint8_t sub_opcode;
+} __packed qla_fw_cds_hdr_t;
+
+/*
+ * definitions for opcode in qla_fw_cds_hdr_t
+ */
+#define Q8_FWCD_OPCODE_CONFIG_RSS 0x01
+#define Q8_FWCD_OPCODE_CONFIG_RSS_TABLE 0x02
+#define Q8_FWCD_OPCODE_CONFIG_INTR_COALESCING 0x03
+#define Q8_FWCD_OPCODE_CONFIG_LED 0x04
+#define Q8_FWCD_OPCODE_CONFIG_MAC_ADDR 0x06
+#define Q8_FWCD_OPCODE_LRO_FLOW 0x07
+#define Q8_FWCD_OPCODE_GET_SNMP_STATS 0x08
+#define Q8_FWCD_OPCODE_CONFIG_MAC_RCV_MODE 0x0C
+#define Q8_FWCD_OPCODE_STATISTICS 0x10
+#define Q8_FWCD_OPCODE_CONFIG_IPADDR 0x12
+#define Q8_FWCD_OPCODE_CONFIG_LOOPBACK 0x13
+#define Q8_FWCD_OPCODE_LINK_EVENT_REQ 0x15
+#define Q8_FWCD_OPCODE_CONFIG_BRIDGING 0x17
+#define Q8_FWCD_OPCODE_CONFIG_LRO 0x18
+
+/*
+ * Configure RSS
+ */
+typedef struct _qla_fw_cds_config_rss {
+ qla_fw_cds_hdr_t hdr;
+ uint8_t hash_type;
+#define Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP (0x2 << 4)
+#define Q8_FWCD_RSS_HASH_TYPE_IPV4_IP (0x1 << 4)
+#define Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP_IP (0x3 << 4)
+#define Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP (0x2 << 6)
+#define Q8_FWCD_RSS_HASH_TYPE_IPV6_IP (0x1 << 6)
+#define Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP_IP (0x3 << 6)
+
+ uint8_t flags;
+#define Q8_FWCD_RSS_FLAGS_ENABLE_RSS 0x1
+#define Q8_FWCD_RSS_FLAGS_USE_IND_TABLE 0x2
+ uint8_t rsrvd[4];
+ uint16_t ind_tbl_mask;
+ uint64_t rss_key[5];
+} __packed qla_fw_cds_config_rss_t;
+
+/*
+ * Configure RSS Table
+ */
+typedef struct _qla_fw_cds_config_rss_table {
+ qla_fw_cds_hdr_t hdr;
+ uint64_t index;
+ uint8_t table[40];
+} __packed qla_fw_cds_config_rss_table_t;
+
+/*
+ * Configure Interrupt Coalescing
+ */
+typedef struct _qla_fw_cds_config_intr_coalesc {
+ qla_fw_cds_hdr_t hdr;
+ uint16_t rsrvd0;
+ uint16_t rsrvd1;
+ uint16_t flags;
+ uint16_t rsrvd2;
+ uint64_t rsrvd3;
+ uint16_t max_rcv_pkts;
+ uint16_t max_rcv_usecs;
+ uint16_t max_snd_pkts;
+ uint16_t max_snd_usecs;
+ uint64_t rsrvd4;
+ uint64_t rsrvd5;
+ uint32_t usecs_to;
+ uint8_t timer_type;
+#define Q8_FWCMD_INTR_COALESC_TIMER_NONE 0x00
+#define Q8_FWCMD_INTR_COALESC_TIMER_ONCE 0x01
+#define Q8_FWCMD_INTR_COALESC_TIMER_PERIODIC 0x02
+
+ uint8_t sds_ring_bitmask;
+#define Q8_FWCMD_INTR_COALESC_SDS_RING_0 0x01
+#define Q8_FWCMD_INTR_COALESC_SDS_RING_1 0x02
+#define Q8_FWCMD_INTR_COALESC_SDS_RING_2 0x04
+#define Q8_FWCMD_INTR_COALESC_SDS_RING_3 0x08
+
+ uint16_t rsrvd6;
+} __packed qla_fw_cds_config_intr_coalesc_t;
+
+/*
+ * Configure LED Parameters
+ */
+typedef struct _qla_fw_cds_config_led {
+ qla_fw_cds_hdr_t hdr;
+ uint32_t cntxt_id;
+ uint32_t blink_rate;
+ uint32_t blink_state;
+ uint32_t rsrvd;
+} __packed qla_fw_cds_config_led_t;
+
+/*
+ * Configure MAC Address
+ */
+typedef struct _qla_fw_cds_config_mac_addr {
+ qla_fw_cds_hdr_t hdr;
+ uint8_t cmd;
+#define Q8_FWCD_ADD_MAC_ADDR 0x1
+#define Q8_FWCD_DEL_MAC_ADDR 0x2
+ uint8_t rsrvd;
+ uint8_t mac_addr[6];
+} __packed qla_fw_cds_config_mac_addr_t;
+
+/*
+ * Configure Add/Delete LRO
+ */
+typedef struct _qla_fw_cds_config_lro {
+ qla_fw_cds_hdr_t hdr;
+ uint32_t dst_ip_addr;
+ uint32_t src_ip_addr;
+ uint16_t dst_tcp_port;
+ uint16_t src_tcp_port;
+ uint8_t ipv6;
+ uint8_t time_stamp;
+ uint16_t rsrvd;
+ uint32_t rss_hash;
+ uint32_t host_handle;
+} __packed qla_fw_cds_config_lro_t;
+
+/*
+ * Get SNMP Statistics
+ */
+typedef struct _qla_fw_cds_get_snmp {
+ qla_fw_cds_hdr_t hdr;
+ uint64_t phys_addr;
+ uint16_t size;
+ uint16_t cntxt_id;
+ uint32_t rsrvd;
+} __packed qla_fw_cds_get_snmp_t;
+
+typedef struct _qla_snmp_stats {
+ uint64_t jabber_state;
+ uint64_t false_carrier;
+ uint64_t rsrvd;
+ uint64_t mac_cntrl;
+ uint64_t align_errors;
+ uint64_t chksum_errors;
+ uint64_t oversize_frames;
+ uint64_t tx_errors;
+ uint64_t mac_rcv_errors;
+ uint64_t phy_rcv_errors;
+ uint64_t rcv_pause;
+ uint64_t tx_pause;
+} __packed qla_snmp_stats_t;
+
+/*
+ * Enable Link Event Requests
+ */
+typedef struct _qla_link_event_req {
+ qla_fw_cds_hdr_t hdr;
+ uint8_t enable;
+ uint8_t get_clnk_params;
+ uint8_t pad[6];
+} __packed qla_link_event_req_t;
+
+
+/*
+ * Set MAC Receive Mode
+ */
+typedef struct _qla_set_mac_rcv_mode {
+ qla_fw_cds_hdr_t hdr;
+
+ uint32_t mode;
+#define Q8_MAC_RCV_RESET_PROMISC_ALLMULTI 0x00
+#define Q8_MAC_RCV_ENABLE_PROMISCUOUS 0x01
+#define Q8_MAC_RCV_ENABLE_ALLMULTI 0x02
+
+ uint8_t pad[4];
+} __packed qla_set_mac_rcv_mode_t;
+
+/*
+ * Configure IP Address
+ */
+typedef struct _qla_config_ipv4 {
+ qla_fw_cds_hdr_t hdr;
+
+ uint64_t cmd;
+#define Q8_CONFIG_CMD_IP_ENABLE 0x02
+#define Q8_CONFIG_CMD_IP_DISABLE 0x03
+
+ uint64_t ipv4_addr;
+} __packed qla_config_ipv4_t;
+
+/*
+ * Configure LRO
+ */
+typedef struct _qla_config_lro {
+ qla_fw_cds_hdr_t hdr;
+
+ uint64_t cmd;
+#define Q8_CONFIG_LRO_ENABLE 0x08
+} __packed qla_config_lro_t;
+
+
+/*
+ * Control Messages Received on SDS Ring
+ */
+/* Header */
+typedef struct _qla_cntrl_msg_hdr {
+ uint16_t rsrvd0;
+ uint16_t err_code;
+ uint8_t rsp_type;
+ uint8_t comp_id;
+ uint16_t tag;
+#define Q8_CTRL_MSG_TAG_DESC_COUNT_MASK (0x7 << 5)
+#define Q8_CTRL_MSG_TAG_OWNER_MASK (0x3 << 8)
+#define Q8_CTRL_MSG_TAG_OPCODE_MASK (0x3F << 10)
+} __packed qla_cntrl_msg_hdr_t;
+
+/*
+ * definitions for rsp_type in qla_cntrl_msg_hdr_t
+ */
+#define Q8_CTRL_CONFIG_MAC_RSP 0x85
+#define Q8_CTRL_LRO_FLOW_DELETE_RSP 0x86
+#define Q8_CTRL_LRO_FLOW_ADD_FAILURE_RSP 0x87
+#define Q8_CTRL_GET_SNMP_STATS_RSP 0x88
+#define Q8_CTRL_GET_NETWORK_STATS_RSP 0x8C
+#define Q8_CTRL_LINK_EVENT_NOTIFICATION 0x8D
+
+/*
+ * Configure MAC Response
+ */
+typedef struct _qla_config_mac_rsp {
+ uint32_t rval;
+ uint32_t rsrvd;
+} __packed qla_config_mac_rsp_t;
+
+/*
+ * LRO Flow Response (can be LRO Flow Delete and LRO Flow Add Failure)
+ */
+typedef struct _qla_lro_flow_rsp {
+ uint32_t handle;
+ uint32_t rss_hash;
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t dst_tcp_port;
+ uint16_t src_tcp_port;
+ uint8_t ipv6;
+ uint8_t rsrvd0;
+ uint16_t rsrvd1;
+} __packed qla_lro_flow_rsp_t;
+
+/*
+ * Get SNMP Statistics Response
+ */
+typedef struct _qla_get_snmp_stats_rsp {
+ uint64_t rsrvd;
+} __packed qla_get_snmp_stats_rsp_t;
+
+/*
+ * Get Network Statistics Response
+ */
+typedef struct _qla_get_net_stats_rsp {
+ uint64_t rsrvd;
+} __packed qla_get_net_stats_rsp_t;
+
+/*
+ * Link Event Notification
+ */
+typedef struct _qla_link_event {
+ uint32_t cable_oui;
+ uint16_t cable_length;
+
+ uint16_t link_speed;
+#define Q8_LE_SPEED_MASK 0xFFF
+#define Q8_LE_SPEED_10GBPS 0x710
+#define Q8_LE_SPEED_1GBPS 0x3E8
+#define Q8_LE_SPEED_100MBPS 0x064
+#define Q8_LE_SPEED_10MBPS 0x00A
+
+ uint8_t link_up; /* 0 = down; else up */
+
+ uint8_t mod_info;
+#define Q8_LE_MI_MODULE_NOT_PRESENT 0x01
+#define Q8_LE_MI_UNKNOWN_OPTICAL_MODULE 0x02
+#define Q8_LE_MI_SR_LR_OPTICAL_MODULE 0x03
+#define Q8_LE_MI_LRM_OPTICAL_MODULE 0x04
+#define Q8_LE_MI_SFP_1G_MODULE 0x05
+#define Q8_LE_MI_UNSUPPORTED_TWINAX 0x06
+#define Q8_LE_MI_UNSUPPORTED_TWINAX_LENGTH 0x07
+#define Q8_LE_MI_SUPPORTED_TWINAX 0x08
+
+ uint8_t fduplex; /* 1 = full duplex; 0 = half duplex */
+ uint8_t autoneg; /* 1 = autoneg enable; 0 = disabled */
+ uint32_t rsrvd;
+} __packed qla_link_event_t;
+
+typedef struct _qla_sds {
+ q80_stat_desc_t *sds_ring_base; /* start of sds ring */
+ uint32_t sdsr_next; /* next entry in SDS ring to process */
+ struct lro_ctrl lro;
+ void *rxb_free;
+ uint32_t rx_free;
+ void *rxjb_free;
+ uint32_t rxj_free;
+ volatile uint32_t rcv_active;
+} qla_sds_t;
+
+/*
+ * struct for storing hardware specific information for a given interface
+ */
+typedef struct _qla_hw {
+ struct {
+ uint32_t
+ lro :1,
+ init_tx_cnxt :1,
+ init_rx_cnxt :1,
+ fduplex :1,
+ autoneg :1,
+ link_up :1;
+ } flags;
+
+ uint16_t link_speed;
+ uint16_t cable_length;
+ uint16_t cable_oui;
+ uint8_t mod_info;
+ uint8_t rsrvd;
+
+ uint32_t max_rds_per_cntxt;
+ uint32_t max_sds_per_cntxt;
+ uint32_t max_rules_per_cntxt;
+ uint32_t max_rcv_cntxts;
+ uint32_t max_xmt_cntxts;
+ uint32_t max_mtu;
+ uint32_t max_lro;
+
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+
+ uint16_t num_rds_rings;
+ uint16_t num_sds_rings;
+
+ qla_dmabuf_t dma_buf;
+
+ /* Transmit Side */
+
+ q80_tx_cmd_t *tx_ring_base;
+
+ q80_tx_cntxt_req_t *tx_cntxt_req; /* TX Context Request */
+ bus_addr_t tx_cntxt_req_paddr;
+
+ q80_tx_cntxt_rsp_t *tx_cntxt_rsp; /* TX Context Response */
+ bus_addr_t tx_cntxt_rsp_paddr;
+
+ uint32_t *tx_cons; /* tx consumer shadow reg */
+ bus_addr_t tx_cons_paddr;
+
+ volatile uint32_t txr_free; /* # of free entries in tx ring */
+ volatile uint32_t txr_next; /* # next available tx ring entry */
+ volatile uint32_t txr_comp; /* index of last tx entry completed */
+
+ uint32_t tx_prod_reg;
+
+ /* Receive Side */
+ volatile uint32_t rx_next; /* next standard rcv ring to arm fw */
+ volatile int32_t rxj_next; /* next jumbo rcv ring to arm fw */
+
+ volatile int32_t rx_in; /* next standard rcv ring to add mbufs */
+ volatile int32_t rxj_in; /* next jumbo rcv ring to add mbufs */
+
+ q80_rcv_cntxt_req_t *rx_cntxt_req; /* Rcv Context Request */
+ bus_addr_t rx_cntxt_req_paddr;
+ q80_rcv_cntxt_rsp_t *rx_cntxt_rsp; /* Rcv Context Response */
+ bus_addr_t rx_cntxt_rsp_paddr;
+
+ qla_sds_t sds[MAX_SDS_RINGS];
+} qla_hw_t;
+
+#define QL_UPDATE_RDS_PRODUCER_INDEX(ha, i, val) \
+ WRITE_REG32(ha, ((ha->hw.rx_cntxt_rsp)->rds_rsp[i].producer_reg +\
+ 0x1b2000), val)
+
+#define QL_UPDATE_TX_PRODUCER_INDEX(ha, val) \
+ WRITE_REG32(ha, (ha->hw.tx_prod_reg + 0x1b2000), val)
+
+#define QL_UPDATE_SDS_CONSUMER_INDEX(ha, i, val) \
+ WRITE_REG32(ha, ((ha->hw.rx_cntxt_rsp)->sds_rsp[i].consumer_reg +\
+ 0x1b2000), val)
+
+#define QL_CLEAR_INTERRUPTS(ha) \
+ {\
+ if (ha->pci_func == 0) {\
+ WRITE_REG32(ha, Q8_INT_TARGET_STATUS_F0, 0xFFFFFFFF);\
+ } else {\
+ WRITE_REG32(ha, Q8_INT_TARGET_STATUS_F1, 0xFFFFFFFF);\
+ }\
+ }
+
+#define QL_ENABLE_INTERRUPTS(ha, sds_index) \
+ {\
+ q80_rsp_sds_ring_t *rsp_sds;\
+ rsp_sds = &((ha->hw.rx_cntxt_rsp)->sds_rsp[sds_index]);\
+ WRITE_REG32(ha, (rsp_sds->intr_mask_reg + 0x1b2000), 0x1);\
+ }
+
+#define QL_DISABLE_INTERRUPTS(ha, sds_index) \
+ {\
+ q80_rsp_sds_ring_t *rsp_sds;\
+ rsp_sds = &((ha->hw.rx_cntxt_rsp)->sds_rsp[sds_index]);\
+ WRITE_REG32(ha, (rsp_sds->intr_mask_reg + 0x1b2000), 0x0);\
+ }
+
+
+#define QL_BUFFER_ALIGN 16
+
+#endif /* #ifndef _QLA_HW_H_ */
diff --git a/sys/dev/qlxgb/qla_inline.h b/sys/dev/qlxgb/qla_inline.h
new file mode 100644
index 0000000..6a6be5f
--- /dev/null
+++ b/sys/dev/qlxgb/qla_inline.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_inline.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+#ifndef _QLA_INLINE_H_
+#define _QLA_INLINE_H_
+
+/*
+ * Function: qla_hw_reset
+ */
+static __inline void qla_hw_reset(qla_host_t *ha)
+{
+ WRITE_OFFSET32(ha, Q8_ASIC_RESET, 0xFFFFFFFF);
+}
+
+#define QL8_SEMLOCK_TIMEOUT 1000 /* QLA8020 semaphore lock: up to 1000 tries, 10ms apart */
+
+
+/*
+ * Inline functions for hardware semaphores
+ */
+
+/*
+ * Name: qla_sem_lock
+ * Function: Locks one of the semaphore registers (semaphore 2,3,5 & 7).
+ * If id_reg is valid, id_val is written into it for debugging
+ * purposes.
+ * Returns: 0 on success; -1 on failure.
+ */
+static __inline int
+qla_sem_lock(qla_host_t *ha, uint32_t sem_reg, uint32_t id_reg, uint32_t id_val)
+{
+ int count = QL8_SEMLOCK_TIMEOUT;
+
+ while (count) {
+ if ((READ_REG32(ha, sem_reg) & SEM_LOCK_BIT))
+ break;
+ count--;
+
+ if (!count)
+ return(-1);
+ qla_mdelay(__func__, 10);
+ }
+ if (id_reg)
+ WRITE_OFFSET32(ha, id_reg, id_val);
+
+ return(0);
+}
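+
+/*
+ * Typical pairing (as in qla_rdwr_indreg32() in qla_misc.c): lock,
+ * access the CRB window, unlock:
+ *
+ * if (qla_sem_lock(ha, Q8_SEM7_LOCK, 0, 0))
+ * return (-1);
+ * ... access the 2M CRB window ...
+ * qla_sem_unlock(ha, Q8_SEM7_UNLOCK);
+ */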
+
+/*
+ * Name: qla_sem_unlock
+ * Function: Unlocks the semaphore registers (semaphore 2,3,5 & 7)
+ * previously locked by qla_sem_lock()
+ */
+static __inline void
+qla_sem_unlock(qla_host_t *ha, uint32_t sem_reg)
+{
+ READ_REG32(ha, sem_reg);
+}
+
+static __inline int
+qla_get_ifq_snd_maxlen(qla_host_t *ha)
+{
+ return((NUM_TX_DESCRIPTORS - 1));
+}
+
+static __inline uint32_t
+qla_get_optics(qla_host_t *ha)
+{
+ uint32_t link_speed;
+
+ link_speed = READ_REG32(ha, Q8_LINK_SPEED_0);
+ if (ha->pci_func == 0)
+ link_speed = link_speed & 0xFF;
+ else
+ link_speed = (link_speed >> 8) & 0xFF;
+
+ switch (link_speed) {
+ case 0x1:
+ link_speed = IFM_100_FX;
+ break;
+
+ case 0x10:
+ link_speed = IFM_1000_SX;
+ break;
+
+ default:
+ link_speed = (IFM_10G_LR | IFM_10G_SR);
+ break;
+ }
+
+ return(link_speed);
+}
+
+static __inline uint8_t *
+qla_get_mac_addr(qla_host_t *ha)
+{
+ return (ha->hw.mac_addr);
+}
+
+static __inline void
+qla_read_mac_addr(qla_host_t *ha)
+{
+ uint32_t mac_crb_addr;
+ uint32_t mac_lo;
+ uint32_t mac_hi;
+ uint8_t *macp;
+
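+ /*
+ * Each pair of PCI functions shares a 3-dword block in the CRB MAC
+ * address region: the even function's MAC occupies the low 6 bytes
+ * and the odd function's the upper 6 bytes, stored byte-reversed.
+ */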
+ mac_crb_addr = Q8_CRB_MAC_BLOCK_START +
+ (((ha->pci_func >> 1) * 3) << 2) + ((ha->pci_func & 0x01) << 2);
+
+ mac_lo = READ_REG32(ha, mac_crb_addr);
+ mac_hi = READ_REG32(ha, (mac_crb_addr + 0x4));
+
+ if (ha->pci_func & 0x01) {
+ mac_lo = mac_lo >> 16;
+
+ macp = (uint8_t *)&mac_lo;
+
+ ha->hw.mac_addr[5] = macp[0];
+ ha->hw.mac_addr[4] = macp[1];
+
+ macp = (uint8_t *)&mac_hi;
+
+ ha->hw.mac_addr[3] = macp[0];
+ ha->hw.mac_addr[2] = macp[1];
+ ha->hw.mac_addr[1] = macp[2];
+ ha->hw.mac_addr[0] = macp[3];
+ } else {
+ macp = (uint8_t *)&mac_lo;
+
+ ha->hw.mac_addr[5] = macp[0];
+ ha->hw.mac_addr[4] = macp[1];
+ ha->hw.mac_addr[3] = macp[2];
+ ha->hw.mac_addr[2] = macp[3];
+
+ macp = (uint8_t *)&mac_hi;
+
+ ha->hw.mac_addr[1] = macp[0];
+ ha->hw.mac_addr[0] = macp[1];
+ }
+ return;
+}
+
+static __inline void
+qla_set_hw_rcv_desc(qla_host_t *ha, uint32_t ridx, uint32_t index,
+ uint32_t handle, bus_addr_t paddr, uint32_t buf_size)
+{
+ q80_recv_desc_t *rcv_desc;
+
+ rcv_desc = (q80_recv_desc_t *)ha->hw.dma_buf.rds_ring[ridx].dma_b;
+
+ rcv_desc += index;
+
+ rcv_desc->handle = (uint16_t)handle;
+ rcv_desc->buf_size = buf_size;
+ rcv_desc->buf_addr = paddr;
+
+ return;
+}
+
+static __inline void
+qla_init_hw_rcv_descriptors(qla_host_t *ha, uint32_t ridx)
+{
+ if (ridx == RDS_RING_INDEX_NORMAL)
+ bzero((void *)ha->hw.dma_buf.rds_ring[ridx].dma_b,
+ (sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS));
+ else if (ridx == RDS_RING_INDEX_JUMBO)
+ bzero((void *)ha->hw.dma_buf.rds_ring[ridx].dma_b,
+ (sizeof(q80_recv_desc_t) * NUM_RX_JUMBO_DESCRIPTORS));
+ else
+ QL_ASSERT(0, ("%s: invalid rds index [%d]\n", __func__, ridx));
+}
+
+static __inline void
+qla_lock(qla_host_t *ha, const char *str)
+{
+ while (1) {
+ mtx_lock(&ha->hw_lock);
+ if (!ha->hw_lock_held) {
+ ha->hw_lock_held = 1;
+ ha->qla_lock = str;
+ mtx_unlock(&ha->hw_lock);
+ break;
+ }
+ mtx_unlock(&ha->hw_lock);
+ qla_mdelay(__func__, 1);
+ }
+ return;
+}
+
+static __inline void
+qla_unlock(qla_host_t *ha, const char *str)
+{
+ mtx_lock(&ha->hw_lock);
+ ha->hw_lock_held = 0;
+ ha->qla_unlock = str;
+ mtx_unlock(&ha->hw_lock);
+}
+
+#endif /* #ifndef _QLA_INLINE_H_ */
diff --git a/sys/dev/qlxgb/qla_ioctl.c b/sys/dev/qlxgb/qla_ioctl.c
new file mode 100644
index 0000000..1e9557a
--- /dev/null
+++ b/sys/dev/qlxgb/qla_ioctl.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * File: qla_ioctl.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_reg.h"
+#include "qla_inline.h"
+#include "qla_glbl.h"
+#include "qla_ioctl.h"
+
+static struct cdevsw qla_cdevsw = {
+ .d_version = D_VERSION,
+ .d_ioctl = qla_eioctl,
+ .d_name = "qlcnic",
+};
+
+int
+qla_make_cdev(qla_host_t *ha)
+{
+ ha->ioctl_dev = make_dev(&qla_cdevsw,
+ ha->ifp->if_dunit,
+ UID_ROOT,
+ GID_WHEEL,
+ 0600,
+ "%s",
+ if_name(ha->ifp));
+
+ if (ha->ioctl_dev == NULL)
+ return (-1);
+
+ ha->ioctl_dev->si_drv1 = ha;
+
+ return (0);
+}
+
+void
+qla_del_cdev(qla_host_t *ha)
+{
+ if (ha->ioctl_dev != NULL)
+ destroy_dev(ha->ioctl_dev);
+ return;
+}
+
+int
+qla_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ qla_host_t *ha;
+ int rval = 0;
+ qla_reg_val_t *rv;
+ qla_rd_flash_t *rdf;
+
+ if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
+ return ENXIO;
+
+ switch(cmd) {
+
+ case QLA_RDWR_REG:
+
+ rv = (qla_reg_val_t *)data;
+
+ if (rv->direct) {
+ if (rv->rd) {
+ rv->val = READ_OFFSET32(ha, rv->reg);
+ } else {
+ WRITE_OFFSET32(ha, rv->reg, rv->val);
+ }
+ } else {
+ if ((rval = qla_rdwr_indreg32(ha, rv->reg, &rv->val,
+ rv->rd)))
+ rval = ENXIO;
+ }
+ break;
+
+ case QLA_RD_FLASH:
+ rdf = (qla_rd_flash_t *)data;
+ if ((rval = qla_rd_flash32(ha, rdf->off, &rdf->data)))
+ rval = ENXIO;
+ break;
+ default:
+ break;
+ }
+
+ return rval;
+}
+
diff --git a/sys/dev/qlxgb/qla_ioctl.h b/sys/dev/qlxgb/qla_ioctl.h
new file mode 100644
index 0000000..160c46c
--- /dev/null
+++ b/sys/dev/qlxgb/qla_ioctl.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_ioctl.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLA_IOCTL_H_
+#define _QLA_IOCTL_H_
+
+#include <sys/ioccom.h>
+
+struct qla_reg_val {
+ uint16_t rd;
+ uint16_t direct;
+ uint32_t reg;
+ uint32_t val;
+};
+typedef struct qla_reg_val qla_reg_val_t;
+
+struct qla_rd_flash {
+ uint32_t off;
+ uint32_t data;
+};
+typedef struct qla_rd_flash qla_rd_flash_t;
+
+
+/*
+ * Read/Write Register
+ */
+#define QLA_RDWR_REG _IOWR('q', 1, qla_reg_val_t)
+
+/*
+ * Read Flash
+ */
+#define QLA_RD_FLASH _IOWR('q', 2, qla_rd_flash_t)
+
+#endif /* #ifndef _QLA_IOCTL_H_ */
diff --git a/sys/dev/qlxgb/qla_isr.c b/sys/dev/qlxgb/qla_isr.c
new file mode 100644
index 0000000..382d565
--- /dev/null
+++ b/sys/dev/qlxgb/qla_isr.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File: qla_isr.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_inline.h"
+#include "qla_ver.h"
+#include "qla_glbl.h"
+#include "qla_dbg.h"
+
+static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp);
+static void qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp);
+
+/*
+ * Name: qla_rx_intr
+ * Function: Handles reception of normal Ethernet frames
+ */
+static void
+qla_rx_intr(qla_host_t *ha, uint64_t data, uint32_t sds_idx,
+ struct lro_ctrl *lro)
+{
+ uint32_t idx, length, status, ring;
+ qla_rx_buf_t *rxb;
+ struct mbuf *mp;
+ struct ifnet *ifp = ha->ifp;
+ qla_sds_t *sdsp;
+ struct ether_vlan_header *eh;
+
+ sdsp = &ha->hw.sds[sds_idx];
+
+ ring = (uint32_t)Q8_STAT_DESC_TYPE(data);
+ idx = (uint32_t)Q8_STAT_DESC_HANDLE(data);
+ length = (uint32_t)Q8_STAT_DESC_TOTAL_LENGTH(data);
+ status = (uint32_t)Q8_STAT_DESC_STATUS(data);
+
+ if (ring == 0) {
+ if ((idx >= NUM_RX_DESCRIPTORS) || (length > MCLBYTES)) {
+ device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
+ " len[0x%08x] invalid\n",
+ __func__, ring, idx, length);
+ return;
+ }
+ } else {
+ if ((idx >= NUM_RX_JUMBO_DESCRIPTORS)||(length > MJUM9BYTES)) {
+ device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
+ " len[0x%08x] invalid\n",
+ __func__, ring, idx, length);
+ return;
+ }
+ }
+
+ if (ring == 0)
+ rxb = &ha->rx_buf[idx];
+ else
+ rxb = &ha->rx_jbuf[idx];
+
+ QL_ASSERT((rxb != NULL),\
+ ("%s: [r, i, sds_idx]=[%d, 0x%x, %d] rxb != NULL\n",\
+ __func__, ring, idx, sds_idx));
+
+ mp = rxb->m_head;
+
+ QL_ASSERT((mp != NULL),\
+ ("%s: [r,i,rxb, sds_idx]=[%d, 0x%x, %p, %d] mp != NULL\n",\
+ __func__, ring, idx, rxb, sds_idx));
+
+ bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
+
+ if (ring == 0) {
+ rxb->m_head = NULL;
+ rxb->next = sdsp->rxb_free;
+ sdsp->rxb_free = rxb;
+ sdsp->rx_free++;
+ } else {
+ rxb->m_head = NULL;
+ rxb->next = sdsp->rxjb_free;
+ sdsp->rxjb_free = rxb;
+ sdsp->rxj_free++;
+ }
+
+ mp->m_len = length;
+ mp->m_pkthdr.len = length;
+ mp->m_pkthdr.rcvif = ifp;
+
+ eh = mtod(mp, struct ether_vlan_header *);
+
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ uint32_t *data = (uint32_t *)eh;
+
+ mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
+ mp->m_flags |= M_VLANTAG;
+
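+ /*
+ * Strip the 4-byte VLAN encapsulation in place: copy the 12
+ * bytes of MAC addresses one 32-bit word at a time towards the
+ * payload, then trim the now-redundant leading 4 bytes.
+ */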
+ *(data + 3) = *(data + 2);
+ *(data + 2) = *(data + 1);
+ *(data + 1) = *data;
+
+ m_adj(mp, ETHER_VLAN_ENCAP_LEN);
+ }
+
+ if (status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
+ mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
+ } else {
+ mp->m_pkthdr.csum_flags = 0;
+ }
+
+ if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
+ /* LRO packet has been successfully queued */
+ } else {
+ (*ifp->if_input)(ifp, mp);
+ }
+
+ if (sdsp->rx_free > std_replenish)
+ qla_replenish_normal_rx(ha, sdsp);
+
+ if (sdsp->rxj_free > jumbo_replenish)
+ qla_replenish_jumbo_rx(ha, sdsp);
+
+ return;
+}
+
+static void
+qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp)
+{
+ qla_rx_buf_t *rxb;
+ int count = jumbo_replenish;
+ uint32_t rxj_next;
+
+ if (!mtx_trylock(&ha->rxj_lock))
+ return;
+
+ rxj_next = ha->hw.rxj_next;
+
+ while (count--) {
+ rxb = sdsp->rxjb_free;
+
+ if (rxb == NULL)
+ break;
+
+ sdsp->rxjb_free = rxb->next;
+ sdsp->rxj_free--;
+
+
+ if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_JUMBO) == 0) {
+ qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO,
+ ha->hw.rxj_in, rxb->handle, rxb->paddr,
+ (rxb->m_head)->m_pkthdr.len);
+ ha->hw.rxj_in++;
+ if (ha->hw.rxj_in == NUM_RX_JUMBO_DESCRIPTORS)
+ ha->hw.rxj_in = 0;
+ ha->hw.rxj_next++;
+ if (ha->hw.rxj_next == NUM_RX_JUMBO_DESCRIPTORS)
+ ha->hw.rxj_next = 0;
+ } else {
+ device_printf(ha->pci_dev,
+ "%s: qla_get_mbuf [1,(%d),(%d)] failed\n",
+ __func__, ha->hw.rxj_in, rxb->handle);
+
+ rxb->m_head = NULL;
+ rxb->next = sdsp->rxjb_free;
+ sdsp->rxjb_free = rxb;
+ sdsp->rxj_free++;
+
+ break;
+ }
+ }
+
+ if (rxj_next != ha->hw.rxj_next) {
+ QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);
+ }
+ mtx_unlock(&ha->rxj_lock);
+}
+
+static void
+qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp)
+{
+ qla_rx_buf_t *rxb;
+ int count = std_replenish;
+ uint32_t rx_next;
+
+ if (!mtx_trylock(&ha->rx_lock))
+ return;
+
+ rx_next = ha->hw.rx_next;
+
+ while (count--) {
+ rxb = sdsp->rxb_free;
+
+ if (rxb == NULL)
+ break;
+
+ sdsp->rxb_free = rxb->next;
+ sdsp->rx_free--;
+
+ if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_NORMAL) == 0) {
+ qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL,
+ ha->hw.rx_in, rxb->handle, rxb->paddr,
+ (rxb->m_head)->m_pkthdr.len);
+ ha->hw.rx_in++;
+ if (ha->hw.rx_in == NUM_RX_DESCRIPTORS)
+ ha->hw.rx_in = 0;
+ ha->hw.rx_next++;
+ if (ha->hw.rx_next == NUM_RX_DESCRIPTORS)
+ ha->hw.rx_next = 0;
+ } else {
+ device_printf(ha->pci_dev,
+ "%s: qla_get_mbuf [0,(%d),(%d)] failed\n",
+ __func__, ha->hw.rx_in, rxb->handle);
+
+ rxb->m_head = NULL;
+ rxb->next = sdsp->rxb_free;
+ sdsp->rxb_free = rxb;
+ sdsp->rx_free++;
+
+ break;
+ }
+ }
+
+ if (rx_next != ha->hw.rx_next) {
+ QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
+ }
+ mtx_unlock(&ha->rx_lock);
+}
+
+/*
+ * Name: qla_rcv_isr
+ * Function: Processes the status descriptors of an SDS ring; invoked
+ * from both qla_isr() and the deferred receive task qla_rcv().
+ */
+static uint32_t
+qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
+{
+ device_t dev;
+ qla_hw_t *hw;
+ uint32_t comp_idx, desc_count;
+ q80_stat_desc_t *sdesc;
+ struct lro_ctrl *lro;
+ struct lro_entry *queued;
+ uint32_t ret = 0;
+
+ dev = ha->pci_dev;
+ hw = &ha->hw;
+
+ hw->sds[sds_idx].rcv_active = 1;
+ if (ha->flags.stop_rcv) {
+ hw->sds[sds_idx].rcv_active = 0;
+ return 0;
+ }
+
+ QL_DPRINT2((dev, "%s: [%d]enter\n", __func__, sds_idx));
+
+ /*
+ * receive interrupts
+ */
+ comp_idx = hw->sds[sds_idx].sdsr_next;
+ lro = &hw->sds[sds_idx].lro;
+
+ while (count--) {
+
+ sdesc = (q80_stat_desc_t *)
+ &hw->sds[sds_idx].sds_ring_base[comp_idx];
+
+ if (Q8_STAT_DESC_OWNER((sdesc->data[0])) !=
+ Q8_STAT_DESC_OWNER_HOST) {
+ QL_DPRINT2((dev, "%s: data %p sdsr_next 0x%08x\n",
+ __func__, (void *)sdesc->data[0], comp_idx));
+ break;
+ }
+
+ desc_count = Q8_STAT_DESC_COUNT((sdesc->data[0]));
+
+ switch (Q8_STAT_DESC_OPCODE((sdesc->data[0]))) {
+
+ case Q8_STAT_DESC_OPCODE_RCV_PKT:
+ case Q8_STAT_DESC_OPCODE_SYN_OFFLOAD:
+ qla_rx_intr(ha, (sdesc->data[0]), sds_idx, lro);
+
+ break;
+
+ default:
+ device_printf(dev, "%s: default 0x%llx!\n", __func__,
+ (long long unsigned int)sdesc->data[0]);
+ break;
+ }
+
+ while (desc_count--) {
+ sdesc->data[0] =
+ Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
+ comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
+ sdesc = (q80_stat_desc_t *)
+ &hw->sds[sds_idx].sds_ring_base[comp_idx];
+ }
+ }
+
+ while((!SLIST_EMPTY(&lro->lro_active))) {
+ queued = SLIST_FIRST(&lro->lro_active);
+ SLIST_REMOVE_HEAD(&lro->lro_active, next);
+ tcp_lro_flush(lro, queued);
+ }
+
+ if (hw->sds[sds_idx].sdsr_next != comp_idx) {
+ QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
+ }
+ hw->sds[sds_idx].sdsr_next = comp_idx;
+
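+ /*
+ * For ring 0: if the next descriptor is already owned by the host,
+ * more packets are pending, so return nonzero and let the caller
+ * defer further processing to the receive taskqueue.
+ */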
+ sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
+ if ((sds_idx == 0) && (Q8_STAT_DESC_OWNER((sdesc->data[0])) ==
+ Q8_STAT_DESC_OWNER_HOST)) {
+ ret = -1;
+ }
+
+ hw->sds[sds_idx].rcv_active = 0;
+ return (ret);
+}
+
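+/*
+ * Name: qla_isr
+ * Function: Main Interrupt Service Routine
+ */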
+void
+qla_isr(void *arg)
+{
+ qla_ivec_t *ivec = arg;
+ qla_host_t *ha;
+ uint32_t sds_idx;
+ uint32_t ret;
+
+ ha = ivec->ha;
+ sds_idx = ivec->irq_rid - 1;
+
+ if (sds_idx >= ha->hw.num_sds_rings) {
+ device_printf(ha->pci_dev, "%s: bogus sds_idx 0x%x\n", __func__,
+ sds_idx);
+
+ return;
+ }
+
+ if (sds_idx == 0)
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+
+ ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres);
+
+ if (sds_idx == 0)
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+
+ if (ret) {
+ taskqueue_enqueue(ha->irq_vec[sds_idx].rcv_tq,
+ &ha->irq_vec[sds_idx].rcv_task);
+ } else {
+ QL_ENABLE_INTERRUPTS(ha, sds_idx);
+ }
+}
+
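+/*
+ * Name: qla_rcv
+ * Function: Deferred receive task; drains the SDS ring in batches of
+ * rcv_pkt_thres_d descriptors until qla_rcv_isr() reports no pending
+ * work, then re-enables the ring's interrupt.
+ */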
+void
+qla_rcv(void *context, int pending)
+{
+ qla_ivec_t *ivec = context;
+ qla_host_t *ha;
+ device_t dev;
+ qla_hw_t *hw;
+ uint32_t sds_idx;
+ uint32_t ret;
+ struct ifnet *ifp;
+
+ ha = ivec->ha;
+ dev = ha->pci_dev;
+ hw = &ha->hw;
+ sds_idx = ivec->irq_rid - 1;
+ ifp = ha->ifp;
+
+ do {
+ if (sds_idx == 0) {
+ if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ } else if ((ifp->if_snd.ifq_head != NULL) &&
+ QL_RUNNING(ifp)) {
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ }
+ }
+ ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres_d);
+ } while (ret);
+
+ if (sds_idx == 0)
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+
+ QL_ENABLE_INTERRUPTS(ha, sds_idx);
+}
+
diff --git a/sys/dev/qlxgb/qla_misc.c b/sys/dev/qlxgb/qla_misc.c
new file mode 100644
index 0000000..c616d4f
--- /dev/null
+++ b/sys/dev/qlxgb/qla_misc.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * File : qla_misc.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_reg.h"
+#include "qla_inline.h"
+#include "qla_glbl.h"
+#include "qla_dbg.h"
+
+/*
+ * structure encapsulating the value to read/write to offchip memory
+ */
+typedef struct _offchip_mem_val {
+ uint32_t data_lo;
+ uint32_t data_hi;
+ uint32_t data_ulo;
+ uint32_t data_uhi;
+} offchip_mem_val_t;
+
+#define Q8_ADDR_UNDEFINED 0xFFFFFFFF
+
+/*
+ * The index to this table is Bits 20-27 of the indirect register address
+ */
+static uint32_t indirect_to_base_map[] =
+ {
+ Q8_ADDR_UNDEFINED, /* 0x00 */
+ 0x77300000, /* 0x01 */
+ 0x29500000, /* 0x02 */
+ 0x2A500000, /* 0x03 */
+ Q8_ADDR_UNDEFINED, /* 0x04 */
+ 0x0D000000, /* 0x05 */
+ 0x1B100000, /* 0x06 */
+ 0x0E600000, /* 0x07 */
+ 0x0E000000, /* 0x08 */
+ 0x0E100000, /* 0x09 */
+ 0x0E200000, /* 0x0A */
+ 0x0E300000, /* 0x0B */
+ 0x42000000, /* 0x0C */
+ 0x41700000, /* 0x0D */
+ 0x42100000, /* 0x0E */
+ 0x34B00000, /* 0x0F */
+ 0x40500000, /* 0x10 */
+ 0x34000000, /* 0x11 */
+ 0x34100000, /* 0x12 */
+ 0x34200000, /* 0x13 */
+ 0x34300000, /* 0x14 */
+ 0x34500000, /* 0x15 */
+ 0x34400000, /* 0x16 */
+ 0x3C000000, /* 0x17 */
+ 0x3C100000, /* 0x18 */
+ 0x3C200000, /* 0x19 */
+ 0x3C300000, /* 0x1A */
+ Q8_ADDR_UNDEFINED, /* 0x1B */
+ 0x3C400000, /* 0x1C */
+ 0x41000000, /* 0x1D */
+ Q8_ADDR_UNDEFINED, /* 0x1E */
+ 0x0D100000, /* 0x1F */
+ Q8_ADDR_UNDEFINED, /* 0x20 */
+ 0x77300000, /* 0x21 */
+ 0x41600000, /* 0x22 */
+ Q8_ADDR_UNDEFINED, /* 0x23 */
+ Q8_ADDR_UNDEFINED, /* 0x24 */
+ Q8_ADDR_UNDEFINED, /* 0x25 */
+ Q8_ADDR_UNDEFINED, /* 0x26 */
+ Q8_ADDR_UNDEFINED, /* 0x27 */
+ 0x41700000, /* 0x28 */
+ Q8_ADDR_UNDEFINED, /* 0x29 */
+ 0x08900000, /* 0x2A */
+ 0x70A00000, /* 0x2B */
+ 0x70B00000, /* 0x2C */
+ 0x70C00000, /* 0x2D */
+ 0x08D00000, /* 0x2E */
+ 0x08E00000, /* 0x2F */
+ 0x70F00000, /* 0x30 */
+ 0x40500000, /* 0x31 */
+ 0x42000000, /* 0x32 */
+ 0x42100000, /* 0x33 */
+ Q8_ADDR_UNDEFINED, /* 0x34 */
+ 0x08800000, /* 0x35 */
+ 0x09100000, /* 0x36 */
+ 0x71200000, /* 0x37 */
+ 0x40600000, /* 0x38 */
+ Q8_ADDR_UNDEFINED, /* 0x39 */
+ 0x71800000, /* 0x3A */
+ 0x19900000, /* 0x3B */
+ 0x1A900000, /* 0x3C */
+ Q8_ADDR_UNDEFINED, /* 0x3D */
+ 0x34600000, /* 0x3E */
+ Q8_ADDR_UNDEFINED, /* 0x3F */
+ };
+
+/*
+ * Address Translation Table for CRB to offsets from PCI BAR0
+ */
+typedef struct _crb_to_pci {
+ uint32_t crb_addr;
+ uint32_t pci_addr;
+} crb_to_pci_t;
+
+static crb_to_pci_t crbinit_to_pciaddr[] = {
+ {(0x088 << 20), (0x035 << 20)},
+ {(0x089 << 20), (0x02A << 20)},
+ {(0x08D << 20), (0x02E << 20)},
+ {(0x08E << 20), (0x02F << 20)},
+ {(0x0C6 << 20), (0x023 << 20)},
+ {(0x0C7 << 20), (0x024 << 20)},
+ {(0x0C8 << 20), (0x025 << 20)},
+ {(0x0D0 << 20), (0x005 << 20)},
+ {(0x0D1 << 20), (0x01F << 20)},
+ {(0x0E0 << 20), (0x008 << 20)},
+ {(0x0E1 << 20), (0x009 << 20)},
+ {(0x0E2 << 20), (0x00A << 20)},
+ {(0x0E3 << 20), (0x00B << 20)},
+ {(0x0E6 << 20), (0x007 << 20)},
+ {(0x199 << 20), (0x03B << 20)},
+ {(0x1B1 << 20), (0x006 << 20)},
+ {(0x295 << 20), (0x002 << 20)},
+ {(0x29A << 20), (0x000 << 20)},
+ {(0x2A5 << 20), (0x003 << 20)},
+ {(0x340 << 20), (0x011 << 20)},
+ {(0x341 << 20), (0x012 << 20)},
+ {(0x342 << 20), (0x013 << 20)},
+ {(0x343 << 20), (0x014 << 20)},
+ {(0x344 << 20), (0x016 << 20)},
+ {(0x345 << 20), (0x015 << 20)},
+ {(0x3C0 << 20), (0x017 << 20)},
+ {(0x3C1 << 20), (0x018 << 20)},
+ {(0x3C2 << 20), (0x019 << 20)},
+ {(0x3C3 << 20), (0x01A << 20)},
+ {(0x3C4 << 20), (0x01C << 20)},
+ {(0x3C5 << 20), (0x01B << 20)},
+ {(0x405 << 20), (0x031 << 20)},
+ {(0x406 << 20), (0x038 << 20)},
+ {(0x410 << 20), (0x01D << 20)},
+ {(0x416 << 20), (0x022 << 20)},
+ {(0x417 << 20), (0x028 << 20)},
+ {(0x420 << 20), (0x032 << 20)},
+ {(0x421 << 20), (0x033 << 20)},
+ {(0x700 << 20), (0x00C << 20)},
+ {(0x701 << 20), (0x00D << 20)},
+ {(0x702 << 20), (0x00E << 20)},
+ {(0x703 << 20), (0x00F << 20)},
+ {(0x704 << 20), (0x010 << 20)},
+ {(0x70A << 20), (0x02B << 20)},
+ {(0x70B << 20), (0x02C << 20)},
+ {(0x70C << 20), (0x02D << 20)},
+ {(0x70F << 20), (0x030 << 20)},
+ {(0x718 << 20), (0x03A << 20)},
+ {(0x758 << 20), (0x026 << 20)},
+ {(0x759 << 20), (0x027 << 20)},
+ {(0x773 << 20), (0x001 << 20)}
+};
+
+#define Q8_INVALID_ADDRESS (-1)
+#define Q8_ADDR_MASK (0xFFF << 20)
+
+typedef struct _addr_val {
+ uint32_t addr;
+ uint32_t value;
+ uint32_t pci_addr;
+ uint32_t ind_addr;
+} addr_val_t;
+
+/*
+ * Name: qla_rdwr_indreg32
+ * Function: Read/Write an Indirect Register
+ */
+int
+qla_rdwr_indreg32(qla_host_t *ha, uint32_t addr, uint32_t *val, uint32_t rd)
+{
+ uint32_t offset;
+ int count = 100;
+
+ offset = (addr & 0xFFF00000) >> 20;
+
+ if (offset > 0x3F) {
+ device_printf(ha->pci_dev, "%s: invalid addr 0x%08x\n",
+ __func__, addr);
+ return -1;
+ }
+
+ offset = indirect_to_base_map[offset];
+ if (offset == Q8_ADDR_UNDEFINED) {
+ device_printf(ha->pci_dev, "%s: undefined map 0x%08x\n",
+ __func__, addr);
+ return -1;
+ }
+
+ offset = offset | (addr & 0x000F0000);
+
+ if (qla_sem_lock(ha, Q8_SEM7_LOCK, 0, 0)) {
+ device_printf(ha->pci_dev, "%s: SEM7_LOCK failed\n", __func__);
+ return (-1);
+ }
+
+ WRITE_OFFSET32(ha, Q8_CRB_WINDOW_2M, offset);
+
+ while (offset != (READ_OFFSET32(ha, Q8_CRB_WINDOW_2M))) {
+ count--;
+ if (!count) {
+ qla_sem_unlock(ha, Q8_SEM7_UNLOCK);
+ return -1;
+ }
+
+ qla_mdelay(__func__, 1);
+ }
+
+ if (rd) {
+ *val = READ_OFFSET32(ha, ((addr & 0xFFFF) | 0x1E0000));
+ } else {
+ WRITE_OFFSET32(ha, ((addr & 0xFFFF) | 0x1E0000), *val);
+ }
+
+ qla_sem_unlock(ha, Q8_SEM7_UNLOCK);
+ return 0;
+}
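+
+/*
+ * Example usage (see qla_rd_flash32() below):
+ *
+ * val = addr;
+ * qla_rdwr_indreg32(ha, Q8_ROM_ADDRESS, &val, 0); // write
+ * qla_rdwr_indreg32(ha, Q8_ROM_RD_DATA, data, 1); // read
+ */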
+
+/*
+ * Name: qla_rdwr_offchip_mem
+ * Function: Read/Write OffChip Memory
+ */
+static int
+qla_rdwr_offchip_mem(qla_host_t *ha, uint64_t addr, offchip_mem_val_t *val,
+ uint32_t rd)
+{
+ uint32_t count = 100;
+ uint32_t data;
+
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_ADDR_LO, (uint32_t)addr);
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_ADDR_HI, (uint32_t)(addr >> 32));
+
+ if (!rd) {
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_WRDATA_LO, val->data_lo);
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_WRDATA_HI, val->data_hi);
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_WRDATA_ULO, val->data_ulo);
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_WRDATA_UHI, val->data_uhi);
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_CTRL, 0x07); /* Write */
+ } else {
+ WRITE_OFFSET32(ha, Q8_MIU_TEST_AGT_CTRL, 0x03); /* Read */
+ }
+
+ while (count--) {
+ data = READ_OFFSET32(ha, Q8_MIU_TEST_AGT_CTRL);
+ if (!(data & BIT_3)) {
+ if (rd) {
+ val->data_lo = READ_OFFSET32(ha, \
+ Q8_MIU_TEST_AGT_RDDATA_LO);
+ val->data_hi = READ_OFFSET32(ha, \
+ Q8_MIU_TEST_AGT_RDDATA_HI);
+ val->data_ulo = READ_OFFSET32(ha, \
+ Q8_MIU_TEST_AGT_RDDATA_ULO);
+ val->data_uhi = READ_OFFSET32(ha, \
+ Q8_MIU_TEST_AGT_RDDATA_UHI);
+ }
+ return 0;
+ } else
+ qla_mdelay(__func__, 1);
+ }
+
+ device_printf(ha->pci_dev, "%s: failed[0x%08x]\n", __func__, data);
+ return (-1);
+}
+
+/*
+ * Name: qla_rd_flash32
+ * Function: Read Flash Memory
+ */
+int
+qla_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
+{
+ uint32_t val;
+ uint32_t count = 100;
+
+ if (qla_sem_lock(ha, Q8_SEM2_LOCK, 0, 0)) {
+ device_printf(ha->pci_dev, "%s: SEM2_LOCK failed\n", __func__);
+ return (-1);
+ }
+ WRITE_OFFSET32(ha, Q8_ROM_LOCKID, 0xa5a5a5a5);
+
+ val = addr;
+ qla_rdwr_indreg32(ha, Q8_ROM_ADDRESS, &val, 0);
+ val = 0;
+ qla_rdwr_indreg32(ha, Q8_ROM_DUMMY_BYTE_COUNT, &val, 0);
+ val = 3;
+ qla_rdwr_indreg32(ha, Q8_ROM_ADDR_BYTE_COUNT, &val, 0);
+
+ QLA_USEC_DELAY(100);
+
+ val = ROM_OPCODE_FAST_RD;
+ qla_rdwr_indreg32(ha, Q8_ROM_INSTR_OPCODE, &val, 0);
+
+ while (!((val = READ_OFFSET32(ha, Q8_ROM_STATUS)) & BIT_1)) {
+ count--;
+ if (!count) {
+ qla_sem_unlock(ha, Q8_SEM2_UNLOCK);
+ return -1;
+ }
+ }
+
+ val = 0;
+ qla_rdwr_indreg32(ha, Q8_ROM_DUMMY_BYTE_COUNT, &val, 0);
+ qla_rdwr_indreg32(ha, Q8_ROM_ADDR_BYTE_COUNT, &val, 0);
+
+ QLA_USEC_DELAY(100);
+
+ qla_rdwr_indreg32(ha, Q8_ROM_RD_DATA, data, 1);
+
+ qla_sem_unlock(ha, Q8_SEM2_UNLOCK);
+ return 0;
+}
+
+/*
+ * Name: qla_int_to_pci_addr_map
+ * Function: Converts an internal (CRB) address to an indirect address
+ */
+static uint32_t
+qla_int_to_pci_addr_map(qla_host_t *ha, uint32_t int_addr)
+{
+ uint32_t crb_to_pci_table_size, i;
+ uint32_t addr;
+
+ crb_to_pci_table_size = sizeof(crbinit_to_pciaddr)/sizeof(crb_to_pci_t);
+ addr = int_addr & Q8_ADDR_MASK;
+
+ for (i = 0; i < crb_to_pci_table_size; i++) {
+ if (crbinit_to_pciaddr[i].crb_addr == addr) {
+ addr = (int_addr & ~Q8_ADDR_MASK) |
+ crbinit_to_pciaddr[i].pci_addr;
+ return (addr);
+ }
+ }
+ return (Q8_INVALID_ADDRESS);
+}
+
+/*
+ * Name: qla_filter_pci_addr
+ * Function: Filters out indirect addresses which are not writable
+ */
+static uint32_t
+qla_filter_pci_addr(qla_host_t *ha, uint32_t addr)
+{
+ if ((addr == Q8_INVALID_ADDRESS) ||
+ (addr == 0x00112040) ||
+ (addr == 0x00112048) ||
+ ((addr & 0xFFFF0FFF) == 0x001100C4) ||
+ ((addr & 0xFFFF0FFF) == 0x001100C8) ||
+ ((addr & 0x0FF00000) == 0x00200000) ||
+ (addr == 0x022021FC) ||
+ (addr == 0x0330001C) ||
+ (addr == 0x03300024) ||
+ (addr == 0x033000A8) ||
+ (addr == 0x033000C8) ||
+ (addr == 0x033000BC) ||
+ ((addr & 0x0FF00000) == 0x03A00000) ||
+ (addr == 0x03B0001C))
+ return (Q8_INVALID_ADDRESS);
+ else
+ return (addr);
+}
+
+/*
+ * Name: qla_crb_init
+ * Function: CRB Initialization - first step in the initialization after reset
+ * Reads the {value, address} dword pairs stored in flash and
+ * writes each value to its paired register address.
+ */
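+/*
+ * Flash layout consumed here (as decoded below):
+ * dword 0: signature
+ * dword 1: (pair count << 16) | (dword index of the table)
+ * table : "count" {value, address} dword pairs
+ */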
+static int
+qla_crb_init(qla_host_t *ha)
+{
+ uint32_t val, sig;
+ uint32_t offset, count, i;
+ addr_val_t *addr_val_map, *avmap;
+
+ qla_rd_flash32(ha, 0, &sig);
+ QL_DPRINT2((ha->pci_dev, "%s: val[0] = 0x%08x\n", __func__, val));
+
+ qla_rd_flash32(ha, 4, &val);
+ QL_DPRINT2((ha->pci_dev, "%s: val[4] = 0x%08x\n", __func__, val));
+
+ count = val >> 16;
+ offset = val & 0xFFFF;
+ offset = offset << 2;
+
+ QL_DPRINT2((ha->pci_dev, "%s: [sig,val]=[0x%08x, 0x%08x] %d pairs\n",
+ __func__, sig, val, count));
+
+ addr_val_map = avmap = malloc((sizeof(addr_val_t) * count),
+ M_QLA8XXXBUF, M_NOWAIT);
+
+ if (addr_val_map == NULL) {
+ device_printf(ha->pci_dev, "%s: malloc failed\n", __func__);
+ return (-1);
+ }
+ memset(avmap, 0, (sizeof(addr_val_t) * count));
+
+ count = count << 1;
+ for (i = 0; i < count; ) {
+ qla_rd_flash32(ha, (offset + (i * 4)), &avmap->value);
+ i++;
+ qla_rd_flash32(ha, (offset + (i * 4)), &avmap->addr);
+ i++;
+
+ avmap->pci_addr = qla_int_to_pci_addr_map(ha, avmap->addr);
+ avmap->ind_addr = qla_filter_pci_addr(ha, avmap->pci_addr);
+
+ QL_DPRINT2((ha->pci_dev,
+ "%s: [0x%02x][0x%08x:0x%08x:0x%08x] 0x%08x\n",
+ __func__, (i >> 1), avmap->addr, avmap->pci_addr,
+ avmap->ind_addr, avmap->value));
+
+ if (avmap->ind_addr != Q8_INVALID_ADDRESS) {
+ qla_rdwr_indreg32(ha, avmap->ind_addr, &avmap->value,0);
+ qla_mdelay(__func__, 1);
+ }
+ avmap++;
+ }
+
+ free (addr_val_map, M_QLA8XXXBUF);
+ return (0);
+}
+
+/*
+ * Name: qla_init_peg_regs
+ * Function: Protocol Engine Register Initialization
+ */
+static void
+qla_init_peg_regs(qla_host_t *ha)
+{
+ WRITE_OFFSET32(ha, Q8_PEG_D_RESET1, 0x001E);
+ WRITE_OFFSET32(ha, Q8_PEG_D_RESET2, 0x0008);
+ WRITE_OFFSET32(ha, Q8_PEG_I_RESET, 0x0008);
+ WRITE_OFFSET32(ha, Q8_PEG_0_CLR1, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_0_CLR2, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_1_CLR1, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_1_CLR2, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_2_CLR1, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_2_CLR2, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_3_CLR1, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_3_CLR2, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_4_CLR1, 0x0000);
+ WRITE_OFFSET32(ha, Q8_PEG_4_CLR2, 0x0000);
+}
+
+/*
+ * Name: qla_load_fw_from_flash
+ * Function: Reads the Bootloader from Flash and Loads into Offchip Memory
+ */
+static void
+qla_load_fw_from_flash(qla_host_t *ha)
+{
+ uint64_t mem_off = 0x10000;
+ uint32_t flash_off = 0x10000;
+ uint32_t count;
+ offchip_mem_val_t val;
+
+
+ /* only bootloader needs to be loaded into memory */
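+ /*
+ * The MIU test agent moves 16 bytes (four dwords) per transaction,
+ * so the 128KB (0x20000 byte) bootloader image is copied from flash
+ * in 16-byte chunks.
+ */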
+ for (count = 0; count < 0x20000 ; ) {
+ qla_rd_flash32(ha, flash_off, &val.data_lo);
+ count = count + 4;
+ flash_off = flash_off + 4;
+
+ qla_rd_flash32(ha, flash_off, &val.data_hi);
+ count = count + 4;
+ flash_off = flash_off + 4;
+
+ qla_rd_flash32(ha, flash_off, &val.data_ulo);
+ count = count + 4;
+ flash_off = flash_off + 4;
+
+ qla_rd_flash32(ha, flash_off, &val.data_uhi);
+ count = count + 4;
+ flash_off = flash_off + 4;
+
+ qla_rdwr_offchip_mem(ha, mem_off, &val, 0);
+
+ mem_off = mem_off + 16;
+ }
+ return;
+}
+
+/*
+ * Name: qla_init_from_flash
+ * Function: Performs Initialization which consists of the following sequence
+ * - reset
+ * - CRB Init
+ * - Peg Init
+ * - Read the Bootloader from Flash and Load into Offchip Memory
+ * - Kick start the bootloader which loads the rest of the firmware
+ * and performs the remaining steps in the initialization process.
+ */
+static int
+qla_init_from_flash(qla_host_t *ha)
+{
+ uint32_t delay = 300;
+ uint32_t data;
+
+ qla_hw_reset(ha);
+ qla_mdelay(__func__, 100);
+
+ qla_crb_init(ha);
+ qla_mdelay(__func__, 10);
+
+ qla_init_peg_regs(ha);
+ qla_mdelay(__func__, 10);
+
+ qla_load_fw_from_flash(ha);
+
+ WRITE_OFFSET32(ha, Q8_CMDPEG_STATE, 0x00000000);
+ WRITE_OFFSET32(ha, Q8_PEG_0_RESET, 0x00001020);
+ WRITE_OFFSET32(ha, Q8_ASIC_RESET, 0x0080001E);
+ qla_mdelay(__func__, 100);
+
+ do {
+ data = READ_OFFSET32(ha, Q8_CMDPEG_STATE);
+
+ QL_DPRINT2((ha->pci_dev, "%s: func[%d] cmdpegstate 0x%08x\n",
+ __func__, ha->pci_func, data));
+ if (data == CMDPEG_PHAN_INIT_COMPLETE) {
+ QL_DPRINT2((ha->pci_dev,
+ "%s: func[%d] init complete\n",
+ __func__, ha->pci_func));
+ return(0);
+ }
+ qla_mdelay(__func__, 100);
+ } while (delay--);
+
+ device_printf(ha->pci_dev,
+ "%s: func[%d] Q8_PEG_HALT_STATUS1[0x%08x] STATUS2[0x%08x]"
+ " HEARTBEAT[0x%08x] RCVPEG_STATE[0x%08x]"
+ " CMDPEG_STATE[0x%08x]\n",
+ __func__, ha->pci_func,
+ (READ_OFFSET32(ha, Q8_PEG_HALT_STATUS1)),
+ (READ_OFFSET32(ha, Q8_PEG_HALT_STATUS2)),
+ (READ_OFFSET32(ha, Q8_FIRMWARE_HEARTBEAT)),
+ (READ_OFFSET32(ha, Q8_RCVPEG_STATE)), data);
+
+ return (-1);
+}
+
+/*
+ * Name: qla_init_hw
+ * Function: Initializes P3+ hardware.
+ */
+int
+qla_init_hw(qla_host_t *ha)
+{
+ device_t dev;
+ int ret = 0;
+ uint32_t val, delay = 300;
+
+ dev = ha->pci_dev;
+
+ QL_DPRINT1((dev, "%s: enter\n", __func__));
+
+ qla_mdelay(__func__, 100);
+
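+ /*
+ * Only PCI function 0 initializes the chip from flash; function 1
+ * just polls until function 0 reports CMDPEG_PHAN_INIT_COMPLETE.
+ */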
+ if (ha->pci_func & 0x1) {
+ while ((ha->pci_func & 0x1) && delay--) {
+ val = READ_OFFSET32(ha, Q8_CMDPEG_STATE);
+
+ if (val == CMDPEG_PHAN_INIT_COMPLETE) {
+ QL_DPRINT2((dev,
+ "%s: func = %d init complete\n",
+ __func__, ha->pci_func));
+ qla_mdelay(__func__, 100);
+ goto qla_init_exit;
+ }
+ qla_mdelay(__func__, 100);
+ }
+ return (-1);
+ }
+
+ val = READ_OFFSET32(ha, Q8_CMDPEG_STATE);
+
+ if (val != CMDPEG_PHAN_INIT_COMPLETE) {
+ ret = qla_init_from_flash(ha);
+ qla_mdelay(__func__, 100);
+ }
+
+qla_init_exit:
+ ha->fw_ver_major = READ_OFFSET32(ha, Q8_FW_VER_MAJOR);
+ ha->fw_ver_minor = READ_OFFSET32(ha, Q8_FW_VER_MINOR);
+ ha->fw_ver_sub = READ_OFFSET32(ha, Q8_FW_VER_SUB);
+ ha->fw_ver_build = READ_OFFSET32(ha, Q8_FW_VER_BUILD);
+
+ return (ret);
+}
+
diff --git a/sys/dev/qlxgb/qla_os.c b/sys/dev/qlxgb/qla_os.c
new file mode 100644
index 0000000..1fc30f5
--- /dev/null
+++ b/sys/dev/qlxgb/qla_os.c
@@ -0,0 +1,1481 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File: qla_os.c
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "qla_os.h"
+#include "qla_reg.h"
+#include "qla_hw.h"
+#include "qla_def.h"
+#include "qla_inline.h"
+#include "qla_ver.h"
+#include "qla_glbl.h"
+#include "qla_dbg.h"
+
+/*
+ * Some PCI Configuration Space Related Defines
+ */
+
+#ifndef PCI_VENDOR_QLOGIC
+#define PCI_VENDOR_QLOGIC 0x1077
+#endif
+
+#ifndef PCI_PRODUCT_QLOGIC_ISP8020
+#define PCI_PRODUCT_QLOGIC_ISP8020 0x8020
+#endif
+
+#define PCI_QLOGIC_ISP8020 \
+ ((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC)
+
+/*
+ * static functions
+ */
+static int qla_alloc_parent_dma_tag(qla_host_t *ha);
+static void qla_free_parent_dma_tag(qla_host_t *ha);
+static int qla_alloc_xmt_bufs(qla_host_t *ha);
+static void qla_free_xmt_bufs(qla_host_t *ha);
+static int qla_alloc_rcv_bufs(qla_host_t *ha);
+static void qla_free_rcv_bufs(qla_host_t *ha);
+
+static void qla_init_ifnet(device_t dev, qla_host_t *ha);
+static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
+static void qla_release(qla_host_t *ha);
+static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
+ int error);
+static void qla_stop(qla_host_t *ha);
+static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
+static void qla_tx_done(void *context, int pending);
+
+/*
+ * Hooks to the Operating Systems
+ */
+static int qla_pci_probe (device_t);
+static int qla_pci_attach (device_t);
+static int qla_pci_detach (device_t);
+
+static void qla_init(void *arg);
+static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
+static int qla_media_change(struct ifnet *ifp);
+static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
+
+static device_method_t qla_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, qla_pci_probe),
+ DEVMETHOD(device_attach, qla_pci_attach),
+ DEVMETHOD(device_detach, qla_pci_detach),
+ { 0, 0 }
+};
+
+static driver_t qla_pci_driver = {
+ "ql", qla_pci_methods, sizeof (qla_host_t),
+};
+
+static devclass_t qla80xx_devclass;
+
+DRIVER_MODULE(qla80xx, pci, qla_pci_driver, qla80xx_devclass, 0, 0);
+
+MODULE_DEPEND(qla80xx, pci, 1, 1, 1);
+MODULE_DEPEND(qla80xx, ether, 1, 1, 1);
+
+MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver");
+
+uint32_t std_replenish = 8;
+uint32_t jumbo_replenish = 2;
+uint32_t rcv_pkt_thres = 128;
+uint32_t rcv_pkt_thres_d = 32;
+uint32_t snd_pkt_thres = 16;
+uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2);
+
+static char dev_str[64];
+
+/*
+ * Name: qla_pci_probe
+ * Function: Validate the PCI device to be a QLA80XX device
+ */
+static int
+qla_pci_probe(device_t dev)
+{
+ switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
+ case PCI_QLOGIC_ISP8020:
+ snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
+ "Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function",
+ QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
+ QLA_VERSION_BUILD);
+ device_set_desc(dev, dev_str);
+ break;
+ default:
+ return (ENXIO);
+ }
+
+ if (bootverbose)
+ printf("%s: %s\n", __func__, dev_str);
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static void
+qla_add_sysctls(qla_host_t *ha)
+{
+ device_t dev = ha->pci_dev;
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD,
+ (void *)ha, 0,
+ qla_sysctl_get_stats, "I", "Statistics");
+
+ dbg_level = 0;
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "debug", CTLFLAG_RW,
+ &dbg_level, dbg_level, "Debug Level");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "std_replenish", CTLFLAG_RW,
+ &std_replenish, std_replenish,
+ "Threshold for Replenishing Standard Frames");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "jumbo_replenish", CTLFLAG_RW,
+ &jumbo_replenish, jumbo_replenish,
+ "Threshold for Replenishing Jumbo Frames");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "rcv_pkt_thres", CTLFLAG_RW,
+ &rcv_pkt_thres, rcv_pkt_thres,
+ "Threshold for # of rcv pkts to trigger indication isr");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "rcv_pkt_thres_d", CTLFLAG_RW,
+ &rcv_pkt_thres_d, rcv_pkt_thres_d,
+ "Threshold for # of rcv pkts to trigger indication defered");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "snd_pkt_thres", CTLFLAG_RW,
+ &snd_pkt_thres, snd_pkt_thres,
+ "Threshold for # of snd packets");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "free_pkt_thres", CTLFLAG_RW,
+ &free_pkt_thres, free_pkt_thres,
+ "Threshold for # of packets to free at a time");
+
+ return;
+}
+
+static void
+qla_watchdog(void *arg)
+{
+ qla_host_t *ha = arg;
+ qla_hw_t *hw;
+ struct ifnet *ifp;
+
+ hw = &ha->hw;
+ ifp = ha->ifp;
+
+ if (ha->flags.qla_watchdog_exit)
+ return;
+
+ if (!ha->flags.qla_watchdog_pause) {
+ if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ } else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
+ taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
+ }
+ }
+ ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
+ callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
+ qla_watchdog, ha);
+}
+
+/*
+ * Name: qla_pci_attach
+ * Function: attaches the device to the operating system
+ */
+static int
+qla_pci_attach(device_t dev)
+{
+ qla_host_t *ha = NULL;
+ uint32_t rsrc_len, i;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ if ((ha = device_get_softc(dev)) == NULL) {
+ device_printf(dev, "cannot get softc\n");
+ return (ENOMEM);
+ }
+
+ memset(ha, 0, sizeof (qla_host_t));
+
+ if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) {
+ device_printf(dev, "device is not ISP8020\n");
+ return (ENXIO);
+ }
+
+ ha->pci_func = pci_get_function(dev);
+
+ ha->pci_dev = dev;
+
+ pci_enable_busmaster(dev);
+
+ ha->reg_rid = PCIR_BAR(0);
+ ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
+ RF_ACTIVE);
+
+ if (ha->pci_reg == NULL) {
+ device_printf(dev, "unable to map any ports\n");
+ goto qla_pci_attach_err;
+ }
+
+ rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
+ ha->reg_rid);
+
+ mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ ha->flags.lock_init = 1;
+
+ ha->msix_count = pci_msix_count(dev);
+
+ if (ha->msix_count < qla_get_msix_count(ha)) {
+ device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
+ ha->msix_count);
+ goto qla_pci_attach_err;
+ }
+
+ QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x"
+ " msix_count 0x%x pci_reg %p\n", __func__, ha,
+ ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));
+
+ ha->msix_count = qla_get_msix_count(ha);
+
+ if (pci_alloc_msix(dev, &ha->msix_count)) {
+ device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
+ ha->msix_count);
+ ha->msix_count = 0;
+ goto qla_pci_attach_err;
+ }
+
+ TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
+ ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
+ taskqueue_thread_enqueue, &ha->tx_tq);
+ taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
+ device_get_nameunit(ha->pci_dev));
+
+ for (i = 0; i < ha->msix_count; i++) {
+ ha->irq_vec[i].irq_rid = i+1;
+ ha->irq_vec[i].ha = ha;
+
+ ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &ha->irq_vec[i].irq_rid,
+ (RF_ACTIVE | RF_SHAREABLE));
+
+ if (ha->irq_vec[i].irq == NULL) {
+ device_printf(dev, "could not allocate interrupt\n");
+ goto qla_pci_attach_err;
+ }
+
+ if (bus_setup_intr(dev, ha->irq_vec[i].irq,
+ (INTR_TYPE_NET | INTR_MPSAFE),
+ NULL, qla_isr, &ha->irq_vec[i],
+ &ha->irq_vec[i].handle)) {
+ device_printf(dev, "could not setup interrupt\n");
+ goto qla_pci_attach_err;
+ }
+
+		TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,
+			&ha->irq_vec[i]);
+
+ ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq",
+ M_NOWAIT, taskqueue_thread_enqueue,
+ &ha->irq_vec[i].rcv_tq);
+
+ taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET,
+ "%s rcvq",
+ device_get_nameunit(ha->pci_dev));
+ }
+
+ qla_add_sysctls(ha);
+
+ /* add hardware specific sysctls */
+ qla_hw_add_sysctls(ha);
+
+ /* initialize hardware */
+ if (qla_init_hw(ha)) {
+ device_printf(dev, "%s: qla_init_hw failed\n", __func__);
+ goto qla_pci_attach_err;
+ }
+
+ device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
+ ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
+ ha->fw_ver_build);
+
+ //qla_get_hw_caps(ha);
+ qla_read_mac_addr(ha);
+
+ /* allocate parent dma tag */
+ if (qla_alloc_parent_dma_tag(ha)) {
+ device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
+ __func__);
+ goto qla_pci_attach_err;
+ }
+
+ /* alloc all dma buffers */
+ if (qla_alloc_dma(ha)) {
+ device_printf(dev, "%s: qla_alloc_dma failed\n", __func__);
+ goto qla_pci_attach_err;
+ }
+
+ /* create the o.s ethernet interface */
+ qla_init_ifnet(dev, ha);
+
+ ha->flags.qla_watchdog_active = 1;
+ ha->flags.qla_watchdog_pause = 1;
+
+ callout_init(&ha->tx_callout, TRUE);
+
+ /* create ioctl device interface */
+ if (qla_make_cdev(ha)) {
+ device_printf(dev, "%s: qla_make_cdev failed\n", __func__);
+ goto qla_pci_attach_err;
+ }
+
+ callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
+ qla_watchdog, ha);
+
+ QL_DPRINT2((dev, "%s: exit 0\n", __func__));
+ return (0);
+
+qla_pci_attach_err:
+
+ qla_release(ha);
+
+ QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
+ return (ENXIO);
+}
+
+/*
+ * Name: qla_pci_detach
+ * Function: Unhooks the device from the operating system
+ */
+static int
+qla_pci_detach(device_t dev)
+{
+ qla_host_t *ha = NULL;
+ struct ifnet *ifp;
+ int i;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ if ((ha = device_get_softc(dev)) == NULL) {
+ device_printf(dev, "cannot get softc\n");
+ return (ENOMEM);
+ }
+
+ ifp = ha->ifp;
+
+ QLA_LOCK(ha, __func__);
+ qla_stop(ha);
+ QLA_UNLOCK(ha, __func__);
+
+ if (ha->tx_tq) {
+ taskqueue_drain(ha->tx_tq, &ha->tx_task);
+ taskqueue_free(ha->tx_tq);
+ }
+
+ for (i = 0; i < ha->msix_count; i++) {
+ taskqueue_drain(ha->irq_vec[i].rcv_tq,
+ &ha->irq_vec[i].rcv_task);
+ taskqueue_free(ha->irq_vec[i].rcv_tq);
+ }
+
+ qla_release(ha);
+
+ QL_DPRINT2((dev, "%s: exit\n", __func__));
+
+ return (0);
+}
+
+/*
+ * SYSCTL Related Callbacks
+ */
+static int
+qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
+{
+ int err, ret = 0;
+ qla_host_t *ha;
+
+ err = sysctl_handle_int(oidp, &ret, 0, req);
+
+ if (err)
+ return (err);
+
+ ha = (qla_host_t *)arg1;
+ //qla_get_stats(ha);
+ QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret));
+ return (err);
+}
+
+/*
+ * Name: qla_release
+ * Function: Releases the resources allocated for the device
+ */
+static void
+qla_release(qla_host_t *ha)
+{
+ device_t dev;
+ int i;
+
+ dev = ha->pci_dev;
+
+ qla_del_cdev(ha);
+
+ if (ha->flags.qla_watchdog_active)
+ ha->flags.qla_watchdog_exit = 1;
+
+ callout_stop(&ha->tx_callout);
+ qla_mdelay(__func__, 100);
+
+ if (ha->ifp != NULL)
+ ether_ifdetach(ha->ifp);
+
+ qla_free_dma(ha);
+ qla_free_parent_dma_tag(ha);
+
+ for (i = 0; i < ha->msix_count; i++) {
+ if (ha->irq_vec[i].handle)
+ (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
+ ha->irq_vec[i].handle);
+ if (ha->irq_vec[i].irq)
+ (void) bus_release_resource(dev, SYS_RES_IRQ,
+ ha->irq_vec[i].irq_rid,
+ ha->irq_vec[i].irq);
+ }
+ if (ha->msix_count)
+ pci_release_msi(dev);
+
+ if (ha->flags.lock_init) {
+ mtx_destroy(&ha->tx_lock);
+ mtx_destroy(&ha->rx_lock);
+ mtx_destroy(&ha->rxj_lock);
+ mtx_destroy(&ha->hw_lock);
+ }
+
+ if (ha->pci_reg)
+ (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
+ ha->pci_reg);
+}
+
+/*
+ * DMA Related Functions
+ */
+
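+/*
+ * qla_dmamap_callback is invoked by bus_dmamap_load(); it returns the
+ * bus address of the (single) DMA segment via arg, or 0 on failure.
+ */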
+static void
+qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ *((bus_addr_t *)arg) = 0;
+
+ if (error) {
+ printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
+ return;
+ }
+
+ QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs));
+
+ *((bus_addr_t *)arg) = segs[0].ds_addr;
+
+ return;
+}
+
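+/*
+ * Name: qla_alloc_dmabuf
+ * Function: Allocates a DMA buffer (tag, map, backing memory and its
+ * bus address) as described by dma_buf
+ */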
+int
+qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
+{
+ int ret = 0;
+ device_t dev;
+ bus_addr_t b_addr;
+
+ dev = ha->pci_dev;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ ret = bus_dma_tag_create(
+ ha->parent_tag,/* parent */
+ dma_buf->alignment,
+ ((bus_size_t)(1ULL << 32)),/* boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ dma_buf->size, /* maxsize */
+ 1, /* nsegments */
+ dma_buf->size, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &dma_buf->dma_tag);
+
+ if (ret) {
+ device_printf(dev, "%s: could not create dma tag\n", __func__);
+ goto qla_alloc_dmabuf_exit;
+ }
+ ret = bus_dmamem_alloc(dma_buf->dma_tag,
+ (void **)&dma_buf->dma_b,
+ (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
+ &dma_buf->dma_map);
+ if (ret) {
+ bus_dma_tag_destroy(dma_buf->dma_tag);
+ device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
+ goto qla_alloc_dmabuf_exit;
+ }
+
+ ret = bus_dmamap_load(dma_buf->dma_tag,
+ dma_buf->dma_map,
+ dma_buf->dma_b,
+ dma_buf->size,
+ qla_dmamap_callback,
+ &b_addr, BUS_DMA_NOWAIT);
+
+ if (ret || !b_addr) {
+ bus_dma_tag_destroy(dma_buf->dma_tag);
+ bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
+ dma_buf->dma_map);
+ ret = -1;
+ goto qla_alloc_dmabuf_exit;
+ }
+
+ dma_buf->dma_addr = b_addr;
+
+qla_alloc_dmabuf_exit:
+ QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
+ __func__, ret, (void *)dma_buf->dma_tag,
+ (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
+ dma_buf->size));
+
+	return (ret);
+}
+
+void
+qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
+{
+ bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
+ bus_dma_tag_destroy(dma_buf->dma_tag);
+}
+
+static int
+qla_alloc_parent_dma_tag(qla_host_t *ha)
+{
+ int ret;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
+ /*
+ * Allocate parent DMA Tag
+ */
+ ret = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* parent */
+ 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
+ 0, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &ha->parent_tag);
+
+ if (ret) {
+ device_printf(dev, "%s: could not create parent dma tag\n",
+ __func__);
+ return (-1);
+ }
+
+ ha->flags.parent_tag = 1;
+
+ return (0);
+}
+
+static void
+qla_free_parent_dma_tag(qla_host_t *ha)
+{
+ if (ha->flags.parent_tag) {
+ bus_dma_tag_destroy(ha->parent_tag);
+ ha->flags.parent_tag = 0;
+ }
+}
+
+/*
+ * Name: qla_init_ifnet
+ * Function: Creates the network device interface and registers it with the O.S.
+ */
+
+static void
+qla_init_ifnet(device_t dev, qla_host_t *ha)
+{
+ struct ifnet *ifp;
+
+ QL_DPRINT2((dev, "%s: enter\n", __func__));
+
+ ifp = ha->ifp = if_alloc(IFT_ETHER);
+
+ if (ifp == NULL)
+ panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
+
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+
+ ifp->if_mtu = ETHERMTU;
+	ifp->if_baudrate = (1 * 1000 * 1000 * 1000);
+ ifp->if_init = qla_init;
+ ifp->if_softc = ha;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = qla_ioctl;
+ ifp->if_start = qla_start;
+
+ IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
+ ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
+ IFQ_SET_READY(&ifp->if_snd);
+
+ ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ ether_ifattach(ifp, qla_get_mac_addr(ha));
+
+ ifp->if_capabilities = IFCAP_HWCSUM |
+ IFCAP_TSO4 |
+ IFCAP_TSO6 |
+ IFCAP_JUMBO_MTU;
+
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+
+#if defined(__FreeBSD_version) && (__FreeBSD_version < 900002)
+ ifp->if_timer = 0;
+ ifp->if_watchdog = NULL;
+#endif /* #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) */
+
+ ifp->if_capenable = ifp->if_capabilities;
+
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+ ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
+
+ ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
+ NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
+
+ ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
+
+ QL_DPRINT2((dev, "%s: exit\n", __func__));
+
+ return;
+}
+
+static void
+qla_init_locked(qla_host_t *ha)
+{
+ struct ifnet *ifp = ha->ifp;
+
+ qla_stop(ha);
+
+ if (qla_alloc_xmt_bufs(ha) != 0)
+ return;
+
+ if (qla_alloc_rcv_bufs(ha) != 0)
+ return;
+
+ if (qla_config_lro(ha))
+ return;
+
+ bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
+
+ ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
+
+ ha->flags.stop_rcv = 0;
+ if (qla_init_hw_if(ha) == 0) {
+ ifp = ha->ifp;
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ ha->flags.qla_watchdog_pause = 0;
+ }
+
+ return;
+}
+
+static void
+qla_init(void *arg)
+{
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)arg;
+
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ QLA_LOCK(ha, __func__);
+ qla_init_locked(ha);
+ QLA_UNLOCK(ha, __func__);
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
+}
+
+static void
+qla_set_multi(qla_host_t *ha, uint32_t add_multi)
+{
+ uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
+ struct ifmultiaddr *ifma;
+ int mcnt = 0;
+ struct ifnet *ifp = ha->ifp;
+
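+	/*
+	 * Flatten the interface's multicast list into mta[], capped at
+	 * Q8_MAX_NUM_MULTICAST_ADDRS entries, then hand it to hardware.
+	 */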
+ IF_ADDR_LOCK(ifp);
+
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+
+ if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
+ break;
+
+ bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+ &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
+
+ mcnt++;
+ }
+
+ IF_ADDR_UNLOCK(ifp);
+
+ qla_hw_set_multi(ha, mta, mcnt, add_multi);
+
+ return;
+}
+
+static int
+qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ int ret = 0;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
+ __func__, cmd));
+
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ QLA_LOCK(ha, __func__);
+ qla_init_locked(ha);
+ QLA_UNLOCK(ha, __func__);
+ }
+ QL_DPRINT4((ha->pci_dev,
+ "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
+ __func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
+
+ arp_ifinit(ifp, ifa);
+ if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
+ qla_config_ipv4_addr(ha,
+ (IA_SIN(ifa)->sin_addr.s_addr));
+ }
+ } else {
+ ether_ioctl(ifp, cmd, data);
+ }
+ break;
+
+ case SIOCSIFMTU:
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
+ __func__, cmd));
+
+ if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
+ ret = EINVAL;
+ } else {
+ QLA_LOCK(ha, __func__);
+ ifp->if_mtu = ifr->ifr_mtu;
+ ha->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ ret = qla_set_max_mtu(ha, ha->max_frame_size,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
+ }
+ QLA_UNLOCK(ha, __func__);
+
+ if (ret)
+ ret = EINVAL;
+ }
+
+ break;
+
+ case SIOCSIFFLAGS:
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
+ __func__, cmd));
+
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if ((ifp->if_flags ^ ha->if_flags) &
+ IFF_PROMISC) {
+ qla_set_promisc(ha);
+ } else if ((ifp->if_flags ^ ha->if_flags) &
+ IFF_ALLMULTI) {
+ qla_set_allmulti(ha);
+ }
+ } else {
+ QLA_LOCK(ha, __func__);
+ qla_init_locked(ha);
+ ha->max_frame_size = ifp->if_mtu +
+ ETHER_HDR_LEN + ETHER_CRC_LEN;
+ ret = qla_set_max_mtu(ha, ha->max_frame_size,
+ (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
+ QLA_UNLOCK(ha, __func__);
+ }
+ } else {
+ QLA_LOCK(ha, __func__);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ qla_stop(ha);
+ ha->if_flags = ifp->if_flags;
+ QLA_UNLOCK(ha, __func__);
+ }
+ break;
+
+ case SIOCADDMULTI:
+ QL_DPRINT4((ha->pci_dev,
+ "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ qla_set_multi(ha, 1);
+ }
+ break;
+
+ case SIOCDELMULTI:
+ QL_DPRINT4((ha->pci_dev,
+ "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ qla_set_multi(ha, 0);
+ }
+ break;
+
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ QL_DPRINT4((ha->pci_dev,
+ "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
+ __func__, cmd));
+ ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
+ break;
+
+ case SIOCSIFCAP:
+ {
+ int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+
+ QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
+ __func__, cmd));
+
+ if (mask & IFCAP_HWCSUM)
+ ifp->if_capenable ^= IFCAP_HWCSUM;
+ if (mask & IFCAP_TSO4)
+ ifp->if_capenable ^= IFCAP_TSO4;
+ if (mask & IFCAP_TSO6)
+ ifp->if_capenable ^= IFCAP_TSO6;
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ qla_init(ha);
+
+ VLAN_CAPABILITIES(ifp);
+ break;
+ }
+
+ default:
+ QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
+ __func__, cmd));
+ ret = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+
+ return (ret);
+}
+
+static int
+qla_media_change(struct ifnet *ifp)
+{
+ qla_host_t *ha;
+ struct ifmedia *ifm;
+ int ret = 0;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ ifm = &ha->media;
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ ret = EINVAL;
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
+
+ return (ret);
+}
+
+static void
+qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ qla_host_t *ha;
+
+ ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ qla_update_link_state(ha);
+ if (ha->hw.flags.link_up) {
+ ifmr->ifm_status |= IFM_ACTIVE;
+ ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
+ }
+
+	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
+ (ha->hw.flags.link_up ? "link_up" : "link_down")));
+
+ return;
+}
+
+void
+qla_start(struct ifnet *ifp)
+{
+ struct mbuf *m_head;
+ qla_host_t *ha = (qla_host_t *)ifp->if_softc;
+
+ QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
+
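+	/*
+	 * Transmit is serialized by tx_lock; bail out rather than block
+	 * if another context currently holds it.
+	 */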
+ if (!mtx_trylock(&ha->tx_lock)) {
+ QL_DPRINT8((ha->pci_dev,
+ "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
+ return;
+ }
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING) {
+ QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
+ QLA_TX_UNLOCK(ha);
+ return;
+ }
+
+ if (!ha->watchdog_ticks)
+ qla_update_link_state(ha);
+
+ if (!ha->hw.flags.link_up) {
+ QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
+ QLA_TX_UNLOCK(ha);
+ return;
+ }
+
+ while (ifp->if_snd.ifq_head != NULL) {
+ IF_DEQUEUE(&ifp->if_snd, m_head);
+
+ if (m_head == NULL) {
+ QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
+ __func__));
+ break;
+ }
+
+ if (qla_send(ha, &m_head)) {
+ if (m_head == NULL)
+ break;
+ QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ IF_PREPEND(&ifp->if_snd, m_head);
+ break;
+ }
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, m_head);
+ }
+ QLA_TX_UNLOCK(ha);
+ QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
+ return;
+}
+
+static int
+qla_send(qla_host_t *ha, struct mbuf **m_headp)
+{
+ bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
+ bus_dmamap_t map;
+ int nsegs;
+ int ret = -1;
+ uint32_t tx_idx;
+ struct mbuf *m_head = *m_headp;
+
+ QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
+
+ if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) {
+ ha->err_tx_dmamap_create++;
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_create failed[%d, %d]\n",
+ __func__, ret, m_head->m_pkthdr.len);
+ return (ret);
+ }
+
+ ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
+ BUS_DMA_NOWAIT);
+
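+	/*
+	 * If the mapping failed (EFBIG), or produced too many segments
+	 * for a frame that is not a legitimately oversized TSO frame,
+	 * defragment the chain into a single cluster and retry once.
+	 */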
+ if ((ret == EFBIG) ||
+ ((nsegs > Q8_TX_MAX_SEGMENTS) &&
+ (((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
+ (m_head->m_pkthdr.len <= ha->max_frame_size)))) {
+
+ struct mbuf *m;
+
+ QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
+ m_head->m_pkthdr.len));
+
+ m = m_defrag(m_head, M_DONTWAIT);
+ if (m == NULL) {
+ ha->err_tx_defrag++;
+ m_freem(m_head);
+ *m_headp = NULL;
+ device_printf(ha->pci_dev,
+ "%s: m_defrag() = NULL [%d]\n",
+ __func__, ret);
+ return (ENOBUFS);
+ }
+ m_head = m;
+
+ if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
+ segs, &nsegs, BUS_DMA_NOWAIT))) {
+
+ ha->err_tx_dmamap_load++;
+
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
+ __func__, ret, m_head->m_pkthdr.len);
+
+ bus_dmamap_destroy(ha->tx_tag, map);
+ if (ret != ENOMEM) {
+ m_freem(m_head);
+ *m_headp = NULL;
+ }
+ return (ret);
+ }
+ } else if (ret) {
+ ha->err_tx_dmamap_load++;
+
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
+ __func__, ret, m_head->m_pkthdr.len);
+
+ bus_dmamap_destroy(ha->tx_tag, map);
+
+ if (ret != ENOMEM) {
+ m_freem(m_head);
+ *m_headp = NULL;
+ }
+ return (ret);
+ }
+
+ QL_ASSERT((nsegs != 0), ("qla_send: empty packet"));
+
+ bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
+
+ if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) {
+ ha->tx_buf[tx_idx].m_head = m_head;
+ ha->tx_buf[tx_idx].map = map;
+ } else {
+ if (ret == EINVAL) {
+ m_freem(m_head);
+ *m_headp = NULL;
+ }
+ }
+
+ QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
+ return (ret);
+}
+
+static void
+qla_stop(qla_host_t *ha)
+{
+ struct ifnet *ifp = ha->ifp;
+ device_t dev;
+
+ dev = ha->pci_dev;
+
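+	/*
+	 * Teardown order: pause the watchdog, quiesce the receive path,
+	 * detach the hardware interface, then release LRO state and the
+	 * transmit/receive buffers.
+	 */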
+ ha->flags.qla_watchdog_pause = 1;
+ qla_mdelay(__func__, 100);
+
+ ha->flags.stop_rcv = 1;
+ qla_hw_stop_rcv(ha);
+
+ qla_del_hw_if(ha);
+
+ qla_free_lro(ha);
+
+ qla_free_xmt_bufs(ha);
+ qla_free_rcv_bufs(ha);
+
+ ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
+
+ return;
+}
+
+/*
+ * Buffer Management Functions for Transmit and Receive Rings
+ */
+static int
+qla_alloc_xmt_bufs(qla_host_t *ha)
+{
+ if (bus_dma_tag_create(NULL, /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ QLA_MAX_TSO_FRAME_SIZE, /* maxsize */
+ QLA_MAX_SEGMENTS, /* nsegments */
+ PAGE_SIZE, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &ha->tx_tag)) {
+ device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
+ __func__);
+ return (ENOMEM);
+ }
+ bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
+
+	return (0);
+}
+
+/*
+ * Release mbuf after it sent on the wire
+ */
+static void
+qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
+{
+ QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));
+
+ if (txb->m_head) {
+
+ bus_dmamap_unload(ha->tx_tag, txb->map);
+ bus_dmamap_destroy(ha->tx_tag, txb->map);
+
+ m_freem(txb->m_head);
+ txb->m_head = NULL;
+ }
+
+ QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
+}
+
+static void
+qla_free_xmt_bufs(qla_host_t *ha)
+{
+ int i;
+
+ for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
+ qla_clear_tx_buf(ha, &ha->tx_buf[i]);
+
+ if (ha->tx_tag != NULL) {
+ bus_dma_tag_destroy(ha->tx_tag);
+ ha->tx_tag = NULL;
+ }
+ bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
+
+ return;
+}
+
+static int
+qla_alloc_rcv_bufs(qla_host_t *ha)
+{
+ int i, j, ret = 0;
+ qla_rx_buf_t *rxb;
+
+ if (bus_dma_tag_create(NULL, /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MJUM9BYTES, /* maxsize */
+ 1, /* nsegments */
+ MJUM9BYTES, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &ha->rx_tag)) {
+
+ device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
+ __func__);
+
+ return (ENOMEM);
+ }
+
+ bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
+ bzero((void *)ha->rx_jbuf,
+ (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));
+
+ for (i = 0; i < MAX_SDS_RINGS; i++) {
+ ha->hw.sds[i].sdsr_next = 0;
+ ha->hw.sds[i].rxb_free = NULL;
+ ha->hw.sds[i].rx_free = 0;
+ ha->hw.sds[i].rxjb_free = NULL;
+ ha->hw.sds[i].rxj_free = 0;
+ }
+
+ for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
+
+ rxb = &ha->rx_buf[i];
+
+ ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);
+
+ if (ret) {
+ device_printf(ha->pci_dev,
+ "%s: dmamap[%d] failed\n", __func__, i);
+
+ for (j = 0; j < i; j++) {
+ bus_dmamap_destroy(ha->rx_tag,
+ ha->rx_buf[j].map);
+ }
+ goto qla_alloc_rcv_bufs_failed;
+ }
+ }
+
+ qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL);
+
+ for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
+ rxb = &ha->rx_buf[i];
+ rxb->handle = i;
+ if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) {
+ /*
+ * set the physical address in the corresponding
+ * descriptor entry in the receive ring/queue for the
+ * hba
+ */
+ qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i,
+ rxb->handle, rxb->paddr,
+ (rxb->m_head)->m_pkthdr.len);
+ } else {
+ device_printf(ha->pci_dev,
+ "%s: qla_get_mbuf [standard(%d)] failed\n",
+ __func__, i);
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ goto qla_alloc_rcv_bufs_failed;
+ }
+ }
+
+ for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
+
+ rxb = &ha->rx_jbuf[i];
+
+ ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);
+
+ if (ret) {
+ device_printf(ha->pci_dev,
+ "%s: dmamap[%d] failed\n", __func__, i);
+
+ for (j = 0; j < i; j++) {
+ bus_dmamap_destroy(ha->rx_tag,
+ ha->rx_jbuf[j].map);
+ }
+ goto qla_alloc_rcv_bufs_failed;
+ }
+ }
+
+ qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO);
+
+ for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
+ rxb = &ha->rx_jbuf[i];
+ rxb->handle = i;
+ if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) {
+ /*
+ * set the physical address in the corresponding
+ * descriptor entry in the receive ring/queue for the
+ * hba
+ */
+ qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i,
+ rxb->handle, rxb->paddr,
+ (rxb->m_head)->m_pkthdr.len);
+ } else {
+ device_printf(ha->pci_dev,
+ "%s: qla_get_mbuf [jumbo(%d)] failed\n",
+ __func__, i);
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ goto qla_alloc_rcv_bufs_failed;
+ }
+ }
+
+ return (0);
+
+qla_alloc_rcv_bufs_failed:
+ qla_free_rcv_bufs(ha);
+ return (ret);
+}
+
+static void
+qla_free_rcv_bufs(qla_host_t *ha)
+{
+ int i;
+ qla_rx_buf_t *rxb;
+
+ for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
+ rxb = &ha->rx_buf[i];
+ if (rxb->m_head != NULL) {
+ bus_dmamap_unload(ha->rx_tag, rxb->map);
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ m_freem(rxb->m_head);
+ rxb->m_head = NULL;
+ }
+ }
+
+ for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
+ rxb = &ha->rx_jbuf[i];
+ if (rxb->m_head != NULL) {
+ bus_dmamap_unload(ha->rx_tag, rxb->map);
+ bus_dmamap_destroy(ha->rx_tag, rxb->map);
+ m_freem(rxb->m_head);
+ rxb->m_head = NULL;
+ }
+ }
+
+ if (ha->rx_tag != NULL) {
+ bus_dma_tag_destroy(ha->rx_tag);
+ ha->rx_tag = NULL;
+ }
+
+ bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
+ bzero((void *)ha->rx_jbuf,
+ (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));
+
+ for (i = 0; i < MAX_SDS_RINGS; i++) {
+ ha->hw.sds[i].sdsr_next = 0;
+ ha->hw.sds[i].rxb_free = NULL;
+ ha->hw.sds[i].rx_free = 0;
+ ha->hw.sds[i].rxjb_free = NULL;
+ ha->hw.sds[i].rxj_free = 0;
+ }
+
+ return;
+}
+
+int
+qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
+ uint32_t jumbo)
+{
+ register struct mbuf *mp = nmp;
+ struct ifnet *ifp;
+ int ret = 0;
+ uint32_t offset;
+
+ QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo));
+
+ ifp = ha->ifp;
+
+ if (mp == NULL) {
+
+ if (!jumbo) {
+ mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+
+ if (mp == NULL) {
+ ha->err_m_getcl++;
+ ret = ENOBUFS;
+ device_printf(ha->pci_dev,
+ "%s: m_getcl failed\n", __func__);
+ goto exit_qla_get_mbuf;
+ }
+ mp->m_len = mp->m_pkthdr.len = MCLBYTES;
+ } else {
+ mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
+ MJUM9BYTES);
+ if (mp == NULL) {
+ ha->err_m_getjcl++;
+ ret = ENOBUFS;
+ device_printf(ha->pci_dev,
+ "%s: m_getjcl failed\n", __func__);
+ goto exit_qla_get_mbuf;
+ }
+ mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
+ }
+ } else {
+ if (!jumbo)
+ mp->m_len = mp->m_pkthdr.len = MCLBYTES;
+ else
+ mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
+
+ mp->m_data = mp->m_ext.ext_buf;
+ mp->m_next = NULL;
+ }
+
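+	/*
+	 * Advance the data pointer so the payload starts on an 8-byte
+	 * boundary (assumed receive DMA alignment requirement).
+	 */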
+ offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
+ if (offset) {
+ offset = 8 - offset;
+ m_adj(mp, offset);
+ }
+
+ /*
+ * Using memory from the mbuf cluster pool, invoke the bus_dma
+ * machinery to arrange the memory mapping.
+ */
+ ret = bus_dmamap_load(ha->rx_tag, rxb->map,
+ mtod(mp, void *), mp->m_len,
+ qla_dmamap_callback, &rxb->paddr,
+ BUS_DMA_NOWAIT);
+ if (ret || !rxb->paddr) {
+ m_free(mp);
+ rxb->m_head = NULL;
+ device_printf(ha->pci_dev,
+ "%s: bus_dmamap_load failed\n", __func__);
+ ret = -1;
+ goto exit_qla_get_mbuf;
+ }
+ rxb->m_head = mp;
+ bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
+
+exit_qla_get_mbuf:
+ QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
+ return (ret);
+}
+
+static void
+qla_tx_done(void *context, int pending)
+{
+ qla_host_t *ha = context;
+
+ qla_hw_tx_done(ha);
+ qla_start(ha->ifp);
+}
+
diff --git a/sys/dev/qlxgb/qla_os.h b/sys/dev/qlxgb/qla_os.h
new file mode 100644
index 0000000..955be5d
--- /dev/null
+++ b/sys/dev/qlxgb/qla_os.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_os.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLA_OS_H_
+#define _QLA_OS_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/kernel.h>
+#include <sys/sockio.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
+#include <sys/conf.h>
+
+#if __FreeBSD_version < 700112
+#error FreeBSD Version not supported - use version >= 700112
+#endif
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/bpf.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+#include <netinet/in_var.h>
+#include <netinet/tcp_lro.h>
+
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+#include <sys/pcpu.h>
+
+#include <sys/unistd.h>
+#include <sys/kthread.h>
+
+#define QLA_USEC_DELAY(usec) DELAY(usec)
+
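+/*
+ * Convert milliseconds to callout ticks, clamping the result to the
+ * range [1, INT_MAX].
+ */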
+static __inline int qla_ms_to_hz(int ms)
+{
+	int qla_hz;
+	struct timeval t;
+
+ t.tv_sec = ms / 1000;
+ t.tv_usec = (ms % 1000) * 1000;
+
+ qla_hz = tvtohz(&t);
+
+ if (qla_hz < 0)
+ qla_hz = 0x7fffffff;
+ if (!qla_hz)
+ qla_hz = 1;
+
+ return (qla_hz);
+}
+
+static __inline int qla_sec_to_hz(int sec)
+{
+ struct timeval t;
+
+ t.tv_sec = sec;
+ t.tv_usec = 0;
+
+ return (tvtohz(&t));
+}
+
+#define qla_host_to_le16(x) htole16(x)
+#define qla_host_to_le32(x) htole32(x)
+#define qla_host_to_le64(x) htole64(x)
+#define qla_host_to_be16(x) htobe16(x)
+#define qla_host_to_be32(x) htobe32(x)
+#define qla_host_to_be64(x) htobe64(x)
+
+#define qla_le16_to_host(x) le16toh(x)
+#define qla_le32_to_host(x) le32toh(x)
+#define qla_le64_to_host(x) le64toh(x)
+#define qla_be16_to_host(x) be16toh(x)
+#define qla_be32_to_host(x) be32toh(x)
+#define qla_be64_to_host(x) be64toh(x)
+
+MALLOC_DECLARE(M_QLA8XXXBUF);
+
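+/*
+ * Millisecond delay: busy-wait while the system is cold (scheduler not
+ * yet running), otherwise sleep via pause().
+ */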
+#define qla_mdelay(fn, msecs) \
+ {\
+ if (cold) \
+ DELAY((msecs * 1000)); \
+ else \
+ pause(fn, qla_ms_to_hz(msecs)); \
+ }
+
+/*
+ * Locks
+ */
+#define QLA_LOCK(ha, str)	qla_lock(ha, str)
+#define QLA_UNLOCK(ha, str)	qla_unlock(ha, str)
+
+#define QLA_TX_LOCK(ha)		mtx_lock(&ha->tx_lock)
+#define QLA_TX_UNLOCK(ha)	mtx_unlock(&ha->tx_lock)
+
+#define QLA_RX_LOCK(ha)		mtx_lock(&ha->rx_lock)
+#define QLA_RX_UNLOCK(ha)	mtx_unlock(&ha->rx_lock)
+
+#define QLA_RXJ_LOCK(ha)	mtx_lock(&ha->rxj_lock)
+#define QLA_RXJ_UNLOCK(ha)	mtx_unlock(&ha->rxj_lock)
+
+/*
+ * structure encapsulating a DMA buffer
+ */
+struct qla_dma {
+ bus_size_t alignment;
+ uint32_t size;
+ void *dma_b;
+ bus_addr_t dma_addr;
+ bus_dmamap_t dma_map;
+ bus_dma_tag_t dma_tag;
+};
+typedef struct qla_dma qla_dma_t;
+
+#define QL_ASSERT(x, y) if (!(x)) panic y
+
+#endif /* #ifndef _QLA_OS_H_ */
diff --git a/sys/dev/qlxgb/qla_reg.h b/sys/dev/qlxgb/qla_reg.h
new file mode 100644
index 0000000..2f190f3
--- /dev/null
+++ b/sys/dev/qlxgb/qla_reg.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_reg.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLA_REG_H_
+#define _QLA_REG_H_
+
+/*
+ * Begin Definitions for QLA82xx Registers
+ */
+
+/*
+ * Register offsets for QLA8022
+ */
+
+/******************************
+ * PCIe Registers
+ ******************************/
+#define Q8_CRB_WINDOW_2M 0x130060
+
+#define Q8_INT_VECTOR 0x130100
+#define Q8_INT_MASK 0x130104
+
+#define Q8_INT_TARGET_STATUS_F0 0x130118
+#define Q8_INT_TARGET_MASK_F0 0x130128
+#define Q8_INT_TARGET_STATUS_F1 0x130160
+#define Q8_INT_TARGET_MASK_F1 0x130170
+#define Q8_INT_TARGET_STATUS_F2 0x130164
+#define Q8_INT_TARGET_MASK_F2 0x130174
+#define Q8_INT_TARGET_STATUS_F3 0x130168
+#define Q8_INT_TARGET_MASK_F3 0x130178
+#define Q8_INT_TARGET_STATUS_F4 0x130360
+#define Q8_INT_TARGET_MASK_F4 0x130370
+#define Q8_INT_TARGET_STATUS_F5 0x130364
+#define Q8_INT_TARGET_MASK_F5 0x130374
+#define Q8_INT_TARGET_STATUS_F6 0x130368
+#define Q8_INT_TARGET_MASK_F6 0x130378
+#define Q8_INT_TARGET_STATUS_F7 0x13036C
+#define Q8_INT_TARGET_MASK_F7 0x13037C
+
+#define Q8_SEM2_LOCK 0x13C010
+#define Q8_SEM2_UNLOCK 0x13C014
+#define Q8_SEM3_LOCK 0x13C018
+#define Q8_SEM3_UNLOCK 0x13C01C
+#define Q8_SEM5_LOCK 0x13C028
+#define Q8_SEM5_UNLOCK 0x13C02C
+#define Q8_SEM7_LOCK 0x13C038
+#define Q8_SEM7_UNLOCK 0x13C03C
+
+/* Valid bit for a SEM<N>_LOCK registers */
+#define SEM_LOCK_BIT 0x00000001
+
+#define Q8_ROM_LOCKID 0x1B2100
+
+/*******************************
+ * Firmware Interface Registers
+ *******************************/
+#define Q8_FW_VER_MAJOR 0x1B2150
+#define Q8_FW_VER_MINOR 0x1B2154
+#define Q8_FW_VER_SUB 0x1B2158
+#define Q8_FW_VER_BUILD 0x1B2168
+
+#define Q8_CMDPEG_STATE 0x1B2250
+#define Q8_RCVPEG_STATE 0x1B233C
+/*
+ * definitions for Q8_CMDPEG_STATE
+ */
+#define CMDPEG_PHAN_INIT_COMPLETE 0xFF01
+
+#define Q8_ROM_STATUS 0x1A0004
+/*
+ * definitions for Q8_ROM_STATUS
+ * bit definitions for Q8_UNM_ROMUSB_GLB_STATUS
+ * 31:3 Reserved; Rest as below
+ */
+#define ROM_STATUS_RDY 0x0004
+#define ROM_STATUS_DONE 0x0002
+#define ROM_STATUS_AUTO_ROM_SHDW 0x0001
+
+#define Q8_ASIC_RESET 0x1A0008
+/*
+ * definitions for Q8_ASIC_RESET
+ */
+#define ASIC_RESET_RST_XDMA 0x00800000 /* Reset XDMA */
+#define ASIC_RESET_PEG_ICACHE 0x00000020 /* Reset PEG_ICACHE */
+#define ASIC_RESET_PEG_DCACHE 0x00000010 /* Reset PEG_DCACHE */
+#define ASIC_RESET_PEG_3 0x00000008 /* Reset PEG_3 */
+#define ASIC_RESET_PEG_2 0x00000004 /* Reset PEG_2 */
+#define ASIC_RESET_PEG_1 0x00000002 /* Reset PEG_1 */
+#define ASIC_RESET_PEG_0 0x00000001 /* Reset PEG_0 */
+
+#define Q8_COLD_BOOT 0x1B21FC
+/*
+ * definitions for Q8_COLD_BOOT
+ */
+#define COLD_BOOT_VALUE 0x12345678
+
+#define Q8_MIU_TEST_AGT_CTRL 0x180090
+#define Q8_MIU_TEST_AGT_ADDR_LO 0x180094
+#define Q8_MIU_TEST_AGT_ADDR_HI 0x180098
+#define Q8_MIU_TEST_AGT_WRDATA_LO 0x1800A0
+#define Q8_MIU_TEST_AGT_WRDATA_HI 0x1800A4
+#define Q8_MIU_TEST_AGT_RDDATA_LO 0x1800A8
+#define Q8_MIU_TEST_AGT_RDDATA_HI 0x1800AC
+#define Q8_MIU_TEST_AGT_WRDATA_ULO 0x1800B0
+#define Q8_MIU_TEST_AGT_WRDATA_UHI 0x1800B4
+#define Q8_MIU_TEST_AGT_RDDATA_ULO 0x1800B8
+#define Q8_MIU_TEST_AGT_RDDATA_UHI 0x1800BC
+
+#define Q8_PEG_0_RESET 0x160018
+#define Q8_PEG_0_CLR1 0x160008
+#define Q8_PEG_0_CLR2 0x16000C
+#define Q8_PEG_1_CLR1 0x161008
+#define Q8_PEG_1_CLR2 0x16100C
+#define Q8_PEG_2_CLR1 0x162008
+#define Q8_PEG_2_CLR2 0x16200C
+#define Q8_PEG_3_CLR1 0x163008
+#define Q8_PEG_3_CLR2 0x16300C
+#define Q8_PEG_4_CLR1 0x164008
+#define Q8_PEG_4_CLR2 0x16400C
+#define Q8_PEG_D_RESET1 0x1650EC
+#define Q8_PEG_D_RESET2 0x16504C
+#define Q8_PEG_HALT_STATUS1 0x1B20A8
+#define Q8_PEG_HALT_STATUS2 0x1B20AC
+#define Q8_FIRMWARE_HEARTBEAT 0x1B20B0
+#define Q8_PEG_I_RESET 0x16604C
+
+#define Q8_CRB_MAC_BLOCK_START 0x1B21C0
+
+/***************************************************
+ * Flash ROM Access Registers ( Indirect Registers )
+ ***************************************************/
+
+#define Q8_ROM_INSTR_OPCODE 0x03310004
+/*
+ * bit definitions for Q8_ROM_INSTR_OPCODE
+ * 31:8 Reserved; Rest Below
+ */
+#define ROM_OPCODE_WR_STATUS_REG 0x01
+#define ROM_OPCODE_PROG_PAGE 0x02
+#define ROM_OPCODE_RD_BYTE 0x03
+#define ROM_OPCODE_WR_DISABLE 0x04
+#define ROM_OPCODE_RD_STATUS_REG 0x05
+#define ROM_OPCODE_WR_ENABLE 0x06
+#define ROM_OPCODE_FAST_RD 0x0B
+#define ROM_OPCODE_REL_DEEP_PWR_DWN 0xAB
+#define ROM_OPCODE_BULK_ERASE 0xC7
+#define ROM_OPCODE_DEEP_PWR_DWN 0xC9
+#define ROM_OPCODE_SECTOR_ERASE 0xD8
+
+#define Q8_ROM_ADDRESS 0x03310008
+/*
+ * bit definitions for Q8_ROM_ADDRESS
+ * 31:24 Reserved;
+ * 23:0 Physical ROM Address in bytes
+ */
+
+#define Q8_ROM_ADDR_BYTE_COUNT 0x03310010
+/*
+ * bit definitions for Q8_ROM_ADDR_BYTE_COUNT
+ * 31:2 Reserved;
+ * 1:0 max address bytes for ROM Interface
+ */
+
+#define Q8_ROM_DUMMY_BYTE_COUNT 0x03310014
+/*
+ * bit definitions for Q8_ROM_DUMMY_BYTE_COUNT
+ * 31:2 Reserved;
+ * 1:0 dummy bytes for ROM Instructions
+ */
+
+#define Q8_ROM_RD_DATA 0x03310018
+
+#define Q8_NX_CDRP_CMD_RSP 0x1B2218
+#define Q8_NX_CDRP_ARG1 0x1B221C
+#define Q8_NX_CDRP_ARG2 0x1B2220
+#define Q8_NX_CDRP_ARG3 0x1B2224
+#define Q8_NX_CDRP_SIGNATURE 0x1B2228
+
+#define Q8_LINK_STATE 0x1B2298
+#define Q8_LINK_SPEED_0 0x1B22E8
+/*
+ * Macros for reading and writing registers
+ */
+
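+/*
+ * Memory barriers: mapped to x86 fence instructions where available;
+ * other architectures currently get no-ops.
+ */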
+#if defined(__i386__) || defined(__amd64__)
+#define Q8_MB() __asm volatile("mfence" ::: "memory")
+#define Q8_WMB() __asm volatile("sfence" ::: "memory")
+#define Q8_RMB() __asm volatile("lfence" ::: "memory")
+#else
+#define Q8_MB()
+#define Q8_WMB()
+#define Q8_RMB()
+#endif
+
+#define READ_REG32(ha, reg) bus_read_4((ha->pci_reg), reg)
+#define READ_OFFSET32(ha, off) READ_REG32(ha, off)
+
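+/*
+ * WRITE_REG32/WRITE_OFFSET32 read the register back to flush posted
+ * PCI writes; WRITE_REG32_MB instead orders the store with a write
+ * barrier.
+ */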
+#define WRITE_REG32(ha, reg, val) \
+ {\
+ bus_write_4((ha->pci_reg), reg, val);\
+ bus_read_4((ha->pci_reg), reg);\
+ }
+
+#define WRITE_REG32_MB(ha, reg, val) \
+ {\
+ Q8_WMB();\
+ bus_write_4((ha->pci_reg), reg, val);\
+ }
+
+#define WRITE_OFFSET32(ha, off, val)\
+ {\
+ bus_write_4((ha->pci_reg), off, val);\
+ bus_read_4((ha->pci_reg), off);\
+ }
+
+#endif /* #ifndef _QLA_REG_H_ */
diff --git a/sys/dev/qlxgb/qla_ver.h b/sys/dev/qlxgb/qla_ver.h
new file mode 100644
index 0000000..8c33ff4
--- /dev/null
+++ b/sys/dev/qlxgb/qla_ver.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2010-2011 Qlogic Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * File: qla_ver.h
+ * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
+ */
+
+#ifndef _QLA_VER_H_
+#define _QLA_VER_H_
+
+#define QLA_VERSION_MAJOR 1
+#define QLA_VERSION_MINOR 1
+#define QLA_VERSION_BUILD 30
+
+#endif /* #ifndef _QLA_VER_H_ */
diff --git a/sys/dev/quicc/quicc_core.c b/sys/dev/quicc/quicc_core.c
index 21cfdb3..532bb8e 100644
--- a/sys/dev/quicc/quicc_core.c
+++ b/sys/dev/quicc/quicc_core.c
@@ -61,7 +61,7 @@ __FBSDID("$FreeBSD$");
devclass_t quicc_devclass;
char quicc_driver_name[] = "quicc";
-MALLOC_DEFINE(M_QUICC, "QUICC", "QUICC driver");
+static MALLOC_DEFINE(M_QUICC, "QUICC", "QUICC driver");
struct quicc_device {
struct rman *qd_rman;
diff --git a/sys/dev/re/if_re.c b/sys/dev/re/if_re.c
index 704276e..3ade04c 100644
--- a/sys/dev/re/if_re.c
+++ b/sys/dev/re/if_re.c
@@ -171,7 +171,7 @@ TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
/*
* Various supported device vendors/types and their names.
*/
-static struct rl_type re_devs[] = {
+static const struct rl_type re_devs[] = {
{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
"D-Link DGE-528(T) Gigabit Ethernet Adapter" },
{ DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
@@ -181,7 +181,7 @@ static struct rl_type re_devs[] = {
{ RT_VENDORID, RT_DEVICEID_8101E, 0,
"RealTek 810xE PCIe 10/100baseTX" },
{ RT_VENDORID, RT_DEVICEID_8168, 0,
- "RealTek 8168/8111 B/C/CP/D/DP/E PCIe Gigabit Ethernet" },
+ "RealTek 8168/8111 B/C/CP/D/DP/E/F PCIe Gigabit Ethernet" },
{ RT_VENDORID, RT_DEVICEID_8169, 0,
"RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
@@ -194,8 +194,8 @@ static struct rl_type re_devs[] = {
"US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
};
-static struct rl_hwrev re_hwrevs[] = {
- { RL_HWREV_8139, RL_8139, "", RL_MTU },
+static const struct rl_hwrev re_hwrevs[] = {
+ { RL_HWREV_8139, RL_8139, "", RL_MTU },
{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
@@ -220,7 +220,9 @@ static struct rl_hwrev re_hwrevs[] = {
{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
{ RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
+ { RL_HWREV_8402, RL_8169, "8402", RL_MTU },
{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
+ { RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
@@ -230,6 +232,8 @@ static struct rl_hwrev re_hwrevs[] = {
{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K},
{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K},
+ { RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K},
+ { RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K},
{ 0, 0, NULL, 0 }
};
@@ -868,7 +872,7 @@ re_diag(struct rl_softc *sc)
device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
dst, ":", src, ":", ETHERTYPE_IP);
device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
- eh->ether_dhost, ":", eh->ether_shost, ":",
+ eh->ether_dhost, ":", eh->ether_shost, ":",
ntohs(eh->ether_type));
device_printf(sc->rl_dev, "You may have a defective 32-bit "
"NIC plugged into a 64-bit PCI slot.\n");
@@ -903,7 +907,7 @@ done:
static int
re_probe(device_t dev)
{
- struct rl_type *t;
+ const struct rl_type *t;
uint16_t devid, vendor;
uint16_t revid, sdevid;
int i;
@@ -1183,7 +1187,8 @@ re_attach(device_t dev)
u_int16_t as[ETHER_ADDR_LEN / 2];
struct rl_softc *sc;
struct ifnet *ifp;
- struct rl_hwrev *hw_rev;
+ const struct rl_hwrev *hw_rev;
+ u_int32_t cap, ctl;
int hwrev;
u_int16_t devid, re_did = 0;
int error = 0, i, phy, rid;
@@ -1239,8 +1244,10 @@ re_attach(device_t dev)
msic = pci_msi_count(dev);
msixc = pci_msix_count(dev);
- if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0)
+ if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
sc->rl_flags |= RL_FLAG_PCIE;
+ sc->rl_expcap = reg;
+ }
if (bootverbose) {
device_printf(dev, "MSI count : %d\n", msic);
device_printf(dev, "MSI-X count : %d\n", msixc);
@@ -1332,6 +1339,23 @@ re_attach(device_t dev)
CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
}
+ /* Disable ASPM L0S/L1. */
+ if (sc->rl_expcap != 0) {
+ cap = pci_read_config(dev, sc->rl_expcap +
+ PCIR_EXPRESS_LINK_CAP, 2);
+ if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
+ ctl = pci_read_config(dev, sc->rl_expcap +
+ PCIR_EXPRESS_LINK_CTL, 2);
+ if ((ctl & 0x0003) != 0) {
+ ctl &= ~0x0003;
+ pci_write_config(dev, sc->rl_expcap +
+ PCIR_EXPRESS_LINK_CTL, ctl, 2);
+ device_printf(dev, "ASPM disabled\n");
+ }
+ } else
+ device_printf(dev, "no ASPM capability\n");
+ }
+
hw_rev = re_hwrevs;
hwrev = CSR_READ_4(sc, RL_TXCFG);
switch (hwrev & 0x70000000) {
@@ -1381,7 +1405,9 @@ re_attach(device_t dev)
RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
break;
case RL_HWREV_8401E:
+ case RL_HWREV_8402:
case RL_HWREV_8105E:
+ case RL_HWREV_8105E_SPIN1:
sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
@@ -1413,6 +1439,8 @@ re_attach(device_t dev)
RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2;
break;
case RL_HWREV_8168E_VL:
+ case RL_HWREV_8168F:
+ case RL_HWREV_8411:
sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2;
@@ -3308,6 +3336,7 @@ re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
}
}
#endif /* DEVICE_POLLING */
+ RL_LOCK(sc);
if ((mask & IFCAP_TXCSUM) != 0 &&
(ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
ifp->if_capenable ^= IFCAP_TXCSUM;
@@ -3366,8 +3395,9 @@ re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
}
if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- re_init(sc);
+ re_init_locked(sc);
}
+ RL_UNLOCK(sc);
VLAN_CAPABILITIES(ifp);
}
break;
diff --git a/sys/dev/rndtest/rndtest.c b/sys/dev/rndtest/rndtest.c
index 15ddccee..4e42aa8 100644
--- a/sys/dev/rndtest/rndtest.c
+++ b/sys/dev/rndtest/rndtest.c
@@ -70,8 +70,8 @@ static const struct rndtest_testfunc {
#define RNDTEST_NTESTS (sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
-SYSCTL_NODE(_kern, OID_AUTO, rndtest, CTLFLAG_RD, 0, "RNG test parameters");
-
+static SYSCTL_NODE(_kern, OID_AUTO, rndtest, CTLFLAG_RD, 0,
+ "RNG test parameters");
static int rndtest_retest = 120; /* interval in seconds */
SYSCTL_INT(_kern_rndtest, OID_AUTO, retest, CTLFLAG_RW, &rndtest_retest,
0, "retest interval (seconds)");
diff --git a/sys/dev/rt/if_rt.c b/sys/dev/rt/if_rt.c
index 22105dc..edaf95e 100644
--- a/sys/dev/rt/if_rt.c
+++ b/sys/dev/rt/if_rt.c
@@ -136,7 +136,7 @@ static int rt_miibus_writereg(device_t, int, int, int);
static int rt_ifmedia_upd(struct ifnet *);
static void rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);
-SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
#ifdef IF_RT_DEBUG
static int rt_debug = 0;
SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RW, &rt_debug, 0,
diff --git a/sys/dev/safe/safe.c b/sys/dev/safe/safe.c
index 18ef5e5..00c00ea 100644
--- a/sys/dev/safe/safe.c
+++ b/sys/dev/safe/safe.c
@@ -147,7 +147,8 @@ static void safe_totalreset(struct safe_softc *);
static int safe_free_entry(struct safe_softc *, struct safe_ringentry *);
-SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0, "SafeNet driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0,
+ "SafeNet driver parameters");
#ifdef SAFE_DEBUG
static void safe_dump_dmastatus(struct safe_softc *, const char *);
diff --git a/sys/dev/scc/scc_core.c b/sys/dev/scc/scc_core.c
index 76388ad..4e12244 100644
--- a/sys/dev/scc/scc_core.c
+++ b/sys/dev/scc/scc_core.c
@@ -48,7 +48,7 @@ __FBSDID("$FreeBSD$");
devclass_t scc_devclass;
char scc_driver_name[] = "scc";
-MALLOC_DEFINE(M_SCC, "SCC", "SCC driver");
+static MALLOC_DEFINE(M_SCC, "SCC", "SCC driver");
static int
scc_bfe_intr(void *arg)
diff --git a/sys/dev/sdhci/sdhci.c b/sys/dev/sdhci/sdhci.c
index 24cba57..e071155 100644
--- a/sys/dev/sdhci/sdhci.c
+++ b/sys/dev/sdhci/sdhci.c
@@ -154,7 +154,7 @@ struct sdhci_softc {
struct sdhci_slot slots[6];
};
-SYSCTL_NODE(_hw, OID_AUTO, sdhci, CTLFLAG_RD, 0, "sdhci driver");
+static SYSCTL_NODE(_hw, OID_AUTO, sdhci, CTLFLAG_RD, 0, "sdhci driver");
int sdhci_debug;
TUNABLE_INT("hw.sdhci.debug", &sdhci_debug);
diff --git a/sys/dev/sfxge/common/efsys.h b/sys/dev/sfxge/common/efsys.h
new file mode 100644
index 0000000..3e83d24
--- /dev/null
+++ b/sys/dev/sfxge/common/efsys.h
@@ -0,0 +1,834 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SYS_EFSYS_H
+#define _SYS_EFSYS_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/rwlock.h>
+#include <sys/sdt.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+#include <machine/endian.h>
+
+#define EFSYS_HAS_UINT64 1
+#define EFSYS_USE_UINT64 0
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 1
+#define EFSYS_IS_LITTLE_ENDIAN 0
+#elif _BYTE_ORDER == _LITTLE_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 0
+#define EFSYS_IS_LITTLE_ENDIAN 1
+#endif
+#include "efx_types.h"
+
+/* Common code requires this */
+#if __FreeBSD_version < 800068
+#define memmove(d, s, l) bcopy(s, d, l)
+#endif
+
+/* FreeBSD equivalents of Solaris things */
+#ifndef _NOTE
+#define _NOTE(s)
+#endif
+
+#ifndef B_FALSE
+#define B_FALSE FALSE
+#endif
+#ifndef B_TRUE
+#define B_TRUE TRUE
+#endif
+
+#ifndef IS_P2ALIGNED
+#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
+#endif
+
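+/* Round x up to the next multiple of align (align must be a power of two). */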
+#ifndef P2ROUNDUP
+#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
+#endif
+
+#ifndef ISP2
+#define ISP2(x) (((x) & ((x) - 1)) == 0)
+#endif
+
+#define ENOTACTIVE EINVAL
+
+/* Memory type to use on FreeBSD */
+MALLOC_DECLARE(M_SFXGE);
+
+/* Machine-dependent prefetch wrappers */
+#if defined(__i386__) || defined(__amd64__)
+static __inline void
+prefetch_read_many(void *addr)
+{
+
+ __asm__(
+ "prefetcht0 (%0)"
+ :
+ : "r" (addr));
+}
+
+static __inline void
+prefetch_read_once(void *addr)
+{
+
+ __asm__(
+ "prefetchnta (%0)"
+ :
+ : "r" (addr));
+}
+#elif defined(__sparc64__)
+static __inline void
+prefetch_read_many(void *addr)
+{
+
+ __asm__(
+ "prefetch [%0], 0"
+ :
+ : "r" (addr));
+}
+
+static __inline void
+prefetch_read_once(void *addr)
+{
+
+ __asm__(
+ "prefetch [%0], 1"
+ :
+ : "r" (addr));
+}
+#else
+static __inline void
+prefetch_read_many(void *addr)
+{
+
+}
+
+static __inline void
+prefetch_read_once(void *addr)
+{
+
+}
+#endif
+
+#if defined(__i386__) || defined(__amd64__)
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#endif
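+/*
+ * Fast single-segment mbuf mapping: on x86 translate the buffer's
+ * virtual address directly with pmap_kextract(); other platforms fall
+ * back to bus_dmamap_load_mbuf_sg().
+ */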
+static __inline void
+sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
+ struct mbuf *m, bus_dma_segment_t *seg)
+{
+#if defined(__i386__) || defined(__amd64__)
+ seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
+ seg->ds_len = m->m_len;
+#else
+ int nsegstmp;
+
+ bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
+#endif
+}
+
+/* Modifiers used for DOS builds */
+#define __cs
+#define __far
+
+/* Modifiers used for Windows builds */
+#define __in
+#define __in_opt
+#define __in_ecount(_n)
+#define __in_ecount_opt(_n)
+#define __in_bcount(_n)
+#define __in_bcount_opt(_n)
+
+#define __out
+#define __out_opt
+#define __out_ecount(_n)
+#define __out_ecount_opt(_n)
+#define __out_bcount(_n)
+#define __out_bcount_opt(_n)
+
+#define __deref_out
+
+#define __inout
+#define __inout_opt
+#define __inout_ecount(_n)
+#define __inout_ecount_opt(_n)
+#define __inout_bcount(_n)
+#define __inout_bcount_opt(_n)
+#define __inout_bcount_full_opt(_n)
+
+#define __deref_out_bcount_opt(n)
+
+#define __checkReturn
+
+#define __drv_when(_p, _c)
+
+/* Code inclusion options */
+
+#define EFSYS_OPT_NAMES 1
+
+#define EFSYS_OPT_FALCON 0
+#define EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE 0
+#define EFSYS_OPT_SIENA 1
+#ifdef DEBUG
+#define EFSYS_OPT_CHECK_REG 1
+#else
+#define EFSYS_OPT_CHECK_REG 0
+#endif
+
+#define EFSYS_OPT_MCDI 1
+
+#define EFSYS_OPT_MAC_FALCON_GMAC 0
+#define EFSYS_OPT_MAC_FALCON_XMAC 0
+#define EFSYS_OPT_MAC_STATS 1
+
+#define EFSYS_OPT_LOOPBACK 0
+
+#define EFSYS_OPT_MON_NULL 0
+#define EFSYS_OPT_MON_LM87 0
+#define EFSYS_OPT_MON_MAX6647 0
+#define EFSYS_OPT_MON_SIENA 0
+#define EFSYS_OPT_MON_STATS 0
+
+#define EFSYS_OPT_PHY_NULL 0
+#define EFSYS_OPT_PHY_QT2022C2 0
+#define EFSYS_OPT_PHY_SFX7101 0
+#define EFSYS_OPT_PHY_TXC43128 0
+#define EFSYS_OPT_PHY_PM8358 0
+#define EFSYS_OPT_PHY_SFT9001 0
+#define EFSYS_OPT_PHY_QT2025C 0
+#define EFSYS_OPT_PHY_STATS 1
+#define EFSYS_OPT_PHY_PROPS 0
+#define EFSYS_OPT_PHY_BIST 1
+#define EFSYS_OPT_PHY_LED_CONTROL 1
+#define EFSYS_OPT_PHY_FLAGS 0
+
+#define EFSYS_OPT_VPD 1
+#define EFSYS_OPT_NVRAM 1
+#define EFSYS_OPT_NVRAM_FALCON_BOOTROM 0
+#define EFSYS_OPT_NVRAM_SFT9001 0
+#define EFSYS_OPT_NVRAM_SFX7101 0
+#define EFSYS_OPT_BOOTCFG 0
+
+#define EFSYS_OPT_PCIE_TUNE 0
+#define EFSYS_OPT_DIAG 0
+#define EFSYS_OPT_WOL 1
+#define EFSYS_OPT_RX_SCALE 1
+#define EFSYS_OPT_QSTATS 1
+#define EFSYS_OPT_FILTER 0
+#define EFSYS_OPT_RX_SCATTER 0
+#define EFSYS_OPT_RX_HDR_SPLIT 0
+
+#define EFSYS_OPT_EV_PREFETCH 0
+
+#define EFSYS_OPT_DECODE_INTR_FATAL 1
+
+/* ID */
+
+typedef struct __efsys_identifier_s efsys_identifier_t;
+
+/* PROBE */
+
+#ifndef KDTRACE_HOOKS
+
+#define EFSYS_PROBE(_name)
+
+#define EFSYS_PROBE1(_name, _type1, _arg1)
+
+#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)
+
+#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3)
+
+#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4)
+
+#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5)
+
+#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6)
+
+#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6, _type7, _arg7)
+
+#else /* KDTRACE_HOOKS */
+
+#define EFSYS_PROBE(_name) \
+ DTRACE_PROBE(_name)
+
+#define EFSYS_PROBE1(_name, _type1, _arg1) \
+ DTRACE_PROBE1(_name, _type1, _arg1)
+
+#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
+ DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)
+
+#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3) \
+ DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3)
+
+#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4) \
+ DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4)
+
+#ifdef DTRACE_PROBE5
+#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5) \
+ DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5)
+#else
+#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5) \
+ DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4)
+#endif
+
+#ifdef DTRACE_PROBE6
+#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6) \
+ DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6)
+#else
+#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6) \
+ EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5)
+#endif
+
+#ifdef DTRACE_PROBE7
+#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6, _type7, _arg7) \
+ DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6, _type7, _arg7)
+#else
+#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6, _type7, _arg7) \
+ EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6)
+#endif
+
+#endif /* KDTRACE_HOOKS */
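+
+/*
+ * For example (illustrative only), a probe site such as
+ *
+ *	EFSYS_PROBE2(mem_readd, unsigned int, offset, uint32_t, value);
+ *
+ * compiles away entirely when KDTRACE_HOOKS is not defined, and expands
+ * to the corresponding DTRACE_PROBE2() static probe when it is.
+ */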
+
+/* DMA */
+
+typedef uint64_t efsys_dma_addr_t;
+
+typedef struct efsys_mem_s {
+ bus_dma_tag_t esm_tag;
+ bus_dmamap_t esm_map;
+ caddr_t esm_base;
+ efsys_dma_addr_t esm_addr;
+ size_t esm_size;
+} efsys_mem_t;
+
+
+#define EFSYS_MEM_ZERO(_esmp, _size) \
+ do { \
+ (void) memset((_esmp)->esm_base, 0, (_size)); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ (_edp)->ed_u32[0] = *addr; \
+ \
+ EFSYS_PROBE2(mem_readd, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ (_eqp)->eq_u32[0] = *addr++; \
+ (_eqp)->eq_u32[1] = *addr; \
+ \
+ EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ (_eop)->eo_u32[0] = *addr++; \
+ (_eop)->eo_u32[1] = *addr++; \
+ (_eop)->eo_u32[2] = *addr++; \
+ (_eop)->eo_u32[3] = *addr; \
+ \
+ EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ *addr = (_edp)->ed_u32[0]; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ *addr++ = (_eqp)->eq_u32[0]; \
+ *addr = (_eqp)->eq_u32[1]; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ *addr++ = (_eop)->eo_u32[0]; \
+ *addr++ = (_eop)->eo_u32[1]; \
+ *addr++ = (_eop)->eo_u32[2]; \
+ *addr = (_eop)->eo_u32[3]; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_ADDR(_esmp) \
+ ((_esmp)->esm_addr)
+
+/* BAR */
+
+typedef struct efsys_bar_s {
+ struct mtx esb_lock;
+ bus_space_tag_t esb_tag;
+ bus_space_handle_t esb_handle;
+ int esb_rid;
+ struct resource *esb_res;
+} efsys_bar_t;
+
+#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mtx_lock(&((_esbp)->esb_lock)); \
+ \
+ (_edp)->ed_u32[0] = bus_space_read_4((_esbp)->esb_tag, \
+ (_esbp)->esb_handle, (_offset)); \
+ \
+ EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mtx_unlock(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ mtx_lock(&((_esbp)->esb_lock)); \
+ \
+ (_eqp)->eq_u32[0] = bus_space_read_4((_esbp)->esb_tag, \
+ (_esbp)->esb_handle, (_offset)); \
+ (_eqp)->eq_u32[1] = bus_space_read_4((_esbp)->esb_tag, \
+ (_esbp)->esb_handle, (_offset+4)); \
+ \
+ EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ mtx_unlock(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mtx_lock(&((_esbp)->esb_lock)); \
+ \
+ (_eop)->eo_u32[0] = bus_space_read_4((_esbp)->esb_tag, \
+ (_esbp)->esb_handle, (_offset)); \
+ (_eop)->eo_u32[1] = bus_space_read_4((_esbp)->esb_tag, \
+ (_esbp)->esb_handle, (_offset+4)); \
+ (_eop)->eo_u32[2] = bus_space_read_4((_esbp)->esb_tag, \
+ (_esbp)->esb_handle, (_offset+8)); \
+ (_eop)->eo_u32[3] = bus_space_read_4((_esbp)->esb_tag, \
+ (_esbp)->esb_handle, (_offset+12)); \
+ \
+ EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mtx_unlock(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mtx_lock(&((_esbp)->esb_lock)); \
+ \
+ EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
+ (_offset), (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mtx_unlock(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ mtx_lock(&((_esbp)->esb_lock)); \
+ \
+ EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
+ (_offset), (_eqp)->eq_u32[0]); \
+ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
+ (_offset+4), (_eqp)->eq_u32[1]); \
+ \
+ mtx_unlock(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
+ ("not power of 2 aligned")); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mtx_lock(&((_esbp)->esb_lock)); \
+ \
+ EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
+ (_offset), (_eop)->eo_u32[0]); \
+ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
+ (_offset+4), (_eop)->eo_u32[1]); \
+ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
+ (_offset+8), (_eop)->eo_u32[2]); \
+ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
+ (_offset+12), (_eop)->eo_u32[3]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mtx_unlock(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* SPIN */
+
+#define EFSYS_SPIN(_us) \
+ do { \
+ DELAY(_us); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_SLEEP EFSYS_SPIN
+
+/* BARRIERS */
+
+/* Strict ordering guaranteed by devacc.devacc_attr_dataorder */
+#define EFSYS_MEM_READ_BARRIER()
+#define EFSYS_PIO_WRITE_BARRIER()
+
+/* TIMESTAMP */
+
+typedef clock_t efsys_timestamp_t;
+
+#define EFSYS_TIMESTAMP(_usp) \
+ do { \
+ clock_t now; \
+ \
+ now = ticks; \
+		*(_usp) = now * 1000000 / hz;			\
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* KMEM */
+
+#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
+ do { \
+ (_esip) = (_esip); \
+ (_p) = malloc((_size), M_SFXGE, M_WAITOK|M_ZERO); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_KMEM_FREE(_esip, _size, _p) \
+ do { \
+ (void) (_esip); \
+ (void) (_size); \
+ free((_p), M_SFXGE); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* LOCK */
+
+typedef struct mtx efsys_lock_t;
+
+#define EFSYS_LOCK_MAGIC 0x000010c4
+
+#define EFSYS_LOCK(_lockp, _state) \
+ do { \
+ mtx_lock(_lockp); \
+ (_state) = EFSYS_LOCK_MAGIC; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_UNLOCK(_lockp, _state) \
+ do { \
+ if ((_state) != EFSYS_LOCK_MAGIC) \
+ KASSERT(B_FALSE, ("not locked")); \
+ mtx_unlock(_lockp); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
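+
+/*
+ * Illustrative usage (a sketch; the lock pointer and surrounding code are
+ * hypothetical):
+ *
+ *	efsys_lock_t *eslp = ...;
+ *	int state;
+ *
+ *	EFSYS_LOCK(eslp, state);
+ *	... access state shared with the common code ...
+ *	EFSYS_UNLOCK(eslp, state);
+ *
+ * The EFSYS_LOCK_MAGIC cookie is purely a debugging aid used by
+ * EFSYS_UNLOCK() to catch unbalanced unlocks.
+ */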
+
+/* PREEMPT */
+
+#define EFSYS_PREEMPT_DISABLE(_state) \
+ do { \
+ (_state) = (_state); \
+ critical_enter(); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_PREEMPT_ENABLE(_state) \
+ do { \
+ (_state) = (_state); \
+		critical_exit();				\
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* STAT */
+
+typedef uint64_t efsys_stat_t;
+
+#define EFSYS_STAT_INCR(_knp, _delta) \
+ do { \
+ *(_knp) += (_delta); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_DECR(_knp, _delta) \
+ do { \
+ *(_knp) -= (_delta); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET(_knp, _val) \
+ do { \
+ *(_knp) = (_val); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) = le64toh((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
+ do { \
+ *(_knp) = le32toh((_valp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) += le64toh((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) -= le64toh((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* ERR */
+
+extern void sfxge_err(efsys_identifier_t *, unsigned int,
+ uint32_t, uint32_t);
+
+#if EFSYS_OPT_DECODE_INTR_FATAL
+#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
+ do { \
+ sfxge_err((_esip), (_code), (_dword0), (_dword1)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#endif
+
+/* ASSERT */
+
+#define EFSYS_ASSERT(_exp) do { \
+ if (!(_exp)) \
+ panic(#_exp); \
+ } while (0)
+
+#define EFSYS_ASSERT3(_x, _op, _y, _t) do { \
+ const _t __x = (_t)(_x); \
+ const _t __y = (_t)(_y); \
+ if (!(__x _op __y)) \
+ panic("assertion failed at %s:%u", __FILE__, __LINE__); \
+	} while (0)
+
+#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
+#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
+#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFSYS_H */
diff --git a/sys/dev/sfxge/common/efx.h b/sys/dev/sfxge/common/efx.h
new file mode 100644
index 0000000..18c248b
--- /dev/null
+++ b/sys/dev/sfxge/common/efx.h
@@ -0,0 +1,1893 @@
+/*-
+ * Copyright 2006-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SYS_EFX_H
+#define _SYS_EFX_H
+
+#include "efsys.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_STATIC_ASSERT(_cond) ((void)sizeof(char[(_cond) ? 1 : -1]))
+
+#define EFX_ARRAY_SIZE(_array) (sizeof(_array) / sizeof((_array)[0]))
+
+#ifndef EFSYS_MEM_IS_NULL
+#define EFSYS_MEM_IS_NULL(_esmp) ((_esmp)->esm_base == NULL)
+#endif
+
+typedef enum efx_family_e {
+ EFX_FAMILY_INVALID,
+ EFX_FAMILY_FALCON,
+ EFX_FAMILY_SIENA,
+ EFX_FAMILY_NTYPES
+} efx_family_t;
+
+extern __checkReturn int
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp);
+
+extern __checkReturn int
+efx_infer_family(
+ __in efsys_bar_t *esbp,
+ __out efx_family_t *efp);
+
+#define EFX_PCI_VENID_SFC 0x1924
+#define EFX_PCI_DEVID_FALCON 0x0710
+#define EFX_PCI_DEVID_BETHPAGE 0x0803
+#define EFX_PCI_DEVID_SIENA 0x0813
+#define EFX_PCI_DEVID_SIENA_F1_UNINIT 0x0810
+
+#define EFX_MEM_BAR 2
+
+/* Error codes */
+
+enum {
+ EFX_ERR_INVALID,
+ EFX_ERR_SRAM_OOB,
+ EFX_ERR_BUFID_DC_OOB,
+ EFX_ERR_MEM_PERR,
+ EFX_ERR_RBUF_OWN,
+ EFX_ERR_TBUF_OWN,
+ EFX_ERR_RDESQ_OWN,
+ EFX_ERR_TDESQ_OWN,
+ EFX_ERR_EVQ_OWN,
+ EFX_ERR_EVFF_OFLO,
+ EFX_ERR_ILL_ADDR,
+ EFX_ERR_SRAM_PERR,
+ EFX_ERR_NCODES
+};
+
+/* NIC */
+
+typedef struct efx_nic_s efx_nic_t;
+
+extern __checkReturn int
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp);
+
+extern __checkReturn int
+efx_nic_probe(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_PCIE_TUNE
+
+extern __checkReturn int
+efx_nic_pcie_tune(
+ __in efx_nic_t *enp,
+ unsigned int nlanes);
+
+extern __checkReturn int
+efx_nic_pcie_extended_sync(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_PCIE_TUNE */
+
+extern __checkReturn int
+efx_nic_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+efx_nic_reset(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn int
+efx_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+efx_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_unprobe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_destroy(
+ __in efx_nic_t *enp);
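+
+/*
+ * Typical NIC lifecycle, as suggested by the declarations above (a sketch,
+ * not a normative contract):
+ *
+ *	efx_nic_create() -> efx_nic_probe() -> efx_nic_init() ->
+ *	    ... normal operation ...
+ *	-> efx_nic_fini() -> efx_nic_unprobe() -> efx_nic_destroy()
+ *
+ * efx_nic_reset() returns initialised hardware to a known state.
+ */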
+
+#if EFSYS_OPT_MCDI
+
+typedef struct efx_mcdi_req_s efx_mcdi_req_t;
+
+typedef enum efx_mcdi_exception_e {
+ EFX_MCDI_EXCEPTION_MC_REBOOT,
+ EFX_MCDI_EXCEPTION_MC_BADASSERT,
+} efx_mcdi_exception_t;
+
+typedef struct efx_mcdi_transport_s {
+ void *emt_context;
+ void (*emt_execute)(void *, efx_mcdi_req_t *);
+ void (*emt_ev_cpl)(void *);
+ void (*emt_exception)(void *, efx_mcdi_exception_t);
+} efx_mcdi_transport_t;
+
+extern __checkReturn int
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern __checkReturn int
+efx_mcdi_reboot(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* INTR */
+
+#define EFX_NINTR_FALCON 64
+#define EFX_NINTR_SIENA 1024
+
+typedef enum efx_intr_type_e {
+ EFX_INTR_INVALID = 0,
+ EFX_INTR_LINE,
+ EFX_INTR_MESSAGE,
+ EFX_INTR_NTYPES
+} efx_intr_type_t;
+
+#define EFX_INTR_SIZE (sizeof (efx_oword_t))
+
+extern __checkReturn int
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+extern void
+efx_intr_enable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+#define EFX_INTR_NEVQS 32
+
+extern __checkReturn int
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+extern void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *maskp);
+
+extern void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+extern void
+efx_intr_fatal(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_fini(
+ __in efx_nic_t *enp);
+
+/* MAC */
+
+#if EFSYS_OPT_MAC_STATS
+
+/* START MKCONFIG GENERATED EfxHeaderMacBlock bb8d39428b6fdcf5 */
+typedef enum efx_mac_stat_e {
+ EFX_MAC_RX_OCTETS,
+ EFX_MAC_RX_PKTS,
+ EFX_MAC_RX_UNICST_PKTS,
+ EFX_MAC_RX_MULTICST_PKTS,
+ EFX_MAC_RX_BRDCST_PKTS,
+ EFX_MAC_RX_PAUSE_PKTS,
+ EFX_MAC_RX_LE_64_PKTS,
+ EFX_MAC_RX_65_TO_127_PKTS,
+ EFX_MAC_RX_128_TO_255_PKTS,
+ EFX_MAC_RX_256_TO_511_PKTS,
+ EFX_MAC_RX_512_TO_1023_PKTS,
+ EFX_MAC_RX_1024_TO_15XX_PKTS,
+ EFX_MAC_RX_GE_15XX_PKTS,
+ EFX_MAC_RX_ERRORS,
+ EFX_MAC_RX_FCS_ERRORS,
+ EFX_MAC_RX_DROP_EVENTS,
+ EFX_MAC_RX_FALSE_CARRIER_ERRORS,
+ EFX_MAC_RX_SYMBOL_ERRORS,
+ EFX_MAC_RX_ALIGN_ERRORS,
+ EFX_MAC_RX_INTERNAL_ERRORS,
+ EFX_MAC_RX_JABBER_PKTS,
+ EFX_MAC_RX_LANE0_CHAR_ERR,
+ EFX_MAC_RX_LANE1_CHAR_ERR,
+ EFX_MAC_RX_LANE2_CHAR_ERR,
+ EFX_MAC_RX_LANE3_CHAR_ERR,
+ EFX_MAC_RX_LANE0_DISP_ERR,
+ EFX_MAC_RX_LANE1_DISP_ERR,
+ EFX_MAC_RX_LANE2_DISP_ERR,
+ EFX_MAC_RX_LANE3_DISP_ERR,
+ EFX_MAC_RX_MATCH_FAULT,
+ EFX_MAC_RX_NODESC_DROP_CNT,
+ EFX_MAC_TX_OCTETS,
+ EFX_MAC_TX_PKTS,
+ EFX_MAC_TX_UNICST_PKTS,
+ EFX_MAC_TX_MULTICST_PKTS,
+ EFX_MAC_TX_BRDCST_PKTS,
+ EFX_MAC_TX_PAUSE_PKTS,
+ EFX_MAC_TX_LE_64_PKTS,
+ EFX_MAC_TX_65_TO_127_PKTS,
+ EFX_MAC_TX_128_TO_255_PKTS,
+ EFX_MAC_TX_256_TO_511_PKTS,
+ EFX_MAC_TX_512_TO_1023_PKTS,
+ EFX_MAC_TX_1024_TO_15XX_PKTS,
+ EFX_MAC_TX_GE_15XX_PKTS,
+ EFX_MAC_TX_ERRORS,
+ EFX_MAC_TX_SGL_COL_PKTS,
+ EFX_MAC_TX_MULT_COL_PKTS,
+ EFX_MAC_TX_EX_COL_PKTS,
+ EFX_MAC_TX_LATE_COL_PKTS,
+ EFX_MAC_TX_DEF_PKTS,
+ EFX_MAC_TX_EX_DEF_PKTS,
+ EFX_MAC_NSTATS
+} efx_mac_stat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderMacBlock */
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef enum efx_link_mode_e {
+ EFX_LINK_UNKNOWN = 0,
+ EFX_LINK_DOWN,
+ EFX_LINK_10HDX,
+ EFX_LINK_10FDX,
+ EFX_LINK_100HDX,
+ EFX_LINK_100FDX,
+ EFX_LINK_1000HDX,
+ EFX_LINK_1000FDX,
+ EFX_LINK_10000FDX,
+ EFX_LINK_NMODES
+} efx_link_mode_t;
+
+#define EFX_MAC_SDU_MAX 9202
+
+#define EFX_MAC_PDU(_sdu) \
+ P2ROUNDUP(((_sdu) \
+ + /* EtherII */ 14 \
+ + /* VLAN */ 4 \
+ + /* CRC */ 4 \
+ + /* bug16011 */ 16), \
+ (1 << 3))
+
+#define EFX_MAC_PDU_MIN 60
+#define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX)
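+
+/*
+ * Worked example: for a standard 1500 byte SDU,
+ * EFX_MAC_PDU(1500) = P2ROUNDUP(1500 + 14 + 4 + 4 + 16, 8)
+ *                   = P2ROUNDUP(1538, 8) = 1544 bytes.
+ */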
+
+extern __checkReturn int
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu);
+
+extern __checkReturn int
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr);
+
+extern __checkReturn int
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t unicst,
+ __in boolean_t brdcst);
+
+extern __checkReturn int
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled);
+
+extern __checkReturn int
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+#define EFX_FCNTL_RESPOND 0x00000001
+#define EFX_FCNTL_GENERATE 0x00000002
+
+extern __checkReturn int
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg);
+
+extern void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp);
+
+#define EFX_MAC_HASH_BITS (1 << 8)
+
+extern __checkReturn int
+efx_mac_hash_set(
+ __in efx_nic_t *enp,
+ __in_ecount(EFX_MAC_HASH_BITS) unsigned int const *bucket);
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char __cs *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_MAC_STATS_SIZE 0x400
+
+/*
+ * Upload MAC statistics supported by the hardware into the given buffer.
+ *
+ * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes,
+ * and page aligned.
+ *
+ * The hardware will only DMA statistics that it understands (of course).
+ * Drivers should not make any assumptions about which statistics are
+ * supported, especially when the statistics are generated by firmware.
+ *
+ * Thus, drivers should zero this buffer before use, so that not-understood
+ * statistics read back as zero.
+ */
+extern __checkReturn int
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn int
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+extern __checkReturn int
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __out_opt uint32_t *generationp);
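+
+/*
+ * Illustrative call sequence (a sketch; everything other than the efx_*
+ * calls is hypothetical driver code):
+ *
+ *	memset(esmp->esm_base, 0, EFX_MAC_STATS_SIZE);
+ *	rc = efx_mac_stats_upload(enp, esmp);
+ *	... wait for the statistics DMA to complete ...
+ *	rc = efx_mac_stats_update(enp, esmp, stats, NULL);
+ */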
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+/* MON */
+
+typedef enum efx_mon_type_e {
+ EFX_MON_INVALID = 0,
+ EFX_MON_NULL,
+ EFX_MON_LM87,
+ EFX_MON_MAX6647,
+ EFX_MON_SFC90X0,
+ EFX_MON_NTYPES
+} efx_mon_type_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char __cs *
+efx_mon_name(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn int
+efx_mon_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_MON_STATS
+
+#define EFX_MON_STATS_SIZE 0x100
+
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 16a14e61aa4f8d80 */
+typedef enum efx_mon_stat_e {
+ EFX_MON_STAT_2_5V,
+ EFX_MON_STAT_VCCP1,
+ EFX_MON_STAT_VCC,
+ EFX_MON_STAT_5V,
+ EFX_MON_STAT_12V,
+ EFX_MON_STAT_VCCP2,
+ EFX_MON_STAT_EXT_TEMP,
+ EFX_MON_STAT_INT_TEMP,
+ EFX_MON_STAT_AIN1,
+ EFX_MON_STAT_AIN2,
+ EFX_MON_STAT_INT_COOLING,
+ EFX_MON_STAT_EXT_COOLING,
+ EFX_MON_STAT_1V,
+ EFX_MON_STAT_1_2V,
+ EFX_MON_STAT_1_8V,
+ EFX_MON_STAT_3_3V,
+ EFX_MON_NSTATS
+} efx_mon_stat_t;
+
+/* END MKCONFIG GENERATED MonitorHeaderStatsBlock */
+
+typedef enum efx_mon_stat_state_e {
+ EFX_MON_STAT_STATE_OK = 0,
+ EFX_MON_STAT_STATE_WARNING = 1,
+ EFX_MON_STAT_STATE_FATAL = 2,
+ EFX_MON_STAT_STATE_BROKEN = 3,
+} efx_mon_stat_state_t;
+
+typedef struct efx_mon_stat_value_s {
+ uint16_t emsv_value;
+ uint16_t emsv_state;
+} efx_mon_stat_value_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char __cs *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn int
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __out_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+extern void
+efx_mon_fini(
+ __in efx_nic_t *enp);
+
+/* PHY */
+
+#define PMA_PMD_MMD 1
+#define PCS_MMD 3
+#define PHY_XS_MMD 4
+#define DTE_XS_MMD 5
+#define AN_MMD 7
+#define CL22EXT_MMD 29
+
+#define MAXMMD ((1 << 5) - 1)
+
+/* PHY types */
+#define EFX_PHY_NULL 0x0
+#define EFX_PHY_TXC43128 0x1
+#define EFX_PHY_SFX7101 0x3
+#define EFX_PHY_QT2022C2 0x4
+#define EFX_PHY_SFT9001A 0x8
+#define EFX_PHY_QT2025C 0x9
+#define EFX_PHY_SFT9001B 0xa
+#define EFX_PHY_QLX111V 0xc
+
+extern __checkReturn int
+efx_phy_verify(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+typedef enum efx_phy_led_mode_e {
+ EFX_PHY_LED_DEFAULT = 0,
+ EFX_PHY_LED_OFF,
+ EFX_PHY_LED_ON,
+ EFX_PHY_LED_FLASH,
+ EFX_PHY_LED_NMODES
+} efx_phy_led_mode_t;
+
+extern __checkReturn int
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode);
+
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+extern __checkReturn int
+efx_port_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+typedef enum efx_loopback_type_e {
+ EFX_LOOPBACK_OFF = 0,
+ EFX_LOOPBACK_DATA = 1,
+ EFX_LOOPBACK_GMAC = 2,
+ EFX_LOOPBACK_XGMII = 3,
+ EFX_LOOPBACK_XGXS = 4,
+ EFX_LOOPBACK_XAUI = 5,
+ EFX_LOOPBACK_GMII = 6,
+ EFX_LOOPBACK_SGMII = 7,
+ EFX_LOOPBACK_XGBR = 8,
+ EFX_LOOPBACK_XFI = 9,
+ EFX_LOOPBACK_XAUI_FAR = 10,
+ EFX_LOOPBACK_GMII_FAR = 11,
+ EFX_LOOPBACK_SGMII_FAR = 12,
+ EFX_LOOPBACK_XFI_FAR = 13,
+ EFX_LOOPBACK_GPHY = 14,
+ EFX_LOOPBACK_PHY_XS = 15,
+ EFX_LOOPBACK_PCS = 16,
+ EFX_LOOPBACK_PMA_PMD = 17,
+ EFX_LOOPBACK_NTYPES
+} efx_loopback_type_t;
+
+#define EFX_LOOPBACK_MAC_MASK \
+ ((1 << EFX_LOOPBACK_DATA) | \
+ (1 << EFX_LOOPBACK_GMAC) | \
+ (1 << EFX_LOOPBACK_XGMII) | \
+ (1 << EFX_LOOPBACK_XGXS) | \
+ (1 << EFX_LOOPBACK_XAUI) | \
+ (1 << EFX_LOOPBACK_GMII) | \
+ (1 << EFX_LOOPBACK_SGMII) | \
+ (1 << EFX_LOOPBACK_XGBR) | \
+ (1 << EFX_LOOPBACK_XFI) | \
+ (1 << EFX_LOOPBACK_XAUI_FAR) | \
+ (1 << EFX_LOOPBACK_GMII_FAR) | \
+ (1 << EFX_LOOPBACK_SGMII_FAR) | \
+ (1 << EFX_LOOPBACK_XFI_FAR))
+
+#define EFX_LOOPBACK_MASK \
+ ((1 << EFX_LOOPBACK_NTYPES) - 1)
+
+extern __checkReturn int
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t type);
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char __cs *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn int
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern void
+efx_port_fini(
+ __in efx_nic_t *enp);
+
+typedef enum efx_phy_cap_type_e {
+ EFX_PHY_CAP_INVALID = 0,
+ EFX_PHY_CAP_10HDX,
+ EFX_PHY_CAP_10FDX,
+ EFX_PHY_CAP_100HDX,
+ EFX_PHY_CAP_100FDX,
+ EFX_PHY_CAP_1000HDX,
+ EFX_PHY_CAP_1000FDX,
+ EFX_PHY_CAP_10000FDX,
+ EFX_PHY_CAP_PAUSE,
+ EFX_PHY_CAP_ASYM,
+ EFX_PHY_CAP_AN,
+ EFX_PHY_CAP_NTYPES
+} efx_phy_cap_type_t;
+
+
+#define EFX_PHY_CAP_CURRENT 0x00000000
+#define EFX_PHY_CAP_DEFAULT 0x00000001
+#define EFX_PHY_CAP_PERM 0x00000002
+
+extern void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp);
+
+extern __checkReturn int
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask);
+
+extern void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+extern __checkReturn int
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+typedef enum efx_phy_media_type_e {
+ EFX_PHY_MEDIA_INVALID = 0,
+ EFX_PHY_MEDIA_XAUI,
+ EFX_PHY_MEDIA_CX4,
+ EFX_PHY_MEDIA_KX4,
+ EFX_PHY_MEDIA_XFP,
+ EFX_PHY_MEDIA_SFP_PLUS,
+ EFX_PHY_MEDIA_BASE_T,
+ EFX_PHY_MEDIA_NTYPES
+} efx_phy_media_type_t;
+
+/*
+ * Get the type of medium currently used.  If the board has ports for
+ * modules, a module is present, and we recognise the media type of
+ * the module, then this will be the media type of the module.
+ * Otherwise it will be the media type of the port.
+ */
+extern void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep);
+
+#if EFSYS_OPT_PHY_STATS
+
+/* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */
+typedef enum efx_phy_stat_e {
+ EFX_PHY_STAT_OUI,
+ EFX_PHY_STAT_PMA_PMD_LINK_UP,
+ EFX_PHY_STAT_PMA_PMD_RX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_TX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_REV_A,
+ EFX_PHY_STAT_PMA_PMD_REV_B,
+ EFX_PHY_STAT_PMA_PMD_REV_C,
+ EFX_PHY_STAT_PMA_PMD_REV_D,
+ EFX_PHY_STAT_PCS_LINK_UP,
+ EFX_PHY_STAT_PCS_RX_FAULT,
+ EFX_PHY_STAT_PCS_TX_FAULT,
+ EFX_PHY_STAT_PCS_BER,
+ EFX_PHY_STAT_PCS_BLOCK_ERRORS,
+ EFX_PHY_STAT_PHY_XS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_SYNC_A,
+ EFX_PHY_STAT_PHY_XS_SYNC_B,
+ EFX_PHY_STAT_PHY_XS_SYNC_C,
+ EFX_PHY_STAT_PHY_XS_SYNC_D,
+ EFX_PHY_STAT_AN_LINK_UP,
+ EFX_PHY_STAT_AN_MASTER,
+ EFX_PHY_STAT_AN_LOCAL_RX_OK,
+ EFX_PHY_STAT_AN_REMOTE_RX_OK,
+ EFX_PHY_STAT_CL22EXT_LINK_UP,
+ EFX_PHY_STAT_SNR_A,
+ EFX_PHY_STAT_SNR_B,
+ EFX_PHY_STAT_SNR_C,
+ EFX_PHY_STAT_SNR_D,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_A,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_B,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_C,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_D,
+ EFX_PHY_STAT_AN_COMPLETE,
+ EFX_PHY_STAT_PMA_PMD_REV_MAJOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MINOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MICRO,
+ EFX_PHY_STAT_PCS_FW_VERSION_0,
+ EFX_PHY_STAT_PCS_FW_VERSION_1,
+ EFX_PHY_STAT_PCS_FW_VERSION_2,
+ EFX_PHY_STAT_PCS_FW_VERSION_3,
+ EFX_PHY_STAT_PCS_FW_BUILD_YY,
+ EFX_PHY_STAT_PCS_FW_BUILD_MM,
+ EFX_PHY_STAT_PCS_FW_BUILD_DD,
+ EFX_PHY_STAT_PCS_OP_MODE,
+ EFX_PHY_NSTATS
+} efx_phy_stat_t;
+
+/* END MKCONFIG GENERATED PhyHeaderStatsBlock */
+
+#if EFSYS_OPT_NAMES
+
+extern const char __cs *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t stat);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_PHY_STATS_SIZE 0x100
+
+extern __checkReturn int
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __out_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_PHY_PROPS
+
+#if EFSYS_OPT_NAMES
+
+extern const char __cs *
+efx_phy_prop_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_PHY_PROP_DEFAULT 0x00000001
+
+extern __checkReturn int
+efx_phy_prop_get(
+ __in efx_nic_t *enp,
+ __in unsigned int id,
+ __in uint32_t flags,
+ __out uint32_t *valp);
+
+extern __checkReturn int
+efx_phy_prop_set(
+ __in efx_nic_t *enp,
+ __in unsigned int id,
+ __in uint32_t val);
+
+#endif /* EFSYS_OPT_PHY_PROPS */
+
+#if EFSYS_OPT_PHY_BIST
+
+typedef enum efx_phy_bist_type_e {
+ EFX_PHY_BIST_TYPE_UNKNOWN,
+ EFX_PHY_BIST_TYPE_NORMAL,
+ EFX_PHY_BIST_TYPE_CABLE_SHORT,
+ EFX_PHY_BIST_TYPE_CABLE_LONG,
+ EFX_PHY_BIST_TYPE_NTYPES,
+} efx_phy_bist_type_t;
+
+typedef enum efx_phy_bist_result_e {
+ EFX_PHY_BIST_RESULT_UNKNOWN,
+ EFX_PHY_BIST_RESULT_RUNNING,
+ EFX_PHY_BIST_RESULT_PASSED,
+ EFX_PHY_BIST_RESULT_FAILED,
+} efx_phy_bist_result_t;
+
+typedef enum efx_phy_cable_status_e {
+ EFX_PHY_CABLE_STATUS_OK,
+ EFX_PHY_CABLE_STATUS_INVALID,
+ EFX_PHY_CABLE_STATUS_OPEN,
+ EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_INTERPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_BUSY,
+} efx_phy_cable_status_t;
+
+typedef enum efx_phy_bist_value_e {
+ EFX_PHY_BIST_CABLE_LENGTH_A,
+ EFX_PHY_BIST_CABLE_LENGTH_B,
+ EFX_PHY_BIST_CABLE_LENGTH_C,
+ EFX_PHY_BIST_CABLE_LENGTH_D,
+ EFX_PHY_BIST_CABLE_STATUS_A,
+ EFX_PHY_BIST_CABLE_STATUS_B,
+ EFX_PHY_BIST_CABLE_STATUS_C,
+ EFX_PHY_BIST_CABLE_STATUS_D,
+ EFX_PHY_BIST_FAULT_CODE,
+ EFX_PHY_BIST_NVALUES,
+} efx_phy_bist_value_t;
+
+extern __checkReturn int
+efx_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type);
+
+extern __checkReturn int
+efx_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type,
+ __out efx_phy_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+efx_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type);
+
+#endif /* EFSYS_OPT_PHY_BIST */
+
+#define EFX_FEATURE_IPV6 0x00000001
+#define EFX_FEATURE_LFSR_HASH_INSERT 0x00000002
+#define EFX_FEATURE_LINK_EVENTS 0x00000004
+#define EFX_FEATURE_PERIODIC_MAC_STATS 0x00000008
+#define EFX_FEATURE_WOL 0x00000010
+#define EFX_FEATURE_MCDI 0x00000020
+#define EFX_FEATURE_LOOKAHEAD_SPLIT 0x00000040
+#define EFX_FEATURE_MAC_HEADER_FILTERS 0x00000080
+
+typedef struct efx_nic_cfg_s {
+ uint32_t enc_board_type;
+ uint32_t enc_phy_type;
+#if EFSYS_OPT_NAMES
+ char enc_phy_name[21];
+#endif
+ char enc_phy_revision[21];
+ efx_mon_type_t enc_mon_type;
+#if EFSYS_OPT_MON_STATS
+ uint32_t enc_mon_stat_mask;
+#endif
+ unsigned int enc_features;
+ uint8_t enc_mac_addr[6];
+ uint8_t enc_port;
+ uint32_t enc_evq_limit;
+ uint32_t enc_txq_limit;
+ uint32_t enc_rxq_limit;
+ uint32_t enc_buftbl_limit;
+ uint32_t enc_evq_moderation_max;
+#if EFSYS_OPT_LOOPBACK
+ uint32_t enc_loopback_types[EFX_LINK_NMODES];
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t enc_phy_flags_mask;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ uint32_t enc_led_mask;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+#if EFSYS_OPT_PHY_STATS
+ uint64_t enc_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_PHY_PROPS
+ unsigned int enc_phy_nprops;
+#endif /* EFSYS_OPT_PHY_PROPS */
+#if EFSYS_OPT_SIENA
+ uint8_t enc_siena_channel;
+#if EFSYS_OPT_PHY_STATS
+ uint32_t enc_siena_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_MON_STATS
+ uint32_t enc_siena_mon_stat_mask;
+#endif /* EFSYS_OPT_MON_STATS */
+#endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_PHY_BIST
+ uint32_t enc_bist_mask;
+#endif /* EFSYS_OPT_PHY_BIST */
+} efx_nic_cfg_t;
+
+extern const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_VPD
+
+typedef enum efx_vpd_tag_e {
+ EFX_VPD_ID = 0x02,
+ EFX_VPD_END = 0x0f,
+ EFX_VPD_RO = 0x10,
+ EFX_VPD_RW = 0x11,
+} efx_vpd_tag_t;
+
+typedef uint16_t efx_vpd_keyword_t;
+
+typedef struct efx_vpd_value_s {
+ efx_vpd_tag_t evv_tag;
+ efx_vpd_keyword_t evv_keyword;
+ uint8_t evv_length;
+ uint8_t evv_value[0x100];
+} efx_vpd_value_t;
+
+
+#define EFX_VPD_KEYWORD(x, y) ((x) | ((y) << 8))
+
+extern __checkReturn int
+efx_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn int
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn int
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn int
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn int
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn int
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn int
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn int
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef enum efx_nvram_type_e {
+ EFX_NVRAM_INVALID = 0,
+ EFX_NVRAM_BOOTROM,
+ EFX_NVRAM_BOOTROM_CFG,
+ EFX_NVRAM_MC_FIRMWARE,
+ EFX_NVRAM_MC_GOLDEN,
+ EFX_NVRAM_PHY,
+ EFX_NVRAM_NULLPHY,
+ EFX_NVRAM_NTYPES,
+} efx_nvram_type_t;
+
+extern __checkReturn int
+efx_nvram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn int
+efx_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn int
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep);
+
+extern __checkReturn int
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *pref_chunkp);
+
+extern void
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn int
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn int
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn int
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+	__in_ecount(4)	uint16_t version[4]);
+
+extern __checkReturn int
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn int
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_nvram_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_BOOTCFG
+
+extern int
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern int
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+#endif /* EFSYS_OPT_BOOTCFG */
+
+#if EFSYS_OPT_WOL
+
+typedef enum efx_wol_type_e {
+ EFX_WOL_TYPE_INVALID,
+ EFX_WOL_TYPE_MAGIC,
+ EFX_WOL_TYPE_BITMAP,
+ EFX_WOL_TYPE_LINK,
+ EFX_WOL_NTYPES,
+} efx_wol_type_t;
+
+typedef enum efx_lightsout_offload_type_e {
+ EFX_LIGHTSOUT_OFFLOAD_TYPE_INVALID,
+ EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP,
+ EFX_LIGHTSOUT_OFFLOAD_TYPE_NS,
+} efx_lightsout_offload_type_t;
+
+#define EFX_WOL_BITMAP_MASK_SIZE (48)
+#define EFX_WOL_BITMAP_VALUE_SIZE (128)
+
+typedef union efx_wol_param_u {
+ struct {
+ uint8_t mac_addr[6];
+ } ewp_magic;
+ struct {
+ uint8_t mask[EFX_WOL_BITMAP_MASK_SIZE]; /* 1 bit per byte */
+ uint8_t value[EFX_WOL_BITMAP_VALUE_SIZE]; /* value to match */
+ uint8_t value_len;
+ } ewp_bitmap;
+} efx_wol_param_t;
+
+typedef union efx_lightsout_offload_param_u {
+ struct {
+ uint8_t mac_addr[6];
+ uint32_t ip;
+ } elop_arp;
+ struct {
+ uint8_t mac_addr[6];
+ uint32_t solicited_node[4];
+ uint32_t ip[4];
+ } elop_ns;
+} efx_lightsout_offload_param_t;
+
+extern __checkReturn int
+efx_wol_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+efx_wol_filter_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+efx_wol_filter_add(
+ __in efx_nic_t *enp,
+ __in efx_wol_type_t type,
+ __in efx_wol_param_t *paramp,
+ __out uint32_t *filter_idp);
+
+extern __checkReturn int
+efx_wol_filter_remove(
+ __in efx_nic_t *enp,
+ __in uint32_t filter_id);
+
+extern __checkReturn int
+efx_lightsout_offload_add(
+ __in efx_nic_t *enp,
+ __in efx_lightsout_offload_type_t type,
+ __in efx_lightsout_offload_param_t *paramp,
+ __out uint32_t *filter_idp);
+
+extern __checkReturn int
+efx_lightsout_offload_remove(
+ __in efx_nic_t *enp,
+ __in efx_lightsout_offload_type_t type,
+ __in uint32_t filter_id);
+
+extern void
+efx_wol_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_WOL */
+
+#if EFSYS_OPT_DIAG
+
+typedef enum efx_pattern_type_e {
+ EFX_PATTERN_BYTE_INCREMENT = 0,
+ EFX_PATTERN_ALL_THE_SAME,
+ EFX_PATTERN_BIT_ALTERNATE,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_PATTERN_BIT_SWEEP,
+ EFX_PATTERN_NTYPES
+} efx_pattern_type_t;
+
+typedef void
+(*efx_sram_pattern_fn_t)(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp);
+
+extern __checkReturn int
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn int
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n);
+
+extern void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n);
+
+#define EFX_BUF_TBL_SIZE 0x20000
+
+#define EFX_BUF_SIZE 4096
+
+/* EV */
+
+typedef struct efx_evq_s efx_evq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderEventQueueBlock d5614a5d669c8ca3 */
+typedef enum efx_ev_qstat_e {
+ EV_ALL,
+ EV_RX,
+ EV_RX_OK,
+ EV_RX_RECOVERY,
+ EV_RX_FRM_TRUNC,
+ EV_RX_TOBE_DISC,
+ EV_RX_PAUSE_FRM_ERR,
+ EV_RX_BUF_OWNER_ID_ERR,
+ EV_RX_IPV4_HDR_CHKSUM_ERR,
+ EV_RX_TCP_UDP_CHKSUM_ERR,
+ EV_RX_ETH_CRC_ERR,
+ EV_RX_IP_FRAG_ERR,
+ EV_RX_MCAST_PKT,
+ EV_RX_MCAST_HASH_MATCH,
+ EV_RX_TCP_IPV4,
+ EV_RX_TCP_IPV6,
+ EV_RX_UDP_IPV4,
+ EV_RX_UDP_IPV6,
+ EV_RX_OTHER_IPV4,
+ EV_RX_OTHER_IPV6,
+ EV_RX_NON_IP,
+ EV_RX_OVERRUN,
+ EV_TX,
+ EV_TX_WQ_FF_FULL,
+ EV_TX_PKT_ERR,
+ EV_TX_PKT_TOO_BIG,
+ EV_TX_UNEXPECTED,
+ EV_GLOBAL,
+ EV_GLOBAL_PHY,
+ EV_GLOBAL_MNT,
+ EV_GLOBAL_RX_RECOVERY,
+ EV_DRIVER,
+ EV_DRIVER_SRM_UPD_DONE,
+ EV_DRIVER_TX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_FAILED,
+ EV_DRIVER_RX_DSC_ERROR,
+ EV_DRIVER_TX_DSC_ERROR,
+ EV_DRV_GEN,
+ EV_MCDI_RESPONSE,
+ EV_NQSTATS
+} efx_ev_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderEventQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn int
+efx_ev_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_ev_fini(
+ __in efx_nic_t *enp);
+
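+/*
+ * EFX_MASK(_max, _min) sets one bit for each power of two between _min and
+ * _max inclusive (both must themselves be powers of two); for example,
+ * EFX_MASK(32768, 512) == 0xfe00.  A queue size n is therefore valid iff
+ * it is a power of two and (n & mask) is non-zero.
+ */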
+#define EFX_MASK(_max, _min) (-((_max) << 1) ^ -(_min))
+
+#define EFX_EVQ_MAXNEVS 32768
+#define EFX_EVQ_MINNEVS 512
+
+#define EFX_EVQ_NEVS_MASK EFX_MASK(EFX_EVQ_MAXNEVS, EFX_EVQ_MINNEVS)
+
+#define EFX_EVQ_SIZE(_nevs) ((_nevs) * sizeof (efx_qword_t))
+#define EFX_EVQ_NBUFS(_nevs) (EFX_EVQ_SIZE(_nevs) / EFX_BUF_SIZE)
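+
+/*
+ * For example, a 1024-entry event queue occupies
+ * EFX_EVQ_SIZE(1024) = 1024 * sizeof (efx_qword_t) = 8192 bytes,
+ * i.e. EFX_EVQ_NBUFS(1024) = 8192 / EFX_BUF_SIZE = 2 buffer table entries.
+ */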
+
+extern __checkReturn int
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __deref_out efx_evq_t **eepp);
+
+extern void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+typedef __checkReturn boolean_t
+(*efx_initialized_ev_t)(
+ __in_opt void *arg);
+
+#define EFX_PKT_UNICAST 0x0004
+#define EFX_PKT_START 0x0008
+
+#define EFX_PKT_VLAN_TAGGED 0x0010
+#define EFX_CKSUM_TCPUDP 0x0020
+#define EFX_CKSUM_IPV4 0x0040
+#define EFX_PKT_CONT 0x0080
+
+#define EFX_CHECK_VLAN 0x0100
+#define EFX_PKT_TCP 0x0200
+#define EFX_PKT_UDP 0x0400
+#define EFX_PKT_IPV4 0x0800
+
+#define EFX_PKT_IPV6 0x1000
+#define EFX_ADDR_MISMATCH 0x4000
+#define EFX_DISCARD 0x8000
+
+#define EFX_EV_RX_NLABELS 32
+#define EFX_EV_TX_NLABELS 32
+
+typedef __checkReturn boolean_t
+(*efx_rx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id,
+ __in uint32_t size,
+ __in uint16_t flags);
+
+typedef __checkReturn boolean_t
+(*efx_tx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id);
+
+#define EFX_EXCEPTION_RX_RECOVERY 0x00000001
+#define EFX_EXCEPTION_RX_DSC_ERROR 0x00000002
+#define EFX_EXCEPTION_TX_DSC_ERROR 0x00000003
+#define EFX_EXCEPTION_UNKNOWN_SENSOREVT 0x00000004
+#define EFX_EXCEPTION_FWALERT_SRAM 0x00000005
+#define EFX_EXCEPTION_UNKNOWN_FWALERT 0x00000006
+
+typedef __checkReturn boolean_t
+(*efx_exception_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t data);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_failed_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_txq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_software_ev_t)(
+ __in_opt void *arg,
+ __in uint16_t magic);
+
+typedef __checkReturn boolean_t
+(*efx_sram_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t code);
+
+#define EFX_SRAM_CLEAR 0
+#define EFX_SRAM_UPDATE 1
+#define EFX_SRAM_ILLEGAL_CLEAR 2
+
+typedef __checkReturn boolean_t
+(*efx_wake_up_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_timer_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_link_change_ev_t)(
+ __in_opt void *arg,
+ __in efx_link_mode_t link_mode);
+
+#if EFSYS_OPT_MON_STATS
+
+typedef __checkReturn boolean_t
+(*efx_monitor_ev_t)(
+ __in_opt void *arg,
+ __in efx_mon_stat_t id,
+ __in efx_mon_stat_value_t value);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef __checkReturn boolean_t
+(*efx_mac_stats_ev_t)(
+ __in_opt void *arg,
+	__in		uint32_t generation);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef struct efx_ev_callbacks_s {
+ efx_initialized_ev_t eec_initialized;
+ efx_rx_ev_t eec_rx;
+ efx_tx_ev_t eec_tx;
+ efx_exception_ev_t eec_exception;
+ efx_rxq_flush_done_ev_t eec_rxq_flush_done;
+ efx_rxq_flush_failed_ev_t eec_rxq_flush_failed;
+ efx_txq_flush_done_ev_t eec_txq_flush_done;
+ efx_software_ev_t eec_software;
+ efx_sram_ev_t eec_sram;
+ efx_wake_up_ev_t eec_wake_up;
+ efx_timer_ev_t eec_timer;
+ efx_link_change_ev_t eec_link_change;
+#if EFSYS_OPT_MON_STATS
+ efx_monitor_ev_t eec_monitor;
+#endif /* EFSYS_OPT_MON_STATS */
+#if EFSYS_OPT_MAC_STATS
+ efx_mac_stats_ev_t eec_mac_stats;
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_ev_callbacks_t;
+
+extern __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_EV_PREFETCH
+
+extern void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+extern void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
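+
+/*
+ * Illustrative event-loop sketch (hypothetical driver code):
+ *
+ *	unsigned int count = last_index;
+ *
+ *	if (efx_ev_qpending(eep, count)) {
+ *		efx_ev_qpoll(eep, &count, &eec, arg);
+ *		(void) efx_ev_qprime(eep, count);
+ *	}
+ *
+ * efx_ev_qpoll() invokes one callback per decoded event and returns the
+ * updated queue index through *countp.
+ */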
+
+extern __checkReturn int
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+extern __checkReturn int
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char __cs *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+/* RX */
+
+typedef struct efx_rxq_s efx_rxq_t;
+
+extern __checkReturn int
+efx_rx_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_HDR_SPLIT
+extern	__checkReturn	int
+efx_rx_hdr_split_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int hdr_buf_size,
+ __in unsigned int pld_buf_size);
+
+#endif /* EFSYS_OPT_RX_HDR_SPLIT */
+
+#if EFSYS_OPT_RX_SCATTER
+extern	__checkReturn	int
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+
+typedef enum efx_rx_hash_alg_e {
+ EFX_RX_HASHALG_LFSR = 0,
+ EFX_RX_HASHALG_TOEPLITZ
+} efx_rx_hash_alg_t;
+
+typedef enum efx_rx_hash_type_e {
+ EFX_RX_HASH_IPV4 = 0,
+ EFX_RX_HASH_TCPIPV4,
+ EFX_RX_HASH_IPV6,
+ EFX_RX_HASH_TCPIPV6,
+} efx_rx_hash_type_t;
+
+#define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */
+#define EFX_MAXRSS 64 /* RX indirection entry range */
+#define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */
+
+extern __checkReturn int
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn int
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn int
+efx_rx_scale_toeplitz_ipv4_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn int
+efx_rx_scale_toeplitz_ipv6_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+/*
+ * The prefix is a byte array of one of the forms:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.TT.TT.TT.TT
+ * XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.XX.LL.LL
+ *
+ * where:
+ *
+ * TT.TT.TT.TT is a 32-bit Toeplitz hash
+ * LL.LL is a 16-bit LFSR hash
+ *
+ * Hash values are in network (big-endian) byte order.
+ */
+
+#define EFX_RX_PREFIX_SIZE 16
+
+#define EFX_RX_HASH_VALUE(_func, _buffer) \
+ (((_func) == EFX_RX_HASHALG_LFSR) ? \
+ ((uint16_t)(((_buffer)[14] << 8) | (_buffer)[15])) : \
+ ((uint32_t)(((_buffer)[12] << 24) | \
+ ((_buffer)[13] << 16) | \
+ ((_buffer)[14] << 8) | \
+ (_buffer)[15])))
+
+#define EFX_RX_HASH_SIZE(_func) \
+ (((_func) == EFX_RX_HASHALG_LFSR) ? \
+ sizeof (uint16_t) : \
+ sizeof (uint32_t))
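+
+/*
+ * Illustrative hash extraction (hypothetical driver code; assumes hash
+ * insertion is enabled, so each received packet is preceded by an
+ * EFX_RX_PREFIX_SIZE byte prefix):
+ *
+ *	uint8_t *prefix = mtod(m, uint8_t *);
+ *	uint32_t hash = EFX_RX_HASH_VALUE(EFX_RX_HASHALG_TOEPLITZ, prefix);
+ *
+ *	m->m_data += EFX_RX_PREFIX_SIZE;
+ *	m->m_len -= EFX_RX_PREFIX_SIZE;
+ */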
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#define EFX_RXQ_MAXNDESCS 4096
+#define EFX_RXQ_MINNDESCS 512
+
+#define EFX_RXQ_NDESCS_MASK EFX_MASK(EFX_RXQ_MAXNDESCS, EFX_RXQ_MINNDESCS)
+
+#define EFX_RXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_RXQ_NBUFS(_ndescs) (EFX_RXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+
+typedef enum efx_rxq_type_e {
+ EFX_RXQ_TYPE_DEFAULT,
+ EFX_RXQ_TYPE_SPLIT_HEADER,
+ EFX_RXQ_TYPE_SPLIT_PAYLOAD,
+ EFX_RXQ_TYPE_SCATTER,
+ EFX_RXQ_NTYPES
+} efx_rxq_type_t;
+
+extern __checkReturn int
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+typedef struct efx_buffer_s {
+ efsys_dma_addr_t eb_addr;
+ size_t eb_size;
+ boolean_t eb_eop;
+} efx_buffer_t;
+
+extern void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added);
+
+extern void
+efx_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+/* TX */
+
+typedef struct efx_txq_s efx_txq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderTransmitQueueBlock 536c5fa5014944bf */
+typedef enum efx_tx_qstat_e {
+ TX_POST,
+ TX_UNALIGNED_SPLIT,
+ TX_NQSTATS
+} efx_tx_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderTransmitQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn int
+efx_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_tx_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_TXQ_MAXNDESCS 4096
+#define EFX_TXQ_MINNDESCS 512
+
+#define EFX_TXQ_NDESCS_MASK EFX_MASK(EFX_TXQ_MAXNDESCS, EFX_TXQ_MINNDESCS)
+
+#define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+
+extern __checkReturn int
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp);
+
+extern __checkReturn int
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added);
+
+extern void
+efx_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qenable(
+ __in efx_txq_t *etp);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char __cs *
+efx_tx_qstat_name(
+	__in		efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+
+/* FILTER */
+
+#if EFSYS_OPT_FILTER
+
+typedef enum efx_filter_flag_e {
+ EFX_FILTER_FLAG_RX_RSS = 0x01, /* use RSS to spread across
+ * multiple queues */
+ EFX_FILTER_FLAG_RX_SCATTER = 0x02, /* enable RX scatter */
+ EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04, /* MAC filter overrides
+ * any matching IP filter */
+} efx_filter_flag_t;
+
+typedef struct efx_filter_spec_s {
+ uint8_t efs_type;
+ uint8_t efs_flags;
+ uint16_t efs_dmaq_id;
+ uint32_t efs_dword[3];
+} efx_filter_spec_t;
+
+extern __checkReturn int
+efx_filter_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_filter_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+efx_rx_filter_insert(
+ __in efx_rxq_t *erp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn int
+efx_rx_filter_remove(
+ __in efx_rxq_t *erp,
+ __inout efx_filter_spec_t *spec);
+
+extern	void
+efx_filter_restore(
+ __in efx_nic_t *enp);
+
+extern void
+efx_filter_spec_rx_ipv4_tcp_full(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint32_t src_ip,
+ __in uint16_t src_tcp,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_tcp);
+
+extern void
+efx_filter_spec_rx_ipv4_tcp_wild(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_tcp);
+
+extern void
+efx_filter_spec_rx_ipv4_udp_full(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint32_t src_ip,
+ __in uint16_t src_udp,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_udp);
+
+extern void
+efx_filter_spec_rx_ipv4_udp_wild(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_udp);
+
+extern void
+efx_filter_spec_rx_mac_full(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint16_t vlan_id,
+ __in uint8_t *dest_mac);
+
+extern void
+efx_filter_spec_rx_mac_wild(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint8_t *dest_mac);
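+
+/*
+ * Illustrative RX filter setup (hypothetical driver code):
+ *
+ *	efx_filter_spec_t spec;
+ *
+ *	efx_filter_spec_rx_ipv4_tcp_wild(&spec, EFX_FILTER_FLAG_RX_RSS,
+ *	    dest_ip, dest_tcp);
+ *	rc = efx_rx_filter_insert(erp, &spec);
+ */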
+
+
+extern __checkReturn int
+efx_tx_filter_insert(
+ __in efx_txq_t *etp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn int
+efx_tx_filter_remove(
+ __in efx_txq_t *etp,
+ __inout efx_filter_spec_t *spec);
+
+extern void
+efx_filter_spec_tx_ipv4_tcp_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t src_ip,
+ __in uint16_t src_tcp,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_tcp);
+
+extern void
+efx_filter_spec_tx_ipv4_tcp_wild(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t src_ip,
+ __in uint16_t src_tcp);
+
+extern void
+efx_filter_spec_tx_ipv4_udp_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t src_ip,
+ __in uint16_t src_udp,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_udp);
+
+extern void
+efx_filter_spec_tx_ipv4_udp_wild(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t src_ip,
+ __in uint16_t src_udp);
+
+extern void
+efx_filter_spec_tx_mac_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vlan_id,
+ __in uint8_t *src_mac);
+
+extern void
+efx_filter_spec_tx_mac_wild(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t *src_mac);
+
+#endif /* EFSYS_OPT_FILTER */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_H */
diff --git a/sys/dev/sfxge/common/efx_bootcfg.c b/sys/dev/sfxge/common/efx_bootcfg.c
new file mode 100644
index 0000000..1a6a79c
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_bootcfg.c
@@ -0,0 +1,342 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_BOOTCFG
+
+/*
+ * Maximum size of BOOTCFG block across all NICs as understood by SFCgPXE.
+ * A multiple of 0x100 so trailing 0xff characters don't contribute to the
+ * checksum.
+ */
+#define BOOTCFG_MAX_SIZE 0x1000
+
+#define DHCP_END (uint8_t)0xff
+#define DHCP_PAD (uint8_t)0
+
+static __checkReturn uint8_t
+efx_bootcfg_csum(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ unsigned int pos;
+ uint8_t checksum = 0;
+
+ for (pos = 0; pos < size; pos++)
+ checksum += data[pos];
+ return (checksum);
+}
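+
+/*
+ * A correctly formatted sector sums to zero modulo 256: the checksum
+ * byte at data[0] is chosen to cancel the sum of the remaining bytes.
+ * As a worked example (values illustrative only), for the payload bytes
+ * { 0x01, 0x02, DHCP_END } the sum is 0x102, so
+ *
+ *	data[0] = (0x100 - (0x102 & 0xff)) & 0xff = 0xfe
+ *
+ * and efx_bootcfg_csum() over all four bytes wraps to zero.  This is
+ * also why BOOTCFG_MAX_SIZE is a multiple of 0x100: a run of 0x100
+ * trailing 0xff pad bytes adds 0xff00, which is 0 modulo 256.
+ */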
+
+static __checkReturn int
+efx_bootcfg_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *usedp)
+{
+ size_t offset = 0;
+ size_t used = 0;
+ int rc;
+
+ /* Start parsing tags immediately after the checksum */
+ for (offset = 1; offset < size; ) {
+ uint8_t tag;
+ uint8_t length;
+
+ /* Consume tag */
+ tag = data[offset];
+ if (tag == DHCP_END) {
+ offset++;
+ used = offset;
+ break;
+ }
+ if (tag == DHCP_PAD) {
+ offset++;
+ continue;
+ }
+
+ /* Consume length */
+ if (offset + 1 >= size) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ length = data[offset + 1];
+
+ /* Consume length bytes of option data */
+ if (offset + 1 + length >= size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ offset += 2 + length;
+ used = offset;
+ }
+
+ /* Checksum the entire sector, including bytes after any DHCP_END */
+ if (efx_bootcfg_csum(enp, data, size) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if (usedp != NULL)
+ *usedp = used;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
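+
+/*
+ * The sector verified above is a DHCP-style TLV stream: one checksum
+ * byte, then { tag, length, data[length] } options, terminated by
+ * DHCP_END (DHCP_PAD bytes are skipped).  A sketch of a well-formed
+ * sector:
+ *
+ *	offset 0		checksum
+ *	offset 1		tag
+ *	offset 2		length (n)
+ *	offsets 3 .. 2 + n	n bytes of option data
+ *	...			further options
+ *	last used byte		DHCP_END
+ */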
+
+ int
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ uint8_t *payload = NULL;
+ size_t used_bytes;
+ size_t sector_length;
+ int rc;
+
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &sector_length);
+ if (rc != 0)
+ goto fail1;
+
+ /*
+ * We need to read the entire BOOTCFG area to ensure we read all the
+ * tags, because legacy bootcfg sectors are not guaranteed to end with
+ * a DHCP_END character. If the user hasn't supplied a sufficiently
+ * large buffer then use our own buffer.
+ */
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+ if (sector_length > size) {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload);
+ if (payload == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+ } else
+ payload = (uint8_t *)data;
+
+ if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
+ goto fail3;
+
+ rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0,
+ (caddr_t)payload, sector_length);
+
+ efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+
+ if (rc != 0)
+ goto fail4;
+
+ /* Verify that the area is correctly formatted and checksummed */
+ rc = efx_bootcfg_verify(enp, (caddr_t)payload, sector_length,
+ &used_bytes);
+ if (rc != 0 || used_bytes == 0) {
+ payload[0] = (uint8_t)~DHCP_END;
+ payload[1] = DHCP_END;
+ used_bytes = 2;
+ }
+
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes <= sector_length);
+
+ /*
+ * Legacy bootcfg sectors don't terminate with a DHCP_END character.
+ * Modify the returned payload so it does. BOOTCFG_MAX_SIZE is by
+ * definition large enough for any valid (per-port) bootcfg sector,
+ * so reinitialise the sector if there isn't room for the character.
+ */
+ if (payload[used_bytes - 1] != DHCP_END) {
+ if (used_bytes + 1 > sector_length) {
+ payload[0] = 0;
+ used_bytes = 1;
+ }
+
+ payload[used_bytes] = DHCP_END;
+ ++used_bytes;
+ }
+
+ /*
+ * Verify that the user supplied buffer is large enough for the
+ * entire used bootcfg area, then copy into the user supplied buffer.
+ */
+ if (used_bytes > size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+ if (sector_length > size) {
+ memcpy(data, payload, used_bytes);
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+ }
+
+ /* Zero out the unused portion of the user buffer */
+ if (used_bytes < size)
+ (void) memset(data + used_bytes, 0, size - used_bytes);
+
+ /*
+ * The checksum includes trailing data after any DHCP_END character,
+ * which we've just modified (by truncation or appending DHCP_END).
+ */
+ data[0] -= efx_bootcfg_csum(enp, data, size);
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ if (sector_length > size)
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ int
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ uint8_t *chunk;
+ uint8_t checksum;
+ size_t sector_length;
+ size_t chunk_length;
+ size_t used_bytes;
+ size_t offset;
+ size_t remaining;
+ int rc;
+
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &sector_length);
+ if (rc != 0)
+ goto fail1;
+
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+
+ if ((rc = efx_bootcfg_verify(enp, data, size, &used_bytes)) != 0)
+ goto fail2;
+
+ /* The caller *must* terminate their block with a DHCP_END character */
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ if ((uint8_t)data[used_bytes - 1] != DHCP_END) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ /* Check that the hardware has support for this much data */
+ if (used_bytes > MIN(sector_length, BOOTCFG_MAX_SIZE)) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, &chunk_length);
+ if (rc != 0)
+ goto fail5;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, chunk_length, chunk);
+ if (chunk == NULL) {
+ rc = ENOMEM;
+ goto fail6;
+ }
+
+ if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail7;
+
+ /*
+ * Write the entire sector_length bytes of data in chunks. Zero out
+ * all data following the DHCP_END, and adjust the checksum
+ */
+ checksum = efx_bootcfg_csum(enp, data, used_bytes);
+ for (offset = 0; offset < sector_length; offset += remaining) {
+ remaining = MIN(chunk_length, sector_length - offset);
+
+ /* Fill chunk */
+ (void) memset(chunk, 0x0, chunk_length);
+ if (offset < used_bytes)
+ memcpy(chunk, data + offset,
+ MIN(remaining, used_bytes - offset));
+
+ /* Adjust checksum */
+ if (offset == 0)
+ chunk[0] -= checksum;
+
+ if ((rc = efx_nvram_write_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
+ offset, (caddr_t)chunk, remaining)) != 0)
+ goto fail8;
+ }
+
+ efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+
+ EFSYS_KMEM_FREE(enp->en_esip, chunk_length, chunk);
+
+ return (0);
+
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+
+ EFSYS_KMEM_FREE(enp->en_esip, chunk_length, chunk);
+fail6:
+ EFSYS_PROBE(fail6);
+
+ efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BOOTCFG */
diff --git a/sys/dev/sfxge/common/efx_ev.c b/sys/dev/sfxge/common/efx_ev.c
new file mode 100644
index 0000000..2203bc1
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_ev.c
@@ -0,0 +1,1112 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
+
+ __checkReturn int
+efx_ev_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (enp->en_mod_flags & EFX_MOD_EV) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ /*
+ * Program the event queue for receive and transmit queue
+ * flush events.
+ */
+ EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
+
+ enp->en_mod_flags |= EFX_MOD_EV;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+efx_ev_rx_not_ok(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in uint32_t label,
+ __in uint32_t id,
+ __inout uint16_t *flagsp)
+{
+ boolean_t ignore = B_FALSE;
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
+ EFSYS_PROBE(tobe_disc);
+ /* Assume this is a unicast address mismatch, unless we find
+ * below that either FSF_AZ_RX_EV_ETH_CRC_ERR or
+ * FSF_AZ_RX_EV_PAUSE_FRM_ERR is set.
+ */
+ (*flagsp) |= EFX_ADDR_MISMATCH;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
+ EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ (*flagsp) |= EFX_DISCARD;
+
+#if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER)
+ /* Look out for cases where the payload queue ran dry and ignore them.
+ *
+ * Sadly for the header/data split cases, the descriptor
+ * pointer in this event refers to the header queue and
+ * therefore cannot be easily detected as duplicate.
+ * So we drop these and rely on the receive processing seeing
+ * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
+ * the partially received packet.
+ */
+ if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
+ ignore = B_TRUE;
+#endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ EFSYS_PROBE(crc_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
+ EFSYS_PROBE(pause_frm_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
+ EFSYS_PROBE(owner_id_err);
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ EFSYS_PROBE(ipv4_err);
+ (*flagsp) &= ~EFX_CKSUM_IPV4;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ EFSYS_PROBE(udp_chk_err);
+ (*flagsp) &= ~EFX_CKSUM_TCPUDP;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
+
+ /*
+ * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
+ * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
+ * condition.
+ */
+ (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
+ }
+
+ return (ignore);
+}
+
+static __checkReturn boolean_t
+efx_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t id;
+ uint32_t size;
+ uint32_t label;
+ boolean_t ok;
+#if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER)
+ boolean_t sop;
+ boolean_t jumbo_cont;
+#endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */
+ uint32_t hdr_type;
+ boolean_t is_v6;
+ uint16_t flags;
+ boolean_t ignore;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Basic packet information */
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
+ size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
+ ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
+
+#if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER)
+ sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
+ jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
+#endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */
+
+ hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
+
+ is_v6 = (enp->en_family != EFX_FAMILY_FALCON &&
+ EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
+
+ /*
+ * If packet is marked as OK and packet type is TCP/IP or
+ * UDP/IP or other IP, then we can rely on the hardware checksums.
+ */
+ switch (hdr_type) {
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ flags = EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ flags = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ flags = 0;
+ break;
+ }
+
+#if EFSYS_OPT_RX_SCATTER || EFSYS_OPT_RX_HDR_SPLIT
+ /* Report scatter and header/lookahead split buffer flags */
+ if (sop)
+ flags |= EFX_PKT_START;
+ if (jumbo_cont)
+ flags |= EFX_PKT_CONT;
+#endif /* EFSYS_OPT_RX_SCATTER || EFSYS_OPT_RX_HDR_SPLIT */
+
+ /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
+ if (!ok) {
+ ignore = efx_ev_rx_not_ok(eep, eqp, label, id, &flags);
+ if (ignore) {
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ return (B_FALSE);
+ }
+ }
+
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ /* Detect multicast packets that didn't match the filter */
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
+ } else {
+ EFSYS_PROBE(mcast_mismatch);
+ flags |= EFX_ADDR_MISMATCH;
+ }
+ } else {
+ flags |= EFX_PKT_UNICAST;
+ }
+
+ /*
+ * The packet parser in Siena can abort parsing packets under
+ * certain error conditions, setting the PKT_NOT_PARSED bit
+ * (which clears PKT_OK). If this is set, then don't trust
+ * the PKT_TYPE field.
+ */
+ if (enp->en_family != EFX_FAMILY_FALCON && !ok) {
+ uint32_t parse_err;
+
+ parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
+ if (parse_err != 0)
+ flags |= EFX_CHECK_VLAN;
+ }
+
+ if (~flags & EFX_CHECK_VLAN) {
+ uint32_t pkt_type;
+
+ pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
+ if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, id, size, flags);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+efx_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+efx_ev_global(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_port_t *epp = &(enp->en_port);
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
+ should_abort = B_FALSE;
+
+ /* Check for a link management event */
+ if (EFX_QWORD_FIELD(*eqp, FSF_BZ_GLB_EV_XG_MNT_INTR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_GLOBAL_MNT);
+
+ EFSYS_PROBE(xg_mgt);
+
+ epp->ep_mac_poll_needed = B_TRUE;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+efx_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
+ uint32_t label;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, label);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, label);
+
+ break;
+ }
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
+ uint32_t label;
+ uint32_t failed;
+
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
+
+ if (failed) {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
+
+ EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, label);
+
+ should_abort = eecp->eec_rxq_flush_failed(arg, label);
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, label);
+
+ should_abort = eecp->eec_rxq_flush_done(arg, label);
+ }
+
+ break;
+ }
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+
+ break;
+
+ case FSE_AZ_EVQ_NOT_EN_EV:
+ EFSYS_PROBE(evq_not_en);
+ break;
+
+ case FSE_AZ_SRM_UPD_DONE_EV: {
+ uint32_t code;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
+
+ code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_sram != NULL);
+ should_abort = eecp->eec_sram(arg, code);
+
+ break;
+ }
+ case FSE_AZ_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+
+ break;
+ }
+ case FSE_AZ_TX_PKT_NON_TCP_UDP:
+ EFSYS_PROBE(tx_pkt_non_tcp_udp);
+ break;
+
+ case FSE_AZ_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+
+ break;
+ }
+ case FSE_AZ_RX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
+
+ EFSYS_PROBE(rx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_RX_DSC_ERROR, 0);
+
+ break;
+
+ case FSE_AZ_TX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
+
+ EFSYS_PROBE(tx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_TX_DSC_ERROR, 0);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+efx_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+
+ data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+#if EFSYS_OPT_MCDI
+
+static __checkReturn boolean_t
+efx_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned code;
+ boolean_t should_abort = B_FALSE;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ goto out;
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ siena_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ int rc;
+
+ if ((rc = siena_mon_ev(enp, eqp, &id, &value)) == 0)
+ should_abort = eecp->eec_monitor(arg, id, value);
+ else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+#else
+ should_abort = B_FALSE;
+#endif
+ break;
+ }
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, code);
+ break;
+ }
+
+out:
+ return (should_abort);
+}
+
+#endif /* EFSYS_OPT_MCDI */
+
+ __checkReturn int
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+ int rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rptr = count & eep->ee_mask;
+
+ EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
+
+ EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ size_t offset;
+ efx_qword_t qword;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
+
+ return (EFX_QWORD_FIELD(qword, EFX_DWORD_0) != 0xffffffff &&
+ EFX_QWORD_FIELD(qword, EFX_DWORD_1) != 0xffffffff);
+}
+
+#if EFSYS_OPT_EV_PREFETCH
+
+ void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ unsigned int offset;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+}
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+#define EFX_EV_BATCH 8
+
+#define EFX_EV_PRESENT(_qword) \
+ (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
+ EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
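+
+/*
+ * Event queue entries are initialised to all-ones, a pattern the
+ * hardware never writes as a valid event, so a qword whose two dwords
+ * both read 0xffffffff means "not yet delivered".  efx_ev_qpoll() below
+ * relies on this: it stops at the first absent entry and, once a batch
+ * has been processed, rewrites the consumed entries with all-ones
+ * (EFX_SET_QWORD) so they read as empty on the next lap of the ring.
+ */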
+
+ void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_qword_t ev[EFX_EV_BATCH];
+ unsigned int batch;
+ unsigned int total;
+ unsigned int count;
+ unsigned int index;
+ size_t offset;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+ EFSYS_ASSERT(countp != NULL);
+ EFSYS_ASSERT(eecp != NULL);
+
+ count = *countp;
+ do {
+ /* Read up until the end of the batch period */
+ batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (total = 0; total < batch; ++total) {
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
+
+ if (!EFX_EV_PRESENT(ev[total]))
+ break;
+
+ EFSYS_PROBE3(event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
+
+ offset += sizeof (efx_qword_t);
+ }
+
+#if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
+ /*
+ * Prefetch the next batch when we get within PREFETCH_PERIOD
+ * of a completed batch. If the batch is smaller, then prefetch
+ * immediately.
+ */
+ if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ /* Process the batch of events */
+ for (index = 0; index < total; ++index) {
+ boolean_t should_abort;
+ uint32_t code;
+ efx_ev_handler_t handler;
+
+#if EFSYS_OPT_EV_PREFETCH
+ /* Prefetch if we've now reached the batch period */
+ if (total == batch &&
+ index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
+ offset = (count + batch) & eep->ee_mask;
+ offset *= sizeof (efx_qword_t);
+
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+ }
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ EFX_EV_QSTAT_INCR(eep, EV_ALL);
+
+ code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
+ handler = eep->ee_handler[code];
+ EFSYS_ASSERT(handler != NULL);
+ should_abort = handler(eep, &(ev[index]), eecp, arg);
+ if (should_abort) {
+ /* Ignore subsequent events */
+ total = index + 1;
+ break;
+ }
+ }
+
+ /*
+ * Now that the hardware has most likely moved on to DMAing
+ * into the next cache line, clear the processed events. Take
+ * care to only clear out events that we've processed.
+ */
+ EFX_SET_QWORD(ev[0]);
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (index = 0; index < total; ++index) {
+ EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
+ offset += sizeof (efx_qword_t);
+ }
+
+ count += total;
+
+ } while (total == batch);
+
+ *countp = count;
+}
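+
+/*
+ * A typical interrupt handler drives the queue roughly as follows (a
+ * sketch only; count is owned by the caller and eecp is the driver's
+ * callback table):
+ *
+ *	if (efx_ev_qpending(eep, count)) {
+ *		efx_ev_qpoll(eep, &count, eecp, arg);
+ *		(void) efx_ev_qprime(eep, count);
+ *	}
+ *
+ * efx_ev_qpoll() advances count past each event it consumes, and
+ * efx_ev_qprime() publishes that read pointer so the hardware can
+ * raise the next interrupt.
+ */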
+
+ void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t ev;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_EV_DATA_DW0, (uint32_t)data);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
+ EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
+ EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
+
+ EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
+}
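+
+/*
+ * The event posted above travels the normal delivery path: it comes
+ * back through efx_ev_qpoll() with code FSE_AZ_EV_CODE_DRV_GEN_EV, is
+ * dispatched to efx_ev_drv_gen(), and the 16-bit data value is handed
+ * to the eec_software callback, giving callers a way to inject a
+ * software wakeup into an event queue.
+ */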
+
+ __checkReturn int
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned int locked;
+ efx_dword_t dword;
+ int rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if (us > enp->en_nic_cfg.enc_evq_moderation_max) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ if (enp->en_family == EFX_FAMILY_FALCON)
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_AB_TC_TIMER_MODE, FFE_AB_TIMER_MODE_DIS,
+ FRF_AB_TC_TIMER_VAL, 0);
+ else
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
+ FRF_CZ_TC_TIMER_VAL, 0);
+ } else {
+ uint32_t timer_val;
+
+ /* Calculate the timer value in quanta */
+ us -= (us % EFX_EV_TIMER_QUANTUM);
+ if (us < EFX_EV_TIMER_QUANTUM)
+ us = EFX_EV_TIMER_QUANTUM;
+
+ timer_val = us / EFX_EV_TIMER_QUANTUM;
+
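+ /*
+ * For example, assuming a 5us quantum (the value of
+ * EFX_EV_TIMER_QUANTUM is platform-defined), us == 12 rounds
+ * down to 10 and yields timer_val == 2.
+ */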
+ /* Moderation value is base 0 so we need to deduct 1 */
+ if (enp->en_family == EFX_FAMILY_FALCON)
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_AB_TC_TIMER_MODE, FFE_AB_TIMER_MODE_INT_HLDOFF,
+ FRF_AB_TIMER_VAL, timer_val - 1);
+ else
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
+ FRF_CZ_TC_TIMER_VAL, timer_val - 1);
+ }
+
+ locked = (eep->ee_index == 0) ? 1 : 0;
+
+ EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
+ eep->ee_index, &dword, locked);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __deref_out efx_evq_t **eepp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size;
+ efx_evq_t *eep;
+ efx_oword_t oword;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit);
+
+ if (!ISP2(n) || !(n & EFX_EVQ_NEVS_MASK)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_intr.ei_type == EFX_INTR_LINE &&
+ index >= EFX_MAXRSS_LEGACY) {
+ rc = EINVAL;
+ goto fail3;
+ }
+#endif
+ for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Allocate an EVQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
+ if (eep == NULL) {
+ rc = ENOMEM;
+ goto fail5;
+ }
+
+ eep->ee_magic = EFX_EVQ_MAGIC;
+ eep->ee_enp = enp;
+ eep->ee_index = index;
+ eep->ee_mask = n - 1;
+ eep->ee_esmp = esmp;
+
+ /* Set up the handler table */
+ eep->ee_handler[FSE_AZ_EV_CODE_RX_EV] = efx_ev_rx;
+ eep->ee_handler[FSE_AZ_EV_CODE_TX_EV] = efx_ev_tx;
+ eep->ee_handler[FSE_AZ_EV_CODE_DRIVER_EV] = efx_ev_driver;
+ eep->ee_handler[FSE_AZ_EV_CODE_GLOBAL_EV] = efx_ev_global;
+ eep->ee_handler[FSE_AZ_EV_CODE_DRV_GEN_EV] = efx_ev_drv_gen;
+#if EFSYS_OPT_MCDI
+ eep->ee_handler[FSE_AZ_EV_CODE_MCDI_EVRESPONSE] = efx_ev_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ /* Set up the new event queue */
+ if (enp->en_family != EFX_FAMILY_FALCON) {
+ EFX_POPULATE_OWORD_1(oword, FRF_CZ_TIMER_Q_EN, 1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword);
+ }
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
+ FRF_AZ_EVQ_BUF_BASE_ID, id);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword);
+
+ enp->en_ev_qcount++;
+ *eepp = eep;
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+#if EFSYS_OPT_RX_SCALE
+fail3:
+ EFSYS_PROBE(fail3);
+#endif
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock 67e9bdcd920059bd */
+static const char __cs * __cs __efx_ev_qstat_name[] = {
+ "all",
+ "rx",
+ "rx_ok",
+ "rx_recovery",
+ "rx_frm_trunc",
+ "rx_tobe_disc",
+ "rx_pause_frm_err",
+ "rx_buf_owner_id_err",
+ "rx_ipv4_hdr_chksum_err",
+ "rx_tcp_udp_chksum_err",
+ "rx_eth_crc_err",
+ "rx_ip_frag_err",
+ "rx_mcast_pkt",
+ "rx_mcast_hash_match",
+ "rx_tcp_ipv4",
+ "rx_tcp_ipv6",
+ "rx_udp_ipv4",
+ "rx_udp_ipv6",
+ "rx_other_ipv4",
+ "rx_other_ipv6",
+ "rx_non_ip",
+ "rx_overrun",
+ "tx",
+ "tx_wq_ff_full",
+ "tx_pkt_err",
+ "tx_pkt_too_big",
+ "tx_unexpected",
+ "global",
+ "global_phy",
+ "global_mnt",
+ "global_rx_recovery",
+ "driver",
+ "driver_srm_upd_done",
+ "driver_tx_descq_fls_done",
+ "driver_rx_descq_fls_done",
+ "driver_rx_descq_fls_failed",
+ "driver_rx_dsc_error",
+ "driver_tx_dsc_error",
+ "drv_gen",
+ "mcdi_response",
+};
+/* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
+
+ const char __cs *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, EV_NQSTATS);
+
+ return (__efx_ev_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+ void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_ev_qcount != 0);
+ --enp->en_ev_qcount;
+
+ /* Purge event queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
+ eep->ee_index, &oword);
+
+ if (enp->en_family != EFX_FAMILY_FALCON) {
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL,
+ eep->ee_index, &oword);
+ }
+
+ /* Free the EVQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+}
+
+ void
+efx_ev_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+}
diff --git a/sys/dev/sfxge/common/efx_filter.c b/sys/dev/sfxge/common/efx_filter.c
new file mode 100644
index 0000000..e915e9c
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_filter.c
@@ -0,0 +1,1017 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_FILTER
+
+/* "Fudge factors" - difference between programmed value and actual depth.
+ * Due to pipelined implementation we need to program H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define FILTER_CTL_SRCH_FUDGE_WILD 3
+#define FILTER_CTL_SRCH_FUDGE_FULL 1
+
+/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_filter_search() when the
+ * table is full.
+ */
+#define FILTER_CTL_SRCH_MAX 200
+
+/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple. */
+static uint16_t
+efx_filter_tbl_hash(
+ __in uint32_t key)
+{
+ uint16_t tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ (uint16_t)(key >> 16);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ return (tmp);
+}
+
+
+/* To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash. */
+static uint16_t
+efx_filter_tbl_increment(
+ __in uint32_t key)
+{
+ return ((uint16_t)(key * 2 - 1));
+}
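+
+/*
+ * Since (key * 2 - 1) is always odd and the table size is a power of
+ * two, the increment is coprime with the size: stepping by it modulo
+ * the size visits every slot before the sequence repeats.  Unbounded
+ * probing is instead prevented by the FILTER_CTL_SRCH_MAX cap in
+ * efx_filter_search().
+ */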
+
+static __checkReturn boolean_t
+efx_filter_test_used(
+ __in efx_filter_tbl_t *eftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(eftp->eft_bitmap, !=, NULL);
+ return ((eftp->eft_bitmap[index / 32] & (1 << (index % 32))) != 0);
+}
+
+static void
+efx_filter_set_used(
+ __in efx_filter_tbl_t *eftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(eftp->eft_bitmap, !=, NULL);
+ eftp->eft_bitmap[index / 32] |= (1 << (index % 32));
+ ++eftp->eft_used;
+}
+
+static void
+efx_filter_clear_used(
+ __in efx_filter_tbl_t *eftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(eftp->eft_bitmap, !=, NULL);
+ eftp->eft_bitmap[index / 32] &= ~(1 << (index % 32));
+
+ --eftp->eft_used;
+ EFSYS_ASSERT3U(eftp->eft_used, >=, 0);
+}
+
+
+static efx_filter_tbl_id_t
+efx_filter_tbl_id(
+ __in efx_filter_type_t type)
+{
+ efx_filter_tbl_id_t tbl_id;
+
+ switch (type)
+ {
+ case EFX_FILTER_RX_TCP_FULL:
+ case EFX_FILTER_RX_TCP_WILD:
+ case EFX_FILTER_RX_UDP_FULL:
+ case EFX_FILTER_RX_UDP_WILD:
+ tbl_id = EFX_FILTER_TBL_RX_IP;
+ break;
+
+#if EFSYS_OPT_SIENA
+ case EFX_FILTER_RX_MAC_FULL:
+ case EFX_FILTER_RX_MAC_WILD:
+ tbl_id = EFX_FILTER_TBL_RX_MAC;
+ break;
+
+ case EFX_FILTER_TX_TCP_FULL:
+ case EFX_FILTER_TX_TCP_WILD:
+ case EFX_FILTER_TX_UDP_FULL:
+ case EFX_FILTER_TX_UDP_WILD:
+ tbl_id = EFX_FILTER_TBL_TX_IP;
+ break;
+
+ case EFX_FILTER_TX_MAC_FULL:
+ case EFX_FILTER_TX_MAC_WILD:
+ tbl_id = EFX_FILTER_TBL_TX_MAC;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+ return (tbl_id);
+}
+
+static void
+efx_filter_reset_search_depth(
+ __inout efx_filter_t *efp,
+ __in efx_filter_tbl_id_t tbl_id)
+{
+ switch (tbl_id)
+ {
+ case EFX_FILTER_TBL_RX_IP:
+ efp->ef_depth[EFX_FILTER_RX_TCP_FULL] = 0;
+ efp->ef_depth[EFX_FILTER_RX_TCP_WILD] = 0;
+ efp->ef_depth[EFX_FILTER_RX_UDP_FULL] = 0;
+ efp->ef_depth[EFX_FILTER_RX_UDP_WILD] = 0;
+ break;
+
+#if EFSYS_OPT_SIENA
+ case EFX_FILTER_TBL_RX_MAC:
+ efp->ef_depth[EFX_FILTER_RX_MAC_FULL] = 0;
+ efp->ef_depth[EFX_FILTER_RX_MAC_WILD] = 0;
+ break;
+
+ case EFX_FILTER_TBL_TX_IP:
+ efp->ef_depth[EFX_FILTER_TX_TCP_FULL] = 0;
+ efp->ef_depth[EFX_FILTER_TX_TCP_WILD] = 0;
+ efp->ef_depth[EFX_FILTER_TX_UDP_FULL] = 0;
+ efp->ef_depth[EFX_FILTER_TX_UDP_WILD] = 0;
+ break;
+
+ case EFX_FILTER_TBL_TX_MAC:
+ efp->ef_depth[EFX_FILTER_TX_MAC_FULL] = 0;
+ efp->ef_depth[EFX_FILTER_TX_MAC_WILD] = 0;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+static void
+efx_filter_push_rx_limits(
+ __in efx_nic_t *enp)
+{
+ efx_filter_t *efp = &enp->en_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT,
+ efp->ef_depth[EFX_FILTER_RX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT,
+ efp->ef_depth[EFX_FILTER_RX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT,
+ efp->ef_depth[EFX_FILTER_RX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT,
+ efp->ef_depth[EFX_FILTER_RX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+
+#if EFSYS_OPT_SIENA
+ if (efp->ef_tbl[EFX_FILTER_TBL_RX_MAC].eft_size) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ efp->ef_depth[EFX_FILTER_RX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ efp->ef_depth[EFX_FILTER_RX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+#endif /* EFSYS_OPT_SIENA */
+
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+efx_filter_push_tx_limits(
+ __in efx_nic_t *enp)
+{
+ efx_filter_t *efp = &enp->en_filter;
+ efx_oword_t oword;
+
+ if (efp->ef_tbl[EFX_FILTER_TBL_TX_IP].eft_size == 0)
+ return;
+
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE,
+ efp->ef_depth[EFX_FILTER_TX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE,
+ efp->ef_depth[EFX_FILTER_TX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE,
+ efp->ef_depth[EFX_FILTER_TX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE,
+ efp->ef_depth[EFX_FILTER_TX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static __checkReturn uint32_t
+efx_filter_build(
+ __out efx_oword_t *filter,
+ __in efx_filter_spec_t *spec)
+{
+ uint32_t dword3;
+ uint32_t key;
+ uint8_t type = spec->efs_type;
+ uint8_t flags = spec->efs_flags;
+
+ switch (efx_filter_tbl_id(type)) {
+ case EFX_FILTER_TBL_RX_IP: {
+ boolean_t is_udp = (type == EFX_FILTER_RX_UDP_FULL ||
+ type == EFX_FILTER_RX_UDP_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_BZ_RSS_EN, (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_BZ_SCATTER_EN, (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_AZ_TCP_UDP, is_udp,
+ FRF_AZ_RXQ_ID, spec->efs_dmaq_id,
+ EFX_DWORD_2, spec->efs_dword[2],
+ EFX_DWORD_1, spec->efs_dword[1],
+ EFX_DWORD_0, spec->efs_dword[0]);
+ dword3 = is_udp;
+ break;
+ }
+
+#if EFSYS_OPT_SIENA
+ case EFX_FILTER_TBL_RX_MAC: {
+ boolean_t is_wild = (type == EFX_FILTER_RX_MAC_WILD);
+ EFX_POPULATE_OWORD_8(*filter,
+ FRF_CZ_RMFT_RSS_EN, (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_CZ_RMFT_SCATTER_EN, (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_CZ_RMFT_IP_OVERRIDE, (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ? 1 : 0,
+ FRF_CZ_RMFT_RXQ_ID, spec->efs_dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_DW1, spec->efs_dword[2],
+ FRF_CZ_RMFT_DEST_MAC_DW0, spec->efs_dword[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->efs_dword[0]);
+ dword3 = is_wild;
+ break;
+ }
+#endif /* EFSYS_OPT_SIENA */
+
+ case EFX_FILTER_TBL_TX_IP: {
+ boolean_t is_udp = (type == EFX_FILTER_TX_UDP_FULL ||
+ type == EFX_FILTER_TX_UDP_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TIFT_TCP_UDP, is_udp,
+ FRF_CZ_TIFT_TXQ_ID, spec->efs_dmaq_id,
+ EFX_DWORD_2, spec->efs_dword[2],
+ EFX_DWORD_1, spec->efs_dword[1],
+ EFX_DWORD_0, spec->efs_dword[0]);
+ dword3 = is_udp | spec->efs_dmaq_id << 1;
+ break;
+ }
+
+#if EFSYS_OPT_SIENA
+ case EFX_FILTER_TBL_TX_MAC: {
+ boolean_t is_wild = (type == EFX_FILTER_TX_MAC_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->efs_dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_DW1, spec->efs_dword[2],
+ FRF_CZ_TMFT_SRC_MAC_DW0, spec->efs_dword[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->efs_dword[0]);
+ dword3 = is_wild | spec->efs_dmaq_id << 1;
+ break;
+ }
+#endif /* EFSYS_OPT_SIENA */
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ }
+
+ key = spec->efs_dword[0] ^ spec->efs_dword[1] ^ spec->efs_dword[2] ^ dword3;
+ return (key);
+}
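+
+/*
+ * The key returned above is the XOR of the three match dwords with a
+ * table-specific fourth word built from the remaining match-relevant
+ * fields (the TCP/UDP and wildcard flags, and for TX tables the queue
+ * id); it is the value consumed by efx_filter_tbl_hash() and
+ * efx_filter_tbl_increment().
+ */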
+
+static __checkReturn int
+efx_filter_push_entry(
+ __inout efx_nic_t *enp,
+ __in efx_filter_type_t type,
+ __in int index,
+ __in efx_oword_t *eop)
+{
+ int rc;
+
+ switch (type)
+ {
+ case EFX_FILTER_RX_TCP_FULL:
+ case EFX_FILTER_RX_TCP_WILD:
+ case EFX_FILTER_RX_UDP_FULL:
+ case EFX_FILTER_RX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index, eop);
+ break;
+
+#if EFSYS_OPT_SIENA
+ case EFX_FILTER_RX_MAC_FULL:
+ case EFX_FILTER_RX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index, eop);
+ break;
+
+ case EFX_FILTER_TX_TCP_FULL:
+ case EFX_FILTER_TX_TCP_WILD:
+ case EFX_FILTER_TX_UDP_FULL:
+ case EFX_FILTER_TX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index, eop);
+ break;
+
+ case EFX_FILTER_TX_MAC_FULL:
+ case EFX_FILTER_TX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index, eop);
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ return (0);
+
+fail1:
+ return (rc);
+}
+
+
+static __checkReturn boolean_t
+efx_filter_equal(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ efx_filter_tbl_id_t tbl_id = efx_filter_tbl_id(left->efs_type);
+
+ if (left->efs_type != right->efs_type)
+ return (B_FALSE);
+
+ if (memcmp(left->efs_dword, right->efs_dword, sizeof(left->efs_dword)))
+ return (B_FALSE);
+
+ if ((tbl_id == EFX_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_FILTER_TBL_TX_MAC) &&
+ left->efs_dmaq_id != right->efs_dmaq_id)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+static __checkReturn int
+efx_filter_search(
+ __in efx_filter_tbl_t *eftp,
+ __in efx_filter_spec_t *spec,
+ __in uint32_t key,
+ __in boolean_t for_insert,
+ __out int *filter_index,
+ __out int *depth_required)
+{
+ unsigned hash, incr, filter_idx, depth;
+
+ hash = efx_filter_tbl_hash(key);
+ incr = efx_filter_tbl_increment(key);
+
+ filter_idx = hash & (eftp->eft_size - 1);
+ depth = 1;
+
+ for (;;) {
+ /* Return success if entry is used and matches this spec
+ * or entry is unused and we are trying to insert.
+ */
+ if (efx_filter_test_used(eftp, filter_idx) ?
+ efx_filter_equal(spec, &eftp->eft_spec[filter_idx]) :
+ for_insert) {
+ *filter_index = filter_idx;
+ *depth_required = depth;
+ return (0);
+ }
+
+ /* Return failure if we reached the maximum search depth */
+ if (depth == FILTER_CTL_SRCH_MAX)
+ return (for_insert ? EBUSY : ENOENT);
+
+ filter_idx = (filter_idx + incr) & (eftp->eft_size - 1);
+ ++depth;
+ }
+}
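+
+/*
+ * The probe sequence above is
+ *
+ *	index(n) = (hash + n * incr) & (eft_size - 1),	n = 0, 1, 2, ...
+ *
+ * capped at FILTER_CTL_SRCH_MAX steps.  The deepest position at which
+ * software ever inserted a filter is tracked in ef_depth[] and pushed
+ * (plus a fudge factor) into the hardware search-limit registers, so
+ * the hardware searches at least as deep as any resident filter.
+ */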
+
+ __checkReturn int
+efx_filter_insert_filter(
+ __in efx_nic_t *enp,
+ __in efx_filter_spec_t *spec,
+ __in boolean_t replace)
+{
+ efx_filter_t *efp = &enp->en_filter;
+ efx_filter_tbl_id_t tbl_id = efx_filter_tbl_id(spec->efs_type);
+ efx_filter_tbl_t *eftp = &efp->ef_tbl[tbl_id];
+ efx_filter_spec_t *saved_spec;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ int state;
+ uint32_t key;
+ int rc;
+
+ if (eftp->eft_size == 0)
+ return (EINVAL);
+
+ key = efx_filter_build(&filter, spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = efx_filter_search(eftp, spec, key, B_TRUE, &filter_idx, &depth);
+ if (rc != 0)
+ goto done;
+
+ EFSYS_ASSERT3U(filter_idx, <, eftp->eft_size);
+ saved_spec = &eftp->eft_spec[filter_idx];
+
+ if (efx_filter_test_used(eftp, filter_idx)) {
+ if (replace == B_FALSE) {
+ rc = EEXIST;
+ goto done;
+ }
+ }
+ efx_filter_set_used(eftp, filter_idx);
+ *saved_spec = *spec;
+
+ if (efp->ef_depth[spec->efs_type] < depth) {
+ efp->ef_depth[spec->efs_type] = depth;
+ if (tbl_id == EFX_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_FILTER_TBL_TX_MAC)
+ efx_filter_push_tx_limits(enp);
+ else
+ efx_filter_push_rx_limits(enp);
+ }
+
+ efx_filter_push_entry(enp, spec->efs_type, filter_idx, &filter);
+
+done:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (rc);
+}
+
+static void
+efx_filter_clear_entry(
+ __in efx_nic_t *enp,
+ __in efx_filter_tbl_t *eftp,
+ __in int index)
+{
+ efx_oword_t filter;
+
+ if (efx_filter_test_used(eftp, index)) {
+ efx_filter_clear_used(eftp, index);
+
+ EFX_ZERO_OWORD(filter);
+ efx_filter_push_entry(enp, eftp->eft_spec[index].efs_type,
+ index, &filter);
+
+ memset(&eftp->eft_spec[index], 0, sizeof(eftp->eft_spec[0]));
+ }
+}
+
+ __checkReturn int
+efx_filter_remove_filter(
+ __in efx_nic_t *enp,
+ __in efx_filter_spec_t *spec)
+{
+ efx_filter_t *efp = &enp->en_filter;
+ efx_filter_tbl_id_t tbl_id = efx_filter_tbl_id(spec->efs_type);
+ efx_filter_tbl_t *eftp = &efp->ef_tbl[tbl_id];
+ efx_filter_spec_t *saved_spec;
+ efx_oword_t filter;
+ int filter_idx, depth;
+ int state;
+ uint32_t key;
+ int rc;
+
+ key = efx_filter_build(&filter, spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = efx_filter_search(eftp, spec, key, B_FALSE, &filter_idx, &depth);
+ if (rc != 0)
+ goto out;
+
+ saved_spec = &eftp->eft_spec[filter_idx];
+
+ efx_filter_clear_entry(enp, eftp, filter_idx);
+ if (eftp->eft_used == 0)
+ efx_filter_reset_search_depth(efp, tbl_id);
+
+ rc = 0;
+
+out:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (rc);
+}
+
+ void
+efx_filter_remove_index(
+ __inout efx_nic_t *enp,
+ __in efx_filter_type_t type,
+ __in int index)
+{
+ efx_filter_t *efp = &enp->en_filter;
+ efx_filter_tbl_id_t tbl_id = efx_filter_tbl_id(type);
+ efx_filter_tbl_t *eftp = &efp->ef_tbl[tbl_id];
+ int state;
+
+ if (index < 0)
+ return;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ efx_filter_clear_entry(enp, eftp, index);
+ if (eftp->eft_used == 0)
+ efx_filter_reset_search_depth(efp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+ void
+efx_filter_tbl_clear(
+ __inout efx_nic_t *enp,
+ __in efx_filter_tbl_id_t tbl_id)
+{
+ efx_filter_t *efp = &enp->en_filter;
+ efx_filter_tbl_t *eftp = &efp->ef_tbl[tbl_id];
+ int index;
+ int state;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (index = 0; index < eftp->eft_size; ++index) {
+ efx_filter_clear_entry(enp, eftp, index);
+ }
+
+ if (eftp->eft_used == 0)
+ efx_filter_reset_search_depth(efp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+/* Restore filter state after a reset */
+ void
+efx_filter_restore(
+ __in efx_nic_t *enp)
+{
+ efx_filter_t *efp = &enp->en_filter;
+ efx_filter_tbl_id_t tbl_id;
+ efx_filter_tbl_t *eftp;
+ efx_filter_spec_t *spec;
+ efx_oword_t filter;
+ int filter_idx;
+ int state;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (tbl_id = 0; tbl_id < EFX_FILTER_NTBLS; tbl_id++) {
+ eftp = &efp->ef_tbl[tbl_id];
+ for (filter_idx = 0; filter_idx < eftp->eft_size; filter_idx++) {
+ if (!efx_filter_test_used(eftp, filter_idx))
+ continue;
+
+ spec = &eftp->eft_spec[filter_idx];
+ efx_filter_build(&filter, spec);
+ efx_filter_push_entry(enp, spec->efs_type,
+ filter_idx, &filter);
+ }
+ }
+
+ efx_filter_push_rx_limits(enp);
+ efx_filter_push_tx_limits(enp);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+ void
+efx_filter_redirect_index(
+ __inout efx_nic_t *enp,
+ __in efx_filter_type_t type,
+ __in int filter_index,
+ __in int rxq_index)
+{
+ efx_filter_t *efp = &enp->en_filter;
+ efx_filter_tbl_t *eftp =
+ &efp->ef_tbl[efx_filter_tbl_id(type)];
+ efx_filter_spec_t *spec;
+ efx_oword_t filter;
+ int state;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ spec = &eftp->eft_spec[filter_index];
+ spec->efs_dmaq_id = (uint16_t)rxq_index;
+
+ efx_filter_build(&filter, spec);
+ efx_filter_push_entry(enp, spec->efs_type, filter_index, &filter);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+ __checkReturn int
+efx_filter_init(
+ __in efx_nic_t *enp)
+{
+ efx_filter_t *efp = &enp->en_filter;
+ efx_filter_tbl_t *eftp;
+ int tbl_id;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER));
+
+ switch (enp->en_family)
+ {
+#if EFSYS_OPT_FALCON
+ case EFX_FAMILY_FALCON:
+ eftp = &efp->ef_tbl[EFX_FILTER_TBL_RX_IP];
+ eftp->eft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
+ break;
+#endif /* EFSYS_OPT_FALCON */
+
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eftp = &efp->ef_tbl[EFX_FILTER_TBL_RX_IP];
+ eftp->eft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
+
+ eftp = &efp->ef_tbl[EFX_FILTER_TBL_RX_MAC];
+ eftp->eft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+
+ eftp = &efp->ef_tbl[EFX_FILTER_TBL_TX_IP];
+ eftp->eft_size = FR_CZ_TX_FILTER_TBL0_ROWS;
+
+ eftp = &efp->ef_tbl[EFX_FILTER_TBL_TX_MAC];
+ eftp->eft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ for (tbl_id = 0; tbl_id < EFX_FILTER_NTBLS; tbl_id++) {
+ unsigned int bitmap_size;
+
+ eftp = &efp->ef_tbl[tbl_id];
+ if (eftp->eft_size == 0)
+ continue;
+
+ EFX_STATIC_ASSERT(sizeof(eftp->eft_bitmap[0]) == sizeof(uint32_t));
+ bitmap_size = (eftp->eft_size + (sizeof(uint32_t) * 8) - 1) / 8;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, eftp->eft_bitmap);
+ if (!eftp->eft_bitmap) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, eftp->eft_size * sizeof(*eftp->eft_spec),
+ eftp->eft_spec);
+ if (!eftp->eft_spec) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+ memset(eftp->eft_spec, 0, eftp->eft_size * sizeof(*eftp->eft_spec));
+ }
+ enp->en_mod_flags |= EFX_MOD_FILTER;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ efx_filter_fini(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+ return (rc);
+}
+
+ void
+efx_filter_fini(
+ __in efx_nic_t *enp)
+{
+ efx_filter_t *efp = &enp->en_filter;
+ efx_filter_tbl_id_t tbl_id;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ for (tbl_id = 0; tbl_id < EFX_FILTER_NTBLS; tbl_id++) {
+ efx_filter_tbl_t *eftp = &efp->ef_tbl[tbl_id];
+ unsigned int bitmap_size;
+
+ EFX_STATIC_ASSERT(sizeof(eftp->eft_bitmap[0]) == sizeof(uint32_t));
+ bitmap_size = (eftp->eft_size + (sizeof(uint32_t) * 8) - 1) / 8;
+
+ EFSYS_KMEM_FREE(enp->en_esip, bitmap_size, eftp->eft_bitmap);
+ eftp->eft_bitmap = NULL;
+
+ EFSYS_KMEM_FREE(enp->en_esip, eftp->eft_size * sizeof(*eftp->eft_spec),
+ eftp->eft_spec);
+ eftp->eft_spec = NULL;
+ }
+
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+}
+
+extern void
+efx_filter_spec_rx_ipv4_tcp_full(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint32_t src_ip,
+ __in uint16_t src_tcp,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_tcp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER)) == 0);
+
+ spec->efs_type = EFX_FILTER_RX_TCP_FULL;
+ spec->efs_flags = (uint8_t)flags;
+ spec->efs_dword[0] = src_tcp | src_ip << 16;
+ spec->efs_dword[1] = dest_tcp << 16 | src_ip >> 16;
+ spec->efs_dword[2] = dest_ip;
+}
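+
+/*
+ * Editorial note on the packing above (derived directly from the
+ * assignments): the three efs_dword words form a single 96-bit key,
+ * least significant word first:
+ *
+ * bits 0..15 source port
+ * bits 16..47 source IP
+ * bits 48..63 destination port
+ * bits 64..95 destination IP
+ *
+ * The _FULL UDP and TX helpers below reuse this layout. The _WILD
+ * helpers are not uniform: TCP_WILD keeps its single port in the high
+ * half of the pair (bits 48..63) while UDP_WILD places it in the low
+ * half (bits 0..15), exactly as the assignments read.
+ */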
+
+extern void
+efx_filter_spec_rx_ipv4_tcp_wild(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_tcp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER)) == 0);
+
+ spec->efs_type = EFX_FILTER_RX_TCP_WILD;
+ spec->efs_flags = (uint8_t)flags;
+ spec->efs_dword[0] = 0;
+ spec->efs_dword[1] = dest_tcp << 16;
+ spec->efs_dword[2] = dest_ip;
+}
+
+extern void
+efx_filter_spec_rx_ipv4_udp_full(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint32_t src_ip,
+ __in uint16_t src_udp,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_udp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER)) == 0);
+
+ spec->efs_type = EFX_FILTER_RX_UDP_FULL;
+ spec->efs_flags = (uint8_t)flags;
+ spec->efs_dword[0] = src_udp | src_ip << 16;
+ spec->efs_dword[1] = dest_udp << 16 | src_ip >> 16;
+ spec->efs_dword[2] = dest_ip;
+}
+
+extern void
+efx_filter_spec_rx_ipv4_udp_wild(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_udp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER)) == 0);
+
+ spec->efs_type = EFX_FILTER_RX_UDP_WILD;
+ spec->efs_flags = (uint8_t)flags;
+ spec->efs_dword[0] = dest_udp;
+ spec->efs_dword[1] = 0;
+ spec->efs_dword[2] = dest_ip;
+}
+
+#if EFSYS_OPT_SIENA
+extern void
+efx_filter_spec_rx_mac_full(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint16_t vlan_id,
+ __in uint8_t *dest_mac)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(dest_mac, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER |
+ EFX_FILTER_FLAG_RX_OVERRIDE_IP)) == 0);
+
+ spec->efs_type = EFX_FILTER_RX_MAC_FULL;
+ spec->efs_flags = (uint8_t)flags;
+ spec->efs_dword[0] = vlan_id;
+ spec->efs_dword[1] =
+ dest_mac[2] << 24 |
+ dest_mac[3] << 16 |
+ dest_mac[4] << 8 |
+ dest_mac[5];
+ spec->efs_dword[2] =
+ dest_mac[0] << 8 |
+ dest_mac[1];
+}
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+extern void
+efx_filter_spec_rx_mac_wild(
+ __inout efx_filter_spec_t *spec,
+ __in unsigned int flags,
+ __in uint8_t *dest_mac)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(dest_mac, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER |
+ EFX_FILTER_FLAG_RX_OVERRIDE_IP)) == 0);
+
+ spec->efs_type = EFX_FILTER_RX_MAC_WILD;
+ spec->efs_flags = (uint8_t)flags;
+ spec->efs_dword[0] = 0;
+ spec->efs_dword[1] =
+ dest_mac[2] << 24 |
+ dest_mac[3] << 16 |
+ dest_mac[4] << 8 |
+ dest_mac[5];
+ spec->efs_dword[2] =
+ dest_mac[0] << 8 |
+ dest_mac[1];
+}
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+extern void
+efx_filter_spec_tx_ipv4_tcp_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t src_ip,
+ __in uint16_t src_tcp,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_tcp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_type = EFX_FILTER_TX_TCP_FULL;
+ spec->efs_flags = 0;
+ spec->efs_dword[0] = src_tcp | src_ip << 16;
+ spec->efs_dword[1] = dest_tcp << 16 | src_ip >> 16;
+ spec->efs_dword[2] = dest_ip;
+}
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+extern void
+efx_filter_spec_tx_ipv4_tcp_wild(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t src_ip,
+ __in uint16_t src_tcp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_type = EFX_FILTER_TX_TCP_WILD;
+ spec->efs_flags = 0;
+ spec->efs_dword[0] = 0;
+ spec->efs_dword[1] = src_tcp << 16;
+ spec->efs_dword[2] = src_ip;
+}
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+extern void
+efx_filter_spec_tx_ipv4_udp_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t src_ip,
+ __in uint16_t src_udp,
+ __in uint32_t dest_ip,
+ __in uint16_t dest_udp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_type = EFX_FILTER_TX_UDP_FULL;
+ spec->efs_flags = 0;
+ spec->efs_dword[0] = src_udp | src_ip << 16;
+ spec->efs_dword[1] = dest_udp << 16 | src_ip >> 16;
+ spec->efs_dword[2] = dest_ip;
+}
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+extern void
+efx_filter_spec_tx_ipv4_udp_wild(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t src_ip,
+ __in uint16_t src_udp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_type = EFX_FILTER_TX_UDP_WILD;
+ spec->efs_flags = 0;
+ spec->efs_dword[0] = src_udp;
+ spec->efs_dword[1] = 0;
+ spec->efs_dword[2] = src_ip;
+}
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+extern void
+efx_filter_spec_tx_mac_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vlan_id,
+ __in uint8_t *src_mac)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(src_mac, !=, NULL);
+
+ spec->efs_type = EFX_FILTER_TX_MAC_FULL;
+ spec->efs_flags = 0;
+ spec->efs_dword[0] = vlan_id;
+ spec->efs_dword[1] =
+ src_mac[2] << 24 |
+ src_mac[3] << 16 |
+ src_mac[4] << 8 |
+ src_mac[5];
+ spec->efs_dword[2] =
+ src_mac[0] << 8 |
+ src_mac[1];
+}
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+extern void
+efx_filter_spec_tx_mac_wild(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t *src_mac)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(src_mac, !=, NULL);
+
+ spec->efs_type = EFX_FILTER_TX_MAC_WILD;
+ spec->efs_flags = 0;
+ spec->efs_dword[0] = 0;
+ spec->efs_dword[1] =
+ src_mac[2] << 24 |
+ src_mac[3] << 16 |
+ src_mac[4] << 8 |
+ src_mac[5];
+ spec->efs_dword[2] =
+ src_mac[0] << 8 |
+ src_mac[1];
+}
+#endif /* EFSYS_OPT_SIENA */
+
+
+#endif /* EFSYS_OPT_FILTER */
diff --git a/sys/dev/sfxge/common/efx_impl.h b/sys/dev/sfxge/common/efx_impl.h
new file mode 100644
index 0000000..5858e96
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_impl.h
@@ -0,0 +1,734 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SYS_EFX_IMPL_H
+#define _SYS_EFX_IMPL_H
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_regs.h"
+
+#if EFSYS_OPT_FALCON
+#include "falcon_impl.h"
+#endif /* EFSYS_OPT_FALCON */
+
+#if EFSYS_OPT_SIENA
+#include "siena_impl.h"
+#endif /* EFSYS_OPT_SIENA */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_MOD_MCDI 0x00000001
+#define EFX_MOD_PROBE 0x00000002
+#define EFX_MOD_NVRAM 0x00000004
+#define EFX_MOD_VPD 0x00000008
+#define EFX_MOD_NIC 0x00000010
+#define EFX_MOD_INTR 0x00000020
+#define EFX_MOD_EV 0x00000040
+#define EFX_MOD_RX 0x00000080
+#define EFX_MOD_TX 0x00000100
+#define EFX_MOD_PORT 0x00000200
+#define EFX_MOD_MON 0x00000400
+#define EFX_MOD_WOL 0x00000800
+#define EFX_MOD_FILTER 0x00001000
+
+#define EFX_RESET_MAC 0x00000001
+#define EFX_RESET_PHY 0x00000002
+
+typedef enum efx_mac_type_e {
+ EFX_MAC_INVALID = 0,
+ EFX_MAC_FALCON_GMAC,
+ EFX_MAC_FALCON_XMAC,
+ EFX_MAC_SIENA,
+ EFX_MAC_NTYPES
+} efx_mac_type_t;
+
+typedef struct efx_mac_ops_s {
+ int (*emo_reset)(efx_nic_t *); /* optional */
+ int (*emo_poll)(efx_nic_t *, efx_link_mode_t *);
+ int (*emo_up)(efx_nic_t *, boolean_t *);
+ int (*emo_reconfigure)(efx_nic_t *);
+#if EFSYS_OPT_LOOPBACK
+ int (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t,
+ efx_loopback_type_t);
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ int (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *);
+ int (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *,
+ uint16_t, boolean_t);
+ int (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efsys_stat_t *, uint32_t *);
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_mac_ops_t;
+
+typedef struct efx_phy_ops_s {
+ int (*epo_power)(efx_nic_t *, boolean_t); /* optional */
+ int (*epo_reset)(efx_nic_t *);
+ int (*epo_reconfigure)(efx_nic_t *);
+ int (*epo_verify)(efx_nic_t *);
+ int (*epo_uplink_check)(efx_nic_t *,
+ boolean_t *); /* optional */
+ int (*epo_downlink_check)(efx_nic_t *, efx_link_mode_t *,
+ unsigned int *, uint32_t *);
+ int (*epo_oui_get)(efx_nic_t *, uint32_t *);
+#if EFSYS_OPT_PHY_STATS
+ int (*epo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ uint32_t *);
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_PHY_PROPS
+#if EFSYS_OPT_NAMES
+ const char __cs *(*epo_prop_name)(efx_nic_t *, unsigned int);
+#endif /* EFSYS_OPT_NAMES */
+ int (*epo_prop_get)(efx_nic_t *, unsigned int, uint32_t,
+ uint32_t *);
+ int (*epo_prop_set)(efx_nic_t *, unsigned int, uint32_t);
+#endif /* EFSYS_OPT_PHY_PROPS */
+#if EFSYS_OPT_PHY_BIST
+ int (*epo_bist_start)(efx_nic_t *, efx_phy_bist_type_t);
+ int (*epo_bist_poll)(efx_nic_t *, efx_phy_bist_type_t,
+ efx_phy_bist_result_t *, uint32_t *,
+ unsigned long *, size_t);
+ void (*epo_bist_stop)(efx_nic_t *, efx_phy_bist_type_t);
+#endif /* EFSYS_OPT_PHY_BIST */
+} efx_phy_ops_t;
+
+typedef struct efx_port_s {
+ efx_mac_type_t ep_mac_type;
+ uint32_t ep_phy_type;
+ uint8_t ep_port;
+ uint32_t ep_mac_pdu;
+ uint8_t ep_mac_addr[6];
+ efx_link_mode_t ep_link_mode;
+ boolean_t ep_unicst;
+ boolean_t ep_brdcst;
+ unsigned int ep_fcntl;
+ boolean_t ep_fcntl_autoneg;
+ efx_oword_t ep_multicst_hash[2];
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t ep_loopback_type;
+ efx_link_mode_t ep_loopback_link_mode;
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t ep_phy_flags;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ efx_phy_led_mode_t ep_phy_led_mode;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+ efx_phy_media_type_t ep_fixed_port_type;
+ efx_phy_media_type_t ep_module_type;
+ uint32_t ep_adv_cap_mask;
+ uint32_t ep_lp_cap_mask;
+ uint32_t ep_default_adv_cap_mask;
+ uint32_t ep_phy_cap_mask;
+#if EFSYS_OPT_PHY_TXC43128 || EFSYS_OPT_PHY_QT2025C
+ union {
+ struct {
+ unsigned int bug10934_count;
+ } ep_txc43128;
+ struct {
+ unsigned int bug17190_count;
+ } ep_qt2025c;
+ };
+#endif
+ boolean_t ep_mac_poll_needed; /* falcon only */
+ boolean_t ep_mac_up; /* falcon only */
+ uint32_t ep_fwver; /* falcon only */
+ boolean_t ep_mac_drain;
+ boolean_t ep_mac_stats_pending;
+#if EFSYS_OPT_PHY_BIST
+ efx_phy_bist_type_t ep_current_bist;
+#endif
+ efx_mac_ops_t *ep_emop;
+ efx_phy_ops_t *ep_epop;
+} efx_port_t;
+
+typedef struct efx_mon_ops_s {
+ int (*emo_reset)(efx_nic_t *);
+ int (*emo_reconfigure)(efx_nic_t *);
+#if EFSYS_OPT_MON_STATS
+ int (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efx_mon_stat_value_t *);
+#endif /* EFSYS_OPT_MON_STATS */
+} efx_mon_ops_t;
+
+typedef struct efx_mon_s {
+ efx_mon_type_t em_type;
+ efx_mon_ops_t *em_emop;
+} efx_mon_t;
+
+typedef struct efx_intr_s {
+ efx_intr_type_t ei_type;
+ efsys_mem_t *ei_esmp;
+ unsigned int ei_level;
+} efx_intr_t;
+
+typedef struct efx_nic_ops_s {
+ int (*eno_probe)(efx_nic_t *);
+ int (*eno_reset)(efx_nic_t *);
+ int (*eno_init)(efx_nic_t *);
+#if EFSYS_OPT_DIAG
+ int (*eno_sram_test)(efx_nic_t *, efx_sram_pattern_fn_t);
+ int (*eno_register_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ void (*eno_fini)(efx_nic_t *);
+ void (*eno_unprobe)(efx_nic_t *);
+} efx_nic_ops_t;
+
+#define EFX_TXQ_LIMIT_TARGET 259
+#define EFX_RXQ_LIMIT_TARGET 768
+
+#if EFSYS_OPT_FILTER
+
+typedef enum efx_filter_type_e {
+ EFX_FILTER_RX_TCP_FULL, /* TCP/IPv4 4-tuple {dIP,dTCP,sIP,sTCP} */
+ EFX_FILTER_RX_TCP_WILD, /* TCP/IPv4 dest {dIP,dTCP, -, -} */
+ EFX_FILTER_RX_UDP_FULL, /* UDP/IPv4 4-tuple {dIP,dUDP,sIP,sUDP} */
+ EFX_FILTER_RX_UDP_WILD, /* UDP/IPv4 dest {dIP,dUDP, -, -} */
+
+#if EFSYS_OPT_SIENA
+ EFX_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */
+ EFX_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */
+
+ EFX_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */
+ EFX_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */
+ EFX_FILTER_TX_UDP_WILD, /* UDP/IPv4 source { -, -,sIP,sUDP} */
+
+ EFX_FILTER_TX_MAC_FULL, /* Ethernet source (MAC address, VLAN ID) */
+ EFX_FILTER_TX_MAC_WILD, /* Ethernet source (MAC address) */
+#endif /* EFSYS_OPT_SIENA */
+
+ EFX_FILTER_NTYPES
+} efx_filter_type_t;
+
+typedef enum efx_filter_tbl_id_e {
+ EFX_FILTER_TBL_RX_IP = 0,
+ EFX_FILTER_TBL_RX_MAC,
+ EFX_FILTER_TBL_TX_IP,
+ EFX_FILTER_TBL_TX_MAC,
+ EFX_FILTER_NTBLS
+} efx_filter_tbl_id_t;
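+
+/*
+ * (Editorial note: the grouping is implied by the names -- the four
+ * RX TCP/UDP types live in EFX_FILTER_TBL_RX_IP, the RX MAC types in
+ * EFX_FILTER_TBL_RX_MAC, and likewise for the TX tables; see
+ * efx_filter_tbl_id() in efx_filter.c.)
+ */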
+
+typedef struct efx_filter_tbl_s {
+ int eft_size; /* number of entries */
+ int eft_used; /* active count */
+ uint32_t *eft_bitmap; /* active bitmap */
+ efx_filter_spec_t *eft_spec; /* array of saved specs */
+} efx_filter_tbl_t;
+
+typedef struct efx_filter_s {
+ efx_filter_tbl_t ef_tbl[EFX_FILTER_NTBLS];
+ unsigned int ef_depth[EFX_FILTER_NTYPES];
+} efx_filter_t;
+
+
+extern __checkReturn int
+efx_filter_insert_filter(
+ __in efx_nic_t *enp,
+ __in efx_filter_spec_t *spec,
+ __in boolean_t replace);
+
+extern __checkReturn int
+efx_filter_remove_filter(
+ __in efx_nic_t *enp,
+ __in efx_filter_spec_t *spec);
+
+extern void
+efx_filter_remove_index(
+ __inout efx_nic_t *enp,
+ __in efx_filter_type_t type,
+ __in int filter_idx);
+
+extern void
+efx_filter_redirect_index(
+ __inout efx_nic_t *enp,
+ __in efx_filter_type_t type,
+ __in int filter_index,
+ __in int rxq_index);
+
+extern __checkReturn int
+efx_filter_clear_tbl(
+ __in efx_nic_t *enp,
+ __in efx_filter_tbl_id_t tbl);
+
+#endif /* EFSYS_OPT_FILTER */
+
+#if EFSYS_OPT_NVRAM
+typedef struct efx_nvram_ops_s {
+#if EFSYS_OPT_DIAG
+ int (*envo_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ int (*envo_size)(efx_nic_t *, efx_nvram_type_t, size_t *);
+ int (*envo_get_version)(efx_nic_t *, efx_nvram_type_t,
+ uint32_t *, uint16_t *);
+ int (*envo_rw_start)(efx_nic_t *, efx_nvram_type_t, size_t *);
+ int (*envo_read_chunk)(efx_nic_t *, efx_nvram_type_t,
+ unsigned int, caddr_t, size_t);
+ int (*envo_erase)(efx_nic_t *, efx_nvram_type_t);
+ int (*envo_write_chunk)(efx_nic_t *, efx_nvram_type_t,
+ unsigned int, caddr_t, size_t);
+ void (*envo_rw_finish)(efx_nic_t *, efx_nvram_type_t);
+ int (*envo_set_version)(efx_nic_t *, efx_nvram_type_t, uint16_t *);
+} efx_nvram_ops_t;
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+typedef struct efx_vpd_ops_s {
+ int (*evpdo_init)(efx_nic_t *);
+ int (*evpdo_size)(efx_nic_t *, size_t *);
+ int (*evpdo_read)(efx_nic_t *, caddr_t, size_t);
+ int (*evpdo_verify)(efx_nic_t *, caddr_t, size_t);
+ int (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t);
+ int (*evpdo_get)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *);
+ int (*evpdo_set)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *);
+ int (*evpdo_next)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *,
+ unsigned int *);
+ int (*evpdo_write)(efx_nic_t *, caddr_t, size_t);
+ void (*evpdo_fini)(efx_nic_t *);
+} efx_vpd_ops_t;
+#endif /* EFSYS_OPT_VPD */
+
+struct efx_nic_s {
+ uint32_t en_magic;
+ efx_family_t en_family;
+ uint32_t en_features;
+ efsys_identifier_t *en_esip;
+ efsys_lock_t *en_eslp;
+ efsys_bar_t *en_esbp;
+ unsigned int en_mod_flags;
+ unsigned int en_reset_flags;
+ efx_nic_cfg_t en_nic_cfg;
+ efx_port_t en_port;
+ efx_mon_t en_mon;
+ efx_intr_t en_intr;
+ uint32_t en_ev_qcount;
+ uint32_t en_rx_qcount;
+ uint32_t en_tx_qcount;
+ efx_nic_ops_t *en_enop;
+#if EFSYS_OPT_FILTER
+ efx_filter_t en_filter;
+#endif /* EFSYS_OPT_FILTER */
+#if EFSYS_OPT_NVRAM
+ efx_nvram_type_t en_nvram_locked;
+ efx_nvram_ops_t *en_envop;
+#endif /* EFSYS_OPT_NVRAM */
+#if EFSYS_OPT_VPD
+ efx_vpd_ops_t *en_evpdop;
+#endif /* EFSYS_OPT_VPD */
+ union {
+#if EFSYS_OPT_FALCON
+ struct {
+ falcon_spi_dev_t enu_fsd[FALCON_SPI_NTYPES];
+ falcon_i2c_t enu_fip;
+ boolean_t enu_i2c_locked;
+#if EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE
+ const uint8_t *enu_forced_cfg;
+#endif /* EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE */
+ uint8_t enu_mon_devid;
+#if EFSYS_OPT_PCIE_TUNE
+ unsigned int enu_nlanes;
+#endif /* EFSYS_OPT_PCIE_TUNE */
+ uint16_t enu_board_rev;
+ boolean_t enu_internal_sram;
+ uint8_t enu_sram_num_bank;
+ uint8_t enu_sram_bank_size;
+ } falcon;
+#endif /* EFSYS_OPT_FALCON */
+#if EFSYS_OPT_SIENA
+ struct {
+#if EFSYS_OPT_MCDI
+ efx_mcdi_iface_t enu_mip;
+#endif /* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+ unsigned int enu_partn_mask;
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+#if EFSYS_OPT_VPD
+ caddr_t enu_svpd;
+ size_t enu_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ } siena;
+#endif /* EFSYS_OPT_SIENA */
+ } en_u;
+};
+
+
+#define EFX_NIC_MAGIC 0x02121996
+
+typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *,
+ const efx_ev_callbacks_t *, void *);
+
+struct efx_evq_s {
+ uint32_t ee_magic;
+ efx_nic_t *ee_enp;
+ unsigned int ee_index;
+ unsigned int ee_mask;
+ efsys_mem_t *ee_esmp;
+#if EFSYS_OPT_QSTATS
+ uint32_t ee_stat[EV_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+ efx_ev_handler_t ee_handler[1 << FSF_AZ_EV_CODE_WIDTH];
+};
+
+#define EFX_EVQ_MAGIC 0x08081997
+
+#define EFX_EV_TIMER_QUANTUM 5
+
+struct efx_rxq_s {
+ uint32_t er_magic;
+ efx_nic_t *er_enp;
+ unsigned int er_index;
+ unsigned int er_mask;
+ efsys_mem_t *er_esmp;
+};
+
+#define EFX_RXQ_MAGIC 0x15022005
+
+struct efx_txq_s {
+ uint32_t et_magic;
+ efx_nic_t *et_enp;
+ unsigned int et_index;
+ unsigned int et_mask;
+ efsys_mem_t *et_esmp;
+#if EFSYS_OPT_QSTATS
+ uint32_t et_stat[TX_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+};
+
+#define EFX_TXQ_MAGIC 0x05092005
+
+#define EFX_MAC_ADDR_COPY(_dst, _src) \
+ do { \
+ (_dst)[0] = (_src)[0]; \
+ (_dst)[1] = (_src)[1]; \
+ (_dst)[2] = (_src)[2]; \
+ (_dst)[3] = (_src)[3]; \
+ (_dst)[4] = (_src)[4]; \
+ (_dst)[5] = (_src)[5]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_OPT_CHECK_REG
+#define EFX_CHECK_REG(_enp, _reg) \
+ do { \
+ const char __cs *name = #_reg; \
+ char min = name[4]; \
+ char max = name[5]; \
+ char rev; \
+ \
+ switch ((_enp)->en_family) { \
+ case EFX_FAMILY_FALCON: \
+ rev = 'B'; \
+ break; \
+ \
+ case EFX_FAMILY_SIENA: \
+ rev = 'C'; \
+ break; \
+ \
+ default: \
+ rev = '?'; \
+ break; \
+ } \
+ \
+ EFSYS_ASSERT3S(rev, >=, min); \
+ EFSYS_ASSERT3S(rev, <=, max); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_CHECK_REG(_enp, _reg) do { \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#endif
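+
+/*
+ * Worked example for EFX_CHECK_REG (editorial note, derived from the
+ * macro and its call sites below): the EFX_BAR_* macros pass the
+ * register as the parenthesised token (_reg), so #_reg stringizes to
+ * e.g. "(FR_AZ_INT_EN_REG_KER)". The leading '(' places the revision
+ * range at name[4]/name[5]: min == 'A', max == 'Z'. Falcon ('B') and
+ * Siena ('C') both fall inside ['A','Z'], so the assertions pass; a
+ * Siena-only FR_CZ_ register would trip the min assertion on Falcon.
+ */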
+
+#define EFX_BAR_READD(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ EFSYS_PROBE3(efx_bar_readd, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITED(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE3(efx_bar_writed, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ EFSYS_PROBE4(efx_bar_readq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_writeq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ EFSYS_PROBE6(efx_bar_reado, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE6(efx_bar_writeo, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ (3 * sizeof (efx_dword_t)) + \
+ ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
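+
+/*
+ * (Editorial note: EFX_BAR_TBL_WRITED3 differs from EFX_BAR_TBL_WRITED
+ * only in the extra (3 * sizeof (efx_dword_t)) offset, i.e. it writes
+ * the fourth, most significant, dword of a 128-bit table entry while
+ * leaving the other three untouched.)
+ */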
+
+#define EFX_BAR_TBL_READQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ EFSYS_PROBE5(efx_bar_tbl_readq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE5(efx_bar_tbl_writeq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READO(_enp, _reg, _index, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), B_TRUE); \
+ EFSYS_PROBE7(efx_bar_tbl_reado, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEO(_enp, _reg, _index, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_tbl_writeo, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), B_TRUE); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+extern __checkReturn int
+efx_mac_select(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+efx_phy_probe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_phy_unprobe(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_VPD
+
+/* VPD utility functions */
+
+extern __checkReturn int
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp);
+
+extern __checkReturn int
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp);
+
+extern __checkReturn int
+efx_vpd_hunk_reinit(
+ __in caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid);
+
+extern __checkReturn int
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp);
+
+extern __checkReturn int
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keyword,
+ __out_bcount_opt(*paylenp) unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp);
+
+extern __checkReturn int
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_DIAG
+
+extern efx_sram_pattern_fn_t __cs __efx_sram_pattern_fns[];
+
+typedef struct efx_register_set_s {
+ unsigned int address;
+ unsigned int step;
+ unsigned int rows;
+ efx_oword_t mask;
+} efx_register_set_t;
+
+extern __checkReturn int
+efx_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in size_t count);
+
+extern __checkReturn int
+efx_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_IMPL_H */
diff --git a/sys/dev/sfxge/common/efx_intr.c b/sys/dev/sfxge/common/efx_intr.c
new file mode 100644
index 0000000..77780b1
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_intr.c
@@ -0,0 +1,354 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+ __checkReturn int
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_INTR) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_INTR;
+
+ eip->ei_type = type;
+ eip->ei_esmp = esmp;
+
+ /*
+ * bug17213 workaround.
+ *
+ * Under legacy interrupts, don't share a level between fatal
+ * interrupts and event queue interrupts. Under MSI-X, they
+ * must share, or we won't get an interrupt.
+ */
+ if (enp->en_family == EFX_FAMILY_SIENA &&
+ eip->ei_type == EFX_INTR_LINE)
+ eip->ei_level = 0x1f;
+ else
+ eip->ei_level = 0;
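+
+ /*
+ * (Editorial note: 0x1f is the top of the KER_INT_LEVE_SEL
+ * range -- cf. the EFX_MASK32() bound in efx_intr_trigger() --
+ * so fatal notifications arrive on bit 31 of the queue mask
+ * read by efx_intr_status_line(), well away from the low
+ * levels used by event queues.)
+ */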
+
+ /* Enable all the genuinely fatal interrupts */
+ EFX_SET_OWORD(oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0);
+ if (enp->en_family >= EFX_FAMILY_SIENA)
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword);
+
+ /* Set up the interrupt address register */
+ EFX_POPULATE_OWORD_3(oword,
+ FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0,
+ FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff,
+ FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+efx_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+}
+
+ void
+efx_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFSYS_SPIN(10);
+}
+
+ void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ EFSYS_BAR_READO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFSYS_BAR_WRITEO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+}
+
+ __checkReturn int
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+ unsigned int count;
+ uint32_t sel;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ /* bug16757: No event queues can be initialized */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ switch (enp->en_family) {
+ case EFX_FAMILY_FALCON:
+ if (level > EFX_NINTR_FALCON) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ case EFX_FAMILY_SIENA:
+ if (level > EFX_NINTR_SIENA) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+
+ if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL))
+ return (ENOTSUP); /* avoid EFSYS_PROBE() */
+
+ sel = level;
+
+ /* Trigger a test interrupt */
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, sel);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ /*
+ * Wait up to 100ms for the interrupt to be raised before restoring
+ * KER_INT_LEVE_SEL. Ignore a failure to raise (the caller will
+ * observe this soon enough anyway), but always reset KER_INT_LEVE_SEL.
+ */
+ count = 0;
+ do {
+ EFSYS_SPIN(100); /* 100us */
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ } while (EFX_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER) && ++count < 1000);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+efx_intr_check_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efsys_mem_t *esmp = eip->ei_esmp;
+ efx_oword_t oword;
+
+ /* Read the syndrome */
+ EFSYS_MEM_READO(esmp, 0, &oword);
+
+ if (EFX_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT) != 0) {
+ EFSYS_PROBE(fatal);
+
+ /* Clear the fatal interrupt condition */
+ EFX_SET_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT, 0);
+ EFSYS_MEM_WRITEO(esmp, 0, &oword);
+
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+ void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_dword_t dword;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ /*
+ * Read the queue mask and implicitly acknowledge the
+ * interrupt.
+ */
+ EFX_BAR_READD(enp, FR_BZ_INT_ISR0_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ if (*qmaskp & (1U << eip->ei_level))
+ *fatalp = efx_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+ void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (message == eip->ei_level)
+ *fatalp = efx_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+ void
+efx_intr_fatal(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_DECODE_INTR_FATAL
+ efx_oword_t fatal;
+ efx_oword_t mem_per;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ EFX_BAR_READO(enp, FR_AZ_FATAL_INTR_REG_KER, &fatal);
+ EFX_ZERO_OWORD(mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0 ||
+ EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFX_BAR_READO(enp, FR_AZ_MEM_STAT_REG, &mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRAM_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_BUFID_DC_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_BUFID_DC_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_MEM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVF_OFLO_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVFF_OFLO, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_ILL_ADR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_ILL_ADDR, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+#else
+ EFSYS_ASSERT(0);
+#endif
+}
+
+ void
+efx_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ /* Clear the interrupt address register */
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+
+ enp->en_mod_flags &= ~EFX_MOD_INTR;
+}
diff --git a/sys/dev/sfxge/common/efx_mac.c b/sys/dev/sfxge/common/efx_mac.c
new file mode 100644
index 0000000..6852584
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_mac.c
@@ -0,0 +1,684 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MAC_FALCON_GMAC
+#include "falcon_gmac.h"
+#endif
+
+#if EFSYS_OPT_MAC_FALCON_XMAC
+#include "falcon_xmac.h"
+#endif
+
+#if EFSYS_OPT_MAC_FALCON_GMAC
+static efx_mac_ops_t __cs __efx_falcon_gmac_ops = {
+ falcon_gmac_reset, /* emo_reset */
+ falcon_mac_poll, /* emo_poll */
+ falcon_mac_up, /* emo_up */
+ falcon_gmac_reconfigure, /* emo_reconfigure */
+#if EFSYS_OPT_LOOPBACK
+ falcon_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ falcon_mac_stats_upload, /* emo_stats_upload */
+ NULL, /* emo_stats_periodic */
+ falcon_gmac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_MAC_FALCON_GMAC */
+
+#if EFSYS_OPT_MAC_FALCON_XMAC
+static efx_mac_ops_t __cs __efx_falcon_xmac_ops = {
+ falcon_xmac_reset, /* emo_reset */
+ falcon_mac_poll, /* emo_poll */
+ falcon_mac_up, /* emo_up */
+ falcon_xmac_reconfigure, /* emo_reconfigure */
+#if EFSYS_OPT_LOOPBACK
+ falcon_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ falcon_mac_stats_upload, /* emo_stats_upload */
+ NULL, /* emo_stats_periodic */
+ falcon_xmac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_MAC_FALCON_XMAC */
+
+#if EFSYS_OPT_SIENA
+static efx_mac_ops_t __cs __efx_siena_mac_ops = {
+ NULL, /* emo_reset */
+ siena_mac_poll, /* emo_poll */
+ siena_mac_up, /* emo_up */
+ siena_mac_reconfigure, /* emo_reconfigure */
+#if EFSYS_OPT_LOOPBACK
+ siena_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ siena_mac_stats_upload, /* emo_stats_upload */
+ siena_mac_stats_periodic, /* emo_stats_periodic */
+ siena_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+static efx_mac_ops_t __cs * __cs __efx_mac_ops[] = {
+ NULL,
+#if EFSYS_OPT_MAC_FALCON_GMAC
+ &__efx_falcon_gmac_ops,
+#else
+ NULL,
+#endif /* EFSYS_OPT_MAC_FALCON_GMAC */
+#if EFSYS_OPT_MAC_FALCON_XMAC
+ &__efx_falcon_xmac_ops,
+#else
+ NULL,
+#endif /* EFSYS_OPT_MAC_FALCON_XMAC */
+#if EFSYS_OPT_SIENA
+ &__efx_siena_mac_ops,
+#else
+ NULL,
+#endif /* EFSYS_OPT_SIENA */
+};
+
+ __checkReturn int
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ uint32_t old_pdu;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (pdu < EFX_MAC_PDU_MIN) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (pdu > EFX_MAC_PDU_MAX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ old_pdu = epp->ep_mac_pdu;
+ epp->ep_mac_pdu = (uint32_t)pdu;
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ epp->ep_mac_pdu = old_pdu;
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t old_addr[6];
+ uint32_t oui;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (addr[0] & 0x01) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ oui = addr[0] << 16 | addr[1] << 8 | addr[2];
+ if (oui == 0x000000) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFX_MAC_ADDR_COPY(old_addr, epp->ep_mac_addr);
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, addr);
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, old_addr);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t unicst,
+ __in boolean_t brdcst)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ boolean_t old_unicst;
+ boolean_t old_brdcst;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ old_unicst = epp->ep_unicst;
+ old_brdcst = epp->ep_brdcst;
+
+ epp->ep_unicst = unicst;
+ epp->ep_brdcst = brdcst;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ epp->ep_unicst = old_unicst;
+ epp->ep_brdcst = old_brdcst;
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (epp->ep_mac_drain == enabled)
+ return (0);
+
+ epp->ep_mac_drain = enabled;
+
+ if (enabled && emop->emo_reset != NULL) {
+ if ((rc = emop->emo_reset(enp)) != 0)
+ goto fail1;
+
+ EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_MAC);
+ enp->en_reset_flags &= ~EFX_RESET_MAC;
+ }
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((rc = emop->emo_up(enp, mac_upp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ efx_phy_ops_t *epop = epp->ep_epop;
+ unsigned int old_fcntl;
+ boolean_t old_autoneg;
+ unsigned int old_adv_cap;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((fcntl & ~(EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE)) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ignore a request to set flow control autonegotiation
+ * if the PHY doesn't support it.
+ */
+ if (~epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ autoneg = B_FALSE;
+
+ old_fcntl = epp->ep_fcntl;
+ old_autoneg = epp->ep_fcntl_autoneg;
+ old_adv_cap = epp->ep_adv_cap_mask;
+
+ epp->ep_fcntl = fcntl;
+ epp->ep_fcntl_autoneg = autoneg;
+
+ /*
+ * If the PHY supports autonegotiation, then encode the flow control
+ * settings in the advertised capabilities, and restart AN. Otherwise,
+ * just push the new settings directly to the MAC.
+ */
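+ /*
+ * (Editorial addition: the resulting IEEE 802.3 pause-capability
+ * encoding, as produced by the bit manipulation below, is:
+ *
+ * RESPOND | GENERATE -> PAUSE = 1, ASYM = 0
+ * RESPOND only -> PAUSE = 1, ASYM = 1
+ * GENERATE only -> PAUSE = 0, ASYM = 1
+ * neither -> PAUSE = 0, ASYM = 0)
+ */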
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN)) {
+ if (fcntl & EFX_FCNTL_RESPOND)
+ epp->ep_adv_cap_mask |= (1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+ else
+ epp->ep_adv_cap_mask &= ~(1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+
+ if (fcntl & EFX_FCNTL_GENERATE)
+ epp->ep_adv_cap_mask ^= (1 << EFX_PHY_CAP_ASYM);
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+ } else {
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_fcntl = old_fcntl;
+ epp->ep_fcntl_autoneg = old_autoneg;
+ epp->ep_adv_cap_mask = old_adv_cap;
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int wanted;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ /*
+ * If the PHY supports auto negotiation, then the requested flow
+ * control settings are encoded in the advertised capabilities.
+ */
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN)) {
+ wanted = 0;
+
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_PAUSE))
+ wanted = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_ASYM))
+ wanted ^= EFX_FCNTL_GENERATE;
+ } else
+ wanted = epp->ep_fcntl;
+
+ *fcntl_linkp = epp->ep_fcntl;
+ *fcntl_wantedp = wanted;
+}
+
+ __checkReturn int
+efx_mac_hash_set(
+ __in efx_nic_t *enp,
+ __in_ecount(EFX_MAC_HASH_BITS) unsigned int const *bucket)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ efx_oword_t old_hash[2];
+ unsigned int index;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash));
+
+ /* Set the lower 128 bits of the hash */
+ EFX_ZERO_OWORD(epp->ep_multicst_hash[0]);
+ for (index = 0; index < 128; index++) {
+ if (bucket[index] != 0)
+ EFX_SET_OWORD_BIT(epp->ep_multicst_hash[0], index);
+ }
+
+ /* Set the upper 128 bits of the hash */
+ EFX_ZERO_OWORD(epp->ep_multicst_hash[1]);
+ for (index = 0; index < 128; index++) {
+ if (bucket[index + 128] != 0)
+ EFX_SET_OWORD_BIT(epp->ep_multicst_hash[1], index);
+ }
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash));
+
+ return (rc);
+}
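+
+/*
+ * Usage sketch (illustrative only; the caller code shown is an
+ * assumption, not part of this file). A client that wants to accept
+ * all multicast traffic can simply mark every bucket:
+ *
+ *	unsigned int bucket[EFX_MAC_HASH_BITS];
+ *	unsigned int index;
+ *	int rc;
+ *
+ *	for (index = 0; index < EFX_MAC_HASH_BITS; index++)
+ *		bucket[index] = 1;
+ *	if ((rc = efx_mac_hash_set(enp, bucket)) != 0)
+ *		goto fail;
+ */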
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED EfxMacStatNamesBlock adf707adba80813e */
+static const char __cs * __cs __efx_mac_stat_name[] = {
+ "rx_octets",
+ "rx_pkts",
+ "rx_unicst_pkts",
+ "rx_multicst_pkts",
+ "rx_brdcst_pkts",
+ "rx_pause_pkts",
+ "rx_le_64_pkts",
+ "rx_65_to_127_pkts",
+ "rx_128_to_255_pkts",
+ "rx_256_to_511_pkts",
+ "rx_512_to_1023_pkts",
+ "rx_1024_to_15xx_pkts",
+ "rx_ge_15xx_pkts",
+ "rx_errors",
+ "rx_fcs_errors",
+ "rx_drop_events",
+ "rx_false_carrier_errors",
+ "rx_symbol_errors",
+ "rx_align_errors",
+ "rx_internal_errors",
+ "rx_jabber_pkts",
+ "rx_lane0_char_err",
+ "rx_lane1_char_err",
+ "rx_lane2_char_err",
+ "rx_lane3_char_err",
+ "rx_lane0_disp_err",
+ "rx_lane1_disp_err",
+ "rx_lane2_disp_err",
+ "rx_lane3_disp_err",
+ "rx_match_fault",
+ "rx_nodesc_drop_cnt",
+ "tx_octets",
+ "tx_pkts",
+ "tx_unicst_pkts",
+ "tx_multicst_pkts",
+ "tx_brdcst_pkts",
+ "tx_pause_pkts",
+ "tx_le_64_pkts",
+ "tx_65_to_127_pkts",
+ "tx_128_to_255_pkts",
+ "tx_256_to_511_pkts",
+ "tx_512_to_1023_pkts",
+ "tx_1024_to_15xx_pkts",
+ "tx_ge_15xx_pkts",
+ "tx_errors",
+ "tx_sgl_col_pkts",
+ "tx_mult_col_pkts",
+ "tx_ex_col_pkts",
+ "tx_late_col_pkts",
+ "tx_def_pkts",
+ "tx_ex_def_pkts",
+};
+/* END MKCONFIG GENERATED EfxMacStatNamesBlock */
+
+ __checkReturn const char __cs *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MAC_NSTATS);
+ return (__efx_mac_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn int
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ /*
+ * Don't assert !ep_mac_stats_pending, because the client might
+ * have failed to finalise statistics when previously stopping
+ * the port.
+ */
+ if ((rc = emop->emo_stats_upload(enp, esmp)) != 0)
+ goto fail1;
+
+ epp->ep_mac_stats_pending = B_TRUE;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+
+ if (emop->emo_stats_periodic == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = emop->emo_stats_periodic(enp, esmp, period_ms, events)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn int
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *essp,
+ __in uint32_t *generationp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ rc = emop->emo_stats_update(enp, esmp, essp, generationp);
+ if (rc == 0)
+ epp->ep_mac_stats_pending = B_FALSE;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+ __checkReturn int
+efx_mac_select(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_type_t type = EFX_MAC_INVALID;
+ efx_mac_ops_t *emop;
+ int rc = EINVAL;
+
+#if EFSYS_OPT_SIENA
+ if (enp->en_family == EFX_FAMILY_SIENA) {
+ type = EFX_MAC_SIENA;
+ goto chosen;
+ }
+#endif
+
+#if EFSYS_OPT_FALCON
+ switch (epp->ep_link_mode) {
+#if EFSYS_OPT_MAC_FALCON_GMAC
+ case EFX_LINK_100HDX:
+ case EFX_LINK_100FDX:
+ case EFX_LINK_1000HDX:
+ case EFX_LINK_1000FDX:
+ type = EFX_MAC_FALCON_GMAC;
+ goto chosen;
+#endif /* EFSYS_OPT_MAC_FALCON_GMAC */
+
+#if EFSYS_OPT_MAC_FALCON_XMAC
+ case EFX_LINK_10000FDX:
+ type = EFX_MAC_FALCON_XMAC;
+ goto chosen;
+#endif /* EFSYS_OPT_MAC_FALCON_XMAC */
+
+ default:
+#if EFSYS_OPT_MAC_FALCON_GMAC && EFSYS_OPT_MAC_FALCON_XMAC
+ /* Only initialise a MAC supported by the PHY */
+ if (epp->ep_phy_cap_mask &
+ ((1 << EFX_PHY_CAP_1000FDX) |
+ (1 << EFX_PHY_CAP_1000HDX) |
+ (1 << EFX_PHY_CAP_100FDX) |
+ (1 << EFX_PHY_CAP_100HDX) |
+ (1 << EFX_PHY_CAP_10FDX) |
+ (1 << EFX_PHY_CAP_10HDX)))
+ type = EFX_MAC_FALCON_GMAC;
+ else
+ type = EFX_MAC_FALCON_XMAC;
+#elif EFSYS_OPT_MAC_FALCON_GMAC
+ type = EFX_MAC_FALCON_GMAC;
+#else
+ type = EFX_MAC_FALCON_XMAC;
+#endif
+ goto chosen;
+ }
+#endif /* EFSYS_OPT_FALCON */
+
+chosen:
+ EFSYS_ASSERT(type != EFX_MAC_INVALID);
+ EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES);
+ emop = epp->ep_emop = (efx_mac_ops_t *)__efx_mac_ops[type];
+ EFSYS_ASSERT(emop != NULL);
+
+ epp->ep_mac_type = type;
+
+ if (emop->emo_reset != NULL) {
+ if ((rc = emop->emo_reset(enp)) != 0)
+ goto fail1;
+
+ EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_MAC);
+ enp->en_reset_flags &= ~EFX_RESET_MAC;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
diff --git a/sys/dev/sfxge/common/efx_mcdi.c b/sys/dev/sfxge/common/efx_mcdi.c
new file mode 100644
index 0000000..7d01e9b
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_mcdi.c
@@ -0,0 +1,733 @@
+/*-
+ * Copyright 2008-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_mcdi.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MCDI
+
+/* Shared memory layout */
+
+#define MCDI_P1_DBL_OFST 0x0
+#define MCDI_P2_DBL_OFST 0x1
+#define MCDI_P1_PDU_OFST 0x2
+#define MCDI_P2_PDU_OFST 0x42
+#define MCDI_P1_REBOOT_OFST 0x1fe
+#define MCDI_P2_REBOOT_OFST 0x1ff
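+
+/*
+ * Resulting window layout (the offsets above are dword indices into
+ * the FR_CZ_MC_TREG_SMEM table, as used throughout this file):
+ *
+ * 0x000 port 1 doorbell
+ * 0x001 port 2 doorbell
+ * 0x002 port 1 PDU (header, then payload)
+ * 0x042 port 2 PDU (header, then payload)
+ * 0x1fe port 1 reboot/status word
+ * 0x1ff port 2 reboot/status word
+ */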
+
+/*
+ * A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms then wait 10ms for the status word to be set.
+ */
+#define MCDI_STATUS_SLEEP_US 10000
+
+ void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ efx_dword_t dword;
+ unsigned int seq;
+ unsigned int xflags;
+ unsigned int pdur;
+ unsigned int dbr;
+ unsigned int pos;
+ int state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ switch (emip->emi_port) {
+ case 1:
+ pdur = MCDI_P1_PDU_OFST;
+ dbr = MCDI_P1_DBL_OFST;
+ break;
+ case 2:
+ pdur = MCDI_P2_PDU_OFST;
+ dbr = MCDI_P2_DBL_OFST;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ pdur = dbr = 0;
+ }
+
+ /*
+ * efx_mcdi_request_start() is naturally serialised against both
+ * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
+ * by virtue of there only being one outstanding MCDI request.
+ * Unfortunately, upper layers may also call efx_mcdi_request_abort()
+ * at any time, to time out a pending MCDI request. That request may
+ * then subsequently complete, meaning efx_mcdi_ev_cpl() or
+ * efx_mcdi_ev_death() may end up running in parallel with
+ * efx_mcdi_request_start(). This race is handled by ensuring that
+ * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
+ * en_eslp lock.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ EFSYS_ASSERT(emip->emi_pending_req == NULL);
+ emip->emi_pending_req = emrp;
+ emip->emi_ev_cpl = ev_cpl;
+ emip->emi_poll_cnt = 0;
+ seq = emip->emi_seq++ & 0xf;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ xflags = 0;
+ if (ev_cpl)
+ xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+ /* Construct the header in shared memory */
+ EFX_POPULATE_DWORD_6(dword,
+ MCDI_HEADER_CODE, emrp->emr_cmd,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, emrp->emr_in_length,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_TRUE);
+
+ for (pos = 0; pos < emrp->emr_in_length; pos += sizeof (efx_dword_t)) {
+ memcpy(&dword, MCDI_IN(*emrp, efx_dword_t, pos),
+ MIN(sizeof (dword), emrp->emr_in_length - pos));
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + 1 + (pos >> 2), &dword, B_FALSE);
+ }
+
+ /* Ring the doorbell */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xd004be11);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, dbr, &dword, B_FALSE);
+}
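+
+/*
+ * Caller-side sketch (illustrative only; the attempt counter, bound and
+ * delay are hypothetical, not part of this interface): a polled
+ * transport pairs efx_mcdi_request_start() with efx_mcdi_request_poll(),
+ * falling back to efx_mcdi_request_abort() on timeout:
+ *
+ *	efx_mcdi_request_start(enp, emrp, B_FALSE);
+ *	while (!efx_mcdi_request_poll(enp)) {
+ *		if (++attempts > max_attempts) {
+ *			(void) efx_mcdi_request_abort(enp);
+ *			break;
+ *		}
+ *		EFSYS_SPIN(poll_delay_us);
+ *	}
+ */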
+
+static void
+efx_mcdi_request_copyout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ unsigned int pos;
+ unsigned int pdur;
+ efx_dword_t data;
+
+ pdur = (emip->emi_port == 1) ? MCDI_P1_PDU_OFST : MCDI_P2_PDU_OFST;
+
+ /* Copy payload out if caller supplied buffer */
+ if (emrp->emr_out_buf != NULL) {
+ size_t bytes = MIN(emrp->emr_out_length_used,
+ emrp->emr_out_length);
+ for (pos = 0; pos < bytes; pos += sizeof (efx_dword_t)) {
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + 1 + (pos >> 2), &data, B_FALSE);
+ memcpy(MCDI_OUT(*emrp, efx_dword_t, pos), &data,
+ MIN(sizeof (data), bytes - pos));
+ }
+ }
+}
+
+static int
+efx_mcdi_request_errcode(
+ __in unsigned int err)
+{
+
+ switch (err) {
+ case MC_CMD_ERR_ENOENT:
+ return (ENOENT);
+ case MC_CMD_ERR_EINTR:
+ return (EINTR);
+ case MC_CMD_ERR_EACCES:
+ return (EACCES);
+ case MC_CMD_ERR_EBUSY:
+ return (EBUSY);
+ case MC_CMD_ERR_EINVAL:
+ return (EINVAL);
+ case MC_CMD_ERR_EDEADLK:
+ return (EDEADLK);
+ case MC_CMD_ERR_ENOSYS:
+ return (ENOTSUP);
+ case MC_CMD_ERR_ETIME:
+ return (ETIMEDOUT);
+#ifdef WITH_MCDI_V2
+ case MC_CMD_ERR_EAGAIN:
+ return (EAGAIN);
+ case MC_CMD_ERR_ENOSPC:
+ return (ENOSPC);
+#endif
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, err);
+ return (EIO);
+ }
+}
+
+static void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ const efx_mcdi_transport_t *emtp = emip->emi_mtp;
+ efx_mcdi_exception_t exception;
+
+ /* Reboot or Assertion failure only */
+ EFSYS_ASSERT(rc == EIO || rc == EINTR);
+
+ /*
+ * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
+ * then the EIO is not worthy of an exception.
+ */
+ if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
+ return;
+
+ exception = (rc == EIO)
+ ? EFX_MCDI_EXCEPTION_MC_REBOOT
+ : EFX_MCDI_EXCEPTION_MC_BADASSERT;
+
+ emtp->emt_exception(emtp->emt_context, exception);
+}
+
+static int
+efx_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ unsigned int rebootr;
+ efx_dword_t dword;
+ uint32_t value;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ rebootr = ((emip->emi_port == 1)
+ ? MCDI_P1_REBOOT_OFST
+ : MCDI_P2_REBOOT_OFST);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+ value = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ if (value == 0)
+ return (0);
+
+ EFX_ZERO_DWORD(dword);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return (EINTR);
+ else
+ return (EIO);
+}
+
+ __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ efx_mcdi_req_t *emrp;
+ efx_dword_t dword;
+ unsigned int pdur;
+ unsigned int seq;
+ unsigned int length;
+ int state;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /* Serialise against post-watchdog efx_mcdi_ev* */
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ EFSYS_ASSERT(emip->emi_pending_req != NULL);
+ EFSYS_ASSERT(!emip->emi_ev_cpl);
+ emrp = emip->emi_pending_req;
+
+	/* Check for reboot atomically w.r.t. efx_mcdi_request_start */
+ if (emip->emi_poll_cnt++ == 0) {
+ if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ goto fail1;
+ }
+ }
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = (emip->emi_port == 1) ? MCDI_P1_PDU_OFST : MCDI_P2_PDU_OFST;
+
+ /* Read the command header */
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_FALSE);
+ if (EFX_DWORD_FIELD(dword, MCDI_HEADER_RESPONSE) == 0) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (B_FALSE);
+ }
+
+ /* Request complete */
+ emip->emi_pending_req = NULL;
+ seq = (emip->emi_seq - 1) & 0xf;
+
+ /* Check for synchronous reboot */
+ if (EFX_DWORD_FIELD(dword, MCDI_HEADER_ERROR) != 0 &&
+ EFX_DWORD_FIELD(dword, MCDI_HEADER_DATALEN) == 0) {
+ /* Consume status word */
+ EFSYS_SPIN(MCDI_STATUS_SLEEP_US);
+ efx_mcdi_poll_reboot(enp);
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ rc = EIO;
+ goto fail2;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ /* Check that the returned data is consistent */
+ if (EFX_DWORD_FIELD(dword, MCDI_HEADER_CODE) != emrp->emr_cmd ||
+ EFX_DWORD_FIELD(dword, MCDI_HEADER_SEQ) != seq) {
+ /* Response is for a different request */
+ rc = EIO;
+ goto fail3;
+ }
+
+ length = EFX_DWORD_FIELD(dword, MCDI_HEADER_DATALEN);
+ if (EFX_DWORD_FIELD(dword, MCDI_HEADER_ERROR)) {
+ efx_dword_t errdword;
+ int errcode;
+
+ EFSYS_ASSERT3U(length, ==, 4);
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + 1 + (MC_CMD_ERR_CODE_OFST >> 2),
+ &errdword, B_FALSE);
+ errcode = EFX_DWORD_FIELD(errdword, EFX_DWORD_0);
+ rc = efx_mcdi_request_errcode(errcode);
+ EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd, int, errcode);
+ goto fail4;
+
+ } else {
+ emrp->emr_out_length_used = length;
+ emrp->emr_rc = 0;
+ efx_mcdi_request_copyout(enp, emrp);
+ }
+
+ goto out;
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ /* Fill out error state */
+ emrp->emr_rc = rc;
+ emrp->emr_out_length_used = 0;
+
+ /* Reboot/Assertion */
+ if (rc == EIO || rc == EINTR)
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+out:
+ return (B_TRUE);
+}
+
+ void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ const efx_mcdi_transport_t *emtp = emip->emi_mtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ const efx_mcdi_transport_t *emtp = emip->emi_mtp;
+ efx_mcdi_req_t *emrp;
+ int state;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
+ * when we're completing an aborted request.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
+ (seq != ((emip->emi_seq - 1) & 0xf))) {
+ EFSYS_ASSERT(emip->emi_aborted > 0);
+ if (emip->emi_aborted > 0)
+ --emip->emi_aborted;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return;
+ }
+
+ emrp = emip->emi_pending_req;
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ /*
+ * Fill out the remaining hdr fields, and copyout the payload
+ * if the user supplied an output buffer.
+ */
+ if (errcode != 0) {
+ EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
+ int, errcode);
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = efx_mcdi_request_errcode(errcode);
+ } else {
+ emrp->emr_out_length_used = outlen;
+ emrp->emr_rc = 0;
+ efx_mcdi_request_copyout(enp, emrp);
+ }
+
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+ void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ const efx_mcdi_transport_t *emtp = emip->emi_mtp;
+ efx_mcdi_req_t *emrp = NULL;
+ boolean_t ev_cpl;
+ int state;
+
+ /*
+ * The MCDI request (if there is one) has been terminated, either
+ * by a BADASSERT or REBOOT event.
+ *
+	 * If there is an outstanding event-completed MCDI operation, then we
+ * will never receive the completion event (because both MCDI
+ * completions and BADASSERT events are sent to the same evq). So
+ * complete this MCDI op.
+ *
+ * This function might run in parallel with efx_mcdi_request_poll()
+	 * for poll-completed MCDI requests, and also with
+ * efx_mcdi_request_start() for post-watchdog completions.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ ev_cpl = emip->emi_ev_cpl;
+ if (emrp != NULL && emip->emi_ev_cpl) {
+ emip->emi_pending_req = NULL;
+
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = rc;
+ ++emip->emi_aborted;
+ }
+
+	/*
+	 * Since we're running in parallel with a request, consume the
+	 * status word before dropping the lock.
+	 */
+ if (rc == EIO || rc == EINTR) {
+ EFSYS_SPIN(MCDI_STATUS_SLEEP_US);
+ (void) efx_mcdi_poll_reboot(enp);
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ if (emrp != NULL && ev_cpl)
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+ __checkReturn int
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp)
+{
+ uint8_t outbuf[MAX(MC_CMD_GET_VERSION_OUT_LEN,
+ MC_CMD_GET_BOOT_STATUS_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_word_t *ver_words;
+ uint16_t version[4];
+ uint32_t build;
+ efx_mcdi_boot_t status;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ EFX_STATIC_ASSERT(MC_CMD_GET_VERSION_IN_LEN == 0);
+ req.emr_cmd = MC_CMD_GET_VERSION;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* bootrom support */
+ if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+ goto version;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
+ version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
+ version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
+ version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
+ version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+version:
+ /* The bootrom doesn't understand BOOT_STATUS */
+ if (build == MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM) {
+ status = EFX_MCDI_BOOT_ROM;
+ goto out;
+ }
+
+ req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
+ EFX_STATIC_ASSERT(MC_CMD_GET_BOOT_STATUS_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
+ GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
+ status = EFX_MCDI_BOOT_PRIMARY;
+ else
+ status = EFX_MCDI_BOOT_SECONDARY;
+
+out:
+ if (versionp != NULL)
+ memcpy(versionp, version, sizeof (version));
+ if (buildp != NULL)
+ *buildp = build;
+ if (statusp != NULL)
+ *statusp = status;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ efx_oword_t oword;
+ unsigned int portnum;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+ enp->en_mod_flags |= EFX_MOD_MCDI;
+
+ if (enp->en_family == EFX_FAMILY_FALCON)
+ return (0);
+
+ emip->emi_mtp = mtp;
+
+ /* Determine the port number to use for MCDI */
+ EFX_BAR_READO(enp, FR_AZ_CS_DEBUG_REG, &oword);
+ portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM);
+
+ if (portnum == 0) {
+ /* Presumably booted from ROM; only MCDI port 1 will work */
+ emip->emi_port = 1;
+ } else if (portnum <= 2) {
+ emip->emi_port = portnum;
+ } else {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Wipe the atomic reboot status so subsequent MCDI requests succeed.
+ * BOOT_STATUS is preserved so eno_nic_probe() can boot out of the
+ * assertion handler.
+ */
+ (void) efx_mcdi_poll_reboot(enp);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_mcdi_reboot(
+ __in efx_nic_t *enp)
+{
+ uint8_t payload[MC_CMD_REBOOT_IN_LEN];
+ efx_mcdi_req_t req;
+ int rc;
+
+ /*
+	 * We could require the caller to have forced en_mod_flags to 0
+	 * before calling this function. That wouldn't help the other port
+	 * though, which is about to get the MC ripped out from underneath
+	 * it. Since it has to cope with the subsequent fallout of MCDI
+	 * failures, we should as well.
+ */
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ req.emr_cmd = MC_CMD_REBOOT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+	/* Invert EIO: the reboot kills the MC, so EIO here indicates success */
+ if (req.emr_rc != EIO) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ efx_mcdi_req_t *emrp;
+ boolean_t aborted;
+ int state;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_ev_* may have already completed this event, and be
+ * spinning/blocked on the upper layer lock. So it *is* legitimate
+	 * for emi_pending_req to be NULL. If there is a pending
+	 * event-completed request, then provide a "credit" to allow
+ * efx_mcdi_ev_cpl() to accept a single spurious completion.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ aborted = (emrp != NULL);
+ if (aborted) {
+ emip->emi_pending_req = NULL;
+
+ /* Error the request */
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = ETIMEDOUT;
+
+		/* Provide a credit for seqno/emi_pending_req mismatches */
+ if (emip->emi_ev_cpl)
+ ++emip->emi_aborted;
+
+ /*
+ * The upper layer has called us, so we don't
+ * need to complete the request.
+ */
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (aborted);
+}
+
+ void
+efx_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+
+ if (~(enp->en_features) & EFX_FEATURE_MCDI)
+ return;
+
+ emip->emi_mtp = NULL;
+ emip->emi_port = 0;
+ emip->emi_aborted = 0;
+}
+
+#endif /* EFSYS_OPT_MCDI */
diff --git a/sys/dev/sfxge/common/efx_mcdi.h b/sys/dev/sfxge/common/efx_mcdi.h
new file mode 100644
index 0000000..ef0ba90
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_mcdi.h
@@ -0,0 +1,238 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SYS_EFX_MCDI_H
+#define _SYS_EFX_MCDI_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Number of retries attempted for init code */
+#define EFX_MCDI_REQ_RETRY_INIT 2
+
+struct efx_mcdi_req_s {
+ /* Inputs: Command #, input buffer and length */
+ unsigned int emr_cmd;
+ uint8_t *emr_in_buf;
+ size_t emr_in_length;
+	/* Outputs: retcode, buffer, length, and length used */
+ int emr_rc;
+ uint8_t *emr_out_buf;
+ size_t emr_out_length;
+ size_t emr_out_length_used;
+};
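+
+/*
+ * Request setup sketch (illustrative; it mirrors the pattern used by
+ * efx_mcdi_version() in efx_mcdi.c, with the MC_CMD_GET_VERSION* names
+ * taken from efx_regs_mcdi.h):
+ *
+ *	uint8_t outbuf[MC_CMD_GET_VERSION_OUT_LEN];
+ *	efx_mcdi_req_t req;
+ *
+ *	req.emr_cmd = MC_CMD_GET_VERSION;
+ *	req.emr_in_buf = NULL;
+ *	req.emr_in_length = 0;
+ *	req.emr_out_buf = outbuf;
+ *	req.emr_out_length = sizeof (outbuf);
+ *	efx_mcdi_execute(enp, &req);
+ *	if (req.emr_rc != 0)
+ *		return (req.emr_rc);
+ */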
+
+typedef struct efx_mcdi_iface_s {
+ const efx_mcdi_transport_t *emi_mtp;
+ unsigned int emi_port;
+ unsigned int emi_seq;
+ efx_mcdi_req_t *emi_pending_req;
+ boolean_t emi_ev_cpl;
+ int emi_aborted;
+ uint32_t emi_poll_cnt;
+} efx_mcdi_iface_t;
+
+extern void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode);
+
+extern void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc);
+
+typedef enum efx_mcdi_boot_e {
+ EFX_MCDI_BOOT_PRIMARY,
+ EFX_MCDI_BOOT_SECONDARY,
+ EFX_MCDI_BOOT_ROM,
+} efx_mcdi_boot_t;
+
+extern __checkReturn int
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp);
+
+#define MCDI_IN(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_in_buf + (_ofst)))
+
+#define MCDI_IN2(_emr, _type, _ofst) \
+ MCDI_IN(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_IN_SET_BYTE(_emr, _ofst, _value) \
+ EFX_POPULATE_BYTE_1(*MCDI_IN2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0, _value)
+
+#define MCDI_IN_SET_DWORD(_emr, _ofst, _value) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0, _value)
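+
+/*
+ * For example, MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS, 0) (as used by
+ * efx_mcdi_reboot() in efx_mcdi.c) pastes the token into
+ * MC_CMD_REBOOT_IN_FLAGS_OFST and zeroes the dword at that offset in
+ * the request's input buffer.
+ */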
+
+#define MCDI_IN_POPULATE_DWORD_1(_emr, _ofst, _field1, _value1) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1)
+
+#define MCDI_IN_POPULATE_DWORD_2(_emr, _ofst, _field1, _value1, \
+ _field2, _value2) \
+ EFX_POPULATE_DWORD_2(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2)
+
+#define MCDI_IN_POPULATE_DWORD_3(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_3(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3)
+
+#define MCDI_IN_POPULATE_DWORD_4(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4) \
+ EFX_POPULATE_DWORD_4(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4)
+
+#define MCDI_IN_POPULATE_DWORD_5(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5) \
+ EFX_POPULATE_DWORD_5(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5)
+
+#define MCDI_IN_POPULATE_DWORD_6(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_6(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6)
+
+#define MCDI_IN_POPULATE_DWORD_7(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7) \
+	EFX_POPULATE_DWORD_7(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7)
+
+#define MCDI_IN_POPULATE_DWORD_8(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8) \
+ EFX_POPULATE_DWORD_8(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8)
+
+#define MCDI_IN_POPULATE_DWORD_9(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_9(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9)
+
+#define MCDI_IN_POPULATE_DWORD_10(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9, _field10, _value10) \
+ EFX_POPULATE_DWORD_10(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9, \
+ MC_CMD_ ## _field10, _value10)
+
+#define MCDI_OUT(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_out_buf + (_ofst)))
+
+#define MCDI_OUT2(_emr, _type, _ofst) \
+ MCDI_OUT(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_OUT_BYTE(_emr, _ofst) \
+ EFX_BYTE_FIELD(*MCDI_OUT2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0)
+
+#define MCDI_OUT_WORD(_emr, _ofst) \
+ EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0)
+
+#define MCDI_OUT_DWORD(_emr, _ofst) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0)
+
+#define MCDI_OUT_DWORD_FIELD(_emr, _ofst, _field) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field)
+
+#define MCDI_EV_FIELD(_eqp, _field) \
+	EFX_QWORD_FIELD(*(_eqp), MCDI_EVENT_ ## _field)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_MCDI_H */
diff --git a/sys/dev/sfxge/common/efx_mon.c b/sys/dev/sfxge/common/efx_mon.c
new file mode 100644
index 0000000..0d3221a
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_mon.c
@@ -0,0 +1,269 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_NULL
+#include "nullmon.h"
+#endif
+
+#if EFSYS_OPT_MON_LM87
+#include "lm87.h"
+#endif
+
+#if EFSYS_OPT_MON_MAX6647
+#include "max6647.h"
+#endif
+
+#if EFSYS_OPT_NAMES
+
+static const char __cs * __cs __efx_mon_name[] = {
+ "",
+ "nullmon",
+ "lm87",
+ "max6647",
+ "sfx90x0"
+};
+
+ const char __cs *
+efx_mon_name(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ EFSYS_ASSERT3U(encp->enc_mon_type, <, EFX_MON_NTYPES);
+ return (__efx_mon_name[encp->enc_mon_type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_MON_NULL
+static efx_mon_ops_t __cs __efx_mon_null_ops = {
+ nullmon_reset, /* emo_reset */
+ nullmon_reconfigure, /* emo_reconfigure */
+#if EFSYS_OPT_MON_STATS
+	nullmon_stats_update	/* emo_stats_update */
+#endif /* EFSYS_OPT_MON_STATS */
+};
+#endif
+
+#if EFSYS_OPT_MON_LM87
+static efx_mon_ops_t __cs __efx_mon_lm87_ops = {
+ lm87_reset, /* emo_reset */
+ lm87_reconfigure, /* emo_reconfigure */
+#if EFSYS_OPT_MON_STATS
+	lm87_stats_update	/* emo_stats_update */
+#endif /* EFSYS_OPT_MON_STATS */
+};
+#endif
+
+#if EFSYS_OPT_MON_MAX6647
+static efx_mon_ops_t __cs __efx_mon_max6647_ops = {
+ max6647_reset, /* emo_reset */
+ max6647_reconfigure, /* emo_reconfigure */
+#if EFSYS_OPT_MON_STATS
+	max6647_stats_update	/* emo_stats_update */
+#endif /* EFSYS_OPT_MON_STATS */
+};
+#endif
+
+#if EFSYS_OPT_MON_SIENA
+static efx_mon_ops_t __cs __efx_mon_siena_ops = {
+ siena_mon_reset, /* emo_reset */
+ siena_mon_reconfigure, /* emo_reconfigure */
+#if EFSYS_OPT_MON_STATS
+	siena_mon_stats_update	/* emo_stats_update */
+#endif /* EFSYS_OPT_MON_STATS */
+};
+#endif
+
+static efx_mon_ops_t __cs * __cs __efx_mon_ops[] = {
+ NULL,
+#if EFSYS_OPT_MON_NULL
+ &__efx_mon_null_ops,
+#else
+ NULL,
+#endif
+#if EFSYS_OPT_MON_LM87
+ &__efx_mon_lm87_ops,
+#else
+ NULL,
+#endif
+#if EFSYS_OPT_MON_MAX6647
+ &__efx_mon_max6647_ops,
+#else
+ NULL,
+#endif
+#if EFSYS_OPT_MON_SIENA
+ &__efx_mon_siena_ops
+#else
+ NULL
+#endif
+};
+
+ __checkReturn int
+efx_mon_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mon_t *emp = &(enp->en_mon);
+ efx_mon_ops_t *emop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_MON) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_MON;
+
+ emp->em_type = encp->enc_mon_type;
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ EFSYS_ASSERT3U(emp->em_type, <, EFX_MON_NTYPES);
+ if ((emop = (efx_mon_ops_t *)__efx_mon_ops[emp->em_type]) == NULL) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = emop->emo_reset(enp)) != 0)
+ goto fail3;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail4;
+
+ emp->em_emop = emop;
+ return (0);
+
+fail4:
+	EFSYS_PROBE(fail4);
+
+	(void) emop->emo_reset(enp);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_MON_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED MonitorStatNamesBlock 08518fd1fb4e2612 */
+static const char __cs * __cs __mon_stat_name[] = {
+ "value_2_5v",
+ "value_vccp1",
+ "value_vcc",
+ "value_5v",
+ "value_12v",
+ "value_vccp2",
+ "value_ext_temp",
+ "value_int_temp",
+ "value_ain1",
+ "value_ain2",
+ "controller_cooling",
+ "ext_cooling",
+ "1v",
+ "1_2v",
+ "1_8v",
+ "3_3v",
+};
+
+/* END MKCONFIG GENERATED MonitorStatNamesBlock */
+
+extern const char __cs *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS);
+ return (__mon_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn int
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __out_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+ efx_mon_ops_t *emop = emp->em_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ return (emop->emo_stats_update(enp, esmp, values));
+}
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+ void
+efx_mon_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+ efx_mon_ops_t *emop = emp->em_emop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ emp->em_emop = NULL;
+
+ rc = emop->emo_reset(enp);
+ if (rc != 0)
+ EFSYS_PROBE1(fail1, int, rc);
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+}
diff --git a/sys/dev/sfxge/common/efx_nic.c b/sys/dev/sfxge/common/efx_nic.c
new file mode 100644
index 0000000..07ce009
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_nic.c
@@ -0,0 +1,674 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+ __checkReturn int
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp)
+{
+#if EFSYS_OPT_FALCON
+ if (venid == EFX_PCI_VENID_SFC && devid == EFX_PCI_DEVID_FALCON) {
+ *efp = EFX_FAMILY_FALCON;
+ return (0);
+ }
+#endif
+#if EFSYS_OPT_SIENA
+ if (venid == EFX_PCI_VENID_SFC && devid == EFX_PCI_DEVID_BETHPAGE) {
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+ }
+ if (venid == EFX_PCI_VENID_SFC && devid == EFX_PCI_DEVID_SIENA) {
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+ }
+ if (venid == EFX_PCI_VENID_SFC &&
+ devid == EFX_PCI_DEVID_SIENA_F1_UNINIT) {
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+ }
+#endif
+ return (ENOTSUP);
+}
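+
+/*
+ * Usage sketch (illustrative; pci_venid and pci_devid stand for
+ * whatever PCI IDs the client has to hand): resolve the family first,
+ * then pass it to efx_nic_create():
+ *
+ *	efx_family_t family;
+ *	efx_nic_t *enp;
+ *
+ *	if (efx_family(pci_venid, pci_devid, &family) != 0)
+ *		... unsupported device ...
+ *	if (efx_nic_create(family, esip, esbp, eslp, &enp) != 0)
+ *		... creation failed ...
+ */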
+
+/*
+ * To support clients which aren't provided with any PCI context, infer
+ * the hardware family by inspecting the hardware. Obviously the caller
+ * must be damn sure they're really talking to a supported device.
+ */
+ __checkReturn int
+efx_infer_family(
+ __in efsys_bar_t *esbp,
+ __out efx_family_t *efp)
+{
+ efx_family_t family;
+ efx_oword_t oword;
+ unsigned int portnum;
+ int rc;
+
+ EFSYS_BAR_READO(esbp, FR_AZ_CS_DEBUG_REG_OFST, &oword, B_TRUE);
+ portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM);
+ switch (portnum) {
+#if EFSYS_OPT_FALCON
+ case 0:
+ family = EFX_FAMILY_FALCON;
+ break;
+#endif
+#if EFSYS_OPT_SIENA
+ case 1:
+ case 2:
+ family = EFX_FAMILY_SIENA;
+ break;
+#endif
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (efp != NULL)
+ *efp = family;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+/*
+ * The built-in default device id for port 1 of Siena is 0x0810;
+ * manftest needs to be able to cope with that.
+ */
+
+#define EFX_BIU_MAGIC0 0x01234567
+#define EFX_BIU_MAGIC1 0xfedcba98
+
+static __checkReturn int
+efx_nic_biu_test(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ int rc;
+
+ /*
+ * Write magic values to scratch registers 0 and 1, then
+ * verify that the values were written correctly. Interleave
+ * the accesses to ensure that the BIU is not just reading
+ * back the cached value that was last written.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ /*
+ * Perform the same test, with the values swapped. This
+ * ensures that subsequent tests don't start with the correct
+ * values already written into the scratch registers.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_FALCON
+
+static efx_nic_ops_t __cs __efx_nic_falcon_ops = {
+ falcon_nic_probe, /* eno_probe */
+ falcon_nic_reset, /* eno_reset */
+ falcon_nic_init, /* eno_init */
+#if EFSYS_OPT_DIAG
+ falcon_sram_test, /* eno_sram_test */
+ falcon_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ falcon_nic_fini, /* eno_fini */
+ falcon_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_FALCON */
+
+#if EFSYS_OPT_SIENA
+
+static efx_nic_ops_t __cs __efx_nic_siena_ops = {
+ siena_nic_probe, /* eno_probe */
+ siena_nic_reset, /* eno_reset */
+ siena_nic_init, /* eno_init */
+#if EFSYS_OPT_DIAG
+ siena_sram_test, /* eno_sram_test */
+ siena_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nic_fini, /* eno_fini */
+ siena_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+ __checkReturn int
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp)
+{
+ efx_nic_t *enp;
+ int rc;
+
+ EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID);
+ EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES);
+
+ /* Allocate a NIC object */
+ EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp);
+
+ if (enp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_magic = EFX_NIC_MAGIC;
+
+ switch (family) {
+#if EFSYS_OPT_FALCON
+ case EFX_FAMILY_FALCON:
+ enp->en_enop = (efx_nic_ops_t *)&__efx_nic_falcon_ops;
+ enp->en_features = 0;
+ break;
+#endif /* EFSYS_OPT_FALCON */
+
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ enp->en_enop = (efx_nic_ops_t *)&__efx_nic_siena_ops;
+ enp->en_features = EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LFSR_HASH_INSERT |
+ EFX_FEATURE_LINK_EVENTS | EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_WOL | EFX_FEATURE_MCDI |
+ EFX_FEATURE_LOOKAHEAD_SPLIT | EFX_FEATURE_MAC_HEADER_FILTERS;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ enp->en_family = family;
+ enp->en_esip = esip;
+ enp->en_esbp = esbp;
+ enp->en_eslp = eslp;
+
+ *enpp = enp;
+
+ return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_nic_probe(
+ __in efx_nic_t *enp)
+{
+ efx_nic_ops_t *enop;
+ efx_oword_t oword;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));
+
+ /* Test BIU */
+ if ((rc = efx_nic_biu_test(enp)) != 0)
+ goto fail1;
+
+ /* Clear the region register */
+ EFX_POPULATE_OWORD_4(oword,
+ FRF_AZ_ADR_REGION0, 0,
+ FRF_AZ_ADR_REGION1, (1 << 16),
+ FRF_AZ_ADR_REGION2, (2 << 16),
+ FRF_AZ_ADR_REGION3, (3 << 16));
+ EFX_BAR_WRITEO(enp, FR_AZ_ADR_REGION_REG, &oword);
+
+ enop = enp->en_enop;
+ if ((rc = enop->eno_probe(enp)) != 0)
+ goto fail2;
+
+ if ((rc = efx_phy_probe(enp)) != 0)
+ goto fail3;
+
+ enp->en_mod_flags |= EFX_MOD_PROBE;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ enop->eno_unprobe(enp);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_PCIE_TUNE
+
+ __checkReturn int
+efx_nic_pcie_tune(
+ __in efx_nic_t *enp,
+ unsigned int nlanes)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+
+#if EFSYS_OPT_FALCON
+ if (enp->en_family == EFX_FAMILY_FALCON)
+ return (falcon_nic_pcie_tune(enp, nlanes));
+#endif
+ return (ENOTSUP);
+}
+
+ __checkReturn int
+efx_nic_pcie_extended_sync(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+
+#if EFSYS_OPT_SIENA
+ if (enp->en_family == EFX_FAMILY_SIENA)
+ return (siena_nic_pcie_extended_sync(enp));
+#endif
+
+ return (ENOTSUP);
+}
+
+#endif /* EFSYS_OPT_PCIE_TUNE */
+
+ __checkReturn int
+efx_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_ops_t *enop = enp->en_enop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_NIC) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_NIC;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+efx_nic_fini(
+ __in efx_nic_t *enp)
+{
+ efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ enop->eno_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_NIC;
+}
+
+ void
+efx_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+ efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ efx_phy_unprobe(enp);
+
+ enop->eno_unprobe(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_PROBE;
+}
+
+ void
+efx_nic_destroy(
+ __in efx_nic_t *enp)
+{
+ efsys_identifier_t *esip = enp->en_esip;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ enp->en_family = 0;
+ enp->en_esip = NULL;
+ enp->en_esbp = NULL;
+ enp->en_eslp = NULL;
+
+ enp->en_enop = NULL;
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+}
+
+ __checkReturn int
+efx_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_nic_ops_t *enop = enp->en_enop;
+ unsigned int mod_flags;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ /*
+	 * All modules except MCDI, PROBE, NVRAM, VPD and MON (which we
+	 * do not reset here) must have been shut down or never initialized.
+	 *
+	 * A rule of thumb here is: if the controller or MC reboots, is *any*
+	 * state lost? If it is lost and needs reapplying, then the module
+	 * *must* not be initialised during the reset.
+ */
+ mod_flags = enp->en_mod_flags;
+ mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
+ EFX_MOD_VPD | EFX_MOD_MON);
+ EFSYS_ASSERT3U(mod_flags, ==, 0);
+ if (mod_flags != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_reset(enp)) != 0)
+ goto fail2;
+
+ enp->en_reset_flags |= EFX_RESET_MAC;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ return (&(enp->en_nic_cfg));
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn int
+efx_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_nic_ops_t *enop = enp->en_enop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+
+ if ((rc = enop->eno_register_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in size_t count)
+{
+ unsigned int bit;
+ efx_oword_t original;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ int rc;
+
+ while (count > 0) {
+ /* This function is only suitable for registers */
+ EFSYS_ASSERT(rsp->rows == 1);
+
+ /* bit sweep on and off */
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+ for (bit = 0; bit < 128; bit++) {
+ /* Is this bit in the mask? */
+			if (~(rsp->mask.eo_u32[bit >> 5]) &
+			    (1 << (bit & 0x1f)))
+ continue;
+
+ /* Test this bit can be set in isolation */
+ reg = original;
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFX_SET_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ /* Test this bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, rsp->mask);
+ EFX_CLEAR_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail2;
+ }
+ }
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+
+ --count;
+ ++rsp;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count)
+{
+ efx_sram_pattern_fn_t func;
+ unsigned int index;
+ unsigned int address;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ int rc;
+
+ EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[pattern];
+
+ while (count > 0) {
+ /* Write */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_WRITEO(enp->en_esbp, address, &reg, B_TRUE);
+
+ address += rsp->step;
+ }
+
+ /* Read */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ address += rsp->step;
+ }
+
+ ++rsp;
+ --count;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
diff --git a/sys/dev/sfxge/common/efx_nvram.c b/sys/dev/sfxge/common/efx_nvram.c
new file mode 100644
index 0000000..d4cf741
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_nvram.c
@@ -0,0 +1,372 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_FALCON
+
+static efx_nvram_ops_t __cs __efx_nvram_falcon_ops = {
+#if EFSYS_OPT_DIAG
+ falcon_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ falcon_nvram_size, /* envo_size */
+ falcon_nvram_get_version, /* envo_get_version */
+ falcon_nvram_rw_start, /* envo_rw_start */
+ falcon_nvram_read_chunk, /* envo_read_chunk */
+ falcon_nvram_erase, /* envo_erase */
+ falcon_nvram_write_chunk, /* envo_write_chunk */
+ falcon_nvram_rw_finish, /* envo_rw_finish */
+ falcon_nvram_set_version, /* envo_set_version */
+};
+
+#endif /* EFSYS_OPT_FALCON */
+
+#if EFSYS_OPT_SIENA
+
+static efx_nvram_ops_t __cs __efx_nvram_siena_ops = {
+#if EFSYS_OPT_DIAG
+ siena_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nvram_size, /* envo_size */
+ siena_nvram_get_version, /* envo_get_version */
+ siena_nvram_rw_start, /* envo_rw_start */
+ siena_nvram_read_chunk, /* envo_read_chunk */
+ siena_nvram_erase, /* envo_erase */
+ siena_nvram_write_chunk, /* envo_write_chunk */
+ siena_nvram_rw_finish, /* envo_rw_finish */
+ siena_nvram_set_version, /* envo_set_version */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+ __checkReturn int
+efx_nvram_init(
+ __in efx_nic_t *enp)
+{
+ efx_nvram_ops_t *envop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NVRAM));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_FALCON
+ case EFX_FAMILY_FALCON:
+ envop = (efx_nvram_ops_t *)&__efx_nvram_falcon_ops;
+ break;
+#endif /* EFSYS_OPT_FALCON */
+
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ envop = (efx_nvram_ops_t *)&__efx_nvram_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_envop = envop;
+ enp->en_mod_flags |= EFX_MOD_NVRAM;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn int
+efx_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_nvram_ops_t *envop = enp->en_envop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn int
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep)
+{
+ efx_nvram_ops_t *envop = enp->en_envop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_size(enp, type, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ efx_nvram_ops_t *envop = enp->en_envop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_get_version(enp, type, subtypep, version)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *chunk_sizep)
+{
+ efx_nvram_ops_t *envop = enp->en_envop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ if ((rc = envop->envo_rw_start(enp, type, chunk_sizep)) != 0)
+ goto fail1;
+
+ enp->en_nvram_locked = type;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
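+
+/*
+ * Read sequence sketch (illustrative; buf and size are hypothetical
+ * caller state): chunked reads are bracketed by rw_start/rw_finish,
+ * which take and release the per-NIC NVRAM lock:
+ *
+ *	size_t chunk, pos;
+ *
+ *	if (efx_nvram_rw_start(enp, type, &chunk) != 0)
+ *		return;
+ *	for (pos = 0; pos < size; pos += chunk)
+ *		(void) efx_nvram_read_chunk(enp, type, pos,
+ *		    buf + pos, MIN(chunk, size - pos));
+ *	efx_nvram_rw_finish(enp, type);
+ */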
+
+ __checkReturn int
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_nvram_ops_t *envop = enp->en_envop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_read_chunk(enp, type, offset, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ efx_nvram_ops_t *envop = enp->en_envop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_erase(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_nvram_ops_t *envop = enp->en_envop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_write_chunk(enp, type, offset, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ efx_nvram_ops_t *envop = enp->en_envop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ envop->envo_rw_finish(enp, type);
+
+ enp->en_nvram_locked = EFX_NVRAM_INVALID;
+}
+
+ __checkReturn int
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint16_t version[4])
+{
+ efx_nvram_ops_t *envop = enp->en_envop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ /*
+ * The Siena implementation of envo_set_version() will attempt to
+ * acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG sector.
+ * Therefore, you can't have already acquired the NVRAM_UPDATE lock.
+ */
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ if ((rc = envop->envo_set_version(enp, type, version)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+	void
+efx_nvram_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ enp->en_envop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_NVRAM;
+}
+
+#endif /* EFSYS_OPT_NVRAM */
diff --git a/sys/dev/sfxge/common/efx_phy.c b/sys/dev/sfxge/common/efx_phy.c
new file mode 100644
index 0000000..0b098ec
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_phy.c
@@ -0,0 +1,752 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_FALCON
+#include "falcon_nvram.h"
+#endif
+
+#if EFSYS_OPT_MAC_FALCON_XMAC
+#include "falcon_xmac.h"
+#endif
+
+#if EFSYS_OPT_MAC_FALCON_GMAC
+#include "falcon_gmac.h"
+#endif
+
+#if EFSYS_OPT_PHY_NULL
+#include "nullphy.h"
+#endif
+
+#if EFSYS_OPT_PHY_QT2022C2
+#include "qt2022c2.h"
+#endif
+
+#if EFSYS_OPT_PHY_SFX7101
+#include "sfx7101.h"
+#endif
+
+#if EFSYS_OPT_PHY_TXC43128
+#include "txc43128.h"
+#endif
+
+#if EFSYS_OPT_PHY_SFT9001
+#include "sft9001.h"
+#endif
+
+#if EFSYS_OPT_PHY_QT2025C
+#include "qt2025c.h"
+#endif
+
+#if EFSYS_OPT_PHY_NULL
+static efx_phy_ops_t __cs __efx_phy_null_ops = {
+ NULL, /* epo_power */
+ nullphy_reset, /* epo_reset */
+ nullphy_reconfigure, /* epo_reconfigure */
+ nullphy_verify, /* epo_verify */
+ NULL, /* epo_uplink_check */
+ nullphy_downlink_check, /* epo_downlink_check */
+ nullphy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ nullphy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_PHY_PROPS
+#if EFSYS_OPT_NAMES
+ nullphy_prop_name, /* epo_prop_name */
+#endif
+ nullphy_prop_get, /* epo_prop_get */
+ nullphy_prop_set, /* epo_prop_set */
+#endif /* EFSYS_OPT_PHY_PROPS */
+#if EFSYS_OPT_PHY_BIST
+ NULL, /* epo_bist_start */
+ NULL, /* epo_bist_poll */
+ NULL, /* epo_bist_stop */
+#endif /* EFSYS_OPT_PHY_BIST */
+};
+#endif /* EFSYS_OPT_PHY_NULL */
+
+#if EFSYS_OPT_PHY_QT2022C2
+static efx_phy_ops_t __cs __efx_phy_qt2022c2_ops = {
+ NULL, /* epo_power */
+ qt2022c2_reset, /* epo_reset */
+ qt2022c2_reconfigure, /* epo_reconfigure */
+ qt2022c2_verify, /* epo_verify */
+ qt2022c2_uplink_check, /* epo_uplink_check */
+ qt2022c2_downlink_check, /* epo_downlink_check */
+ qt2022c2_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ qt2022c2_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_PHY_PROPS
+#if EFSYS_OPT_NAMES
+ qt2022c2_prop_name, /* epo_prop_name */
+#endif
+ qt2022c2_prop_get, /* epo_prop_get */
+ qt2022c2_prop_set, /* epo_prop_set */
+#endif /* EFSYS_OPT_PHY_PROPS */
+#if EFSYS_OPT_PHY_BIST
+ NULL, /* epo_bist_start */
+ NULL, /* epo_bist_poll */
+ NULL, /* epo_bist_stop */
+#endif /* EFSYS_OPT_PHY_BIST */
+};
+#endif /* EFSYS_OPT_PHY_QT2022C2 */
+
+#if EFSYS_OPT_PHY_SFX7101
+static efx_phy_ops_t __cs __efx_phy_sfx7101_ops = {
+ sfx7101_power, /* epo_power */
+ sfx7101_reset, /* epo_reset */
+ sfx7101_reconfigure, /* epo_reconfigure */
+ sfx7101_verify, /* epo_verify */
+ sfx7101_uplink_check, /* epo_uplink_check */
+ sfx7101_downlink_check, /* epo_downlink_check */
+ sfx7101_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ sfx7101_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_PHY_PROPS
+#if EFSYS_OPT_NAMES
+ sfx7101_prop_name, /* epo_prop_name */
+#endif
+ sfx7101_prop_get, /* epo_prop_get */
+ sfx7101_prop_set, /* epo_prop_set */
+#endif /* EFSYS_OPT_PHY_PROPS */
+#if EFSYS_OPT_PHY_BIST
+ NULL, /* epo_bist_start */
+ NULL, /* epo_bist_poll */
+ NULL, /* epo_bist_stop */
+#endif /* EFSYS_OPT_PHY_BIST */
+};
+#endif /* EFSYS_OPT_PHY_SFX7101 */
+
+#if EFSYS_OPT_PHY_TXC43128
+static efx_phy_ops_t __cs __efx_phy_txc43128_ops = {
+ NULL, /* epo_power */
+ txc43128_reset, /* epo_reset */
+ txc43128_reconfigure, /* epo_reconfigure */
+ txc43128_verify, /* epo_verify */
+ txc43128_uplink_check, /* epo_uplink_check */
+ txc43128_downlink_check, /* epo_downlink_check */
+ txc43128_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ txc43128_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_PHY_PROPS
+#if EFSYS_OPT_NAMES
+ txc43128_prop_name, /* epo_prop_name */
+#endif
+ txc43128_prop_get, /* epo_prop_get */
+ txc43128_prop_set, /* epo_prop_set */
+#endif /* EFSYS_OPT_PHY_PROPS */
+#if EFSYS_OPT_PHY_BIST
+ NULL, /* epo_bist_start */
+ NULL, /* epo_bist_poll */
+ NULL, /* epo_bist_stop */
+#endif /* EFSYS_OPT_PHY_BIST */
+};
+#endif /* EFSYS_OPT_PHY_TXC43128 */
+
+#if EFSYS_OPT_PHY_SFT9001
+static efx_phy_ops_t __cs __efx_phy_sft9001_ops = {
+ NULL, /* epo_power */
+ sft9001_reset, /* epo_reset */
+ sft9001_reconfigure, /* epo_reconfigure */
+ sft9001_verify, /* epo_verify */
+ sft9001_uplink_check, /* epo_uplink_check */
+ sft9001_downlink_check, /* epo_downlink_check */
+ sft9001_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ sft9001_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_PHY_PROPS
+#if EFSYS_OPT_NAMES
+ sft9001_prop_name, /* epo_prop_name */
+#endif
+ sft9001_prop_get, /* epo_prop_get */
+ sft9001_prop_set, /* epo_prop_set */
+#endif /* EFSYS_OPT_PHY_PROPS */
+#if EFSYS_OPT_PHY_BIST
+ sft9001_bist_start, /* epo_bist_start */
+ sft9001_bist_poll, /* epo_bist_poll */
+ sft9001_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_PHY_BIST */
+};
+#endif /* EFSYS_OPT_PHY_SFT9001 */
+
+#if EFSYS_OPT_PHY_QT2025C
+static efx_phy_ops_t __cs __efx_phy_qt2025c_ops = {
+ NULL, /* epo_power */
+ qt2025c_reset, /* epo_reset */
+ qt2025c_reconfigure, /* epo_reconfigure */
+ qt2025c_verify, /* epo_verify */
+ qt2025c_uplink_check, /* epo_uplink_check */
+ qt2025c_downlink_check, /* epo_downlink_check */
+ qt2025c_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ qt2025c_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_PHY_PROPS
+#if EFSYS_OPT_NAMES
+ qt2025c_prop_name, /* epo_prop_name */
+#endif
+ qt2025c_prop_get, /* epo_prop_get */
+ qt2025c_prop_set, /* epo_prop_set */
+#endif /* EFSYS_OPT_PHY_PROPS */
+#if EFSYS_OPT_PHY_BIST
+ NULL, /* epo_bist_start */
+ NULL, /* epo_bist_poll */
+ NULL, /* epo_bist_stop */
+#endif /* EFSYS_OPT_PHY_BIST */
+};
+#endif /* EFSYS_OPT_PHY_QT2025C */
+
+#if EFSYS_OPT_SIENA
+static efx_phy_ops_t __cs __efx_phy_siena_ops = {
+ siena_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ siena_phy_reconfigure, /* epo_reconfigure */
+ siena_phy_verify, /* epo_verify */
+ NULL, /* epo_uplink_check */
+ NULL, /* epo_downlink_check */
+ siena_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ siena_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_PHY_PROPS
+#if EFSYS_OPT_NAMES
+ siena_phy_prop_name, /* epo_prop_name */
+#endif
+ siena_phy_prop_get, /* epo_prop_get */
+ siena_phy_prop_set, /* epo_prop_set */
+#endif /* EFSYS_OPT_PHY_PROPS */
+#if EFSYS_OPT_PHY_BIST
+ siena_phy_bist_start, /* epo_bist_start */
+ siena_phy_bist_poll, /* epo_bist_poll */
+ siena_phy_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_PHY_BIST */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+ __checkReturn int
+efx_phy_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_phy_ops_t *epop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_port = encp->enc_port;
+ epp->ep_phy_type = encp->enc_phy_type;
+
+ /* Hook in operations structure */
+ switch (enp->en_family) {
+#if EFSYS_OPT_FALCON
+ case EFX_FAMILY_FALCON:
+ switch (epp->ep_phy_type) {
+#if EFSYS_OPT_PHY_NULL
+ case PHY_TYPE_NONE_DECODE:
+ epop = (efx_phy_ops_t *)&__efx_phy_null_ops;
+ break;
+#endif
+#if EFSYS_OPT_PHY_QT2022C2
+ case PHY_TYPE_QT2022C2_DECODE:
+ epop = (efx_phy_ops_t *)&__efx_phy_qt2022c2_ops;
+ break;
+#endif
+#if EFSYS_OPT_PHY_SFX7101
+ case PHY_TYPE_SFX7101_DECODE:
+ epop = (efx_phy_ops_t *)&__efx_phy_sfx7101_ops;
+ break;
+#endif
+#if EFSYS_OPT_PHY_TXC43128
+ case PHY_TYPE_TXC43128_DECODE:
+ epop = (efx_phy_ops_t *)&__efx_phy_txc43128_ops;
+ break;
+#endif
+#if EFSYS_OPT_PHY_SFT9001
+ case PHY_TYPE_SFT9001A_DECODE:
+ case PHY_TYPE_SFT9001B_DECODE:
+ epop = (efx_phy_ops_t *)&__efx_phy_sft9001_ops;
+ break;
+#endif
+#if EFSYS_OPT_PHY_QT2025C
+ case PHY_TYPE_QT2025C_DECODE:
+ epop = (efx_phy_ops_t *)&__efx_phy_qt2025c_ops;
+ break;
+#endif
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+#endif /* EFSYS_OPT_FALCON */
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ epop = (efx_phy_ops_t *)&__efx_phy_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ epp->ep_epop = epop;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_verify(enp));
+}
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+ __checkReturn int
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t mask;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_phy_led_mode == mode)
+ goto done;
+
+ mask = (1 << EFX_PHY_LED_DEFAULT);
+ mask |= encp->enc_led_mask;
+
+ if (!((1 << mode) & mask)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(mode, <, EFX_PHY_LED_NMODES);
+ epp->ep_phy_led_mode = mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
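The gating above accepts a mode only if the PHY advertises it in enc_led_mask (EFX_PHY_LED_DEFAULT is always permitted). A usage sketch, assuming the EFX_PHY_LED_ON enumerant from efx.h; error handling is elided:

/* Light the port LED to identify an adapter, then restore the
 * default behaviour (illustrative only). */
(void) efx_phy_led_set(enp, EFX_PHY_LED_ON);
/* ... operator locates the port ... */
(void) efx_phy_led_set(enp, EFX_PHY_LED_DEFAULT);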
+
+ void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ switch (flag) {
+ case EFX_PHY_CAP_CURRENT:
+ *maskp = epp->ep_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_DEFAULT:
+ *maskp = epp->ep_default_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_PERM:
+ *maskp = epp->ep_phy_cap_mask;
+ break;
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+ __checkReturn int
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((mask & ~epp->ep_phy_cap_mask) != 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_adv_cap_mask == mask)
+ goto done;
+
+ epp->ep_adv_cap_mask = mask;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
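Since efx_phy_adv_cap_set() rejects any bit outside the permanent mask, callers typically read a mask first and modify it. A sketch, assuming the EFX_PHY_CAP_PAUSE bit number from efx.h:

/* Advertise pause frames on top of the current advertised set. */
uint32_t mask;
int rc;

efx_phy_adv_cap_get(enp, EFX_PHY_CAP_CURRENT, &mask);
mask |= (1 << EFX_PHY_CAP_PAUSE);
if ((rc = efx_phy_adv_cap_set(enp, mask)) != 0)
	return (rc);	/* bit not permitted, or reconfigure failed */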
+
+ void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ *maskp = epp->ep_lp_cap_mask;
+}
+
+ __checkReturn int
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_oui_get(enp, ouip));
+}
+
+ void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_module_type != EFX_PHY_MEDIA_INVALID)
+ *typep = epp->ep_module_type;
+ else
+ *typep = epp->ep_fixed_port_type;
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED PhyStatNamesBlock 271268f3da0e804f */
+static const char __cs * __cs __efx_phy_stat_name[] = {
+ "oui",
+ "pma_pmd_link_up",
+ "pma_pmd_rx_fault",
+ "pma_pmd_tx_fault",
+ "pma_pmd_rev_a",
+ "pma_pmd_rev_b",
+ "pma_pmd_rev_c",
+ "pma_pmd_rev_d",
+ "pcs_link_up",
+ "pcs_rx_fault",
+ "pcs_tx_fault",
+ "pcs_ber",
+ "pcs_block_errors",
+ "phy_xs_link_up",
+ "phy_xs_rx_fault",
+ "phy_xs_tx_fault",
+ "phy_xs_align",
+ "phy_xs_sync_a",
+ "phy_xs_sync_b",
+ "phy_xs_sync_c",
+ "phy_xs_sync_d",
+ "an_link_up",
+ "an_master",
+ "an_local_rx_ok",
+ "an_remote_rx_ok",
+ "cl22ext_link_up",
+ "snr_a",
+ "snr_b",
+ "snr_c",
+ "snr_d",
+ "pma_pmd_signal_a",
+ "pma_pmd_signal_b",
+ "pma_pmd_signal_c",
+ "pma_pmd_signal_d",
+ "an_complete",
+ "pma_pmd_rev_major",
+ "pma_pmd_rev_minor",
+ "pma_pmd_rev_micro",
+ "pcs_fw_version_0",
+ "pcs_fw_version_1",
+ "pcs_fw_version_2",
+ "pcs_fw_version_3",
+ "pcs_fw_build_yy",
+ "pcs_fw_build_mm",
+ "pcs_fw_build_dd",
+ "pcs_op_mode",
+};
+
+/* END MKCONFIG GENERATED PhyStatNamesBlock */
+
+ const char __cs *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t type)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_PHY_NSTATS);
+
+ return (__efx_phy_stat_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn int
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __out_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_stats_update(enp, esmp, stat));
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
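Putting the two entry points together: a sketch that dumps every PHY statistic by name, assuming both EFSYS_OPT_PHY_STATS and EFSYS_OPT_NAMES are compiled in and that esmp points at DMA-mapped memory sized for the stats buffer:

uint32_t stats[EFX_PHY_NSTATS];
unsigned int id;
int rc;

if ((rc = efx_phy_stats_update(enp, esmp, stats)) == 0) {
	for (id = 0; id < EFX_PHY_NSTATS; id++)
		printf("%s: %u\n",
		    efx_phy_stat_name(enp, (efx_phy_stat_t)id),
		    stats[id]);
}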
+
+#if EFSYS_OPT_PHY_PROPS
+
+#if EFSYS_OPT_NAMES
+ const char __cs *
+efx_phy_prop_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ return (epop->epo_prop_name(enp, id));
+}
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn int
+efx_phy_prop_get(
+ __in efx_nic_t *enp,
+ __in unsigned int id,
+ __in uint32_t flags,
+ __out uint32_t *valp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_prop_get(enp, id, flags, valp));
+}
+
+ __checkReturn int
+efx_phy_prop_set(
+ __in efx_nic_t *enp,
+ __in unsigned int id,
+ __in uint32_t val)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_prop_set(enp, id, val));
+}
+#endif /* EFSYS_OPT_PHY_PROPS */
+
+#if EFSYS_OPT_PHY_BIST
+
+ __checkReturn int
+efx_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT3U(type, !=, EFX_PHY_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_PHY_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, EFX_PHY_BIST_TYPE_UNKNOWN);
+
+ if (epop->epo_bist_start == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_start(enp, type)) != 0)
+ goto fail2;
+
+ epp->ep_current_bist = type;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type,
+ __out efx_phy_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT3U(type, !=, EFX_PHY_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_PHY_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_poll != NULL);
+ if (epop->epo_bist_poll == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_poll(enp, type, resultp, value_maskp,
+ valuesp, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+efx_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT3U(type, !=, EFX_PHY_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_PHY_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_stop != NULL);
+
+ if (epop->epo_bist_stop != NULL)
+ epop->epo_bist_stop(enp, type);
+
+ epp->ep_current_bist = EFX_PHY_BIST_TYPE_UNKNOWN;
+}
+
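The three BIST entry points form a strict lifecycle: exactly one test may run at a time (tracked in ep_current_bist), and stop must be called even after poll reports completion. A sketch, assuming the EFX_PHY_BIST_TYPE_NORMAL and EFX_PHY_BIST_RESULT_RUNNING enumerants from efx.h:

efx_phy_bist_result_t result;
int rc;

if ((rc = efx_phy_bist_start(enp, EFX_PHY_BIST_TYPE_NORMAL)) != 0)
	return (rc);

do {
	/* a real caller would sleep or yield between polls */
	rc = efx_phy_bist_poll(enp, EFX_PHY_BIST_TYPE_NORMAL,
	    &result, NULL, NULL, 0);
} while (rc == 0 && result == EFX_PHY_BIST_RESULT_RUNNING);

efx_phy_bist_stop(enp, EFX_PHY_BIST_TYPE_NORMAL);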
+#endif /* EFSYS_OPT_PHY_BIST */
+
+ void
+efx_phy_unprobe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_epop = NULL;
+
+ epp->ep_adv_cap_mask = 0;
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+}
diff --git a/sys/dev/sfxge/common/efx_port.c b/sys/dev/sfxge/common/efx_port.c
new file mode 100644
index 0000000..cbb0b3b
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_port.c
@@ -0,0 +1,226 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_impl.h"
+
+ __checkReturn int
+efx_port_init(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_PORT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_PORT;
+
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_link_mode = EFX_LINK_UNKNOWN;
+ epp->ep_mac_poll_needed = B_TRUE;
+ epp->ep_mac_drain = B_TRUE;
+
+ /* Configure the MAC */
+ if ((rc = efx_mac_select(enp)) != 0)
+ goto fail1;
+
+ epp->ep_emop->emo_reconfigure(enp);
+
+ /*
+ * Turn on the PHY if available, otherwise reset it, and
+ * reconfigure it with the current configuration.
+ */
+ if (epop->epo_power != NULL) {
+ if ((rc = epop->epo_power(enp, B_TRUE)) != 0)
+ goto fail2;
+ } else {
+ if ((rc = epop->epo_reset(enp)) != 0)
+ goto fail2;
+ }
+
+ EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY);
+ enp->en_reset_flags &= ~EFX_RESET_PHY;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+
+ return (rc);
+}
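The MOD-flag asserts at the top of efx_port_init() encode the bring-up order: probe, then NIC init, then port init. A caller sketch, assuming the efx_nic_probe() and efx_nic_init() entry points declared in efx.h:

if ((rc = efx_nic_probe(enp)) != 0)	/* sets EFX_MOD_PROBE */
	goto fail;
if ((rc = efx_nic_init(enp)) != 0)	/* sets EFX_MOD_NIC */
	goto fail;
if ((rc = efx_port_init(enp)) != 0)	/* sets EFX_MOD_PORT */
	goto fail;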
+
+ __checkReturn int
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ efx_link_mode_t ignore_link_mode;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+ EFSYS_ASSERT(!epp->ep_mac_stats_pending);
+
+ if (link_modep == NULL)
+ link_modep = &ignore_link_mode;
+
+ if ((rc = emop->emo_poll(enp, link_modep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn int
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mac_ops_t *emop = epp->ep_emop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ EFSYS_ASSERT(link_mode < EFX_LINK_NMODES);
+ if ((1 << loopback_type) & ~encp->enc_loopback_types[link_mode]) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_loopback_type == loopback_type &&
+ epp->ep_loopback_link_mode == link_mode)
+ return (0);
+
+ if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
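enc_loopback_types[] is indexed by link mode, so a loopback type is only accepted at speeds where the hardware supports it. A sketch, assuming the EFX_LINK_10000FDX and EFX_LOOPBACK_XGMII enumerants from efx.h:

/* Put the port into XGMII loopback at 10G; a repeat call with the
 * same arguments returns 0 without reconfiguring. */
if ((rc = efx_port_loopback_set(enp, EFX_LINK_10000FDX,
    EFX_LOOPBACK_XGMII)) != 0)
	return (rc);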
+
+#if EFSYS_OPT_NAMES
+
+static const char __cs * __cs __efx_loopback_type_name[] = {
+ "OFF",
+ "DATA",
+ "GMAC",
+ "XGMII",
+ "XGXS",
+ "XAUI",
+ "GMII",
+ "SGMII",
+ "XGBR",
+ "XFI",
+ "XAUI_FAR",
+ "GMII_FAR",
+ "SGMII_FAR",
+ "XFI_FAR",
+ "GPHY",
+ "PHY_XS",
+ "PCS",
+ "PMA_PMD",
+};
+
+ __checkReturn const char __cs *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES);
+
+ return (__efx_loopback_type_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ void
+efx_port_fini(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(epp->ep_mac_drain);
+
+ epp->ep_emop = NULL;
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_mac_drain = B_FALSE;
+ epp->ep_mac_poll_needed = B_FALSE;
+
+ /* Turn off the PHY */
+ if (epop->epo_power != NULL)
+ (void) epop->epo_power(enp, B_FALSE);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+}
diff --git a/sys/dev/sfxge/common/efx_regs.h b/sys/dev/sfxge/common/efx_regs.h
new file mode 100644
index 0000000..c31c33e
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_regs.h
@@ -0,0 +1,3846 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SYS_EFX_REGS_H
+#define _SYS_EFX_REGS_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * FR_AB_EE_VPD_CFG0_REG_SF(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_SF_OFST 0x00000300
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_EE_VPD_CFG0_REG(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_OFST 0x00000140
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPDW_BASE_LBN 64
+#define FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define FRF_AB_EE_VPD_BASE_LBN 32
+#define FRF_AB_EE_VPD_BASE_WIDTH 24
+#define FRF_AB_EE_VPD_LENGTH_LBN 16
+#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define FRF_AB_EE_VPD_EN_LBN 0
+#define FRF_AB_EE_VPD_EN_WIDTH 1
+
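Each field in this header is described by two constants: _LBN, the lowest bit number within the (up to 128-bit) register, and _WIDTH, the field width in bits. The driver's EFX_*_FIELD() accessors in efx_types.h consume these pairs; the standalone helper below is illustrative only and assumes the field lies entirely within the 64-bit value passed in (width < 64):

/* Extract a field given its LBN/WIDTH pair (hypothetical helper). */
static inline uint64_t
reg_field(uint64_t reg, unsigned int lbn, unsigned int width)
{
	return ((reg >> lbn) & ((1ULL << width) - 1));
}

/* e.g. reg_field(cfg0, FRF_AB_EE_VPD_EN_LBN, FRF_AB_EE_VPD_EN_WIDTH) */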
+
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG_SF(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_SF_OFST 0x00000320
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_OFST 0x00000320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+
+/*
+ * FR_AB_PCIE_SD_CTL45_REG_SF(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_SF_OFST 0x00000330
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL45_REG(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_OFST 0x00000330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG_SF(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_SF_OFST 0x00000340
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_OFST 0x00000340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+
+/*
+ * FR_AB_HW_INIT_REG_SF(128bit):
+ * Hardware initialization register
+ */
+#define FR_AB_HW_INIT_REG_SF_OFST 0x00000350
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_HW_INIT_REG(128bit):
+ * Hardware initialization register
+ */
+#define FR_AZ_HW_INIT_REG_OFST 0x000000c0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define FRF_CZ_TX_MRG_TAGS_LBN 120
+#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define FRF_AZ_TRGT_MASK_ALL_LBN 100
+#define FRF_AZ_TRGT_MASK_ALL_WIDTH 1
+#define FRF_AZ_DOORBELL_DROP_LBN 92
+#define FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define FRF_AB_PE_EIDLE_DIS_LBN 75
+#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define FRF_AZ_FC_BLOCKING_EN_LBN 45
+#define FRF_AZ_FC_BLOCKING_EN_WIDTH 1
+#define FRF_AZ_B2B_REQ_EN_LBN 44
+#define FRF_AZ_B2B_REQ_EN_WIDTH 1
+#define FRF_AZ_POST_WR_MASK_LBN 40
+#define FRF_AZ_POST_WR_MASK_WIDTH 4
+#define FRF_AZ_TLP_TC_LBN 34
+#define FRF_AZ_TLP_TC_WIDTH 3
+#define FRF_AZ_TLP_ATTR_LBN 32
+#define FRF_AZ_TLP_ATTR_WIDTH 2
+#define FRF_AB_INTB_VEC_LBN 24
+#define FRF_AB_INTB_VEC_WIDTH 5
+#define FRF_AB_INTA_VEC_LBN 16
+#define FRF_AB_INTA_VEC_WIDTH 5
+#define FRF_AZ_WD_TIMER_LBN 8
+#define FRF_AZ_WD_TIMER_WIDTH 8
+#define FRF_AZ_US_DISABLE_LBN 5
+#define FRF_AZ_US_DISABLE_WIDTH 1
+#define FRF_AZ_TLP_EP_LBN 4
+#define FRF_AZ_TLP_EP_WIDTH 1
+#define FRF_AZ_ATTR_SEL_LBN 3
+#define FRF_AZ_ATTR_SEL_WIDTH 1
+#define FRF_AZ_TD_SEL_LBN 1
+#define FRF_AZ_TD_SEL_WIDTH 1
+#define FRF_AZ_TLP_TD_LBN 0
+#define FRF_AZ_TLP_TD_WIDTH 1
+
+
+/*
+ * FR_AB_NIC_STAT_REG_SF(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_SF_OFST 0x00000360
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_NIC_STAT_REG(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_OFST 0x00000200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_AER_DIS_LBN 34
+#define FRF_BB_AER_DIS_WIDTH 1
+#define FRF_BB_EE_STRAP_EN_LBN 31
+#define FRF_BB_EE_STRAP_EN_WIDTH 1
+#define FRF_BB_EE_STRAP_LBN 24
+#define FRF_BB_EE_STRAP_WIDTH 4
+#define FRF_BB_REVISION_ID_LBN 17
+#define FRF_BB_REVISION_ID_WIDTH 7
+#define FRF_AB_ONCHIP_SRAM_LBN 16
+#define FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define FRF_AB_SF_PRST_LBN 9
+#define FRF_AB_SF_PRST_WIDTH 1
+#define FRF_AB_EE_PRST_LBN 8
+#define FRF_AB_EE_PRST_WIDTH 1
+#define FRF_AB_ATE_MODE_LBN 3
+#define FRF_AB_ATE_MODE_WIDTH 1
+#define FRF_AB_STRAP_PINS_LBN 0
+#define FRF_AB_STRAP_PINS_WIDTH 3
+
+
+/*
+ * FR_AB_GLB_CTL_REG_SF(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_SF_OFST 0x00000370
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_GLB_CTL_REG(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_OFST 0x00000220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+
+/*
+ * FR_AZ_IOM_IND_ADR_REG(32bit):
+ * IO-mapped indirect access address register
+ */
+#define FR_AZ_IOM_IND_ADR_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_LBN 24
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_WIDTH 1
+#define FRF_AZ_IOM_IND_ADR_LBN 0
+#define FRF_AZ_IOM_IND_ADR_WIDTH 24
+
+
+/*
+ * FR_AZ_IOM_IND_DAT_REG(32bit):
+ * IO-mapped indirect access data register
+ */
+#define FR_AZ_IOM_IND_DAT_REG_OFST 0x00000004
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_IND_DAT_LBN 0
+#define FRF_AZ_IOM_IND_DAT_WIDTH 32
+
+
+/*
+ * FR_AZ_ADR_REGION_REG(128bit):
+ * Address region register
+ */
+#define FR_AZ_ADR_REGION_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ADR_REGION3_LBN 96
+#define FRF_AZ_ADR_REGION3_WIDTH 18
+#define FRF_AZ_ADR_REGION2_LBN 64
+#define FRF_AZ_ADR_REGION2_WIDTH 18
+#define FRF_AZ_ADR_REGION1_LBN 32
+#define FRF_AZ_ADR_REGION1_WIDTH 18
+#define FRF_AZ_ADR_REGION0_LBN 0
+#define FRF_AZ_ADR_REGION0_WIDTH 18
+
+
+/*
+ * FR_AZ_INT_EN_REG_KER(128bit):
+ * Kernel driver Interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_KER_OFST 0x00000010
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_KER_INT_CHAR_LBN 4
+#define FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define FRF_AZ_KER_INT_KER_LBN 3
+#define FRF_AZ_KER_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_EN_REG_CHAR(128bit):
+ * Char Driver interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_CHAR_OFST 0x00000020
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_CHAR_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_CHAR_INT_CHAR_LBN 4
+#define FRF_AZ_CHAR_INT_CHAR_WIDTH 1
+#define FRF_AZ_CHAR_INT_KER_LBN 3
+#define FRF_AZ_CHAR_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_CHAR_LBN 0
+#define FRF_AZ_DRV_INT_EN_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_ADR_REG_KER(128bit):
+ * Interrupt host address for Kernel driver
+ */
+#define FR_AZ_INT_ADR_REG_KER_OFST 0x00000030
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define FRF_AZ_INT_ADR_KER_LBN 0
+#define FRF_AZ_INT_ADR_KER_WIDTH 64
+#define FRF_AZ_INT_ADR_KER_DW0_LBN 0
+#define FRF_AZ_INT_ADR_KER_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_KER_DW1_LBN 32
+#define FRF_AZ_INT_ADR_KER_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_INT_ADR_REG_CHAR(128bit):
+ * Interrupt host address for Char driver
+ */
+#define FR_AZ_INT_ADR_REG_CHAR_OFST 0x00000040
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define FRF_AZ_INT_ADR_CHAR_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_WIDTH 64
+#define FRF_AZ_INT_ADR_CHAR_DW0_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_LBN 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_KER(32bit):
+ * Kernel interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_KER_OFST 0x00000050
+/* falcona0=net_func_bar2 */
+
+#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+
+/*
+ * FR_BZ_INT_ISR0_REG(128bit):
+ * Function 0 Interrupt Acknowledge Status register
+ */
+#define FR_BZ_INT_ISR0_REG_OFST 0x00000090
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_INT_ISR_REG_LBN 0
+#define FRF_BZ_INT_ISR_REG_WIDTH 64
+#define FRF_BZ_INT_ISR_REG_DW0_LBN 0
+#define FRF_BZ_INT_ISR_REG_DW0_WIDTH 32
+#define FRF_BZ_INT_ISR_REG_DW1_LBN 32
+#define FRF_BZ_INT_ISR_REG_DW1_WIDTH 32
+
+
+/*
+ * FR_AB_EE_SPI_HCMD_REG(128bit):
+ * SPI host command register
+ */
+#define FR_AB_EE_SPI_HCMD_REG_OFST 0x00000100
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+
+/*
+ * FR_CZ_USR_EV_CFG(32bit):
+ * User Level Event Configuration register
+ */
+#define FR_CZ_USR_EV_CFG_OFST 0x00000100
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_USREV_DIS_LBN 16
+#define FRF_CZ_USREV_DIS_WIDTH 1
+#define FRF_CZ_DFLT_EVQ_LBN 0
+#define FRF_CZ_DFLT_EVQ_WIDTH 10
+
+
+/*
+ * FR_AB_EE_SPI_HADR_REG(128bit):
+ * SPI host address register
+ */
+#define FR_AB_EE_SPI_HADR_REG_OFST 0x00000110
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+
+/*
+ * FR_AB_EE_SPI_HDATA_REG(128bit):
+ * SPI host data register
+ */
+#define FR_AB_EE_SPI_HDATA_REG_OFST 0x00000120
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HDATA3_LBN 96
+#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA2_LBN 64
+#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA1_LBN 32
+#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA0_LBN 0
+#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+
+/*
+ * FR_AB_EE_BASE_PAGE_REG(128bit):
+ * Expansion ROM base mirror register
+ */
+#define FR_AB_EE_BASE_PAGE_REG_OFST 0x00000130
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_EXPROM_MASK_LBN 16
+#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+
+/*
+ * FR_AB_EE_VPD_SW_CNTL_REG(128bit):
+ * VPD access SW control register
+ */
+#define FR_AB_EE_VPD_SW_CNTL_REG_OFST 0x00000150
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+
+/*
+ * FR_AB_EE_VPD_SW_DATA_REG(128bit):
+ * VPD access SW data register
+ */
+#define FR_AB_EE_VPD_SW_DATA_REG_OFST 0x00000160
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+
+/*
+ * FR_BB_PCIE_CORE_INDIRECT_REG(64bit):
+ * Indirect Access to PCIE Core registers
+ */
+#define FR_BB_PCIE_CORE_INDIRECT_REG_OFST 0x000001f0
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+
+/*
+ * FR_AB_GPIO_CTL_REG(128bit):
+ * GPIO control register
+ */
+#define FR_AB_GPIO_CTL_REG_OFST 0x00000210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GPIO15_OEN_LBN 63
+#define FRF_AB_GPIO15_OEN_WIDTH 1
+#define FRF_AB_GPIO14_OEN_LBN 62
+#define FRF_AB_GPIO14_OEN_WIDTH 1
+#define FRF_AB_GPIO13_OEN_LBN 61
+#define FRF_AB_GPIO13_OEN_WIDTH 1
+#define FRF_AB_GPIO12_OEN_LBN 60
+#define FRF_AB_GPIO12_OEN_WIDTH 1
+#define FRF_AB_GPIO11_OEN_LBN 59
+#define FRF_AB_GPIO11_OEN_WIDTH 1
+#define FRF_AB_GPIO10_OEN_LBN 58
+#define FRF_AB_GPIO10_OEN_WIDTH 1
+#define FRF_AB_GPIO9_OEN_LBN 57
+#define FRF_AB_GPIO9_OEN_WIDTH 1
+#define FRF_AB_GPIO8_OEN_LBN 56
+#define FRF_AB_GPIO8_OEN_WIDTH 1
+#define FRF_AB_GPIO15_OUT_LBN 55
+#define FRF_AB_GPIO15_OUT_WIDTH 1
+#define FRF_AB_GPIO14_OUT_LBN 54
+#define FRF_AB_GPIO14_OUT_WIDTH 1
+#define FRF_AB_GPIO13_OUT_LBN 53
+#define FRF_AB_GPIO13_OUT_WIDTH 1
+#define FRF_AB_GPIO12_OUT_LBN 52
+#define FRF_AB_GPIO12_OUT_WIDTH 1
+#define FRF_AB_GPIO11_OUT_LBN 51
+#define FRF_AB_GPIO11_OUT_WIDTH 1
+#define FRF_AB_GPIO10_OUT_LBN 50
+#define FRF_AB_GPIO10_OUT_WIDTH 1
+#define FRF_AB_GPIO9_OUT_LBN 49
+#define FRF_AB_GPIO9_OUT_WIDTH 1
+#define FRF_AB_GPIO8_OUT_LBN 48
+#define FRF_AB_GPIO8_OUT_WIDTH 1
+#define FRF_AB_GPIO15_IN_LBN 47
+#define FRF_AB_GPIO15_IN_WIDTH 1
+#define FRF_AB_GPIO14_IN_LBN 46
+#define FRF_AB_GPIO14_IN_WIDTH 1
+#define FRF_AB_GPIO13_IN_LBN 45
+#define FRF_AB_GPIO13_IN_WIDTH 1
+#define FRF_AB_GPIO12_IN_LBN 44
+#define FRF_AB_GPIO12_IN_WIDTH 1
+#define FRF_AB_GPIO11_IN_LBN 43
+#define FRF_AB_GPIO11_IN_WIDTH 1
+#define FRF_AB_GPIO10_IN_LBN 42
+#define FRF_AB_GPIO10_IN_WIDTH 1
+#define FRF_AB_GPIO9_IN_LBN 41
+#define FRF_AB_GPIO9_IN_WIDTH 1
+#define FRF_AB_GPIO8_IN_LBN 40
+#define FRF_AB_GPIO8_IN_WIDTH 1
+#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define FRF_BB_CLK156_OUT_EN_LBN 31
+#define FRF_BB_CLK156_OUT_EN_WIDTH 1
+#define FRF_BB_USE_NIC_CLK_LBN 30
+#define FRF_BB_USE_NIC_CLK_WIDTH 1
+#define FRF_AB_GPIO5_OEN_LBN 29
+#define FRF_AB_GPIO5_OEN_WIDTH 1
+#define FRF_AB_GPIO4_OEN_LBN 28
+#define FRF_AB_GPIO4_OEN_WIDTH 1
+#define FRF_AB_GPIO3_OEN_LBN 27
+#define FRF_AB_GPIO3_OEN_WIDTH 1
+#define FRF_AB_GPIO2_OEN_LBN 26
+#define FRF_AB_GPIO2_OEN_WIDTH 1
+#define FRF_AB_GPIO1_OEN_LBN 25
+#define FRF_AB_GPIO1_OEN_WIDTH 1
+#define FRF_AB_GPIO0_OEN_LBN 24
+#define FRF_AB_GPIO0_OEN_WIDTH 1
+#define FRF_AB_GPIO5_OUT_LBN 21
+#define FRF_AB_GPIO5_OUT_WIDTH 1
+#define FRF_AB_GPIO4_OUT_LBN 20
+#define FRF_AB_GPIO4_OUT_WIDTH 1
+#define FRF_AB_GPIO3_OUT_LBN 19
+#define FRF_AB_GPIO3_OUT_WIDTH 1
+#define FRF_AB_GPIO2_OUT_LBN 18
+#define FRF_AB_GPIO2_OUT_WIDTH 1
+#define FRF_AB_GPIO1_OUT_LBN 17
+#define FRF_AB_GPIO1_OUT_WIDTH 1
+#define FRF_AB_GPIO0_OUT_LBN 16
+#define FRF_AB_GPIO0_OUT_WIDTH 1
+#define FRF_AB_GPIO5_IN_LBN 13
+#define FRF_AB_GPIO5_IN_WIDTH 1
+#define FRF_AB_GPIO4_IN_LBN 12
+#define FRF_AB_GPIO4_IN_WIDTH 1
+#define FRF_AB_GPIO3_IN_LBN 11
+#define FRF_AB_GPIO3_IN_WIDTH 1
+#define FRF_AB_GPIO2_IN_LBN 10
+#define FRF_AB_GPIO2_IN_WIDTH 1
+#define FRF_AB_GPIO1_IN_LBN 9
+#define FRF_AB_GPIO1_IN_WIDTH 1
+#define FRF_AB_GPIO0_IN_LBN 8
+#define FRF_AB_GPIO0_IN_WIDTH 1
+#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_KER(128bit):
+ * Fatal interrupt register for Kernel
+ */
+#define FR_AZ_FATAL_INTR_REG_KER_OFST 0x00000230
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_CHAR(128bit):
+ * Fatal interrupt register for Char
+ */
+#define FR_AZ_FATAL_INTR_REG_CHAR_OFST 0x00000240
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_AZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_AZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_DP_CTRL_REG(128bit):
+ * Datapath control register
+ */
+#define FR_AZ_DP_CTRL_REG_OFST 0x00000250
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_FLS_EVQ_ID_LBN 0
+#define FRF_AZ_FLS_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_MEM_STAT_REG(128bit):
+ * Memory status register
+ */
+#define FR_AZ_MEM_STAT_REG_OFST 0x00000260
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 40
+#define FRF_AB_MEM_PERR_VEC_DW0_LBN 53
+#define FRF_AB_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_AB_MEM_PERR_VEC_DW1_LBN 85
+#define FRF_AB_MEM_PERR_VEC_DW1_WIDTH 6
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 40
+#define FRF_AB_MBIST_ERR_DW0_LBN 0
+#define FRF_AB_MBIST_ERR_DW0_WIDTH 32
+#define FRF_AB_MBIST_ERR_DW1_LBN 32
+#define FRF_AB_MBIST_ERR_DW1_WIDTH 6
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+#define FRF_CZ_MEM_PERR_VEC_DW0_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_LBN 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_WIDTH 3
+
+
+/*
+ * FR_AZ_CS_DEBUG_REG(128bit):
+ * Debug register
+ */
+#define FR_AZ_CS_DEBUG_REG_OFST 0x00000270
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_RESERVED_LBN 36
+#define FRF_CZ_CS_RESERVED_WIDTH 4
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_DW0_LBN 1
+#define FRF_CZ_CS_PORT_FPE_DW0_WIDTH 32
+#define FRF_CZ_CS_PORT_FPE_DW1_LBN 33
+#define FRF_CZ_CS_PORT_FPE_DW1_WIDTH 3
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_DRIVER_REG(128bit):
+ * Driver scratch register [0-7]
+ */
+#define FR_AZ_DRIVER_REG_OFST 0x00000280
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_DRIVER_REG_STEP 16
+#define FR_AZ_DRIVER_REG_ROWS 8
+
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
+
+/*
+ * FR_AZ_ALTERA_BUILD_REG(128bit):
+ * Altera build register
+ */
+#define FR_AZ_ALTERA_BUILD_REG_OFST 0x00000300
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+
+/*
+ * FR_AZ_CSR_SPARE_REG(128bit):
+ * Spare register
+ */
+#define FR_AZ_CSR_SPARE_REG_OFST 0x00000310
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_MEM_PERR_EN_LBN 64
+#define FRF_AZ_MEM_PERR_EN_WIDTH 38
+#define FRF_AZ_MEM_PERR_EN_DW0_LBN 64
+#define FRF_AZ_MEM_PERR_EN_DW0_WIDTH 32
+#define FRF_AZ_MEM_PERR_EN_DW1_LBN 96
+#define FRF_AZ_MEM_PERR_EN_DW1_WIDTH 6
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+
+/*
+ * FR_BZ_DEBUG_DATA_OUT_REG(128bit):
+ * Live Debug and Debug 2 out ports
+ */
+#define FR_BZ_DEBUG_DATA_OUT_REG_OFST 0x00000350
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_DEBUG2_PORT_LBN 25
+#define FRF_BZ_DEBUG2_PORT_WIDTH 15
+#define FRF_BZ_DEBUG1_PORT_LBN 0
+#define FRF_BZ_DEBUG1_PORT_WIDTH 25
+
+
+/*
+ * FR_BZ_EVQ_RPTR_REGP0(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BZ_EVQ_RPTR_REGP0_OFST 0x00000400
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_EVQ_RPTR_REGP0_STEP 8192
+#define FR_BZ_EVQ_RPTR_REGP0_ROWS 1024
+/*
+ * FR_AA_EVQ_RPTR_REG_KER(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AA_EVQ_RPTR_REG_KER_OFST 0x00011b00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_RPTR_REG_KER_STEP 4
+#define FR_AA_EVQ_RPTR_REG_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_RPTR_REG(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AZ_EVQ_RPTR_REG_OFST 0x00fa0000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_RPTR_REG_STEP 16
+#define FR_AB_EVQ_RPTR_REG_ROWS 4096
+#define FR_CZ_EVQ_RPTR_REG_ROWS 1024
+/*
+ * FR_BB_EVQ_RPTR_REGP123(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BB_EVQ_RPTR_REGP123_OFST 0x01000400
+/* falconb0=net_func_bar2 */
+#define FR_BB_EVQ_RPTR_REGP123_STEP 8192
+#define FR_BB_EVQ_RPTR_REGP123_ROWS 3072
+
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
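+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the original map):
+ * registers declared with _STEP and _ROWS are arrays; instance 'index'
+ * lives at _OFST + index * _STEP, with index < _ROWS.  For example, for
+ * FR_BZ_EVQ_RPTR_REGP0:
+ */
+static inline uint32_t
+sketch_evq_rptr_offset(uint32_t index)
+{
+	/* Caller must ensure index < FR_BZ_EVQ_RPTR_REGP0_ROWS (1024). */
+	return (FR_BZ_EVQ_RPTR_REGP0_OFST +
+	    index * FR_BZ_EVQ_RPTR_REGP0_STEP);
+}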
+
+
+/*
+ * FR_BZ_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_BZ_TIMER_COMMAND_REGP0_OFST 0x00000420
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_REGP0_ROWS 1024
+/*
+ * FR_AA_TIMER_COMMAND_REG_KER(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REG_KER_OFST 0x00000420
+/* falcona0=net_func_bar2 */
+#define FR_AA_TIMER_COMMAND_REG_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_REG_KER_ROWS 4
+/*
+ * FR_AB_TIMER_COMMAND_REGP123(128bit):
+ * Timer Command Registers
+ */
+#define FR_AB_TIMER_COMMAND_REGP123_OFST 0x01000420
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TIMER_COMMAND_REGP123_STEP 8192
+#define FR_AB_TIMER_COMMAND_REGP123_ROWS 3072
+/*
+ * FR_AA_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REGP0_OFST 0x00008420
+/* falcona0=char_func_bar0 */
+#define FR_AA_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_AA_TIMER_COMMAND_REGP0_ROWS 1020
+
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
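+
+/*
+ * Illustrative sketch (hypothetical helper): on A/B-series parts the timer
+ * command word packs the 2-bit FRF_AB_TC_TIMER_MODE field above a 12-bit
+ * FRF_AB_TC_TIMER_VAL load value, per the field definitions above.
+ */
+static inline uint32_t
+sketch_ab_timer_command(uint32_t mode, uint32_t val)
+{
+	return (((mode & ((1u << FRF_AB_TC_TIMER_MODE_WIDTH) - 1)) <<
+	    FRF_AB_TC_TIMER_MODE_LBN) |
+	    ((val & ((1u << FRF_AB_TC_TIMER_VAL_WIDTH) - 1)) <<
+	    FRF_AB_TC_TIMER_VAL_LBN));
+}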
+
+
+/*
+ * FR_AZ_DRV_EV_REG(128bit):
+ * Driver generated event register
+ */
+#define FR_AZ_DRV_EV_REG_OFST 0x00000440
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+#define FRF_AZ_DRV_EV_DATA_DW0_LBN 0
+#define FRF_AZ_DRV_EV_DATA_DW0_WIDTH 32
+#define FRF_AZ_DRV_EV_DATA_DW1_LBN 32
+#define FRF_AZ_DRV_EV_DATA_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_EVQ_CTL_REG(128bit):
+ * Event queue control register
+ */
+#define FR_AZ_EVQ_CTL_REG_OFST 0x00000450
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+
+/*
+ * FR_AZ_EVQ_CNT1_REG(128bit):
+ * Event counter 1 register
+ */
+#define FR_AZ_EVQ_CNT1_REG_OFST 0x00000460
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_AZ_EVQ_CNT2_REG(128bit):
+ * Event counter 2 register
+ */
+#define FR_AZ_EVQ_CNT2_REG_OFST 0x00000470
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_CZ_USR_EV_REG(32bit):
+ * Event mailbox register
+ */
+#define FR_CZ_USR_EV_REG_OFST 0x00000540
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_USR_EV_REG_STEP 8192
+#define FR_CZ_USR_EV_REG_ROWS 1024
+
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+
+/*
+ * FR_AZ_BUF_TBL_CFG_REG(128bit):
+ * Buffer table configuration register
+ */
+#define FR_AZ_BUF_TBL_CFG_REG_OFST 0x00000600
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+
+/*
+ * FR_AZ_SRM_RX_DC_CFG_REG(128bit):
+ * SRAM receive descriptor cache configuration register
+ */
+#define FR_AZ_SRM_RX_DC_CFG_REG_OFST 0x00000610
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_TX_DC_CFG_REG(128bit):
+ * SRAM transmit descriptor cache configuration register
+ */
+#define FR_AZ_SRM_TX_DC_CFG_REG_OFST 0x00000620
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_SF_OFST 0x00000380
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_OFST 0x00000630
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+
+/*
+ * FR_AZ_BUF_TBL_UPD_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_BUF_TBL_UPD_REG_OFST 0x00000650
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+
+/*
+ * FR_AZ_SRM_UPD_EVQ_REG(128bit):
+ * SRAM update event queue register
+ */
+#define FR_AZ_SRM_UPD_EVQ_REG_OFST 0x00000660
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_SRAM_PARITY_REG(128bit):
+ * SRAM parity register.
+ */
+#define FR_AZ_SRAM_PARITY_REG_OFST 0x00000670
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_CFG_REG(128bit):
+ * Receive configuration register
+ */
+#define FR_AZ_RX_CFG_REG_OFST 0x00000800
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_FILTER_CTL_REG(128bit):
+ * Receive filter control registers
+ */
+#define FR_AZ_RX_FILTER_CTL_REG_OFST 0x00000810
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_NUM_KER_LBN 24
+#define FRF_AZ_NUM_KER_WIDTH 2
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+
+/*
+ * FR_AZ_RX_FLUSH_DESCQ_REG(128bit):
+ * Receive flush descriptor queue register
+ */
+#define FR_AZ_RX_FLUSH_DESCQ_REG_OFST 0x00000820
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_BZ_RX_DESC_UPD_REGP0_OFST 0x00000830
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_RX_DESC_UPD_REG_KER(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REG_KER_OFST 0x00000830
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_REG_KER_ROWS 4
+/*
+ * FR_AB_RX_DESC_UPD_REGP123(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AB_RX_DESC_UPD_REGP123_OFST 0x01000830
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_RX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REGP0_OFST 0x00008830
+/* falcona0=char_func_bar0 */
+#define FR_AA_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_RX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+#define FRF_AZ_RX_DESC_DW0_LBN 0
+#define FRF_AZ_RX_DESC_DW0_WIDTH 32
+#define FRF_AZ_RX_DESC_DW1_LBN 32
+#define FRF_AZ_RX_DESC_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_DC_CFG_REG(128bit):
+ * Receive descriptor cache configuration register
+ */
+#define FR_AZ_RX_DC_CFG_REG_OFST 0x00000840
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_MAX_PF_LBN 2
+#define FRF_AZ_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
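+
+/*
+ * Illustrative sketch: the FFE_AZ_RX_DC_SIZE_* encodings above follow
+ * encoding = log2(descriptors) - 3, i.e. 8 -> 0, 16 -> 1, 32 -> 2,
+ * 64 -> 3.  A hypothetical helper (ndesc must be one of 8/16/32/64):
+ */
+static inline uint32_t
+sketch_rx_dc_size_encode(uint32_t ndesc)
+{
+	uint32_t enc = 0;
+
+	while ((8u << enc) < ndesc)
+		enc++;
+	return (enc);
+}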
+
+
+/*
+ * FR_AZ_RX_DC_PF_WM_REG(128bit):
+ * Receive descriptor cache pre-fetch watermark register
+ */
+#define FR_AZ_RX_DC_PF_WM_REG_OFST 0x00000850
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+
+/*
+ * FR_BZ_RX_RSS_TKEY_REG(128bit):
+ * RSS Toeplitz hash key
+ */
+#define FR_BZ_RX_RSS_TKEY_REG_OFST 0x00000860
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_RX_RSS_TKEY_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW3_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_DW3_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW2_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_DW2_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_LBN 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW0_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_DW0_WIDTH 32
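+
+/*
+ * Illustrative sketch (byte order is an assumption, not taken from this
+ * header): the 128-bit Toeplitz key is exposed as four 32-bit dwords
+ * (DW0..DW3), so a host-side key splits as below.
+ */
+static inline void
+sketch_rss_key_to_dwords(const uint8_t key[16], uint32_t dw[4])
+{
+	uint32_t i;
+
+	for (i = 0; i < 4; i++)
+		dw[i] = (uint32_t)key[4 * i] |
+		    ((uint32_t)key[4 * i + 1] << 8) |
+		    ((uint32_t)key[4 * i + 2] << 16) |
+		    ((uint32_t)key[4 * i + 3] << 24);
+}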
+
+
+/*
+ * FR_AZ_RX_NODESC_DROP_REG(128bit):
+ * Receive dropped packet counter register
+ */
+#define FR_AZ_RX_NODESC_DROP_REG_OFST 0x00000880
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AZ_RX_NODESC_DROP_CNT_WIDTH 16
+
+
+/*
+ * FR_AZ_RX_SELF_RST_REG(128bit):
+ * Receive self reset register
+ */
+#define FR_AZ_RX_SELF_RST_REG_OFST 0x00000890
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_ISCSI_DIS_LBN 17
+#define FRF_AZ_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AB_RX_SW_RST_REG_LBN 16
+#define FRF_AB_RX_SW_RST_REG_WIDTH 1
+#define FRF_AB_RX_SELF_RST_EN_LBN 8
+#define FRF_AB_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AZ_RX_MAX_PF_LAT_LBN 4
+#define FRF_AZ_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AZ_RX_MAX_LU_LAT_LBN 0
+#define FRF_AZ_RX_MAX_LU_LAT_WIDTH 4
+
+
+/*
+ * FR_AZ_RX_DEBUG_REG(128bit):
+ * undocumented register
+ */
+#define FR_AZ_RX_DEBUG_REG_OFST 0x000008a0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+#define FRF_AZ_RX_DEBUG_DW0_LBN 0
+#define FRF_AZ_RX_DEBUG_DW0_WIDTH 32
+#define FRF_AZ_RX_DEBUG_DW1_LBN 32
+#define FRF_AZ_RX_DEBUG_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_PUSH_DROP_REG(128bit):
+ * Receive descriptor push dropped counter register
+ */
+#define FR_AZ_RX_PUSH_DROP_REG_OFST 0x000008b0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG1(128bit):
+ * IPv6 RSS Toeplitz hash key low bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG1_OFST 0x000008d0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG2(128bit):
+ * IPv6 RSS Toeplitz hash key middle bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG2_OFST 0x000008e0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG3(128bit):
+ * IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings
+ */
+#define FR_CZ_RX_RSS_IPV6_REG3_OFST 0x000008f0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_FLUSH_DESCQ_REG(128bit):
+ * Transmit flush descriptor queue register
+ */
+#define FR_AZ_TX_FLUSH_DESCQ_REG_OFST 0x00000a00
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_BZ_TX_DESC_UPD_REGP0_OFST 0x00000a10
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_TX_DESC_UPD_REG_KER(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REG_KER_OFST 0x00000a10
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_REG_KER_ROWS 8
+/*
+ * FR_AB_TX_DESC_UPD_REGP123(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AB_TX_DESC_UPD_REGP123_OFST 0x01000a10
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_TX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REGP0_OFST 0x00008a10
+/* falcona0=char_func_bar0 */
+#define FR_AA_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_TX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
+#define FRF_AZ_TX_DESC_DW0_LBN 0
+#define FRF_AZ_TX_DESC_DW0_WIDTH 32
+#define FRF_AZ_TX_DESC_DW1_LBN 32
+#define FRF_AZ_TX_DESC_DW1_WIDTH 32
+#define FRF_AZ_TX_DESC_DW2_LBN 64
+#define FRF_AZ_TX_DESC_DW2_WIDTH 31
+
+
+/*
+ * FR_AZ_TX_DC_CFG_REG(128bit):
+ * Transmit descriptor cache configuration register
+ */
+#define FR_AZ_TX_DC_CFG_REG_OFST 0x00000a20
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+
+/*
+ * FR_AA_TX_CHKSM_CFG_REG(128bit):
+ * Transmit checksum configuration register
+ */
+#define FR_AA_TX_CHKSM_CFG_REG_OFST 0x00000a30
+/* falcona0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_CFG_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_CFG_REG_OFST 0x00000a50
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_TX_PUSH_DROP_REG(128bit):
+ * Transmit push dropped register
+ */
+#define FR_AZ_TX_PUSH_DROP_REG_OFST 0x00000a60
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_RESERVED_REG(128bit):
+ * Transmit reserved register (additional transmit configuration)
+ */
+#define FR_AZ_TX_RESERVED_REG_OFST 0x00000a80
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+
+/*
+ * FR_BZ_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_BZ_TX_PACE_REG_OFST 0x00000a90
+/* falconb0,sienaa0=net_func_bar2 */
+/*
+ * FR_AA_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_AA_TX_PACE_REG_OFST 0x00f80000
+/* falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_AZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_SB_AF_LBN 9
+#define FRF_AZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_AZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_AZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_AZ_TX_PACE_BIN_TH_WIDTH 5
+
+
+/*
+ * FR_AZ_TX_PACE_DROP_QID_REG(128bit):
+ * PACE Drop QID Counter
+ */
+#define FR_AZ_TX_PACE_DROP_QID_REG_OFST 0x00000aa0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_TX_VLAN_REG(128bit):
+ * Transmit VLAN tag register
+ */
+#define FR_AB_TX_VLAN_REG_OFST 0x00000ae0
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_TX_VLAN_EN_LBN 127
+#define FRF_AB_TX_VLAN_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_AB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_AB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_LBN 112
+#define FRF_AB_TX_VLAN7_WIDTH 12
+#define FRF_AB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_AB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_AB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_LBN 96
+#define FRF_AB_TX_VLAN6_WIDTH 12
+#define FRF_AB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_AB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_AB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_LBN 80
+#define FRF_AB_TX_VLAN5_WIDTH 12
+#define FRF_AB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_AB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_AB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_LBN 64
+#define FRF_AB_TX_VLAN4_WIDTH 12
+#define FRF_AB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_AB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_AB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_LBN 48
+#define FRF_AB_TX_VLAN3_WIDTH 12
+#define FRF_AB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_AB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_AB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_LBN 32
+#define FRF_AB_TX_VLAN2_WIDTH 12
+#define FRF_AB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_AB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_AB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_LBN 16
+#define FRF_AB_TX_VLAN1_WIDTH 12
+#define FRF_AB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_AB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_AB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_LBN 0
+#define FRF_AB_TX_VLAN0_WIDTH 12
+
+
+/*
+ * FR_AZ_TX_IPFIL_PORTEN_REG(128bit):
+ * Transmit filter control register
+ */
+#define FR_AZ_TX_IPFIL_PORTEN_REG_OFST 0x00000af0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_AZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_AB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_AB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_AB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_AB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_AB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_AB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_AB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_AB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_AB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_AB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_AB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_AB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_AB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_AB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_AB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_AB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_AB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_AB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_AB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_AB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_AB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_AB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_AB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_AB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_AB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_AB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_AB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_AB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_AB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_AB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_AB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_AB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+
+/*
+ * FR_AB_TX_IPFIL_TBL(128bit):
+ * Transmit IP source address filter table
+ */
+#define FR_AB_TX_IPFIL_TBL_OFST 0x00000b00
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_IPFIL_TBL_STEP 16
+#define FR_AB_TX_IPFIL_TBL_ROWS 16
+
+#define FRF_AB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_AB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_AB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_AB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_AB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_AB_TX_IP_SRC_ADR_0_WIDTH 32
+
+
+/*
+ * FR_AB_MD_TXD_REG(128bit):
+ * PHY management transmit data register
+ */
+#define FR_AB_MD_TXD_REG_OFST 0x00000c00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_RXD_REG(128bit):
+ * PHY management receive data register
+ */
+#define FR_AB_MD_RXD_REG_OFST 0x00000c10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_CS_REG(128bit):
+ * PHY management configuration & status register
+ */
+#define FR_AB_MD_CS_REG_OFST 0x00000c20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RD_EN_LBN 15
+#define FRF_AB_MD_RD_EN_WIDTH 1
+#define FRF_AB_MD_WR_EN_LBN 14
+#define FRF_AB_MD_WR_EN_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+
+/*
+ * FR_AB_MD_PHY_ADR_REG(128bit):
+ * PHY management PHY address register
+ */
+#define FR_AB_MD_PHY_ADR_REG_OFST 0x00000c30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+
+/*
+ * FR_AB_MD_ID_REG(128bit):
+ * PHY management ID register
+ */
+#define FR_AB_MD_ID_REG_OFST 0x00000c40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+
+/*
+ * FR_AB_MD_STAT_REG(128bit):
+ * PHY management status & mask register
+ */
+#define FR_AB_MD_STAT_REG_OFST 0x00000c50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
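+
+/*
+ * Illustrative sketch of a clause-45 MDIO read built on the MD_* registers
+ * above.  sketch_read32()/sketch_write32() are hypothetical MMIO accessors
+ * and the exact sequencing is an assumption, not taken from this header; a
+ * real driver would also bound the busy polls with a timeout.
+ */
+extern uint32_t sketch_read32(uint32_t ofst);
+extern void sketch_write32(uint32_t ofst, uint32_t value);
+
+static inline uint16_t
+sketch_mdio45_read(uint32_t prtad, uint32_t devad, uint16_t addr)
+{
+	/* Target the PHY: port and MMD device addresses. */
+	sketch_write32(FR_AB_MD_ID_REG_OFST,
+	    (prtad << FRF_AB_MD_PRT_ADR_LBN) |
+	    (devad << FRF_AB_MD_DEV_ADR_LBN));
+	/* Address cycle. */
+	sketch_write32(FR_AB_MD_PHY_ADR_REG_OFST, addr);
+	sketch_write32(FR_AB_MD_CS_REG_OFST, 1u << FRF_AB_MD_ADDR_CMD_LBN);
+	while (sketch_read32(FR_AB_MD_STAT_REG_OFST) &
+	    (1u << FRF_AB_MD_BSY_LBN))
+		;
+	/* Read cycle, then collect the result from MD_RXD. */
+	sketch_write32(FR_AB_MD_CS_REG_OFST, 1u << FRF_AB_MD_RIC_LBN);
+	while (sketch_read32(FR_AB_MD_STAT_REG_OFST) &
+	    (1u << FRF_AB_MD_BSY_LBN))
+		;
+	return ((uint16_t)sketch_read32(FR_AB_MD_RXD_REG_OFST));
+}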
+
+
+/*
+ * FR_AB_MAC_STAT_DMA_REG(128bit):
+ * Port MAC statistical counter DMA register
+ */
+#define FR_AB_MAC_STAT_DMA_REG_OFST 0x00000c60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_WIDTH 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_LBN 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_WIDTH 16
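+
+/*
+ * Illustrative sketch: the statistics DMA address is a 48-bit field split
+ * across two dwords (DW0 = bits 0..31, DW1 = bits 32..47) per the
+ * definitions above.  A hypothetical host-side split:
+ */
+static inline void
+sketch_mac_stat_dma_adr(uint64_t addr, uint32_t *dw0, uint32_t *dw1)
+{
+	*dw0 = (uint32_t)addr;
+	*dw1 = (uint32_t)(addr >> 32) & 0xffff;	/* only 16 bits are valid */
+}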
+
+
+/*
+ * FR_AB_MAC_CTRL_REG(128bit):
+ * Port MAC control register
+ */
+#define FR_AB_MAC_CTRL_REG_OFST 0x00000c80
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FRF_AB_MAC_SPEED_10M 0
+#define FRF_AB_MAC_SPEED_100M 1
+#define FRF_AB_MAC_SPEED_1G 2
+#define FRF_AB_MAC_SPEED_10G 3
+
+
+/*
+ * FR_BB_GEN_MODE_REG(128bit):
+ * General Purpose mode register (external interrupt mask)
+ */
+#define FR_BB_GEN_MODE_REG_OFST 0x00000c90
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+
+/*
+ * FR_AB_MAC_MC_HASH0_REG(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH0_REG_OFST 0x00000ca0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH0_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH0_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH0_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_MAC_MC_HASH1_REG(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH1_REG_OFST 0x00000cb0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH1_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH1_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH1_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_GM_CFG1_REG(32bit):
+ * GMAC configuration register 1
+ */
+#define FR_AB_GM_CFG1_REG_OFST 0x00000e00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_GM_CFG2_REG(32bit):
+ * GMAC configuration register 2
+ */
+#define FR_AB_GM_CFG2_REG_OFST 0x00000e10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FRF_AB_GM_IF_MODE_BYTE_MODE 2
+#define FRF_AB_GM_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+
+/*
+ * FR_AB_GM_IPG_REG(32bit):
+ * GMAC IPG register
+ */
+#define FR_AB_GM_IPG_REG_OFST 0x00000e20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+
+/*
+ * FR_AB_GM_HD_REG(32bit):
+ * GMAC half duplex register
+ */
+#define FR_AB_GM_HD_REG_OFST 0x00000e30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+
+/*
+ * FR_AB_GM_MAX_FLEN_REG(32bit):
+ * GMAC maximum frame length register
+ */
+#define FR_AB_GM_MAX_FLEN_REG_OFST 0x00000e40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+
+/*
+ * FR_AB_GM_TEST_REG(32bit):
+ * GMAC test register
+ */
+#define FR_AB_GM_TEST_REG_OFST 0x00000e70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+
+/*
+ * FR_AB_GM_ADR1_REG(32bit):
+ * GMAC station address register 1
+ */
+#define FR_AB_GM_ADR1_REG_OFST 0x00000f00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+
+/*
+ * FR_AB_GM_ADR2_REG(32bit):
+ * GMAC station address register 2
+ */
+#define FR_AB_GM_ADR2_REG_OFST 0x00000f10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
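+
+/*
+ * Illustrative sketch (the octet-to-byte-lane mapping is an assumption):
+ * the six station-address octets map to GM_ADR_B0..B5 as laid out above,
+ * with B0 in the top byte of ADR1.
+ */
+static inline void
+sketch_gm_station_addr(const uint8_t mac[6], uint32_t *adr1, uint32_t *adr2)
+{
+	*adr1 = ((uint32_t)mac[0] << FRF_AB_GM_ADR_B0_LBN) |
+	    ((uint32_t)mac[1] << FRF_AB_GM_ADR_B1_LBN) |
+	    ((uint32_t)mac[2] << FRF_AB_GM_ADR_B2_LBN) |
+	    ((uint32_t)mac[3] << FRF_AB_GM_ADR_B3_LBN);
+	*adr2 = ((uint32_t)mac[4] << FRF_AB_GM_ADR_B4_LBN) |
+	    ((uint32_t)mac[5] << FRF_AB_GM_ADR_B5_LBN);
+}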
+
+
+/*
+ * FR_AB_GMF_CFG0_REG(32bit):
+ * GMAC FIFO configuration register 0
+ */
+#define FR_AB_GMF_CFG0_REG_OFST 0x00000f20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+
+/*
+ * FR_AB_GMF_CFG1_REG(32bit):
+ * GMAC FIFO configuration register 1
+ */
+#define FR_AB_GMF_CFG1_REG_OFST 0x00000f30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+
+/*
+ * FR_AB_GMF_CFG2_REG(32bit):
+ * GMAC FIFO configuration register 2
+ */
+#define FR_AB_GMF_CFG2_REG_OFST 0x00000f40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG3_REG(32bit):
+ * GMAC FIFO configuration register 3
+ */
+#define FR_AB_GMF_CFG3_REG_OFST 0x00000f50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG4_REG(32bit):
+ * GMAC FIFO configuration register 4
+ */
+#define FR_AB_GMF_CFG4_REG_OFST 0x00000f60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+
+/*
+ * FR_AB_GMF_CFG5_REG(32bit):
+ * GMAC FIFO configuration register 5
+ */
+#define FR_AB_GMF_CFG5_REG_OFST 0x00000f70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+
+/*
+ * FR_BB_TX_SRC_MAC_TBL(128bit):
+ * Transmit MAC source address filter table
+ */
+#define FR_BB_TX_SRC_MAC_TBL_OFST 0x00001000
+/* falconb0=net_func_bar2 */
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_LBN 96
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_WIDTH 16
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_LBN 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_WIDTH 16
+
+
+/*
+ * FR_BB_TX_SRC_MAC_CTL_REG(128bit):
+ * Transmit MAC source address filter control
+ */
+#define FR_BB_TX_SRC_MAC_CTL_REG_OFST 0x00001100
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+
+/*
+ * FR_AB_XM_ADR_LO_REG(128bit):
+ * XGMAC address register low
+ */
+#define FR_AB_XM_ADR_LO_REG_OFST 0x00001200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+
+/*
+ * FR_AB_XM_ADR_HI_REG(128bit):
+ * XGMAC address register high
+ */
+#define FR_AB_XM_ADR_HI_REG_OFST 0x00001210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+
+/*
+ * FR_AB_XM_GLB_CFG_REG(128bit):
+ * XGMAC global configuration
+ */
+#define FR_AB_XM_GLB_CFG_REG_OFST 0x00001220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_TX_CFG_REG(128bit):
+ * XGMAC transmit configuration
+ */
+#define FR_AB_XM_TX_CFG_REG_OFST 0x00001230
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_RX_CFG_REG(128bit):
+ * XGMAC receive configuration
+ */
+#define FR_AB_XM_RX_CFG_REG_OFST 0x00001240
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_MGT_INT_MASK(128bit):
+ * XGMAC management interrupt mask register
+ */
+#define FR_AB_XM_MGT_INT_MASK_OFST 0x00001250
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XM_FC_REG(128bit):
+ * XGMAC flow control register
+ */
+#define FR_AB_XM_FC_REG_OFST 0x00001270
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+
+/*
+ * FR_AB_XM_PAUSE_TIME_REG(128bit):
+ * XGMAC pause time register
+ */
+#define FR_AB_XM_PAUSE_TIME_REG_OFST 0x00001290
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_XM_TX_PARAM_REG(128bit):
+ * XGMAC transmit parameter register
+ */
+#define FR_AB_XM_TX_PARAM_REG_OFST 0x000012d0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
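+
+/*
+ * Illustrative sketch (assumed encoding): the maximum TX frame size spans
+ * the contiguous 3-bit LO and 11-bit HI fields above, so a 14-bit length
+ * in bytes splits as below.
+ */
+static inline uint32_t
+sketch_xm_max_tx_frm_size(uint32_t len)
+{
+	return (((len & 7) << FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN) |
+	    ((len >> 3) << FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN));
+}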
+
+
+/*
+ * FR_AB_XM_RX_PARAM_REG(128bit):
+ * XGMAC receive parameter register
+ */
+#define FR_AB_XM_RX_PARAM_REG_OFST 0x000012e0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+
+/*
+ * FR_AB_XM_MGT_INT_REG(128bit):
+ * XGMAC management interrupt status register
+ */
+#define FR_AB_XM_MGT_INT_REG_OFST 0x000012f0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PWR_RST_REG(128bit):
+ * XGXS/XAUI powerdown/reset register
+ */
+#define FR_AB_XX_PWR_RST_REG_OFST 0x00001300
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_SD_CTL_REG(128bit):
+ * XGXS/XAUI powerdown/reset control register
+ */
+#define FR_AB_XX_SD_CTL_REG_OFST 0x00001310
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+
+/*
+ * FR_AB_XX_TXDRV_CTL_REG(128bit):
+ * XAUI SerDes transmit drive control register
+ */
+#define FR_AB_XX_TXDRV_CTL_REG_OFST 0x00001320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+
+/*
+ * FR_AB_XX_PRBS_CTL_REG(128bit):
+ * XAUI SerDes PRBS control register
+ */
+#define FR_AB_XX_PRBS_CTL_REG_OFST 0x00001330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_CHK_REG(128bit):
+ * XAUI SerDes PRBS checker status register
+ */
+#define FR_AB_XX_PRBS_CHK_REG_OFST 0x00001340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_ERR_REG(128bit):
+ * XAUI SerDes PRBS error count register
+ */
+#define FR_AB_XX_PRBS_ERR_REG_OFST 0x00001350
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+
+/*
+ * FR_AB_XX_CORE_STAT_REG(128bit):
+ * XAUI XGXS core status register
+ */
+#define FR_AB_XX_CORE_STAT_REG_OFST 0x00001360
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+
+/*
+ * FR_AA_RX_DESC_PTR_TBL_KER(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AA_RX_DESC_PTR_TBL_KER_OFST 0x00011800
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_RX_DESC_PTR_TBL(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AZ_RX_DESC_PTR_TBL_OFST 0x00f40000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FR_AB_RX_DESC_PTR_TBL_ROWS 4096
+
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AZ_RX_RESET_LBN 89
+#define FRF_AZ_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define FFE_AZ_RX_DESCQ_SIZE_512 0
+#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define FRF_AZ_RX_DESCQ_EN_LBN 0
+#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
+
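+/*
+ * Usage sketch (illustrative, not vendor text): a row of a register
+ * table is addressed as offset + index * step, with the index bounded
+ * by the per-revision row count.  For an RX queue 'index':
+ *
+ *	uint32_t ofst = FR_AZ_RX_DESC_PTR_TBL_OFST +
+ *	    (index * FR_AZ_RX_DESC_PTR_TBL_STEP);
+ *
+ * where index must be below FR_AB_RX_DESC_PTR_TBL_ROWS on Falcon and
+ * below FR_CZ_RX_DESC_PTR_TBL_ROWS on Siena.
+ */
+
+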
+/*
+ * FR_AA_TX_DESC_PTR_TBL_KER(128bit):
+ * Transmit descriptor pointer table
+ */
+#define FR_AA_TX_DESC_PTR_TBL_KER_OFST 0x00011900
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/*
+ * FR_AZ_TX_DESC_PTR_TBL(128bit):
+ * Transmit descriptor pointer table
+ */
+#define FR_AZ_TX_DESC_PTR_TBL_OFST 0x00f50000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TX_DESC_PTR_TBL_STEP 16
+#define FR_AB_TX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define FRF_AZ_TX_DESCQ_EN_LBN 88
+#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define FFE_AZ_TX_DESCQ_SIZE_512 0
+#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+
+/*
+ * FR_AA_EVQ_PTR_TBL_KER(128bit):
+ * Event queue pointer table
+ */
+#define FR_AA_EVQ_PTR_TBL_KER_OFST 0x00011a00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_PTR_TBL(128bit):
+ * Event queue pointer table
+ */
+#define FR_AZ_EVQ_PTR_TBL_OFST 0x00f60000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_PTR_TBL_STEP 16
+#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define FR_AB_EVQ_PTR_TBL_ROWS 4096
+
+#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_LBN 39
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define FRF_AZ_EVQ_EN_LBN 23
+#define FRF_AZ_EVQ_EN_WIDTH 1
+#define FRF_AZ_EVQ_SIZE_LBN 20
+#define FRF_AZ_EVQ_SIZE_WIDTH 3
+#define FFE_AZ_EVQ_SIZE_32K 6
+#define FFE_AZ_EVQ_SIZE_16K 5
+#define FFE_AZ_EVQ_SIZE_8K 4
+#define FFE_AZ_EVQ_SIZE_4K 3
+#define FFE_AZ_EVQ_SIZE_2K 2
+#define FFE_AZ_EVQ_SIZE_1K 1
+#define FFE_AZ_EVQ_SIZE_512 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+
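+/*
+ * The FFE_AZ_EVQ_SIZE encodings above follow a "512 << n" pattern:
+ * encoding 0 selects 512 entries and encoding 6 selects 32768.  A
+ * minimal sketch of the reverse mapping, assuming nentries is a power
+ * of two in that range (the function name is hypothetical):
+ *
+ *	unsigned int evq_size_encode(unsigned int nentries)
+ *	{
+ *		unsigned int enc = 0;
+ *
+ *		while ((512u << enc) < nentries)
+ *			enc++;
+ *		return (enc);
+ *	}
+ */
+
+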
+/*
+ * FR_AA_BUF_HALF_TBL_KER(64bit):
+ * Buffer table in half buffer table mode, direct access by driver
+ */
+#define FR_AA_BUF_HALF_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_HALF_TBL(64bit):
+ * Buffer table in half buffer table mode, direct access by driver
+ */
+#define FR_AZ_BUF_HALF_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_HALF_TBL_STEP 8
+#define FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define FR_AB_BUF_HALF_TBL_ROWS 524288
+
+#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+
+/*
+ * FR_AA_BUF_FULL_TBL_KER(64bit):
+ * Buffer table in full buffer table mode, direct access by driver
+ */
+#define FR_AA_BUF_FULL_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_FULL_TBL(64bit):
+ * Buffer table in full buffer table mode, direct access by driver
+ */
+#define FR_AZ_BUF_FULL_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_FULL_TBL_STEP 8
+
+#define FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define FR_AB_BUF_FULL_TBL_ROWS 917504
+
+#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define FRF_AZ_BUF_ADR_REGION_LBN 48
+#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define FFE_AZ_BUF_ADR_REGN3 3
+#define FFE_AZ_BUF_ADR_REGN2 2
+#define FFE_AZ_BUF_ADR_REGN1 1
+#define FFE_AZ_BUF_ADR_REGN0 0
+#define FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define FRF_AZ_BUF_ADR_FBUF_DW0_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_DW0_WIDTH 32
+#define FRF_AZ_BUF_ADR_FBUF_DW1_LBN 46
+#define FRF_AZ_BUF_ADR_FBUF_DW1_WIDTH 2
+#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+
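+/*
+ * Composition sketch for a full-mode buffer table entry (illustrative;
+ * the 4KB buffer size and region 0 are assumptions, selectable via
+ * FRF_AZ_IP_DAT_BUF_SIZE and FRF_AZ_BUF_ADR_REGION).  The 34-bit
+ * FRF_AZ_BUF_ADR_FBUF field plus the 12-bit page offset matches the
+ * 46-bit DMA addresses used in the descriptor formats below:
+ *
+ *	uint64_t entry =
+ *	    ((busaddr >> 12) << FRF_AZ_BUF_ADR_FBUF_LBN) |
+ *	    (owner_id & ((1u << FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH) - 1));
+ */
+
+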
+/*
+ * FR_AZ_RX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AZ_RX_FILTER_TBL0_OFST 0x00f00000
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_FILTER_TBL0_STEP 32
+#define FR_AZ_RX_FILTER_TBL0_ROWS 8192
+/*
+ * FR_AB_RX_FILTER_TBL1(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AB_RX_FILTER_TBL1_OFST 0x00f00010
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_FILTER_TBL1_STEP 32
+#define FR_AB_RX_FILTER_TBL1_ROWS 8192
+
+#define FRF_BZ_RSS_EN_LBN 110
+#define FRF_BZ_RSS_EN_WIDTH 1
+#define FRF_BZ_SCATTER_EN_LBN 109
+#define FRF_BZ_SCATTER_EN_WIDTH 1
+#define FRF_AZ_TCP_UDP_LBN 108
+#define FRF_AZ_TCP_UDP_WIDTH 1
+#define FRF_AZ_RXQ_ID_LBN 96
+#define FRF_AZ_RXQ_ID_WIDTH 12
+#define FRF_AZ_DEST_IP_LBN 64
+#define FRF_AZ_DEST_IP_WIDTH 32
+#define FRF_AZ_DEST_PORT_TCP_LBN 48
+#define FRF_AZ_DEST_PORT_TCP_WIDTH 16
+#define FRF_AZ_SRC_IP_LBN 16
+#define FRF_AZ_SRC_IP_WIDTH 32
+#define FRF_AZ_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_AZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_RX_MAC_FILTER_TBL0(128bit):
+ * Receive Ethernet filter table
+ */
+#define FR_CZ_RX_MAC_FILTER_TBL0_OFST 0x00f00010
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_RMFT_RSS_EN_LBN 75
+#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
+#define FRF_CZ_RMFT_DEST_MAC_DW0_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_DW0_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_DW1_LBN 44
+#define FRF_CZ_RMFT_DEST_MAC_DW1_WIDTH 16
+#define FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_TIMER_TBL(128bit):
+ * Timer table
+ */
+#define FR_AZ_TIMER_TBL_OFST 0x00f70000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TIMER_TBL_STEP 16
+#define FR_CZ_TIMER_TBL_ROWS 1024
+#define FR_AB_TIMER_TBL_ROWS 4096
+
+#define FRF_CZ_TIMER_Q_EN_LBN 33
+#define FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define FRF_CZ_INT_ARMD_LBN 32
+#define FRF_CZ_INT_ARMD_WIDTH 1
+#define FRF_CZ_INT_PEND_LBN 31
+#define FRF_CZ_INT_PEND_WIDTH 1
+#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define FRF_CZ_TIMER_MODE_LBN 14
+#define FRF_CZ_TIMER_MODE_WIDTH 2
+#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define FFE_CZ_TIMER_MODE_TRIG_START 2
+#define FFE_CZ_TIMER_MODE_IMMED_START 1
+#define FFE_CZ_TIMER_MODE_DIS 0
+#define FRF_AB_TIMER_MODE_LBN 12
+#define FRF_AB_TIMER_MODE_WIDTH 2
+#define FFE_AB_TIMER_MODE_INT_HLDOFF 2
+#define FFE_AB_TIMER_MODE_TRIG_START 2
+#define FFE_AB_TIMER_MODE_IMMED_START 1
+#define FFE_AB_TIMER_MODE_DIS 0
+#define FRF_CZ_TIMER_VAL_LBN 0
+#define FRF_CZ_TIMER_VAL_WIDTH 14
+#define FRF_AB_TIMER_VAL_LBN 0
+#define FRF_AB_TIMER_VAL_WIDTH 12
+
+
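+/*
+ * The revision infix in these names records which controllers
+ * implement a register or field: AA/AB cover Falcon A0/B0, BB/BZ start
+ * at Falcon B0, CZ starts at Siena A0, and AZ spans all of them, as
+ * the per-register BAR comments (falcona0, falconb0, sienaa0) show.
+ * The timer table above illustrates why the distinction matters:
+ * FRF_AB_TIMER_MODE and FRF_CZ_TIMER_MODE overlay different bit
+ * positions of the same row.
+ */
+
+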
+/*
+ * FR_BZ_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_BZ_TX_PACE_TBL_OFST 0x00f80000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2 */
+#define FR_AZ_TX_PACE_TBL_STEP 16
+#define FR_CZ_TX_PACE_TBL_ROWS 1024
+#define FR_BB_TX_PACE_TBL_ROWS 4096
+/*
+ * FR_AA_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_AA_TX_PACE_TBL_OFST 0x00f80040
+/* falcona0=char_func_bar0 */
+/* FR_AZ_TX_PACE_TBL_STEP 16 */
+#define FR_AA_TX_PACE_TBL_ROWS 4092
+
+#define FRF_AZ_TX_PACE_LBN 0
+#define FRF_AZ_TX_PACE_WIDTH 5
+
+
+/*
+ * FR_BZ_RX_INDIRECTION_TBL(7bit):
+ * RX Indirection Table
+ */
+#define FR_BZ_RX_INDIRECTION_TBL_OFST 0x00fb0000
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+
+#define FRF_BZ_IT_QUEUE_LBN 0
+#define FRF_BZ_IT_QUEUE_WIDTH 6
+
+
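+/*
+ * Usage sketch (inferred from the table geometry, not vendor text):
+ * RSS selects a destination queue by indexing this 128-row table with
+ * the low bits of the receive hash:
+ *
+ *	uint32_t ofst = FR_BZ_RX_INDIRECTION_TBL_OFST +
+ *	    ((hash & (FR_BZ_RX_INDIRECTION_TBL_ROWS - 1)) *
+ *	    FR_BZ_RX_INDIRECTION_TBL_STEP);
+ *
+ * with the 6-bit FRF_BZ_IT_QUEUE field of that row naming the queue.
+ */
+
+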
+/*
+ * FR_CZ_TX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Transmit filter table
+ */
+#define FR_CZ_TX_FILTER_TBL0_OFST 0x00fc0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
+
+#define FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TIFT_DEST_IP_LBN 64
+#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define FRF_CZ_TIFT_SRC_IP_LBN 16
+#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_TX_MAC_FILTER_TBL0(128bit):
+ * Transmit Ethernet filter table
+ */
+#define FR_CZ_TX_MAC_FILTER_TBL0_OFST 0x00fe0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
+#define FRF_CZ_TMFT_SRC_MAC_DW0_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_DW0_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_DW1_LBN 44
+#define FRF_CZ_TMFT_SRC_MAC_DW1_WIDTH 16
+#define FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_CZ_MC_TREG_SMEM(32bit):
+ * MC Shared Memory
+ */
+#define FR_CZ_MC_TREG_SMEM_OFST 0x00ff0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_MC_TREG_SMEM_STEP 4
+#define FR_CZ_MC_TREG_SMEM_ROWS 512
+
+#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_BB_MSIX_VECTOR_TABLE_OFST 0x00ff0000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/*
+ * FR_CZ_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_CZ_MSIX_VECTOR_TABLE_OFST 0x00000000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+
+#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_BB_MSIX_PBA_TABLE_OFST 0x00ff2000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define FR_BB_MSIX_PBA_TABLE_ROWS 2
+/*
+ * FR_CZ_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_CZ_MSIX_PBA_TABLE_OFST 0x00008000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
+
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+
+/*
+ * FR_AZ_SRM_DBG_REG(64bit):
+ * SRAM debug access
+ */
+#define FR_AZ_SRM_DBG_REG_OFST 0x03000000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_SRM_DBG_REG_STEP 8
+
+#define FR_CZ_SRM_DBG_REG_ROWS 262144
+#define FR_AB_SRM_DBG_REG_ROWS 2097152
+
+#define FRF_AZ_SRM_DBG_LBN 0
+#define FRF_AZ_SRM_DBG_WIDTH 64
+#define FRF_AZ_SRM_DBG_DW0_LBN 0
+#define FRF_AZ_SRM_DBG_DW0_WIDTH 32
+#define FRF_AZ_SRM_DBG_DW1_LBN 32
+#define FRF_AZ_SRM_DBG_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_CHAR(32bit):
+ * CHAR interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_CHAR_OFST 0x00000060
+/* falcona0=char_func_bar0 */
+
+#define FRF_AA_INT_ACK_CHAR_FIELD_LBN 0
+#define FRF_AA_INT_ACK_CHAR_FIELD_WIDTH 32
+
+
+/* FS_DRIVER_EV */
+#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define FSE_AZ_TX_DSC_ERROR_EV 15
+#define FSE_AZ_RX_DSC_ERROR_EV 14
+#define FSE_AZ_RX_RECOVER_EV 11
+#define FSE_AZ_TIMER_EV 10
+#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define FSE_AZ_WAKE_UP_EV 6
+#define FSE_AZ_SRM_UPD_DONE_EV 5
+#define FSE_AZ_EVQ_NOT_EN_EV 3
+#define FSE_AZ_EVQ_INIT_DONE_EV 2
+#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+
+/* FS_EVENT_ENTRY */
+#define FSF_AZ_EV_CODE_LBN 60
+#define FSF_AZ_EV_CODE_WIDTH 4
+#define FSE_AZ_EV_CODE_USER_EV 8
+#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define FSE_AZ_EV_CODE_DRIVER_EV 5
+#define FSE_AZ_EV_CODE_TX_EV 2
+#define FSE_AZ_EV_CODE_RX_EV 0
+#define FSF_AZ_EV_DATA_LBN 0
+#define FSF_AZ_EV_DATA_WIDTH 60
+#define FSF_AZ_EV_DATA_DW0_LBN 0
+#define FSF_AZ_EV_DATA_DW0_WIDTH 32
+#define FSF_AZ_EV_DATA_DW1_LBN 32
+#define FSF_AZ_EV_DATA_DW1_WIDTH 28
+
+
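+/*
+ * Decoding sketch (illustrative; the ring and pointer names are
+ * hypothetical): an event is a 64-bit value whose top nibble selects
+ * how the remaining 60 bits are interpreted:
+ *
+ *	uint64_t ev = evq_ring[rptr & (nentries - 1)];
+ *	unsigned int code = (ev >> FSF_AZ_EV_CODE_LBN) &
+ *	    ((1u << FSF_AZ_EV_CODE_WIDTH) - 1);
+ *
+ * code then matches one of the FSE_AZ_EV_CODE_* values above, e.g.
+ * FSE_AZ_EV_CODE_RX_EV for the FS_RX_EV layout below.
+ */
+
+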
+/* FS_GLOBAL_EV */
+#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 12
+#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_LBN 11
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_LBN 9
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_LBN 7
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+
+/* FS_RX_EV */
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define FSF_AZ_RX_EV_PORT_LBN 30
+#define FSF_AZ_RX_EV_PORT_WIDTH 1
+#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define FSF_AZ_RX_EV_SOP_LBN 15
+#define FSF_AZ_RX_EV_SOP_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_RX_KER_DESC */
+#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_RX_USER_DESC */
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+
+/* FS_TX_EV */
+#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_TX_EV_PORT_LBN 16
+#define FSF_AZ_TX_EV_PORT_WIDTH 1
+#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_COMP_LBN 12
+#define FSF_AZ_TX_EV_COMP_WIDTH 1
+#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_TX_KER_DESC */
+#define FSF_AZ_TX_KER_CONT_LBN 62
+#define FSF_AZ_TX_KER_CONT_WIDTH 1
+#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
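+/*
+ * Composition sketch for a physical-mode TX descriptor (illustrative;
+ * buffer region 0 and a single fragment, i.e. cont = 0, are
+ * assumptions):
+ *
+ *	uint64_t desc =
+ *	    ((uint64_t)cont << FSF_AZ_TX_KER_CONT_LBN) |
+ *	    ((uint64_t)len << FSF_AZ_TX_KER_BYTE_COUNT_LBN) |
+ *	    (busaddr &
+ *	    (((uint64_t)1 << FSF_AZ_TX_KER_BUF_ADDR_WIDTH) - 1));
+ */
+
+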
+/* FS_TX_USER_DESC */
+#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define FSF_AZ_TX_USER_CONT_LBN 46
+#define FSF_AZ_TX_USER_CONT_WIDTH 1
+#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+
+/* FS_USER_EV */
+#define FSF_CZ_USER_QID_LBN 32
+#define FSF_CZ_USER_QID_WIDTH 10
+#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+
+/* FS_NET_IVEC */
+#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+
+#endif /* _SYS_EFX_REGS_H */
diff --git a/sys/dev/sfxge/common/efx_regs_ef10.h b/sys/dev/sfxge/common/efx_regs_ef10.h
new file mode 100644
index 0000000..62c0372
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_regs_ef10.h
@@ -0,0 +1,2682 @@
+/*-
+ * Copyright 2007-2010 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SYS_EFX_EF10_REGS_H
+#define _SYS_EFX_EF10_REGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * BIU_HW_REV_ID_REG(32bit):
+ * Hardware revision ID register
+ */
+
+#define ER_DZ_BIU_HW_REV_ID_REG 0x00000000
+/* hunta0=pcie_pf_bar2 */
+
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+
+/*
+ * BIU_MC_SFT_STATUS_REG(32bit):
+ * MC software status register
+ */
+
+#define ER_DZ_BIU_MC_SFT_STATUS_REG 0x00000010
+/* hunta0=pcie_pf_bar2 */
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8
+
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+
+/*
+ * BIU_INT_ISR_REG(32bit):
+ * Interrupt status register
+ */
+
+#define ER_DZ_BIU_INT_ISR_REG 0x00000090
+/* hunta0=pcie_pf_bar2 */
+
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+
+/*
+ * MC_DB_LWRD_REG(32bit):
+ * MC doorbell register, low word
+ */
+
+#define ER_DZ_MC_DB_LWRD_REG 0x00000200
+/* hunta0=pcie_pf_bar2 */
+
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+
+/*
+ * MC_DB_HWRD_REG(32bit):
+ * MC doorbell register, high word
+ */
+
+#define ER_DZ_MC_DB_HWRD_REG 0x00000204
+/* hunta0=pcie_pf_bar2 */
+
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+
+/*
+ * EVQ_RPTR_REG(32bit):
+ * Event queue read pointer register
+ */
+
+#define ER_DZ_EVQ_RPTR_REG 0x00000400
+/* hunta0=pcie_pf_bar2 */
+#define ER_DZ_EVQ_RPTR_REG_STEP 4096
+#define ER_DZ_EVQ_RPTR_REG_ROWS 2048
+
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+
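+/*
+ * Usage sketch (illustrative; the bar pointer and write32 helper are
+ * hypothetical): the host publishes its event queue read pointer, with
+ * the valid bit set, via the per-queue copy of this register:
+ *
+ *	write32(bar + ER_DZ_EVQ_RPTR_REG +
+ *	    (evq * ER_DZ_EVQ_RPTR_REG_STEP),
+ *	    (1u << ERF_DZ_EVQ_RPTR_VLD_LBN) |
+ *	    (rptr & ((1u << ERF_DZ_EVQ_RPTR_WIDTH) - 1)));
+ */
+
+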
+/*
+ * EVQ_TMR_REG(32bit):
+ * Event queue timer register
+ */
+
+#define ER_DZ_EVQ_TMR_REG 0x00000420
+/* hunta0=pcie_pf_bar2 */
+#define ER_DZ_EVQ_TMR_REG_STEP 4096
+#define ER_DZ_EVQ_TMR_REG_ROWS 2048
+
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+
+/*
+ * RX_DESC_UPD_REG(32bit):
+ * Receive descriptor update register
+ */
+
+#define ER_DZ_RX_DESC_UPD_REG 0x00000830
+/* hunta0=pcie_pf_bar2 */
+#define ER_DZ_RX_DESC_UPD_REG_STEP 4096
+#define ER_DZ_RX_DESC_UPD_REG_ROWS 2048
+
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+
+/*
+ * TX_DESC_UPD_REG(76bit):
+ * Transmit descriptor update register
+ */
+
+#define ER_DZ_TX_DESC_UPD_REG 0x00000a10
+/* hunta0=pcie_pf_bar2 */
+#define ER_DZ_TX_DESC_UPD_REG_STEP 4096
+#define ER_DZ_TX_DESC_UPD_REG_ROWS 2048
+
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+
+/* ES_DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 10
+#define ESE_DZ_DRV_WAKE_UP_EV 6
+#define ESF_DZ_DRV_SUB_DATA_DW0_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_DW0_WIDTH 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_LBN 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_WIDTH 24
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+
+/* ES_EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_DW0_LBN 0
+#define ESF_DZ_EV_DATA_DW0_WIDTH 32
+#define ESF_DZ_EV_DATA_DW1_LBN 32
+#define ESF_DZ_EV_DATA_DW1_WIDTH 28
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+
+/* ES_FF_UMSG_CPU2DL_DESC_FETCH */
+#define ESF_DZ_C2DDF_DSCR_CACHE_RPTR_LBN 112
+#define ESF_DZ_C2DDF_DSCR_CACHE_RPTR_WIDTH 6
+#define ESF_DZ_C2DDF_QID_LBN 96
+#define ESF_DZ_C2DDF_QID_WIDTH 11
+#define ESF_DZ_C2DDF_DSCR_BASE_PAGE_ID_LBN 64
+#define ESF_DZ_C2DDF_DSCR_BASE_PAGE_ID_WIDTH 18
+#define ESF_DZ_C2DDF_DSCR_HW_RPTR_LBN 48
+#define ESF_DZ_C2DDF_DSCR_HW_RPTR_WIDTH 12
+#define ESF_DZ_C2DDF_DSCR_HW_WPTR_LBN 32
+#define ESF_DZ_C2DDF_DSCR_HW_WPTR_WIDTH 12
+#define ESF_DZ_C2DDF_OID_LBN 16
+#define ESF_DZ_C2DDF_OID_WIDTH 12
+#define ESF_DZ_C2DDF_DSCR_SIZE_LBN 13
+#define ESF_DZ_C2DDF_DSCR_SIZE_WIDTH 3
+#define ESE_DZ_C2DDF_DSCR_SIZE_512 7
+#define ESE_DZ_C2DDF_DSCR_SIZE_1K 6
+#define ESE_DZ_C2DDF_DSCR_SIZE_2K 5
+#define ESE_DZ_C2DDF_DSCR_SIZE_4K 4
+#define ESF_DZ_C2DDF_BIU_ARGS_LBN 0
+#define ESF_DZ_C2DDF_BIU_ARGS_WIDTH 13
+
+
+/* ES_FF_UMSG_CPU2DL_DESC_PUSH */
+#define ESF_DZ_C2DDP_DESC_DW0_LBN 128
+#define ESF_DZ_C2DDP_DESC_DW0_WIDTH 32
+#define ESF_DZ_C2DDP_DESC_DW1_LBN 160
+#define ESF_DZ_C2DDP_DESC_DW1_WIDTH 32
+#define ESF_DZ_C2DDP_DESC_LBN 128
+#define ESF_DZ_C2DDP_DESC_WIDTH 64
+#define ESF_DZ_C2DDP_QID_LBN 96
+#define ESF_DZ_C2DDP_QID_WIDTH 11
+#define ESF_DZ_C2DDP_DSCR_HW_RPTR_LBN 48
+#define ESF_DZ_C2DDP_DSCR_HW_RPTR_WIDTH 12
+#define ESF_DZ_C2DDP_DSCR_HW_WPTR_LBN 32
+#define ESF_DZ_C2DDP_DSCR_HW_WPTR_WIDTH 12
+#define ESF_DZ_C2DDP_OID_LBN 16
+#define ESF_DZ_C2DDP_OID_WIDTH 12
+#define ESF_DZ_C2DDP_DSCR_SIZE_LBN 0
+#define ESF_DZ_C2DDP_DSCR_SIZE_WIDTH 3
+#define ESE_DZ_C2DDF_DSCR_SIZE_512 7
+#define ESE_DZ_C2DDF_DSCR_SIZE_1K 6
+#define ESE_DZ_C2DDF_DSCR_SIZE_2K 5
+#define ESE_DZ_C2DDF_DSCR_SIZE_4K 4
+
+
+/* ES_FF_UMSG_CPU2DL_GPRD */
+#define ESF_DZ_C2DG_ENCODED_HOST_ADDR_DW0_LBN 64
+#define ESF_DZ_C2DG_ENCODED_HOST_ADDR_DW0_WIDTH 32
+#define ESF_DZ_C2DG_ENCODED_HOST_ADDR_DW1_LBN 96
+#define ESF_DZ_C2DG_ENCODED_HOST_ADDR_DW1_WIDTH 16
+#define ESF_DZ_C2DG_ENCODED_HOST_ADDR_LBN 64
+#define ESF_DZ_C2DG_ENCODED_HOST_ADDR_WIDTH 48
+#define ESF_DZ_C2DG_SMC_ADDR_LBN 16
+#define ESF_DZ_C2DG_SMC_ADDR_WIDTH 16
+#define ESF_DZ_C2DG_BIU_ARGS_LBN 0
+#define ESF_DZ_C2DG_BIU_ARGS_WIDTH 14
+
+
+/* ES_FF_UMSG_CPU2EV_SOFT */
+#define ESF_DZ_C2ES_TBD_LBN 0
+#define ESF_DZ_C2ES_TBD_WIDTH 1
+
+
+/* ES_FF_UMSG_CPU2EV_TXCMPLT */
+#define ESF_DZ_C2ET_EV_SOFT0_LBN 32
+#define ESF_DZ_C2ET_EV_SOFT0_WIDTH 16
+#define ESF_DZ_C2ET_DSCR_IDX_LBN 16
+#define ESF_DZ_C2ET_DSCR_IDX_WIDTH 16
+#define ESF_DZ_C2ET_EV_QID_LBN 5
+#define ESF_DZ_C2ET_EV_QID_WIDTH 11
+#define ESF_DZ_C2ET_EV_QLABEL_LBN 0
+#define ESF_DZ_C2ET_EV_QLABEL_WIDTH 5
+
+
+/* ES_FF_UMSG_CPU2RXDP_INGR_BUFOP */
+#define ESF_DZ_C2RIB_EV_DISABLE_LBN 191
+#define ESF_DZ_C2RIB_EV_DISABLE_WIDTH 1
+#define ESF_DZ_C2RIB_EV_SOFT_LBN 188
+#define ESF_DZ_C2RIB_EV_SOFT_WIDTH 3
+#define ESF_DZ_C2RIB_EV_DESC_PTR_LBN 176
+#define ESF_DZ_C2RIB_EV_DESC_PTR_WIDTH 12
+#define ESF_DZ_C2RIB_EV_ARG1_LBN 160
+#define ESF_DZ_C2RIB_EV_ARG1_WIDTH 16
+#define ESF_DZ_C2RIB_ENCODED_HOST_ADDR_DW0_LBN 64
+#define ESF_DZ_C2RIB_ENCODED_HOST_ADDR_DW0_WIDTH 32
+#define ESF_DZ_C2RIB_ENCODED_HOST_ADDR_DW1_LBN 96
+#define ESF_DZ_C2RIB_ENCODED_HOST_ADDR_DW1_WIDTH 16
+#define ESF_DZ_C2RIB_ENCODED_HOST_ADDR_LBN 64
+#define ESF_DZ_C2RIB_ENCODED_HOST_ADDR_WIDTH 48
+#define ESF_DZ_C2RIB_BIU_ARGS_LBN 16
+#define ESF_DZ_C2RIB_BIU_ARGS_WIDTH 13
+#define ESF_DZ_C2RIB_EV_QID_LBN 5
+#define ESF_DZ_C2RIB_EV_QID_WIDTH 11
+#define ESF_DZ_C2RIB_EV_QLABEL_LBN 0
+#define ESF_DZ_C2RIB_EV_QLABEL_WIDTH 5
+
+
+/* ES_FF_UMSG_CPU2RXDP_INGR_PDISP */
+#define ESF_DZ_C2RIP_BUF_LEN_LBN 240
+#define ESF_DZ_C2RIP_BUF_LEN_WIDTH 16
+#define ESF_DZ_C2RIP_ENCODED_HOST_ADDR_DW0_LBN 192
+#define ESF_DZ_C2RIP_ENCODED_HOST_ADDR_DW0_WIDTH 32
+#define ESF_DZ_C2RIP_ENCODED_HOST_ADDR_DW1_LBN 224
+#define ESF_DZ_C2RIP_ENCODED_HOST_ADDR_DW1_WIDTH 16
+#define ESF_DZ_C2RIP_ENCODED_HOST_ADDR_LBN 192
+#define ESF_DZ_C2RIP_ENCODED_HOST_ADDR_WIDTH 48
+#define ESF_DZ_C2RIP_EV_DISABLE_LBN 191
+#define ESF_DZ_C2RIP_EV_DISABLE_WIDTH 1
+#define ESF_DZ_C2RIP_EV_SOFT_LBN 188
+#define ESF_DZ_C2RIP_EV_SOFT_WIDTH 3
+#define ESF_DZ_C2RIP_EV_DESC_PTR_LBN 176
+#define ESF_DZ_C2RIP_EV_DESC_PTR_WIDTH 12
+#define ESF_DZ_C2RIP_EV_ARG1_LBN 160
+#define ESF_DZ_C2RIP_EV_ARG1_WIDTH 16
+#define ESF_DZ_C2RIP_UPD_CRC_MODE_LBN 157
+#define ESF_DZ_C2RIP_UPD_CRC_MODE_WIDTH 3
+#define ESE_DZ_C2RIP_FCOIP_MPA 5
+#define ESE_DZ_C2RIP_FCOIP_FCOE 4
+#define ESE_DZ_C2RIP_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_C2RIP_ISCSI_HDR 2
+#define ESE_DZ_C2RIP_FCOE 1
+#define ESE_DZ_C2RIP_OFF 0
+#define ESF_DZ_C2RIP_BIU_ARGS_LBN 144
+#define ESF_DZ_C2RIP_BIU_ARGS_WIDTH 13
+#define ESF_DZ_C2RIP_EV_QID_LBN 133
+#define ESF_DZ_C2RIP_EV_QID_WIDTH 11
+#define ESF_DZ_C2RIP_EV_QLABEL_LBN 128
+#define ESF_DZ_C2RIP_EV_QLABEL_WIDTH 5
+#define ESF_DZ_C2RIP_PEDIT_DELTA_LBN 104
+#define ESF_DZ_C2RIP_PEDIT_DELTA_WIDTH 8
+#define ESF_DZ_C2RIP_PYLOAD_OFST_LBN 96
+#define ESF_DZ_C2RIP_PYLOAD_OFST_WIDTH 8
+#define ESF_DZ_C2RIP_L4_HDR_OFST_LBN 88
+#define ESF_DZ_C2RIP_L4_HDR_OFST_WIDTH 8
+#define ESF_DZ_C2RIP_L3_HDR_OFST_LBN 80
+#define ESF_DZ_C2RIP_L3_HDR_OFST_WIDTH 8
+#define ESF_DZ_C2RIP_IS_UDP_LBN 69
+#define ESF_DZ_C2RIP_IS_UDP_WIDTH 1
+#define ESF_DZ_C2RIP_IS_TCP_LBN 68
+#define ESF_DZ_C2RIP_IS_TCP_WIDTH 1
+#define ESF_DZ_C2RIP_IS_IPV6_LBN 67
+#define ESF_DZ_C2RIP_IS_IPV6_WIDTH 1
+#define ESF_DZ_C2RIP_IS_IPV4_LBN 66
+#define ESF_DZ_C2RIP_IS_IPV4_WIDTH 1
+#define ESF_DZ_C2RIP_IS_FCOE_LBN 65
+#define ESF_DZ_C2RIP_IS_FCOE_WIDTH 1
+#define ESF_DZ_C2RIP_PARSE_INCOMP_LBN 64
+#define ESF_DZ_C2RIP_PARSE_INCOMP_WIDTH 1
+#define ESF_DZ_C2RIP_FINFO_WRD3_LBN 48
+#define ESF_DZ_C2RIP_FINFO_WRD3_WIDTH 16
+#define ESF_DZ_C2RIP_FINFO_WRD2_LBN 32
+#define ESF_DZ_C2RIP_FINFO_WRD2_WIDTH 16
+#define ESF_DZ_C2RIP_FINFO_WRD1_LBN 16
+#define ESF_DZ_C2RIP_FINFO_WRD1_WIDTH 16
+#define ESF_DZ_C2RIP_FINFO_SRCDST_LBN 0
+#define ESF_DZ_C2RIP_FINFO_SRCDST_WIDTH 16
+
+
+/* ES_FF_UMSG_CPU2RXDP_INGR_SOFT */
+#define ESF_DZ_C2RIS_SOFT3_LBN 48
+#define ESF_DZ_C2RIS_SOFT3_WIDTH 16
+#define ESF_DZ_C2RIS_SOFT2_LBN 32
+#define ESF_DZ_C2RIS_SOFT2_WIDTH 16
+#define ESF_DZ_C2RIS_SOFT1_LBN 16
+#define ESF_DZ_C2RIS_SOFT1_WIDTH 16
+#define ESF_DZ_C2RIS_SOFT0_LBN 0
+#define ESF_DZ_C2RIS_SOFT0_WIDTH 16
+
+
+/* ES_FF_UMSG_CPU2SMC_BUFLKUP */
+#define ESF_DZ_C2SB_PAGE_ID_LBN 16
+#define ESF_DZ_C2SB_PAGE_ID_WIDTH 18
+#define ESF_DZ_C2SB_EXP_PAGE_ID_LBN 0
+#define ESF_DZ_C2SB_EXP_PAGE_ID_WIDTH 12
+
+
+/* ES_FF_UMSG_CPU2SMC_DESCOP */
+#define ESF_DZ_C2SD_LEN_LBN 112
+#define ESF_DZ_C2SD_LEN_WIDTH 14
+#define ESF_DZ_C2SD_ENCODED_HOST_ADDR_DW0_LBN 64
+#define ESF_DZ_C2SD_ENCODED_HOST_ADDR_DW0_WIDTH 32
+#define ESF_DZ_C2SD_ENCODED_HOST_ADDR_DW1_LBN 96
+#define ESF_DZ_C2SD_ENCODED_HOST_ADDR_DW1_WIDTH 16
+#define ESF_DZ_C2SD_ENCODED_HOST_ADDR_LBN 64
+#define ESF_DZ_C2SD_ENCODED_HOST_ADDR_WIDTH 48
+#define ESF_DZ_C2SD_OFFSET_LBN 48
+#define ESF_DZ_C2SD_OFFSET_WIDTH 8
+#define ESF_DZ_C2SD_QID_LBN 32
+#define ESF_DZ_C2SD_QID_WIDTH 11
+#define ESF_DZ_C2SD_CONT_LBN 16
+#define ESF_DZ_C2SD_CONT_WIDTH 1
+#define ESF_DZ_C2SD_TYPE_LBN 0
+#define ESF_DZ_C2SD_TYPE_WIDTH 1
+
+
+/* ES_FF_UMSG_CPU2SMC_GPOP */
+#define ESF_DZ_C2SG_DATA_DW0_LBN 64
+#define ESF_DZ_C2SG_DATA_DW0_WIDTH 32
+#define ESF_DZ_C2SG_DATA_DW1_LBN 96
+#define ESF_DZ_C2SG_DATA_DW1_WIDTH 32
+#define ESF_DZ_C2SG_DATA_LBN 64
+#define ESF_DZ_C2SG_DATA_WIDTH 64
+#define ESF_DZ_C2SG_SOFT_LBN 48
+#define ESF_DZ_C2SG_SOFT_WIDTH 4
+#define ESF_DZ_C2SG_REFLECT_LBN 32
+#define ESF_DZ_C2SG_REFLECT_WIDTH 1
+#define ESF_DZ_C2SG_ADDR_LBN 0
+#define ESF_DZ_C2SG_ADDR_WIDTH 16
+
+
+/* ES_FF_UMSG_CPU2TXDP_DMA_BUFREQ */
+#define ESF_DZ_C2TDB_BUF_LEN_LBN 176
+#define ESF_DZ_C2TDB_BUF_LEN_WIDTH 16
+#define ESF_DZ_C2TDB_ENCODED_HOST_ADDR_DW0_LBN 128
+#define ESF_DZ_C2TDB_ENCODED_HOST_ADDR_DW0_WIDTH 32
+#define ESF_DZ_C2TDB_ENCODED_HOST_ADDR_DW1_LBN 160
+#define ESF_DZ_C2TDB_ENCODED_HOST_ADDR_DW1_WIDTH 16
+#define ESF_DZ_C2TDB_ENCODED_HOST_ADDR_LBN 128
+#define ESF_DZ_C2TDB_ENCODED_HOST_ADDR_WIDTH 48
+#define ESF_DZ_C2TDB_SOFT_LBN 112
+#define ESF_DZ_C2TDB_SOFT_WIDTH 14
+#define ESF_DZ_C2TDB_DESC_IDX_LBN 96
+#define ESF_DZ_C2TDB_DESC_IDX_WIDTH 16
+#define ESF_DZ_C2TDB_UPD_CRC_MODE_LBN 93
+#define ESF_DZ_C2TDB_UPD_CRC_MODE_WIDTH 3
+#define ESE_DZ_C2RIP_FCOIP_MPA 5
+#define ESE_DZ_C2RIP_FCOIP_FCOE 4
+#define ESE_DZ_C2RIP_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_C2RIP_ISCSI_HDR 2
+#define ESE_DZ_C2RIP_FCOE 1
+#define ESE_DZ_C2RIP_OFF 0
+#define ESF_DZ_C2TDB_BIU_ARGS_LBN 80
+#define ESF_DZ_C2TDB_BIU_ARGS_WIDTH 13
+#define ESF_DZ_C2TDB_CONT_LBN 64
+#define ESF_DZ_C2TDB_CONT_WIDTH 1
+#define ESF_DZ_C2TDB_FINFO_WRD3_LBN 48
+#define ESF_DZ_C2TDB_FINFO_WRD3_WIDTH 16
+#define ESF_DZ_C2TDB_FINFO_WRD2_LBN 32
+#define ESF_DZ_C2TDB_FINFO_WRD2_WIDTH 16
+#define ESF_DZ_C2TDB_FINFO_WRD1_LBN 16
+#define ESF_DZ_C2TDB_FINFO_WRD1_WIDTH 16
+#define ESF_DZ_C2TDB_FINFO_SRCDST_LBN 0
+#define ESF_DZ_C2TDB_FINFO_SRCDST_WIDTH 16
+
+
+/* ES_FF_UMSG_CPU2TXDP_DMA_PKTABORT */
+#define ESF_DZ_C2TDP_SOFT_LBN 48
+#define ESF_DZ_C2TDP_SOFT_WIDTH 14
+#define ESF_DZ_C2TDP_DESC_IDX_LBN 32
+#define ESF_DZ_C2TDP_DESC_IDX_WIDTH 16
+#define ESF_DZ_C2TDP_BIU_ARGS_LBN 16
+#define ESF_DZ_C2TDP_BIU_ARGS_WIDTH 14
+
+
+/* ES_FF_UMSG_CPU2TXDP_DMA_SOFT */
+#define ESF_DZ_C2TDS_SOFT3_LBN 48
+#define ESF_DZ_C2TDS_SOFT3_WIDTH 16
+#define ESF_DZ_C2TDS_SOFT2_LBN 32
+#define ESF_DZ_C2TDS_SOFT2_WIDTH 16
+#define ESF_DZ_C2TDS_SOFT1_LBN 16
+#define ESF_DZ_C2TDS_SOFT1_WIDTH 16
+#define ESF_DZ_C2TDS_SOFT0_LBN 0
+#define ESF_DZ_C2TDS_SOFT0_WIDTH 16
+
+
+/* ES_FF_UMSG_CPU2TXDP_EGR */
+#define ESF_DZ_C2TE_PEDIT_DELTA_LBN 168
+#define ESF_DZ_C2TE_PEDIT_DELTA_WIDTH 8
+#define ESF_DZ_C2TE_PYLOAD_OFST_LBN 160
+#define ESF_DZ_C2TE_PYLOAD_OFST_WIDTH 8
+#define ESF_DZ_C2TE_L4_HDR_OFST_LBN 152
+#define ESF_DZ_C2TE_L4_HDR_OFST_WIDTH 8
+#define ESF_DZ_C2TE_L3_HDR_OFST_LBN 144
+#define ESF_DZ_C2TE_L3_HDR_OFST_WIDTH 8
+#define ESF_DZ_C2TE_IS_UDP_LBN 133
+#define ESF_DZ_C2TE_IS_UDP_WIDTH 1
+#define ESF_DZ_C2TE_IS_TCP_LBN 132
+#define ESF_DZ_C2TE_IS_TCP_WIDTH 1
+#define ESF_DZ_C2TE_IS_IPV6_LBN 131
+#define ESF_DZ_C2TE_IS_IPV6_WIDTH 1
+#define ESF_DZ_C2TE_IS_IPV4_LBN 130
+#define ESF_DZ_C2TE_IS_IPV4_WIDTH 1
+#define ESF_DZ_C2TE_IS_FCOE_LBN 129
+#define ESF_DZ_C2TE_IS_FCOE_WIDTH 1
+#define ESF_DZ_C2TE_PARSE_INCOMP_LBN 128
+#define ESF_DZ_C2TE_PARSE_INCOMP_WIDTH 1
+#define ESF_DZ_C2TE_PKT_LEN_LBN 112
+#define ESF_DZ_C2TE_PKT_LEN_WIDTH 16
+#define ESF_DZ_C2TE_UPD_TCPUDPCSUM_MODE_LBN 97
+#define ESF_DZ_C2TE_UPD_TCPUDPCSUM_MODE_WIDTH 1
+#define ESF_DZ_C2TE_UPD_IPCSUM_MODE_LBN 96
+#define ESF_DZ_C2TE_UPD_IPCSUM_MODE_WIDTH 1
+#define ESF_DZ_C2TE_UPD_CRC_MODE_LBN 93
+#define ESF_DZ_C2TE_UPD_CRC_MODE_WIDTH 3
+#define ESE_DZ_C2RIP_FCOIP_MPA 5
+#define ESE_DZ_C2RIP_FCOIP_FCOE 4
+#define ESE_DZ_C2RIP_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_C2RIP_ISCSI_HDR 2
+#define ESE_DZ_C2RIP_FCOE 1
+#define ESE_DZ_C2RIP_OFF 0
+#define ESF_DZ_C2TE_FINFO_WRD3_LBN 48
+#define ESF_DZ_C2TE_FINFO_WRD3_WIDTH 16
+#define ESF_DZ_C2TE_FINFO_WRD2_LBN 32
+#define ESF_DZ_C2TE_FINFO_WRD2_WIDTH 16
+#define ESF_DZ_C2TE_FINFO_WRD1_LBN 16
+#define ESF_DZ_C2TE_FINFO_WRD1_WIDTH 16
+#define ESF_DZ_C2TE_FINFO_SRCDST_LBN 0
+#define ESF_DZ_C2TE_FINFO_SRCDST_WIDTH 16
+
+
+/* ES_FF_UMSG_CPU2TXDP_EGR_SOFT */
+#define ESF_DZ_C2TES_SOFT3_LBN 48
+#define ESF_DZ_C2TES_SOFT3_WIDTH 16
+#define ESF_DZ_C2TES_SOFT2_LBN 32
+#define ESF_DZ_C2TES_SOFT2_WIDTH 16
+#define ESF_DZ_C2TES_SOFT1_LBN 16
+#define ESF_DZ_C2TES_SOFT1_WIDTH 16
+#define ESF_DZ_C2TES_SOFT0_LBN 0
+#define ESF_DZ_C2TES_SOFT0_WIDTH 16
+
+
+/* ES_FF_UMSG_DL2CPU_DESC_FETCH */
+#define ESF_DZ_D2CDF_REFL_DSCR_HW_WPTR_LBN 64
+#define ESF_DZ_D2CDF_REFL_DSCR_HW_WPTR_WIDTH 12
+#define ESF_DZ_D2CDF_FAIL_LBN 48
+#define ESF_DZ_D2CDF_FAIL_WIDTH 1
+#define ESF_DZ_D2CDF_QID_LBN 32
+#define ESF_DZ_D2CDF_QID_WIDTH 11
+#define ESF_DZ_D2CDF_NUM_DESC_LBN 16
+#define ESF_DZ_D2CDF_NUM_DESC_WIDTH 7
+#define ESF_DZ_D2CDF_NEW_DSCR_HW_RPTR_LBN 0
+#define ESF_DZ_D2CDF_NEW_DSCR_HW_RPTR_WIDTH 12
+
+
+/* ES_FF_UMSG_DL2CPU_GPRD */
+#define ESF_DZ_D2CG_BIU_ARGS_LBN 0
+#define ESF_DZ_D2CG_BIU_ARGS_WIDTH 14
+
+
+/* ES_FF_UMSG_DPCPU_PACER_TXQ_D_R_I_REQ */
+#define ESF_DZ_FRM_LEN_LBN 16
+#define ESF_DZ_FRM_LEN_WIDTH 15
+#define ESF_DZ_TXQ_ID_LBN 0
+#define ESF_DZ_TXQ_ID_WIDTH 10
+
+
+/* ES_FF_UMSG_PACER_BKT_TBL_RD_REQ */
+#define ESF_DZ_BKT_ID_LBN 0
+#define ESF_DZ_BKT_ID_WIDTH 9
+
+
+/* ES_FF_UMSG_PACER_BKT_TBL_RD_RSP */
+#define ESF_DZ_DUE_TIME_LBN 80
+#define ESF_DZ_DUE_TIME_WIDTH 16
+#define ESF_DZ_LAST_FILL_TIME_LBN 64
+#define ESF_DZ_LAST_FILL_TIME_WIDTH 16
+#define ESF_DZ_RATE_REC_LBN 48
+#define ESF_DZ_RATE_REC_WIDTH 16
+#define ESF_DZ_RATE_LBN 32
+#define ESF_DZ_RATE_WIDTH 16
+#define ESF_DZ_FILL_LEVEL_LBN 16
+#define ESF_DZ_FILL_LEVEL_WIDTH 16
+#define ESF_DZ_IDLE_LBN 15
+#define ESF_DZ_IDLE_WIDTH 1
+#define ESF_DZ_USED_LBN 14
+#define ESF_DZ_USED_WIDTH 1
+#define ESF_DZ_MAX_FILL_REG_LBN 12
+#define ESF_DZ_MAX_FILL_REG_WIDTH 2
+#define ESF_DZ_BKT_ID_LBN 0
+#define ESF_DZ_BKT_ID_WIDTH 9
+
+
+/* ES_FF_UMSG_PACER_BKT_TBL_WR_REQ */
+#define ESF_DZ_RATE_REC_LBN 48
+#define ESF_DZ_RATE_REC_WIDTH 16
+#define ESF_DZ_RATE_LBN 32
+#define ESF_DZ_RATE_WIDTH 16
+#define ESF_DZ_FILL_LEVEL_LBN 16
+#define ESF_DZ_FILL_LEVEL_WIDTH 16
+#define ESF_DZ_IDLE_LBN 15
+#define ESF_DZ_IDLE_WIDTH 1
+#define ESF_DZ_USED_LBN 14
+#define ESF_DZ_USED_WIDTH 1
+#define ESF_DZ_MAX_FILL_REG_LBN 12
+#define ESF_DZ_MAX_FILL_REG_WIDTH 2
+#define ESF_DZ_BKT_ID_LBN 0
+#define ESF_DZ_BKT_ID_WIDTH 9
+
+
+/* ES_FF_UMSG_PACER_TXQ_TBL_RD_REQ */
+#define ESF_DZ_TXQ_ID_LBN 0
+#define ESF_DZ_TXQ_ID_WIDTH 10
+
+
+/* ES_FF_UMSG_PACER_TXQ_TBL_RD_RSP */
+#define ESF_DZ_MAX_BKT2_LBN 112
+#define ESF_DZ_MAX_BKT2_WIDTH 9
+#define ESF_DZ_MAX_BKT1_LBN 96
+#define ESF_DZ_MAX_BKT1_WIDTH 9
+#define ESF_DZ_MAX_BKT0_LBN 80
+#define ESF_DZ_MAX_BKT0_WIDTH 9
+#define ESF_DZ_MIN_BKT_LBN 64
+#define ESF_DZ_MIN_BKT_WIDTH 9
+#define ESF_DZ_LABEL_LBN 48
+#define ESF_DZ_LABEL_WIDTH 4
+#define ESF_DZ_PQ_FLAGS_LBN 32
+#define ESF_DZ_PQ_FLAGS_WIDTH 3
+#define ESF_DZ_DSBL_LBN 16
+#define ESF_DZ_DSBL_WIDTH 1
+#define ESF_DZ_TXQ_ID_LBN 0
+#define ESF_DZ_TXQ_ID_WIDTH 10
+
+
+/* ES_FF_UMSG_PACER_TXQ_TBL_WR_REQ */
+#define ESF_DZ_MAX_BKT2_LBN 112
+#define ESF_DZ_MAX_BKT2_WIDTH 9
+#define ESF_DZ_MAX_BKT1_LBN 96
+#define ESF_DZ_MAX_BKT1_WIDTH 9
+#define ESF_DZ_MAX_BKT0_LBN 80
+#define ESF_DZ_MAX_BKT0_WIDTH 9
+#define ESF_DZ_MIN_BKT_LBN 64
+#define ESF_DZ_MIN_BKT_WIDTH 9
+#define ESF_DZ_LABEL_LBN 48
+#define ESF_DZ_LABEL_WIDTH 4
+#define ESF_DZ_PQ_FLAGS_LBN 32
+#define ESF_DZ_PQ_FLAGS_WIDTH 3
+#define ESF_DZ_DSBL_LBN 16
+#define ESF_DZ_DSBL_WIDTH 1
+#define ESF_DZ_TXQ_ID_LBN 0
+#define ESF_DZ_TXQ_ID_WIDTH 10
+
+
+/* ES_FF_UMSG_PE */
+#define ESF_DZ_PE_PKT_OFST_LBN 47
+#define ESF_DZ_PE_PKT_OFST_WIDTH 17
+#define ESF_DZ_PE_PEDIT_DELTA_LBN 40
+#define ESF_DZ_PE_PEDIT_DELTA_WIDTH 8
+#define ESF_DZ_PE_PYLOAD_OFST_LBN 32
+#define ESF_DZ_PE_PYLOAD_OFST_WIDTH 8
+#define ESF_DZ_PE_L4_HDR_OFST_LBN 24
+#define ESF_DZ_PE_L4_HDR_OFST_WIDTH 8
+#define ESF_DZ_PE_L3_HDR_OFST_LBN 16
+#define ESF_DZ_PE_L3_HDR_OFST_WIDTH 8
+#define ESF_DZ_PE_HAVE_UDP_HDR_LBN 5
+#define ESF_DZ_PE_HAVE_UDP_HDR_WIDTH 1
+#define ESF_DZ_PE_HAVE_TCP_HDR_LBN 4
+#define ESF_DZ_PE_HAVE_TCP_HDR_WIDTH 1
+#define ESF_DZ_PE_HAVE_IPV6_HDR_LBN 3
+#define ESF_DZ_PE_HAVE_IPV6_HDR_WIDTH 1
+#define ESF_DZ_PE_HAVE_IPV4_HDR_LBN 2
+#define ESF_DZ_PE_HAVE_IPV4_HDR_WIDTH 1
+#define ESF_DZ_PE_HAVE_FCOE_LBN 1
+#define ESF_DZ_PE_HAVE_FCOE_WIDTH 1
+#define ESF_DZ_PE_PARSE_INCOMP_LBN 0
+#define ESF_DZ_PE_PARSE_INCOMP_WIDTH 1
+
+
+/* ES_FF_UMSG_RXDP_EGR2CPU_SOFT */
+#define ESF_DZ_RE2CS_SOFT3_LBN 48
+#define ESF_DZ_RE2CS_SOFT3_WIDTH 16
+#define ESF_DZ_RE2CS_SOFT2_LBN 32
+#define ESF_DZ_RE2CS_SOFT2_WIDTH 16
+#define ESF_DZ_RE2CS_SOFT1_LBN 16
+#define ESF_DZ_RE2CS_SOFT1_WIDTH 16
+#define ESF_DZ_RE2CS_SOFT0_LBN 0
+#define ESF_DZ_RE2CS_SOFT0_WIDTH 16
+
+
+/* ES_FF_UMSG_RXDP_INGR2CPU */
+#define ESF_DZ_RI2C_LEN_LBN 208
+#define ESF_DZ_RI2C_LEN_WIDTH 16
+#define ESF_DZ_RI2C_L4_CLASS_LBN 202
+#define ESF_DZ_RI2C_L4_CLASS_WIDTH 3
+#define ESF_DZ_RI2C_L3_CLASS_LBN 199
+#define ESF_DZ_RI2C_L3_CLASS_WIDTH 3
+#define ESF_DZ_RI2C_ETHTAG_CLASS_LBN 196
+#define ESF_DZ_RI2C_ETHTAG_CLASS_WIDTH 3
+#define ESF_DZ_RI2C_ETHBASE_CLASS_LBN 193
+#define ESF_DZ_RI2C_ETHBASE_CLASS_WIDTH 3
+#define ESF_DZ_RI2C_MAC_CLASS_LBN 192
+#define ESF_DZ_RI2C_MAC_CLASS_WIDTH 1
+#define ESF_DZ_RI2C_PKT_OFST_LBN 176
+#define ESF_DZ_RI2C_PKT_OFST_WIDTH 16
+#define ESF_DZ_RI2C_PEDIT_DELTA_LBN 168
+#define ESF_DZ_RI2C_PEDIT_DELTA_WIDTH 8
+#define ESF_DZ_RI2C_PYLOAD_OFST_LBN 160
+#define ESF_DZ_RI2C_PYLOAD_OFST_WIDTH 8
+#define ESF_DZ_RI2C_L4_HDR_OFST_LBN 152
+#define ESF_DZ_RI2C_L4_HDR_OFST_WIDTH 8
+#define ESF_DZ_RI2C_L3_HDR_OFST_LBN 144
+#define ESF_DZ_RI2C_L3_HDR_OFST_WIDTH 8
+#define ESF_DZ_RI2C_HAVE_UDP_HDR_LBN 133
+#define ESF_DZ_RI2C_HAVE_UDP_HDR_WIDTH 1
+#define ESF_DZ_RI2C_HAVE_TCP_HDR_LBN 132
+#define ESF_DZ_RI2C_HAVE_TCP_HDR_WIDTH 1
+#define ESF_DZ_RI2C_HAVE_IPV6_HDR_LBN 131
+#define ESF_DZ_RI2C_HAVE_IPV6_HDR_WIDTH 1
+#define ESF_DZ_RI2C_HAVE_IPV4_HDR_LBN 130
+#define ESF_DZ_RI2C_HAVE_IPV4_HDR_WIDTH 1
+#define ESF_DZ_RI2C_HAVE_FCOE_LBN 129
+#define ESF_DZ_RI2C_HAVE_FCOE_WIDTH 1
+#define ESF_DZ_RI2C_PARSE_INCOMP_LBN 128
+#define ESF_DZ_RI2C_PARSE_INCOMP_WIDTH 1
+#define ESF_DZ_RI2C_EFINFO_WRD3_LBN 112
+#define ESF_DZ_RI2C_EFINFO_WRD3_WIDTH 16
+#define ESF_DZ_RI2C_EFINFO_WRD2_LBN 96
+#define ESF_DZ_RI2C_EFINFO_WRD2_WIDTH 16
+#define ESF_DZ_RI2C_EFINFO_WRD1_LBN 80
+#define ESF_DZ_RI2C_EFINFO_WRD1_WIDTH 16
+#define ESF_DZ_RI2C_EFINFO_WRD0_LBN 64
+#define ESF_DZ_RI2C_EFINFO_WRD0_WIDTH 16
+#define ESF_DZ_RI2C_FINFO_WRD3_LBN 48
+#define ESF_DZ_RI2C_FINFO_WRD3_WIDTH 16
+#define ESF_DZ_RI2C_FINFO_WRD2_LBN 32
+#define ESF_DZ_RI2C_FINFO_WRD2_WIDTH 16
+#define ESF_DZ_RI2C_FINFO_WRD1_LBN 16
+#define ESF_DZ_RI2C_FINFO_WRD1_WIDTH 16
+#define ESF_DZ_RI2C_FINFO_SRCDST_LBN 0
+#define ESF_DZ_RI2C_FINFO_SRCDST_WIDTH 16
+
+
+/* ES_FF_UMSG_SMC2CPU_BUFLKUP */
+#define ESF_DZ_S2CB_ENCODED_PAGE_ADDR_DW0_LBN 0
+#define ESF_DZ_S2CB_ENCODED_PAGE_ADDR_DW0_WIDTH 32
+#define ESF_DZ_S2CB_ENCODED_PAGE_ADDR_DW1_LBN 32
+#define ESF_DZ_S2CB_ENCODED_PAGE_ADDR_DW1_WIDTH 16
+#define ESF_DZ_S2CB_ENCODED_PAGE_ADDR_LBN 0
+#define ESF_DZ_S2CB_ENCODED_PAGE_ADDR_WIDTH 48
+#define ESF_DZ_S2CB_FAIL_LBN 32
+#define ESF_DZ_S2CB_FAIL_WIDTH 1
+
+
+/* ES_FF_UMSG_SMC2CPU_DESCRD */
+#define ESF_DZ_S2CD_BUF_LEN_LBN 112
+#define ESF_DZ_S2CD_BUF_LEN_WIDTH 14
+#define ESF_DZ_S2CD_ENCODED_HOST_ADDR_DW0_LBN 64
+#define ESF_DZ_S2CD_ENCODED_HOST_ADDR_DW0_WIDTH 32
+#define ESF_DZ_S2CD_ENCODED_HOST_ADDR_DW1_LBN 96
+#define ESF_DZ_S2CD_ENCODED_HOST_ADDR_DW1_WIDTH 16
+#define ESF_DZ_S2CD_ENCODED_HOST_ADDR_LBN 64
+#define ESF_DZ_S2CD_ENCODED_HOST_ADDR_WIDTH 48
+#define ESF_DZ_S2CD_CONT_LBN 16
+#define ESF_DZ_S2CD_CONT_WIDTH 1
+#define ESF_DZ_S2CD_TYPE_LBN 0
+#define ESF_DZ_S2CD_TYPE_WIDTH 1
+
+
+/* ES_FF_UMSG_SMC2CPU_GPRD */
+#define ESF_DZ_S2CG_DATA_DW0_LBN 64
+#define ESF_DZ_S2CG_DATA_DW0_WIDTH 32
+#define ESF_DZ_S2CG_DATA_DW1_LBN 96
+#define ESF_DZ_S2CG_DATA_DW1_WIDTH 32
+#define ESF_DZ_S2CG_DATA_LBN 64
+#define ESF_DZ_S2CG_DATA_WIDTH 64
+#define ESF_DZ_S2CG_SOFT_LBN 48
+#define ESF_DZ_S2CG_SOFT_WIDTH 4
+#define ESF_DZ_S2CG_FAIL_LBN 32
+#define ESF_DZ_S2CG_FAIL_WIDTH 1
+
+
+/* ES_FF_UMSG_TXDP_DMA2CPU_PKTRDY */
+#define ESF_DZ_TD2CP_L4_CLASS_LBN 250
+#define ESF_DZ_TD2CP_L4_CLASS_WIDTH 3
+#define ESF_DZ_TD2CP_L3_CLASS_LBN 247
+#define ESF_DZ_TD2CP_L3_CLASS_WIDTH 3
+#define ESF_DZ_TD2CP_ETHTAG_CLASS_LBN 244
+#define ESF_DZ_TD2CP_ETHTAG_CLASS_WIDTH 3
+#define ESF_DZ_TD2CP_ETHBASE_CLASS_LBN 241
+#define ESF_DZ_TD2CP_ETHBASE_CLASS_WIDTH 3
+#define ESF_DZ_TD2CP_MAC_CLASS_LBN 240
+#define ESF_DZ_TD2CP_MAC_CLASS_WIDTH 1
+#define ESF_DZ_TD2CP_SOFT_LBN 226
+#define ESF_DZ_TD2CP_SOFT_WIDTH 14
+#define ESF_DZ_TD2CP_PKT_ABORT_LBN 225
+#define ESF_DZ_TD2CP_PKT_ABORT_WIDTH 1
+#define ESF_DZ_TD2CP_PCIE_ERR_LBN 224
+#define ESF_DZ_TD2CP_PCIE_ERR_WIDTH 1
+#define ESF_DZ_TD2CP_DESC_IDX_LBN 208
+#define ESF_DZ_TD2CP_DESC_IDX_WIDTH 16
+#define ESF_DZ_TD2CP_PKT_LEN_LBN 192
+#define ESF_DZ_TD2CP_PKT_LEN_WIDTH 16
+#define ESF_DZ_TD2CP_PKT_OFFST_OR_FIRST_DESC_IDX_LBN 176
+#define ESF_DZ_TD2CP_PKT_OFFST_OR_FIRST_DESC_IDX_WIDTH 7
+#define ESF_DZ_TD2CP_PEDIT_DELTA_LBN 168
+#define ESF_DZ_TD2CP_PEDIT_DELTA_WIDTH 8
+#define ESF_DZ_TD2CP_PYLOAD_OFST_LBN 160
+#define ESF_DZ_TD2CP_PYLOAD_OFST_WIDTH 8
+#define ESF_DZ_TD2CP_L4_HDR_OFST_LBN 152
+#define ESF_DZ_TD2CP_L4_HDR_OFST_WIDTH 8
+#define ESF_DZ_TD2CP_L3_HDR_OFST_LBN 144
+#define ESF_DZ_TD2CP_L3_HDR_OFST_WIDTH 8
+#define ESF_DZ_TD2CP_IS_UDP_LBN 133
+#define ESF_DZ_TD2CP_IS_UDP_WIDTH 1
+#define ESF_DZ_TD2CP_IS_TCP_LBN 132
+#define ESF_DZ_TD2CP_IS_TCP_WIDTH 1
+#define ESF_DZ_TD2CP_IS_IPV6_LBN 131
+#define ESF_DZ_TD2CP_IS_IPV6_WIDTH 1
+#define ESF_DZ_TD2CP_IS_IPV4_LBN 130
+#define ESF_DZ_TD2CP_IS_IPV4_WIDTH 1
+#define ESF_DZ_TD2CP_IS_FCOE_LBN 129
+#define ESF_DZ_TD2CP_IS_FCOE_WIDTH 1
+#define ESF_DZ_TD2CP_PARSE_INCOMP_LBN 128
+#define ESF_DZ_TD2CP_PARSE_INCOMP_WIDTH 1
+#define ESF_DZ_TD2CP_EFINFO_WRD3_LBN 112
+#define ESF_DZ_TD2CP_EFINFO_WRD3_WIDTH 16
+#define ESF_DZ_TD2CP_EFINFO_WRD2_LBN 96
+#define ESF_DZ_TD2CP_EFINFO_WRD2_WIDTH 16
+#define ESF_DZ_TD2CP_EFINFO_WRD1_LBN 80
+#define ESF_DZ_TD2CP_EFINFO_WRD1_WIDTH 16
+#define ESF_DZ_TD2CP_EFINFO_WRD0_LBN 64
+#define ESF_DZ_TD2CP_EFINFO_WRD0_WIDTH 16
+#define ESF_DZ_TD2CP_FINFO_WRD3_LBN 48
+#define ESF_DZ_TD2CP_FINFO_WRD3_WIDTH 16
+#define ESF_DZ_TD2CP_FINFO_WRD2_LBN 32
+#define ESF_DZ_TD2CP_FINFO_WRD2_WIDTH 16
+#define ESF_DZ_TD2CP_FINFO_WRD1_LBN 16
+#define ESF_DZ_TD2CP_FINFO_WRD1_WIDTH 16
+#define ESF_DZ_TD2CP_FINFO_SRCDST_LBN 0
+#define ESF_DZ_TD2CP_FINFO_SRCDST_WIDTH 16
+
+
+/* ES_FF_UMSG_TXDP_DMA2CPU_SOFT */
+#define ESF_DZ_TD2CS_SOFT3_LBN 48
+#define ESF_DZ_TD2CS_SOFT3_WIDTH 16
+#define ESF_DZ_TD2CS_SOFT2_LBN 32
+#define ESF_DZ_TD2CS_SOFT2_WIDTH 16
+#define ESF_DZ_TD2CS_SOFT1_LBN 16
+#define ESF_DZ_TD2CS_SOFT1_WIDTH 16
+#define ESF_DZ_TD2CS_SOFT0_LBN 0
+#define ESF_DZ_TD2CS_SOFT0_WIDTH 16
+
+
+/* ES_FF_UMSG_TXDP_EGR2CPU_SOFT */
+#define ESF_DZ_TE2CS_SOFT3_LBN 48
+#define ESF_DZ_TE2CS_SOFT3_WIDTH 16
+#define ESF_DZ_TE2CS_SOFT2_LBN 32
+#define ESF_DZ_TE2CS_SOFT2_WIDTH 16
+#define ESF_DZ_TE2CS_SOFT1_LBN 16
+#define ESF_DZ_TE2CS_SOFT1_WIDTH 16
+#define ESF_DZ_TE2CS_SOFT0_LBN 0
+#define ESF_DZ_TE2CS_SOFT0_WIDTH 16
+
+
+/* ES_FF_UMSG_VICTL2CPU */
+#define ESF_DZ_V2C_DESC_WORD3_LBN 112
+#define ESF_DZ_V2C_DESC_WORD3_WIDTH 17
+#define ESF_DZ_V2C_DESC_WORD2_LBN 96
+#define ESF_DZ_V2C_DESC_WORD2_WIDTH 16
+#define ESF_DZ_V2C_DESC_WORD1_LBN 80
+#define ESF_DZ_V2C_DESC_WORD1_WIDTH 16
+#define ESF_DZ_V2C_DESC_WORD0_LBN 64
+#define ESF_DZ_V2C_DESC_WORD0_WIDTH 16
+#define ESF_DZ_V2C_NEW_DSCR_WPTR_LBN 32
+#define ESF_DZ_V2C_NEW_DSCR_WPTR_WIDTH 12
+#define ESF_DZ_V2C_DESC_PUSH_LBN 16
+#define ESF_DZ_V2C_DESC_PUSH_WIDTH 1
+
+
+/* ES_LUE_DB_MATCH_ENTRY */
+#define ESF_DZ_LUE_DSCRMNTR_LBN 140
+#define ESF_DZ_LUE_DSCRMNTR_WIDTH 4
+#define ESF_DZ_LUE_MATCH_VAL_DW0_LBN 44
+#define ESF_DZ_LUE_MATCH_VAL_DW0_WIDTH 32
+#define ESF_DZ_LUE_MATCH_VAL_DW1_LBN 76
+#define ESF_DZ_LUE_MATCH_VAL_DW1_WIDTH 32
+#define ESF_DZ_LUE_MATCH_VAL_DW2_LBN 108
+#define ESF_DZ_LUE_MATCH_VAL_DW2_WIDTH 32
+#define ESF_DZ_LUE_MATCH_VAL_LBN 44
+#define ESF_DZ_LUE_MATCH_VAL_WIDTH 96
+#define ESF_DZ_LUE_ME_SOFT_LBN 35
+#define ESF_DZ_LUE_ME_SOFT_WIDTH 9
+#define ESF_DZ_LUE_TX_MCAST_LBN 33
+#define ESF_DZ_LUE_TX_MCAST_WIDTH 2
+#define ESF_DZ_LUE_TX_DOMAIN_LBN 25
+#define ESF_DZ_LUE_TX_DOMAIN_WIDTH 8
+#define ESF_DZ_LUE_RX_MCAST_LBN 24
+#define ESF_DZ_LUE_RX_MCAST_WIDTH 1
+#define ESE_DZ_LUE_MULTI 1
+#define ESE_DZ_LUE_SINGLE 0
+#define ESF_DZ_LUE_RCPNTR_LBN 0
+#define ESF_DZ_LUE_RCPNTR_WIDTH 24
+#define ESF_DZ_LUE_RCPNTR_ME_PTR_LBN 0
+#define ESF_DZ_LUE_RCPNTR_ME_PTR_WIDTH 14
+
+
+/* ES_LUE_DB_NONMATCH_ENTRY */
+#define ESF_DZ_LUE_DSCRMNTR_LBN 140
+#define ESF_DZ_LUE_DSCRMNTR_WIDTH 4
+#define ESF_DZ_LUE_TERMINAL_LBN 139
+#define ESF_DZ_LUE_TERMINAL_WIDTH 1
+#define ESF_DZ_LUE_LAST_LBN 138
+#define ESF_DZ_LUE_LAST_WIDTH 1
+#define ESF_DZ_LUE_NE_SOFT_LBN 137
+#define ESF_DZ_LUE_NE_SOFT_WIDTH 1
+#define ESF_DZ_LUE_RCPNTR_NUM_LBN 134
+#define ESF_DZ_LUE_RCPNTR_NUM_WIDTH 3
+#define ESF_DZ_LUE_RCPNTR0_LBN 110
+#define ESF_DZ_LUE_RCPNTR0_WIDTH 24
+#define ESF_DZ_LUE_RCPNTR1_LBN 86
+#define ESF_DZ_LUE_RCPNTR1_WIDTH 24
+#define ESF_DZ_LUE_RCPNTR2_LBN 62
+#define ESF_DZ_LUE_RCPNTR2_WIDTH 24
+#define ESF_DZ_LUE_RCPNTR3_LBN 38
+#define ESF_DZ_LUE_RCPNTR3_WIDTH 24
+#define ESF_DZ_LUE_RCPNTR4_LBN 14
+#define ESF_DZ_LUE_RCPNTR4_WIDTH 24
+#define ESF_DZ_LUE_RCPNTR_NE_PTR_LBN 0
+#define ESF_DZ_LUE_RCPNTR_NE_PTR_WIDTH 14
+
+
+/* ES_LUE_MC_DIRECT_REQUEST_MSG */
+#define ESF_DZ_MC2L_DR_PAD_DW0_LBN 22
+#define ESF_DZ_MC2L_DR_PAD_DW0_WIDTH 32
+#define ESF_DZ_MC2L_DR_PAD_DW1_LBN 54
+#define ESF_DZ_MC2L_DR_PAD_DW1_WIDTH 32
+#define ESF_DZ_MC2L_DR_PAD_DW2_LBN 86
+#define ESF_DZ_MC2L_DR_PAD_DW2_WIDTH 32
+#define ESF_DZ_MC2L_DR_PAD_DW3_LBN 118
+#define ESF_DZ_MC2L_DR_PAD_DW3_WIDTH 32
+#define ESF_DZ_MC2L_DR_PAD_DW4_LBN 150
+#define ESF_DZ_MC2L_DR_PAD_DW4_WIDTH 16
+#define ESF_DZ_MC2L_DR_PAD_LBN 22
+#define ESF_DZ_MC2L_DR_PAD_WIDTH 144
+#define ESF_DZ_MC2L_DR_ADDR_LBN 8
+#define ESF_DZ_MC2L_DR_ADDR_WIDTH 14
+#define ESF_DZ_MC2L_DR_THREAD_ID_LBN 5
+#define ESF_DZ_MC2L_DR_THREAD_ID_WIDTH 3
+#define ESF_DZ_MC2L_DR_CLIENT_ID_LBN 2
+#define ESF_DZ_MC2L_DR_CLIENT_ID_WIDTH 3
+#define ESF_DZ_MC2L_DR_OP_LBN 0
+#define ESF_DZ_MC2L_DR_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MC_DIRECT_RESPONSE_MSG */
+#define ESF_DZ_L2MC_DR_PAD_LBN 146
+#define ESF_DZ_L2MC_DR_PAD_WIDTH 6
+#define ESF_DZ_L2MC_DR_RCPNT_PTR_LBN 132
+#define ESF_DZ_L2MC_DR_RCPNT_PTR_WIDTH 14
+#define ESF_DZ_L2MC_DR_RCPNT4_LBN 108
+#define ESF_DZ_L2MC_DR_RCPNT4_WIDTH 24
+#define ESF_DZ_L2MC_DR_RCPNT3_LBN 84
+#define ESF_DZ_L2MC_DR_RCPNT3_WIDTH 24
+#define ESF_DZ_L2MC_DR_RCPNT2_LBN 60
+#define ESF_DZ_L2MC_DR_RCPNT2_WIDTH 24
+#define ESF_DZ_L2MC_DR_RCPNT1_LBN 36
+#define ESF_DZ_L2MC_DR_RCPNT1_WIDTH 24
+#define ESF_DZ_L2MC_DR_RCPNT0_LBN 12
+#define ESF_DZ_L2MC_DR_RCPNT0_WIDTH 24
+#define ESF_DZ_L2MC_DR_RCPNT_NUM_LBN 9
+#define ESF_DZ_L2MC_DR_RCPNT_NUM_WIDTH 3
+#define ESF_DZ_L2MC_DR_LAST_LBN 8
+#define ESF_DZ_L2MC_DR_LAST_WIDTH 1
+#define ESF_DZ_L2MC_DR_THREAD_ID_LBN 5
+#define ESF_DZ_L2MC_DR_THREAD_ID_WIDTH 3
+#define ESF_DZ_L2MC_DR_CLIENT_ID_LBN 2
+#define ESF_DZ_L2MC_DR_CLIENT_ID_WIDTH 3
+#define ESF_DZ_L2MC_DR_OP_LBN 0
+#define ESF_DZ_L2MC_DR_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MC_GP_RD_REQUEST_MSG */
+#define ESF_DZ_MC2L_GPR_PAD_DW0_LBN 22
+#define ESF_DZ_MC2L_GPR_PAD_DW0_WIDTH 32
+#define ESF_DZ_MC2L_GPR_PAD_DW1_LBN 54
+#define ESF_DZ_MC2L_GPR_PAD_DW1_WIDTH 32
+#define ESF_DZ_MC2L_GPR_PAD_DW2_LBN 86
+#define ESF_DZ_MC2L_GPR_PAD_DW2_WIDTH 32
+#define ESF_DZ_MC2L_GPR_PAD_DW3_LBN 118
+#define ESF_DZ_MC2L_GPR_PAD_DW3_WIDTH 32
+#define ESF_DZ_MC2L_GPR_PAD_DW4_LBN 150
+#define ESF_DZ_MC2L_GPR_PAD_DW4_WIDTH 16
+#define ESF_DZ_MC2L_GPR_PAD_LBN 22
+#define ESF_DZ_MC2L_GPR_PAD_WIDTH 144
+#define ESF_DZ_MC2L_GPR_ADDR_LBN 8
+#define ESF_DZ_MC2L_GPR_ADDR_WIDTH 14
+#define ESF_DZ_MC2L_GPR_THREAD_ID_LBN 5
+#define ESF_DZ_MC2L_GPR_THREAD_ID_WIDTH 3
+#define ESF_DZ_MC2L_GPR_CLIENT_ID_LBN 2
+#define ESF_DZ_MC2L_GPR_CLIENT_ID_WIDTH 3
+#define ESF_DZ_MC2L_GPR_OP_LBN 0
+#define ESF_DZ_MC2L_GPR_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MC_GP_RD_RESPONSE_MSG */
+#define ESF_DZ_L2MC_GPR_DATA_DW0_LBN 8
+#define ESF_DZ_L2MC_GPR_DATA_DW0_WIDTH 32
+#define ESF_DZ_L2MC_GPR_DATA_DW1_LBN 40
+#define ESF_DZ_L2MC_GPR_DATA_DW1_WIDTH 32
+#define ESF_DZ_L2MC_GPR_DATA_DW2_LBN 72
+#define ESF_DZ_L2MC_GPR_DATA_DW2_WIDTH 32
+#define ESF_DZ_L2MC_GPR_DATA_DW3_LBN 104
+#define ESF_DZ_L2MC_GPR_DATA_DW3_WIDTH 32
+#define ESF_DZ_L2MC_GPR_DATA_DW4_LBN 136
+#define ESF_DZ_L2MC_GPR_DATA_DW4_WIDTH 16
+#define ESF_DZ_L2MC_GPR_DATA_LBN 8
+#define ESF_DZ_L2MC_GPR_DATA_WIDTH 144
+#define ESF_DZ_L2MC_GPR_THREAD_ID_LBN 5
+#define ESF_DZ_L2MC_GPR_THREAD_ID_WIDTH 3
+#define ESF_DZ_L2MC_GPR_CLIENT_ID_LBN 2
+#define ESF_DZ_L2MC_GPR_CLIENT_ID_WIDTH 3
+#define ESF_DZ_L2MC_GPR_OP_LBN 0
+#define ESF_DZ_L2MC_GPR_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MC_GP_WR_REQUEST_MSG */
+#define ESF_DZ_MC2L_GPW_DATA_DW0_LBN 22
+#define ESF_DZ_MC2L_GPW_DATA_DW0_WIDTH 32
+#define ESF_DZ_MC2L_GPW_DATA_DW1_LBN 54
+#define ESF_DZ_MC2L_GPW_DATA_DW1_WIDTH 32
+#define ESF_DZ_MC2L_GPW_DATA_DW2_LBN 86
+#define ESF_DZ_MC2L_GPW_DATA_DW2_WIDTH 32
+#define ESF_DZ_MC2L_GPW_DATA_DW3_LBN 118
+#define ESF_DZ_MC2L_GPW_DATA_DW3_WIDTH 32
+#define ESF_DZ_MC2L_GPW_DATA_DW4_LBN 150
+#define ESF_DZ_MC2L_GPW_DATA_DW4_WIDTH 16
+#define ESF_DZ_MC2L_GPW_DATA_LBN 22
+#define ESF_DZ_MC2L_GPW_DATA_WIDTH 144
+#define ESF_DZ_MC2L_GPW_ADDR_LBN 8
+#define ESF_DZ_MC2L_GPW_ADDR_WIDTH 14
+#define ESF_DZ_MC2L_GPW_THREAD_ID_LBN 5
+#define ESF_DZ_MC2L_GPW_THREAD_ID_WIDTH 3
+#define ESF_DZ_MC2L_GPW_CLIENT_ID_LBN 2
+#define ESF_DZ_MC2L_GPW_CLIENT_ID_WIDTH 3
+#define ESF_DZ_MC2L_GPW_OP_LBN 0
+#define ESF_DZ_MC2L_GPW_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MC_MATCH_REQUEST_MSG */
+#define ESF_DZ_MC2L_MR_PAD_LBN 135
+#define ESF_DZ_MC2L_MR_PAD_WIDTH 31
+#define ESF_DZ_MC2L_MR_HASH2_LBN 122
+#define ESF_DZ_MC2L_MR_HASH2_WIDTH 13
+#define ESF_DZ_MC2L_MR_HASH1_LBN 108
+#define ESF_DZ_MC2L_MR_HASH1_WIDTH 14
+#define ESF_DZ_MC2L_MR_MATCH_BITS_DW0_LBN 12
+#define ESF_DZ_MC2L_MR_MATCH_BITS_DW0_WIDTH 32
+#define ESF_DZ_MC2L_MR_MATCH_BITS_DW1_LBN 44
+#define ESF_DZ_MC2L_MR_MATCH_BITS_DW1_WIDTH 32
+#define ESF_DZ_MC2L_MR_MATCH_BITS_DW2_LBN 76
+#define ESF_DZ_MC2L_MR_MATCH_BITS_DW2_WIDTH 32
+#define ESF_DZ_MC2L_MR_MATCH_BITS_LBN 12
+#define ESF_DZ_MC2L_MR_MATCH_BITS_WIDTH 96
+#define ESF_DZ_MC2L_MR_DSCRMNTR_LBN 8
+#define ESF_DZ_MC2L_MR_DSCRMNTR_WIDTH 4
+#define ESF_DZ_MC2L_MR_THREAD_ID_LBN 5
+#define ESF_DZ_MC2L_MR_THREAD_ID_WIDTH 3
+#define ESF_DZ_MC2L_MR_CLIENT_ID_LBN 2
+#define ESF_DZ_MC2L_MR_CLIENT_ID_WIDTH 3
+#define ESF_DZ_MC2L_MR_OP_LBN 0
+#define ESF_DZ_MC2L_MR_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MC_MATCH_RESPONSE_MSG */
+#define ESF_DZ_L2MC_MR_PAD_DW0_LBN 53
+#define ESF_DZ_L2MC_MR_PAD_DW0_WIDTH 32
+#define ESF_DZ_L2MC_MR_PAD_DW1_LBN 85
+#define ESF_DZ_L2MC_MR_PAD_DW1_WIDTH 32
+#define ESF_DZ_L2MC_MR_PAD_DW2_LBN 117
+#define ESF_DZ_L2MC_MR_PAD_DW2_WIDTH 32
+#define ESF_DZ_L2MC_MR_PAD_DW3_LBN 149
+#define ESF_DZ_L2MC_MR_PAD_DW3_WIDTH 3
+#define ESF_DZ_L2MC_MR_PAD_LBN 53
+#define ESF_DZ_L2MC_MR_PAD_WIDTH 99
+#define ESF_DZ_L2MC_MR_LUE_RCPNT_LBN 29
+#define ESF_DZ_L2MC_MR_LUE_RCPNT_WIDTH 24
+#define ESF_DZ_L2MC_MR_RX_MCAST_LBN 28
+#define ESF_DZ_L2MC_MR_RX_MCAST_WIDTH 1
+#define ESF_DZ_L2MC_MR_TX_DOMAIN_LBN 20
+#define ESF_DZ_L2MC_MR_TX_DOMAIN_WIDTH 8
+#define ESF_DZ_L2MC_MR_TX_MCAST_LBN 18
+#define ESF_DZ_L2MC_MR_TX_MCAST_WIDTH 2
+#define ESF_DZ_L2MC_MR_SOFT_LBN 9
+#define ESF_DZ_L2MC_MR_SOFT_WIDTH 9
+#define ESF_DZ_L2MC_MR_MATCH_LBN 8
+#define ESF_DZ_L2MC_MR_MATCH_WIDTH 1
+#define ESF_DZ_L2MC_MR_THREAD_ID_LBN 5
+#define ESF_DZ_L2MC_MR_THREAD_ID_WIDTH 3
+#define ESF_DZ_L2MC_MR_CLIENT_ID_LBN 2
+#define ESF_DZ_L2MC_MR_CLIENT_ID_WIDTH 3
+#define ESF_DZ_L2MC_MR_OP_LBN 0
+#define ESF_DZ_L2MC_MR_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MSG_BASE_REQ */
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_DW0_LBN 8
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_DW0_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_DW1_LBN 40
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_DW1_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_DW2_LBN 72
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_DW2_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_DW3_LBN 104
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_DW3_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_DW4_LBN 136
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_DW4_WIDTH 30
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_LBN 8
+#define ESF_DZ_LUE_HW_REQ_BASE_REQ_MSG_DATA_WIDTH 158
+#define ESF_DZ_LUE_HW_REQ_BASE_THREAD_ID_LBN 5
+#define ESF_DZ_LUE_HW_REQ_BASE_THREAD_ID_WIDTH 3
+#define ESF_DZ_LUE_HW_REQ_BASE_CLIENT_ID_LBN 2
+#define ESF_DZ_LUE_HW_REQ_BASE_CLIENT_ID_WIDTH 3
+#define ESE_DZ_LUE_MC_ID 7
+#define ESE_DZ_LUE_MATCH_REQ_FIFO_ID 3
+#define ESE_DZ_LUE_TX_DICPU_ID 1
+#define ESE_DZ_LUE_RX_DICPU_ID 0
+#define ESF_DZ_LUE_HW_REQ_BASE_OP_LBN 0
+#define ESF_DZ_LUE_HW_REQ_BASE_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MSG_BASE_RESP */
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_DW0_LBN 8
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_DW0_WIDTH 32
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_DW1_LBN 40
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_DW1_WIDTH 32
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_DW2_LBN 72
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_DW2_WIDTH 32
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_DW3_LBN 104
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_DW3_WIDTH 32
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_DW4_LBN 136
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_DW4_WIDTH 16
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_LBN 8
+#define ESF_DZ_LUE_HW_RSP_BASE_RSP_DATA_WIDTH 144
+#define ESF_DZ_LUE_HW_RSP_BASE_THREAD_ID_LBN 5
+#define ESF_DZ_LUE_HW_RSP_BASE_THREAD_ID_WIDTH 3
+#define ESF_DZ_LUE_HW_RSP_BASE_CLIENT_ID_LBN 2
+#define ESF_DZ_LUE_HW_RSP_BASE_CLIENT_ID_WIDTH 3
+#define ESE_DZ_LUE_MC_ID 7
+#define ESE_DZ_LUE_MATCH_REQ_FIFO_ID 3
+#define ESE_DZ_LUE_TX_DICPU_ID 1
+#define ESE_DZ_LUE_RX_DICPU_ID 0
+#define ESF_DZ_LUE_HW_RSP_BASE_OP_LBN 0
+#define ESF_DZ_LUE_HW_RSP_BASE_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MSG_DIRECT_REQ */
+#define ESF_DZ_LUE_HW_REQ_DIR_ADDR_LBN 8
+#define ESF_DZ_LUE_HW_REQ_DIR_ADDR_WIDTH 14
+#define ESF_DZ_LUE_HW_REQ_DIR_THREAD_ID_LBN 5
+#define ESF_DZ_LUE_HW_REQ_DIR_THREAD_ID_WIDTH 3
+#define ESF_DZ_LUE_HW_REQ_DIR_CLIENT_ID_LBN 2
+#define ESF_DZ_LUE_HW_REQ_DIR_CLIENT_ID_WIDTH 3
+#define ESE_DZ_LUE_MC_ID 7
+#define ESE_DZ_LUE_MATCH_REQ_FIFO_ID 3
+#define ESE_DZ_LUE_TX_DICPU_ID 1
+#define ESE_DZ_LUE_RX_DICPU_ID 0
+#define ESF_DZ_LUE_HW_REQ_DIR_OP_LBN 0
+#define ESF_DZ_LUE_HW_REQ_DIR_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MSG_DIRECT_RESP */
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT_PTR_LBN 132
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT_PTR_WIDTH 14
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT4_LBN 108
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT4_WIDTH 24
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT3_LBN 84
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT3_WIDTH 24
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT2_LBN 60
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT2_WIDTH 24
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT1_LBN 36
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT1_WIDTH 24
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT0_LBN 12
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT0_WIDTH 24
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT_NUM_LBN 9
+#define ESF_DZ_LUE_HW_RSP_DIR_RCPNT_NUM_WIDTH 3
+#define ESF_DZ_LUE_HW_RSP_DIR_LAST_LBN 8
+#define ESF_DZ_LUE_HW_RSP_DIR_LAST_WIDTH 1
+#define ESF_DZ_LUE_HW_RSP_DIR_THREAD_ID_LBN 5
+#define ESF_DZ_LUE_HW_RSP_DIR_THREAD_ID_WIDTH 3
+#define ESF_DZ_LUE_HW_RSP_DIR_CLIENT_ID_LBN 2
+#define ESF_DZ_LUE_HW_RSP_DIR_CLIENT_ID_WIDTH 3
+#define ESE_DZ_LUE_MC_ID 7
+#define ESE_DZ_LUE_MATCH_REQ_FIFO_ID 3
+#define ESE_DZ_LUE_TX_DICPU_ID 1
+#define ESE_DZ_LUE_RX_DICPU_ID 0
+#define ESF_DZ_LUE_HW_RSP_DIR_OP_LBN 0
+#define ESF_DZ_LUE_HW_RSP_DIR_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MSG_GP_RD_REQ */
+#define ESF_DZ_LUE_HW_REQ_GPRD_ADDR_LBN 8
+#define ESF_DZ_LUE_HW_REQ_GPRD_ADDR_WIDTH 14
+#define ESF_DZ_LUE_HW_REQ_GPRD_THREAD_ID_LBN 5
+#define ESF_DZ_LUE_HW_REQ_GPRD_THREAD_ID_WIDTH 3
+#define ESF_DZ_LUE_HW_REQ_GPRD_CLIENT_ID_LBN 2
+#define ESF_DZ_LUE_HW_REQ_GPRD_CLIENT_ID_WIDTH 3
+#define ESE_DZ_LUE_MC_ID 7
+#define ESE_DZ_LUE_MATCH_REQ_FIFO_ID 3
+#define ESE_DZ_LUE_TX_DICPU_ID 1
+#define ESE_DZ_LUE_RX_DICPU_ID 0
+#define ESF_DZ_LUE_HW_REQ_GPRD_OP_LBN 0
+#define ESF_DZ_LUE_HW_REQ_GPRD_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MSG_GP_RD_RESP */
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_DW0_LBN 8
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_DW0_WIDTH 32
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_DW1_LBN 40
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_DW1_WIDTH 32
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_DW2_LBN 72
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_DW2_WIDTH 32
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_DW3_LBN 104
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_DW3_WIDTH 32
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_DW4_LBN 136
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_DW4_WIDTH 16
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_LBN 8
+#define ESF_DZ_LUE_HW_RSP_GPRD_LUE_DATA_WIDTH 144
+#define ESF_DZ_LUE_HW_RSP_GPRD_THREAD_ID_LBN 5
+#define ESF_DZ_LUE_HW_RSP_GPRD_THREAD_ID_WIDTH 3
+#define ESF_DZ_LUE_HW_RSP_GPRD_CLIENT_ID_LBN 2
+#define ESF_DZ_LUE_HW_RSP_GPRD_CLIENT_ID_WIDTH 3
+#define ESE_DZ_LUE_MC_ID 7
+#define ESE_DZ_LUE_MATCH_REQ_FIFO_ID 3
+#define ESE_DZ_LUE_TX_DICPU_ID 1
+#define ESE_DZ_LUE_RX_DICPU_ID 0
+#define ESF_DZ_LUE_HW_RSP_GPRD_OP_LBN 0
+#define ESF_DZ_LUE_HW_RSP_GPRD_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MSG_GP_WR_REQ */
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_DW0_LBN 22
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_DW0_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_DW1_LBN 54
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_DW1_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_DW2_LBN 86
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_DW2_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_DW3_LBN 118
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_DW3_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_DW4_LBN 150
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_DW4_WIDTH 16
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_LBN 22
+#define ESF_DZ_LUE_HW_REQ_GPWR_LUE_DATA_WIDTH 144
+#define ESF_DZ_LUE_HW_REQ_GPWR_ADDR_LBN 8
+#define ESF_DZ_LUE_HW_REQ_GPWR_ADDR_WIDTH 14
+#define ESF_DZ_LUE_HW_REQ_GPWR_THREAD_ID_LBN 5
+#define ESF_DZ_LUE_HW_REQ_GPWR_THREAD_ID_WIDTH 3
+#define ESF_DZ_LUE_HW_REQ_GPWR_CLIENT_ID_LBN 2
+#define ESF_DZ_LUE_HW_REQ_GPWR_CLIENT_ID_WIDTH 3
+#define ESE_DZ_LUE_MC_ID 7
+#define ESE_DZ_LUE_MATCH_REQ_FIFO_ID 3
+#define ESE_DZ_LUE_TX_DICPU_ID 1
+#define ESE_DZ_LUE_RX_DICPU_ID 0
+#define ESF_DZ_LUE_HW_REQ_GPWR_OP_LBN 0
+#define ESF_DZ_LUE_HW_REQ_GPWR_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MSG_MATCH_REQ */
+#define ESF_DZ_LUE_HW_REQ_MATCH_MATCH_REQ_COUNT_LBN 135
+#define ESF_DZ_LUE_HW_REQ_MATCH_MATCH_REQ_COUNT_WIDTH 6
+#define ESF_DZ_LUE_HW_REQ_MATCH_HASH2_LBN 122
+#define ESF_DZ_LUE_HW_REQ_MATCH_HASH2_WIDTH 13
+#define ESF_DZ_LUE_HW_REQ_MATCH_HASH1_LBN 108
+#define ESF_DZ_LUE_HW_REQ_MATCH_HASH1_WIDTH 14
+#define ESF_DZ_LUE_HW_REQ_MATCH_MATCH_BITS_DW0_LBN 12
+#define ESF_DZ_LUE_HW_REQ_MATCH_MATCH_BITS_DW0_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_MATCH_MATCH_BITS_DW1_LBN 44
+#define ESF_DZ_LUE_HW_REQ_MATCH_MATCH_BITS_DW1_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_MATCH_MATCH_BITS_DW2_LBN 76
+#define ESF_DZ_LUE_HW_REQ_MATCH_MATCH_BITS_DW2_WIDTH 32
+#define ESF_DZ_LUE_HW_REQ_MATCH_MATCH_BITS_LBN 12
+#define ESF_DZ_LUE_HW_REQ_MATCH_MATCH_BITS_WIDTH 96
+#define ESF_DZ_LUE_HW_REQ_MATCH_DSCRMNTR_LBN 8
+#define ESF_DZ_LUE_HW_REQ_MATCH_DSCRMNTR_WIDTH 4
+#define ESF_DZ_LUE_HW_REQ_MATCH_THREAD_ID_LBN 5
+#define ESF_DZ_LUE_HW_REQ_MATCH_THREAD_ID_WIDTH 3
+#define ESF_DZ_LUE_HW_REQ_MATCH_CLIENT_ID_LBN 2
+#define ESF_DZ_LUE_HW_REQ_MATCH_CLIENT_ID_WIDTH 3
+#define ESE_DZ_LUE_MC_ID 7
+#define ESE_DZ_LUE_MATCH_REQ_FIFO_ID 3
+#define ESE_DZ_LUE_TX_DICPU_ID 1
+#define ESE_DZ_LUE_RX_DICPU_ID 0
+#define ESF_DZ_LUE_HW_REQ_MATCH_OP_LBN 0
+#define ESF_DZ_LUE_HW_REQ_MATCH_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_MSG_MATCH_RESP */
+#define ESF_DZ_LUE_HW_RSP_MATCH_LUE_RCPNT_LBN 29
+#define ESF_DZ_LUE_HW_RSP_MATCH_LUE_RCPNT_WIDTH 24
+#define ESF_DZ_LUE_HW_RSP_MATCH_RX_MCAST_LBN 28
+#define ESF_DZ_LUE_HW_RSP_MATCH_RX_MCAST_WIDTH 1
+#define ESF_DZ_LUE_HW_RSP_MATCH_TX_DOMAIN_LBN 20
+#define ESF_DZ_LUE_HW_RSP_MATCH_TX_DOMAIN_WIDTH 8
+#define ESF_DZ_LUE_HW_RSP_MATCH_TX_MCAST_LBN 18
+#define ESF_DZ_LUE_HW_RSP_MATCH_TX_MCAST_WIDTH 2
+#define ESF_DZ_LUE_HW_RSP_MATCH_SOFT_LBN 9
+#define ESF_DZ_LUE_HW_RSP_MATCH_SOFT_WIDTH 9
+#define ESF_DZ_LUE_HW_RSP_MATCH_MATCH_LBN 8
+#define ESF_DZ_LUE_HW_RSP_MATCH_MATCH_WIDTH 1
+#define ESF_DZ_LUE_HW_RSP_MATCH_THREAD_ID_LBN 5
+#define ESF_DZ_LUE_HW_RSP_MATCH_THREAD_ID_WIDTH 3
+#define ESF_DZ_LUE_HW_RSP_MATCH_CLIENT_ID_LBN 2
+#define ESF_DZ_LUE_HW_RSP_MATCH_CLIENT_ID_WIDTH 3
+#define ESE_DZ_LUE_MC_ID 7
+#define ESE_DZ_LUE_MATCH_REQ_FIFO_ID 3
+#define ESE_DZ_LUE_TX_DICPU_ID 1
+#define ESE_DZ_LUE_RX_DICPU_ID 0
+#define ESF_DZ_LUE_HW_RSP_MATCH_OP_LBN 0
+#define ESF_DZ_LUE_HW_RSP_MATCH_OP_WIDTH 2
+#define ESE_DZ_LUE_GP_WR 3
+#define ESE_DZ_LUE_GP_RD 2
+#define ESE_DZ_LUE_DIR_REQ 1
+#define ESE_DZ_LUE_MATCH_REQ 0
+
+
+/* ES_LUE_RCPNTR_TYPE */
+#define ESF_DZ_LUE_RXQ_LBN 14
+#define ESF_DZ_LUE_RXQ_WIDTH 10
+#define ESF_DZ_LUE_RSS_INFO_LBN 8
+#define ESF_DZ_LUE_RSS_INFO_WIDTH 6
+#define ESF_DZ_LUE_DEST_LBN 5
+#define ESF_DZ_LUE_DEST_WIDTH 3
+#define ESF_DZ_LUE_SOFT_LBN 0
+#define ESF_DZ_LUE_SOFT_WIDTH 5
+
+
+/* ES_LUE_UMSG_LU2DI_HASH_RESP */
+#define ESF_DZ_L2DHR_LASTREC_ENTRY_STATUS_LBN 50
+#define ESF_DZ_L2DHR_LASTREC_ENTRY_STATUS_WIDTH 1
+#define ESF_DZ_L2DHR_MULTITYPE_STATUS_LBN 50
+#define ESF_DZ_L2DHR_MULTITYPE_STATUS_WIDTH 1
+#define ESF_DZ_L2DHR_LASTREC_STATUS_LBN 49
+#define ESF_DZ_L2DHR_LASTREC_STATUS_WIDTH 1
+#define ESF_DZ_L2DHR_MATCH_STATUS_LBN 48
+#define ESF_DZ_L2DHR_MATCH_STATUS_WIDTH 1
+#define ESF_DZ_L2DHR_HASH_LBN 0
+#define ESF_DZ_L2DHR_HASH_WIDTH 32
+
+
+/* ES_LUE_UMSG_LU2DI_RXLU_MULTI_MATCH_RESP */
+#define ESF_DZ_L2DRMMR_SOFT_LBN 112
+#define ESF_DZ_L2DRMMR_SOFT_WIDTH 9
+#define ESF_DZ_L2DRMMR_RCPNTR_PTR_LBN 96
+#define ESF_DZ_L2DRMMR_RCPNTR_PTR_WIDTH 14
+#define ESF_DZ_L2DRMMR_TX_MCAST_LBN 80
+#define ESF_DZ_L2DRMMR_TX_MCAST_WIDTH 2
+#define ESF_DZ_L2DRMMR_MULTITYPE_STATUS_LBN 67
+#define ESF_DZ_L2DRMMR_MULTITYPE_STATUS_WIDTH 1
+#define ESF_DZ_L2DRMMR_LASTREC_ENTRY_STATUS_LBN 66
+#define ESF_DZ_L2DRMMR_LASTREC_ENTRY_STATUS_WIDTH 1
+#define ESF_DZ_L2DRMMR_LASTREC_STATUS_LBN 65
+#define ESF_DZ_L2DRMMR_LASTREC_STATUS_WIDTH 1
+#define ESF_DZ_L2DRMMR_MATCH_STATUS_LBN 64
+#define ESF_DZ_L2DRMMR_MATCH_STATUS_WIDTH 1
+
+
+/* ES_LUE_UMSG_LU2DI_RXLU_MULTI_RECORD_RESP */
+#define ESF_DZ_L2DRMRR_SOFT_LBN 112
+#define ESF_DZ_L2DRMRR_SOFT_WIDTH 9
+#define ESF_DZ_L2DRMRR_RCPNTR_PTR_LBN 96
+#define ESF_DZ_L2DRMRR_RCPNTR_PTR_WIDTH 14
+#define ESF_DZ_L2DRMRR_RCPNTR_NUM_LBN 80
+#define ESF_DZ_L2DRMRR_RCPNTR_NUM_WIDTH 3
+#define ESF_DZ_L2DRMRR_MULTITYPE_STATUS_LBN 67
+#define ESF_DZ_L2DRMRR_MULTITYPE_STATUS_WIDTH 1
+#define ESF_DZ_L2DRMRR_LASTREC_ENTRY_STATUS_LBN 66
+#define ESF_DZ_L2DRMRR_LASTREC_ENTRY_STATUS_WIDTH 1
+#define ESF_DZ_L2DRMRR_LASTREC_STATUS_LBN 65
+#define ESF_DZ_L2DRMRR_LASTREC_STATUS_WIDTH 1
+#define ESF_DZ_L2DRMRR_MATCH_STATUS_LBN 64
+#define ESF_DZ_L2DRMRR_MATCH_STATUS_WIDTH 1
+#define ESF_DZ_L2DRMRR_RCPNTR_SOFT_LBN 48
+#define ESF_DZ_L2DRMRR_RCPNTR_SOFT_WIDTH 6
+#define ESF_DZ_L2DRMRR_RCPNTR_RSS_INFO_LBN 32
+#define ESF_DZ_L2DRMRR_RCPNTR_RSS_INFO_WIDTH 5
+#define ESF_DZ_L2DRMRR_RCPNTR_RXQ_LBN 16
+#define ESF_DZ_L2DRMRR_RCPNTR_RXQ_WIDTH 10
+#define ESF_DZ_L2DRMRR_HOST_LBN 7
+#define ESF_DZ_L2DRMRR_HOST_WIDTH 1
+#define ESF_DZ_L2DRMRR_MC_LBN 6
+#define ESF_DZ_L2DRMRR_MC_WIDTH 1
+#define ESF_DZ_L2DRMRR_PORT0_MAC_LBN 5
+#define ESF_DZ_L2DRMRR_PORT0_MAC_WIDTH 1
+#define ESF_DZ_L2DRMRR_PORT1_MAC_LBN 4
+#define ESF_DZ_L2DRMRR_PORT1_MAC_WIDTH 1
+
+
+/* ES_LUE_UMSG_LU2DI_RXLU_SINGLE_MATCH_RESP */
+#define ESF_DZ_L2DRSMR_MULTITYPE_STATUS_LBN 67
+#define ESF_DZ_L2DRSMR_MULTITYPE_STATUS_WIDTH 1
+#define ESF_DZ_L2DRSMR_LASTREC_ENTRY_STATUS_LBN 66
+#define ESF_DZ_L2DRSMR_LASTREC_ENTRY_STATUS_WIDTH 1
+#define ESF_DZ_L2DRSMR_LASTREC_STATUS_LBN 65
+#define ESF_DZ_L2DRSMR_LASTREC_STATUS_WIDTH 1
+#define ESF_DZ_L2DRSMR_MATCH_STATUS_LBN 64
+#define ESF_DZ_L2DRSMR_MATCH_STATUS_WIDTH 1
+#define ESF_DZ_L2DRSMR_RCPNTR_SOFT_LBN 48
+#define ESF_DZ_L2DRSMR_RCPNTR_SOFT_WIDTH 6
+#define ESF_DZ_L2DRSMR_RCPNTR_RSS_INFO_LBN 32
+#define ESF_DZ_L2DRSMR_RCPNTR_RSS_INFO_WIDTH 5
+#define ESF_DZ_L2DRSMR_RCPNTR_RXQ_LBN 16
+#define ESF_DZ_L2DRSMR_RCPNTR_RXQ_WIDTH 10
+#define ESF_DZ_L2DRSMR_HOST_LBN 7
+#define ESF_DZ_L2DRSMR_HOST_WIDTH 1
+#define ESF_DZ_L2DRSMR_MC_LBN 6
+#define ESF_DZ_L2DRSMR_MC_WIDTH 1
+#define ESF_DZ_L2DRSMR_PORT0_MAC_LBN 5
+#define ESF_DZ_L2DRSMR_PORT0_MAC_WIDTH 1
+#define ESF_DZ_L2DRSMR_PORT1_MAC_LBN 4
+#define ESF_DZ_L2DRSMR_PORT1_MAC_WIDTH 1
+
+
+/* ES_LUE_UMSG_LU2DI_TXLU_MATCH_RESP */
+#define ESF_DZ_L2DTMR_RCPNTR_SOFT_LBN 112
+#define ESF_DZ_L2DTMR_RCPNTR_SOFT_WIDTH 6
+#define ESF_DZ_L2DTMR_RCPNTR_RSS_INFO_LBN 96
+#define ESF_DZ_L2DTMR_RCPNTR_RSS_INFO_WIDTH 5
+#define ESF_DZ_L2DTMR_RCPNTR__RXQ_LBN 80
+#define ESF_DZ_L2DTMR_RCPNTR__RXQ_WIDTH 10
+#define ESF_DZ_L2DTMR_MULTITYPE_STATUS_LBN 67
+#define ESF_DZ_L2DTMR_MULTITYPE_STATUS_WIDTH 1
+#define ESF_DZ_L2DTMR_LASTREC_ENTRY_STATUS_LBN 66
+#define ESF_DZ_L2DTMR_LASTREC_ENTRY_STATUS_WIDTH 1
+#define ESF_DZ_L2DTMR_LASTREC_STATUS_LBN 65
+#define ESF_DZ_L2DTMR_LASTREC_STATUS_WIDTH 1
+#define ESF_DZ_L2DTMR_MATCH_STATUS_LBN 64
+#define ESF_DZ_L2DTMR_MATCH_STATUS_WIDTH 1
+#define ESF_DZ_L2DTMR_ME_SOFT_LBN 48
+#define ESF_DZ_L2DTMR_ME_SOFT_WIDTH 9
+#define ESF_DZ_L2DTMR_TX_MCAST_LBN 32
+#define ESF_DZ_L2DTMR_TX_MCAST_WIDTH 2
+#define ESF_DZ_L2DTMR_TX_DOMAIN_LBN 16
+#define ESF_DZ_L2DTMR_TX_DOMAIN_WIDTH 8
+#define ESF_DZ_L2DTMR_PORT1_MAC_LBN 6
+#define ESF_DZ_L2DTMR_PORT1_MAC_WIDTH 1
+#define ESF_DZ_L2DTMR_PMEM_LBN 6
+#define ESF_DZ_L2DTMR_PMEM_WIDTH 1
+#define ESF_DZ_L2DTMR_PORT0_MAC_LBN 5
+#define ESF_DZ_L2DTMR_PORT0_MAC_WIDTH 1
+
+
+/* ES_MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_DW0_LBN 0
+#define ESF_DZ_MC_SOFT_DW0_WIDTH 32
+#define ESF_DZ_MC_SOFT_DW1_LBN 32
+#define ESF_DZ_MC_SOFT_DW1_WIDTH 26
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+
+/* ES_MC_XGMAC_FLTR_RULE_DEF */
+#define ESF_DZ_MC_XFRC_MODE_LBN 416
+#define ESF_DZ_MC_XFRC_MODE_WIDTH 1
+#define ESE_DZ_MC_XFRC_MODE_LAYERED 1
+#define ESE_DZ_MC_XFRC_MODE_SIMPLE 0
+#define ESF_DZ_MC_XFRC_HASH_LBN 384
+#define ESF_DZ_MC_XFRC_HASH_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER4_BYTE_MASK_DW0_LBN 256
+#define ESF_DZ_MC_XFRC_LAYER4_BYTE_MASK_DW0_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER4_BYTE_MASK_DW1_LBN 288
+#define ESF_DZ_MC_XFRC_LAYER4_BYTE_MASK_DW1_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER4_BYTE_MASK_DW2_LBN 320
+#define ESF_DZ_MC_XFRC_LAYER4_BYTE_MASK_DW2_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER4_BYTE_MASK_DW3_LBN 352
+#define ESF_DZ_MC_XFRC_LAYER4_BYTE_MASK_DW3_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
+#define ESF_DZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
+#define ESF_DZ_MC_XFRC_LAYER3_BYTE_MASK_DW0_LBN 128
+#define ESF_DZ_MC_XFRC_LAYER3_BYTE_MASK_DW0_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER3_BYTE_MASK_DW1_LBN 160
+#define ESF_DZ_MC_XFRC_LAYER3_BYTE_MASK_DW1_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER3_BYTE_MASK_DW2_LBN 192
+#define ESF_DZ_MC_XFRC_LAYER3_BYTE_MASK_DW2_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER3_BYTE_MASK_DW3_LBN 224
+#define ESF_DZ_MC_XFRC_LAYER3_BYTE_MASK_DW3_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
+#define ESF_DZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
+#define ESF_DZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_DW0_LBN 0
+#define ESF_DZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_DW0_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_DW1_LBN 32
+#define ESF_DZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_DW1_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_DW2_LBN 64
+#define ESF_DZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_DW2_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_DW3_LBN 96
+#define ESF_DZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_DW3_WIDTH 32
+#define ESF_DZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
+#define ESF_DZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
+
+
+/* ES_RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_RX_EV_RSVD2_LBN 55
+#define ESF_DZ_RX_EV_RSVD2_WIDTH 3
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 3
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DZ_RX_L4_CLASS_LBN 45
+#define ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define ESE_DZ_L4_CLASS_RSVD7 7
+#define ESE_DZ_L4_CLASS_RSVD6 6
+#define ESE_DZ_L4_CLASS_RSVD5 5
+#define ESE_DZ_L4_CLASS_RSVD4 4
+#define ESE_DZ_L4_CLASS_RSVD3 3
+#define ESE_DZ_L4_CLASS_UDP 2
+#define ESE_DZ_L4_CLASS_TCP 1
+#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DZ_RX_EV_SOFT1_LBN 32
+#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
+#define ESF_DZ_RX_EV_RSVD1_LBN 30
+#define ESF_DZ_RX_EV_RSVD1_WIDTH 2
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 8
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
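+/*
+ * Illustrative sketch (editorial addition): decoding a few fields of a
+ * completed RX event using the ES_RX_EVENT definitions above.  The
+ * function name and out-parameters are placeholders; assumes
+ * <stdint.h> is available.
+ */
+static inline void
+es_dz_rx_ev_decode(uint64_t ev, unsigned int *bytes, unsigned int *qlabel,
+    unsigned int *cont)
+{
+	/* 14-bit byte count delivered into this descriptor. */
+	*bytes = (unsigned int)((ev >> ESF_DZ_RX_BYTES_LBN) &
+	    ((1U << ESF_DZ_RX_BYTES_WIDTH) - 1));
+	/* 8-bit receive queue label. */
+	*qlabel = (unsigned int)((ev >> ESF_DZ_RX_QLABEL_LBN) &
+	    ((1U << ESF_DZ_RX_QLABEL_WIDTH) - 1));
+	/* Continuation (RX_CONT) flag. */
+	*cont = (unsigned int)((ev >> ESF_DZ_RX_CONT_LBN) &
+	    ((1U << ESF_DZ_RX_CONT_WIDTH) - 1));
+}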
+
+/* ES_RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
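+/*
+ * Illustrative sketch (editorial addition): fields wider than 32 bits
+ * are also exposed as _DW0/_DW1 halves so 32-bit accessors can reach
+ * them, while the plain definition spans the full width.  Reassembling
+ * the 48-bit kernel-mode buffer address from its two halves
+ * (hypothetical helper name):
+ */
+static inline uint64_t
+es_dz_rx_ker_buf_addr(uint32_t dw0, uint32_t dw1)
+{
+	/* DW0 carries bits 31:0; DW1 carries bits 47:32 (16 valid bits). */
+	return (((uint64_t)(dw1 &
+	    ((1U << ESF_DZ_RX_KER_BUF_ADDR_DW1_WIDTH) - 1)) <<
+	    ESF_DZ_RX_KER_BUF_ADDR_DW1_LBN) | dw0);
+}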
+
+/* ES_RX_USER_DESC */
+#define ESF_DZ_RX_USR_RESERVED_LBN 62
+#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
+#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
+#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
+#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
+#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_DW0_LBN 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_DW0_WIDTH 32
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_DW1_LBN 32
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_DW1_WIDTH 12
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
+#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
+#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
+#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
+#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
+#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
+#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
+#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
+#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
+#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
+#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
+
+
+/* ES_RX_U_QSTATE_TBL0_ENTRY */
+#define ESF_DZ_RX_U_DC_FILL_LBN 112
+#define ESF_DZ_RX_U_DC_FILL_WIDTH 7
+#define ESF_DZ_RX_U_SOFT7_B1R1_0_LBN 112
+#define ESF_DZ_RX_U_SOFT7_B1R1_0_WIDTH 7
+#define ESF_DZ_RX_U_DSCR_HW_RPTR_LBN 96
+#define ESF_DZ_RX_U_DSCR_HW_RPTR_WIDTH 12
+#define ESF_DZ_RX_U_SOFT12_B1R2_0_LBN 96
+#define ESF_DZ_RX_U_SOFT12_B1R2_0_WIDTH 12
+#define ESF_DZ_RX_U_DC_RPTR_LBN 80
+#define ESF_DZ_RX_U_DC_RPTR_WIDTH 6
+#define ESF_DZ_RX_U_SOFT6_B1R1_0_LBN 80
+#define ESF_DZ_RX_U_SOFT6_B1R1_0_WIDTH 6
+#define ESF_DZ_RX_U_NOTIFY_PENDING_LBN 70
+#define ESF_DZ_RX_U_NOTIFY_PENDING_WIDTH 1
+#define ESF_DZ_RX_U_SOFT1_B1R0_6_LBN 70
+#define ESF_DZ_RX_U_SOFT1_B1R0_6_WIDTH 1
+#define ESF_DZ_RX_U_DATA_ACTIVE_LBN 69
+#define ESF_DZ_RX_U_DATA_ACTIVE_WIDTH 1
+#define ESF_DZ_RX_U_SOFT1_B1R0_5_LBN 69
+#define ESF_DZ_RX_U_SOFT1_B1R0_5_WIDTH 1
+#define ESF_DZ_RX_U_FAST_PATH_LBN 68
+#define ESF_DZ_RX_U_FAST_PATH_WIDTH 1
+#define ESF_DZ_RX_U_SOFT1_B1R0_4_LBN 68
+#define ESF_DZ_RX_U_SOFT1_B1R0_4_WIDTH 1
+#define ESF_DZ_RX_U_NO_FLUSH_LBN 67
+#define ESF_DZ_RX_U_NO_FLUSH_WIDTH 1
+#define ESF_DZ_RX_U_SOFT1_B1R0_3_LBN 67
+#define ESF_DZ_RX_U_SOFT1_B1R0_3_WIDTH 1
+#define ESF_DZ_RX_U_DESC_ACTIVE_LBN 66
+#define ESF_DZ_RX_U_DESC_ACTIVE_WIDTH 1
+#define ESF_DZ_RX_U_SOFT1_B1R0_2_LBN 66
+#define ESF_DZ_RX_U_SOFT1_B1R0_2_WIDTH 1
+#define ESF_DZ_RX_U_HDR_SPLIT_LBN 65
+#define ESF_DZ_RX_U_HDR_SPLIT_WIDTH 1
+#define ESF_DZ_RX_U_SOFT1_B1R0_1_LBN 65
+#define ESF_DZ_RX_U_SOFT1_B1R0_1_WIDTH 1
+#define ESF_DZ_RX_U_Q_ENABLE_LBN 64
+#define ESF_DZ_RX_U_Q_ENABLE_WIDTH 1
+#define ESF_DZ_RX_U_SOFT1_B1R0_0_LBN 64
+#define ESF_DZ_RX_U_SOFT1_B1R0_0_WIDTH 1
+#define ESF_DZ_RX_U_UPD_CRC_MODE_LBN 29
+#define ESF_DZ_RX_U_UPD_CRC_MODE_WIDTH 3
+#define ESE_DZ_C2RIP_FCOIP_MPA 5
+#define ESE_DZ_C2RIP_FCOIP_FCOE 4
+#define ESE_DZ_C2RIP_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_C2RIP_ISCSI_HDR 2
+#define ESE_DZ_C2RIP_FCOE 1
+#define ESE_DZ_C2RIP_OFF 0
+#define ESF_DZ_RX_U_SOFT16_B0R1_LBN 16
+#define ESF_DZ_RX_U_SOFT16_B0R1_WIDTH 16
+#define ESF_DZ_RX_U_BIU_ARGS_LBN 16
+#define ESF_DZ_RX_U_BIU_ARGS_WIDTH 13
+#define ESF_DZ_RX_U_EV_QID_LBN 5
+#define ESF_DZ_RX_U_EV_QID_WIDTH 11
+#define ESF_DZ_RX_U_SOFT16_B0R0_LBN 0
+#define ESF_DZ_RX_U_SOFT16_B0R0_WIDTH 16
+#define ESF_DZ_RX_U_EV_QLABEL_LBN 0
+#define ESF_DZ_RX_U_EV_QLABEL_WIDTH 5
+
+
+/* ES_RX_U_QSTATE_TBL1_ENTRY */
+#define ESF_DZ_RX_U_DSCR_BASE_PAGE_ID_LBN 64
+#define ESF_DZ_RX_U_DSCR_BASE_PAGE_ID_WIDTH 18
+#define ESF_DZ_RX_U_SOFT18_B1R0_0_LBN 64
+#define ESF_DZ_RX_U_SOFT18_B1R0_0_WIDTH 18
+#define ESF_DZ_RX_U_QST1_SPARE_LBN 52
+#define ESF_DZ_RX_U_QST1_SPARE_WIDTH 12
+#define ESF_DZ_RX_U_SOFT16_B0R3_0_LBN 48
+#define ESF_DZ_RX_U_SOFT16_B0R3_0_WIDTH 16
+#define ESF_DZ_RX_U_PKT_EDIT_LBN 51
+#define ESF_DZ_RX_U_PKT_EDIT_WIDTH 1
+#define ESF_DZ_RX_U_DOORBELL_ENABLED_LBN 50
+#define ESF_DZ_RX_U_DOORBELL_ENABLED_WIDTH 1
+#define ESF_DZ_RX_U_WORK_PENDING_LBN 49
+#define ESF_DZ_RX_U_WORK_PENDING_WIDTH 1
+#define ESF_DZ_RX_U_ERROR_LBN 48
+#define ESF_DZ_RX_U_ERROR_WIDTH 1
+#define ESF_DZ_RX_U_DSCR_SW_WPTR_LBN 32
+#define ESF_DZ_RX_U_DSCR_SW_WPTR_WIDTH 12
+#define ESF_DZ_RX_U_SOFT12_B0R2_0_LBN 32
+#define ESF_DZ_RX_U_SOFT12_B0R2_0_WIDTH 12
+#define ESF_DZ_RX_U_OWNER_ID_LBN 16
+#define ESF_DZ_RX_U_OWNER_ID_WIDTH 12
+#define ESF_DZ_RX_U_SOFT12_B0R1_0_LBN 16
+#define ESF_DZ_RX_U_SOFT12_B0R1_0_WIDTH 12
+#define ESF_DZ_RX_U_DSCR_SIZE_LBN 0
+#define ESF_DZ_RX_U_DSCR_SIZE_WIDTH 3
+#define ESE_DZ_RX_DSCR_SIZE_512 7
+#define ESE_DZ_RX_DSCR_SIZE_1K 6
+#define ESE_DZ_RX_DSCR_SIZE_2K 5
+#define ESE_DZ_RX_DSCR_SIZE_4K 4
+#define ESF_DZ_RX_U_SOFT3_B0R0_0_LBN 0
+#define ESF_DZ_RX_U_SOFT3_B0R0_0_WIDTH 3
+
+
+/* ES_SMC_BUFTBL_CNTRL_ENTRY */
+#define ESF_DZ_SMC_SW_CNTXT_DW0_LBN 16
+#define ESF_DZ_SMC_SW_CNTXT_DW0_WIDTH 32
+#define ESF_DZ_SMC_SW_CNTXT_DW1_LBN 48
+#define ESF_DZ_SMC_SW_CNTXT_DW1_WIDTH 24
+#define ESF_DZ_SMC_SW_CNTXT_LBN 16
+#define ESF_DZ_SMC_SW_CNTXT_WIDTH 56
+#define ESF_DZ_SMC_PAGE_SIZE_LBN 12
+#define ESF_DZ_SMC_PAGE_SIZE_WIDTH 4
+#define ESF_DZ_SMC_OWNER_ID_LBN 0
+#define ESF_DZ_SMC_OWNER_ID_WIDTH 12
+
+
+/* ES_SMC_BUFTBL_TRANSL_ENTRY */
+#define ESF_DZ_SMC_PAGE_INDEX0_DW0_LBN 36
+#define ESF_DZ_SMC_PAGE_INDEX0_DW0_WIDTH 32
+#define ESF_DZ_SMC_PAGE_INDEX0_DW1_LBN 68
+#define ESF_DZ_SMC_PAGE_INDEX0_DW1_WIDTH 4
+#define ESF_DZ_SMC_PAGE_INDEX0_LBN 36
+#define ESF_DZ_SMC_PAGE_INDEX0_WIDTH 36
+#define ESF_DZ_SMC_PAGE_INDEX1_DW0_LBN 0
+#define ESF_DZ_SMC_PAGE_INDEX1_DW0_WIDTH 32
+#define ESF_DZ_SMC_PAGE_INDEX1_DW1_LBN 32
+#define ESF_DZ_SMC_PAGE_INDEX1_DW1_WIDTH 4
+#define ESF_DZ_SMC_PAGE_INDEX1_LBN 0
+#define ESF_DZ_SMC_PAGE_INDEX1_WIDTH 36
+
+
+/* ES_SMC_DSCR_CACHE_ENTRY */
+#define ESF_DZ_SMC_BTE_PAD_LBN 64
+#define ESF_DZ_SMC_BTE_PAD_WIDTH 8
+#define ESF_DZ_SMC_DSCR_DW0_LBN 0
+#define ESF_DZ_SMC_DSCR_DW0_WIDTH 32
+#define ESF_DZ_SMC_DSCR_DW1_LBN 32
+#define ESF_DZ_SMC_DSCR_DW1_WIDTH 32
+#define ESF_DZ_SMC_DSCR_LBN 0
+#define ESF_DZ_SMC_DSCR_WIDTH 64
+
+
+/* ES_SMC_GEN_STORAGE_ENTRY */
+#define ESF_DZ_SMC_DATA_DW0_LBN 0
+#define ESF_DZ_SMC_DATA_DW0_WIDTH 32
+#define ESF_DZ_SMC_DATA_DW1_LBN 32
+#define ESF_DZ_SMC_DATA_DW1_WIDTH 32
+#define ESF_DZ_SMC_DATA_DW2_LBN 64
+#define ESF_DZ_SMC_DATA_DW2_WIDTH 8
+#define ESF_DZ_SMC_DATA_LBN 0
+#define ESF_DZ_SMC_DATA_WIDTH 72
+
+
+/* ES_SMC_MSG_BASE_REQ */
+#define ESF_DZ_MC2S_BASE_REQ_MSG_DATA_DW0_LBN 11
+#define ESF_DZ_MC2S_BASE_REQ_MSG_DATA_DW0_WIDTH 32
+#define ESF_DZ_MC2S_BASE_REQ_MSG_DATA_DW1_LBN 43
+#define ESF_DZ_MC2S_BASE_REQ_MSG_DATA_DW1_WIDTH 32
+#define ESF_DZ_MC2S_BASE_REQ_MSG_DATA_DW2_LBN 75
+#define ESF_DZ_MC2S_BASE_REQ_MSG_DATA_DW2_WIDTH 26
+#define ESF_DZ_MC2S_BASE_REQ_MSG_DATA_LBN 11
+#define ESF_DZ_MC2S_BASE_REQ_MSG_DATA_WIDTH 90
+#define ESF_DZ_MC2S_BASE_SOFT_LBN 7
+#define ESF_DZ_MC2S_BASE_SOFT_WIDTH 4
+#define ESF_DZ_MC2S_BASE_CLIENT_ID_LBN 3
+#define ESF_DZ_MC2S_BASE_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_MC2S_BASE_OP_LBN 0
+#define ESF_DZ_MC2S_BASE_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_RESP_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_RESP_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_RESP_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_RESP_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+#define ESE_DZ_SMC_RESP_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_BUFTBL_LOOKUP_REQ */
+#define ESF_DZ_MC2S_BL_BUF_ID_LBN 28
+#define ESF_DZ_MC2S_BL_BUF_ID_WIDTH 18
+#define ESF_DZ_MC2S_BL_EXP_PAGE_SIZE_LBN 24
+#define ESF_DZ_MC2S_BL_EXP_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_SMC_PAGE_SIZE_4M 10
+#define ESE_DZ_SMC_PAGE_SIZE_1M 8
+#define ESE_DZ_SMC_PAGE_SIZE_64K 4
+#define ESE_DZ_SMC_PAGE_SIZE_4K 0
+#define ESF_DZ_MC2S_BL_EXP_OWNER_ID_LBN 12
+#define ESF_DZ_MC2S_BL_EXP_OWNER_ID_WIDTH 12
+#define ESF_DZ_MC2S_BL_REFLECT_LBN 11
+#define ESF_DZ_MC2S_BL_REFLECT_WIDTH 1
+#define ESF_DZ_MC2S_BL_SOFT_LBN 7
+#define ESF_DZ_MC2S_BL_SOFT_WIDTH 4
+#define ESF_DZ_MC2S_BL_CLIENT_ID_LBN 3
+#define ESF_DZ_MC2S_BL_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_MC2S_BL_OP_LBN 0
+#define ESF_DZ_MC2S_BL_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_BUFTBL_LOOKUP_RESP */
+#define ESF_DZ_S2MC_BL_BUFTBL_ENTRY_DW0_LBN 12
+#define ESF_DZ_S2MC_BL_BUFTBL_ENTRY_DW0_WIDTH 32
+#define ESF_DZ_S2MC_BL_BUFTBL_ENTRY_DW1_LBN 44
+#define ESF_DZ_S2MC_BL_BUFTBL_ENTRY_DW1_WIDTH 4
+#define ESF_DZ_S2MC_BL_BUFTBL_ENTRY_LBN 12
+#define ESF_DZ_S2MC_BL_BUFTBL_ENTRY_WIDTH 36
+#define ESF_DZ_S2MC_BL_FAIL_LBN 11
+#define ESF_DZ_S2MC_BL_FAIL_WIDTH 1
+#define ESF_DZ_S2MC_BL_SOFT_LBN 7
+#define ESF_DZ_S2MC_BL_SOFT_WIDTH 4
+#define ESF_DZ_S2MC_BL_CLIENT_ID_LBN 3
+#define ESF_DZ_S2MC_BL_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_S2MC_BL_OP_LBN 0
+#define ESF_DZ_S2MC_BL_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_DSCR_RD_REQ */
+#define ESF_DZ_MC2S_DR_DSCR_OFST_LBN 24
+#define ESF_DZ_MC2S_DR_DSCR_OFST_WIDTH 6
+#define ESF_DZ_MC2S_DR_QID_LBN 13
+#define ESF_DZ_MC2S_DR_QID_WIDTH 11
+#define ESF_DZ_MC2S_DR_IS_TX_LBN 12
+#define ESF_DZ_MC2S_DR_IS_TX_WIDTH 1
+#define ESF_DZ_MC2S_DR_REFLECT_LBN 11
+#define ESF_DZ_MC2S_DR_REFLECT_WIDTH 1
+#define ESF_DZ_MC2S_DR_SOFT_LBN 7
+#define ESF_DZ_MC2S_DR_SOFT_WIDTH 4
+#define ESF_DZ_MC2S_DR_CLIENT_ID_LBN 3
+#define ESF_DZ_MC2S_DR_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_MC2S_DR_OP_LBN 0
+#define ESF_DZ_MC2S_DR_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_DSCR_RD_RESP */
+#define ESF_DZ_S2MC_DR_DSCR_DW0_LBN 12
+#define ESF_DZ_S2MC_DR_DSCR_DW0_WIDTH 32
+#define ESF_DZ_S2MC_DR_DSCR_DW1_LBN 44
+#define ESF_DZ_S2MC_DR_DSCR_DW1_WIDTH 32
+#define ESF_DZ_S2MC_DR_DSCR_LBN 12
+#define ESF_DZ_S2MC_DR_DSCR_WIDTH 64
+#define ESF_DZ_S2MC_DR_FAIL_LBN 11
+#define ESF_DZ_S2MC_DR_FAIL_WIDTH 1
+#define ESF_DZ_S2MC_DR_SOFT_LBN 7
+#define ESF_DZ_S2MC_DR_SOFT_WIDTH 4
+#define ESF_DZ_S2MC_DR_CLIENT_ID_LBN 3
+#define ESF_DZ_S2MC_DR_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_S2MC_DR_OP_LBN 0
+#define ESF_DZ_S2MC_DR_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_DSCR_WR_REQ */
+#define ESF_DZ_MC2S_DW_DSCR_DW0_LBN 30
+#define ESF_DZ_MC2S_DW_DSCR_DW0_WIDTH 32
+#define ESF_DZ_MC2S_DW_DSCR_DW1_LBN 62
+#define ESF_DZ_MC2S_DW_DSCR_DW1_WIDTH 32
+#define ESF_DZ_MC2S_DW_DSCR_LBN 30
+#define ESF_DZ_MC2S_DW_DSCR_WIDTH 64
+#define ESF_DZ_MC2S_DW_DSCR_OFST_LBN 24
+#define ESF_DZ_MC2S_DW_DSCR_OFST_WIDTH 6
+#define ESF_DZ_MC2S_DW_QID_LBN 13
+#define ESF_DZ_MC2S_DW_QID_WIDTH 11
+#define ESF_DZ_MC2S_DW_IS_TX_LBN 12
+#define ESF_DZ_MC2S_DW_IS_TX_WIDTH 1
+#define ESF_DZ_MC2S_DW_REFLECT_LBN 11
+#define ESF_DZ_MC2S_DW_REFLECT_WIDTH 1
+#define ESF_DZ_MC2S_DW_SOFT_LBN 7
+#define ESF_DZ_MC2S_DW_SOFT_WIDTH 4
+#define ESF_DZ_MC2S_DW_CLIENT_ID_LBN 3
+#define ESF_DZ_MC2S_DW_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_MC2S_DW_OP_LBN 0
+#define ESF_DZ_MC2S_DW_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_DSCR_WR_RESP */
+#define ESF_DZ_S2MC_DW_FAIL_LBN 11
+#define ESF_DZ_S2MC_DW_FAIL_WIDTH 1
+#define ESF_DZ_S2MC_DW_SOFT_LBN 7
+#define ESF_DZ_S2MC_DW_SOFT_WIDTH 4
+#define ESF_DZ_S2MC_DW_CLIENT_ID_LBN 3
+#define ESF_DZ_S2MC_DW_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_S2MC_DW_OP_LBN 0
+#define ESF_DZ_S2MC_DW_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_RD_REQ */
+#define ESF_DZ_MC2S_RD_ADDR_LBN 12
+#define ESF_DZ_MC2S_RD_ADDR_WIDTH 17
+#define ESF_DZ_MC2S_RD_REFLECT_LBN 11
+#define ESF_DZ_MC2S_RD_REFLECT_WIDTH 1
+#define ESF_DZ_MC2S_RD_SOFT_LBN 7
+#define ESF_DZ_MC2S_RD_SOFT_WIDTH 4
+#define ESF_DZ_MC2S_RD_CLIENT_ID_LBN 3
+#define ESF_DZ_MC2S_RD_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_MC2S_RD_OP_LBN 0
+#define ESF_DZ_MC2S_RD_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_RD_RESP */
+#define ESF_DZ_S2MC_RD_DATA_DW0_LBN 12
+#define ESF_DZ_S2MC_RD_DATA_DW0_WIDTH 32
+#define ESF_DZ_S2MC_RD_DATA_DW1_LBN 44
+#define ESF_DZ_S2MC_RD_DATA_DW1_WIDTH 32
+#define ESF_DZ_S2MC_RD_DATA_DW2_LBN 76
+#define ESF_DZ_S2MC_RD_DATA_DW2_WIDTH 8
+#define ESF_DZ_S2MC_RD_DATA_LBN 12
+#define ESF_DZ_S2MC_RD_DATA_WIDTH 72
+#define ESF_DZ_S2MC_RD_FAIL_LBN 11
+#define ESF_DZ_S2MC_RD_FAIL_WIDTH 1
+#define ESF_DZ_S2MC_RD_SOFT_LBN 7
+#define ESF_DZ_S2MC_RD_SOFT_WIDTH 4
+#define ESF_DZ_S2MC_RD_CLIENT_ID_LBN 3
+#define ESF_DZ_S2MC_RD_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_S2MC_RD_OP_LBN 0
+#define ESF_DZ_S2MC_RD_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_RESP */
+#define ESF_DZ_S2MC_BASE_RSP_DATA_DW0_LBN 12
+#define ESF_DZ_S2MC_BASE_RSP_DATA_DW0_WIDTH 32
+#define ESF_DZ_S2MC_BASE_RSP_DATA_DW1_LBN 44
+#define ESF_DZ_S2MC_BASE_RSP_DATA_DW1_WIDTH 32
+#define ESF_DZ_S2MC_BASE_RSP_DATA_DW2_LBN 76
+#define ESF_DZ_S2MC_BASE_RSP_DATA_DW2_WIDTH 8
+#define ESF_DZ_S2MC_BASE_RSP_DATA_LBN 12
+#define ESF_DZ_S2MC_BASE_RSP_DATA_WIDTH 72
+#define ESF_DZ_S2MC_BASE_FAIL_LBN 11
+#define ESF_DZ_S2MC_BASE_FAIL_WIDTH 1
+#define ESF_DZ_S2MC_BASE_SOFT_LBN 7
+#define ESF_DZ_S2MC_BASE_SOFT_WIDTH 4
+#define ESF_DZ_S2MC_BASE_CLIENT_ID_LBN 3
+#define ESF_DZ_S2MC_BASE_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_S2MC_BASE_OP_LBN 0
+#define ESF_DZ_S2MC_BASE_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_WR_REQ */
+#define ESF_DZ_MC2S_WR_DATA_DW0_LBN 29
+#define ESF_DZ_MC2S_WR_DATA_DW0_WIDTH 32
+#define ESF_DZ_MC2S_WR_DATA_DW1_LBN 61
+#define ESF_DZ_MC2S_WR_DATA_DW1_WIDTH 32
+#define ESF_DZ_MC2S_WR_DATA_DW2_LBN 93
+#define ESF_DZ_MC2S_WR_DATA_DW2_WIDTH 8
+#define ESF_DZ_MC2S_WR_DATA_LBN 29
+#define ESF_DZ_MC2S_WR_DATA_WIDTH 72
+#define ESF_DZ_MC2S_WR_ADDR_LBN 12
+#define ESF_DZ_MC2S_WR_ADDR_WIDTH 17
+#define ESF_DZ_MC2S_WR_REFLECT_LBN 11
+#define ESF_DZ_MC2S_WR_REFLECT_WIDTH 1
+#define ESF_DZ_MC2S_WR_SOFT_LBN 7
+#define ESF_DZ_MC2S_WR_SOFT_WIDTH 4
+#define ESF_DZ_MC2S_WR_CLIENT_ID_LBN 3
+#define ESF_DZ_MC2S_WR_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_MC2S_WR_OP_LBN 0
+#define ESF_DZ_MC2S_WR_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_SMC_MSG_WR_RESP */
+#define ESF_DZ_S2MC_WR_FAIL_LBN 11
+#define ESF_DZ_S2MC_WR_FAIL_WIDTH 1
+#define ESF_DZ_S2MC_WR_SOFT_LBN 7
+#define ESF_DZ_S2MC_WR_SOFT_WIDTH 4
+#define ESF_DZ_S2MC_WR_CLIENT_ID_LBN 3
+#define ESF_DZ_S2MC_WR_CLIENT_ID_WIDTH 4
+#define ESE_DZ_SMC_MACRO_ENGINE_ID 15
+#define ESE_DZ_SMC_TX_DICPU_ID 14
+#define ESE_DZ_SMC_RX_DICPU_ID 13
+#define ESE_DZ_SMC_MC_ID 12
+#define ESE_DZ_SMC_DL_ID 10
+#define ESE_DZ_SMC_EV_ID 8
+#define ESE_DZ_SMC_TX_DPCPU1_ID 5
+#define ESE_DZ_SMC_TX_DPCPU0_ID 4
+#define ESE_DZ_SMC_RX_DPCPU_ID 0
+#define ESF_DZ_S2MC_WR_OP_LBN 0
+#define ESF_DZ_S2MC_WR_OP_WIDTH 3
+#define ESE_DZ_SMC_REQ_WR 4
+#define ESE_DZ_SMC_REQ_RD 3
+#define ESE_DZ_SMC_REQ_DSCR_WRITE 2
+#define ESE_DZ_SMC_REQ_DSCR_READ 1
+#define ESE_DZ_SMC_REQ_BUFTBL_LOOKUP 0
+
+
+/* ES_TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_TX_EV_RSVD_LBN 48
+#define ESF_DZ_TX_EV_RSVD_WIDTH 10
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DZ_TX_SOFT1_LBN 24
+#define ESF_DZ_TX_SOFT1_WIDTH 8
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 8
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+
+/* ES_TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
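+/*
+ * Illustrative sketch (editorial addition): the inverse operation,
+ * composing a physically-addressed TX descriptor from the
+ * ES_TX_KER_DESC fields above.  Hypothetical builder; the caller is
+ * assumed to supply a DMA address that fits in the 48-bit field.
+ */
+static inline uint64_t
+es_dz_tx_ker_desc(uint64_t buf_addr, unsigned int byte_cnt, int cont)
+{
+	/*
+	 * TYPE (bit 63) stays clear here for a plain DMA descriptor;
+	 * option descriptors set that bit (see ES_TX_OPTION_DESC below).
+	 */
+	return (((uint64_t)(cont != 0) << ESF_DZ_TX_KER_CONT_LBN) |
+	    ((uint64_t)(byte_cnt &
+	    ((1U << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)) <<
+	    ESF_DZ_TX_KER_BYTE_CNT_LBN) |
+	    (buf_addr & ((UINT64_C(1) << ESF_DZ_TX_KER_BUF_ADDR_WIDTH) - 1)));
+}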
+
+/* ES_TX_OPTION_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 4
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
+#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
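+/*
+ * Illustrative sketch (editorial addition): building a TSO option
+ * descriptor from the ES_TX_OPTION_DESC fields above.  IS_OPT marks the
+ * descriptor as an option rather than a DMA descriptor, and OPTION_TYPE
+ * selects the TSO layout.  Hypothetical builder for illustration only;
+ * the argument types bound each value to its field width.
+ */
+static inline uint64_t
+es_dz_tx_tso_desc(uint8_t tcp_flags, uint16_t mss, uint32_t seqno)
+{
+	return (((uint64_t)1 << ESF_DZ_TX_DESC_IS_OPT_LBN) |
+	    ((uint64_t)ESE_DZ_TX_OPTION_DESC_TSO <<
+	    ESF_DZ_TX_OPTION_TYPE_LBN) |
+	    ((uint64_t)tcp_flags << ESF_DZ_TX_TSO_TCP_FLAGS_LBN) |
+	    ((uint64_t)mss << ESF_DZ_TX_TSO_TCP_MSS_LBN) |
+	    ((uint64_t)seqno << ESF_DZ_TX_TSO_TCP_SEQNO_LBN));
+}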
+
+/* ES_TX_PACER_BASE_MSG */
+#define ESF_DZ_TXP_BASE_REQ_MSG_DATA_DW0_LBN 11
+#define ESF_DZ_TXP_BASE_REQ_MSG_DATA_DW0_WIDTH 32
+#define ESF_DZ_TXP_BASE_REQ_MSG_DATA_DW1_LBN 43
+#define ESF_DZ_TXP_BASE_REQ_MSG_DATA_DW1_WIDTH 32
+#define ESF_DZ_TXP_BASE_REQ_MSG_DATA_DW2_LBN 75
+#define ESF_DZ_TXP_BASE_REQ_MSG_DATA_DW2_WIDTH 23
+#define ESF_DZ_TXP_BASE_REQ_MSG_DATA_LBN 11
+#define ESF_DZ_TXP_BASE_REQ_MSG_DATA_WIDTH 87
+#define ESF_DZ_TXP_BASE_OP_LBN 2
+#define ESF_DZ_TXP_BASE_OP_WIDTH 3
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_RD 7
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_WR 6
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_RD 5
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_WR 4
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_NI 3
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_I 2
+#define ESE_DZ_DPCPU_PACER_BKT_D_R_RD 1
+#define ESE_DZ_DPCPU_PACER_BKT_D_RD 0
+#define ESF_DZ_TXP_BASE_CLIENT_ID_LBN 0
+#define ESF_DZ_TXP_BASE_CLIENT_ID_WIDTH 2
+#define ESE_DZ_DPCPU_PACER_CPU_CLIENT 2
+#define ESE_DZ_DPCPU_PACER_CMD_CTL_CLIENT 1
+#define ESE_DZ_DPCPU_PACER_ALRT_CTL_CLIENT 0
+
+
+/* ES_TX_PACER_BKT_D_R_REQ */
+#define ESF_DZ_TXP_BKT_D_R_REQ_FRM_LEN_LBN 45
+#define ESF_DZ_TXP_BKT_D_R_REQ_FRM_LEN_WIDTH 14
+#define ESF_DZ_TXP_BKT_D_R_REQ_MAX_BKT2_LBN 35
+#define ESF_DZ_TXP_BKT_D_R_REQ_MAX_BKT2_WIDTH 10
+#define ESF_DZ_TXP_BKT_D_R_REQ_MAX_BKT1_LBN 25
+#define ESF_DZ_TXP_BKT_D_R_REQ_MAX_BKT1_WIDTH 10
+#define ESF_DZ_TXP_BKT_D_R_REQ_MAX_BKT0_LBN 15
+#define ESF_DZ_TXP_BKT_D_R_REQ_MAX_BKT0_WIDTH 10
+#define ESF_DZ_TXP_BKT_D_R_REQ_MIN_BKT_LBN 5
+#define ESF_DZ_TXP_BKT_D_R_REQ_MIN_BKT_WIDTH 10
+#define ESF_DZ_TXP_BKT_D_R_REQ_OP_LBN 2
+#define ESF_DZ_TXP_BKT_D_R_REQ_OP_WIDTH 3
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_RD 7
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_WR 6
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_RD 5
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_WR 4
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_NI 3
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_I 2
+#define ESE_DZ_DPCPU_PACER_BKT_D_R_RD 1
+#define ESE_DZ_DPCPU_PACER_BKT_D_RD 0
+#define ESF_DZ_TXP_BKT_D_R_REQ_CLIENT_ID_LBN 0
+#define ESF_DZ_TXP_BKT_D_R_REQ_CLIENT_ID_WIDTH 2
+#define ESE_DZ_DPCPU_PACER_CPU_CLIENT 2
+#define ESE_DZ_DPCPU_PACER_CMD_CTL_CLIENT 1
+#define ESE_DZ_DPCPU_PACER_ALRT_CTL_CLIENT 0
+
+
+/* ES_TX_PACER_BKT_TBL_D_R_RSP */
+#define ESF_DZ_TXP_BKT_TBL_D_R_RSP_DUE_TIME_WITH_MIN_BKT_LBN 21
+#define ESF_DZ_TXP_BKT_TBL_D_R_RSP_DUE_TIME_WITH_MIN_BKT_WIDTH 26
+#define ESF_DZ_TXP_BKT_TBL_D_R_RSP_DUE_TIME_LBN 5
+#define ESF_DZ_TXP_BKT_TBL_D_R_RSP_DUE_TIME_WIDTH 16
+#define ESF_DZ_TXP_BKT_TBL_D_R_RSP_OP_LBN 2
+#define ESF_DZ_TXP_BKT_TBL_D_R_RSP_OP_WIDTH 3
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_RD 7
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_WR 6
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_RD 5
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_WR 4
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_NI 3
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_I 2
+#define ESE_DZ_DPCPU_PACER_BKT_D_R_RD 1
+#define ESE_DZ_DPCPU_PACER_BKT_D_RD 0
+#define ESF_DZ_TXP_BKT_TBL_D_R_RSP_CLIENT_ID_LBN 0
+#define ESF_DZ_TXP_BKT_TBL_D_R_RSP_CLIENT_ID_WIDTH 2
+#define ESE_DZ_DPCPU_PACER_CPU_CLIENT 2
+#define ESE_DZ_DPCPU_PACER_CMD_CTL_CLIENT 1
+#define ESE_DZ_DPCPU_PACER_ALRT_CTL_CLIENT 0
+
+
+/* ES_TX_PACER_BKT_TBL_RD_REQ */
+#define ESF_DZ_TXP_BKT_TBL_RD_REQ_BKT_ID_LBN 5
+#define ESF_DZ_TXP_BKT_TBL_RD_REQ_BKT_ID_WIDTH 10
+#define ESF_DZ_TXP_BKT_TBL_RD_REQ_OP_LBN 2
+#define ESF_DZ_TXP_BKT_TBL_RD_REQ_OP_WIDTH 3
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_RD 7
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_WR 6
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_RD 5
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_WR 4
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_NI 3
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_I 2
+#define ESE_DZ_DPCPU_PACER_BKT_D_R_RD 1
+#define ESE_DZ_DPCPU_PACER_BKT_D_RD 0
+#define ESF_DZ_TXP_BKT_TBL_RD_REQ_CLIENT_ID_LBN 0
+#define ESF_DZ_TXP_BKT_TBL_RD_REQ_CLIENT_ID_WIDTH 2
+#define ESE_DZ_DPCPU_PACER_CPU_CLIENT 2
+#define ESE_DZ_DPCPU_PACER_CMD_CTL_CLIENT 1
+#define ESE_DZ_DPCPU_PACER_ALRT_CTL_CLIENT 0
+
+
+/* ES_TX_PACER_BKT_TBL_RD_RSP */
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_IDLE_LBN 97
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_IDLE_WIDTH 1
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_USED_LBN 96
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_USED_WIDTH 1
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_MAX_FILL_REG_LBN 94
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_MAX_FILL_REG_WIDTH 2
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_RATE_REC_LBN 78
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_RATE_REC_WIDTH 16
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_RATE_LBN 62
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_RATE_WIDTH 16
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_FILL_LEVEL_LBN 47
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_FILL_LEVEL_WIDTH 15
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_DUE_TIME_LBN 31
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_DUE_TIME_WIDTH 16
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_LAST_FILL_TIME_LBN 15
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_LAST_FILL_TIME_WIDTH 16
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_BKT_ID_LBN 5
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_BKT_ID_WIDTH 10
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_OP_LBN 2
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_OP_WIDTH 3
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_RD 7
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_WR 6
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_RD 5
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_WR 4
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_NI 3
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_I 2
+#define ESE_DZ_DPCPU_PACER_BKT_D_R_RD 1
+#define ESE_DZ_DPCPU_PACER_BKT_D_RD 0
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_CLIENT_ID_LBN 0
+#define ESF_DZ_TXP_BKT_TBL_RD_RSP_CLIENT_ID_WIDTH 2
+#define ESE_DZ_DPCPU_PACER_CPU_CLIENT 2
+#define ESE_DZ_DPCPU_PACER_CMD_CTL_CLIENT 1
+#define ESE_DZ_DPCPU_PACER_ALRT_CTL_CLIENT 0
+
+
+/* ES_TX_PACER_BKT_TBL_WR_REQ */
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_IDLE_LBN 65
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_IDLE_WIDTH 1
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_USED_LBN 64
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_USED_WIDTH 1
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_MAX_FILL_REG_LBN 62
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_MAX_FILL_REG_WIDTH 2
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_RATE_REC_LBN 46
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_RATE_REC_WIDTH 16
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_RATE_LBN 30
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_RATE_WIDTH 16
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_FILL_LEVEL_LBN 15
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_FILL_LEVEL_WIDTH 15
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_BKT_ID_LBN 5
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_BKT_ID_WIDTH 10
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_OP_LBN 2
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_OP_WIDTH 3
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_RD 7
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_WR 6
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_RD 5
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_WR 4
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_NI 3
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_I 2
+#define ESE_DZ_DPCPU_PACER_BKT_D_R_RD 1
+#define ESE_DZ_DPCPU_PACER_BKT_D_RD 0
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_CLIENT_ID_LBN 0
+#define ESF_DZ_TXP_BKT_TBL_WR_REQ_CLIENT_ID_WIDTH 2
+#define ESE_DZ_DPCPU_PACER_CPU_CLIENT 2
+#define ESE_DZ_DPCPU_PACER_CMD_CTL_CLIENT 1
+#define ESE_DZ_DPCPU_PACER_ALRT_CTL_CLIENT 0
+
+
+/* ES_TX_PACER_TXQ_D_R_I_REQ */
+#define ESF_DZ_TXP_TXQ_D_R_I_REQ_FRM_LEN_LBN 15
+#define ESF_DZ_TXP_TXQ_D_R_I_REQ_FRM_LEN_WIDTH 14
+#define ESF_DZ_TXP_TXQ_D_R_I_REQ_TXQ_ID_LBN 5
+#define ESF_DZ_TXP_TXQ_D_R_I_REQ_TXQ_ID_WIDTH 10
+#define ESF_DZ_TXP_TXQ_D_R_I_REQ_OP_LBN 2
+#define ESF_DZ_TXP_TXQ_D_R_I_REQ_OP_WIDTH 3
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_RD 7
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_WR 6
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_RD 5
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_WR 4
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_NI 3
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_I 2
+#define ESE_DZ_DPCPU_PACER_BKT_D_R_RD 1
+#define ESE_DZ_DPCPU_PACER_BKT_D_RD 0
+#define ESF_DZ_TXP_TXQ_D_R_I_REQ_CLIENT_ID_LBN 0
+#define ESF_DZ_TXP_TXQ_D_R_I_REQ_CLIENT_ID_WIDTH 2
+#define ESE_DZ_DPCPU_PACER_CPU_CLIENT 2
+#define ESE_DZ_DPCPU_PACER_CMD_CTL_CLIENT 1
+#define ESE_DZ_DPCPU_PACER_ALRT_CTL_CLIENT 0
+
+
+/* ES_TX_PACER_TXQ_TBL_RD_REQ */
+#define ESF_DZ_TXP_TXQ_TBL_RD_REQ_TXQ_ID_LBN 5
+#define ESF_DZ_TXP_TXQ_TBL_RD_REQ_TXQ_ID_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_RD_REQ_OP_LBN 2
+#define ESF_DZ_TXP_TXQ_TBL_RD_REQ_OP_WIDTH 3
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_RD 7
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_WR 6
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_RD 5
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_WR 4
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_NI 3
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_I 2
+#define ESE_DZ_DPCPU_PACER_BKT_D_R_RD 1
+#define ESE_DZ_DPCPU_PACER_BKT_D_RD 0
+#define ESF_DZ_TXP_TXQ_TBL_RD_REQ_CLIENT_ID_LBN 0
+#define ESF_DZ_TXP_TXQ_TBL_RD_REQ_CLIENT_ID_WIDTH 2
+#define ESE_DZ_DPCPU_PACER_CPU_CLIENT 2
+#define ESE_DZ_DPCPU_PACER_CMD_CTL_CLIENT 1
+#define ESE_DZ_DPCPU_PACER_ALRT_CTL_CLIENT 0
+
+
+/* ES_TX_PACER_TXQ_TBL_RD_RSP */
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_MAX_BKT2_LBN 53
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_MAX_BKT2_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_MAX_BKT1_LBN 43
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_MAX_BKT1_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_MAX_BKT0_LBN 33
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_MAX_BKT0_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_MIN_BKT_LBN 23
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_MIN_BKT_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_LABEL_LBN 19
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_LABEL_WIDTH 4
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_PQ_FLAGS_LBN 16
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_PQ_FLAGS_WIDTH 3
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_DSBL_LBN 15
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_DSBL_WIDTH 1
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_TXQ_ID_LBN 5
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_TXQ_ID_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_OP_LBN 2
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_OP_WIDTH 3
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_RD 7
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_WR 6
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_RD 5
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_WR 4
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_NI 3
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_I 2
+#define ESE_DZ_DPCPU_PACER_BKT_D_R_RD 1
+#define ESE_DZ_DPCPU_PACER_BKT_D_RD 0
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_CLIENT_ID_LBN 0
+#define ESF_DZ_TXP_TXQ_TBL_RD_RSP_CLIENT_ID_WIDTH 2
+#define ESE_DZ_DPCPU_PACER_CPU_CLIENT 2
+#define ESE_DZ_DPCPU_PACER_CMD_CTL_CLIENT 1
+#define ESE_DZ_DPCPU_PACER_ALRT_CTL_CLIENT 0
+
+
+/* ES_TX_PACER_TXQ_TBL_WR_REQ */
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_MAX_BKT2_LBN 53
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_MAX_BKT2_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_MAX_BKT1_LBN 43
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_MAX_BKT1_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_MAX_BKT0_LBN 33
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_MAX_BKT0_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_MIN_BKT_LBN 23
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_MIN_BKT_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_LABEL_LBN 19
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_LABEL_WIDTH 4
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_PQ_FLAGS_LBN 16
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_PQ_FLAGS_WIDTH 3
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_DSBL_LBN 15
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_DSBL_WIDTH 1
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_TXQ_ID_LBN 5
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_TXQ_ID_WIDTH 10
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_OP_LBN 2
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_OP_WIDTH 3
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_RD 7
+#define ESE_DZ_DPCPU_PACER_BKT_TBL_WR 6
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_RD 5
+#define ESE_DZ_DPCPU_PACER_TXQ_TBL_WR 4
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_NI 3
+#define ESE_DZ_DPCPU_PACER_TXQ_D_R_I 2
+#define ESE_DZ_DPCPU_PACER_BKT_D_R_RD 1
+#define ESE_DZ_DPCPU_PACER_BKT_D_RD 0
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_CLIENT_ID_LBN 0
+#define ESF_DZ_TXP_TXQ_TBL_WR_REQ_CLIENT_ID_WIDTH 2
+#define ESE_DZ_DPCPU_PACER_CPU_CLIENT 2
+#define ESE_DZ_DPCPU_PACER_CMD_CTL_CLIENT 1
+#define ESE_DZ_DPCPU_PACER_ALRT_CTL_CLIENT 0
+
+
+/* ES_TX_USER_DESC */
+#define ESF_DZ_TX_USR_TYPE_LBN 63
+#define ESF_DZ_TX_USR_TYPE_WIDTH 1
+#define ESF_DZ_TX_USR_CONT_LBN 62
+#define ESF_DZ_TX_USR_CONT_WIDTH 1
+#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
+#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
+#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
+#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_DW0_LBN 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_DW0_WIDTH 32
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_DW1_LBN 32
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_DW1_WIDTH 12
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
+#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
+#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
+#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
+#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
+#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
+#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
+#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
+#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
+#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
+#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
+#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
+#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
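+
+/*
+ * Illustrative sketch only: in user (buffer-table) mode the split of
+ * the 44bit BUF_ID_OFFSET field between buffer ID and byte offset
+ * depends on the buffer page size; e.g. for 4KB pages the ID sits at
+ * bit 12 above a 12bit byte offset.  The variables are hypothetical.
+ */
+#if 0
+	unsigned int buf_id = 0, byte_off = 0;	/* hypothetical inputs */
+	uint64_t id_off;
+
+	id_off = ((uint64_t)buf_id << ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN) |
+	    (byte_off & ((1u << ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH) - 1));
+#endif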
+
+
+/* ES_TX_U_QSTATE_TBL0_ENTRY */
+#define ESF_DZ_TX_U_DC_FILL_LBN 112
+#define ESF_DZ_TX_U_DC_FILL_WIDTH 7
+#define ESF_DZ_TX_U_SOFT7_B1R3_LBN 112
+#define ESF_DZ_TX_U_SOFT7_B1R3_WIDTH 7
+#define ESF_DZ_TX_U_DSCR_HW_RPTR_LBN 96
+#define ESF_DZ_TX_U_DSCR_HW_RPTR_WIDTH 12
+#define ESF_DZ_TX_U_SOFT12_B1R2_LBN 96
+#define ESF_DZ_TX_U_SOFT12_B1R2_WIDTH 12
+#define ESF_DZ_TX_U_DC_RPTR_LBN 80
+#define ESF_DZ_TX_U_DC_RPTR_WIDTH 6
+#define ESF_DZ_TX_U_SOFT6_B1R1_LBN 80
+#define ESF_DZ_TX_U_SOFT6_B1R1_WIDTH 6
+#define ESF_DZ_TX_U_SOFT5_B1R0_LBN 64
+#define ESF_DZ_TX_U_SOFT5_B1R0_WIDTH 5
+#define ESF_DZ_TX_U_PREFETCH_ACTIVE_LBN 66
+#define ESF_DZ_TX_U_PREFETCH_ACTIVE_WIDTH 1
+#define ESF_DZ_TX_U_PREFETCH_PENDING_LBN 65
+#define ESF_DZ_TX_U_PREFETCH_PENDING_WIDTH 1
+#define ESF_DZ_TX_U_DOORBELL_ENABLED_LBN 64
+#define ESF_DZ_TX_U_DOORBELL_ENABLED_WIDTH 1
+#define ESF_DZ_TX_U_UPD_UDPTCP_CSUM_MODE_LBN 33
+#define ESF_DZ_TX_U_UPD_UDPTCP_CSUM_MODE_WIDTH 1
+#define ESF_DZ_TX_U_SOFT2_B0R2_LBN 32
+#define ESF_DZ_TX_U_SOFT2_B0R2_WIDTH 2
+#define ESF_DZ_TX_U_UPD_IP_CSUM_MODE_LBN 32
+#define ESF_DZ_TX_U_UPD_IP_CSUM_MODE_WIDTH 1
+#define ESF_DZ_TX_U_UPD_CRC_MODE_LBN 29
+#define ESF_DZ_TX_U_UPD_CRC_MODE_WIDTH 3
+#define ESE_DZ_C2RIP_FCOIP_MPA 5
+#define ESE_DZ_C2RIP_FCOIP_FCOE 4
+#define ESE_DZ_C2RIP_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_C2RIP_ISCSI_HDR 2
+#define ESE_DZ_C2RIP_FCOE 1
+#define ESE_DZ_C2RIP_OFF 0
+#define ESF_DZ_TX_U_SOFT16_B0R1_LBN 16
+#define ESF_DZ_TX_U_SOFT16_B0R1_WIDTH 16
+#define ESF_DZ_TX_U_BIU_ARGS_LBN 16
+#define ESF_DZ_TX_U_BIU_ARGS_WIDTH 13
+#define ESF_DZ_TX_U_EV_QID_LBN 5
+#define ESF_DZ_TX_U_EV_QID_WIDTH 11
+#define ESF_DZ_TX_U_SOFT16_B0R0_LBN 0
+#define ESF_DZ_TX_U_SOFT16_B0R0_WIDTH 16
+#define ESF_DZ_TX_U_EV_QLABEL_LBN 0
+#define ESF_DZ_TX_U_EV_QLABEL_WIDTH 5
+
+
+/* ES_TX_U_QSTATE_TBL1_ENTRY */
+#define ESF_DZ_TX_U_DSCR_BASE_PAGE_ID_LBN 64
+#define ESF_DZ_TX_U_DSCR_BASE_PAGE_ID_WIDTH 18
+#define ESF_DZ_TX_U_SOFT18_B1R0_LBN 64
+#define ESF_DZ_TX_U_SOFT18_B1R0_WIDTH 18
+#define ESF_DZ_TX_U_SOFT16_B0R3_LBN 48
+#define ESF_DZ_TX_U_SOFT16_B0R3_WIDTH 16
+#define ESF_DZ_TX_U_QUEUE_ENABLED_LBN 49
+#define ESF_DZ_TX_U_QUEUE_ENABLED_WIDTH 1
+#define ESF_DZ_TX_U_FLUSH_PENDING_LBN 48
+#define ESF_DZ_TX_U_FLUSH_PENDING_WIDTH 1
+#define ESF_DZ_TX_U_DSCR_HW_WPTR_LBN 32
+#define ESF_DZ_TX_U_DSCR_HW_WPTR_WIDTH 12
+#define ESF_DZ_TX_U_SOFT12_B0R2_LBN 32
+#define ESF_DZ_TX_U_SOFT12_B0R2_WIDTH 12
+#define ESF_DZ_TX_U_OWNER_ID_LBN 16
+#define ESF_DZ_TX_U_OWNER_ID_WIDTH 12
+#define ESF_DZ_TX_U_SOFT12_B0R1_LBN 16
+#define ESF_DZ_TX_U_SOFT12_B0R1_WIDTH 12
+#define ESF_DZ_TX_U_DSCR_SIZE_LBN 0
+#define ESF_DZ_TX_U_DSCR_SIZE_WIDTH 3
+#define ESF_DZ_TX_U_SOFT3_B0R0_LBN 0
+#define ESF_DZ_TX_U_SOFT3_B0R0_WIDTH 3
+
+
+/* ES_TX_U_QSTATE_TBL2_ENTRY */
+#define ESF_DZ_TX_FINFO_WRD3_LBN 48
+#define ESF_DZ_TX_FINFO_WRD3_WIDTH 16
+#define ESF_DZ_TX_FINFO_WRD2_LBN 32
+#define ESF_DZ_TX_FINFO_WRD2_WIDTH 16
+#define ESF_DZ_TX_FINFO_WRD1_LBN 16
+#define ESF_DZ_TX_FINFO_WRD1_WIDTH 16
+#define ESF_DZ_TX_FINFO_SRCDST_LBN 0
+#define ESF_DZ_TX_FINFO_SRCDST_WIDTH 16
+
+
+/* ES_b2t_cpl_rsp */
+#define ESF_DZ_B2T_CPL_RSP_CPL_ECC_LBN 268
+#define ESF_DZ_B2T_CPL_RSP_CPL_ECC_WIDTH 32
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW0_LBN 27
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW0_WIDTH 32
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW1_LBN 59
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW1_WIDTH 32
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW2_LBN 91
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW2_WIDTH 32
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW3_LBN 123
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW3_WIDTH 32
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW4_LBN 155
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW4_WIDTH 32
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW5_LBN 187
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW5_WIDTH 32
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW6_LBN 219
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW6_WIDTH 32
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW7_LBN 251
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_DW7_WIDTH 32
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_LBN 27
+#define ESF_DZ_B2T_CPL_RSP_CPL_DATA_WIDTH 256
+#define ESF_DZ_B2T_CPL_RSP_CPL_EOT_LBN 283
+#define ESF_DZ_B2T_CPL_RSP_CPL_EOT_WIDTH 1 /* XXX: generator emitted -15; assumed to be a 1-bit flag */
+#define ESF_DZ_B2T_CPL_RSP_CPL_ERROR_LBN 26
+#define ESF_DZ_B2T_CPL_RSP_CPL_ERROR_WIDTH 1
+#define ESF_DZ_B2T_CPL_RSP_CPL_LAST_LBN 25
+#define ESF_DZ_B2T_CPL_RSP_CPL_LAST_WIDTH 1
+#define ESF_DZ_B2T_CPL_RSP_CPL_TAG_LBN 19
+#define ESF_DZ_B2T_CPL_RSP_CPL_TAG_WIDTH 6
+#define ESF_DZ_B2T_CPL_RSP_CPL_LEN_LBN 7
+#define ESF_DZ_B2T_CPL_RSP_CPL_LEN_WIDTH 12
+#define ESF_DZ_B2T_CPL_RSP_CPL_ADRS_LBN 0
+#define ESF_DZ_B2T_CPL_RSP_CPL_ADRS_WIDTH 7
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_EF10_REGS_H */
diff --git a/sys/dev/sfxge/common/efx_regs_mcdi.h b/sys/dev/sfxge/common/efx_regs_mcdi.h
new file mode 100644
index 0000000..720770c
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_regs_mcdi.h
@@ -0,0 +1,2786 @@
+/*-
+ * Copyright 2008-2011 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*! \cidoxg_firmware_mc_cmd */
+
+#ifndef _SIENA_MC_DRIVER_PCOL_H
+#define _SIENA_MC_DRIVER_PCOL_H
+
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#ifdef WITH_MCDI_V2
+#define MCDI_PCOL_VERSION 2
+#else
+#define MCDI_PCOL_VERSION 1
+#endif
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/**
+ * MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request; a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 2
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
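+
+/*
+ * Illustrative sketch only: assembling the MCDI v1 header dword from
+ * the LBN/WIDTH pairs above.  The helper name is an assumption made
+ * for this example; real drivers typically have an equivalent.
+ */
+#if 0
+static inline uint32_t
+mcdi_request_header(unsigned int code, unsigned int datalen,
+    unsigned int seq)
+{
+	uint32_t hdr = 0;
+
+	hdr |= (code & ((1u << MCDI_HEADER_CODE_WIDTH) - 1))
+	    << MCDI_HEADER_CODE_LBN;
+	hdr |= 1u << MCDI_HEADER_RESYNC_LBN;	/* Resync is always set */
+	hdr |= (datalen & ((1u << MCDI_HEADER_DATALEN_WIDTH) - 1))
+	    << MCDI_HEADER_DATALEN_LBN;
+	hdr |= (seq & ((1u << MCDI_HEADER_SEQ_WIDTH) - 1))
+	    << MCDI_HEADER_SEQ_LBN;
+	return (hdr);
+}
+#endif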
+
+/* Maximum number of payload bytes */
+#if MCDI_PCOL_VERSION == 1
+#define MCDI_CTL_SDU_LEN_MAX 0xfc
+#elif MCDI_PCOL_VERSION == 2
+#define MCDI_CTL_SDU_LEN_MAX 0x400
+#endif
+
+/* The MC can generate events for two reasons:
+ * - To complete a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without an
+ * MCDI header. An event can be distinguished from an MCDI response by
+ * examining the first byte which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
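+
+/*
+ * Illustrative sketch only: per the note above, an MCDI event can be
+ * told apart from other event-queue entries by the code in bits 60-63
+ * of the 64bit event.
+ */
+#if 0
+static inline int
+mcdi_ev_is_response(uint64_t ev)
+{
+	return (((unsigned int)(ev >> 60) & 0xf) ==
+	    FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
+}
+#endif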
+
+
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+
+#define MC_CMD_ERR_CODE_OFST 0
+
+/* We define 8 "escape" commands to allow
+ for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Points to the copycode entry point. */
+#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
+/* Points to the recovery mode entry point. */
+#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
+
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
+ (1 << MC_CMD_READ32) | \
+ (1 << MC_CMD_WRITE32) | \
+ (1 << MC_CMD_COPYCODE) | \
+ (1 << MC_CMD_GET_VERSION), \
+ 0, 0, 0 }
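+
+/*
+ * Illustrative sketch only: the initializer above is a four-dword
+ * bitmask covering command numbers 0-127, so support for a given
+ * command can be tested as below.  The array name is an assumption
+ * made for this example.
+ */
+#if 0
+	uint32_t v0_funcs[4] = MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS;
+	int copycode_ok = (v0_funcs[MC_CMD_COPYCODE / 32] >>
+	    (MC_CMD_COPYCODE % 32)) & 1;
+#endif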
+
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) ( \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST+ \
+ MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST)+ \
+ ((n)*MC_CMD_DBIWROP_TYPEDEF_LEN))
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) ( \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST+ \
+ MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST)+ \
+ ((n)*MC_CMD_DBIWROP_TYPEDEF_LEN))
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) ( \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST+ \
+ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST)+ \
+ ((n)*MC_CMD_DBIWROP_TYPEDEF_LEN))
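+
+/*
+ * Illustrative sketch only: the OFST(n) helpers above locate the nth
+ * 12-byte DBIWROP entry within a DBI write request payload.  The
+ * function and buffer names are assumptions made for this example,
+ * and a little-endian payload layout is assumed.
+ */
+#if 0
+static inline void
+mcdi_dbi_write_entry(uint8_t *payload, unsigned int n,
+    uint32_t addr, uint32_t mask, uint32_t value)
+{
+	memcpy(payload + MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n), &addr, 4);
+	memcpy(payload + MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n), &mask, 4);
+	memcpy(payload + MC_CMD_DBI_WRITE_IN_VALUE_OFST(n), &value, 4);
+}
+#endif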
+
+
+#ifdef WITH_MCDI_V2
+
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
+#endif
+
+/* MCDI_EVENT structuredef */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */
+#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */
+#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */
+#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */
+#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_FWALERT_DATA_LBN 8
+#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define MCDI_EVENT_FWALERT_REASON_LBN 0
+#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */
+#define MCDI_EVENT_FLR_VF_LBN 0
+#define MCDI_EVENT_FLR_VF_WIDTH 8
+#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */
+#define MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+#define MCDI_EVENT_EV_CODE_LBN 60
+#define MCDI_EVENT_EV_CODE_WIDTH 4
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */
+#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */
+#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */
+#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */
+#define MCDI_EVENT_CODE_FLR 0xa /* enum */
+#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
+#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32bit words from MC memory.
+ */
+#define MC_CMD_READ32 0x1
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
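+
+/*
+ * Illustrative note: the LENMIN/LENMAX/LEN(num) triples used for
+ * variable-length messages throughout this file bound the payload
+ * size; the sanity check below (hypothetical, C11 syntax) shows the
+ * arithmetic for a READ32 of 8 words.
+ */
+#if 0
+_Static_assert(MC_CMD_READ32_OUT_LEN(8) == 32,
+    "8-word READ32 response is 32 bytes, within [LENMIN, LENMAX]");
+#endif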
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32bit words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define MC_CMD_WRITE32_IN_LENMIN 8
+#define MC_CMD_WRITE32_IN_LENMAX 252
+#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump.
+ */
+#define MC_CMD_COPYCODE 0x3
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define MC_CMD_COPYCODE_IN_LEN 16
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ */
+#define MC_CMD_SET_FUNC 0x4
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get and clear any assertion status.
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for various events and messages.
+ */
+#define MC_CMD_LOG_CTRL 0x7
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about the MC firmware.
+ */
+#define MC_CMD_GET_VERSION 0x8
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse */
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32bit words to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */
+
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* MC_CMD_MDIO_CLAUSE22 0x20 */
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
+ */
+#define MC_CMD_DBI_WRITE 0x12
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
+ * Read a 32-bit register from the indirect port register map.
+ */
+#define MC_CMD_PORT_READ32 0x14
+
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define MC_CMD_PORT_READ32_IN_LEN 4
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
+ * Write a 32-bit register to the indirect port register map.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map.
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+/* Enum values, see field(s): */
+/* CAPABILITIES_PORT0 */
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM 12
+
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s).
+ */
+#define MC_CMD_DBI_READX 0x19
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the PCIE LTSSM.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host.
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define MC_CMD_DRV_ATTACH_IN_LEN 8
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_NCSI_PROD
+ * Trigger an NC-SI event.
+ */
+#define MC_CMD_NCSI_PROD 0x1d
+
+/* MC_CMD_NCSI_PROD_IN msgrequest */
+#define MC_CMD_NCSI_PROD_IN_LEN 4
+#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
+#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */
+#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */
+#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */
+#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0
+#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1
+#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1
+#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1
+#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2
+#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1
+
+/* MC_CMD_NCSI_PROD_OUT msgresponse */
+#define MC_CMD_NCSI_PROD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SHMUART
+ * Route UART output to a circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+
+/* MC_CMD_SHMUART_IN msgrequest */
+#define MC_CMD_SHMUART_IN_LEN 4
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset.
+ */
+#define MC_CMD_PORT_RESET 0x20
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define MC_CMD_RXD_MONITOR_IN_LEN 12
+#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * puts(3) implementation over MCDI
+ */
+#define MC_CMD_PUTS 0x23
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 255
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 243
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration.
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_MEDIA_XAUI 0x1 /* enum */
+#define MC_CMD_MEDIA_CX4 0x2 /* enum */
+#define MC_CMD_MEDIA_KX4 0x3 /* enum */
+#define MC_CMD_MEDIA_XFP 0x4 /* enum */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */
+#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY.
+ */
+#define MC_CMD_START_BIST 0x25
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */
+#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */
+#define MC_CMD_PHY_BIST 0x5 /* enum */
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion.
+ */
+#define MC_CMD_POLL_BIST 0x26
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */
+#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */
+#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s).
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Get port's loopback modes.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */
+#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */
+#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */
+#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */
+#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */
+#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */
+#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */
+#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */
+#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */
+#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */
+#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */
+#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */
+#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */
+#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
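+
+/*
+ * Decoding sketch (assumed layout): each capability field is a 64-bit
+ * mask split into LO/HI dwords, with bit N corresponding to loopback
+ * mode N from the enum above.  mcdi_read_dword() is hypothetical.
+ *
+ *	uint64_t modes;
+ *
+ *	modes = (uint64_t)mcdi_read_dword(outbuf,
+ *	    MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST);
+ *	modes |= (uint64_t)mcdi_read_dword(outbuf,
+ *	    MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST) << 32;
+ *	if (modes & (1ULL << MC_CMD_LOOPBACK_GMAC))
+ *		supported = true;
+ */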
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state.
+ */
+#define MC_CMD_GET_LINK 0x29
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_FCNTL_OFF 0x0 /* enum */
+#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */
+#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
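+
+/*
+ * Decoding sketch: the _LBN/_WIDTH pairs describe bitfields within the
+ * containing dword, so a flag is recovered by shift-and-mask.
+ * mcdi_read_dword() is a hypothetical helper.
+ *
+ *	uint32_t flags = mcdi_read_dword(outbuf,
+ *	    MC_CMD_GET_LINK_OUT_FLAGS_OFST);
+ *	int link_up = (flags >> MC_CMD_GET_LINK_OUT_LINK_UP_LBN) &
+ *	    ((1U << MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH) - 1);
+ */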
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration.
+ */
+#define MC_CMD_SET_LINK 0x2a
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state.
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration.
+ */
+#define MC_CMD_SET_MAC 0x2c
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 24
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+#define MC_CMD_FCNTL_AUTO 0x3 /* enum */
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
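+
+/*
+ * Encoding sketch (the byte order of the address is an assumption here):
+ * the 6-byte station address occupies the low 48 bits of the 8-byte ADDR
+ * field, written as LO/HI dwords.  mcdi_write_dword() is hypothetical.
+ *
+ *	uint64_t mac = 0;
+ *
+ *	for (unsigned int i = 0; i < 6; i++)
+ *		mac |= (uint64_t)enaddr[i] << (8 * i);
+ *	mcdi_write_dword(inbuf, MC_CMD_SET_MAC_IN_ADDR_LO_OFST,
+ *	    (uint32_t)mac);
+ *	mcdi_write_dword(inbuf, MC_CMD_SET_MAC_IN_ADDR_HI_OFST,
+ *	    (uint32_t)(mac >> 32));
+ */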
+
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics.
+ */
+#define MC_CMD_PHY_STATS 0x2d
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+#define MC_CMD_OUI 0x0 /* enum */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */
+#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */
+#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */
+#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */
+#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */
+#define MC_CMD_PCS_LINK_UP 0x9 /* enum */
+#define MC_CMD_PCS_RX_FAULT 0xa /* enum */
+#define MC_CMD_PCS_TX_FAULT 0xb /* enum */
+#define MC_CMD_PCS_BER 0xc /* enum */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */
+#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */
+#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */
+#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */
+#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */
+#define MC_CMD_PHYXS_SYNC 0x12 /* enum */
+#define MC_CMD_AN_LINK_UP 0x13 /* enum */
+#define MC_CMD_AN_COMPLETE 0x14 /* enum */
+#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */
+#define MC_CMD_CL22_LINK_UP 0x16 /* enum */
+#define MC_CMD_PHY_NSTATS 0x17 /* enum */
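+
+/*
+ * Decoding sketch (assumed layout): in the no-DMA form the response is an
+ * array of MC_CMD_PHY_NSTATS dword counters indexed by the enum values
+ * above, so for example the PCS bit-error-rate counter is read with:
+ *
+ *	uint32_t ber = mcdi_read_dword(outbuf,
+ *	    MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST +
+ *	    MC_CMD_PCS_BER * MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN);
+ */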
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics.
+ */
+#define MC_CMD_MAC_STATS 0x2e
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 16
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
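+
+/*
+ * Encoding sketch (an assumption about intended use): the CMD dword packs
+ * the single-bit flags with the 16-bit PERIOD_MS field, all positioned by
+ * their _LBN values.  This would request periodic DMA of the statistics
+ * every 1000 ms; mcdi_write_dword() is hypothetical.
+ *
+ *	uint32_t cmd = (1U << MC_CMD_MAC_STATS_IN_DMA_LBN) |
+ *	    (1U << MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN) |
+ *	    (1U << MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN) |
+ *	    (1000U << MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN);
+ *	mcdi_write_dword(inbuf, MC_CMD_MAC_STATS_IN_CMD_OFST, cmd);
+ */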
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */
+#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */
+#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
+
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * Perform memory copy operation.
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define MC_CMD_MEMCPY_IN_LENMIN 32
+#define MC_CMD_MEMCPY_IN_LENMAX 224
+#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define MC_CMD_MEMCPY_OUT_LEN 0
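+
+/*
+ * Encoding sketch (assumed layout): the request body is an array of
+ * MC_CMD_MEMCPY_RECORD_TYPEDEF structures packed back to back, at most
+ * MC_CMD_MEMCPY_IN_RECORD_MAXNUM per request, so record i starts at:
+ *
+ *	uint8_t *rec = inbuf + MC_CMD_MEMCPY_IN_RECORD_OFST +
+ *	    i * MC_CMD_MEMCPY_IN_RECORD_LEN;
+ *	mcdi_write_dword(rec, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST,
+ *	    to_rid);
+ */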
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */
+#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */
+#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters.
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value.
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Get information about the virtual NVRAM partitions.
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition.
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition.
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition.
+ */
+#define MC_CMD_NVRAM_READ 0x39
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 255
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 255
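+
+/*
+ * Usage sketch (an assumption, not from this file): because the response
+ * buffer is capped at MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM bytes, a
+ * whole partition is read as a loop of bounded chunks.  mcdi_rpc() and
+ * mcdi_write_dword() are hypothetical helpers.
+ *
+ *	for (offset = 0; offset < size; offset += chunk) {
+ *		chunk = MIN(size - offset,
+ *		    MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM);
+ *		mcdi_write_dword(inbuf, MC_CMD_NVRAM_READ_IN_OFFSET_OFST,
+ *		    offset);
+ *		mcdi_write_dword(inbuf, MC_CMD_NVRAM_READ_IN_LENGTH_OFST,
+ *		    chunk);
+ *		mcdi_rpc(sc, MC_CMD_NVRAM_READ, inbuf,
+ *		    MC_CMD_NVRAM_READ_IN_LEN, outbuf, sizeof(outbuf));
+ *		memcpy(dest + offset,
+ *		    outbuf + MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST, chunk);
+ *	}
+ */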
+
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition.
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 255
+#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition.
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ */
+#define MC_CMD_REBOOT 0x3d
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */
+#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */
+#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */
+#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */
+#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */
+#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */
+#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 1
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
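+
+/*
+ * Decoding sketch (assumed layout): MASK carries one bit per sensor type
+ * and the ENTRY array carries one 8-byte limits structure per set bit,
+ * in ascending bit order.  mcdi_read_dword() is hypothetical.
+ *
+ *	uint32_t mask = mcdi_read_dword(outbuf,
+ *	    MC_CMD_SENSOR_INFO_OUT_MASK_OFST);
+ *	unsigned int type, idx = 0;
+ *
+ *	for (type = 0; type < 32; type++) {
+ *		if (!(mask & (1U << type)))
+ *			continue;
+ *		size_t ofst = MC_CMD_SENSOR_ENTRY_OFST +
+ *		    idx++ * MC_CMD_SENSOR_ENTRY_LEN;
+ *		uint16_t min1 = mcdi_read_dword(outbuf, ofst) &
+ *		    ((1U << MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH) - 1);
+ *	}
+ */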
+
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY.
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_PHY_STATE_OK 0x1 /* enum */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve the ID of any WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to the NIC for the lights-out state.
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from the NIC for the lights-out state.
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ */
+#define MC_CMD_TESTASSERT 0x49
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround.
+ */
+#define MC_CMD_WORKAROUND 0x4a
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 255
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 251
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition.
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */
+
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver.
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjust the sensor limits.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ */
+#define MC_CMD_INIT_EVQ 0x50
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 36
+#define MC_CMD_INIT_EVQ_IN_LENMAX 540
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (28+8*(num))
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 /* enum */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1 /* enum */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2 /* enum */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3 /* enum */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
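+
+/*
+ * Encoding sketch (assumed layout): DMA_ADDR is an array of 64-bit buffer
+ * addresses, each written as LO/HI dwords; entry i therefore starts at
+ * MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST + i * MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN.
+ * mcdi_write_dword() is hypothetical.
+ *
+ *	for (unsigned int i = 0; i < nbufs; i++) {
+ *		size_t ofst = MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST +
+ *		    i * MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN;
+ *		mcdi_write_dword(inbuf, ofst, (uint32_t)addr[i]);
+ *		mcdi_write_dword(inbuf, ofst + 4,
+ *		    (uint32_t)(addr[i] >> 32));
+ *	}
+ */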
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+#define QUEUE_CRC_MODE_NONE 0x0 /* enum */
+#define QUEUE_CRC_MODE_FCOE 0x1 /* enum */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 /* enum */
+#define QUEUE_CRC_MODE_ISCSI 0x3 /* enum */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4 /* enum */
+#define QUEUE_CRC_MODE_MPA 0x5 /* enum */
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ */
+#define MC_CMD_INIT_RXQ 0x51
+
+/* MC_CMD_INIT_RXQ_IN msgrequest */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 32
+#define MC_CMD_INIT_RXQ_IN_LENMAX 248
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (24+8*(num))
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PKT_EDIT_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_PKT_EDIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 24
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 24
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x52
+
+/* MC_CMD_INIT_TXQ_IN msgrequest */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 32
+#define MC_CMD_INIT_TXQ_IN_LENMAX 248
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (24+8*(num))
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 24
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 24
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ */
+#define MC_CMD_FINI_EVQ 0x55
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ */
+#define MC_CMD_FINI_RXQ 0x56
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ */
+#define MC_CMD_FINI_TXQ 0x57
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ */
+#define MC_CMD_DRIVER_EVENT 0x5a
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_OWNER_IDS
+ */
+#define MC_CMD_ALLOC_OWNER_IDS 0x54
+
+/* MC_CMD_ALLOC_OWNER_IDS_IN msgrequest */
+#define MC_CMD_ALLOC_OWNER_IDS_IN_LEN 4
+#define MC_CMD_ALLOC_OWNER_IDS_IN_NIDS_OFST 0
+
+/* MC_CMD_ALLOC_OWNER_IDS_OUT msgresponse */
+#define MC_CMD_ALLOC_OWNER_IDS_OUT_LEN 12
+#define MC_CMD_ALLOC_OWNER_IDS_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_OWNER_IDS_OUT_NIDS_OFST 4
+#define MC_CMD_ALLOC_OWNER_IDS_OUT_BASE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_FREE_OWNER_IDS
+ */
+#define MC_CMD_FREE_OWNER_IDS 0x59
+
+/* MC_CMD_FREE_OWNER_IDS_IN msgrequest */
+#define MC_CMD_FREE_OWNER_IDS_IN_LEN 4
+#define MC_CMD_FREE_OWNER_IDS_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_OWNER_IDS_OUT msgresponse */
+#define MC_CMD_FREE_OWNER_IDS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x5c
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x5d
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x5e
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ */
+#define MC_CMD_GET_PF_COUNT 0x60
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ */
+#define MC_CMD_FILTER_OP 0x61
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 100
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 /* enum */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 /* enum */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 /* enum */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 /* enum */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 8
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 12
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 /* enum */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 /* enum */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 /* enum */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 /* enum */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 /* enum */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 16
+#define MC_CMD_FILTER_OP_IN_RX_FLAGS_OFST 20
+#define MC_CMD_FILTER_OP_IN_RX_FLAG_RSS_LBN 0
+#define MC_CMD_FILTER_OP_IN_RX_FLAG_RSS_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_RSS_CONTEXT_OFST 24
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 28
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 32
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 36
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 42
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 52
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 54
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 56
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 58
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 60
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 64
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 68
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 84
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 8
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_OUT_OP_INSERT 0x0 /* enum */
+#define MC_CMD_FILTER_OP_OUT_OP_REMOVE 0x1 /* enum */
+#define MC_CMD_FILTER_OP_OUT_OP_SUBSCRIBE 0x2 /* enum */
+#define MC_CMD_FILTER_OP_OUT_OP_UNSUBSCRIBE 0x3 /* enum */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
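+
+/*
+ * Encoding sketch (an assumption about intended use): to insert a filter
+ * that matches on destination IP and port and delivers to a host RX
+ * queue, set the corresponding MATCH_* bits and fill only those fields.
+ * mcdi_write_dword() is a hypothetical helper.
+ *
+ *	mcdi_write_dword(inbuf, MC_CMD_FILTER_OP_IN_OP_OFST,
+ *	    MC_CMD_FILTER_OP_IN_OP_INSERT);
+ *	mcdi_write_dword(inbuf, MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST,
+ *	    (1U << MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN) |
+ *	    (1U << MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN));
+ *	mcdi_write_dword(inbuf, MC_CMD_FILTER_OP_IN_RX_DEST_OFST,
+ *	    MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ *	mcdi_write_dword(inbuf, MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST, rxq);
+ */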
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ */
+#define MC_CMD_SET_PF_COUNT 0x62
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0x63
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0x64
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ */
+#define MC_CMD_ALLOC_VIS 0x65
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 4
+#define MC_CMD_ALLOC_VIS_IN_VI_COUNT_OFST 0
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ */
+#define MC_CMD_FREE_VIS 0x66
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ */
+#define MC_CMD_GET_SRIOV_CFG 0x67
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ */
+#define MC_CMD_SET_SRIOV_CFG 0x68
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_COUNT
+ */
+#define MC_CMD_GET_VI_COUNT 0x69
+
+/* MC_CMD_GET_VI_COUNT_IN msgrequest */
+#define MC_CMD_GET_VI_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_VI_COUNT_OUT msgresponse */
+#define MC_CMD_GET_VI_COUNT_OUT_LEN 4
+#define MC_CMD_GET_VI_COUNT_OUT_VI_COUNT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ */
+#define MC_CMD_GET_VECTOR_CFG 0x70
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ */
+#define MC_CMD_SET_VECTOR_CFG 0x71
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x72
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ */
+#define MC_CMD_FREE_PIOBUF 0x73
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
+
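+/*
+ * Editors' sketch (hypothetical helper, not part of the original header):
+ * a v2 extended-command header word is assembled from the fields above,
+ * with ext_cmd below 2^15 and len below 2^10 per the _WIDTH values.
+ * Assumes <stdint.h>.
+ */
+#define	MC_V2_EXTN_HDR(_ext_cmd, _len)					\
+	(((uint32_t)(_ext_cmd) << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN) |	\
+	 ((uint32_t)(_len) << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN))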
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0x80
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0x81
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ */
+#define MC_CMD_TCM_TXQ_INIT 0x82
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+#endif /* _SIENA_MC_DRIVER_PCOL_H */
+/*! \cidoxg_end */
diff --git a/sys/dev/sfxge/common/efx_regs_pci.h b/sys/dev/sfxge/common/efx_regs_pci.h
new file mode 100644
index 0000000..cb5efab
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_regs_pci.h
@@ -0,0 +1,2376 @@
+/*-
+ * Copyright 2007-2010 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SYS_EFX_REGS_PCI_H
+#define _SYS_EFX_REGS_PCI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PC_VEND_ID_REG(16bit):
+ * Vendor ID register
+ */
+
+#define PCR_AZ_VEND_ID_REG 0x00000000
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VEND_ID_LBN 0
+#define PCRF_AZ_VEND_ID_WIDTH 16
+
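+/*
+ * Editors' usage sketch (hypothetical helper, not part of the original
+ * header): throughout this file each field is described by an _LBN (least
+ * significant bit number) and _WIDTH (size in bits) pair.  A field can be
+ * extracted from a register value as below; note the shift-by-width form
+ * assumes _WIDTH < 32, so 32-bit-wide fields need special handling.
+ */
+#define	PCR_FIELD_GET(_val, _field)					\
+	(((_val) >> _field ## _LBN) &					\
+	 ((1u << _field ## _WIDTH) - 1))
+/* e.g. PCR_FIELD_GET(cfg_word, PCRF_AZ_VEND_ID) yields the vendor ID */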
+
+/*
+ * PC_DEV_ID_REG(16bit):
+ * Device ID register
+ */
+
+#define PCR_AZ_DEV_ID_REG 0x00000002
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DEV_ID_LBN 0
+#define PCRF_AZ_DEV_ID_WIDTH 16
+
+
+/*
+ * PC_CMD_REG(16bit):
+ * Command register
+ */
+
+#define PCR_AZ_CMD_REG 0x00000004
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INTX_DIS_LBN 10
+#define PCRF_AZ_INTX_DIS_WIDTH 1
+#define PCRF_AZ_FB2B_EN_LBN 9
+#define PCRF_AZ_FB2B_EN_WIDTH 1
+#define PCRF_AZ_SERR_EN_LBN 8
+#define PCRF_AZ_SERR_EN_WIDTH 1
+#define PCRF_AZ_IDSEL_CTL_LBN 7
+#define PCRF_AZ_IDSEL_CTL_WIDTH 1
+#define PCRF_AZ_PERR_EN_LBN 6
+#define PCRF_AZ_PERR_EN_WIDTH 1
+#define PCRF_AZ_VGA_PAL_SNP_LBN 5
+#define PCRF_AZ_VGA_PAL_SNP_WIDTH 1
+#define PCRF_AZ_MWI_EN_LBN 4
+#define PCRF_AZ_MWI_EN_WIDTH 1
+#define PCRF_AZ_SPEC_CYC_LBN 3
+#define PCRF_AZ_SPEC_CYC_WIDTH 1
+#define PCRF_AZ_MST_EN_LBN 2
+#define PCRF_AZ_MST_EN_WIDTH 1
+#define PCRF_AZ_MEM_EN_LBN 1
+#define PCRF_AZ_MEM_EN_WIDTH 1
+#define PCRF_AZ_IO_EN_LBN 0
+#define PCRF_AZ_IO_EN_WIDTH 1
+
+
+/*
+ * PC_STAT_REG(16bit):
+ * Status register
+ */
+
+#define PCR_AZ_STAT_REG 0x00000006
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DET_PERR_LBN 15
+#define PCRF_AZ_DET_PERR_WIDTH 1
+#define PCRF_AZ_SIG_SERR_LBN 14
+#define PCRF_AZ_SIG_SERR_WIDTH 1
+#define PCRF_AZ_GOT_MABRT_LBN 13
+#define PCRF_AZ_GOT_MABRT_WIDTH 1
+#define PCRF_AZ_GOT_TABRT_LBN 12
+#define PCRF_AZ_GOT_TABRT_WIDTH 1
+#define PCRF_AZ_SIG_TABRT_LBN 11
+#define PCRF_AZ_SIG_TABRT_WIDTH 1
+#define PCRF_AZ_DEVSEL_TIM_LBN 9
+#define PCRF_AZ_DEVSEL_TIM_WIDTH 2
+#define PCRF_AZ_MDAT_PERR_LBN 8
+#define PCRF_AZ_MDAT_PERR_WIDTH 1
+#define PCRF_AZ_FB2B_CAP_LBN 7
+#define PCRF_AZ_FB2B_CAP_WIDTH 1
+#define PCRF_AZ_66MHZ_CAP_LBN 5
+#define PCRF_AZ_66MHZ_CAP_WIDTH 1
+#define PCRF_AZ_CAP_LIST_LBN 4
+#define PCRF_AZ_CAP_LIST_WIDTH 1
+#define PCRF_AZ_INTX_STAT_LBN 3
+#define PCRF_AZ_INTX_STAT_WIDTH 1
+
+
+/*
+ * PC_REV_ID_REG(8bit):
+ * Class code & revision ID register
+ */
+
+#define PCR_AZ_REV_ID_REG 0x00000008
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_REV_ID_LBN 0
+#define PCRF_AZ_REV_ID_WIDTH 8
+
+
+/*
+ * PC_CC_REG(24bit):
+ * Class code register
+ */
+
+#define PCR_AZ_CC_REG 0x00000009
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BASE_CC_LBN 16
+#define PCRF_AZ_BASE_CC_WIDTH 8
+#define PCRF_AZ_SUB_CC_LBN 8
+#define PCRF_AZ_SUB_CC_WIDTH 8
+#define PCRF_AZ_PROG_IF_LBN 0
+#define PCRF_AZ_PROG_IF_WIDTH 8
+
+
+/*
+ * PC_CACHE_LSIZE_REG(8bit):
+ * Cache line size
+ */
+
+#define PCR_AZ_CACHE_LSIZE_REG 0x0000000c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CACHE_LSIZE_LBN 0
+#define PCRF_AZ_CACHE_LSIZE_WIDTH 8
+
+
+/*
+ * PC_MST_LAT_REG(8bit):
+ * Master latency timer register
+ */
+
+#define PCR_AZ_MST_LAT_REG 0x0000000d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MST_LAT_LBN 0
+#define PCRF_AZ_MST_LAT_WIDTH 8
+
+
+/*
+ * PC_HDR_TYPE_REG(8bit):
+ * Header type register
+ */
+
+#define PCR_AZ_HDR_TYPE_REG 0x0000000e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MULT_FUNC_LBN 7
+#define PCRF_AZ_MULT_FUNC_WIDTH 1
+#define PCRF_AZ_TYPE_LBN 0
+#define PCRF_AZ_TYPE_WIDTH 7
+
+
+/*
+ * PC_BIST_REG(8bit):
+ * BIST register
+ */
+
+#define PCR_AZ_BIST_REG 0x0000000f
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BIST_LBN 0
+#define PCRF_AZ_BIST_WIDTH 8
+
+
+/*
+ * PC_BAR0_REG(32bit):
+ * Primary function base address register 0
+ */
+
+#define PCR_AZ_BAR0_REG 0x00000010
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR0_LBN 4
+#define PCRF_AZ_BAR0_WIDTH 28
+#define PCRF_AZ_BAR0_PREF_LBN 3
+#define PCRF_AZ_BAR0_PREF_WIDTH 1
+#define PCRF_AZ_BAR0_TYPE_LBN 1
+#define PCRF_AZ_BAR0_TYPE_WIDTH 2
+#define PCRF_AZ_BAR0_IOM_LBN 0
+#define PCRF_AZ_BAR0_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR1_REG(32bit):
+ * Primary function base address register 1; BAR1 is not implemented, so it is read-only.
+ */
+
+#define PCR_DZ_BAR1_REG 0x00000014
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_BAR1_LBN 0
+#define PCRF_DZ_BAR1_WIDTH 32
+
+
+/*
+ * PC_BAR2_LO_REG(32bit):
+ * Primary function base address register 2 low bits
+ */
+
+#define PCR_AZ_BAR2_LO_REG 0x00000018
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_LO_LBN 4
+#define PCRF_AZ_BAR2_LO_WIDTH 28
+#define PCRF_AZ_BAR2_PREF_LBN 3
+#define PCRF_AZ_BAR2_PREF_WIDTH 1
+#define PCRF_AZ_BAR2_TYPE_LBN 1
+#define PCRF_AZ_BAR2_TYPE_WIDTH 2
+#define PCRF_AZ_BAR2_IOM_LBN 0
+#define PCRF_AZ_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR2_HI_REG(32bit):
+ * Primary function base address register 2 high bits
+ */
+
+#define PCR_AZ_BAR2_HI_REG 0x0000001c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_HI_LBN 0
+#define PCRF_AZ_BAR2_HI_WIDTH 32
+
+
+/*
+ * PC_BAR4_LO_REG(32bit):
+ * Primary function base address register 4 low bits
+ */
+
+#define PCR_CZ_BAR4_LO_REG 0x00000020
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_LO_LBN 4
+#define PCRF_CZ_BAR4_LO_WIDTH 28
+#define PCRF_CZ_BAR4_PREF_LBN 3
+#define PCRF_CZ_BAR4_PREF_WIDTH 1
+#define PCRF_CZ_BAR4_TYPE_LBN 1
+#define PCRF_CZ_BAR4_TYPE_WIDTH 2
+#define PCRF_CZ_BAR4_IOM_LBN 0
+#define PCRF_CZ_BAR4_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR4_HI_REG(32bit):
+ * Primary function base address register 4 high bits
+ */
+
+#define PCR_CZ_BAR4_HI_REG 0x00000024
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_HI_LBN 0
+#define PCRF_CZ_BAR4_HI_WIDTH 32
+
+
+/*
+ * PC_SS_VEND_ID_REG(16bit):
+ * Sub-system vendor ID register
+ */
+
+#define PCR_AZ_SS_VEND_ID_REG 0x0000002c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_VEND_ID_LBN 0
+#define PCRF_AZ_SS_VEND_ID_WIDTH 16
+
+
+/*
+ * PC_SS_ID_REG(16bit):
+ * Sub-system ID register
+ */
+
+#define PCR_AZ_SS_ID_REG 0x0000002e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_ID_LBN 0
+#define PCRF_AZ_SS_ID_WIDTH 16
+
+
+/*
+ * PC_EXPROM_BAR_REG(32bit):
+ * Expansion ROM base address register
+ */
+
+#define PCR_AZ_EXPROM_BAR_REG 0x00000030
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXPROM_BAR_LBN 11
+#define PCRF_AZ_EXPROM_BAR_WIDTH 21
+#define PCRF_AB_EXPROM_MIN_SIZE_LBN 2
+#define PCRF_AB_EXPROM_MIN_SIZE_WIDTH 9
+#define PCRF_CZ_EXPROM_MIN_SIZE_LBN 1
+#define PCRF_CZ_EXPROM_MIN_SIZE_WIDTH 10
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_LBN 1
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_WIDTH 1
+#define PCRF_AZ_EXPROM_EN_LBN 0
+#define PCRF_AZ_EXPROM_EN_WIDTH 1
+
+
+/*
+ * PC_CAP_PTR_REG(8bit):
+ * Capability pointer register
+ */
+
+#define PCR_AZ_CAP_PTR_REG 0x00000034
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CAP_PTR_LBN 0
+#define PCRF_AZ_CAP_PTR_WIDTH 8
+
+
+/*
+ * PC_INT_LINE_REG(8bit):
+ * Interrupt line register
+ */
+
+#define PCR_AZ_INT_LINE_REG 0x0000003c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_LINE_LBN 0
+#define PCRF_AZ_INT_LINE_WIDTH 8
+
+
+/*
+ * PC_INT_PIN_REG(8bit):
+ * Interrupt pin register
+ */
+
+#define PCR_AZ_INT_PIN_REG 0x0000003d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_PIN_LBN 0
+#define PCRF_AZ_INT_PIN_WIDTH 8
+
+
+/*
+ * PC_PM_CAP_ID_REG(8bit):
+ * Power management capability ID
+ */
+
+#define PCR_AC_PM_CAP_ID_REG 0x00000040
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCR_DZ_PM_CAP_ID_REG 0x00000080
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_CAP_ID_LBN 0
+#define PCRF_AZ_PM_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PM_NXT_PTR_REG(8bit):
+ * Power management next item pointer
+ */
+
+#define PCR_AC_PM_NXT_PTR_REG 0x00000041
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCR_DZ_PM_NXT_PTR_REG 0x00000081
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_NXT_PTR_LBN 0
+#define PCRF_AZ_PM_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_PM_CAP_REG(16bit):
+ * Power management capabilities register
+ */
+
+#define PCR_AC_PM_CAP_REG 0x00000042
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCR_DZ_PM_CAP_REG 0x00000082
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_SUPT_LBN 11
+#define PCRF_AZ_PM_PME_SUPT_WIDTH 5
+#define PCRF_AZ_PM_D2_SUPT_LBN 10
+#define PCRF_AZ_PM_D2_SUPT_WIDTH 1
+#define PCRF_AZ_PM_D1_SUPT_LBN 9
+#define PCRF_AZ_PM_D1_SUPT_WIDTH 1
+#define PCRF_AZ_PM_AUX_CURR_LBN 6
+#define PCRF_AZ_PM_AUX_CURR_WIDTH 3
+#define PCRF_AZ_PM_DSI_LBN 5
+#define PCRF_AZ_PM_DSI_WIDTH 1
+#define PCRF_AZ_PM_PME_CLK_LBN 3
+#define PCRF_AZ_PM_PME_CLK_WIDTH 1
+#define PCRF_AZ_PM_PME_VER_LBN 0
+#define PCRF_AZ_PM_PME_VER_WIDTH 3
+
+
+/*
+ * PC_PM_CS_REG(16bit):
+ * Power management control & status register
+ */
+
+#define PCR_AC_PM_CS_REG 0x00000044
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCR_DZ_PM_CS_REG 0x00000084
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_STAT_LBN 15
+#define PCRF_AZ_PM_PME_STAT_WIDTH 1
+#define PCRF_AZ_PM_DAT_SCALE_LBN 13
+#define PCRF_AZ_PM_DAT_SCALE_WIDTH 2
+#define PCRF_AZ_PM_DAT_SEL_LBN 9
+#define PCRF_AZ_PM_DAT_SEL_WIDTH 4
+#define PCRF_AZ_PM_PME_EN_LBN 8
+#define PCRF_AZ_PM_PME_EN_WIDTH 1
+#define PCRF_CZ_NO_SOFT_RESET_LBN 3
+#define PCRF_CZ_NO_SOFT_RESET_WIDTH 1
+#define PCRF_AZ_PM_PWR_ST_LBN 0
+#define PCRF_AZ_PM_PWR_ST_WIDTH 2
+
+
+/*
+ * PC_MSI_CAP_ID_REG(8bit):
+ * MSI capability ID
+ */
+
+#define PCR_AC_MSI_CAP_ID_REG 0x00000050
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCR_DZ_MSI_CAP_ID_REG 0x00000090
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_CAP_ID_LBN 0
+#define PCRF_AZ_MSI_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSI_NXT_PTR_REG(8bit):
+ * MSI next item pointer
+ */
+
+#define PCR_AC_MSI_NXT_PTR_REG 0x00000051
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCR_DZ_MSI_NXT_PTR_REG 0x00000091
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_NXT_PTR_LBN 0
+#define PCRF_AZ_MSI_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSI_CTL_REG(16bit):
+ * MSI control register
+ */
+
+#define PCR_AC_MSI_CTL_REG 0x00000052
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCR_DZ_MSI_CTL_REG 0x00000092
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_64_EN_LBN 7
+#define PCRF_AZ_MSI_64_EN_WIDTH 1
+#define PCRF_AZ_MSI_MULT_MSG_EN_LBN 4
+#define PCRF_AZ_MSI_MULT_MSG_EN_WIDTH 3
+#define PCRF_AZ_MSI_MULT_MSG_CAP_LBN 1
+#define PCRF_AZ_MSI_MULT_MSG_CAP_WIDTH 3
+#define PCRF_AZ_MSI_EN_LBN 0
+#define PCRF_AZ_MSI_EN_WIDTH 1
+
+
+/*
+ * PC_MSI_ADR_LO_REG(32bit):
+ * MSI low 32 bits address register
+ */
+
+#define PCR_AC_MSI_ADR_LO_REG 0x00000054
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCR_DZ_MSI_ADR_LO_REG 0x00000094
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_LO_LBN 2
+#define PCRF_AZ_MSI_ADR_LO_WIDTH 30
+
+
+/*
+ * PC_VPD_CAP_CTL_REG(8bit):
+ * VPD control and capabilities register
+ */
+
+#define PCR_DZ_VPD_CAP_CTL_REG 0x00000054
+/* hunta0=pci_f0_config */
+
+#define PCR_CC_VPD_CAP_CTL_REG 0x000000d0
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CZ_VPD_FLAG_LBN 31
+#define PCRF_CZ_VPD_FLAG_WIDTH 1
+#define PCRF_CZ_VPD_ADDR_LBN 16
+#define PCRF_CZ_VPD_ADDR_WIDTH 15
+#define PCRF_CZ_VPD_NXT_PTR_LBN 8
+#define PCRF_CZ_VPD_NXT_PTR_WIDTH 8
+#define PCRF_CZ_VPD_CAP_ID_LBN 0
+#define PCRF_CZ_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_VPD_CAP_DATA_REG(32bit):
+ * VPD data register
+ */
+
+#define PCR_DZ_VPD_CAP_DATA_REG 0x00000058
+/* hunta0=pci_f0_config */
+
+#define PCR_AB_VPD_CAP_DATA_REG 0x000000b4
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_VPD_CAP_DATA_REG 0x000000d4
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AZ_VPD_DATA_LBN 0
+#define PCRF_AZ_VPD_DATA_WIDTH 32
+
+
+/*
+ * PC_MSI_ADR_HI_REG(32bit):
+ * MSI high 32 bits address register
+ */
+
+#define PCR_AC_MSI_ADR_HI_REG 0x00000058
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCR_DZ_MSI_ADR_HI_REG 0x00000098
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_HI_LBN 0
+#define PCRF_AZ_MSI_ADR_HI_WIDTH 32
+
+
+/*
+ * PC_MSI_DAT_REG(16bit):
+ * MSI data register
+ */
+
+#define PCR_AC_MSI_DAT_REG 0x0000005c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCR_DZ_MSI_DAT_REG 0x0000009c
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_DAT_LBN 0
+#define PCRF_AZ_MSI_DAT_WIDTH 16
+
+
+/*
+ * PC_PCIE_CAP_LIST_REG(16bit):
+ * PCIe capability list register
+ */
+
+#define PCR_AB_PCIE_CAP_LIST_REG 0x00000060
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PCIE_CAP_LIST_REG 0x00000070
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_PCIE_CAP_LIST_REG 0x000000c0
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_NXT_PTR_LBN 8
+#define PCRF_AZ_PCIE_NXT_PTR_WIDTH 8
+#define PCRF_AZ_PCIE_CAP_ID_LBN 0
+#define PCRF_AZ_PCIE_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PCIE_CAP_REG(16bit):
+ * PCIe capability register
+ */
+
+#define PCR_AB_PCIE_CAP_REG 0x00000062
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PCIE_CAP_REG 0x00000072
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_PCIE_CAP_REG 0x000000c2
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_INT_MSG_NUM_LBN 9
+#define PCRF_AZ_PCIE_INT_MSG_NUM_WIDTH 5
+#define PCRF_AZ_PCIE_SLOT_IMP_LBN 8
+#define PCRF_AZ_PCIE_SLOT_IMP_WIDTH 1
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_LBN 4
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_WIDTH 4
+#define PCRF_AZ_PCIE_CAP_VER_LBN 0
+#define PCRF_AZ_PCIE_CAP_VER_WIDTH 4
+
+
+/*
+ * PC_DEV_CAP_REG(32bit):
+ * PCIe device capabilities register
+ */
+
+#define PCR_AB_DEV_CAP_REG 0x00000064
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_DEV_CAP_REG 0x00000074
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_DEV_CAP_REG 0x000000c4
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_LBN 28
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_LBN 26
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_WIDTH 2
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_LBN 18
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_WIDTH 8
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_LBN 15
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_WIDTH 1
+#define PCRF_AB_PWR_IND_LBN 14
+#define PCRF_AB_PWR_IND_WIDTH 1
+#define PCRF_AB_ATTN_IND_LBN 13
+#define PCRF_AB_ATTN_IND_WIDTH 1
+#define PCRF_AB_ATTN_BUTTON_LBN 12
+#define PCRF_AB_ATTN_BUTTON_WIDTH 1
+#define PCRF_AZ_ENDPT_L1_LAT_LBN 9
+#define PCRF_AZ_ENDPT_L1_LAT_WIDTH 3
+#define PCRF_AZ_ENDPT_L0_LAT_LBN 6
+#define PCRF_AZ_ENDPT_L0_LAT_WIDTH 3
+#define PCRF_AZ_TAG_FIELD_LBN 5
+#define PCRF_AZ_TAG_FIELD_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_LBN 3
+#define PCRF_AZ_PHAN_FUNC_WIDTH 2
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_LBN 0
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_WIDTH 3
+
+
+/*
+ * PC_DEV_CTL_REG(16bit):
+ * PCIe device control register
+ */
+
+#define PCR_AB_DEV_CTL_REG 0x00000068
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_DEV_CTL_REG 0x00000078
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_DEV_CTL_REG 0x000000c8
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_FN_LEVEL_RESET_LBN 15
+#define PCRF_CZ_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_MAX_RD_REQ_SIZE_LBN 12
+#define PCRF_AZ_MAX_RD_REQ_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_4096 5
+#define PCFE_AZ_MAX_RD_REQ_SIZE_2048 4
+#define PCFE_AZ_MAX_RD_REQ_SIZE_1024 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_512 2
+#define PCFE_AZ_MAX_RD_REQ_SIZE_256 1
+#define PCFE_AZ_MAX_RD_REQ_SIZE_128 0
+#define PCFE_DZ_OTHER other
+#define PCRF_AZ_EN_NO_SNOOP_LBN 11
+#define PCRF_AZ_EN_NO_SNOOP_WIDTH 1
+#define PCRF_AZ_AUX_PWR_PM_EN_LBN 10
+#define PCRF_AZ_AUX_PWR_PM_EN_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_EN_LBN 9
+#define PCRF_AZ_PHAN_FUNC_EN_WIDTH 1
+#define PCRF_AB_DEV_CAP_REG_RSVD0_LBN 8
+#define PCRF_AB_DEV_CAP_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_EXTENDED_TAG_EN_LBN 8
+#define PCRF_CZ_EXTENDED_TAG_EN_WIDTH 1
+#define PCRF_AZ_MAX_PAYL_SIZE_LBN 5
+#define PCRF_AZ_MAX_PAYL_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_PAYL_SIZE_4096 5
+#define PCFE_AZ_MAX_PAYL_SIZE_2048 4
+#define PCFE_AZ_MAX_PAYL_SIZE_1024 3
+#define PCFE_AZ_MAX_PAYL_SIZE_512 2
+#define PCFE_AZ_MAX_PAYL_SIZE_256 1
+#define PCFE_AZ_MAX_PAYL_SIZE_128 0
+#define PCFE_DZ_OTHER other
+#define PCRF_AZ_EN_RELAX_ORDER_LBN 4
+#define PCRF_AZ_EN_RELAX_ORDER_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_LBN 3
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_RPT_EN_LBN 2
+#define PCRF_AZ_FATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_CORR_ERR_RPT_EN_LBN 0
+#define PCRF_AZ_CORR_ERR_RPT_EN_WIDTH 1
+
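+/*
+ * Editors' sketch (hypothetical, not part of the original header):
+ * updating a multi-bit field such as the maximum read request size is a
+ * read-modify-write of this 16-bit register, e.g.:
+ *
+ *	ctl &= ~(((1u << PCRF_AZ_MAX_RD_REQ_SIZE_WIDTH) - 1) <<
+ *	    PCRF_AZ_MAX_RD_REQ_SIZE_LBN);
+ *	ctl |= PCFE_AZ_MAX_RD_REQ_SIZE_512 << PCRF_AZ_MAX_RD_REQ_SIZE_LBN;
+ */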
+
+/*
+ * PC_DEV_STAT_REG(16bit):
+ * PCIe device status register
+ */
+
+#define PCR_AB_DEV_STAT_REG 0x0000006a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_DEV_STAT_REG 0x0000007a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_DEV_STAT_REG 0x000000ca
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_TRNS_PEND_LBN 5
+#define PCRF_AZ_TRNS_PEND_WIDTH 1
+#define PCRF_AZ_AUX_PWR_DET_LBN 4
+#define PCRF_AZ_AUX_PWR_DET_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_DET_LBN 3
+#define PCRF_AZ_UNSUP_REQ_DET_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_DET_LBN 2
+#define PCRF_AZ_FATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_DET_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_CORR_ERR_DET_LBN 0
+#define PCRF_AZ_CORR_ERR_DET_WIDTH 1
+
+
+/*
+ * PC_LNK_CAP_REG(32bit):
+ * PCIe link capabilities register
+ */
+
+#define PCR_AB_LNK_CAP_REG 0x0000006c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_LNK_CAP_REG 0x0000007c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_LNK_CAP_REG 0x000000cc
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_PORT_NUM_LBN 24
+#define PCRF_AZ_PORT_NUM_WIDTH 8
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_LBN 21
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_WIDTH 1
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_LBN 20
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_WIDTH 1
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_LBN 19
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_WIDTH 1
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_LBN 18
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_WIDTH 1
+#define PCRF_AZ_DEF_L1_EXIT_LAT_LBN 15
+#define PCRF_AZ_DEF_L1_EXIT_LAT_WIDTH 3
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_LBN 12
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_WIDTH 3
+#define PCRF_AZ_AS_LNK_PM_SUPT_LBN 10
+#define PCRF_AZ_AS_LNK_PM_SUPT_WIDTH 2
+#define PCRF_AZ_MAX_LNK_WIDTH_LBN 4
+#define PCRF_AZ_MAX_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_MAX_LNK_SP_LBN 0
+#define PCRF_AZ_MAX_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_LNK_CTL_REG(16bit):
+ * PCIe link control register
+ */
+
+#define PCR_AB_LNK_CTL_REG 0x00000070
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_LNK_CTL_REG 0x00000080
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_LNK_CTL_REG 0x000000d0
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXT_SYNC_LBN 7
+#define PCRF_AZ_EXT_SYNC_WIDTH 1
+#define PCRF_AZ_COMM_CLK_CFG_LBN 6
+#define PCRF_AZ_COMM_CLK_CFG_WIDTH 1
+#define PCRF_AB_LNK_CTL_REG_RSVD0_LBN 5
+#define PCRF_AB_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_LNK_RETRAIN_LBN 5
+#define PCRF_CZ_LNK_RETRAIN_WIDTH 1
+#define PCRF_AZ_LNK_DIS_LBN 4
+#define PCRF_AZ_LNK_DIS_WIDTH 1
+#define PCRF_AZ_RD_COM_BDRY_LBN 3
+#define PCRF_AZ_RD_COM_BDRY_WIDTH 1
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_LBN 0
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_WIDTH 2
+
+
+/*
+ * PC_LNK_STAT_REG(16bit):
+ * PCIe link status register
+ */
+
+#define PCR_AB_LNK_STAT_REG 0x00000072
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_LNK_STAT_REG 0x00000082
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_LNK_STAT_REG 0x000000d2
+/* hunta0=pci_f0_config */
+
+#define PCRF_AZ_SLOT_CLK_CFG_LBN 12
+#define PCRF_AZ_SLOT_CLK_CFG_WIDTH 1
+#define PCRF_AZ_LNK_TRAIN_LBN 11
+#define PCRF_AZ_LNK_TRAIN_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_LBN 10
+#define PCRF_AB_TRAIN_ERR_WIDTH 1
+#define PCRF_AZ_LNK_WIDTH_LBN 4
+#define PCRF_AZ_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_LNK_SP_LBN 0
+#define PCRF_AZ_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_SLOT_CAP_REG(32bit):
+ * PCIe slot capabilities register
+ */
+
+#define PCR_AB_SLOT_CAP_REG 0x00000074
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_NUM_LBN 19
+#define PCRF_AB_SLOT_NUM_WIDTH 13
+#define PCRF_AB_SLOT_PWR_LIM_SCL_LBN 15
+#define PCRF_AB_SLOT_PWR_LIM_SCL_WIDTH 2
+#define PCRF_AB_SLOT_PWR_LIM_VAL_LBN 7
+#define PCRF_AB_SLOT_PWR_LIM_VAL_WIDTH 8
+#define PCRF_AB_SLOT_HP_CAP_LBN 6
+#define PCRF_AB_SLOT_HP_CAP_WIDTH 1
+#define PCRF_AB_SLOT_HP_SURP_LBN 5
+#define PCRF_AB_SLOT_HP_SURP_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_PRST_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_PRST_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_PRST_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_PRST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_LBN 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_WIDTH 1
+
+
+/*
+ * PC_SLOT_CTL_REG(16bit):
+ * PCIe slot control register
+ */
+
+#define PCR_AB_SLOT_CTL_REG 0x00000078
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_LBN 10
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_CTL_LBN 8
+#define PCRF_AB_SLOT_PWR_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_ATT_IND_CTL_LBN 6
+#define PCRF_AB_SLOT_ATT_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_HP_INT_EN_LBN 5
+#define PCRF_AB_SLOT_HP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_LBN 4
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_LBN 3
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_LBN 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_EN_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_EN_WIDTH 1
+
+
+/*
+ * PC_SLOT_STAT_REG(16bit):
+ * PCIe slot status register
+ */
+
+#define PCR_AB_SLOT_STAT_REG 0x0000007a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_PRES_DET_ST_LBN 6
+#define PCRF_AB_PRES_DET_ST_WIDTH 1
+#define PCRF_AB_MRL_SENS_ST_LBN 5
+#define PCRF_AB_MRL_SENS_ST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_WIDTH 1
+#define PCRF_AB_PWR_FLTDET_LBN 1
+#define PCRF_AB_PWR_FLTDET_WIDTH 1
+#define PCRF_AB_ATTN_BUTDET_LBN 0
+#define PCRF_AB_ATTN_BUTDET_WIDTH 1
+
+
+/*
+ * PC_MSIX_CAP_ID_REG(8bit):
+ * MSIX Capability ID
+ */
+
+#define PCR_BB_MSIX_CAP_ID_REG 0x00000090
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CAP_ID_REG 0x000000b0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_CAP_ID_LBN 0
+#define PCRF_BZ_MSIX_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSIX_NXT_PTR_REG(8bit):
+ * MSIX Capability Next Capability Ptr
+ */
+
+#define PCR_BB_MSIX_NXT_PTR_REG 0x00000091
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_NXT_PTR_REG 0x000000b1
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_NXT_PTR_LBN 0
+#define PCRF_BZ_MSIX_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSIX_CTL_REG(16bit):
+ * MSIX control register
+ */
+
+#define PCR_BB_MSIX_CTL_REG 0x00000092
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CTL_REG 0x000000b2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_EN_LBN 15
+#define PCRF_BZ_MSIX_EN_WIDTH 1
+#define PCRF_BZ_MSIX_FUNC_MASK_LBN 14
+#define PCRF_BZ_MSIX_FUNC_MASK_WIDTH 1
+#define PCRF_BZ_MSIX_TBL_SIZE_LBN 0
+#define PCRF_BZ_MSIX_TBL_SIZE_WIDTH 11
+
+
+/*
+ * PC_DEV_CAP2_REG(16bit):
+ * PCIe Device Capabilities 2
+ */
+
+#define PCR_CC_DEV_CAP2_REG 0x00000094
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_DEV_CAP2_REG 0x000000e4
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_LBN 4
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_WIDTH 4
+#define PCFE_CZ_CMPL_TIMEOUT_17000_TO_6400MS 14
+#define PCFE_CZ_CMPL_TIMEOUT_4000_TO_1300MS 13
+#define PCFE_CZ_CMPL_TIMEOUT_1000_TO_3500MS 10
+#define PCFE_CZ_CMPL_TIMEOUT_260_TO_900MS 9
+#define PCFE_CZ_CMPL_TIMEOUT_65_TO_210MS 6
+#define PCFE_CZ_CMPL_TIMEOUT_16_TO_55MS 5
+#define PCFE_CZ_CMPL_TIMEOUT_1_TO_10MS 2
+#define PCFE_CZ_CMPL_TIMEOUT_50_TO_100US 1
+#define PCFE_CZ_CMPL_TIMEOUT_DEFAULT 0
+
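+/*
+ * Editors' note (assumption based on the PCIe completion timeout ranges,
+ * not from the original header): encoding 14 selects roughly the 17 s to
+ * 64 s range and encoding 13 roughly the 4 s to 13 s range; the
+ * _17000_TO_6400MS and _4000_TO_1300MS names are kept as-is since other
+ * code may reference them.
+ */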
+
+/*
+ * PC_MSIX_TBL_BASE_REG(32bit):
+ * MSIX Capability Vector Table Base
+ */
+
+#define PCR_BB_MSIX_TBL_BASE_REG 0x00000094
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_TBL_BASE_REG 0x000000b4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_TBL_OFF_LBN 3
+#define PCRF_BZ_MSIX_TBL_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_TBL_BIR_LBN 0
+#define PCRF_BZ_MSIX_TBL_BIR_WIDTH 3
+
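+/*
+ * Editors' sketch (hypothetical helpers, not part of the original header):
+ * as in the standard MSI-X capability, the low three bits of the table
+ * base select the BAR (BIR) and the remainder is the 8-byte-aligned
+ * offset of the vector table within that BAR.
+ */
+#define	PCR_MSIX_TBL_BIR(_v)						\
+	((_v) & ((1u << PCRF_BZ_MSIX_TBL_BIR_WIDTH) - 1))
+#define	PCR_MSIX_TBL_OFF(_v)						\
+	((_v) & ~((1u << PCRF_BZ_MSIX_TBL_OFF_LBN) - 1))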
+
+/*
+ * PC_DEV_CTL2_REG(16bit):
+ * PCIe Device Control 2
+ */
+
+#define PCR_CC_DEV_CTL2_REG 0x00000098
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_DEV_CTL2_REG 0x000000e8
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_LBN 4
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_WIDTH 4
+
+
+/*
+ * PC_MSIX_PBA_BASE_REG(32bit):
+ * MSIX Capability PBA Base
+ */
+
+#define PCR_BB_MSIX_PBA_BASE_REG 0x00000098
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_PBA_BASE_REG 0x000000b8
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_PBA_OFF_LBN 3
+#define PCRF_BZ_MSIX_PBA_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_PBA_BIR_LBN 0
+#define PCRF_BZ_MSIX_PBA_BIR_WIDTH 3
+
+
+/*
+ * PC_LNK_CTL2_REG(16bit):
+ * PCIe Link Control 2
+ */
+
+#define PCR_CC_LNK_CTL2_REG 0x000000a0
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_LNK_CTL2_REG 0x000000f0
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_POLLING_DEEMPH_LVL_LBN 12
+#define PCRF_CZ_POLLING_DEEMPH_LVL_WIDTH 1
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_LBN 11
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_WIDTH 1
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_LBN 10
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TRANSMIT_MARGIN_LBN 7
+#define PCRF_CZ_TRANSMIT_MARGIN_WIDTH 3
+#define PCRF_CZ_SELECT_DEEMPH_LBN 6
+#define PCRF_CZ_SELECT_DEEMPH_WIDTH 1
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_LBN 5
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_WIDTH 1
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_LBN 4
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_LBN 0
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_WIDTH 4
+
+
+/*
+ * PC_LNK_STAT2_REG(16bit):
+ * PCIe Link Status 2
+ */
+
+#define PCR_CC_LNK_STAT2_REG 0x000000a2
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_LNK_STAT2_REG 0x000000f2
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_CURRENT_DEEMPH_LBN 0
+#define PCRF_CZ_CURRENT_DEEMPH_WIDTH 1
+
+
+/*
+ * PC_VPD_CAP_ID_REG(8bit):
+ * VPD capability ID register
+ */
+
+#define PCR_AB_VPD_CAP_ID_REG 0x000000b0
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_CAP_ID_LBN 0
+#define PCRF_AB_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_VPD_NXT_PTR_REG(8bit):
+ * VPD next item pointer
+ */
+
+#define PCR_AB_VPD_NXT_PTR_REG 0x000000b1
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_NXT_PTR_LBN 0
+#define PCRF_AB_VPD_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_VPD_ADDR_REG(16bit):
+ * VPD address register
+ */
+
+#define PCR_AB_VPD_ADDR_REG 0x000000b2
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_FLAG_LBN 15
+#define PCRF_AB_VPD_FLAG_WIDTH 1
+#define PCRF_AB_VPD_ADDR_LBN 0
+#define PCRF_AB_VPD_ADDR_WIDTH 15
+
+
+/*
+ * PC_AER_CAP_HDR_REG(32bit):
+ * AER capability header register
+ */
+
+#define PCR_AZ_AER_CAP_HDR_REG 0x00000100
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_LBN 20
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_AZ_AERCAPHDR_VER_LBN 16
+#define PCRF_AZ_AERCAPHDR_VER_WIDTH 4
+#define PCRF_AZ_AERCAPHDR_ID_LBN 0
+#define PCRF_AZ_AERCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_AER_UNCORR_ERR_STAT_REG(32bit):
+ * AER Uncorrectable error status register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_STAT_REG 0x00000104
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_STAT_LBN 19
+#define PCRF_AZ_ECRC_ERR_STAT_WIDTH 1
+#define PCRF_AZ_MALF_TLP_STAT_LBN 18
+#define PCRF_AZ_MALF_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_OVF_STAT_LBN 17
+#define PCRF_AZ_RX_OVF_STAT_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_STAT_LBN 16
+#define PCRF_AZ_UNEXP_COMP_STAT_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_STAT_LBN 15
+#define PCRF_AZ_COMP_ABRT_STAT_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_STAT_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_STAT_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_STAT_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AZ_PSON_TLP_STAT_LBN 12
+#define PCRF_AZ_PSON_TLP_STAT_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_STAT_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_STAT_LBN 0
+#define PCRF_AB_TRAIN_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_MASK_REG(32bit):
+ * AER Uncorrectable error mask register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_MASK_REG 0x00000108
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_MASK_LBN 19
+#define PCRF_AZ_ECRC_ERR_MASK_WIDTH 1
+#define PCRF_AZ_MALF_TLP_MASK_LBN 18
+#define PCRF_AZ_MALF_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_OVF_MASK_LBN 17
+#define PCRF_AZ_RX_OVF_MASK_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_MASK_LBN 16
+#define PCRF_AZ_UNEXP_COMP_MASK_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_MASK_LBN 15
+#define PCRF_AZ_COMP_ABRT_MASK_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_MASK_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_MASK_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_MASK_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AZ_PSON_TLP_MASK_LBN 12
+#define PCRF_AZ_PSON_TLP_MASK_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_MASK_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_MASK_LBN 0
+#define PCRF_AB_TRAIN_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_SEV_REG(32bit):
+ * AER Uncorrectable error severity register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_SEV_REG 0x0000010c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_SEV_LBN 19
+#define PCRF_AZ_ECRC_ERR_SEV_WIDTH 1
+#define PCRF_AZ_MALF_TLP_SEV_LBN 18
+#define PCRF_AZ_MALF_TLP_SEV_WIDTH 1
+#define PCRF_AZ_RX_OVF_SEV_LBN 17
+#define PCRF_AZ_RX_OVF_SEV_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_SEV_LBN 16
+#define PCRF_AZ_UNEXP_COMP_SEV_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_SEV_LBN 15
+#define PCRF_AZ_COMP_ABRT_SEV_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_SEV_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_SEV_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_SEV_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AZ_PSON_TLP_SEV_LBN 12
+#define PCRF_AZ_PSON_TLP_SEV_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_SEV_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_SEV_LBN 0
+#define PCRF_AB_TRAIN_ERR_SEV_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_STAT_REG(32bit):
+ * AER Correctable error status register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_STAT_REG 0x00000110
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_STAT_LBN 7
+#define PCRF_AZ_BAD_DLLP_STAT_WIDTH 1
+#define PCRF_AZ_BAD_TLP_STAT_LBN 6
+#define PCRF_AZ_BAD_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_ERR_STAT_LBN 0
+#define PCRF_AZ_RX_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_MASK_REG(32bit):
+ * AER Correctable error mask register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_MASK_REG 0x00000114
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_MASK_LBN 7
+#define PCRF_AZ_BAD_DLLP_MASK_WIDTH 1
+#define PCRF_AZ_BAD_TLP_MASK_LBN 6
+#define PCRF_AZ_BAD_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_ERR_MASK_LBN 0
+#define PCRF_AZ_RX_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_CAP_CTL_REG(32bit):
+ * AER capability and control register
+ */
+
+#define PCR_AZ_AER_CAP_CTL_REG 0x00000118
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_ECRC_CHK_EN_LBN 8
+#define PCRF_AZ_ECRC_CHK_EN_WIDTH 1
+#define PCRF_AZ_ECRC_CHK_CAP_LBN 7
+#define PCRF_AZ_ECRC_CHK_CAP_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_EN_LBN 6
+#define PCRF_AZ_ECRC_GEN_EN_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_CAP_LBN 5
+#define PCRF_AZ_ECRC_GEN_CAP_WIDTH 1
+#define PCRF_AZ_1ST_ERR_PTR_LBN 0
+#define PCRF_AZ_1ST_ERR_PTR_WIDTH 5
+
+
+/*
+ * PC_AER_HDR_LOG_REG(128bit):
+ * AER Header log register
+ */
+
+#define PCR_AZ_AER_HDR_LOG_REG 0x0000011c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_HDR_LOG_LBN 0
+#define PCRF_AZ_HDR_LOG_WIDTH 128
+
+
+/*
+ * PC_DEVSN_CAP_HDR_REG(32bit):
+ * Device serial number capability header register
+ */
+
+#define PCR_DZ_DEVSN_CAP_HDR_REG 0x00000130
+/* hunta0=pci_f0_config */
+
+#define PCR_CC_DEVSN_CAP_HDR_REG 0x00000140
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_DEVSNCAPHDR_VER_LBN 16
+#define PCRF_CZ_DEVSNCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_DEVSNCAPHDR_ID_LBN 0
+#define PCRF_CZ_DEVSNCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_DEVSN_DWORD0_REG(32bit):
+ * Device serial number DWORD0
+ */
+
+#define PCR_DZ_DEVSN_DWORD0_REG 0x00000134
+/* hunta0=pci_f0_config */
+
+#define PCR_CC_DEVSN_DWORD0_REG 0x00000144
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD0_LBN 0
+#define PCRF_CZ_DEVSN_DWORD0_WIDTH 32
+
+
+/*
+ * PC_DEVSN_DWORD1_REG(32bit):
+ * Device serial number DWORD1
+ */
+
+#define PCR_DZ_DEVSN_DWORD1_REG 0x00000138
+/* hunta0=pci_f0_config */
+
+#define PCR_CC_DEVSN_DWORD1_REG 0x00000148
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD1_LBN 0
+#define PCRF_CZ_DEVSN_DWORD1_WIDTH 32
+
+
+/*
+ * PC_ARI_CAP_HDR_REG(32bit):
+ * ARI capability header register
+ */
+
+#define PCR_DZ_ARI_CAP_HDR_REG 0x00000140
+/* hunta0=pci_f0_config */
+
+#define PCR_CC_ARI_CAP_HDR_REG 0x00000150
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_ARICAPHDR_VER_LBN 16
+#define PCRF_CZ_ARICAPHDR_VER_WIDTH 4
+#define PCRF_CZ_ARICAPHDR_ID_LBN 0
+#define PCRF_CZ_ARICAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_ARI_CAP_REG(16bit):
+ * ARI Capabilities
+ */
+
+#define PCR_DZ_ARI_CAP_REG 0x00000144
+/* hunta0=pci_f0_config */
+
+#define PCR_CC_ARI_CAP_REG 0x00000154
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CZ_ARI_NXT_FN_NUM_LBN 8
+#define PCRF_CZ_ARI_NXT_FN_NUM_WIDTH 8
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_WIDTH 1
+
+
+/*
+ * PC_ARI_CTL_REG(16bit):
+ * ARI Control
+ */
+
+#define PCR_DZ_ARI_CTL_REG 0x00000146
+/* hunta0=pci_f0_config */
+
+#define PCR_CC_ARI_CTL_REG 0x00000156
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CZ_ARI_FN_GRP_LBN 4
+#define PCRF_CZ_ARI_FN_GRP_WIDTH 3
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_WIDTH 1
+
+
+/*
+ * PC_SRIOV_CAP_HDR_REG(32bit):
+ * SRIOV capability header register
+ */
+
+#define PCR_CC_SRIOV_CAP_HDR_REG 0x00000160
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_HDR_REG 0x00000200
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_SRIOVCAPHDR_VER_LBN 16
+#define PCRF_CZ_SRIOVCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_SRIOVCAPHDR_ID_LBN 0
+#define PCRF_CZ_SRIOVCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_REG(32bit):
+ * SRIOV Capabilities
+ */
+
+#define PCR_CC_SRIOV_CAP_REG 0x00000164
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_REG 0x00000204
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_LBN 21
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_WIDTH 11
+#define PCRF_CZ_VF_MIGR_CAP_LBN 0
+#define PCRF_CZ_VF_MIGR_CAP_WIDTH 1
+
+
+/*
+ * PC_SRIOV_CTL_REG(16bit):
+ * SRIOV Control
+ */
+
+#define PCR_CC_SRIOV_CTL_REG 0x00000168
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CTL_REG 0x00000208
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_LBN 4
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_WIDTH 1
+#define PCRF_CZ_VF_MSE_LBN 3
+#define PCRF_CZ_VF_MSE_WIDTH 1
+#define PCRF_CZ_VF_MIGR_INT_EN_LBN 2
+#define PCRF_CZ_VF_MIGR_INT_EN_WIDTH 1
+#define PCRF_CZ_VF_MIGR_EN_LBN 1
+#define PCRF_CZ_VF_MIGR_EN_WIDTH 1
+#define PCRF_CZ_VF_EN_LBN 0
+#define PCRF_CZ_VF_EN_WIDTH 1
+
+
+/*
+ * PC_SRIOV_STAT_REG(16bit):
+ * SRIOV Status
+ */
+
+#define PCR_CC_SRIOV_STAT_REG 0x0000016a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_STAT_REG 0x0000020a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_STAT_LBN 0
+#define PCRF_CZ_VF_MIGR_STAT_WIDTH 1
+
+
+/*
+ * PC_SRIOV_INITIALVFS_REG(16bit):
+ * SRIOV Initial VFs
+ */
+
+#define PCR_CC_SRIOV_INITIALVFS_REG 0x0000016c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_INITIALVFS_REG 0x0000020c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_INITIALVFS_LBN 0
+#define PCRF_CZ_VF_INITIALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_TOTALVFS_REG(16bit):
+ * SRIOV Total VFs
+ */
+
+#define PCR_CC_SRIOV_TOTALVFS_REG 0x0000016e
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_TOTALVFS_REG 0x0000020e
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_TOTALVFS_LBN 0
+#define PCRF_CZ_VF_TOTALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_NUMVFS_REG(16bit):
+ * SRIOV Number of VFs
+ */
+
+#define PCR_CC_SRIOV_NUMVFS_REG 0x00000170
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_NUMVFS_REG 0x00000210
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_NUMVFS_LBN 0
+#define PCRF_CZ_VF_NUMVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_FN_DPND_LNK_REG(16bit):
+ * SRIOV Function dependency link
+ */
+
+#define PCR_CC_SRIOV_FN_DPND_LNK_REG 0x00000172
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_FN_DPND_LNK_REG 0x00000212
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_LBN 0
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_WIDTH 8
+
+
+/*
+ * PC_SRIOV_1STVF_OFFSET_REG(16bit):
+ * SRIOV First VF Offset
+ */
+
+#define PCR_CC_SRIOV_1STVF_OFFSET_REG 0x00000174
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_1STVF_OFFSET_REG 0x00000214
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_1STVF_OFFSET_LBN 0
+#define PCRF_CZ_VF_1STVF_OFFSET_WIDTH 16
+
+
+/*
+ * PC_SRIOV_VFSTRIDE_REG(16bit):
+ * SRIOV VF Stride
+ */
+
+#define PCR_CC_SRIOV_VFSTRIDE_REG 0x00000176
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_VFSTRIDE_REG 0x00000216
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_VFSTRIDE_LBN 0
+#define PCRF_CZ_VF_VFSTRIDE_WIDTH 16
+
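+/*
+ * Editors' sketch (hypothetical helper, not part of the original header):
+ * per the SR-IOV specification, VF n (counting from 0) has routing ID
+ * pf_rid + first_vf_offset + n * vf_stride, using the values read from
+ * the first-VF-offset and VF-stride registers above.
+ */
+#define	PCR_SRIOV_VF_RID(_pf_rid, _vf_offset, _vf_stride, _n)		\
+	((_pf_rid) + (_vf_offset) + (_n) * (_vf_stride))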
+
+/*
+ * PC_SRIOV_DEVID_REG(16bit):
+ * SRIOV VF Device ID
+ */
+
+#define PCR_CC_SRIOV_DEVID_REG 0x0000017a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_DEVID_REG 0x0000021a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_DEVID_LBN 0
+#define PCRF_CZ_VF_DEVID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SUP_PAGESZ_REG(16bit):
+ * SRIOV Supported Page Sizes
+ */
+
+#define PCR_CC_SRIOV_SUP_PAGESZ_REG 0x0000017c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SUP_PAGESZ_REG 0x0000021c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SUP_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SUP_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SYS_PAGESZ_REG(32bit):
+ * SRIOV System Page Size
+ */
+
+#define PCR_CC_SRIOV_SYS_PAGESZ_REG 0x00000180
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SYS_PAGESZ_REG 0x00000220
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SYS_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SYS_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_BAR0_REG(32bit):
+ * SRIOV VF Bar0
+ */
+
+#define PCR_CC_SRIOV_BAR0_REG 0x00000184
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR0_REG 0x00000224
+/* hunta0=pci_f0_config */
+
+#define PCRF_CC_VF_BAR_ADDRESS_LBN 0
+#define PCRF_CC_VF_BAR_ADDRESS_WIDTH 32
+#define PCRF_DZ_VF_BAR0_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR0_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR1_REG(32bit):
+ * SRIOV Bar1
+ */
+
+#define PCR_CC_SRIOV_BAR1_REG 0x00000188
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR1_REG 0x00000228
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR1_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR1_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR2_REG(32bit):
+ * SRIOV Bar2
+ */
+
+#define PCR_CC_SRIOV_BAR2_REG 0x0000018c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR2_REG 0x0000022c
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR2_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR2_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR3_REG(32bit):
+ * SRIOV Bar3
+ */
+
+#define PCR_CC_SRIOV_BAR3_REG 0x00000190
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR3_REG 0x00000230
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR3_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR3_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR4_REG(32bit):
+ * SRIOV Bar4
+ */
+
+#define PCR_CC_SRIOV_BAR4_REG 0x00000194
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR4_REG 0x00000234
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR4_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR4_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR5_REG(32bit):
+ * SRIOV Bar5
+ */
+
+#define PCR_CC_SRIOV_BAR5_REG 0x00000198
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR5_REG 0x00000238
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR5_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR5_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_MIBR_SARRAY_OFFSET_REG(32bit):
+ * SRIOV VF Migration State Array Offset
+ */
+
+#define PCR_CC_SRIOV_MIBR_SARRAY_OFFSET_REG 0x0000019c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_MIBR_SARRAY_OFFSET_REG 0x0000023c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_OFFSET_LBN 3
+#define PCRF_CZ_VF_MIGR_OFFSET_WIDTH 29
+#define PCRF_CZ_VF_MIGR_BIR_LBN 0
+#define PCRF_CZ_VF_MIGR_BIR_WIDTH 3
+
+
+/*
+ * PC_LTR_CAP_HDR_REG(32bit):
+ * Latency Tolerance Reporting Cap Header Reg
+ */
+
+#define PCR_DZ_LTR_CAP_HDR_REG 0x00000240
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_NXT_PTR_LBN 20
+#define PCRF_DZ_LTR_NXT_PTR_WIDTH 12
+#define PCRF_DZ_LTR_VERSION_LBN 16
+#define PCRF_DZ_LTR_VERSION_WIDTH 4
+#define PCRF_DZ_LTR_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_LTR_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_LTR_MAX_SNOOP_REG(32bit):
+ * LTR Maximum Snoop/No Snoop Register
+ */
+
+#define PCR_DZ_LTR_MAX_SNOOP_REG 0x00000244
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_LBN 26
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_LBN 16
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_WIDTH 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_LBN 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_LBN 0
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_WIDTH 10
+
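+/*
+ * Editors' note (assumption based on the PCIe LTR definition, not from
+ * the original header): each latency is LAT * 2^(5 * SCALE) nanoseconds,
+ * so a SCALE of 2 expresses the latency in units of 1024 ns.
+ */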
+
+/*
+ * PC_TPH_CAP_HDR_REG(32bit):
+ * TPH Capability Header Register
+ */
+
+#define PCR_DZ_TPH_CAP_HDR_REG 0x00000274
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_NXT_PTR_LBN 20
+#define PCRF_DZ_TPH_NXT_PTR_WIDTH 12
+#define PCRF_DZ_TPH_VERSION_LBN 16
+#define PCRF_DZ_TPH_VERSION_WIDTH 4
+#define PCRF_DZ_TPH_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_TPH_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_TPH_REQ_CAP_REG(32bit):
+ * TPH Requester Capability Register
+ */
+
+#define PCR_DZ_TPH_REQ_CAP_REG 0x00000278
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_ST_TBLE_SIZE_LBN 16
+#define PCRF_DZ_ST_TBLE_SIZE_WIDTH 11
+#define PCRF_DZ_ST_TBLE_LOC_LBN 9
+#define PCRF_DZ_ST_TBLE_LOC_WIDTH 2
+#define PCRF_DZ_EXT_TPH_MODE_SUP_LBN 8
+#define PCRF_DZ_EXT_TPH_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_DEV_MODE_SUP_LBN 2
+#define PCRF_DZ_TPH_DEV_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_LBN 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_NOST_MODE_SUP_LBN 0
+#define PCRF_DZ_TPH_NOST_MODE_SUP_WIDTH 1
+
+
+/*
+ * PC_TPH_REQ_CTL_REG(32bit):
+ * TPH Requester Control Register
+ */
+
+#define PCR_DZ_TPH_REQ_CTL_REG 0x0000027c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_REQ_ENABLE_LBN 8
+#define PCRF_DZ_TPH_REQ_ENABLE_WIDTH 2
+#define PCRF_DZ_TPH_ST_MODE_LBN 0
+#define PCRF_DZ_TPH_ST_MODE_WIDTH 3
+
+
+/*
+ * PC_SEC_PCIE_CAP_REG(32bit):
+ * Secondary PCIE Capability Register
+ */
+
+#define PCR_DZ_SEC_PCIE_CAP_REG 0x00000300
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_SEC_NXT_PTR_LBN 20
+#define PCRF_DZ_SEC_NXT_PTR_WIDTH 12
+#define PCRF_DZ_SEC_VERSION_LBN 16
+#define PCRF_DZ_SEC_VERSION_WIDTH 4
+#define PCRF_DZ_SEC_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_SEC_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_LINK_CONTROL3_REG(32bit):
+ * Link Control 3.
+ */
+
+#define PCR_DZ_LINK_CONTROL3_REG 0x00000304
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LINK_EQ_INT_EN_LBN 1
+#define PCRF_DZ_LINK_EQ_INT_EN_WIDTH 1
+#define PCRF_DZ_PERFORM_EQL_LBN 0
+#define PCRF_DZ_PERFORM_EQL_WIDTH 1
+
+
+/*
+ * PC_LANE_ERROR_STAT_REG(32bit):
+ * Lane Error Status Register.
+ */
+
+#define PCR_DZ_LANE_ERROR_STAT_REG 0x00000308
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE_STATUS_LBN 0
+#define PCRF_DZ_LANE_STATUS_WIDTH 8
+
+
+/*
+ * PC_LANE01_EQU_CONTROL_REG(32bit):
+ * Lanes 0,1 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE01_EQU_CONTROL_REG 0x0000030c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE1_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE1_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE0_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE0_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_LANE23_EQU_CONTROL_REG(32bit):
+ * Lanes 2,3 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE23_EQU_CONTROL_REG 0x00000310
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE3_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE3_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE2_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE2_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_LANE45_EQU_CONTROL_REG(32bit):
+ * Lanes 4,5 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE45_EQU_CONTROL_REG 0x00000314
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE5_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE5_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE4_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE4_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_LANE67_EQU_CONTROL_REG(32bit):
+ * Lanes 6,7 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE67_EQU_CONTROL_REG 0x00000318
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE7_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE7_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE6_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE6_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_ACK_LAT_TMR_REG(32bit):
+ * ACK latency timer & replay timer register
+ */
+
+#define PCR_AC_ACK_LAT_TMR_REG 0x00000700
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RT_LBN 16
+#define PCRF_AC_RT_WIDTH 16
+#define PCRF_AC_ALT_LBN 0
+#define PCRF_AC_ALT_WIDTH 16
+
+
+/*
+ * PC_OTHER_MSG_REG(32bit):
+ * Other message register
+ */
+
+#define PCR_AC_OTHER_MSG_REG 0x00000704
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_OM_CRPT3_LBN 24
+#define PCRF_AC_OM_CRPT3_WIDTH 8
+#define PCRF_AC_OM_CRPT2_LBN 16
+#define PCRF_AC_OM_CRPT2_WIDTH 8
+#define PCRF_AC_OM_CRPT1_LBN 8
+#define PCRF_AC_OM_CRPT1_WIDTH 8
+#define PCRF_AC_OM_CRPT0_LBN 0
+#define PCRF_AC_OM_CRPT0_WIDTH 8
+
+
+/*
+ * PC_FORCE_LNK_REG(24bit):
+ * Port force link register
+ */
+
+#define PCR_AC_FORCE_LNK_REG 0x00000708
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_LFS_LBN 16
+#define PCRF_AC_LFS_WIDTH 6
+#define PCRF_AC_FL_LBN 15
+#define PCRF_AC_FL_WIDTH 1
+#define PCRF_AC_LN_LBN 0
+#define PCRF_AC_LN_WIDTH 8
+
+
+/*
+ * PC_ACK_FREQ_REG(32bit):
+ * ACK frequency register
+ */
+
+#define PCR_AC_ACK_FREQ_REG 0x0000070c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_LBN 30
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_WIDTH 1
+#define PCRF_AC_L1_ENTR_LAT_LBN 27
+#define PCRF_AC_L1_ENTR_LAT_WIDTH 3
+#define PCRF_AC_L0_ENTR_LAT_LBN 24
+#define PCRF_AC_L0_ENTR_LAT_WIDTH 3
+#define PCRF_CC_COMM_NFTS_LBN 16
+#define PCRF_CC_COMM_NFTS_WIDTH 8
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_LBN 16
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_WIDTH 3
+#define PCRF_AC_MAX_FTS_LBN 8
+#define PCRF_AC_MAX_FTS_WIDTH 8
+#define PCRF_AC_ACK_FREQ_LBN 0
+#define PCRF_AC_ACK_FREQ_WIDTH 8
+
+
+/*
+ * PC_PORT_LNK_CTL_REG(32bit):
+ * Port link control register
+ */
+
+#define PCR_AC_PORT_LNK_CTL_REG 0x00000710
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AB_LRE_LBN 27
+#define PCRF_AB_LRE_WIDTH 1
+#define PCRF_AB_ESYNC_LBN 26
+#define PCRF_AB_ESYNC_WIDTH 1
+#define PCRF_AB_CRPT_LBN 25
+#define PCRF_AB_CRPT_WIDTH 1
+#define PCRF_AB_XB_LBN 24
+#define PCRF_AB_XB_WIDTH 1
+#define PCRF_AC_LC_LBN 16
+#define PCRF_AC_LC_WIDTH 6
+#define PCRF_AC_LDR_LBN 8
+#define PCRF_AC_LDR_WIDTH 4
+#define PCRF_AC_FLM_LBN 7
+#define PCRF_AC_FLM_WIDTH 1
+#define PCRF_AC_LKD_LBN 6
+#define PCRF_AC_LKD_WIDTH 1
+#define PCRF_AC_DLE_LBN 5
+#define PCRF_AC_DLE_WIDTH 1
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_LBN 4
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_AC_RA_LBN 3
+#define PCRF_AC_RA_WIDTH 1
+#define PCRF_AC_LE_LBN 2
+#define PCRF_AC_LE_WIDTH 1
+#define PCRF_AC_SD_LBN 1
+#define PCRF_AC_SD_WIDTH 1
+#define PCRF_AC_OMR_LBN 0
+#define PCRF_AC_OMR_WIDTH 1
+
+
+/*
+ * PC_LN_SKEW_REG(32bit):
+ * Lane skew register
+ */
+
+#define PCR_AC_LN_SKEW_REG 0x00000714
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_DIS_LBN 31
+#define PCRF_AC_DIS_WIDTH 1
+#define PCRF_AB_RST_LBN 30
+#define PCRF_AB_RST_WIDTH 1
+#define PCRF_AC_AD_LBN 25
+#define PCRF_AC_AD_WIDTH 1
+#define PCRF_AC_FCD_LBN 24
+#define PCRF_AC_FCD_WIDTH 1
+#define PCRF_AC_LS2_LBN 16
+#define PCRF_AC_LS2_WIDTH 8
+#define PCRF_AC_LS1_LBN 8
+#define PCRF_AC_LS1_WIDTH 8
+#define PCRF_AC_LS0_LBN 0
+#define PCRF_AC_LS0_WIDTH 8
+
+
+/*
+ * PC_SYM_NUM_REG(16bit):
+ * Symbol number register
+ */
+
+#define PCR_AC_SYM_NUM_REG 0x00000718
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_MAX_FUNCTIONS_LBN 29
+#define PCRF_CC_MAX_FUNCTIONS_WIDTH 3
+#define PCRF_CC_FC_WATCHDOG_TMR_LBN 24
+#define PCRF_CC_FC_WATCHDOG_TMR_WIDTH 5
+#define PCRF_CC_ACK_NAK_TMR_MOD_LBN 19
+#define PCRF_CC_ACK_NAK_TMR_MOD_WIDTH 5
+#define PCRF_CC_REPLAY_TMR_MOD_LBN 14
+#define PCRF_CC_REPLAY_TMR_MOD_WIDTH 5
+#define PCRF_AB_ES_LBN 12
+#define PCRF_AB_ES_WIDTH 3
+#define PCRF_AB_SYM_NUM_REG_RSVD0_LBN 11
+#define PCRF_AB_SYM_NUM_REG_RSVD0_WIDTH 1
+#define PCRF_CC_NUM_SKP_SYMS_LBN 8
+#define PCRF_CC_NUM_SKP_SYMS_WIDTH 3
+#define PCRF_AB_TS2_LBN 4
+#define PCRF_AB_TS2_WIDTH 4
+#define PCRF_AC_TS1_LBN 0
+#define PCRF_AC_TS1_WIDTH 4
+
+
+/*
+ * PC_SYM_TMR_FLT_MSK_REG(16bit):
+ * Symbol timer and Filter Mask Register
+ */
+
+#define PCR_CC_SYM_TMR_FLT_MSK_REG 0x0000071c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK1_LBN 16
+#define PCRF_CC_DEFAULT_FLT_MSK1_WIDTH 16
+#define PCRF_CC_FC_WDOG_TMR_DIS_LBN 15
+#define PCRF_CC_FC_WDOG_TMR_DIS_WIDTH 1
+#define PCRF_CC_SI1_LBN 8
+#define PCRF_CC_SI1_WIDTH 3
+#define PCRF_CC_SKIP_INT_VAL_LBN 0
+#define PCRF_CC_SKIP_INT_VAL_WIDTH 11
+#define PCRF_CC_SI0_LBN 0
+#define PCRF_CC_SI0_WIDTH 8
+
+
+/*
+ * PC_SYM_TMR_REG(16bit):
+ * Symbol timer register
+ */
+
+#define PCR_AB_SYM_TMR_REG 0x0000071c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_ET_LBN 11
+#define PCRF_AB_ET_WIDTH 4
+#define PCRF_AB_SI1_LBN 8
+#define PCRF_AB_SI1_WIDTH 3
+#define PCRF_AB_SI0_LBN 0
+#define PCRF_AB_SI0_WIDTH 8
+
+
+/*
+ * PC_PHY_STAT_REG(32bit):
+ * PHY status register
+ */
+
+#define PCR_AB_PHY_STAT_REG 0x00000720
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_STAT_REG 0x00000810
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_SSL_LBN 3
+#define PCRF_AC_SSL_WIDTH 1
+#define PCRF_AC_SSR_LBN 2
+#define PCRF_AC_SSR_WIDTH 1
+#define PCRF_AC_SSCL_LBN 1
+#define PCRF_AC_SSCL_WIDTH 1
+#define PCRF_AC_SSCD_LBN 0
+#define PCRF_AC_SSCD_WIDTH 1
+
+
+/*
+ * PC_FLT_MSK_REG(32bit):
+ * Filter Mask Register 2
+ */
+
+#define PCR_CC_FLT_MSK_REG 0x00000720
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK2_LBN 0
+#define PCRF_CC_DEFAULT_FLT_MSK2_WIDTH 32
+
+
+/*
+ * PC_PHY_CTL_REG(32bit):
+ * PHY control register
+ */
+
+#define PCR_AB_PHY_CTL_REG 0x00000724
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_CTL_REG 0x00000814
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_BD_LBN 31
+#define PCRF_AC_BD_WIDTH 1
+#define PCRF_AC_CDS_LBN 30
+#define PCRF_AC_CDS_WIDTH 1
+#define PCRF_AC_DWRAP_LB_LBN 29
+#define PCRF_AC_DWRAP_LB_WIDTH 1
+#define PCRF_AC_EBD_LBN 28
+#define PCRF_AC_EBD_WIDTH 1
+#define PCRF_AC_SNR_LBN 27
+#define PCRF_AC_SNR_WIDTH 1
+#define PCRF_AC_RX_NOT_DET_LBN 2
+#define PCRF_AC_RX_NOT_DET_WIDTH 1
+#define PCRF_AC_FORCE_LOS_VAL_LBN 1
+#define PCRF_AC_FORCE_LOS_VAL_WIDTH 1
+#define PCRF_AC_FORCE_LOS_EN_LBN 0
+#define PCRF_AC_FORCE_LOS_EN_WIDTH 1
+
+
+/*
+ * PC_DEBUG0_REG(32bit):
+ * Debug register 0
+ */
+
+#define PCR_AC_DEBUG0_REG 0x00000728
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI03_LBN 24
+#define PCRF_AC_CDI03_WIDTH 8
+#define PCRF_AC_CDI0_LBN 0
+#define PCRF_AC_CDI0_WIDTH 32
+#define PCRF_AC_CDI02_LBN 16
+#define PCRF_AC_CDI02_WIDTH 8
+#define PCRF_AC_CDI01_LBN 8
+#define PCRF_AC_CDI01_WIDTH 8
+#define PCRF_AC_CDI00_LBN 0
+#define PCRF_AC_CDI00_WIDTH 8
+
+
+/*
+ * PC_DEBUG1_REG(32bit):
+ * Debug register 1
+ */
+
+#define PCR_AC_DEBUG1_REG 0x0000072c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI13_LBN 24
+#define PCRF_AC_CDI13_WIDTH 8
+#define PCRF_AC_CDI1_LBN 0
+#define PCRF_AC_CDI1_WIDTH 32
+#define PCRF_AC_CDI12_LBN 16
+#define PCRF_AC_CDI12_WIDTH 8
+#define PCRF_AC_CDI11_LBN 8
+#define PCRF_AC_CDI11_WIDTH 8
+#define PCRF_AC_CDI10_LBN 0
+#define PCRF_AC_CDI10_WIDTH 8
+
+
+/*
+ * PC_XPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XPFCC_STAT_REG
+ */
+
+#define PCR_AC_XPFCC_STAT_REG 0x00000730
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XPDC_LBN 12
+#define PCRF_AC_XPDC_WIDTH 8
+#define PCRF_AC_XPHC_LBN 0
+#define PCRF_AC_XPHC_WIDTH 12
+
+
+/*
+ * PC_XNPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XNPFCC_STAT_REG
+ */
+
+#define PCR_AC_XNPFCC_STAT_REG 0x00000734
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XNPDC_LBN 12
+#define PCRF_AC_XNPDC_WIDTH 8
+#define PCRF_AC_XNPHC_LBN 0
+#define PCRF_AC_XNPHC_WIDTH 12
+
+
+/*
+ * PC_XCFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XCFCC_STAT_REG
+ */
+
+#define PCR_AC_XCFCC_STAT_REG 0x00000738
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XCDC_LBN 12
+#define PCRF_AC_XCDC_WIDTH 8
+#define PCRF_AC_XCHC_LBN 0
+#define PCRF_AC_XCHC_WIDTH 12
+
+
+/*
+ * PC_Q_STAT_REG(8bit):
+ * documentation to be written for sum_PC_Q_STAT_REG
+ */
+
+#define PCR_AC_Q_STAT_REG 0x0000073c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RQNE_LBN 2
+#define PCRF_AC_RQNE_WIDTH 1
+#define PCRF_AC_XRNE_LBN 1
+#define PCRF_AC_XRNE_WIDTH 1
+#define PCRF_AC_RCNR_LBN 0
+#define PCRF_AC_RCNR_WIDTH 1
+
+
+/*
+ * PC_VC_XMIT_ARB1_REG(32bit):
+ * VC Transmit Arbitration Register 1
+ */
+
+#define PCR_CC_VC_XMIT_ARB1_REG 0x00000740
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC_XMIT_ARB2_REG(32bit):
+ * VC Transmit Arbitration Register 2
+ */
+
+#define PCR_CC_VC_XMIT_ARB2_REG 0x00000744
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_P_RQ_CTL_REG(32bit):
+ * VC0 Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_P_RQ_CTL_REG 0x00000748
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_NP_RQ_CTL_REG(32bit):
+ * VC0 Non-Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_NP_RQ_CTL_REG 0x0000074c
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_C_RQ_CTL_REG(32bit):
+ * VC0 Completion Receive Queue Control
+ */
+
+#define PCR_CC_VC0_C_RQ_CTL_REG 0x00000750
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_GEN2_REG(32bit):
+ * Gen2 Register
+ */
+
+#define PCR_CC_GEN2_REG 0x0000080c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_SET_DE_EMPHASIS_LBN 20
+#define PCRF_CC_SET_DE_EMPHASIS_WIDTH 1
+#define PCRF_CC_CFG_TX_COMPLIANCE_LBN 19
+#define PCRF_CC_CFG_TX_COMPLIANCE_WIDTH 1
+#define PCRF_CC_CFG_TX_SWING_LBN 18
+#define PCRF_CC_CFG_TX_SWING_WIDTH 1
+#define PCRF_CC_DIR_SPEED_CHANGE_LBN 17
+#define PCRF_CC_DIR_SPEED_CHANGE_WIDTH 1
+#define PCRF_CC_LANE_ENABLE_LBN 8
+#define PCRF_CC_LANE_ENABLE_WIDTH 9
+#define PCRF_CC_NUM_FTS_LBN 0
+#define PCRF_CC_NUM_FTS_WIDTH 8
+
+
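+/*
+ * Illustrative sketch (not part of the original header): decoding a
+ * PC_GEN2_REG field from a raw 32-bit register value using the
+ * LBN/WIDTH pairs above.  The helper name pcr_gen2_num_fts is
+ * hypothetical and exists only for this demonstration.
+ */
+#if 0
+static unsigned int
+pcr_gen2_num_fts(uint32_t gen2)
+{
+	/* NUM_FTS occupies bits [0,7]: shift down, then mask to width */
+	return ((gen2 >> PCRF_CC_NUM_FTS_LBN) &
+	    ((1u << PCRF_CC_NUM_FTS_WIDTH) - 1));
+}
+#endif
+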
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_REGS_PCI_H */
diff --git a/sys/dev/sfxge/common/efx_rx.c b/sys/dev/sfxge/common/efx_rx.c
new file mode 100644
index 0000000..7d3d7c1
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_rx.c
@@ -0,0 +1,816 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+ __checkReturn int
+efx_rx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ unsigned int index;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_RX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+
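+	/*
+	 * Defaults: no descriptor push, no hash insertion, and a
+	 * 12KB user buffer size (the field is in 32-byte units).
+	 */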
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Zero the RSS table */
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS;
+ index++) {
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword);
+ }
+
+ enp->en_mod_flags |= EFX_MOD_RX;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_RX_HDR_SPLIT
+ __checkReturn int
+efx_rx_hdr_split_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int hdr_buf_size,
+ __in unsigned int pld_buf_size)
+{
+ unsigned int nhdr32;
+ unsigned int npld32;
+ efx_oword_t oword;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ EFSYS_ASSERT3U(enp->en_family, >=, EFX_FAMILY_SIENA);
+
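+	/*
+	 * Buffer sizes are programmed in 32-byte units and must fit
+	 * within their register fields.
+	 */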
+ nhdr32 = hdr_buf_size / 32;
+ if ((nhdr32 == 0) ||
+ (nhdr32 >= (1 << FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH)) ||
+ ((hdr_buf_size % 32) != 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ npld32 = pld_buf_size / 32;
+ if ((npld32 == 0) ||
+ (npld32 >= (1 << FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH)) ||
+ ((pld_buf_size % 32) != 0)) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (enp->en_rx_qcount > 0) {
+ rc = EBUSY;
+ goto fail3;
+ }
+
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_HDR_SPLIT_EN, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE, nhdr32);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE, npld32);
+
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_HDR_SPLIT */
+
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn int
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ unsigned int nbuf32;
+ efx_oword_t oword;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ EFSYS_ASSERT3U(enp->en_family, >=, EFX_FAMILY_FALCON);
+
+ nbuf32 = buf_size / 32;
+ if ((nbuf32 == 0) ||
+ (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) ||
+ ((buf_size % 32) != 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_rx_qcount > 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+
+ /* Set scatter buffer size */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Enable scatter for packets not matching a filter */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#define EFX_RX_LFSR_HASH(_enp, _insert) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ if ((_enp)->en_family == EFX_FAMILY_SIENA) { \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ } \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \
+ (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \
+ (_tcp) ? 0 : 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \
+ do { \
+ efx_oword_t oword; \
+ \
+ if ((_enp)->en_family == EFX_FAMILY_FALCON) { \
+ (_rc) = ((_ip) || (_tcp)) ? ENOTSUP : 0; \
+ break; \
+ } \
+ \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 0 : 1); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ \
+ (_rc) = 0; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn int
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ EFSYS_ASSERT3U(enp->en_family, >=, EFX_FAMILY_FALCON);
+
+ switch (alg) {
+ case EFX_RX_HASHALG_LFSR:
+ EFX_RX_LFSR_HASH(enp, insert);
+ break;
+
+ case EFX_RX_HASHALG_TOEPLITZ:
+ EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
+ type & (1 << EFX_RX_HASH_IPV4),
+ type & (1 << EFX_RX_HASH_TCPIPV4));
+
+ EFX_RX_TOEPLITZ_IPV6_HASH(enp,
+ type & (1 << EFX_RX_HASH_IPV6),
+ type & (1 << EFX_RX_HASH_TCPIPV6),
+ rc);
+ if (rc != 0)
+ goto fail1;
+
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
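+	/* Fall back to LFSR hashing without header insertion */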
+ EFX_RX_LFSR_HASH(enp, B_FALSE);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn int
+efx_rx_scale_toeplitz_ipv4_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ unsigned int byte;
+ unsigned int offset;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ byte = 0;
+
+ /* Write toeplitz hash key */
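+	/*
+	 * The key is laid out most-significant byte first: key[0]
+	 * lands in the top byte of the RSS_TKEY field.
+	 */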
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+
+ byte = 0;
+
+ /* Verify toeplitz hash key */
+ EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn int
+efx_rx_scale_toeplitz_ipv6_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ unsigned int byte;
+ int offset;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ byte = 0;
+
+ /* Write toeplitz hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+
+ /* Write toeplitz hash key 2 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+
+ /* Write toeplitz hash key 1 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+
+ byte = 0;
+
+ /* Verify toeplitz hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+
+ /* Verify toeplitz hash key 2 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ /* Verify toeplitz hash key 1 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn int
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ int index;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS);
+ EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH));
+
+ if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
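+	/* Replicate the n supplied entries across the full table */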
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) {
+ uint32_t byte;
+
+ /* Calculate the entry to place in the table */
+ byte = (uint32_t)table[index % n];
+
+ EFSYS_PROBE2(table, int, index, uint32_t, byte);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte);
+
+ /* Write the table */
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword);
+ }
+
+ for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) {
+ uint32_t byte;
+
+		/* Calculate the expected entry for verification */
+ byte = (uint32_t)table[index % n];
+
+ /* Read the table */
+ EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword);
+
+ /* Verify the entry */
+ if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_FILTER
+extern __checkReturn int
+efx_rx_filter_insert(
+ __in efx_rxq_t *erp,
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_dmaq_id = (uint16_t)erp->er_index;
+ return efx_filter_insert_filter(erp->er_enp, spec, B_FALSE);
+}
+#endif
+
+#if EFSYS_OPT_FILTER
+extern __checkReturn int
+efx_rx_filter_remove(
+ __in efx_rxq_t *erp,
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_dmaq_id = (uint16_t)erp->er_index;
+ return efx_filter_remove_filter(erp->er_enp, spec);
+}
+#endif
+
+extern void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + n, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < n; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
+ EFX_POPULATE_QWORD_3(qword,
+ FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size),
+ FSF_AZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ FSF_AZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
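+/*
+ * Usage sketch (hypothetical, not part of the API): callers track
+ * "added" and "completed" as free-running counters, so the queue
+ * occupancy is (added - completed) even across counter wrap.  The
+ * helper name example_rx_refill and the ndescs parameter below are
+ * illustrative only.
+ */
+#if 0
+static void
+example_rx_refill(efx_rxq_t *erp, efsys_dma_addr_t *addrs, size_t bufsize,
+    unsigned int nbufs, unsigned int ndescs, unsigned int *addedp,
+    unsigned int completed)
+{
+	/* Post only what the queue can accept without overfilling */
+	if (*addedp - completed + nbufs <= EFX_RXQ_LIMIT(ndescs)) {
+		efx_rx_qpost(erp, addrs, bufsize, nbufs, completed,
+		    *addedp);
+		*addedp += nbufs;
+
+		/* Ring the doorbell for the newly posted descriptors */
+		efx_rx_qpush(erp, *addedp);
+	}
+}
+#endif
+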
+ void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added)
+{
+ efx_nic_t *enp = erp->er_enp;
+ uint32_t wptr;
+ efx_oword_t oword;
+ efx_dword_t dword;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFSYS_PIO_WRITE_BARRIER();
+
+ /* Push the populated descriptors out */
+ wptr = added & erp->er_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0,
+ erp->er_index, &dword, B_FALSE);
+}
+
+ void
+efx_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ label = erp->er_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword);
+}
+
+ void
+efx_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword);
+}
+
+ __checkReturn int
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rxq_t *erp;
+ efx_oword_t oword;
+ uint32_t size;
+ boolean_t split;
+ boolean_t jumbo;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ if (!ISP2(n) || !(n & EFX_RXQ_NDESCS_MASK)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
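+	/*
+	 * Compute the FRF_AZ_RX_DESCQ_SIZE encoding:
+	 * size == log2(n / EFX_RXQ_MINNDESCS).
+	 */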
+ for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_RXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ switch (type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ split = B_FALSE;
+ jumbo = B_FALSE;
+ break;
+
+#if EFSYS_OPT_RX_HDR_SPLIT
+ case EFX_RXQ_TYPE_SPLIT_HEADER:
+ if ((enp->en_family < EFX_FAMILY_SIENA) || ((index & 1) != 0)) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ split = B_TRUE;
+ jumbo = B_TRUE;
+ break;
+
+ case EFX_RXQ_TYPE_SPLIT_PAYLOAD:
+ if ((enp->en_family < EFX_FAMILY_SIENA) || ((index & 1) == 0)) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ split = B_FALSE;
+ jumbo = B_TRUE;
+ break;
+#endif /* EFSYS_OPT_RX_HDR_SPLIT */
+
+#if EFSYS_OPT_RX_SCATTER
+ case EFX_RXQ_TYPE_SCATTER:
+ if (enp->en_family < EFX_FAMILY_SIENA) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ split = B_FALSE;
+ jumbo = B_TRUE;
+ break;
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ default:
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Allocate an RXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp);
+
+ if (erp == NULL) {
+ rc = ENOMEM;
+ goto fail5;
+ }
+
+ erp->er_magic = EFX_RXQ_MAGIC;
+ erp->er_enp = enp;
+ erp->er_index = index;
+ erp->er_mask = n - 1;
+ erp->er_esmp = esmp;
+
+ /* Set up the new descriptor queue */
+ EFX_POPULATE_OWORD_10(oword,
+ FRF_CZ_RX_HDR_SPLIT, split,
+ FRF_AZ_RX_ISCSI_DDIG_EN, 0,
+ FRF_AZ_RX_ISCSI_HDIG_EN, 0,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL, label,
+ FRF_AZ_RX_DESCQ_SIZE, size,
+ FRF_AZ_RX_DESCQ_TYPE, 0,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword);
+
+ enp->en_rx_qcount++;
+ *erpp = erp;
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword);
+
+ /* Free the RXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+ void
+efx_rx_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0);
+
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+}
diff --git a/sys/dev/sfxge/common/efx_sram.c b/sys/dev/sfxge/common/efx_sram.c
new file mode 100644
index 0000000..16a3229
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_sram.c
@@ -0,0 +1,294 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+ __checkReturn int
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n)
+{
+ efx_qword_t qword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+ efsys_dma_addr_t addr;
+ efx_oword_t oword;
+ unsigned int count;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (stop >= EFX_BUF_TBL_SIZE) {
+ rc = EFBIG;
+ goto fail1;
+ }
+
+ /* Add the entries into the buffer table */
+ addr = EFSYS_MEM_ADDR(esmp);
+ for (id = start; id != stop; id++) {
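+		/*
+		 * Each entry holds a 4KB-aligned DMA address: only
+		 * bits [12,63] are stored (addr >> 12), split across
+		 * the FBUF_DW0/DW1 fields.
+		 */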
+ EFX_POPULATE_QWORD_5(qword,
+ FRF_AZ_IP_DAT_BUF_SIZE, 0, FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF_DW0,
+ (uint32_t)((addr >> 12) & 0xffffffff),
+ FRF_AZ_BUF_ADR_FBUF_DW1,
+ (uint32_t)((addr >> 12) >> 32),
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_FULL_TBL,
+ id, &qword);
+
+ addr += EFX_BUF_SIZE;
+ }
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ /* Flush the write buffer */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_BUF_UPD_CMD, 1,
+ FRF_AZ_BUF_CLR_CMD, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+ /* Poll for the last entry being written to the buffer table */
+ EFSYS_ASSERT3U(id, ==, stop);
+ addr -= EFX_BUF_SIZE;
+
+ count = 0;
+ do {
+ EFSYS_PROBE1(wait, unsigned int, count);
+
+ /* Spin for 1 ms */
+ EFSYS_SPIN(1000);
+
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) ==
+ (uint32_t)((addr >> 12) & 0xffffffff) &&
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) ==
+ (uint32_t)((addr >> 12) >> 32))
+ goto verify;
+
+ } while (++count < 100);
+
+ rc = ETIMEDOUT;
+ goto fail2;
+
+verify:
+ /* Verify the rest of the entries in the buffer table */
+ while (--id != start) {
+ addr -= EFX_BUF_SIZE;
+
+ /* Read the buffer table entry */
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) !=
+ (uint32_t)((addr >> 12) & 0xffffffff) ||
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) !=
+ (uint32_t)((addr >> 12) >> 32)) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ id = stop;
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, id - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, stop - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+}
+
+
+#if EFSYS_OPT_DIAG
+
+static void
+efx_sram_byte_increment_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ _NOTE(ARGUNUSED(negate))
+
+ for (index = 0; index < sizeof (efx_qword_t); index++)
+ eqp->eq_u8[index] = offset + index;
+}
+
+static void
+efx_sram_all_the_same_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ if (negate)
+ EFX_SET_QWORD(*eqp);
+ else
+ EFX_ZERO_QWORD(*eqp);
+}
+
+static void
+efx_sram_bit_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x55555555 : 0xaaaaaaaa,
+ EFX_DWORD_1, (negate) ? 0x55555555 : 0xaaaaaaaa);
+}
+
+static void
+efx_sram_byte_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x00ff00ff : 0xff00ff00,
+ EFX_DWORD_1, (negate) ? 0x00ff00ff : 0xff00ff00);
+}
+
+static void
+efx_sram_byte_changing_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
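+	/*
+	 * The modulo-257 byte sequence repeats out of phase with any
+	 * power-of-two structure, so byte values drift across rows.
+	 */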
+ for (index = 0; index < sizeof (efx_qword_t); index++) {
+ uint8_t byte;
+
+ if (offset / 256 == 0)
+ byte = (uint8_t)((offset % 257) % 256);
+ else
+ byte = (uint8_t)(~((offset - 8) % 257) % 256);
+
+ eqp->eq_u8[index] = (negate) ? ~byte : byte;
+ }
+}
+
+static void
+efx_sram_bit_sweep_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+
+ if (negate) {
+ EFX_SET_QWORD(*eqp);
+ EFX_CLEAR_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ } else {
+ EFX_ZERO_QWORD(*eqp);
+ EFX_SET_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ }
+}
+
+efx_sram_pattern_fn_t __cs __efx_sram_pattern_fns[] = {
+ efx_sram_byte_increment_set,
+ efx_sram_all_the_same_set,
+ efx_sram_bit_alternate_set,
+ efx_sram_byte_alternate_set,
+ efx_sram_byte_changing_set,
+ efx_sram_bit_sweep_set
+};
+
+ __checkReturn int
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type)
+{
+ efx_nic_ops_t *enop = enp->en_enop;
+ efx_sram_pattern_fn_t func;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ /* Select pattern generator */
+ EFSYS_ASSERT3U(type, <, EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[type];
+
+ return (enop->eno_sram_test(enp, func));
+}
+
+#endif /* EFSYS_OPT_DIAG */
diff --git a/sys/dev/sfxge/common/efx_tx.c b/sys/dev/sfxge/common/efx_tx.c
new file mode 100644
index 0000000..0dc347c
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_tx.c
@@ -0,0 +1,430 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+ __checkReturn int
+efx_tx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_TX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ /*
+ * Disable the timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level (although always allow a
+ * minimal trickle).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+
+ /*
+ * Filter all packets less than 14 bytes to avoid parsing
+ * errors.
+ */
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+
+ /*
+ * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ enp->en_mod_flags |= EFX_MOD_TX;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_FILTER
+extern __checkReturn int
+efx_tx_filter_insert(
+ __in efx_txq_t *etp,
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_dmaq_id = (uint16_t)etp->et_index;
+ return efx_filter_insert_filter(etp->et_enp, spec, B_FALSE);
+}
+#endif
+
+#if EFSYS_OPT_FILTER
+extern __checkReturn int
+efx_tx_filter_remove(
+ __in efx_txq_t *etp,
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_dmaq_id = (uint16_t)etp->et_index;
+ return efx_filter_remove_filter(etp->et_enp, spec);
+}
+#endif
+
+#define EFX_TX_DESC(_etp, _addr, _size, _eop, _added) \
+ do { \
+ unsigned int id; \
+ size_t offset; \
+ efx_qword_t qword; \
+ \
+ id = (_added)++ & (_etp)->et_mask; \
+ offset = id * sizeof (efx_qword_t); \
+ \
+ EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index, \
+ unsigned int, id, efsys_dma_addr_t, (_addr), \
+ size_t, (_size), boolean_t, (_eop)); \
+ \
+ EFX_POPULATE_QWORD_4(qword, \
+ FSF_AZ_TX_KER_CONT, (_eop) ? 0 : 1, \
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW0, \
+ (uint32_t)((_addr) & 0xffffffff), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW1, \
+ (uint32_t)((_addr) >> 32)); \
+ EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+ __checkReturn int
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ int rc = ENOSPC;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1))
+ goto fail1;
+
+ for (i = 0; i < n; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t start = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ efsys_dma_addr_t end = start + size;
+
+ /* Fragments must not span 4k boundaries. */
+ EFSYS_ASSERT(P2ROUNDUP(start + 1, 4096) >= end);
+
+ EFX_TX_DESC(etp, start, size, ebp->eb_eop, added);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
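+/*
+ * Illustrative sketch (hypothetical helper, not part of the API): a
+ * caller can satisfy the 4k rule above by splitting each buffer at
+ * 4096-byte boundaries before building its efx_buffer_t array.  The
+ * name example_split_4k is invented for this demonstration.
+ */
+#if 0
+static unsigned int
+example_split_4k(efsys_dma_addr_t addr, size_t size, boolean_t eop,
+    efx_buffer_t *ebp, unsigned int max)
+{
+	unsigned int n = 0;
+
+	while (size != 0 && n < max) {
+		/* Bytes remaining in the current 4k region */
+		size_t chunk = 4096 - (size_t)(addr & 4095);
+
+		if (chunk > size)
+			chunk = size;
+
+		ebp[n].eb_addr = addr;
+		ebp[n].eb_size = chunk;
+		ebp[n].eb_eop = eop && (chunk == size);
+
+		addr += chunk;
+		size -= chunk;
+		n++;
+	}
+	return (n);
+}
+#endif
+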
+ void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added)
+{
+ efx_nic_t *enp = etp->et_enp;
+ uint32_t wptr;
+ efx_dword_t dword;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFSYS_PIO_WRITE_BARRIER();
+
+ /* Push the populated descriptors out */
+ wptr = added & etp->et_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0,
+ etp->et_index, &dword, B_FALSE);
+}
+
+ void
+efx_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ label = etp->et_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword);
+}
+
+ void
+efx_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword);
+
+ EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index,
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0));
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword);
+}
+
+ __checkReturn int
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_txq_t *etp;
+ efx_oword_t oword;
+ uint32_t size;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+
+ EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS == (1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, encp->enc_txq_limit);
+
+ if (!ISP2(n) || !(n & EFX_TXQ_NDESCS_MASK)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_txq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
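+	/*
+	 * Compute the FRF_AZ_TX_DESCQ_SIZE encoding:
+	 * size == log2(n / EFX_TXQ_MINNDESCS).
+	 */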
+ for (size = 0; (1 << size) <= (EFX_TXQ_MAXNDESCS / EFX_TXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_TXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+	/* Allocate a TXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp);
+
+ if (etp == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ etp->et_magic = EFX_TXQ_MAGIC;
+ etp->et_enp = enp;
+ etp->et_index = index;
+ etp->et_mask = n - 1;
+ etp->et_esmp = esmp;
+
+ /* Set up the new descriptor queue */
+ EFX_POPULATE_OWORD_6(oword,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, label,
+ FRF_AZ_TX_DESCQ_SIZE, size,
+ FRF_AZ_TX_DESCQ_TYPE, 0);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS,
+ (flags & EFX_CKSUM_IPV4) ? 0 : 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS,
+ (flags & EFX_CKSUM_TCPUDP) ? 0 : 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword);
+
+ enp->en_tx_qcount++;
+ *etpp = etp;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 78ca9ab00287fffb */
+static const char __cs * __cs __efx_tx_qstat_name[] = {
+ "post",
+ "unaligned_split",
+};
+/* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */
+
+ const char __cs *
+efx_tx_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, TX_NQSTATS);
+
+ return (__efx_tx_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+ void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_tx_qcount != 0);
+ --enp->en_tx_qcount;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword);
+
+ /* Free the TXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+}
+
+ void
+efx_tx_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+}
diff --git a/sys/dev/sfxge/common/efx_types.h b/sys/dev/sfxge/common/efx_types.h
new file mode 100644
index 0000000..d691482
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_types.h
@@ -0,0 +1,1605 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Acknowledgement to Fen Systems Ltd.
+ */
+
+#ifndef _SYS_EFX_TYPES_H
+#define _SYS_EFX_TYPES_H
+
+#include "efsys.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Bitfield access
+ *
+ * Solarflare NICs make extensive use of bitfields up to 128 bits
+ * wide. Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian. Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and efx_dword_t)
+ * to be little-endian.
+ *
+ * In the less common case of using PIO for individual register
+ * writes, we construct the little-endian datatype in host memory and
+ * then use non-swapping register access primitives, rather than
+ * constructing a native-endian datatype and relying on implicit
+ * byte-swapping. (We use a similar strategy for register reads.)
+ */
+
+/*
+ * NOTE: Field definitions here and elsewhere are done in terms of a lowest
+ * bit number (LBN) and a width.
+ */
+
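+/*
+ * Illustrative sketch (not part of the original header): a field with
+ * LBN 8 and WIDTH 3 occupies bits [8,10] of its element.  The field
+ * name EFX_EXAMPLE_FIELD and the helper below are hypothetical and
+ * exist only to demonstrate the convention.
+ */
+#if 0
+#define EFX_EXAMPLE_FIELD_LBN 8
+#define EFX_EXAMPLE_FIELD_WIDTH 3
+
+static uint32_t
+efx_example_field(uint32_t element)
+{
+	/* Shift the field down to bit 0, then mask to its width */
+	return ((element >> EFX_EXAMPLE_FIELD_LBN) &
+	    ((1u << EFX_EXAMPLE_FIELD_WIDTH) - 1));
+}
+#endif
+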
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+
+#define EFX_BYTE_0_LBN 0
+#define EFX_BYTE_0_WIDTH 8
+
+#define EFX_BYTE_1_LBN 8
+#define EFX_BYTE_1_WIDTH 8
+
+#define EFX_BYTE_2_LBN 16
+#define EFX_BYTE_2_WIDTH 8
+
+#define EFX_BYTE_3_LBN 24
+#define EFX_BYTE_3_WIDTH 8
+
+#define EFX_BYTE_4_LBN 32
+#define EFX_BYTE_4_WIDTH 8
+
+#define EFX_BYTE_5_LBN 40
+#define EFX_BYTE_5_WIDTH 8
+
+#define EFX_BYTE_6_LBN 48
+#define EFX_BYTE_6_WIDTH 8
+
+#define EFX_BYTE_7_LBN 56
+#define EFX_BYTE_7_WIDTH 8
+
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
+
+#define EFX_WORD_2_LBN 32
+#define EFX_WORD_2_WIDTH 16
+
+#define EFX_WORD_3_LBN 48
+#define EFX_WORD_3_WIDTH 16
+
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+
+#define EFX_QWORD_0_LBN 0
+#define EFX_QWORD_0_WIDTH 64
+
+#define EFX_QWORD_1_LBN 64
+#define EFX_QWORD_1_WIDTH 64
+
+/* Specified attribute (i.e. LBN or WIDTH) of the specified field */
+#define EFX_VAL(_field, _attribute) \
+ _field ## _ ## _attribute
+
+/* Lowest bit number of the specified field */
+#define EFX_LOW_BIT(_field) \
+ EFX_VAL(_field, LBN)
+
+/* Width of the specified field */
+#define EFX_WIDTH(_field) \
+ EFX_VAL(_field, WIDTH)
+
+/* Highest bit number of the specified field */
+#define EFX_HIGH_BIT(_field) \
+ (EFX_LOW_BIT(_field) + EFX_WIDTH(_field) - 1)
+
+/*
+ * 64-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x000000000000001f.
+ */
+#define EFX_MASK64(_field) \
+ ((EFX_WIDTH(_field) == 64) ? ~((uint64_t)0) : \
+ (((((uint64_t)1) << EFX_WIDTH(_field))) - 1))
+
+/*
+ * 32-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x0000001f.
+ */
+#define EFX_MASK32(_field) \
+ ((EFX_WIDTH(_field) == 32) ? ~((uint32_t)0) : \
+ (((((uint32_t)1) << EFX_WIDTH(_field))) - 1))
+
+/*
+ * 16-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x001f.
+ */
+#define EFX_MASK16(_field) \
+ ((EFX_WIDTH(_field) == 16) ? 0xffffu : \
+ (uint16_t)((1 << EFX_WIDTH(_field)) - 1))
+
+/*
+ * 8-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ */
+#define EFX_MASK8(_field) \
+ ((uint8_t)((1 << EFX_WIDTH(_field)) - 1))
+
+#pragma pack(1)
+
+/*
+ * A byte (i.e. 8-bit) datatype
+ */
+typedef union efx_byte_u {
+ uint8_t eb_u8[1];
+} efx_byte_t;
+
+/*
+ * A word (i.e. 16-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_word_u {
+ efx_byte_t ew_byte[2];
+ uint16_t ew_u16[1];
+ uint8_t ew_u8[2];
+} efx_word_t;
+
+/*
+ * A doubleword (i.e. 32-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_dword_u {
+ efx_byte_t ed_byte[4];
+ efx_word_t ed_word[2];
+ uint32_t ed_u32[1];
+ uint16_t ed_u16[2];
+ uint8_t ed_u8[4];
+} efx_dword_t;
+
+/*
+ * A quadword (i.e. 64-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_qword_u {
+ efx_byte_t eq_byte[8];
+ efx_word_t eq_word[4];
+ efx_dword_t eq_dword[2];
+#if EFSYS_HAS_UINT64
+ uint64_t eq_u64[1];
+#endif
+ uint32_t eq_u32[2];
+ uint16_t eq_u16[4];
+ uint8_t eq_u8[8];
+} efx_qword_t;
+
+/*
+ * An octword (i.e. 128-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_oword_u {
+ efx_byte_t eo_byte[16];
+ efx_word_t eo_word[8];
+ efx_dword_t eo_dword[4];
+ efx_qword_t eo_qword[2];
+#if EFSYS_HAS_UINT64
+ uint64_t eo_u64[2];
+#endif
+ uint32_t eo_u32[4];
+ uint16_t eo_u16[8];
+ uint8_t eo_u8[16];
+} efx_oword_t;
+
+#pragma pack()
+
+#define __SWAP16(_x) \
+ ((((_x) & 0xff) << 8) | \
+ (((_x) >> 8) & 0xff))
+
+#define __SWAP32(_x) \
+ ((__SWAP16((_x) & 0xffff) << 16) | \
+ __SWAP16(((_x) >> 16) & 0xffff))
+
+#define __SWAP64(_x) \
+ ((__SWAP32((_x) & 0xffffffff) << 32) | \
+ __SWAP32(((_x) >> 32) & 0xffffffff))
+
+#define __NOSWAP16(_x) (_x)
+#define __NOSWAP32(_x) (_x)
+#define __NOSWAP64(_x) (_x)
+
+#if EFSYS_IS_BIG_ENDIAN
+
+#define __CPU_TO_LE_16(_x) (uint16_t)__SWAP16(_x)
+#define __LE_TO_CPU_16(_x) (uint16_t)__SWAP16(_x)
+#define __CPU_TO_BE_16(_x) (uint16_t)__NOSWAP16(_x)
+#define __BE_TO_CPU_16(_x) (uint16_t)__NOSWAP16(_x)
+
+#define __CPU_TO_LE_32(_x) (uint32_t)__SWAP32(_x)
+#define __LE_TO_CPU_32(_x) (uint32_t)__SWAP32(_x)
+#define __CPU_TO_BE_32(_x) (uint32_t)__NOSWAP32(_x)
+#define __BE_TO_CPU_32(_x) (uint32_t)__NOSWAP32(_x)
+
+#define __CPU_TO_LE_64(_x) (uint64_t)__SWAP64(_x)
+#define __LE_TO_CPU_64(_x) (uint64_t)__SWAP64(_x)
+#define __CPU_TO_BE_64(_x) (uint64_t)__NOSWAP64(_x)
+#define __BE_TO_CPU_64(_x) (uint64_t)__NOSWAP64(_x)
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+#define __CPU_TO_LE_16(_x) (uint16_t)__NOSWAP16(_x)
+#define __LE_TO_CPU_16(_x) (uint16_t)__NOSWAP16(_x)
+#define __CPU_TO_BE_16(_x) (uint16_t)__SWAP16(_x)
+#define __BE_TO_CPU_16(_x) (uint16_t)__SWAP16(_x)
+
+#define __CPU_TO_LE_32(_x) (uint32_t)__NOSWAP32(_x)
+#define __LE_TO_CPU_32(_x) (uint32_t)__NOSWAP32(_x)
+#define __CPU_TO_BE_32(_x) (uint32_t)__SWAP32(_x)
+#define __BE_TO_CPU_32(_x) (uint32_t)__SWAP32(_x)
+
+#define __CPU_TO_LE_64(_x) (uint64_t)__NOSWAP64(_x)
+#define __LE_TO_CPU_64(_x) (uint64_t)__NOSWAP64(_x)
+#define __CPU_TO_BE_64(_x) (uint64_t)__SWAP64(_x)
+#define __BE_TO_CPU_64(_x) (uint64_t)__SWAP64(_x)
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
+
+#define __NATIVE_8(_x) (uint8_t)(_x)
+
+/* Format string for printing an efx_byte_t */
+#define EFX_BYTE_FMT "0x%02x"
+
+/* Format string for printing an efx_word_t */
+#define EFX_WORD_FMT "0x%04x"
+
+/* Format string for printing an efx_dword_t */
+#define EFX_DWORD_FMT "0x%08x"
+
+/* Format string for printing an efx_qword_t */
+#define EFX_QWORD_FMT "0x%08x:%08x"
+
+/* Format string for printing an efx_oword_t */
+#define EFX_OWORD_FMT "0x%08x:%08x:%08x:%08x"
+
+/* Parameters for printing an efx_byte_t */
+#define EFX_BYTE_VAL(_byte) \
+ ((unsigned int)__NATIVE_8((_byte).eb_u8[0]))
+
+/* Parameters for printing an efx_word_t */
+#define EFX_WORD_VAL(_word) \
+ ((unsigned int)__LE_TO_CPU_16((_word).ew_u16[0]))
+
+/* Parameters for printing an efx_dword_t */
+#define EFX_DWORD_VAL(_dword) \
+ ((unsigned int)__LE_TO_CPU_32((_dword).ed_u32[0]))
+
+/* Parameters for printing an efx_qword_t */
+#define EFX_QWORD_VAL(_qword) \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[0]))
+
+/* Parameters for printing an efx_oword_t */
+#define EFX_OWORD_VAL(_oword) \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[3])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[2])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[0]))
+
+/*
+ * Stop lint complaining about some shifts.
+ */
+#ifdef __lint
+extern int fix_lint;
+#define FIX_LINT(_x) (_x + fix_lint)
+#else
+#define FIX_LINT(_x) (_x)
+#endif
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT(_element, 32, 63, 28, 45) would give
+ *
+ * (_element) << 4
+ *
+ * The result will contain the relevant bits filled in over the range
+ * [0,high-low), with garbage in the bits above that range.
+ */
+#define EFX_EXTRACT_NATIVE(_element, _min, _max, _low, _high) \
+ ((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ ((_element) >> (_low - _min)) : \
+ ((_element) << (_min - _low))))
+
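+/*
+ * Worked example (illustrative): extracting bits [4,11] of a 32-bit
+ * element that holds bits [0,31]:
+ *
+ *	EFX_EXTRACT_NATIVE(0x0000ab40, 0, 31, 4, 11) == 0x0000ab40 >> 4
+ *						     == 0x00000ab4
+ *
+ * Masking the result with EFX_MASK32() for an 8-bit-wide field then
+ * yields 0xb4.
+ */
+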
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT64(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_64(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT32(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_32(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 16-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT16(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_16(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 8-bit
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT8(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__NATIVE_8(_element), _min, _max, _low, _high)
+
+#define EFX_EXTRACT_OWORD64(_oword, _low, _high) \
+ (EFX_EXTRACT64((_oword).eo_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT64((_oword).eo_u64[1], FIX_LINT(64), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_OWORD32(_oword, _low, _high) \
+ (EFX_EXTRACT32((_oword).eo_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[2], FIX_LINT(64), FIX_LINT(95), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[3], FIX_LINT(96), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD64(_qword, _low, _high) \
+ (EFX_EXTRACT64((_qword).eq_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD32(_qword, _low, _high) \
+ (EFX_EXTRACT32((_qword).eq_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_qword).eq_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_DWORD(_dword, _low, _high) \
+ (EFX_EXTRACT32((_dword).ed_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high))
+
+#define EFX_EXTRACT_WORD(_word, _low, _high) \
+ (EFX_EXTRACT16((_word).ew_u16[0], FIX_LINT(0), FIX_LINT(15), \
+ _low, _high))
+
+#define EFX_EXTRACT_BYTE(_byte, _low, _high) \
+ (EFX_EXTRACT8((_byte).eb_u8[0], FIX_LINT(0), FIX_LINT(7), \
+ _low, _high))
+
+
+#define EFX_OWORD_FIELD64(_oword, _field) \
+ ((uint32_t)EFX_EXTRACT_OWORD64(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_OWORD_FIELD32(_oword, _field) \
+ (EFX_EXTRACT_OWORD32(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD64(_qword, _field) \
+ ((uint32_t)EFX_EXTRACT_QWORD64(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD32(_qword, _field) \
+ (EFX_EXTRACT_QWORD32(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_DWORD_FIELD(_dword, _field) \
+ (EFX_EXTRACT_DWORD(_dword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_WORD_FIELD(_word, _field) \
+ (EFX_EXTRACT_WORD(_word, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK16(_field))
+
+#define EFX_BYTE_FIELD(_byte, _field) \
+ (EFX_EXTRACT_BYTE(_byte, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK8(_field))
+
+
+#define EFX_OWORD_IS_EQUAL64(_oword_a, _oword_b) \
+ ((_oword_a).eo_u64[0] == (_oword_b).eo_u64[0] && \
+ (_oword_a).eo_u64[1] == (_oword_b).eo_u64[1])
+
+#define EFX_OWORD_IS_EQUAL32(_oword_a, _oword_b) \
+ ((_oword_a).eo_u32[0] == (_oword_b).eo_u32[0] && \
+ (_oword_a).eo_u32[1] == (_oword_b).eo_u32[1] && \
+ (_oword_a).eo_u32[2] == (_oword_b).eo_u32[2] && \
+ (_oword_a).eo_u32[3] == (_oword_b).eo_u32[3])
+
+#define EFX_QWORD_IS_EQUAL64(_qword_a, _qword_b) \
+ ((_qword_a).eq_u64[0] == (_qword_b).eq_u64[0])
+
+#define EFX_QWORD_IS_EQUAL32(_qword_a, _qword_b) \
+ ((_qword_a).eq_u32[0] == (_qword_b).eq_u32[0] && \
+ (_qword_a).eq_u32[1] == (_qword_b).eq_u32[1])
+
+#define EFX_DWORD_IS_EQUAL(_dword_a, _dword_b) \
+ ((_dword_a).ed_u32[0] == (_dword_b).ed_u32[0])
+
+#define EFX_WORD_IS_EQUAL(_word_a, _word_b) \
+ ((_word_a).ew_u16[0] == (_word_b).ew_u16[0])
+
+#define EFX_BYTE_IS_EQUAL(_byte_a, _byte_b) \
+ ((_byte_a).eb_u8[0] == (_byte_b).eb_u8[0])
+
+
+#define EFX_OWORD_IS_ZERO64(_oword) \
+ (((_oword).eo_u64[0] | \
+ (_oword).eo_u64[1]) == 0)
+
+#define EFX_OWORD_IS_ZERO32(_oword) \
+ (((_oword).eo_u32[0] | \
+ (_oword).eo_u32[1] | \
+ (_oword).eo_u32[2] | \
+ (_oword).eo_u32[3]) == 0)
+
+#define EFX_QWORD_IS_ZERO64(_qword) \
+ (((_qword).eq_u64[0]) == 0)
+
+#define EFX_QWORD_IS_ZERO32(_qword) \
+ (((_qword).eq_u32[0] | \
+ (_qword).eq_u32[1]) == 0)
+
+#define EFX_DWORD_IS_ZERO(_dword) \
+ (((_dword).ed_u32[0]) == 0)
+
+#define EFX_WORD_IS_ZERO(_word) \
+ (((_word).ew_u16[0]) == 0)
+
+#define EFX_BYTE_IS_ZERO(_byte) \
+ (((_byte).eb_u8[0]) == 0)
+
+
+#define EFX_OWORD_IS_SET64(_oword) \
+ (((_oword).eo_u64[0] & \
+ (_oword).eo_u64[1]) == ~((uint64_t)0))
+
+#define EFX_OWORD_IS_SET32(_oword) \
+ (((_oword).eo_u32[0] & \
+ (_oword).eo_u32[1] & \
+ (_oword).eo_u32[2] & \
+ (_oword).eo_u32[3]) == ~((uint32_t)0))
+
+#define EFX_QWORD_IS_SET64(_qword) \
+	(((_qword).eq_u64[0]) == ~((uint64_t)0))
+
+#define EFX_QWORD_IS_SET32(_qword) \
+ (((_qword).eq_u32[0] & \
+ (_qword).eq_u32[1]) == ~((uint32_t)0))
+
+#define EFX_DWORD_IS_SET(_dword) \
+ ((_dword).ed_u32[0] == ~((uint32_t)0))
+
+#define EFX_WORD_IS_SET(_word) \
+ ((_word).ew_u16[0] == ~((uint16_t)0))
+
+#define EFX_BYTE_IS_SET(_byte) \
+ ((_byte).eb_u8[0] == ~((uint8_t)0))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high) that lies within
+ * the range [min,max).
+ */
+
+#define EFX_INSERT_NATIVE64(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint64_t)(_value)) << (_low - _min)) : \
+ (((uint64_t)(_value)) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint32_t)(_value)) << (_low - _min)) : \
+ (((uint32_t)(_value)) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint16_t)((_low > _min) ? \
+ ((_value) << (_low - _min)) : \
+ ((_value) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint8_t)((_low > _min) ? \
+ ((_value) << (_low - _min)) : \
+ ((_value) >> (_min - _low))))
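+
+/*
+ * Illustrative note: insertion mirrors extraction with the shifts
+ * reversed.  For example, placing a value into bits [8,16) of the
+ * 32-bit element covering [0,32) gives
+ * EFX_INSERT_NATIVE32(0, 31, 8, 15, _value) == ((uint32_t)(_value)) << 8,
+ * while a field lying wholly outside [min,max) contributes 0.
+ */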
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE64(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE32(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE16(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE8(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELDS64(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_64( \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS32(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_32( \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS16(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_16( \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS8(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __NATIVE_8( \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field10, _value10))
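+
+/*
+ * Illustrative note: each of the ten OR'ed terms above evaluates to 0
+ * for a field that lies outside [_min,_max), so the same field list can
+ * be applied to every element of an oword/qword and each named field
+ * lands only in the element(s) that actually contain it.
+ */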
+
+#define EFX_POPULATE_OWORD64(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = EFX_INSERT_FIELDS64(64, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_OWORD32(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = EFX_INSERT_FIELDS32(64, 95, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = EFX_INSERT_FIELDS32(96, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD64(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD32(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_DWORD(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_WORD(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = EFX_INSERT_FIELDS16(0, 15, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_BYTE(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = EFX_INSERT_FIELDS8(0, 7, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
+
+#define EFX_POPULATE_OWORD_9(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_OWORD_10(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_OWORD_8(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_OWORD_9(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_OWORD_7(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_OWORD_8(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_OWORD_6(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_OWORD_7(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_OWORD_5(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_OWORD_6(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_OWORD_4(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_OWORD_5(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_OWORD_3(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_OWORD_4(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_OWORD_2(_oword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_OWORD_3(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_OWORD_1(_oword, \
+ _field1, _value1) \
+ EFX_POPULATE_OWORD_2(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_OWORD(_oword) \
+ EFX_POPULATE_OWORD_1(_oword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_OWORD64(_oword) \
+ EFX_POPULATE_OWORD_2(_oword, \
+ EFX_QWORD_0, (uint64_t)-1, EFX_QWORD_1, (uint64_t)-1)
+
+#define EFX_SET_OWORD32(_oword) \
+ EFX_POPULATE_OWORD_4(_oword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff, \
+ EFX_DWORD_2, 0xffffffff, EFX_DWORD_3, 0xffffffff)
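+
+/*
+ * Usage sketch: the _N variants below pad their argument list out to
+ * ten field/value pairs using (EFX_DUMMY_FIELD, 0), which contributes
+ * no bits.  With hypothetical fields FIELD_A and FIELD_B:
+ *
+ *	efx_oword_t oword;
+ *
+ *	EFX_POPULATE_OWORD_2(oword, FIELD_A, 1, FIELD_B, 2);
+ */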
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
+
+#define EFX_POPULATE_QWORD_9(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_QWORD_10(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_QWORD_8(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_QWORD_9(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_QWORD_7(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_QWORD_8(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_QWORD_6(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_QWORD_7(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_QWORD_5(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_QWORD_6(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_QWORD_4(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_QWORD_5(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_QWORD_3(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_QWORD_4(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_QWORD_2(_qword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_QWORD_3(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_QWORD_1(_qword, \
+ _field1, _value1) \
+ EFX_POPULATE_QWORD_2(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_QWORD(_qword) \
+ EFX_POPULATE_QWORD_1(_qword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_QWORD64(_qword) \
+ EFX_POPULATE_QWORD_1(_qword, \
+ EFX_QWORD_0, (uint64_t)-1)
+
+#define EFX_SET_QWORD32(_qword) \
+ EFX_POPULATE_QWORD_2(_qword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
+
+#define EFX_POPULATE_DWORD_9(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_10(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_DWORD_8(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_DWORD_9(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_DWORD_7(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_DWORD_8(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_DWORD_6(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_7(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_DWORD_5(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_DWORD_6(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_DWORD_4(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_DWORD_5(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_DWORD_3(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_4(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_DWORD_2(_dword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_DWORD_3(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_DWORD_1(_dword, \
+ _field1, _value1) \
+ EFX_POPULATE_DWORD_2(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, \
+ EFX_DWORD_0, 0xffffffff)
+
+/* Populate a word field with various numbers of arguments */
+#define EFX_POPULATE_WORD_10 EFX_POPULATE_WORD
+
+#define EFX_POPULATE_WORD_9(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_WORD_10(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_WORD_8(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_WORD_9(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_WORD_7(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_WORD_8(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_WORD_6(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_WORD_7(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_WORD_5(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_WORD_6(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_WORD_4(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_WORD_5(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_WORD_3(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_WORD_4(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_WORD_2(_word, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_WORD_3(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_WORD_1(_word, \
+ _field1, _value1) \
+ EFX_POPULATE_WORD_2(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, \
+ EFX_WORD_0, 0xffff)
+
+/* Populate a byte field with various numbers of arguments */
+#define EFX_POPULATE_BYTE_10 EFX_POPULATE_BYTE
+
+#define EFX_POPULATE_BYTE_9(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_BYTE_10(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_BYTE_8(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_BYTE_9(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_BYTE_7(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_BYTE_8(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_BYTE_6(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_BYTE_7(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_BYTE_5(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_BYTE_6(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_BYTE_4(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_BYTE_5(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_BYTE_3(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_BYTE_4(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_BYTE_2(_byte, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_BYTE_3(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_BYTE_1(_byte, \
+ _field1, _value1) \
+ EFX_POPULATE_BYTE_2(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, \
+ EFX_BYTE_0, 0xff)
+
+/*
+ * Modify a named field within an already-populated structure. Used
+ * for read-modify-write operations.
+ */
+
+#define EFX_INSERT_FIELD64(_min, _max, _field, _value) \
+ __CPU_TO_LE_64(EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD32(_min, _max, _field, _value) \
+ __CPU_TO_LE_32(EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD16(_min, _max, _field, _value) \
+ __CPU_TO_LE_16(EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD8(_min, _max, _field, _value) \
+ __NATIVE_8(EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value))
+
+#define EFX_INPLACE_MASK64(_min, _max, _field) \
+ EFX_INSERT_FIELD64(_min, _max, _field, EFX_MASK64(_field))
+
+#define EFX_INPLACE_MASK32(_min, _max, _field) \
+ EFX_INSERT_FIELD32(_min, _max, _field, EFX_MASK32(_field))
+
+#define EFX_INPLACE_MASK16(_min, _max, _field) \
+ EFX_INSERT_FIELD16(_min, _max, _field, EFX_MASK16(_field))
+
+#define EFX_INPLACE_MASK8(_min, _max, _field) \
+ EFX_INSERT_FIELD8(_min, _max, _field, EFX_MASK8(_field))
+
+#define EFX_SET_OWORD_FIELD64(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = (((_oword).eo_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = (((_oword).eo_u64[1] & \
+ ~EFX_INPLACE_MASK64(64, 127, _field)) | \
+ EFX_INSERT_FIELD64(64, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_FIELD32(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = (((_oword).eo_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = (((_oword).eo_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = (((_oword).eo_u32[2] & \
+ ~EFX_INPLACE_MASK32(64, 95, _field)) | \
+ EFX_INSERT_FIELD32(64, 95, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = (((_oword).eo_u32[3] & \
+ ~EFX_INPLACE_MASK32(96, 127, _field)) | \
+ EFX_INSERT_FIELD32(96, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD64(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = (((_qword).eq_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD32(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = (((_qword).eq_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = (((_qword).eq_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_DWORD_FIELD(_dword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = (((_dword).ed_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_WORD_FIELD(_word, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = (((_word).ew_u16[0] & \
+ ~EFX_INPLACE_MASK16(0, 15, _field)) | \
+ EFX_INSERT_FIELD16(0, 15, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_BYTE_FIELD(_byte, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = (((_byte).eb_u8[0] & \
+ ~EFX_INPLACE_MASK8(0, 7, _field)) | \
+ EFX_INSERT_FIELD8(0, 7, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
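+
+/*
+ * Usage sketch: read-modify-write of a single named field, leaving all
+ * other bits in place (FIELD_A is a hypothetical field name):
+ *
+ *	EFX_SET_DWORD_FIELD(dword, FIELD_A, 3);
+ *
+ * The INPLACE mask clears the field's previous contents and the INSERT
+ * supplies the new value, both already in little-endian form.
+ */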
+
+/*
+ * Set or clear a numbered bit within an octword.
+ */
+
+#define EFX_SHIFT64(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 64) ? \
+ ((uint64_t)1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT32(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 32) ? \
+ ((uint32_t)1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT16(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 16) ? \
+ (uint16_t)(1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT8(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 8) ? \
+ (uint8_t)(1 << ((_bit) - (_base))) : \
+ 0U)
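+
+/*
+ * Illustrative note: EFX_SHIFT32(_bit, _base) is a single-bit mask
+ * within the 32-bit element whose lowest bit number is _base, or 0 if
+ * _bit lies outside that element; e.g. EFX_SHIFT32(33, 32) == 0x2 and
+ * EFX_SHIFT32(33, 0) == 0.  The per-element |= and &= statements below
+ * can therefore be applied unconditionally.
+ */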
+
+#define EFX_SET_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ (_oword).eo_u64[1] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_oword).eo_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ (_oword).eo_u64[1] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_oword).eo_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_qword).eq_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_qword).eq_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_WORD_BIT(_word, _bit) \
+ do { \
+ (_word).ew_u16[0] |= \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_WORD_BIT(_word, _bit) \
+ do { \
+		(_word).ew_u16[0] &=					\
+ __CPU_TO_LE_16(~EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] |= \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] &= \
+ __NATIVE_8(~EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
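+
+/*
+ * Usage sketch:
+ *
+ *	EFX_SET_DWORD_BIT(dword, 7);
+ *	EFX_CLEAR_DWORD_BIT(dword, 7);
+ *
+ * sets and then clears bit 7 of a dword; the OWORD/QWORD forms update
+ * every element, but only the element containing _bit changes.
+ */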
+
+#define EFX_OR_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] |= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] |= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] |= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] |= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] |= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] |= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] &= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] &= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] &= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] &= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] &= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] &= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] |= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] |= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] |= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] &= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] &= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] &= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] |= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] &= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] |= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] &= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_BYTE(_byte1, _byte2) \
+ do { \
+		(_byte1).eb_u8[0] |= (_byte2).eb_u8[0];			\
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] &= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_USE_UINT64
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL64
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL64
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET64
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#define EFX_SET_OWORD EFX_SET_OWORD64
+#define EFX_SET_QWORD EFX_SET_QWORD64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT64
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT64
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT64
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT64
+#define EFX_OR_OWORD EFX_OR_OWORD64
+#define EFX_AND_OWORD EFX_AND_OWORD64
+#define EFX_OR_QWORD EFX_OR_QWORD64
+#define	EFX_AND_QWORD	EFX_AND_QWORD64
+#else
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL32
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL32
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET32
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET32
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#define EFX_SET_OWORD EFX_SET_OWORD32
+#define EFX_SET_QWORD EFX_SET_QWORD32
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT32
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT32
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT32
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT32
+#define EFX_OR_OWORD EFX_OR_OWORD32
+#define EFX_AND_OWORD EFX_AND_OWORD32
+#define EFX_OR_QWORD EFX_OR_QWORD32
+#define	EFX_AND_QWORD	EFX_AND_QWORD32
+#endif
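+
+/*
+ * Note: EFSYS_USE_UINT64 is expected to come from the platform's
+ * efsys.h; it selects whether the unsuffixed names above resolve to the
+ * 64-bit or the 32-bit implementations.
+ */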
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_TYPES_H */
diff --git a/sys/dev/sfxge/common/efx_vpd.c b/sys/dev/sfxge/common/efx_vpd.c
new file mode 100644
index 0000000..699e890
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_vpd.c
@@ -0,0 +1,999 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#define TAG_TYPE_LBN 7
+#define TAG_TYPE_WIDTH 1
+#define TAG_TYPE_LARGE_ITEM_DECODE 1
+#define TAG_TYPE_SMALL_ITEM_DECODE 0
+
+#define TAG_SMALL_ITEM_NAME_LBN 3
+#define TAG_SMALL_ITEM_NAME_WIDTH 4
+#define TAG_SMALL_ITEM_SIZE_LBN 0
+#define TAG_SMALL_ITEM_SIZE_WIDTH 3
+
+#define TAG_LARGE_ITEM_NAME_LBN 0
+#define TAG_LARGE_ITEM_NAME_WIDTH 7
+
+#define TAG_NAME_END_DECODE 0x0f
+#define TAG_NAME_ID_STRING_DECODE 0x02
+#define TAG_NAME_VPD_R_DECODE 0x10
+#define TAG_NAME_VPD_W_DECODE 0x11
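+
+/*
+ * Layout sketch (PCI VPD resource tags, decoded below): a small
+ * resource tag is a single byte with bit 7 clear, the name in bits
+ * [3,7) and the size in bits [0,3); a large resource tag is a byte
+ * with bit 7 set and the name in bits [0,7), followed by a 16-bit
+ * little-endian length.  For example 0x90 0x04 0x00 introduces a
+ * 4-byte VPD-R (0x10) section.
+ */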
+
+#if EFSYS_OPT_FALCON
+
+static efx_vpd_ops_t __cs __efx_vpd_falcon_ops = {
+ NULL, /* evpdo_init */
+ falcon_vpd_size, /* evpdo_size */
+ falcon_vpd_read, /* evpdo_read */
+ falcon_vpd_verify, /* evpdo_verify */
+ NULL, /* evpdo_reinit */
+ falcon_vpd_get, /* evpdo_get */
+ falcon_vpd_set, /* evpdo_set */
+ falcon_vpd_next, /* evpdo_next */
+ falcon_vpd_write, /* evpdo_write */
+ NULL, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_FALCON */
+
+#if EFSYS_OPT_SIENA
+
+static efx_vpd_ops_t __cs __efx_vpd_siena_ops = {
+ siena_vpd_init, /* evpdo_init */
+ siena_vpd_size, /* evpdo_size */
+ siena_vpd_read, /* evpdo_read */
+ siena_vpd_verify, /* evpdo_verify */
+ siena_vpd_reinit, /* evpdo_reinit */
+ siena_vpd_get, /* evpdo_get */
+ siena_vpd_set, /* evpdo_set */
+ siena_vpd_next, /* evpdo_next */
+ siena_vpd_write, /* evpdo_write */
+ siena_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+ __checkReturn int
+efx_vpd_init(
+ __in efx_nic_t *enp)
+{
+ efx_vpd_ops_t *evpdop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_FALCON
+ case EFX_FAMILY_FALCON:
+ evpdop = (efx_vpd_ops_t *)&__efx_vpd_falcon_ops;
+ break;
+#endif /* EFSYS_OPT_FALCON */
+
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ evpdop = (efx_vpd_ops_t *)&__efx_vpd_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (evpdop->evpdo_init != NULL) {
+ if ((rc = evpdop->evpdo_init(enp)) != 0)
+ goto fail2;
+ }
+
+ enp->en_evpdop = evpdop;
+ enp->en_mod_flags |= EFX_MOD_VPD;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
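+
+/*
+ * Note on the error-handling convention used throughout this file:
+ * every failure path exits through a numbered failN label, so a
+ * distinct EFSYS_PROBE() fires for each failure site, with fail1 also
+ * recording the errno value.
+ */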
+
+ __checkReturn int
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_size(enp, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_read(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_verify(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_reinit == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = evpdop->evpdo_reinit(enp, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_set(enp, data, size, evvp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_next(enp, data, size, evvp, contp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_write(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static __checkReturn int
+efx_vpd_next_tag(
+ __in caddr_t data,
+ __in size_t size,
+ __inout unsigned int *offsetp,
+ __out efx_vpd_tag_t *tagp,
+ __out uint16_t *lengthp)
+{
+ efx_byte_t byte;
+ efx_word_t word;
+ uint8_t name;
+ uint16_t length;
+ size_t headlen;
+ int rc;
+
+ if (*offsetp >= size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ EFX_POPULATE_BYTE_1(byte, EFX_BYTE_0, data[*offsetp]);
+
+ switch (EFX_BYTE_FIELD(byte, TAG_TYPE)) {
+ case TAG_TYPE_SMALL_ITEM_DECODE:
+ headlen = 1;
+
+ name = EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_NAME);
+ length = (uint16_t)EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_SIZE);
+
+ break;
+
+ case TAG_TYPE_LARGE_ITEM_DECODE:
+ headlen = 3;
+
+ if (*offsetp + headlen > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ name = EFX_BYTE_FIELD(byte, TAG_LARGE_ITEM_NAME);
+ EFX_POPULATE_WORD_2(word,
+ EFX_BYTE_0, data[*offsetp + 1],
+ EFX_BYTE_1, data[*offsetp + 2]);
+ length = EFX_WORD_FIELD(word, EFX_WORD_0);
+
+ break;
+
+ default:
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if (*offsetp + headlen + length > size) {
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ EFX_STATIC_ASSERT(TAG_NAME_END_DECODE == EFX_VPD_END);
+ EFX_STATIC_ASSERT(TAG_NAME_ID_STRING_DECODE == EFX_VPD_ID);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_R_DECODE == EFX_VPD_RO);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_W_DECODE == EFX_VPD_RW);
+ if (name != EFX_VPD_END && name != EFX_VPD_ID &&
+ name != EFX_VPD_RO) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ *tagp = name;
+ *lengthp = length;
+ *offsetp += headlen;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static __checkReturn int
+efx_vpd_next_keyword(
+ __in_bcount(size) caddr_t tag,
+ __in size_t size,
+ __in unsigned int pos,
+ __out efx_vpd_keyword_t *keywordp,
+ __out uint8_t *lengthp)
+{
+ efx_vpd_keyword_t keyword;
+ uint8_t length;
+ int rc;
+
+ if (pos + 3U > size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ keyword = EFX_VPD_KEYWORD(tag[pos], tag[pos + 1]);
+ length = tag[pos + 2];
+
+ if (length == 0 || pos + 3U + length > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ *keywordp = keyword;
+ *lengthp = length;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
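+
+/*
+ * Layout sketch: inside a VPD-R/VPD-W section, each keyword record is
+ * two ASCII name bytes, one length byte and then <length> payload
+ * bytes; e.g. 'S' 'N' 0x03 '1' '2' '3' would encode a three-byte
+ * serial number keyword.
+ */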
+
+ __checkReturn int
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp)
+{
+ efx_vpd_tag_t tag;
+ unsigned int offset;
+ uint16_t taglen;
+ int rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ offset += taglen;
+ if (tag == EFX_VPD_END)
+ break;
+ }
+
+ *lengthp = offset;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ boolean_t cksummed = B_FALSE;
+ int rc;
+
+ /*
+	 * Parse every tag and keyword in the existing VPD.  If the
+	 * checksum is present, assert that it is correct and that it is
+	 * the final keyword in the RO block.
+ */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag == EFX_VPD_ID)
+ goto done;
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ /* RV keyword must be the last in the block */
+ if (cksummed)
+ goto fail2;
+
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail3;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 4; i++)
+ cksum += data[i];
+
+ if (cksum != 0) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ cksummed = B_TRUE;
+ }
+ }
+
+ done:
+ offset += taglen;
+ }
+
+ if (!cksummed) {
+ rc = EFAULT;
+ goto fail5;
+ }
+
+ if (cksummedp != NULL)
+ *cksummedp = cksummed;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static uint8_t __cs __efx_vpd_blank_pid[] = {
+ /* Large resource type ID length 1 */
+ 0x82, 0x01, 0x00,
+ /* Product name ' ' */
+ 0x32,
+};
+
+static uint8_t __cs __efx_vpd_blank_r[] = {
+ /* Large resource type VPD-R length 4 */
+ 0x90, 0x04, 0x00,
+ /* RV keyword length 1 */
+ 'R', 'V', 0x01,
+ /* RV payload checksum */
+ 0x00,
+};
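+
+/*
+ * Checksum convention (relied on by the templates above and by the
+ * verify/set paths): the one-byte RV payload is chosen so that the sum
+ * of every byte from the start of the image up to and including the RV
+ * payload is zero modulo 256.
+ */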
+
+ __checkReturn int
+efx_vpd_hunk_reinit(
+ __in caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid)
+{
+ unsigned int offset = 0;
+ unsigned int pos;
+ efx_byte_t byte;
+ uint8_t cksum;
+ int rc;
+
+ if (size < 0x100) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (wantpid) {
+ memcpy(data + offset, __efx_vpd_blank_pid,
+ sizeof (__efx_vpd_blank_pid));
+ offset += sizeof (__efx_vpd_blank_pid);
+ }
+
+ memcpy(data + offset, __efx_vpd_blank_r, sizeof (__efx_vpd_blank_r));
+ offset += sizeof (__efx_vpd_blank_r);
+
+ /* Update checksum */
+ cksum = 0;
+ for (pos = 0; pos < offset; pos++)
+ cksum += data[pos];
+ data[offset - 1] -= cksum;
+
+ /* Append trailing tag */
+ EFX_POPULATE_BYTE_3(byte,
+ TAG_TYPE, TAG_TYPE_SMALL_ITEM_DECODE,
+ TAG_SMALL_ITEM_NAME, TAG_NAME_END_DECODE,
+ TAG_SMALL_ITEM_SIZE, 0);
+ data[offset] = EFX_BYTE_FIELD(byte, EFX_BYTE_0);
+ offset++;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keywordp,
+ __out_bcount_opt(*paylenp) unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword = 0;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int index;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t paylen;
+ int rc;
+
+ offset = index = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ if (tag == EFX_VPD_END)
+ break;
+
+ if (tag == EFX_VPD_ID) {
+ if (index == *contp) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+ paylen = (uint8_t)MIN(taglen, 0xff);
+
+ goto done;
+ }
+ } else {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail2;
+
+ if (index == *contp) {
+ offset += pos + 3;
+ paylen = keylen;
+
+ goto done;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+ *contp = 0;
+ return (0);
+
+done:
+ *tagp = tag;
+ *keywordp = keyword;
+ if (payloadp != NULL)
+ *payloadp = offset;
+ if (paylenp != NULL)
+ *paylenp = paylen;
+
+ ++(*contp);
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
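+
+/*
+ * Usage sketch: start with *contp == 0 and call repeatedly; each
+ * successful call yields the next tag/keyword pair and increments
+ * *contp, and *contp is reset to 0 once the END tag is reached.
+ */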
+
+ __checkReturn int
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp)
+{
+ efx_vpd_tag_t itag;
+ efx_vpd_keyword_t ikeyword;
+ unsigned int offset;
+ unsigned int pos;
+ uint16_t taglen;
+ uint8_t keylen;
+ int rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &itag, &taglen)) != 0)
+ goto fail1;
+ if (itag == EFX_VPD_END)
+ break;
+
+ if (itag == tag) {
+ if (itag == EFX_VPD_ID) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+
+ *paylenp = (uint8_t)MIN(taglen, 0xff);
+ *payloadp = offset;
+ return (0);
+ }
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &ikeyword, &keylen)) != 0)
+ goto fail2;
+
+ if (ikeyword == keyword) {
+ *paylenp = keylen;
+ *payloadp = offset + pos + 3;
+ return (0);
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+ /* Not an error */
+ return (ENOENT);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_word_t word;
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int taghead;
+ unsigned int source;
+ unsigned int dest;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ size_t used;
+ int rc;
+
+ switch (evvp->evv_tag) {
+ case EFX_VPD_ID:
+ if (evvp->evv_keyword != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Can't delete the ID keyword */
+ if (evvp->evv_length == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ case EFX_VPD_RO:
+ if (evvp->evv_keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Determine total size of all current tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &used)) != 0)
+ goto fail2;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ taghead = offset;
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail3;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag != evvp->evv_tag) {
+ offset += taglen;
+ continue;
+ }
+
+ /* We only support modifying large resource tags */
+ if (offset - taghead != 3) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /*
+ * Work out the offset of the byte immediately after the
+ * old (=source) and new (=dest) keyword/tag
+ */
+ pos = 0;
+ if (tag == EFX_VPD_ID) {
+ source = offset + taglen;
+ dest = offset + evvp->evv_length;
+ goto check_space;
+ }
+
+ EFSYS_ASSERT3U(tag, ==, EFX_VPD_RO);
+ source = dest = 0;
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail5;
+
+ if (keyword == evvp->evv_keyword &&
+ evvp->evv_length == 0) {
+ /* Deleting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos;
+ break;
+
+ } else if (keyword == evvp->evv_keyword) {
+ /* Adjusting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+
+ } else if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ /* The RV keyword must be at the end */
+ EFSYS_ASSERT3U(pos + 3 + keylen, ==, taglen);
+
+ /*
+ * The keyword doesn't already exist. If the
+ * user is deleting a non-existent keyword then
+ * this is a no-op.
+ */
+ if (evvp->evv_length == 0)
+ return (0);
+
+ /* Insert this keyword before the RV keyword */
+ source = offset + pos;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+ }
+ }
+
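+ /*
+ * The used length grows by (dest - source). The test is
+ * written as used + dest > size + source so that the
+ * subtraction cannot underflow when the keyword shrinks.
+ */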
+ check_space:
+ if (used + dest > size + source) {
+ rc = ENOSPC;
+ goto fail6;
+ }
+
+ /* Move trailing data */
+ (void) memmove(data + dest, data + source, used - source);
+
+ /* Copy contents */
+ memcpy(data + dest - evvp->evv_length, evvp->evv_value,
+ evvp->evv_length);
+
+ /* Insert new keyword header if required */
+ if (tag != EFX_VPD_ID && evvp->evv_length > 0) {
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0,
+ evvp->evv_keyword);
+ data[offset + pos + 0] =
+ EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset + pos + 1] =
+ EFX_WORD_FIELD(word, EFX_BYTE_1);
+ data[offset + pos + 2] = evvp->evv_length;
+ }
+
+ /* Modify tag length (large resource type) */
+ taglen += (dest - source);
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen);
+ data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1);
+
+ goto checksum;
+ }
+
+ /* Unable to find the matching tag */
+ rc = ENOENT;
+ goto fail7;
+
+checksum:
+ /* Find the RV tag, and update the checksum */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail8;
+ if (tag == EFX_VPD_END)
+ break;
+ if (tag == EFX_VPD_RO) {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail9;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
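+ /*
+ * The first RV payload byte is set so
+ * that all bytes from the start of the
+ * VPD up to and including it sum to zero.
+ */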
+ cksum = 0;
+ for (i = 0; i < offset + pos + 3; i++)
+ cksum += data[i];
+ data[i] = -cksum;
+ break;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+ /* Fill the unused portion with 0xff */
+ (void) memset(data + offset + taglen, 0xff, size - offset - taglen);
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+efx_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ efx_vpd_ops_t *evpdop = enp->en_evpdop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_fini != NULL)
+ evpdop->evpdo_fini(enp);
+
+ enp->en_evpdop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_VPD;
+}
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/sys/dev/sfxge/common/efx_wol.c b/sys/dev/sfxge/common/efx_wol.c
new file mode 100644
index 0000000..74f11de
--- /dev/null
+++ b/sys/dev/sfxge/common/efx_wol.c
@@ -0,0 +1,396 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_WOL
+
+ __checkReturn int
+efx_wol_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_WOL));
+
+ if (~(encp->enc_features) & EFX_FEATURE_WOL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ /* Current implementation is Siena specific */
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ enp->en_mod_flags |= EFX_MOD_WOL;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_wol_filter_clear(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_WOL_FILTER_RESET_IN_LEN];
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ req.emr_cmd = MC_CMD_WOL_FILTER_RESET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WOL_FILTER_RESET_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_RESET_IN_MASK,
+ MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS |
+ MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_wol_filter_add(
+ __in efx_nic_t *enp,
+ __in efx_wol_type_t type,
+ __in efx_wol_param_t *paramp,
+ __out uint32_t *filter_idp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_WOL_FILTER_SET_IN_LEN,
+ MC_CMD_WOL_FILTER_SET_OUT_LEN)];
+ efx_byte_t link_mask;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ req.emr_cmd = MC_CMD_WOL_FILTER_SET;
+ (void) memset(payload, '\0', sizeof (payload));
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WOL_FILTER_SET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_WOL_FILTER_SET_OUT_LEN;
+
+ switch (type) {
+ case EFX_WOL_TYPE_MAGIC:
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE,
+ MC_CMD_FILTER_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE,
+ MC_CMD_WOL_TYPE_MAGIC);
+ EFX_MAC_ADDR_COPY(
+ MCDI_IN2(req, uint8_t, WOL_FILTER_SET_IN_MAGIC_MAC),
+ paramp->ewp_magic.mac_addr);
+ break;
+
+ case EFX_WOL_TYPE_BITMAP: {
+ uint32_t swapped = 0;
+ efx_dword_t *dwordp;
+ unsigned int pos, bit;
+
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE,
+ MC_CMD_FILTER_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE,
+ MC_CMD_WOL_TYPE_BITMAP);
+
+ /*
+ * MC bitmask is supposed to be bit swapped
+ * amongst 32 bit words(!)
+ */
+
+ dwordp = MCDI_IN2(req, efx_dword_t,
+ WOL_FILTER_SET_IN_BITMAP_MASK);
+
+ EFSYS_ASSERT3U(EFX_WOL_BITMAP_MASK_SIZE % 4, ==, 0);
+
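+ /*
+ * Reverse the bit order within each mask byte while
+ * packing four bytes into each 32-bit word, so bit 0
+ * of byte 0 lands in bit 31 of the first dword.
+ */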
+ for (pos = 0; pos < EFX_WOL_BITMAP_MASK_SIZE; ++pos) {
+ uint8_t native = paramp->ewp_bitmap.mask[pos];
+
+ for (bit = 0; bit < 8; ++bit) {
+ swapped <<= 1;
+ swapped |= (native & 0x1);
+ native >>= 1;
+ }
+
+ if ((pos & 3) == 3) {
+ EFX_POPULATE_DWORD_1(dwordp[pos >> 2],
+ EFX_DWORD_0, swapped);
+ swapped = 0;
+ }
+ }
+
+ memcpy(MCDI_IN2(req, uint8_t, WOL_FILTER_SET_IN_BITMAP_BITMAP),
+ paramp->ewp_bitmap.value,
+ sizeof (paramp->ewp_bitmap.value));
+
+ EFSYS_ASSERT3U(paramp->ewp_bitmap.value_len, <=,
+ sizeof (paramp->ewp_bitmap.value));
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_BITMAP_LEN,
+ paramp->ewp_bitmap.value_len);
+ }
+ break;
+
+ case EFX_WOL_TYPE_LINK:
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE,
+ MC_CMD_FILTER_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE,
+ MC_CMD_WOL_TYPE_LINK);
+
+ EFX_ZERO_BYTE(link_mask);
+ EFX_SET_BYTE_FIELD(link_mask, MC_CMD_WOL_FILTER_SET_IN_LINK_UP,
+ 1);
+ MCDI_IN_SET_BYTE(req, WOL_FILTER_SET_IN_LINK_MASK,
+ link_mask.eb_u8[0]);
+ break;
+
+ default:
+ EFSYS_ASSERT3U(type, !=, type);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *filter_idp = MCDI_OUT_DWORD(req, WOL_FILTER_SET_OUT_FILTER_ID);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+efx_wol_filter_remove(
+ __in efx_nic_t *enp,
+ __in uint32_t filter_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ req.emr_cmd = MC_CMD_WOL_FILTER_REMOVE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WOL_FILTER_REMOVE_IN_LEN;
+ EFX_STATIC_ASSERT(MC_CMD_WOL_FILTER_REMOVE_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_REMOVE_IN_FILTER_ID, filter_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn int
+efx_lightsout_offload_add(
+ __in efx_nic_t *enp,
+ __in efx_lightsout_offload_type_t type,
+ __in efx_lightsout_offload_param_t *paramp,
+ __out uint32_t *filter_idp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN,
+ MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN),
+ MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN)];
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ req.emr_cmd = MC_CMD_ADD_LIGHTSOUT_OFFLOAD;
+ req.emr_in_buf = payload;
+ req.emr_in_length = sizeof (type);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN;
+
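+ /* Each offload protocol has its own input length and layout */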
+ switch (type) {
+ case EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP:
+ req.emr_in_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN;
+ MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+ MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t,
+ ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC),
+ paramp->elop_arp.mac_addr);
+ MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP,
+ paramp->elop_arp.ip);
+ break;
+ case EFX_LIGHTSOUT_OFFLOAD_TYPE_NS:
+ req.emr_in_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN;
+ MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+ MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t,
+ ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC),
+ paramp->elop_ns.mac_addr);
+ memcpy(MCDI_IN2(req, uint8_t,
+ ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6),
+ paramp->elop_ns.solicited_node,
+ sizeof (paramp->elop_ns.solicited_node));
+ memcpy(MCDI_IN2(req, uint8_t, ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6),
+ paramp->elop_ns.ip, sizeof (paramp->elop_ns.ip));
+ break;
+ default:
+ EFSYS_ASSERT3U(type, !=, type);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *filter_idp = MCDI_OUT_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn int
+efx_lightsout_offload_remove(
+ __in efx_nic_t *enp,
+ __in efx_lightsout_offload_type_t type,
+ __in uint32_t filter_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN];
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ req.emr_cmd = MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD;
+ req.emr_in_buf = payload;
+ req.emr_in_length = sizeof (payload);
+ EFX_STATIC_ASSERT(MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ switch (type) {
+ case EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP:
+ MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+ MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP);
+ break;
+ case EFX_LIGHTSOUT_OFFLOAD_TYPE_NS:
+ MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+ MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS);
+ break;
+ default:
+ EFSYS_ASSERT3U(type, !=, type);
+ }
+
+ MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID,
+ filter_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+ void
+efx_wol_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ enp->en_mod_flags &= ~EFX_MOD_WOL;
+}
+
+#endif /* EFSYS_OPT_WOL */
diff --git a/sys/dev/sfxge/common/siena_flash.h b/sys/dev/sfxge/common/siena_flash.h
new file mode 100644
index 0000000..7df3995
--- /dev/null
+++ b/sys/dev/sfxge/common/siena_flash.h
@@ -0,0 +1,132 @@
+/*-
+ * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef _SYS_SIENA_FLASH_H
+#define _SYS_SIENA_FLASH_H
+
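+/* These structures mirror the on-flash layout, so they must be packed */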
+#pragma pack(1)
+
+/* Fixed locations near the start of flash (which may be in the internal PHY
+ * firmware header) point to the boot header.
+ *
+ * - parsed by MC boot ROM and firmware
+ * - reserved (but not parsed) by PHY firmware
+ * - opaque to driver
+ */
+
+#define SIENA_MC_BOOT_PHY_FW_HDR_LEN (0x20)
+
+#define SIENA_MC_BOOT_PTR_LOCATION (0x18) /* First thing we try to boot */
+#define SIENA_MC_BOOT_ALT_PTR_LOCATION (0x1c) /* Alternative if that fails */
+
+#define SIENA_MC_BOOT_HDR_LEN (0x200)
+
+#define SIENA_MC_BOOT_MAGIC (0x51E4A001)
+#define SIENA_MC_BOOT_VERSION (1)
+
+typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_BOOT_MAGIC */
+ efx_word_t hdr_version; /* this structure definition is version 1 */
+ efx_byte_t board_type;
+ efx_byte_t firmware_version_a;
+ efx_byte_t firmware_version_b;
+ efx_byte_t firmware_version_c;
+ efx_word_t checksum; /* of whole header area + firmware image */
+ efx_word_t firmware_version_d;
+ efx_word_t reserved_a[1]; /* (set to 0) */
+ efx_dword_t firmware_text_offset; /* offset to firmware .text */
+ efx_dword_t firmware_text_size; /* length of firmware .text, in bytes */
+ efx_dword_t firmware_data_offset; /* offset to firmware .data */
+ efx_dword_t firmware_data_size; /* length of firmware .data, in bytes */
+ efx_dword_t reserved_b[8]; /* (set to 0) */
+} siena_mc_boot_hdr_t;
+
+#define SIENA_MC_STATIC_CONFIG_MAGIC (0xBDCF5555)
+#define SIENA_MC_STATIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_static_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_STATIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t static_vpd_offset;
+ efx_dword_t static_vpd_length;
+ efx_dword_t capabilities;
+ efx_byte_t mac_addr_base[6];
+ efx_byte_t green_mode_cal; /* Green mode calibration result */
+ efx_byte_t green_mode_valid; /* Whether cal holds a valid value */
+ efx_word_t mac_addr_count;
+ efx_word_t mac_addr_stride;
+ efx_dword_t reserved2[2]; /* (write as zero) */
+ efx_dword_t num_dbi_items;
+ struct {
+ efx_word_t addr;
+ efx_word_t byte_enables;
+ efx_dword_t value;
+ } dbi[];
+} siena_mc_static_config_hdr_t;
+
+#define SIENA_MC_DYNAMIC_CONFIG_MAGIC (0xBDCFDDDD)
+#define SIENA_MC_DYNAMIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_fw_version_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t fw_subtype;
+ efx_word_t version_w;
+ efx_word_t version_x;
+ efx_word_t version_y;
+ efx_word_t version_z;
+} siena_mc_fw_version_t;
+
+typedef struct siena_mc_dynamic_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_DYNAMIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t dynamic_vpd_offset;
+ efx_dword_t dynamic_vpd_length;
+ efx_dword_t num_fw_version_items;
+ siena_mc_fw_version_t fw_version[];
+} siena_mc_dynamic_config_hdr_t;
+
+#define SIENA_MC_EXPROM_SINGLE_MAGIC (0xAA55) /* little-endian uint16_t */
+
+#define SIENA_MC_EXPROM_COMBO_MAGIC (0xB0070102) /* little-endian uint32_t */
+
+typedef struct siena_mc_combo_rom_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_EXPROM_COMBO_MAGIC */
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk0_off; /* infoblk offset */
+ efx_word_t infoblk1_off; /* infoblk offset */
+ efx_byte_t infoblk_len; /* length of space reserved for infoblk structures */
+ efx_byte_t reserved[7]; /* (set to 0) */
+} siena_mc_combo_rom_hdr_t;
+
+#pragma pack()
+
+#endif /* _SYS_SIENA_FLASH_H */
diff --git a/sys/dev/sfxge/common/siena_impl.h b/sys/dev/sfxge/common/siena_impl.h
new file mode 100644
index 0000000..b566adb
--- /dev/null
+++ b/sys/dev/sfxge/common/siena_impl.h
@@ -0,0 +1,477 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SYS_SIENA_IMPL_H
+#define _SYS_SIENA_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_mcdi.h"
+#include "siena_flash.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if EFSYS_OPT_PHY_PROPS
+
+/* START MKCONFIG GENERATED SienaPhyHeaderPropsBlock a8db1f8eb5106efd */
+typedef enum siena_phy_prop_e {
+ SIENA_PHY_NPROPS
+} siena_phy_prop_t;
+
+/* END MKCONFIG GENERATED SienaPhyHeaderPropsBlock */
+
+#endif /* EFSYS_OPT_PHY_PROPS */
+
+#define SIENA_NVRAM_CHUNK 0x80
+
+extern __checkReturn int
+siena_nic_probe(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_PCIE_TUNE
+
+extern __checkReturn int
+siena_nic_pcie_extended_sync(
+ __in efx_nic_t *enp);
+
+#endif
+
+extern __checkReturn int
+siena_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+siena_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn int
+siena_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+siena_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+siena_nic_unprobe(
+ __in efx_nic_t *enp);
+
+#define SIENA_SRAM_ROWS 0x12000
+
+extern void
+siena_sram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn int
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func);
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn int
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __out size_t *sizep);
+
+extern __checkReturn int
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in unsigned int partn);
+
+extern __checkReturn int
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn int
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn int
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in unsigned int partn);
+
+extern __checkReturn int
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep);
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn int
+siena_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn int
+siena_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep);
+
+extern __checkReturn int
+siena_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn int
+siena_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *pref_chunkp);
+
+extern __checkReturn int
+siena_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn int
+siena_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn int
+siena_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+siena_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn int
+siena_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4]);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn int
+siena_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn int
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn int
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn int
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn int
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn int
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn int
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn int
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+siena_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+typedef struct siena_link_state_s {
+ uint32_t sls_adv_cap_mask;
+ uint32_t sls_lp_cap_mask;
+ unsigned int sls_fcntl;
+ efx_link_mode_t sls_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t sls_loopback;
+#endif
+ boolean_t sls_mac_up;
+} siena_link_state_t;
+
+extern void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn int
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp);
+
+extern __checkReturn int
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn int
+siena_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+siena_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __out_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat);
+
+extern __checkReturn int
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __out_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_PHY_PROPS
+
+#if EFSYS_OPT_NAMES
+
+extern const char __cs *
+siena_phy_prop_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn int
+siena_phy_prop_get(
+ __in efx_nic_t *enp,
+ __in unsigned int id,
+ __in uint32_t flags,
+ __out uint32_t *valp);
+
+extern __checkReturn int
+siena_phy_prop_set(
+ __in efx_nic_t *enp,
+ __in unsigned int id,
+ __in uint32_t val);
+
+#endif /* EFSYS_OPT_PHY_PROPS */
+
+#if EFSYS_OPT_PHY_BIST
+
+extern __checkReturn int
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type);
+
+extern __checkReturn int
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type,
+ __out efx_phy_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type);
+
+#endif /* EFSYS_OPT_PHY_BIST */
+
+extern __checkReturn int
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn int
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn int
+siena_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn int
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn int
+siena_mac_stats_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+siena_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn int
+siena_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+extern __checkReturn int
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __out_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __out_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+extern __checkReturn int
+siena_mon_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn int
+siena_mon_reconfigure(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_MON_STATS
+
+extern void
+siena_mon_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t dmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint32_t *vmaskp,
+ __out_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *value);
+
+extern __checkReturn int
+siena_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep);
+
+extern __checkReturn int
+siena_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __out_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SIENA_IMPL_H */
diff --git a/sys/dev/sfxge/common/siena_mac.c b/sys/dev/sfxge/common/siena_mac.c
new file mode 100644
index 0000000..7facf1c
--- /dev/null
+++ b/sys/dev/sfxge/common/siena_mac.c
@@ -0,0 +1,545 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "efsys.h"
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ __checkReturn int
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ siena_link_state_t sls;
+ int rc;
+
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_fcntl = sls.sls_fcntl;
+
+ *link_modep = sls.sls_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ siena_link_state_t sls;
+ int rc;
+
+ /*
+ * Because Siena doesn't *require* polling, we can't rely on
+ * siena_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ *mac_upp = sls.sls_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MCAST_HASH_IN_LEN)];
+ efx_mcdi_req_t req;
+ unsigned int fcntl;
+ int rc;
+
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ EFX_STATIC_ASSERT(MC_CMD_SET_MAC_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, !epp->ep_unicst,
+ SET_MAC_IN_REJECT_BRDCST, !epp->ep_brdcst);
+
+ if (epp->ep_fcntl_autoneg)
+ /* efx_fcntl_set() has already set the phy capabilities */
+ fcntl = MC_CMD_FCNTL_AUTO;
+ else if (epp->ep_fcntl & EFX_FCNTL_RESPOND)
+ fcntl = (epp->ep_fcntl & EFX_FCNTL_GENERATE)
+ ? MC_CMD_FCNTL_BIDIR
+ : MC_CMD_FCNTL_RESPOND;
+ else
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, fcntl);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* Push multicast hash. Set the broadcast bit (0xff) appropriately */
+ req.emr_cmd = MC_CMD_SET_MCAST_HASH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MCAST_HASH_IN_LEN;
+ EFX_STATIC_ASSERT(MC_CMD_SET_MCAST_HASH_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ memcpy(MCDI_IN2(req, uint8_t, SET_MCAST_HASH_IN_HASH0),
+ epp->ep_multicst_hash, sizeof (epp->ep_multicst_hash));
+ if (epp->ep_brdcst)
+ EFX_SET_OWORD_BIT(*MCDI_IN2(req, efx_oword_t,
+ SET_MCAST_HASH_IN_HASH1), 0x7f);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn int
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ int rc;
+
+ /* The PHY object handles this on Siena */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+ __checkReturn int
+siena_mac_stats_clear(
+ __in efx_nic_t *enp)
+{
+ uint8_t payload[MC_CMD_MAC_STATS_IN_LEN];
+ efx_mcdi_req_t req;
+ int rc;
+
+ req.emr_cmd = MC_CMD_MAC_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = sizeof (payload);
+ EFX_STATIC_ASSERT(MC_CMD_MAC_STATS_OUT_DMA_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_POPULATE_DWORD_3(req, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, 0,
+ MAC_STATS_IN_CLEAR, 1,
+ MAC_STATS_IN_PERIODIC_CHANGE, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ uint8_t payload[MC_CMD_MAC_STATS_IN_LEN];
+ efx_mcdi_req_t req;
+ size_t bytes;
+ int rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <=
+ EFX_MAC_STATS_SIZE);
+
+ bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
+
+ req.emr_cmd = MC_CMD_MAC_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = sizeof (payload);
+ EFX_STATIC_ASSERT(MC_CMD_MAC_STATS_OUT_DMA_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ */
+ MCDI_IN_POPULATE_DWORD_3(req, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, 1,
+ MAC_STATS_IN_CLEAR, 0,
+ MAC_STATS_IN_PERIODIC_CHANGE, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ uint8_t payload[MC_CMD_MAC_STATS_IN_LEN];
+ efx_mcdi_req_t req;
+ size_t bytes;
+ int rc;
+
+ bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
+
+ req.emr_cmd = MC_CMD_MAC_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = sizeof (payload);
+ EFX_STATIC_ASSERT(MC_CMD_MAC_STATS_OUT_DMA_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ */
+ MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, 0,
+ MAC_STATS_IN_CLEAR, 0,
+ MAC_STATS_IN_PERIODIC_CHANGE, 1,
+ MAC_STATS_IN_PERIODIC_ENABLE, period_ms ? 1 : 0,
+ MAC_STATS_IN_PERIODIC_NOEVENT, events ? 0 : 1,
+ MAC_STATS_IN_PERIOD_MS, period_ms);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
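+/*
+ * MAC statistics are DMAed by the MC as an array of 64-bit counters
+ * indexed by MC_CMD_MAC_*; read one counter from the DMA buffer.
+ */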
+#define SIENA_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
+
+ __checkReturn int
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __out_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __out_opt uint32_t *generationp)
+{
+ efx_qword_t rx_pkts;
+ efx_qword_t value;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+
+ _NOTE(ARGUNUSED(enp))
+
+ /* Read END first so we don't race with the MC */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
+
+ /* TX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &rx_pkts);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &rx_pkts);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ EFSYS_MEM_READ_BARRIER();
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+ /* Check that we didn't read the stats in the middle of a DMA */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/siena_mon.c b/sys/dev/sfxge/common/siena_mon.c
new file mode 100644
index 0000000..de9a1df
--- /dev/null
+++ b/sys/dev/sfxge/common/siena_mon.c
@@ -0,0 +1,248 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "efsys.h"
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_SIENA
+
+ __checkReturn int
+siena_mon_reset(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ return (0);
+}
+
+ __checkReturn int
+siena_mon_reconfigure(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ return (0);
+}
+
+#if EFSYS_OPT_MON_STATS
+
+#define SIENA_MON_WRONG_PORT (uint16_t)0xffff
+
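+/*
+ * Per-port mappings from MC sensor numbers to EFX_MON_STAT_* ids.
+ * SIENA_MON_WRONG_PORT marks sensors that belong to the other port.
+ */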
+static __cs uint16_t __siena_mon_port0_map[] = {
+ EFX_MON_STAT_INT_TEMP, /* MC_CMD_SENSOR_CONTROLLER_TEMP */
+ EFX_MON_STAT_EXT_TEMP, /* MC_CMD_SENSOR_PHY_COMMON_TEMP */
+ EFX_MON_STAT_INT_COOLING, /* MC_CMD_SENSOR_CONTROLLER_COOLING */
+ EFX_MON_STAT_EXT_TEMP, /* MC_CMD_SENSOR_PHY0_TEMP */
+ EFX_MON_STAT_EXT_COOLING, /* MC_CMD_SENSOR_PHY0_COOLING */
+ SIENA_MON_WRONG_PORT, /* MC_CMD_SENSOR_PHY1_TEMP */
+ SIENA_MON_WRONG_PORT, /* MC_CMD_SENSOR_PHY1_COOLING */
+ EFX_MON_STAT_1V, /* MC_CMD_SENSOR_IN_1V0 */
+ EFX_MON_STAT_1_2V, /* MC_CMD_SENSOR_IN_1V2 */
+ EFX_MON_STAT_1_8V, /* MC_CMD_SENSOR_IN_1V8 */
+ EFX_MON_STAT_2_5V, /* MC_CMD_SENSOR_IN_2V5 */
+ EFX_MON_STAT_3_3V, /* MC_CMD_SENSOR_IN_3V3 */
+ EFX_MON_STAT_12V, /* MC_CMD_SENSOR_IN_12V0 */
+};
+
+static __cs uint16_t __siena_mon_port1_map[] = {
+ EFX_MON_STAT_INT_TEMP, /* MC_CMD_SENSOR_CONTROLLER_TEMP */
+ EFX_MON_STAT_EXT_TEMP, /* MC_CMD_SENSOR_PHY_COMMON_TEMP */
+ EFX_MON_STAT_INT_COOLING, /* MC_CMD_SENSOR_CONTROLLER_COOLING */
+ SIENA_MON_WRONG_PORT, /* MC_CMD_SENSOR_PHY0_TEMP */
+ SIENA_MON_WRONG_PORT, /* MC_CMD_SENSOR_PHY0_COOLING */
+ EFX_MON_STAT_EXT_TEMP, /* MC_CMD_SENSOR_PHY1_TEMP */
+ EFX_MON_STAT_EXT_COOLING, /* MC_CMD_SENSOR_PHY1_COOLING */
+ EFX_MON_STAT_1V, /* MC_CMD_SENSOR_IN_1V0 */
+ EFX_MON_STAT_1_2V, /* MC_CMD_SENSOR_IN_1V2 */
+ EFX_MON_STAT_1_8V, /* MC_CMD_SENSOR_IN_1V8 */
+ EFX_MON_STAT_2_5V, /* MC_CMD_SENSOR_IN_2V5 */
+ EFX_MON_STAT_3_3V, /* MC_CMD_SENSOR_IN_3V3 */
+ EFX_MON_STAT_12V, /* MC_CMD_SENSOR_IN_12V0 */
+};
+
+#define SIENA_STATIC_SENSOR_ASSERT(_field) \
+ EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field \
+ == EFX_MON_STAT_STATE_ ## _field)
+
+ void
+siena_mon_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t dmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint32_t *vmaskp,
+ __out_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *value)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ uint16_t *sensor_map;
+ uint16_t mc_sensor;
+ size_t mc_sensor_max;
+ uint32_t vmask = 0;
+
+ /* Assert the MC_CMD_SENSOR and EFX_MON_STATE namespaces agree */
+ SIENA_STATIC_SENSOR_ASSERT(OK);
+ SIENA_STATIC_SENSOR_ASSERT(WARNING);
+ SIENA_STATIC_SENSOR_ASSERT(FATAL);
+ SIENA_STATIC_SENSOR_ASSERT(BROKEN);
+
+ EFX_STATIC_ASSERT(sizeof (__siena_mon_port1_map)
+ == sizeof (__siena_mon_port0_map));
+ mc_sensor_max = EFX_ARRAY_SIZE(__siena_mon_port0_map);
+ sensor_map = (emip->emi_port == 1)
+ ? __siena_mon_port0_map
+ : __siena_mon_port1_map;
+
+ /*
+ * dmask may legitimately contain sensors not understood by the driver
+ */
+ for (mc_sensor = 0; mc_sensor < mc_sensor_max; ++mc_sensor) {
+ uint16_t efx_sensor = sensor_map[mc_sensor];
+
+ if (efx_sensor == SIENA_MON_WRONG_PORT)
+ continue;
+ EFSYS_ASSERT(efx_sensor < EFX_MON_NSTATS);
+
+ if (~dmask & (1 << mc_sensor))
+ continue;
+
+ vmask |= (1 << efx_sensor);
+ if (value != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_mon_stat_value_t *emsvp = value + efx_sensor;
+ efx_dword_t dword;
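+ /*
+ * Sensor readings are DMAed as an array of
+ * 32-bit entries indexed by MC sensor number.
+ */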
+ EFSYS_MEM_READD(esmp, 4 * mc_sensor, &dword);
+ emsvp->emsv_value =
+ (uint16_t)EFX_DWORD_FIELD(
+ dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+ emsvp->emsv_state =
+ (uint16_t)EFX_DWORD_FIELD(
+ dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ }
+ }
+
+ if (vmaskp != NULL)
+ *vmaskp = vmask;
+}
+
+ __checkReturn int
+siena_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint16_t ev_monitor;
+ uint16_t ev_state;
+ uint16_t ev_value;
+ uint16_t *sensor_map;
+ efx_mon_stat_t id;
+ int rc;
+
+ sensor_map = (emip->emi_port == 1)
+ ? __siena_mon_port0_map
+ : __siena_mon_port1_map;
+
+ ev_monitor = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_MONITOR);
+ ev_state = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_STATE);
+ ev_value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE);
+
+ /* Hardware must support this statistic */
+ EFSYS_ASSERT((1 << ev_monitor) & encp->enc_siena_mon_stat_mask);
+
+ /* But we don't have to understand it */
+ if (ev_monitor >= EFX_ARRAY_SIZE(__siena_mon_port0_map)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ id = sensor_map[ev_monitor];
+ if (id == SIENA_MON_WRONG_PORT)
+ return (ENODEV);
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ *idp = id;
+ valuep->emsv_value = ev_value;
+ valuep->emsv_state = ev_state;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __out_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t dmask = encp->enc_siena_mon_stat_mask;
+ uint32_t vmask;
+ uint8_t payload[MC_CMD_READ_SENSORS_IN_LEN];
+ efx_mcdi_req_t req;
+ int rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ req.emr_cmd = MC_CMD_READ_SENSORS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = sizeof (payload);
+ EFX_STATIC_ASSERT(MC_CMD_READ_SENSORS_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ siena_mon_decode_stats(enp, dmask, esmp, &vmask, values);
+ EFSYS_ASSERT(vmask == encp->enc_mon_stat_mask);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_SIENA */
diff --git a/sys/dev/sfxge/common/siena_nic.c b/sys/dev/sfxge/common/siena_nic.c
new file mode 100644
index 0000000..9eb0db2
--- /dev/null
+++ b/sys/dev/sfxge/common/siena_nic.c
@@ -0,0 +1,964 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "efsys.h"
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn int
+siena_nic_get_partn_mask(
+ __in efx_nic_t *enp,
+ __out unsigned int *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
+ int rc;
+
+ req.emr_cmd = MC_CMD_NVRAM_TYPES;
+ EFX_STATIC_ASSERT(MC_CMD_NVRAM_TYPES_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = sizeof (outbuf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, NVRAM_TYPES_OUT_TYPES);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static __checkReturn int
+siena_nic_exit_assertion_handler(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_REBOOT_IN_LEN];
+ int rc;
+
+ req.emr_cmd = MC_CMD_REBOOT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
+ EFX_STATIC_ASSERT(MC_CMD_REBOOT_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
+ MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0 && req.emr_rc != EIO) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static __checkReturn int
+siena_nic_read_assertion(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN,
+ MC_CMD_GET_ASSERTS_OUT_LEN)];
+ const char *reason;
+ unsigned int flags;
+ unsigned int index;
+ unsigned int ofst;
+ int retry;
+ int rc;
+
+ /*
+ * Before we attempt to chat to the MC, we should verify that the MC
+ * isn't in its assertion handler, either due to a previous reboot,
+ * or because we're reinitializing due to an eec_exception().
+ *
+ * Use GET_ASSERTS to read any assertion state that may be present.
+ * Retry this command twice. Once because a boot-time assertion failure
+ * might cause the 1st MCDI request to fail. And once again because
+ * we might race with siena_nic_exit_assertion_handler() running on the
+ * other port.
+ */
+ retry = 2;
+ do {
+ req.emr_cmd = MC_CMD_GET_ASSERTS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
+ efx_mcdi_execute(enp, &req);
+
+ } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /* Print out any assertion state recorded */
+ flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+ return (0);
+
+ reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+ ? "system-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+ ? "thread-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+ ? "watchdog reset"
+ : "unknown assertion";
+ EFSYS_PROBE3(mcpu_assertion,
+ const char *, reason, unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));
+
+ /* Print out the registers */
+ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
+ for (index = 1; index < 32; index++) {
+ EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
+ EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
+ EFX_DWORD_0));
+ ofst += sizeof (efx_dword_t);
+ }
+ EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static __checkReturn int
+siena_nic_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_DRV_ATTACH_IN_LEN];
+ int rc;
+
+ req.emr_cmd = MC_CMD_DRV_ATTACH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_PCIE_TUNE
+
+ __checkReturn int
+siena_nic_pcie_extended_sync(
+ __in efx_nic_t *enp)
+{
+ uint8_t inbuf[MC_CMD_WORKAROUND_IN_LEN];
+ efx_mcdi_req_t req;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ req.emr_cmd = MC_CMD_WORKAROUND;
+ req.emr_in_buf = inbuf;
+ req.emr_in_length = sizeof (inbuf);
+ EFX_STATIC_ASSERT(MC_CMD_WORKAROUND_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, MC_CMD_WORKAROUND_BUG17230);
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_PCIE_TUNE */
+
+static __checkReturn int
+siena_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ uint8_t outbuf[MAX(MC_CMD_GET_BOARD_CFG_OUT_LEN,
+ MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)];
+ efx_mcdi_req_t req;
+ uint8_t *src;
+ int rc;
+
+ /* Board configuration */
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ EFX_STATIC_ASSERT(MC_CMD_GET_BOARD_CFG_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
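+ /* The board config carries a separate MAC address base for each port */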
+ if (emip->emi_port == 1)
+ src = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
+ else
+ src = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, src);
+
+ encp->enc_board_type = MCDI_OUT_DWORD(req,
+ GET_BOARD_CFG_OUT_BOARD_TYPE);
+
+ /* Resource limits */
+ req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
+ EFX_STATIC_ASSERT(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc == 0) {
+ if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ encp->enc_evq_limit = MCDI_OUT_DWORD(req,
+ GET_RESOURCE_LIMITS_OUT_EVQ);
+ encp->enc_txq_limit = MIN(EFX_TXQ_LIMIT_TARGET,
+ MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ));
+ encp->enc_rxq_limit = MIN(EFX_RXQ_LIMIT_TARGET,
+ MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ));
+ } else if (req.emr_rc == ENOTSUP) {
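+ /* Firmware without GET_RESOURCE_LIMITS support: fall back to fixed limits */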
+ encp->enc_evq_limit = 1024;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ } else {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+
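+ /* Reserve 16 buffer table entries per TXQ and 64 per RXQ from the SRAM rows */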
+ encp->enc_buftbl_limit = SIENA_SRAM_ROWS -
+ (encp->enc_txq_limit * 16) - (encp->enc_rxq_limit * 64);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static __checkReturn int
+siena_phy_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
+ int rc;
+
+ req.emr_cmd = MC_CMD_GET_PHY_CFG;
+ EFX_STATIC_ASSERT(MC_CMD_GET_PHY_CFG_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = sizeof (outbuf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
+#if EFSYS_OPT_NAMES
+ (void) strncpy(encp->enc_phy_name,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME),
+ MIN(sizeof (encp->enc_phy_name) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
+#endif /* EFSYS_OPT_NAMES */
+ (void) memset(encp->enc_phy_revision, 0,
+ sizeof (encp->enc_phy_revision));
+ memcpy(encp->enc_phy_revision,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
+ MIN(sizeof (encp->enc_phy_revision) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
+#if EFSYS_OPT_PHY_LED_CONTROL
+ encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
+ (1 << EFX_PHY_LED_OFF) |
+ (1 << EFX_PHY_LED_ON));
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+#if EFSYS_OPT_PHY_PROPS
+ encp->enc_phy_nprops = 0;
+#endif /* EFSYS_OPT_PHY_PROPS */
+
+ /* Get the media type of the fixed port, if recognised. */
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
+ epp->ep_fixed_port_type =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
+ epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
+
+ epp->ep_phy_cap_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+#if EFSYS_OPT_PHY_FLAGS
+ encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);
+
+ /* Populate internal state */
+ encp->enc_siena_channel =
+ (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);
+
+#if EFSYS_OPT_PHY_STATS
+ encp->enc_siena_phy_stat_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);
+
+ /* Convert the MCDI statistic mask into the EFX_PHY_STAT mask */
+ siena_phy_decode_stats(enp, encp->enc_siena_phy_stat_mask,
+ NULL, &encp->enc_phy_stat_mask, NULL);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_PHY_BIST
+ encp->enc_bist_mask = 0;
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
+ encp->enc_bist_mask |= (1 << EFX_PHY_BIST_TYPE_CABLE_SHORT);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_LONG))
+ encp->enc_bist_mask |= (1 << EFX_PHY_BIST_TYPE_CABLE_LONG);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST))
+ encp->enc_bist_mask |= (1 << EFX_PHY_BIST_TYPE_NORMAL);
+#endif /* EFSYS_OPT_PHY_BIST */
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+static __checkReturn int
+siena_loopback_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
+ int rc;
+
+ req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES;
+ EFX_STATIC_ASSERT(MC_CMD_GET_LOOPBACK_MODES_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = sizeof (outbuf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /*
+ * We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree
+ * in siena_phy.c:siena_phy_get_link()
+ */
+ encp->enc_loopback_types[EFX_LINK_100FDX] = EFX_LOOPBACK_MASK &
+ MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_100M) &
+ MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_SUGGESTED);
+ encp->enc_loopback_types[EFX_LINK_1000FDX] = EFX_LOOPBACK_MASK &
+ MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_1G) &
+ MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_SUGGESTED);
+ encp->enc_loopback_types[EFX_LINK_10000FDX] = EFX_LOOPBACK_MASK &
+ MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_10G) &
+ MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_SUGGESTED);
+ encp->enc_loopback_types[EFX_LINK_UNKNOWN] =
+ (1 << EFX_LOOPBACK_OFF) |
+ encp->enc_loopback_types[EFX_LINK_100FDX] |
+ encp->enc_loopback_types[EFX_LINK_1000FDX] |
+ encp->enc_loopback_types[EFX_LINK_10000FDX];
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MON_STATS
+
+static __checkReturn int
+siena_monitor_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t outbuf[MCDI_CTL_SDU_LEN_MAX];
+ int rc;
+
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ EFX_STATIC_ASSERT(MC_CMD_SENSOR_INFO_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = sizeof (outbuf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_SENSOR_INFO_OUT_MASK_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ encp->enc_siena_mon_stat_mask =
+ MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK);
+ encp->enc_mon_type = EFX_MON_SFC90X0;
+
+ siena_mon_decode_stats(enp, encp->enc_siena_mon_stat_mask,
+ NULL, &(encp->enc_mon_stat_mask), NULL);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+ __checkReturn int
+siena_nic_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ siena_link_state_t sls;
+ unsigned int mask;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Read and clear any assertion state */
+ if ((rc = siena_nic_read_assertion(enp)) != 0)
+ goto fail1;
+
+ /* Exit the assertion handler */
+ if ((rc = siena_nic_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ /* Wrestle control from the BMC */
+ if ((rc = siena_nic_attach(enp, B_TRUE)) != 0)
+ goto fail3;
+
+ if ((rc = siena_board_cfg(enp)) != 0)
+ goto fail4;
+
+ encp->enc_evq_moderation_max =
+ EFX_EV_TIMER_QUANTUM << FRF_CZ_TIMER_VAL_WIDTH;
+
+ if ((rc = siena_phy_cfg(enp)) != 0)
+ goto fail5;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = siena_nic_reset(enp)) != 0)
+ goto fail6;
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+ if ((rc = siena_nic_get_partn_mask(enp, &mask)) != 0)
+ goto fail8;
+ enp->en_u.siena.enu_partn_mask = mask;
+#endif
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = siena_mac_stats_clear(enp)) != 0)
+ goto fail9;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = siena_loopback_cfg(enp)) != 0)
+ goto fail10;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = siena_monitor_cfg(enp)) != 0)
+ goto fail11;
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail11:
+ EFSYS_PROBE(fail11);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail10:
+ EFSYS_PROBE(fail10);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail9:
+ EFSYS_PROBE(fail9);
+#endif
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+fail8:
+ EFSYS_PROBE(fail8);
+#endif
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* siena_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = siena_nic_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = siena_nic_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ req.emr_cmd = MC_CMD_PORT_RESET;
+ EFX_STATIC_ASSERT(MC_CMD_PORT_RESET_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ EFX_STATIC_ASSERT(MC_CMD_PORT_RESET_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static __checkReturn int
+siena_nic_logging(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LOG_CTRL_IN_LEN];
+ int rc;
+
+ req.emr_cmd = MC_CMD_LOG_CTRL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
+ EFX_STATIC_ASSERT(MC_CMD_LOG_CTRL_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
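+ /* Direct MC log output to an event queue, using event queue 0 */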
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
+ MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+siena_nic_rx_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * RX_INGR_EN is always enabled on Siena, because we rely on
+ * the RX parser to be resilient to missing SOP/EOP.
+ */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_INGR_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Disable parsing of additional 802.1Q-in-Q packets */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_nic_usrev_dis(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_CZ_USREV_DIS, 1);
+ EFX_BAR_WRITEO(enp, FR_CZ_USR_EV_CFG, &oword);
+}
+
+ __checkReturn int
+siena_nic_init(
+ __in efx_nic_t *enp)
+{
+ int rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ if ((rc = siena_nic_logging(enp)) != 0)
+ goto fail1;
+
+ siena_sram_init(enp);
+
+ /* Configure Siena's RX block */
+ siena_nic_rx_cfg(enp);
+
+ /* Disable USR_EVents for now */
+ siena_nic_usrev_dis(enp);
+
+ /* bug17057: Ensure set_link is called */
+ if ((rc = siena_phy_reconfigure(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+siena_nic_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+siena_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+ (void) siena_nic_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+static efx_register_set_t __cs __siena_registers[] = {
+ { FR_AZ_ADR_REGION_REG_OFST, 0, 1 },
+ { FR_CZ_USR_EV_CFG_OFST, 0, 1 },
+ { FR_AZ_RX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_RESERVED_REG_OFST, 0, 1 },
+ { FR_AZ_SRM_TX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_PF_WM_REG_OFST, 0, 1 },
+ { FR_AZ_DP_CTRL_REG_OFST, 0, 1 },
+ { FR_BZ_RX_RSS_TKEY_REG_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG1_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG2_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG3_OFST, 0, 1}
+};
+
+static const uint32_t __cs __siena_register_masks[] = {
+ 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF,
+ 0x000103FF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000,
+ 0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF,
+ 0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF,
+ 0x001FFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000003, 0x00000000, 0x00000000, 0x00000000,
+ 0x000003FF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000FFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000
+};
+
+static efx_register_set_t __cs __siena_tables[] = {
+ { FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP,
+ FR_AZ_RX_FILTER_TBL0_ROWS },
+ { FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP,
+ FR_CZ_RX_MAC_FILTER_TBL0_ROWS },
+ { FR_AZ_RX_DESC_PTR_TBL_OFST,
+ FR_AZ_RX_DESC_PTR_TBL_STEP, FR_CZ_RX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TX_DESC_PTR_TBL_OFST,
+ FR_AZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TIMER_TBL_OFST, FR_AZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
+ { FR_CZ_TX_FILTER_TBL0_OFST,
+ FR_CZ_TX_FILTER_TBL0_STEP, FR_CZ_TX_FILTER_TBL0_ROWS },
+ { FR_CZ_TX_MAC_FILTER_TBL0_OFST,
+ FR_CZ_TX_MAC_FILTER_TBL0_STEP, FR_CZ_TX_MAC_FILTER_TBL0_ROWS }
+};
+
+static const uint32_t __cs __siena_table_masks[] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000003FF,
+ 0xFFFF0FFF, 0xFFFFFFFF, 0x00000E7F, 0x00000000,
+ 0xFFFFFFFF, 0x0FFFFFFF, 0x01800000, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x0C000000, 0x00000000,
+ 0x3FFFFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000013FF,
+ 0xFFFF07FF, 0xFFFFFFFF, 0x0000007F, 0x00000000,
+};
+
+ __checkReturn int
+siena_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_register_set_t *rsp;
+ const uint32_t *dwordp;
+ unsigned int nitems;
+ unsigned int count;
+ int rc;
+
+ /* Fill out the register mask entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks)
+ == EFX_ARRAY_SIZE(__siena_registers) * 4);
+
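+ /* Each 128-bit register mask is stored as four 32-bit words */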
+ nitems = EFX_ARRAY_SIZE(__siena_registers);
+ dwordp = __siena_register_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_registers + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ /* Fill out the register table entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks)
+ == EFX_ARRAY_SIZE(__siena_tables) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_tables);
+ dwordp = __siena_table_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_tables + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ if ((rc = efx_nic_test_registers(enp, __siena_registers,
+ EFX_ARRAY_SIZE(__siena_registers))) != 0)
+ goto fail1;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail2;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail3;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/siena_nvram.c b/sys/dev/sfxge/common/siena_nvram.c
new file mode 100644
index 0000000..9cdd3ad
--- /dev/null
+++ b/sys/dev/sfxge/common/siena_nvram.c
@@ -0,0 +1,985 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn int
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __out size_t *sizep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_INFO_IN_LEN,
+ MC_CMD_NVRAM_INFO_OUT_LEN)];
+ int rc;
+
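+ /* Reject partitions absent from the mask reported by NVRAM_TYPES */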
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ req.emr_cmd = MC_CMD_NVRAM_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_INFO_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_INFO_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_SIZE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in unsigned int partn)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
+ int rc;
+
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_START;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_IN_LEN;
+ EFX_STATIC_ASSERT(MC_CMD_NVRAM_UPDATE_START_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_START_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_READ_IN_LEN,
+ MC_CMD_NVRAM_READ_OUT_LEN(SIENA_NVRAM_CHUNK))];
+ size_t chunk;
+ int rc;
+
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ req.emr_cmd = MC_CMD_NVRAM_READ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_READ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length =
+ MC_CMD_NVRAM_READ_OUT_LEN(SIENA_NVRAM_CHUNK);
+
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_LENGTH, chunk);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_NVRAM_READ_OUT_LEN(chunk)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ memcpy(data,
+ MCDI_OUT2(req, uint8_t, NVRAM_READ_OUT_READ_BUFFER),
+ chunk);
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_NVRAM_ERASE_IN_LEN];
+ int rc;
+
+ req.emr_cmd = MC_CMD_NVRAM_ERASE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN;
+ EFX_STATIC_ASSERT(MC_CMD_NVRAM_ERASE_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_NVRAM_WRITE_IN_LEN(SIENA_NVRAM_CHUNK)];
+ size_t chunk;
+ int rc;
+
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ req.emr_cmd = MC_CMD_NVRAM_WRITE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(chunk);
+ EFX_STATIC_ASSERT(MC_CMD_NVRAM_WRITE_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_LENGTH, chunk);
+
+ memcpy(MCDI_IN2(req, uint8_t, NVRAM_WRITE_IN_WRITE_BUFFER),
+ data, chunk);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in unsigned int partn)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
+ uint32_t reboot;
+ int rc;
+
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN;
+ EFX_STATIC_ASSERT(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ /*
+ * Reboot into the new image only for PHYs. The driver has to
+ * explicitly cope with an MC reboot after a firmware update.
+ */
+ reboot = (partn == MC_CMD_NVRAM_TYPE_PHY_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_PHY_PORT1 ||
+ partn == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO);
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_IN_REBOOT, reboot);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return;
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct siena_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port;
+ efx_nvram_type_t nvtype;
+} siena_parttbl_entry_t;
+
+static siena_parttbl_entry_t siena_parttbl[] = {
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 1, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 2, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 1, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 2, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 1, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 2, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT0, 1, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT1, 2, EFX_NVRAM_PHY},
+ {0, 0, 0},
+};
+
+static __checkReturn siena_parttbl_entry_t *
+siena_parttbl_entry(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ siena_parttbl_entry_t *entry;
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ for (entry = siena_parttbl; entry->port > 0; ++entry) {
+ if (entry->port == emip->emi_port && entry->nvtype == type)
+ return (entry);
+ }
+
+ return (NULL);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn int
+siena_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ siena_parttbl_entry_t *entry;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TEST_IN_LEN,
+ MC_CMD_NVRAM_TEST_OUT_LEN)];
+ int result;
+ int rc;
+
+ req.emr_cmd = MC_CMD_NVRAM_TEST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TEST_OUT_LEN;
+
+ /*
+ * Iterate over the list of supported partition types
+ * applicable to *this* port
+ */
+ for (entry = siena_parttbl; entry->port > 0; ++entry) {
+ if (entry->port != emip->emi_port ||
+ !(enp->en_u.siena.enu_partn_mask & (1 << entry->partn)))
+ continue;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_TEST_IN_TYPE, entry->partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TEST_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ result = MCDI_OUT_DWORD(req, NVRAM_TEST_OUT_RESULT);
+ if (result == MC_CMD_NVRAM_TEST_FAIL) {
+
+ EFSYS_PROBE1(nvram_test_failure, int, entry->partn);
+
+ rc = EINVAL;
+ goto fail3;
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn int
+siena_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep)
+{
+ siena_parttbl_entry_t *entry;
+ int rc;
+
+ if ((entry = siena_parttbl_entry(enp, type)) == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = siena_nvram_partn_size(enp, entry->partn, sizep)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ *sizep = 0;
+
+ return (rc);
+}
+
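+ /* Size of a dynamic config header holding _nitems firmware version entries */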
+#define SIENA_DYNAMIC_CFG_SIZE(_nitems) \
+ (sizeof (siena_mc_dynamic_config_hdr_t) + ((_nitems) * \
+ sizeof (((siena_mc_dynamic_config_hdr_t *)NULL)->fw_version[0])))
+
+ __checkReturn int
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep)
+{
+ siena_mc_dynamic_config_hdr_t *dcfg;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int nversions;
+ unsigned int pos;
+ unsigned int region;
+ int rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1);
+
+ /*
+ * Allocate sufficient memory for the entire dynamic config area, even
+ * if we're not actually going to read in the VPD.
+ */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg);
+ if (dcfg == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail3;
+
+ /* Verify the magic */
+ if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0)
+ != SIENA_MC_DYNAMIC_CONFIG_MAGIC)
+ goto invalid1;
+
+ /* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_DYNAMIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nversions = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the partn size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size)
+ goto invalid2;
+
+ /* Verify the header has room for all its versions */
+ if (hdr_length < SIENA_DYNAMIC_CFG_SIZE(0) ||
+ hdr_length < SIENA_DYNAMIC_CFG_SIZE(nversions))
+ goto invalid3;
+
+ /*
+ * Read the remaining portion of the dcfg, either including
+ * the whole of VPD (there is no vpd length in this structure,
+ * so we have to parse each tag), or just the dcfg header itself
+ */
+ region = vpd ? vpd_offset + vpd_length : hdr_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)dcfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail4;
+ }
+
+ /* Verify the checksum: the header bytes must sum to zero modulo 256 */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ if (cksum != 0)
+ goto invalid4;
+
+ goto done;
+
+invalid4:
+ EFSYS_PROBE(invalid4);
+invalid3:
+ EFSYS_PROBE(invalid3);
+invalid2:
+ EFSYS_PROBE(invalid2);
+invalid1:
+ EFSYS_PROBE(invalid1);
+
+ /*
+ * Construct a new "null" dcfg, with an empty version vector,
+ * and an empty VPD chunk trailing. This has the neat side effect
+ * of testing the exception paths in the write path.
+ */
+ EFX_POPULATE_DWORD_1(dcfg->magic,
+ EFX_DWORD_0, SIENA_MC_DYNAMIC_CONFIG_MAGIC);
+ EFX_POPULATE_WORD_1(dcfg->length, EFX_WORD_0, sizeof (*dcfg));
+ EFX_POPULATE_BYTE_1(dcfg->version, EFX_BYTE_0,
+ SIENA_MC_DYNAMIC_CONFIG_VERSION);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, sizeof (*dcfg));
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, 0);
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, EFX_DWORD_0, 0);
+
+done:
+ *dcfgp = dcfg;
+ *sizep = size;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, dcfg);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static __checkReturn int
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __out uint32_t *subtypep)
+{
+ efx_mcdi_req_t req;
+ uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
+ efx_word_t *fw_list;
+ int rc;
+
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ EFX_STATIC_ASSERT(MC_CMD_GET_BOARD_CFG_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = sizeof (outbuf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ fw_list = MCDI_OUT2(req, efx_word_t,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ *subtypep = EFX_WORD_FIELD(fw_list[partn], EFX_WORD_0);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ siena_mc_dynamic_config_hdr_t *dcfg;
+ siena_parttbl_entry_t *entry;
+ unsigned int dcfg_partn;
+ unsigned int partn;
+ int rc;
+
+ if ((entry = siena_parttbl_entry(enp, type)) == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ partn = entry->partn;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_get_subtype(enp, partn, subtypep)) != 0)
+ goto fail3;
+
+ /*
+ * Some partitions are accessible from both ports (for instance BOOTROM).
+ * Find the highest version reported by all dcfg structures on ports
+ * that have access to this partition.
+ */
+ version[0] = version[1] = version[2] = version[3] = 0;
+ for (entry = siena_parttbl; entry->port > 0; ++entry) {
+ unsigned int nitems;
+ uint16_t temp[4];
+ size_t length;
+
+ if (entry->partn != partn)
+ continue;
+
+ dcfg_partn = (entry->port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+ /*
+ * Ignore missing partitions on port 2, assuming they're due
+ * to running on a single-port part.
+ */
+ if ((1 << dcfg_partn) & ~enp->en_u.siena.enu_partn_mask) {
+ if (entry->port == 2)
+ continue;
+ }
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &length)) != 0)
+ goto fail4;
+
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items,
+ EFX_DWORD_0);
+ /* fw_version[] holds nitems entries, indexed from zero */
+ if (nitems <= entry->partn)
+ goto done;
+
+ temp[0] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_w,
+ EFX_WORD_0);
+ temp[1] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_x,
+ EFX_WORD_0);
+ temp[2] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_y,
+ EFX_WORD_0);
+ temp[3] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_z,
+ EFX_WORD_0);
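+ /* Keep the highest version tuple seen across ports (bytewise compare) */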
+ if (memcmp(version, temp, sizeof (temp)) < 0)
+ memcpy(version, temp, sizeof (temp));
+
+ done:
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *chunk_sizep)
+{
+ siena_parttbl_entry_t *entry;
+ int rc;
+
+ if ((entry = siena_parttbl_entry(enp, type)) == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = siena_nvram_partn_lock(enp, entry->partn)) != 0)
+ goto fail2;
+
+ if (chunk_sizep != NULL)
+ *chunk_sizep = SIENA_NVRAM_CHUNK;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ siena_parttbl_entry_t *entry;
+ int rc;
+
+ if ((entry = siena_parttbl_entry(enp, type)) == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, entry->partn,
+ offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ siena_parttbl_entry_t *entry;
+ size_t size;
+ int rc;
+
+ if ((entry = siena_parttbl_entry(enp, type)) == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = siena_nvram_partn_size(enp, entry->partn, &size)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_partn_erase(enp, entry->partn, 0, size)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ siena_parttbl_entry_t *entry;
+ int rc;
+
+ if ((entry = siena_parttbl_entry(enp, type)) == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = siena_nvram_partn_write(enp, entry->partn,
+ offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+siena_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ siena_parttbl_entry_t *entry;
+
+ if ((entry = siena_parttbl_entry(enp, type)) != NULL)
+ siena_nvram_partn_unlock(enp, entry->partn);
+}
+
+ __checkReturn int
+siena_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4])
+{
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ siena_parttbl_entry_t *entry;
+ unsigned int dcfg_partn;
+ size_t partn_size;
+ unsigned int hdr_length;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int nitems;
+ unsigned int required_hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ uint32_t subtype;
+ size_t length;
+ int rc;
+
+ if ((entry = siena_parttbl_entry(enp, type)) == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ dcfg_partn = (entry->port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ /*
+ * NOTE: This function will blatt any fields trailing the version
+ * vector, or the VPD chunk.
+ */
+ required_hdr_length = SIENA_DYNAMIC_CFG_SIZE(entry->partn + 1);
+ if (required_hdr_length + vpd_length > length) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
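+ /* Move the VPD up if the enlarged header would otherwise overlap it */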
+ if (vpd_offset < required_hdr_length) {
+ (void) memmove((caddr_t)dcfg + required_hdr_length,
+ (caddr_t)dcfg + vpd_offset, vpd_length);
+ vpd_offset = required_hdr_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, vpd_offset);
+ }
+
+ if (hdr_length < required_hdr_length) {
+ (void) memset((caddr_t)dcfg + hdr_length, 0,
+ required_hdr_length - hdr_length);
+ hdr_length = required_hdr_length;
+ EFX_POPULATE_WORD_1(dcfg->length,
+ EFX_WORD_0, hdr_length);
+ }
+
+ /* Get the subtype to insert into the fw_subtype array */
+ if ((rc = siena_nvram_get_subtype(enp, entry->partn, &subtype)) != 0)
+ goto fail5;
+
+ /* Fill out the new version */
+ EFX_POPULATE_DWORD_1(dcfg->fw_version[entry->partn].fw_subtype,
+ EFX_DWORD_0, subtype);
+ EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_w,
+ EFX_WORD_0, version[0]);
+ EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_x,
+ EFX_WORD_0, version[1]);
+ EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_y,
+ EFX_WORD_0, version[2]);
+ EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_z,
+ EFX_WORD_0, version[3]);
+
+ /* Update the version count */
+ if (nitems < entry->partn + 1) {
+ nitems = entry->partn + 1;
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items,
+ EFX_DWORD_0, nitems);
+ }
+
+ /* Adjust the checksum byte so the header bytes again sum to zero */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new partition */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0,
+ (caddr_t)dcfg, vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/siena_phy.c b/sys/dev/sfxge/common/siena_phy.c
new file mode 100644
index 0000000..7eb8468
--- /dev/null
+++ b/sys/dev/sfxge/common/siena_phy.c
@@ -0,0 +1,857 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "efsys.h"
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static void
+siena_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+siena_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+ void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(*eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(*eqp, LINKCHANGE_LINK_FLAGS);
+ siena_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(*eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ siena_phy_decode_cap(MCDI_EV_FIELD(*eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn int
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ int rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = siena_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ int rc;
+
+ req.emr_cmd = MC_CMD_GET_LINK;
+ EFX_STATIC_ASSERT(MC_CMD_GET_LINK_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = sizeof (outbuf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &slsp->sls_adv_cap_mask);
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &slsp->sls_lp_cap_mask);
+
+ siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &slsp->sls_link_mode, &slsp->sls_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_ID_LED_IN_LEN,
+ MC_CMD_SET_LINK_IN_LEN)];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ int rc;
+
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ EFX_STATIC_ASSERT(MC_CMD_SET_LINK_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
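+ /* Translate the EFX_PHY_CAP_* advertised mask into MCDI PHY_CAP_* bits */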
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* And set the blink mode */
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ EFX_STATIC_ASSERT(MC_CMD_SET_ID_LED_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+ uint32_t state;
+ int rc;
+
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ EFX_STATIC_ASSERT(MC_CMD_GET_PHY_STATE_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = outbuf;
+ req.emr_out_length = sizeof (outbuf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
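+/*
+ * If the MC reports statistic _mc_record as valid in _vmask, mark the
+ * corresponding EFX statistic in _smask and, when an output array and a
+ * DMA buffer are supplied, copy the 32-bit value out of the DMA buffer.
+ * The _SET2 variant covers records whose MC and EFX names match.
+ */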
+#define SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ _mc_record, _efx_record) \
+ if ((_vmask) & (1ULL << (_mc_record))) { \
+ (_smask) |= (1ULL << (_efx_record)); \
+ if ((_stat) != NULL && !EFSYS_MEM_IS_NULL(_esmp)) { \
+ efx_dword_t dword; \
+ EFSYS_MEM_READD(_esmp, (_mc_record) * 4, &dword);\
+ (_stat)[_efx_record] = \
+ EFX_DWORD_FIELD(dword, EFX_DWORD_0); \
+ } \
+ }
+
+#define SIENA_SIMPLE_STAT_SET2(_vmask, _esmp, _smask, _stat, _record) \
+ SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ MC_CMD_ ## _record, \
+ EFX_PHY_STAT_ ## _record)
+
+ void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __out_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ uint64_t smask = 0;
+
+ _NOTE(ARGUNUSED(enp))
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_TX_FAULT);
+
+ if (vmask & (1 << MC_CMD_PMA_PMD_SIGNAL)) {
+ smask |= ((1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_A) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_B) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_C) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_D));
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sig;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PMA_PMD_SIGNAL,
+ &dword);
+ sig = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_A] = (sig >> 1) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_B] = (sig >> 2) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_C] = (sig >> 3) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_D] = (sig >> 4) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_A,
+ EFX_PHY_STAT_SNR_A);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_B,
+ EFX_PHY_STAT_SNR_B);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_C,
+ EFX_PHY_STAT_SNR_C);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_D,
+ EFX_PHY_STAT_SNR_D);
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BER);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BLOCK_ERRORS);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_ALIGN);
+
+ if (vmask & (1 << MC_CMD_PHYXS_SYNC)) {
+		smask |= ((1ULL << EFX_PHY_STAT_PHY_XS_SYNC_A) |
+		    (1ULL << EFX_PHY_STAT_PHY_XS_SYNC_B) |
+		    (1ULL << EFX_PHY_STAT_PHY_XS_SYNC_C) |
+		    (1ULL << EFX_PHY_STAT_PHY_XS_SYNC_D));
+		if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sync;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PHYXS_SYNC, &dword);
+ sync = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_A] = (sync >> 0) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_B] = (sync >> 1) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_C] = (sync >> 2) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_D] = (sync >> 3) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_COMPLETE);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_CL22_LINK_UP,
+ EFX_PHY_STAT_CL22EXT_LINK_UP);
+
+ if (smaskp != NULL)
+ *smaskp = smask;
+}
+
+ __checkReturn int
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __out_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t vmask = encp->enc_siena_phy_stat_mask;
+ uint8_t payload[MC_CMD_PHY_STATS_IN_LEN];
+ uint64_t smask;
+ efx_mcdi_req_t req;
+ int rc;
+
+ req.emr_cmd = MC_CMD_PHY_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = sizeof (payload);
+ EFX_STATIC_ASSERT(MC_CMD_PHY_STATS_OUT_DMA_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN);
+
+ siena_phy_decode_stats(enp, vmask, esmp, &smask, stat);
+ EFSYS_ASSERT(smask == encp->enc_phy_stat_mask);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+	return (rc);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_PHY_PROPS
+
+#if EFSYS_OPT_NAMES
+
+extern const char __cs *
+siena_phy_prop_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp, id))
+
+ return (NULL);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn int
+siena_phy_prop_get(
+ __in efx_nic_t *enp,
+ __in unsigned int id,
+ __in uint32_t flags,
+ __out uint32_t *valp)
+{
+ _NOTE(ARGUNUSED(enp, id, flags, valp))
+
+ return (ENOTSUP);
+}
+
+extern __checkReturn int
+siena_phy_prop_set(
+ __in efx_nic_t *enp,
+ __in unsigned int id,
+ __in uint32_t val)
+{
+ _NOTE(ARGUNUSED(enp, id, val))
+
+ return (ENOTSUP);
+}
+
+#endif /* EFSYS_OPT_PHY_PROPS */
+
+#if EFSYS_OPT_PHY_BIST
+
+ __checkReturn int
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type)
+{
+ uint8_t payload[MC_CMD_START_BIST_IN_LEN];
+ efx_mcdi_req_t req;
+ int rc;
+
+ req.emr_cmd = MC_CMD_START_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = sizeof (payload);
+ EFX_STATIC_ASSERT(MC_CMD_START_BIST_OUT_LEN == 0);
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ switch (type) {
+ case EFX_PHY_BIST_TYPE_NORMAL:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
+ break;
+ case EFX_PHY_BIST_TYPE_CABLE_SHORT:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_SHORT);
+ break;
+ case EFX_PHY_BIST_TYPE_CABLE_LONG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_LONG);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
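+/*
+ * Translate an SFT9001 cable diagnostic code returned by the MC into
+ * the corresponding EFX cable status value.
+ */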
+static __checkReturn unsigned long
+siena_phy_sft9001_bist_status(
+ __in uint16_t code)
+{
+ switch (code) {
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY:
+ return (EFX_PHY_CABLE_STATUS_BUSY);
+ case MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTERPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN:
+ return (EFX_PHY_CABLE_STATUS_OPEN);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OK:
+ return (EFX_PHY_CABLE_STATUS_OK);
+ default:
+ return (EFX_PHY_CABLE_STATUS_INVALID);
+ }
+}
+
+ __checkReturn int
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type,
+ __out efx_phy_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t payload[MCDI_CTL_SDU_LEN_MAX];
+ uint32_t value_mask = 0;
+ efx_mcdi_req_t req;
+ uint32_t result;
+ int rc;
+
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ _NOTE(CONSTANTCONDITION)
+ EFSYS_ASSERT(MC_CMD_POLL_BIST_IN_LEN == 0);
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = payload;
+ req.emr_out_length = sizeof (payload);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ /* Extract PHY specific results */
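+	/*
+	 * A passing SFT9001 cable test carries per-pair cable length and
+	 * status words; a failing QLX111V test carries a single fault
+	 * code.  Each value is copied out only when the caller supplied
+	 * an output array with room for it (count > index).
+	 */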
+ if (result == MC_CMD_POLL_BIST_PASSED &&
+ encp->enc_phy_type == EFX_PHY_SFT9001B &&
+ req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN &&
+ (type == EFX_PHY_BIST_TYPE_CABLE_SHORT ||
+ type == EFX_PHY_BIST_TYPE_CABLE_LONG)) {
+ uint16_t word;
+
+ if (count > EFX_PHY_BIST_CABLE_LENGTH_A) {
+ if (valuesp != NULL)
+ valuesp[EFX_PHY_BIST_CABLE_LENGTH_A] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ value_mask |= (1 << EFX_PHY_BIST_CABLE_LENGTH_A);
+ }
+
+ if (count > EFX_PHY_BIST_CABLE_LENGTH_B) {
+ if (valuesp != NULL)
+ valuesp[EFX_PHY_BIST_CABLE_LENGTH_B] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B);
+ value_mask |= (1 << EFX_PHY_BIST_CABLE_LENGTH_B);
+ }
+
+ if (count > EFX_PHY_BIST_CABLE_LENGTH_C) {
+ if (valuesp != NULL)
+ valuesp[EFX_PHY_BIST_CABLE_LENGTH_C] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C);
+ value_mask |= (1 << EFX_PHY_BIST_CABLE_LENGTH_C);
+ }
+
+ if (count > EFX_PHY_BIST_CABLE_LENGTH_D) {
+ if (valuesp != NULL)
+ valuesp[EFX_PHY_BIST_CABLE_LENGTH_D] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D);
+ value_mask |= (1 << EFX_PHY_BIST_CABLE_LENGTH_D);
+ }
+
+ if (count > EFX_PHY_BIST_CABLE_STATUS_A) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_A);
+ valuesp[EFX_PHY_BIST_CABLE_STATUS_A] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_PHY_BIST_CABLE_STATUS_A);
+ }
+
+ if (count > EFX_PHY_BIST_CABLE_STATUS_B) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_B);
+ valuesp[EFX_PHY_BIST_CABLE_STATUS_B] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_PHY_BIST_CABLE_STATUS_B);
+ }
+
+ if (count > EFX_PHY_BIST_CABLE_STATUS_C) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_C);
+ valuesp[EFX_PHY_BIST_CABLE_STATUS_C] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_PHY_BIST_CABLE_STATUS_C);
+ }
+
+ if (count > EFX_PHY_BIST_CABLE_STATUS_D) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_D);
+ valuesp[EFX_PHY_BIST_CABLE_STATUS_D] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_PHY_BIST_CABLE_STATUS_D);
+ }
+
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_QLX111V &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_PHY_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_PHY_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_PHY_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_PHY_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_PHY_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_PHY_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_phy_bist_type_t type)
+{
+ /* There is no way to stop BIST on Siena */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_PHY_BIST */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/siena_sram.c b/sys/dev/sfxge/common/siena_sram.c
new file mode 100644
index 0000000..64fce98
--- /dev/null
+++ b/sys/dev/sfxge/common/siena_sram.c
@@ -0,0 +1,172 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "efsys.h"
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ void
+siena_sram_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t rx_base, tx_base;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ rx_base = encp->enc_buftbl_limit;
+ tx_base = rx_base + (encp->enc_rxq_limit * 64);
+
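+	/*
+	 * The descriptor caches are laid out in SRAM directly above the
+	 * buffer table: RX caches first (64 rows per RX queue, matching
+	 * the 64-descriptor cache size programmed below), then TX caches.
+	 */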
+ /* Initialize the transmit descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, tx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DC_SIZE, 1); /* 16 descriptors */
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_DC_CFG_REG, &oword);
+
+ /* Initialize the receive descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_SIZE, 3); /* 64 descriptors */
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_CFG_REG, &oword);
+
+ /* Set receive descriptor pre-fetch low water mark */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_PF_LWM, 56);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_PF_WM_REG, &oword);
+
+ /* Set the event queue to use for SRAM updates */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_UPD_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_UPD_EVQ_REG, &oword);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn int
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func)
+{
+ efx_oword_t oword;
+ efx_qword_t qword;
+ efx_qword_t verify;
+ size_t rows;
+ unsigned int wptr;
+ unsigned int rptr;
+ int rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Reconfigure into HALF buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ /*
+ * Move the descriptor caches up to the top of SRAM, and test
+ * all of SRAM below them. We only miss out one row here.
+ */
+ rows = SIENA_SRAM_ROWS - 1;
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rows);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, rows + 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ /*
+	 * Write the pattern through BUF_HALF_TBL in 64-entry batches,
+	 * waiting 1us between batches so as not to overflow the SRAM
+	 * FIFO.
+ */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_FALSE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_FALSE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+ }
+
+ /* And do the same negated */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_TRUE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_TRUE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+ }
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ /*
+ * We don't need to reconfigure SRAM again because the API
+ * requires efx_nic_fini() to be called after an sram test.
+ */
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/sys/dev/sfxge/common/siena_vpd.c b/sys/dev/sfxge/common/siena_vpd.c
new file mode 100644
index 0000000..5c44608
--- /dev/null
+++ b/sys/dev/sfxge/common/siena_vpd.c
@@ -0,0 +1,603 @@
+/*-
+ * Copyright 2009 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "efsys.h"
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn int
+siena_vpd_get_static(
+ __in efx_nic_t *enp,
+ __in unsigned int partn,
+ __deref_out_bcount_opt(*sizep) caddr_t *svpdp,
+ __out size_t *sizep)
+{
+ siena_mc_static_config_hdr_t *scfg;
+ caddr_t svpd;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int pos;
+ unsigned int region;
+ int rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1);
+
+ /* Allocate sufficient memory for the entire static cfg area */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg);
+ if (scfg == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail3;
+
+ /* Verify the magic number */
+ if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) !=
+ SIENA_MC_STATIC_CONFIG_MAGIC) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+	/* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_STATIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(scfg->length, EFX_WORD_0);
+ vpd_offset = EFX_DWORD_FIELD(scfg->static_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(scfg->static_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the sector size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ /* Read the remainder of scfg + static vpd */
+ region = vpd_offset + vpd_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)scfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail6;
+ }
+
+ /* Verify checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)scfg)[pos];
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ if (vpd_length == 0)
+ svpd = NULL;
+ else {
+ /* Copy the vpd data out */
+ EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd);
+ if (svpd == NULL) {
+ rc = ENOMEM;
+ goto fail8;
+ }
+ memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length);
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+ *svpdp = svpd;
+ *sizep = vpd_length;
+
+ return (0);
+
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_vpd_init(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ caddr_t svpd = NULL;
+ unsigned partn;
+ size_t size = 0;
+ int rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1;
+
+ /*
+	 * We need the static VPD sector to present a unified static+dynamic
+	 * VPD on every read, write and verify cycle.  Since it should
+	 * *never* change, we can simply cache it here.
+ */
+ if ((rc = siena_vpd_get_static(enp, partn, &svpd, &size)) != 0)
+ goto fail1;
+
+ if (svpd != NULL && size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_u.siena.enu_svpd = svpd;
+ enp->en_u.siena.enu_svpd_length = size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ unsigned int partn;
+ int rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /*
+ * This function returns the total size the user should allocate
+ * for all VPD operations. We've already cached the static vpd,
+ * so we just need to return an upper bound on the dynamic vpd.
+	 * Since the dynamic_config structure can change under our feet
+	 * (as version numbers are inserted), just be safe and return the
+	 * total size of the dynamic_config *sector*.
+ */
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, partn, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ siena_mc_dynamic_config_hdr_t *dcfg;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ size_t dcfg_size;
+ int rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &dcfg_size)) != 0)
+ goto fail1;
+
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ if (vpd_length > size) {
+		rc = EFAULT;	/* Invalid dcfg: VPD bigger than sector */
+ goto fail2;
+ }
+
+ EFSYS_ASSERT3U(vpd_length, <=, size);
+ memcpy(data, (caddr_t)dcfg + vpd_offset, vpd_length);
+
+ /* Pad data with all-1s, consistent with update operations */
+ memset(data + vpd_length, 0xff, size - vpd_length);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+	unsigned int dcont;
+	int rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /*
+ * Strictly you could take the view that dynamic vpd is optional.
+ * Instead, to conform more closely to the read/verify/reinit()
+ * paradigm, we require dynamic vpd. siena_vpd_reinit() will
+ * reinitialize it as required.
+ */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ int rc;
+
+ /*
+	 * Only create a PID if the static VPD doesn't already have one
+ */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ int rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value,
+ enp->en_u.siena.enu_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0)
+ goto fail2;
+
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ int rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ __checkReturn int
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn int
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
+ siena_mc_dynamic_config_hdr_t *dcfg;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ unsigned int hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ size_t partn_size, dcfg_size;
+ size_t vpd_length;
+ int rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine total length of all tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Lock dynamic config sector for write, and read structure only */
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &dcfg_size)) != 0)
+ goto fail3;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+
+	/* Verify that the new VPD fits within the allocated dcfg sector */
+ if (hdr_length + vpd_length > dcfg_size) {
+ rc = ENOSPC;
+		goto fail4;
+ }
+
+ /* Copy in new vpd and update header */
+ vpd_offset = dcfg_size - vpd_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, vpd_offset);
+ memcpy((caddr_t)dcfg + vpd_offset, data, vpd_length);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length,
+ EFX_DWORD_0, vpd_length);
+
+ /* Update the checksum */
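+	/* (all hdr_length header bytes must sum to zero modulo 256, so
+	 * subtract the byte-sum over the updated header from the stored
+	 * checksum byte to restore the invariant) */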
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new sector */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0)
+		goto fail5;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, (caddr_t)dcfg,
+ vpd_offset + vpd_length)) != 0)
+		goto fail6;
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+
+ return (0);
+
+fail6:
+	EFSYS_PROBE(fail6);
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+
+	EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail3:
+	EFSYS_PROBE(fail3);
+
+	siena_nvram_partn_unlock(enp, dcfg_partn);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+ void
+siena_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.siena.enu_svpd_length,
+ enp->en_u.siena.enu_svpd);
+
+ enp->en_u.siena.enu_svpd = NULL;
+ enp->en_u.siena.enu_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/sys/dev/sfxge/sfxge.c b/sys/dev/sfxge/sfxge.c
new file mode 100644
index 0000000..380215a
--- /dev/null
+++ b/sys/dev/sfxge/sfxge.c
@@ -0,0 +1,775 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/taskqueue.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+
+#include "common/efx.h"
+
+#include "sfxge.h"
+#include "sfxge_rx.h"
+
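+/*
+ * SFXGE_CAP is every capability the driver supports, SFXGE_CAP_ENABLE
+ * the set enabled by default, and SFXGE_CAP_FIXED the subset that cannot
+ * be disabled at run time (see the SIOCSIFCAP handler below).
+ */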
+#define SFXGE_CAP (IFCAP_VLAN_MTU | \
+ IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | \
+ IFCAP_JUMBO_MTU | IFCAP_LRO | \
+ IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE)
+#define SFXGE_CAP_ENABLE SFXGE_CAP
+#define SFXGE_CAP_FIXED (IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | \
+ IFCAP_JUMBO_MTU | IFCAP_LINKSTATE)
+
+MALLOC_DEFINE(M_SFXGE, "sfxge", "Solarflare 10GigE driver");
+
+static void
+sfxge_reset(void *arg, int npending);
+
+static int
+sfxge_start(struct sfxge_softc *sc)
+{
+ int rc;
+
+ sx_assert(&sc->softc_lock, LA_XLOCKED);
+
+ if (sc->init_state == SFXGE_STARTED)
+		return (0);
+
+ if (sc->init_state != SFXGE_REGISTERED) {
+ rc = EINVAL;
+ goto fail;
+ }
+
+ if ((rc = efx_nic_init(sc->enp)) != 0)
+ goto fail;
+
+ /* Start processing interrupts. */
+ if ((rc = sfxge_intr_start(sc)) != 0)
+ goto fail2;
+
+ /* Start processing events. */
+ if ((rc = sfxge_ev_start(sc)) != 0)
+ goto fail3;
+
+ /* Start the receiver side. */
+ if ((rc = sfxge_rx_start(sc)) != 0)
+ goto fail4;
+
+ /* Start the transmitter side. */
+ if ((rc = sfxge_tx_start(sc)) != 0)
+ goto fail5;
+
+ /* Fire up the port. */
+ if ((rc = sfxge_port_start(sc)) != 0)
+ goto fail6;
+
+ sc->init_state = SFXGE_STARTED;
+
+ /* Tell the stack we're running. */
+ sc->ifnet->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->ifnet->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ return (0);
+
+fail6:
+ sfxge_tx_stop(sc);
+
+fail5:
+ sfxge_rx_stop(sc);
+
+fail4:
+ sfxge_ev_stop(sc);
+
+fail3:
+ sfxge_intr_stop(sc);
+
+fail2:
+ efx_nic_fini(sc->enp);
+
+fail:
+ device_printf(sc->dev, "sfxge_start: %d\n", rc);
+
+ return (rc);
+}
+
+static void
+sfxge_if_init(void *arg)
+{
+ struct sfxge_softc *sc;
+
+ sc = (struct sfxge_softc *)arg;
+
+ sx_xlock(&sc->softc_lock);
+ (void)sfxge_start(sc);
+ sx_xunlock(&sc->softc_lock);
+}
+
+static void
+sfxge_stop(struct sfxge_softc *sc)
+{
+ sx_assert(&sc->softc_lock, LA_XLOCKED);
+
+ if (sc->init_state != SFXGE_STARTED)
+ return;
+
+ sc->init_state = SFXGE_REGISTERED;
+
+ /* Stop the port. */
+ sfxge_port_stop(sc);
+
+ /* Stop the transmitter. */
+ sfxge_tx_stop(sc);
+
+ /* Stop the receiver. */
+ sfxge_rx_stop(sc);
+
+ /* Stop processing events. */
+ sfxge_ev_stop(sc);
+
+ /* Stop processing interrupts. */
+ sfxge_intr_stop(sc);
+
+ efx_nic_fini(sc->enp);
+
+ sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
+}
+
+static int
+sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
+{
+ struct sfxge_softc *sc;
+ struct ifreq *ifr;
+ int error;
+
+ ifr = (struct ifreq *)data;
+ sc = ifp->if_softc;
+ error = 0;
+
+ switch (command) {
+ case SIOCSIFFLAGS:
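+		/*
+		 * Re-program the MAC filters when only PROMISC/ALLMULTI
+		 * change on a running interface; otherwise bring the
+		 * interface up or down to match IFF_UP.
+		 */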
+ sx_xlock(&sc->softc_lock);
+ if (ifp->if_flags & IFF_UP) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if ((ifp->if_flags ^ sc->if_flags) &
+ (IFF_PROMISC | IFF_ALLMULTI)) {
+ sfxge_mac_filter_set(sc);
+ }
+ } else
+ sfxge_start(sc);
+ } else
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ sfxge_stop(sc);
+ sc->if_flags = ifp->if_flags;
+ sx_xunlock(&sc->softc_lock);
+ break;
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu == ifp->if_mtu) {
+ /* Nothing to do */
+ error = 0;
+ } else if (ifr->ifr_mtu > SFXGE_MAX_MTU) {
+ error = EINVAL;
+ } else if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ ifp->if_mtu = ifr->ifr_mtu;
+ error = 0;
+ } else {
+ /* Restart required */
+ sx_xlock(&sc->softc_lock);
+ sfxge_stop(sc);
+ ifp->if_mtu = ifr->ifr_mtu;
+ error = sfxge_start(sc);
+ sx_xunlock(&sc->softc_lock);
+ if (error) {
+ ifp->if_flags &= ~IFF_UP;
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ if_down(ifp);
+ }
+ }
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ sfxge_mac_filter_set(sc);
+ break;
+ case SIOCSIFCAP:
+ sx_xlock(&sc->softc_lock);
+
+ /*
+ * The networking core already rejects attempts to
+ * enable capabilities we don't have. We still have
+ * to reject attempts to disable capabilities that we
+ * can't (yet) disable.
+ */
+ if (~ifr->ifr_reqcap & SFXGE_CAP_FIXED) {
+ error = EINVAL;
+ sx_xunlock(&sc->softc_lock);
+ break;
+ }
+
+ ifp->if_capenable = ifr->ifr_reqcap;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
+ else
+ ifp->if_hwassist &= ~(CSUM_IP | CSUM_TCP | CSUM_UDP);
+ if (ifp->if_capenable & IFCAP_TSO)
+ ifp->if_hwassist |= CSUM_TSO;
+ else
+ ifp->if_hwassist &= ~CSUM_TSO;
+
+ sx_xunlock(&sc->softc_lock);
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+ break;
+ default:
+ error = ether_ioctl(ifp, command, data);
+ }
+
+ return (error);
+}
+
+static void
+sfxge_ifnet_fini(struct ifnet *ifp)
+{
+ struct sfxge_softc *sc = ifp->if_softc;
+
+ sx_xlock(&sc->softc_lock);
+ sfxge_stop(sc);
+ sx_xunlock(&sc->softc_lock);
+
+ ifmedia_removeall(&sc->media);
+ ether_ifdetach(ifp);
+ if_free(ifp);
+}
+
+static int
+sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
+ device_t dev;
+ int rc;
+
+ dev = sc->dev;
+ sc->ifnet = ifp;
+
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_init = sfxge_if_init;
+ ifp->if_softc = sc;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = sfxge_if_ioctl;
+
+ ifp->if_capabilities = SFXGE_CAP;
+ ifp->if_capenable = SFXGE_CAP_ENABLE;
+ ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;
+
+ ether_ifattach(ifp, encp->enc_mac_addr);
+
+#ifdef SFXGE_HAVE_MQ
+ ifp->if_transmit = sfxge_if_transmit;
+ ifp->if_qflush = sfxge_if_qflush;
+#else
+ ifp->if_start = sfxge_if_start;
+ IFQ_SET_MAXLEN(&ifp->if_snd, SFXGE_NDESCS - 1);
+ ifp->if_snd.ifq_drv_maxlen = SFXGE_NDESCS - 1;
+ IFQ_SET_READY(&ifp->if_snd);
+
+ mtx_init(&sc->tx_lock, "txq", NULL, MTX_DEF);
+#endif
+
+ if ((rc = sfxge_port_ifmedia_init(sc)) != 0)
+ goto fail;
+
+	return (0);
+
+fail:
+	ether_ifdetach(sc->ifnet);
+	return (rc);
+}
+
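+/*
+ * Hand out a contiguous range of buffer table entries.  This is a simple
+ * bump allocator: entries are allocated in order and never freed
+ * individually, which suits queues that are set up once at start time.
+ */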
+void
+sfxge_sram_buf_tbl_alloc(struct sfxge_softc *sc, size_t n, uint32_t *idp)
+{
+ KASSERT(sc->buffer_table_next + n <=
+ efx_nic_cfg_get(sc->enp)->enc_buftbl_limit,
+ ("buffer table full"));
+
+ *idp = sc->buffer_table_next;
+ sc->buffer_table_next += n;
+}
+
+static int
+sfxge_bar_init(struct sfxge_softc *sc)
+{
+ efsys_bar_t *esbp = &sc->bar;
+
+ esbp->esb_rid = PCIR_BAR(EFX_MEM_BAR);
+ if ((esbp->esb_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+ &esbp->esb_rid, RF_ACTIVE)) == NULL) {
+ device_printf(sc->dev, "Cannot allocate BAR region %d\n",
+ EFX_MEM_BAR);
+ return (ENXIO);
+ }
+ esbp->esb_tag = rman_get_bustag(esbp->esb_res);
+ esbp->esb_handle = rman_get_bushandle(esbp->esb_res);
+ mtx_init(&esbp->esb_lock, "sfxge_efsys_bar", NULL, MTX_DEF);
+
+ return (0);
+}
+
+static void
+sfxge_bar_fini(struct sfxge_softc *sc)
+{
+ efsys_bar_t *esbp = &sc->bar;
+
+ bus_release_resource(sc->dev, SYS_RES_MEMORY, esbp->esb_rid,
+ esbp->esb_res);
+ mtx_destroy(&esbp->esb_lock);
+}
+
+static int
+sfxge_create(struct sfxge_softc *sc)
+{
+ device_t dev;
+ efx_nic_t *enp;
+ int error;
+
+ dev = sc->dev;
+
+ sx_init(&sc->softc_lock, "sfxge_softc");
+
+ sc->stats_node = SYSCTL_ADD_NODE(
+ device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "stats", CTLFLAG_RD, NULL, "Statistics");
+ if (!sc->stats_node) {
+ error = ENOMEM;
+ goto fail;
+ }
+
+ TASK_INIT(&sc->task_reset, 0, sfxge_reset, sc);
+
+ (void) pci_enable_busmaster(dev);
+
+ /* Initialize DMA mappings. */
+ if ((error = sfxge_dma_init(sc)) != 0)
+ goto fail;
+
+ /* Map the device registers. */
+ if ((error = sfxge_bar_init(sc)) != 0)
+ goto fail;
+
+ error = efx_family(pci_get_vendor(dev), pci_get_device(dev),
+ &sc->family);
+ KASSERT(error == 0, ("Family should be filtered by sfxge_probe()"));
+
+ /* Create the common code nic object. */
+ mtx_init(&sc->enp_lock, "sfxge_nic", NULL, MTX_DEF);
+ if ((error = efx_nic_create(sc->family, (efsys_identifier_t *)sc,
+ &sc->bar, &sc->enp_lock, &enp)) != 0)
+ goto fail3;
+ sc->enp = enp;
+
+ /* Initialize MCDI to talk to the microcontroller. */
+ if ((error = sfxge_mcdi_init(sc)) != 0)
+ goto fail4;
+
+ /* Probe the NIC and build the configuration data area. */
+ if ((error = efx_nic_probe(enp)) != 0)
+ goto fail5;
+
+ /* Initialize the NVRAM. */
+ if ((error = efx_nvram_init(enp)) != 0)
+ goto fail6;
+
+ /* Initialize the VPD. */
+ if ((error = efx_vpd_init(enp)) != 0)
+ goto fail7;
+
+ /* Reset the NIC. */
+ if ((error = efx_nic_reset(enp)) != 0)
+ goto fail8;
+
+ /* Initialize buffer table allocation. */
+ sc->buffer_table_next = 0;
+
+ /* Set up interrupts. */
+ if ((error = sfxge_intr_init(sc)) != 0)
+ goto fail8;
+
+ /* Initialize event processing state. */
+ if ((error = sfxge_ev_init(sc)) != 0)
+ goto fail11;
+
+ /* Initialize receive state. */
+ if ((error = sfxge_rx_init(sc)) != 0)
+ goto fail12;
+
+ /* Initialize transmit state. */
+ if ((error = sfxge_tx_init(sc)) != 0)
+ goto fail13;
+
+ /* Initialize port state. */
+ if ((error = sfxge_port_init(sc)) != 0)
+ goto fail14;
+
+ sc->init_state = SFXGE_INITIALIZED;
+
+ return (0);
+
+fail14:
+ sfxge_tx_fini(sc);
+
+fail13:
+ sfxge_rx_fini(sc);
+
+fail12:
+ sfxge_ev_fini(sc);
+
+fail11:
+ sfxge_intr_fini(sc);
+
+fail8:
+ efx_vpd_fini(enp);
+
+fail7:
+ efx_nvram_fini(enp);
+
+fail6:
+ efx_nic_unprobe(enp);
+
+fail5:
+ sfxge_mcdi_fini(sc);
+
+fail4:
+ sc->enp = NULL;
+ efx_nic_destroy(enp);
+ mtx_destroy(&sc->enp_lock);
+
+fail3:
+ sfxge_bar_fini(sc);
+ (void) pci_disable_busmaster(sc->dev);
+
+fail:
+ sc->dev = NULL;
+ sx_destroy(&sc->softc_lock);
+ return (error);
+}
+
+static void
+sfxge_destroy(struct sfxge_softc *sc)
+{
+ efx_nic_t *enp;
+
+ /* Clean up port state. */
+ sfxge_port_fini(sc);
+
+ /* Clean up transmit state. */
+ sfxge_tx_fini(sc);
+
+ /* Clean up receive state. */
+ sfxge_rx_fini(sc);
+
+ /* Clean up event processing state. */
+ sfxge_ev_fini(sc);
+
+ /* Clean up interrupts. */
+ sfxge_intr_fini(sc);
+
+ /* Tear down common code subsystems. */
+ efx_nic_reset(sc->enp);
+ efx_vpd_fini(sc->enp);
+ efx_nvram_fini(sc->enp);
+ efx_nic_unprobe(sc->enp);
+
+ /* Tear down MCDI. */
+ sfxge_mcdi_fini(sc);
+
+ /* Destroy common code context. */
+ enp = sc->enp;
+ sc->enp = NULL;
+ efx_nic_destroy(enp);
+
+ /* Free DMA memory. */
+ sfxge_dma_fini(sc);
+
+ /* Free mapped BARs. */
+ sfxge_bar_fini(sc);
+
+ (void) pci_disable_busmaster(sc->dev);
+
+ taskqueue_drain(taskqueue_thread, &sc->task_reset);
+
+ /* Destroy the softc lock. */
+ sx_destroy(&sc->softc_lock);
+}
+
+static int
+sfxge_vpd_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc = arg1;
+ efx_vpd_value_t value;
+ int rc;
+
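+	/* arg2 packs the VPD tag in the upper 16 bits and the two-byte
+	 * keyword in the lower 16 bits (see sfxge_vpd_try_add()). */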
+ value.evv_tag = arg2 >> 16;
+ value.evv_keyword = arg2 & 0xffff;
+ if ((rc = efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value))
+ != 0)
+		return (rc);
+
+	return (SYSCTL_OUT(req, value.evv_value, value.evv_length));
+}
+
+static void
+sfxge_vpd_try_add(struct sfxge_softc *sc, struct sysctl_oid_list *list,
+ efx_vpd_tag_t tag, const char *keyword)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
+ efx_vpd_value_t value;
+
+ /* Check whether VPD tag/keyword is present */
+ value.evv_tag = tag;
+ value.evv_keyword = EFX_VPD_KEYWORD(keyword[0], keyword[1]);
+ if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) != 0)
+ return;
+
+ SYSCTL_ADD_PROC(
+ ctx, list, OID_AUTO, keyword, CTLTYPE_STRING|CTLFLAG_RD,
+ sc, tag << 16 | EFX_VPD_KEYWORD(keyword[0], keyword[1]),
+ sfxge_vpd_handler, "A", "");
+}
+
+static int
+sfxge_vpd_init(struct sfxge_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
+ struct sysctl_oid *vpd_node;
+ struct sysctl_oid_list *vpd_list;
+ char keyword[3];
+ efx_vpd_value_t value;
+ int rc;
+
+ if ((rc = efx_vpd_size(sc->enp, &sc->vpd_size)) != 0)
+ goto fail;
+ sc->vpd_data = malloc(sc->vpd_size, M_SFXGE, M_WAITOK);
+ if ((rc = efx_vpd_read(sc->enp, sc->vpd_data, sc->vpd_size)) != 0)
+ goto fail2;
+
+ /* Copy ID (product name) into device description, and log it. */
+ value.evv_tag = EFX_VPD_ID;
+ if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) == 0) {
+ value.evv_value[value.evv_length] = 0;
+ device_set_desc_copy(sc->dev, value.evv_value);
+ device_printf(sc->dev, "%s\n", value.evv_value);
+ }
+
+ vpd_node = SYSCTL_ADD_NODE(
+ ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
+ OID_AUTO, "vpd", CTLFLAG_RD, NULL, "Vital Product Data");
+ vpd_list = SYSCTL_CHILDREN(vpd_node);
+
+ /* Add sysctls for all expected and any vendor-defined keywords. */
+ sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "PN");
+ sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "EC");
+ sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "SN");
+ keyword[0] = 'V';
+ keyword[2] = 0;
+ for (keyword[1] = '0'; keyword[1] <= '9'; keyword[1]++)
+ sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
+ for (keyword[1] = 'A'; keyword[1] <= 'Z'; keyword[1]++)
+ sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
+
+	return (0);
+
+fail2:
+	free(sc->vpd_data, M_SFXGE);
+fail:
+	return (rc);
+}
+
+static void
+sfxge_vpd_fini(struct sfxge_softc *sc)
+{
+ free(sc->vpd_data, M_SFXGE);
+}
+
+static void
+sfxge_reset(void *arg, int npending)
+{
+ struct sfxge_softc *sc;
+ int rc;
+
+ (void)npending;
+
+ sc = (struct sfxge_softc *)arg;
+
+ sx_xlock(&sc->softc_lock);
+
+ if (sc->init_state != SFXGE_STARTED)
+ goto done;
+
+ sfxge_stop(sc);
+ efx_nic_reset(sc->enp);
+ if ((rc = sfxge_start(sc)) != 0)
+ device_printf(sc->dev,
+ "reset failed (%d); interface is now stopped\n",
+ rc);
+
+done:
+ sx_xunlock(&sc->softc_lock);
+}
+
+void
+sfxge_schedule_reset(struct sfxge_softc *sc)
+{
+ taskqueue_enqueue(taskqueue_thread, &sc->task_reset);
+}
+
+static int
+sfxge_attach(device_t dev)
+{
+ struct sfxge_softc *sc;
+ struct ifnet *ifp;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ /* Allocate ifnet. */
+ ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "Couldn't allocate ifnet\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ sc->ifnet = ifp;
+
+ /* Initialize hardware. */
+ if ((error = sfxge_create(sc)) != 0)
+ goto fail2;
+
+ /* Create the ifnet for the port. */
+ if ((error = sfxge_ifnet_init(ifp, sc)) != 0)
+ goto fail3;
+
+ if ((error = sfxge_vpd_init(sc)) != 0)
+ goto fail4;
+
+ sc->init_state = SFXGE_REGISTERED;
+
+ return (0);
+
+fail4:
+	sfxge_ifnet_fini(ifp);
+	/* sfxge_ifnet_fini() has already freed the ifnet; don't fall
+	 * through to the if_free() below. */
+	sfxge_destroy(sc);
+	return (error);
+
+fail3:
+ sfxge_destroy(sc);
+
+fail2:
+ if_free(sc->ifnet);
+
+fail:
+ return (error);
+}
+
+static int
+sfxge_detach(device_t dev)
+{
+ struct sfxge_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ sfxge_vpd_fini(sc);
+
+ /* Destroy the ifnet. */
+ sfxge_ifnet_fini(sc->ifnet);
+
+ /* Tear down hardware. */
+ sfxge_destroy(sc);
+
+ return (0);
+}
+
+static int
+sfxge_probe(device_t dev)
+{
+ uint16_t pci_vendor_id;
+ uint16_t pci_device_id;
+ efx_family_t family;
+ int rc;
+
+ pci_vendor_id = pci_get_vendor(dev);
+ pci_device_id = pci_get_device(dev);
+
+ rc = efx_family(pci_vendor_id, pci_device_id, &family);
+ if (rc)
+		return (ENXIO);
+
+	KASSERT(family == EFX_FAMILY_SIENA, ("impossible controller family"));
+	device_set_desc(dev, "Solarflare SFC9000 family");
+	return (0);
+}
+
+static device_method_t sfxge_methods[] = {
+ DEVMETHOD(device_probe, sfxge_probe),
+ DEVMETHOD(device_attach, sfxge_attach),
+ DEVMETHOD(device_detach, sfxge_detach),
+
+ /* Bus interface. */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+
+ { 0, 0 }
+};
+
+static devclass_t sfxge_devclass;
+
+static driver_t sfxge_driver = {
+ "sfxge",
+ sfxge_methods,
+ sizeof(struct sfxge_softc)
+};
+
+DRIVER_MODULE(sfxge, pci, sfxge_driver, sfxge_devclass, 0, 0);
diff --git a/sys/dev/sfxge/sfxge.h b/sys/dev/sfxge/sfxge.h
new file mode 100644
index 0000000..2d3e042
--- /dev/null
+++ b/sys/dev/sfxge/sfxge.h
@@ -0,0 +1,303 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SFXGE_H
+#define _SFXGE_H
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/condvar.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/sx.h>
+#include <vm/uma.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+
+/*
+ * Backward-compatibility
+ */
+#ifndef CACHE_LINE_SIZE
+/* This should be right on most machines the driver will be used on, and
+ * we needn't care too much about wasting a few KB per interface.
+ */
+#define CACHE_LINE_SIZE 128
+#endif
+#ifndef IFCAP_LINKSTATE
+#define IFCAP_LINKSTATE 0
+#endif
+#ifndef IFCAP_VLAN_HWTSO
+#define IFCAP_VLAN_HWTSO 0
+#endif
+#ifndef IFM_10G_T
+#define IFM_10G_T IFM_UNKNOWN
+#endif
+#ifndef IFM_10G_KX4
+#define IFM_10G_KX4 IFM_10G_CX4
+#endif
+#if __FreeBSD_version >= 800054
+/* Networking core is multiqueue aware. We can manage our own TX
+ * queues and use m_pkthdr.flowid.
+ */
+#define SFXGE_HAVE_MQ
+#endif
+#if (__FreeBSD_version >= 800501 && __FreeBSD_version < 900000) || \
+ __FreeBSD_version >= 900003
+#define SFXGE_HAVE_DESCRIBE_INTR
+#endif
+#ifdef IFM_ETH_RXPAUSE
+#define SFXGE_HAVE_PAUSE_MEDIAOPTS
+#endif
+#ifndef CTLTYPE_U64
+#define CTLTYPE_U64 CTLTYPE_QUAD
+#endif
+
+#include "sfxge_rx.h"
+#include "sfxge_tx.h"
+
+#define SFXGE_IP_ALIGN 2
+
+#define SFXGE_ETHERTYPE_LOOPBACK 0x9000 /* Xerox loopback */
+
+enum sfxge_evq_state {
+ SFXGE_EVQ_UNINITIALIZED = 0,
+ SFXGE_EVQ_INITIALIZED,
+ SFXGE_EVQ_STARTING,
+ SFXGE_EVQ_STARTED
+};
+
+#define SFXGE_EV_BATCH 16384
+
+struct sfxge_evq {
+ struct sfxge_softc *sc __aligned(CACHE_LINE_SIZE);
+ struct mtx lock __aligned(CACHE_LINE_SIZE);
+
+ enum sfxge_evq_state init_state;
+ unsigned int index;
+ efsys_mem_t mem;
+ unsigned int buf_base_id;
+
+ boolean_t exception;
+
+ efx_evq_t *common;
+ unsigned int read_ptr;
+ unsigned int rx_done;
+ unsigned int tx_done;
+
+ /* Linked list of TX queues with completions to process */
+ struct sfxge_txq *txq;
+ struct sfxge_txq **txqs;
+};
+
+#define SFXGE_NEVS 4096
+#define SFXGE_NDESCS 1024
+#define SFXGE_MODERATION 30
+
+enum sfxge_intr_state {
+ SFXGE_INTR_UNINITIALIZED = 0,
+ SFXGE_INTR_INITIALIZED,
+ SFXGE_INTR_TESTING,
+ SFXGE_INTR_STARTED
+};
+
+struct sfxge_intr_hdl {
+ int eih_rid;
+ void *eih_tag;
+ struct resource *eih_res;
+};
+
+struct sfxge_intr {
+ enum sfxge_intr_state state;
+ struct resource *msix_res;
+ struct sfxge_intr_hdl *table;
+ int n_alloc;
+ int type;
+ efsys_mem_t status;
+ uint32_t zero_count;
+};
+
+enum sfxge_mcdi_state {
+ SFXGE_MCDI_UNINITIALIZED = 0,
+ SFXGE_MCDI_INITIALIZED,
+ SFXGE_MCDI_BUSY,
+ SFXGE_MCDI_COMPLETED
+};
+
+struct sfxge_mcdi {
+ struct mtx lock;
+ struct cv cv;
+ enum sfxge_mcdi_state state;
+ efx_mcdi_transport_t transport;
+};
+
+struct sfxge_hw_stats {
+ clock_t update_time;
+ efsys_mem_t dma_buf;
+ void *decode_buf;
+};
+
+enum sfxge_port_state {
+ SFXGE_PORT_UNINITIALIZED = 0,
+ SFXGE_PORT_INITIALIZED,
+ SFXGE_PORT_STARTED
+};
+
+struct sfxge_port {
+ struct sfxge_softc *sc;
+ struct mtx lock;
+ enum sfxge_port_state init_state;
+#ifndef SFXGE_HAVE_PAUSE_MEDIAOPTS
+ unsigned int wanted_fc;
+#endif
+ struct sfxge_hw_stats phy_stats;
+ struct sfxge_hw_stats mac_stats;
+ efx_link_mode_t link_mode;
+};
+
+enum sfxge_softc_state {
+ SFXGE_UNINITIALIZED = 0,
+ SFXGE_INITIALIZED,
+ SFXGE_REGISTERED,
+ SFXGE_STARTED
+};
+
+struct sfxge_softc {
+ device_t dev;
+ struct sx softc_lock;
+ enum sfxge_softc_state init_state;
+ struct ifnet *ifnet;
+ unsigned int if_flags;
+ struct sysctl_oid *stats_node;
+
+ struct task task_reset;
+
+ efx_family_t family;
+ caddr_t vpd_data;
+ size_t vpd_size;
+ efx_nic_t *enp;
+ struct mtx enp_lock;
+
+ bus_dma_tag_t parent_dma_tag;
+ efsys_bar_t bar;
+
+ struct sfxge_intr intr;
+ struct sfxge_mcdi mcdi;
+ struct sfxge_port port;
+ uint32_t buffer_table_next;
+
+ struct sfxge_evq *evq[SFXGE_RX_SCALE_MAX];
+ unsigned int ev_moderation;
+ clock_t ev_stats_update_time;
+ uint64_t ev_stats[EV_NQSTATS];
+
+ uma_zone_t rxq_cache;
+ struct sfxge_rxq *rxq[SFXGE_RX_SCALE_MAX];
+ unsigned int rx_indir_table[SFXGE_RX_SCALE_MAX];
+
+#ifdef SFXGE_HAVE_MQ
+ struct sfxge_txq *txq[SFXGE_TXQ_NTYPES + SFXGE_RX_SCALE_MAX];
+#else
+ struct sfxge_txq *txq[SFXGE_TXQ_NTYPES];
+#endif
+
+ struct ifmedia media;
+
+ size_t rx_prefix_size;
+ size_t rx_buffer_size;
+ uma_zone_t rx_buffer_zone;
+
+#ifndef SFXGE_HAVE_MQ
+ struct mtx tx_lock __aligned(CACHE_LINE_SIZE);
+#endif
+};
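+
+/*
+ * Locking note, inferred from usage in this change: softc_lock is a
+ * sleepable sx lock guarding configuration state transitions, while
+ * the per-queue, port and MCDI mutexes guard the fast paths.  Where
+ * both are taken (e.g. sfxge_ev_stat_update()), softc_lock is
+ * acquired first.
+ */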
+
+#define SFXGE_LINK_UP(sc) ((sc)->port.link_mode != EFX_LINK_DOWN)
+#define SFXGE_RUNNING(sc) ((sc)->ifnet->if_drv_flags & IFF_DRV_RUNNING)
+
+/*
+ * From sfxge.c.
+ */
+extern void sfxge_schedule_reset(struct sfxge_softc *sc);
+extern void sfxge_sram_buf_tbl_alloc(struct sfxge_softc *sc, size_t n,
+ uint32_t *idp);
+
+/*
+ * From sfxge_dma.c.
+ */
+extern int sfxge_dma_init(struct sfxge_softc *sc);
+extern void sfxge_dma_fini(struct sfxge_softc *sc);
+extern int sfxge_dma_alloc(struct sfxge_softc *sc, bus_size_t len,
+ efsys_mem_t *esmp);
+extern void sfxge_dma_free(efsys_mem_t *esmp);
+extern int sfxge_dma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
+ struct mbuf **mp, bus_dma_segment_t *segs, int *nsegs, int maxsegs);
+
+/*
+ * From sfxge_ev.c.
+ */
+extern int sfxge_ev_init(struct sfxge_softc *sc);
+extern void sfxge_ev_fini(struct sfxge_softc *sc);
+extern int sfxge_ev_start(struct sfxge_softc *sc);
+extern void sfxge_ev_stop(struct sfxge_softc *sc);
+extern int sfxge_ev_qpoll(struct sfxge_softc *sc, unsigned int index);
+
+/*
+ * From sfxge_intr.c.
+ */
+extern int sfxge_intr_init(struct sfxge_softc *sc);
+extern void sfxge_intr_fini(struct sfxge_softc *sc);
+extern int sfxge_intr_start(struct sfxge_softc *sc);
+extern void sfxge_intr_stop(struct sfxge_softc *sc);
+
+/*
+ * From sfxge_mcdi.c.
+ */
+extern int sfxge_mcdi_init(struct sfxge_softc *sc);
+extern void sfxge_mcdi_fini(struct sfxge_softc *sc);
+
+/*
+ * From sfxge_port.c.
+ */
+extern int sfxge_port_init(struct sfxge_softc *sc);
+extern void sfxge_port_fini(struct sfxge_softc *sc);
+extern int sfxge_port_start(struct sfxge_softc *sc);
+extern void sfxge_port_stop(struct sfxge_softc *sc);
+extern void sfxge_mac_link_update(struct sfxge_softc *sc,
+ efx_link_mode_t mode);
+extern int sfxge_mac_filter_set(struct sfxge_softc *sc);
+extern int sfxge_port_ifmedia_init(struct sfxge_softc *sc);
+
+#define SFXGE_MAX_MTU (9 * 1024)
+
+#endif /* _SFXGE_H */
diff --git a/sys/dev/sfxge/sfxge_dma.c b/sys/dev/sfxge/sfxge_dma.c
new file mode 100644
index 0000000..076c7c6
--- /dev/null
+++ b/sys/dev/sfxge/sfxge_dma.c
@@ -0,0 +1,202 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+
+#include <machine/bus.h>
+
+#include "common/efx.h"
+
+#include "sfxge.h"
+
+static void
+sfxge_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ bus_addr_t *addr;
+
+ addr = arg;
+
+ if (error) {
+ *addr = 0;
+ return;
+ }
+
+ *addr = segs[0].ds_addr;
+}
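+
+/*
+ * sfxge_map_mbuf_fast() is used below but is not defined in this file,
+ * so it is presumably supplied elsewhere in the driver.  What follows
+ * is a minimal sketch, assuming the fast path simply loads a single
+ * contiguous mbuf and records the sole resulting segment; the callback
+ * name and flags here are illustrative, not necessarily the in-tree
+ * ones.
+ */
+static void
+sfxge_dma_seg_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ bus_dma_segment_t *seg = arg;
+
+ /* Record the single segment produced by a successful load. */
+ if (error == 0 && nseg == 1)
+ *seg = segs[0];
+}
+
+static int
+sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
+ struct mbuf *m, bus_dma_segment_t *seg)
+{
+ /* BUS_DMA_NOWAIT makes the load synchronous, so the callback has
+ * run by the time bus_dmamap_load() returns. */
+ return (bus_dmamap_load(tag, map, mtod(m, void *), m->m_len,
+ sfxge_dma_seg_cb, seg, BUS_DMA_NOWAIT));
+}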
+
+int
+sfxge_dma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
+ struct mbuf **mp, bus_dma_segment_t *segs, int *nsegs, int maxsegs)
+{
+ bus_dma_segment_t *psegs;
+ struct mbuf *m;
+ int seg_count;
+ int defragged;
+ int err;
+
+ m = *mp;
+ defragged = err = seg_count = 0;
+
+ KASSERT(m->m_pkthdr.len, ("packet has zero header length"));
+
+retry:
+ psegs = segs;
+ seg_count = 0;
+ if (m->m_next == NULL) {
+ sfxge_map_mbuf_fast(tag, map, m, segs);
+ *nsegs = 1;
+ return (0);
+ }
+#if defined(__i386__) || defined(__amd64__)
+ while (m && seg_count < maxsegs) {
+ /*
+ * firmware doesn't like empty segments
+ */
+ if (m->m_len != 0) {
+ seg_count++;
+ sfxge_map_mbuf_fast(tag, map, m, psegs);
+ psegs++;
+ }
+ m = m->m_next;
+ }
+#else
+ err = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs, &seg_count, 0);
+#endif
+ if (seg_count == 0) {
+ err = EFBIG;
+ goto err_out;
+ } else if (err == EFBIG || seg_count >= maxsegs) {
+ if (!defragged) {
+ m = m_defrag(*mp, M_DONTWAIT);
+ if (m == NULL) {
+ err = ENOBUFS;
+ goto err_out;
+ }
+ *mp = m;
+ defragged = 1;
+ goto retry;
+ }
+ err = EFBIG;
+ goto err_out;
+ }
+ *nsegs = seg_count;
+
+err_out:
+ return (err);
+}
+
+void
+sfxge_dma_free(efsys_mem_t *esmp)
+{
+
+ bus_dmamap_unload(esmp->esm_tag, esmp->esm_map);
+ bus_dmamem_free(esmp->esm_tag, esmp->esm_base, esmp->esm_map);
+ bus_dma_tag_destroy(esmp->esm_tag);
+
+ esmp->esm_addr = 0;
+ esmp->esm_base = NULL;
+}
+
+int
+sfxge_dma_alloc(struct sfxge_softc *sc, bus_size_t len, efsys_mem_t *esmp)
+{
+ void *vaddr;
+
+ /* Create the child DMA tag. */
+ if (bus_dma_tag_create(sc->parent_dma_tag, PAGE_SIZE, 0,
+ MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
+ NULL, len, 1, len, 0, NULL, NULL, &esmp->esm_tag) != 0) {
+ device_printf(sc->dev, "Couldn't allocate DMA tag\n");
+ return (ENOMEM);
+ }
+
+ /* Allocate kernel memory. */
+ if (bus_dmamem_alloc(esmp->esm_tag, (void **)&vaddr,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
+ &esmp->esm_map) != 0) {
+ device_printf(sc->dev, "Couldn't allocate DMA memory\n");
+ bus_dma_tag_destroy(esmp->esm_tag);
+ return (ENOMEM);
+ }
+
+ /* Load map into device memory. */
+ if (bus_dmamap_load(esmp->esm_tag, esmp->esm_map, vaddr, len,
+ sfxge_dma_cb, &esmp->esm_addr, 0) != 0) {
+ device_printf(sc->dev, "Couldn't load DMA mapping\n");
+ bus_dmamem_free(esmp->esm_tag, esmp->esm_base, esmp->esm_map);
+ bus_dma_tag_destroy(esmp->esm_tag);
+ return (ENOMEM);
+ }
+
+ /*
+ * The callback gets error information about the mapping and
+ * will have set esm_addr to 0 if something went wrong.
+ */
+ if (esmp->esm_addr == 0) {
+ bus_dmamem_free(esmp->esm_tag, vaddr, esmp->esm_map);
+ bus_dma_tag_destroy(esmp->esm_tag);
+ return (ENOMEM);
+ }
+
+ esmp->esm_base = vaddr;
+
+ return (0);
+}
+
+void
+sfxge_dma_fini(struct sfxge_softc *sc)
+{
+
+ bus_dma_tag_destroy(sc->parent_dma_tag);
+}
+
+int
+sfxge_dma_init(struct sfxge_softc *sc)
+{
+
+ /* Create the parent dma tag. */
+ if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
+ BUS_SPACE_UNRESTRICTED, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lock, lockarg */
+ &sc->parent_dma_tag)) {
+ device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
diff --git a/sys/dev/sfxge/sfxge_ev.c b/sys/dev/sfxge/sfxge_ev.c
new file mode 100644
index 0000000..b506b27
--- /dev/null
+++ b/sys/dev/sfxge/sfxge_ev.c
@@ -0,0 +1,862 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include "common/efx.h"
+
+#include "sfxge.h"
+
+static void
+sfxge_ev_qcomplete(struct sfxge_evq *evq, boolean_t eop)
+{
+ struct sfxge_softc *sc;
+ unsigned int index;
+ struct sfxge_rxq *rxq;
+ struct sfxge_txq *txq;
+
+ sc = evq->sc;
+ index = evq->index;
+ rxq = sc->rxq[index];
+
+ if ((txq = evq->txq) != NULL) {
+ evq->txq = NULL;
+ evq->txqs = &(evq->txq);
+
+ do {
+ struct sfxge_txq *next;
+
+ next = txq->next;
+ txq->next = NULL;
+
+ KASSERT(txq->evq_index == index,
+ ("txq->evq_index != index"));
+
+ if (txq->pending != txq->completed)
+ sfxge_tx_qcomplete(txq);
+
+ txq = next;
+ } while (txq != NULL);
+ }
+
+ if (rxq->pending != rxq->completed)
+ sfxge_rx_qcomplete(rxq, eop);
+}
+
+static boolean_t
+sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
+ uint16_t flags)
+{
+ struct sfxge_evq *evq;
+ struct sfxge_softc *sc;
+ struct sfxge_rxq *rxq;
+ unsigned int expected;
+ struct sfxge_rx_sw_desc *rx_desc;
+
+ evq = arg;
+ sc = evq->sc;
+
+ if (evq->exception)
+ goto done;
+
+ rxq = sc->rxq[label];
+ KASSERT(rxq != NULL, ("rxq == NULL"));
+ KASSERT(evq->index == rxq->index,
+ ("evq->index != rxq->index"));
+
+ if (rxq->init_state != SFXGE_RXQ_STARTED)
+ goto done;
+
+ expected = rxq->pending++ & (SFXGE_NDESCS - 1);
+ if (id != expected) {
+ evq->exception = B_TRUE;
+
+ device_printf(sc->dev, "RX completion out of order"
+ " (id=%#x expected=%#x flags=%#x); resetting\n",
+ id, expected, flags);
+ sfxge_schedule_reset(sc);
+
+ goto done;
+ }
+
+ rx_desc = &rxq->queue[id];
+
+ KASSERT(rx_desc->flags == EFX_DISCARD,
+ ("rx_desc->flags != EFX_DISCARD"));
+ rx_desc->flags = flags;
+
+ KASSERT(size < (1 << 16), ("size >= (1 << 16)"));
+ rx_desc->size = (uint16_t)size;
+ prefetch_read_many(rx_desc->mbuf);
+
+ evq->rx_done++;
+
+ if (rxq->pending - rxq->completed >= SFXGE_RX_BATCH)
+ sfxge_ev_qcomplete(evq, B_FALSE);
+
+done:
+ return (evq->rx_done >= SFXGE_EV_BATCH);
+}
+
+static boolean_t
+sfxge_ev_exception(void *arg, uint32_t code, uint32_t data)
+{
+ struct sfxge_evq *evq;
+ struct sfxge_softc *sc;
+
+ evq = (struct sfxge_evq *)arg;
+ sc = evq->sc;
+
+ evq->exception = B_TRUE;
+
+ if (code != EFX_EXCEPTION_UNKNOWN_SENSOREVT) {
+ device_printf(sc->dev,
+ "hardware exception (code=%u); resetting\n",
+ code);
+ sfxge_schedule_reset(sc);
+ }
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_rxq_flush_done(void *arg, uint32_t label)
+{
+ struct sfxge_evq *evq;
+ struct sfxge_softc *sc;
+ struct sfxge_rxq *rxq;
+ unsigned int index;
+ uint16_t magic;
+
+ evq = (struct sfxge_evq *)arg;
+ sc = evq->sc;
+ rxq = sc->rxq[label];
+
+ KASSERT(rxq != NULL, ("rxq == NULL"));
+
+ /* Resend a software event on the correct queue */
+ index = rxq->index;
+ evq = sc->evq[index];
+
+ KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
+ ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
+ magic = SFXGE_MAGIC_RX_QFLUSH_DONE | label;
+
+ KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
+ ("evq not started"));
+ efx_ev_qpost(evq->common, magic);
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_rxq_flush_failed(void *arg, uint32_t label)
+{
+ struct sfxge_evq *evq;
+ struct sfxge_softc *sc;
+ struct sfxge_rxq *rxq;
+ unsigned int index;
+ uint16_t magic;
+
+ evq = (struct sfxge_evq *)arg;
+ sc = evq->sc;
+ rxq = sc->rxq[label];
+
+ KASSERT(rxq != NULL, ("rxq == NULL"));
+
+ /* Resend a software event on the correct queue */
+ index = rxq->index;
+ evq = sc->evq[index];
+
+ KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
+ ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
+ magic = SFXGE_MAGIC_RX_QFLUSH_FAILED | label;
+
+ KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
+ ("evq not started"));
+ efx_ev_qpost(evq->common, magic);
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_tx(void *arg, uint32_t label, uint32_t id)
+{
+ struct sfxge_evq *evq;
+ struct sfxge_softc *sc;
+ struct sfxge_txq *txq;
+ unsigned int stop;
+ unsigned int delta;
+
+ evq = (struct sfxge_evq *)arg;
+ sc = evq->sc;
+ txq = sc->txq[label];
+
+ KASSERT(txq != NULL, ("txq == NULL"));
+ KASSERT(evq->index == txq->evq_index,
+ ("evq->index != txq->evq_index"));
+
+ if (txq->init_state != SFXGE_TXQ_STARTED)
+ goto done;
+
+ stop = (id + 1) & (SFXGE_NDESCS - 1);
+ id = txq->pending & (SFXGE_NDESCS - 1);
+
+ delta = (stop >= id) ? (stop - id) : (SFXGE_NDESCS - id + stop);
+ txq->pending += delta;
+
+ evq->tx_done++;
+
+ if (txq->next == NULL &&
+ evq->txqs != &(txq->next)) {
+ *(evq->txqs) = txq;
+ evq->txqs = &(txq->next);
+ }
+
+ if (txq->pending - txq->completed >= SFXGE_TX_BATCH)
+ sfxge_tx_qcomplete(txq);
+
+done:
+ return (evq->tx_done >= SFXGE_EV_BATCH);
+}
+
+static boolean_t
+sfxge_ev_txq_flush_done(void *arg, uint32_t label)
+{
+ struct sfxge_evq *evq;
+ struct sfxge_softc *sc;
+ struct sfxge_txq *txq;
+ uint16_t magic;
+
+ evq = (struct sfxge_evq *)arg;
+ sc = evq->sc;
+ txq = sc->txq[label];
+
+ KASSERT(txq != NULL, ("txq == NULL"));
+ KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
+ ("txq not initialized"));
+
+ /* Resend a software event on the correct queue */
+ evq = sc->evq[txq->evq_index];
+
+ KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
+ ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
+ magic = SFXGE_MAGIC_TX_QFLUSH_DONE | label;
+
+ KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
+ ("evq not started"));
+ efx_ev_qpost(evq->common, magic);
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_software(void *arg, uint16_t magic)
+{
+ struct sfxge_evq *evq;
+ struct sfxge_softc *sc;
+ unsigned int label;
+
+ evq = (struct sfxge_evq *)arg;
+ sc = evq->sc;
+
+ label = magic & SFXGE_MAGIC_DMAQ_LABEL_MASK;
+ magic &= ~SFXGE_MAGIC_DMAQ_LABEL_MASK;
+
+ switch (magic) {
+ case SFXGE_MAGIC_RX_QFLUSH_DONE: {
+ struct sfxge_rxq *rxq = sc->rxq[label];
+
+ KASSERT(rxq != NULL, ("rxq == NULL"));
+ KASSERT(evq->index == rxq->index,
+ ("evq->index != rxq->index"));
+
+ sfxge_rx_qflush_done(rxq);
+ break;
+ }
+ case SFXGE_MAGIC_RX_QFLUSH_FAILED: {
+ struct sfxge_rxq *rxq = sc->rxq[label];
+
+ KASSERT(rxq != NULL, ("rxq == NULL"));
+ KASSERT(evq->index == rxq->index,
+ ("evq->index != rxq->index"));
+
+ sfxge_rx_qflush_failed(rxq);
+ break;
+ }
+ case SFXGE_MAGIC_RX_QREFILL: {
+ struct sfxge_rxq *rxq = sc->rxq[label];
+
+ KASSERT(rxq != NULL, ("rxq == NULL"));
+ KASSERT(evq->index == rxq->index,
+ ("evq->index != rxq->index"));
+
+ sfxge_rx_qrefill(rxq);
+ break;
+ }
+ case SFXGE_MAGIC_TX_QFLUSH_DONE: {
+ struct sfxge_txq *txq = sc->txq[label];
+
+ KASSERT(txq != NULL, ("txq == NULL"));
+ KASSERT(evq->index == txq->evq_index,
+ ("evq->index != txq->evq_index"));
+
+ sfxge_tx_qflush_done(txq);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_sram(void *arg, uint32_t code)
+{
+ (void)arg;
+
+ switch (code) {
+ case EFX_SRAM_UPDATE:
+ EFSYS_PROBE(sram_update);
+ break;
+
+ case EFX_SRAM_CLEAR:
+ EFSYS_PROBE(sram_clear);
+ break;
+
+ case EFX_SRAM_ILLEGAL_CLEAR:
+ EFSYS_PROBE(sram_illegal_clear);
+ break;
+
+ default:
+ KASSERT(B_FALSE, ("Impossible SRAM event"));
+ break;
+ }
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_timer(void *arg, uint32_t index)
+{
+ (void)arg;
+ (void)index;
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_wake_up(void *arg, uint32_t index)
+{
+ (void)arg;
+ (void)index;
+
+ return (B_FALSE);
+}
+
+static void
+sfxge_ev_stat_update(struct sfxge_softc *sc)
+{
+ struct sfxge_evq *evq;
+ unsigned int index;
+ clock_t now;
+
+ sx_xlock(&sc->softc_lock);
+
+ if (sc->evq[0]->init_state != SFXGE_EVQ_STARTED)
+ goto out;
+
+ now = ticks;
+ if (now - sc->ev_stats_update_time < hz)
+ goto out;
+
+ sc->ev_stats_update_time = now;
+
+ /* Add event counts from each event queue in turn */
+ for (index = 0; index < sc->intr.n_alloc; index++) {
+ evq = sc->evq[index];
+ mtx_lock(&evq->lock);
+ efx_ev_qstats_update(evq->common, sc->ev_stats);
+ mtx_unlock(&evq->lock);
+ }
+out:
+ sx_xunlock(&sc->softc_lock);
+}
+
+static int
+sfxge_ev_stat_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc = arg1;
+ unsigned int id = arg2;
+
+ sfxge_ev_stat_update(sc);
+
+ return SYSCTL_OUT(req, &sc->ev_stats[id], sizeof(sc->ev_stats[id]));
+}
+
+static void
+sfxge_ev_stat_init(struct sfxge_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
+ struct sysctl_oid_list *stat_list;
+ unsigned int id;
+ char name[40];
+
+ stat_list = SYSCTL_CHILDREN(sc->stats_node);
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ snprintf(name, sizeof(name), "ev_%s",
+ efx_ev_qstat_name(sc->enp, id));
+ SYSCTL_ADD_PROC(
+ ctx, stat_list,
+ OID_AUTO, name, CTLTYPE_U64|CTLFLAG_RD,
+ sc, id, sfxge_ev_stat_handler, "Q",
+ "");
+ }
+}
+
+static void
+sfxge_ev_qmoderate(struct sfxge_softc *sc, unsigned int idx, unsigned int us)
+{
+ struct sfxge_evq *evq;
+ efx_evq_t *eep;
+
+ evq = sc->evq[idx];
+ eep = evq->common;
+
+ KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
+ ("evq->init_state != SFXGE_EVQ_STARTED"));
+
+ (void)efx_ev_qmoderate(eep, us);
+}
+
+static int
+sfxge_int_mod_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc = arg1;
+ struct sfxge_intr *intr = &sc->intr;
+ unsigned int moderation;
+ int error;
+ int index;
+
+ sx_xlock(&sc->softc_lock);
+
+ if (req->newptr) {
+ if ((error = SYSCTL_IN(req, &moderation, sizeof(moderation)))
+ != 0)
+ goto out;
+
+ /* We may not be calling efx_ev_qmoderate() now,
+ * so we have to range-check the value ourselves.
+ */
+ if (moderation >
+ efx_nic_cfg_get(sc->enp)->enc_evq_moderation_max) {
+ error = EINVAL;
+ goto out;
+ }
+
+ sc->ev_moderation = moderation;
+ if (intr->state == SFXGE_INTR_STARTED) {
+ for (index = 0; index < intr->n_alloc; index++)
+ sfxge_ev_qmoderate(sc, index, moderation);
+ }
+ } else {
+ error = SYSCTL_OUT(req, &sc->ev_moderation,
+ sizeof(sc->ev_moderation));
+ }
+
+out:
+ sx_xunlock(&sc->softc_lock);
+
+ return error;
+}
+
+static boolean_t
+sfxge_ev_initialized(void *arg)
+{
+ struct sfxge_evq *evq;
+
+ evq = (struct sfxge_evq *)arg;
+
+ KASSERT(evq->init_state == SFXGE_EVQ_STARTING,
+ ("evq not starting"));
+
+ evq->init_state = SFXGE_EVQ_STARTED;
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_link_change(void *arg, efx_link_mode_t link_mode)
+{
+ struct sfxge_evq *evq;
+ struct sfxge_softc *sc;
+
+ evq = (struct sfxge_evq *)arg;
+ sc = evq->sc;
+
+ sfxge_mac_link_update(sc, link_mode);
+
+ return (B_FALSE);
+}
+
+static const efx_ev_callbacks_t sfxge_ev_callbacks = {
+ .eec_initialized = sfxge_ev_initialized,
+ .eec_rx = sfxge_ev_rx,
+ .eec_tx = sfxge_ev_tx,
+ .eec_exception = sfxge_ev_exception,
+ .eec_rxq_flush_done = sfxge_ev_rxq_flush_done,
+ .eec_rxq_flush_failed = sfxge_ev_rxq_flush_failed,
+ .eec_txq_flush_done = sfxge_ev_txq_flush_done,
+ .eec_software = sfxge_ev_software,
+ .eec_sram = sfxge_ev_sram,
+ .eec_wake_up = sfxge_ev_wake_up,
+ .eec_timer = sfxge_ev_timer,
+ .eec_link_change = sfxge_ev_link_change,
+};
+
+int
+sfxge_ev_qpoll(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_evq *evq;
+ int rc;
+
+ evq = sc->evq[index];
+
+ mtx_lock(&evq->lock);
+
+ if (evq->init_state != SFXGE_EVQ_STARTING &&
+ evq->init_state != SFXGE_EVQ_STARTED) {
+ rc = EINVAL;
+ goto fail;
+ }
+
+ /* Synchronize the DMA memory for reading */
+ bus_dmamap_sync(evq->mem.esm_tag, evq->mem.esm_map,
+ BUS_DMASYNC_POSTREAD);
+
+ KASSERT(evq->rx_done == 0, ("evq->rx_done != 0"));
+ KASSERT(evq->tx_done == 0, ("evq->tx_done != 0"));
+ KASSERT(evq->txq == NULL, ("evq->txq != NULL"));
+ KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq"));
+
+ /* Poll the queue */
+ efx_ev_qpoll(evq->common, &evq->read_ptr, &sfxge_ev_callbacks, evq);
+
+ evq->rx_done = 0;
+ evq->tx_done = 0;
+
+ /* Perform any pending completion processing */
+ sfxge_ev_qcomplete(evq, B_TRUE);
+
+ /* Re-prime the event queue for interrupts */
+ if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
+ goto fail;
+
+ mtx_unlock(&evq->lock);
+
+ return (0);
+
+fail:
+ mtx_unlock(&evq->lock);
+ return (rc);
+}
+
+static void
+sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_evq *evq;
+
+ evq = sc->evq[index];
+
+ KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
+ ("evq->init_state != SFXGE_EVQ_STARTED"));
+
+ mtx_lock(&evq->lock);
+ evq->init_state = SFXGE_EVQ_INITIALIZED;
+ evq->read_ptr = 0;
+ evq->exception = B_FALSE;
+
+ /* Add event counts before discarding the common evq state */
+ efx_ev_qstats_update(evq->common, sc->ev_stats);
+
+ efx_ev_qdestroy(evq->common);
+ efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
+ EFX_EVQ_NBUFS(SFXGE_NEVS));
+ mtx_unlock(&evq->lock);
+}
+
+static int
+sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_evq *evq;
+ efsys_mem_t *esmp;
+ int count;
+ int rc;
+
+ evq = sc->evq[index];
+ esmp = &evq->mem;
+
+ KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
+ ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
+
+ /* Clear all events. */
+ (void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(SFXGE_NEVS));
+
+ /* Program the buffer table. */
+ if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp,
+ EFX_EVQ_NBUFS(SFXGE_NEVS))) != 0)
+ return rc;
+
+ /* Create the common code event queue. */
+ if ((rc = efx_ev_qcreate(sc->enp, index, esmp, SFXGE_NEVS,
+ evq->buf_base_id, &evq->common)) != 0)
+ goto fail;
+
+ mtx_lock(&evq->lock);
+
+ /* Set the default moderation */
+ (void)efx_ev_qmoderate(evq->common, sc->ev_moderation);
+
+ /* Prime the event queue for interrupts */
+ if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
+ goto fail2;
+
+ evq->init_state = SFXGE_EVQ_STARTING;
+
+ mtx_unlock(&evq->lock);
+
+ /* Wait for the initialization event */
+ count = 0;
+ do {
+ /* Pause for 100 ms */
+ pause("sfxge evq init", hz / 10);
+
+ /* Check to see if the test event has been processed */
+ if (evq->init_state == SFXGE_EVQ_STARTED)
+ goto done;
+
+ } while (++count < 20);
+
+ rc = ETIMEDOUT;
+ goto fail3;
+
+done:
+ return (0);
+
+fail3:
+ mtx_lock(&evq->lock);
+ evq->init_state = SFXGE_EVQ_INITIALIZED;
+fail2:
+ mtx_unlock(&evq->lock);
+ efx_ev_qdestroy(evq->common);
+fail:
+ efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
+ EFX_EVQ_NBUFS(SFXGE_NEVS));
+
+ return (rc);
+}
+
+void
+sfxge_ev_stop(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ efx_nic_t *enp;
+ int index;
+
+ intr = &sc->intr;
+ enp = sc->enp;
+
+ KASSERT(intr->state == SFXGE_INTR_STARTED,
+ ("Interrupts not started"));
+
+ /* Stop the event queue(s) */
+ index = intr->n_alloc;
+ while (--index >= 0)
+ sfxge_ev_qstop(sc, index);
+
+ /* Tear down the event module */
+ efx_ev_fini(enp);
+}
+
+int
+sfxge_ev_start(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ int index;
+ int rc;
+
+ intr = &sc->intr;
+
+ KASSERT(intr->state == SFXGE_INTR_STARTED,
+ ("intr->state != SFXGE_INTR_STARTED"));
+
+ /* Initialize the event module */
+ if ((rc = efx_ev_init(sc->enp)) != 0)
+ return rc;
+
+ /* Start the event queues */
+ for (index = 0; index < intr->n_alloc; index++) {
+ if ((rc = sfxge_ev_qstart(sc, index)) != 0)
+ goto fail;
+ }
+
+ return (0);
+
+fail:
+ /* Stop the event queue(s) */
+ while (--index >= 0)
+ sfxge_ev_qstop(sc, index);
+
+ /* Tear down the event module */
+ efx_ev_fini(sc->enp);
+
+ return (rc);
+}
+
+static void
+sfxge_ev_qfini(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_evq *evq;
+
+ evq = sc->evq[index];
+
+ KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
+ ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
+ KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq"));
+
+ sfxge_dma_free(&evq->mem);
+
+ sc->evq[index] = NULL;
+
+ mtx_destroy(&evq->lock);
+
+ free(evq, M_SFXGE);
+}
+
+static int
+sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_evq *evq;
+ efsys_mem_t *esmp;
+ int rc;
+
+ KASSERT(index < SFXGE_RX_SCALE_MAX, ("index >= SFXGE_RX_SCALE_MAX"));
+
+ evq = malloc(sizeof(struct sfxge_evq), M_SFXGE, M_ZERO | M_WAITOK);
+ evq->sc = sc;
+ evq->index = index;
+ sc->evq[index] = evq;
+ esmp = &evq->mem;
+
+ /* Initialise TX completion list */
+ evq->txqs = &evq->txq;
+
+ /* Allocate DMA space. */
+ if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(SFXGE_NEVS), esmp)) != 0)
+ return (rc);
+
+ /* Allocate buffer table entries. */
+ sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(SFXGE_NEVS),
+ &evq->buf_base_id);
+
+ mtx_init(&evq->lock, "evq", NULL, MTX_DEF);
+
+ evq->init_state = SFXGE_EVQ_INITIALIZED;
+
+ return (0);
+}
+
+void
+sfxge_ev_fini(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ int index;
+
+ intr = &sc->intr;
+
+ KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
+ ("intr->state != SFXGE_INTR_INITIALIZED"));
+
+ sc->ev_moderation = 0;
+
+ /* Tear down the event queue(s). */
+ index = intr->n_alloc;
+ while (--index >= 0)
+ sfxge_ev_qfini(sc, index);
+}
+
+int
+sfxge_ev_init(struct sfxge_softc *sc)
+{
+ struct sysctl_ctx_list *sysctl_ctx = device_get_sysctl_ctx(sc->dev);
+ struct sysctl_oid *sysctl_tree = device_get_sysctl_tree(sc->dev);
+ struct sfxge_intr *intr;
+ int index;
+ int rc;
+
+ intr = &sc->intr;
+
+ KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
+ ("intr->state != SFXGE_INTR_INITIALIZED"));
+
+ /* Set default interrupt moderation; add a sysctl to
+ * read and change it.
+ */
+ sc->ev_moderation = SFXGE_MODERATION;
+ SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "int_mod", CTLTYPE_UINT|CTLFLAG_RW,
+ sc, 0, sfxge_int_mod_handler, "IU",
+ "sfxge interrupt moderation (us)");
+
+ /*
+ * Initialize the event queue(s) - one per interrupt.
+ */
+ for (index = 0; index < intr->n_alloc; index++) {
+ if ((rc = sfxge_ev_qinit(sc, index)) != 0)
+ goto fail;
+ }
+
+ sfxge_ev_stat_init(sc);
+
+ return (0);
+
+fail:
+ while (--index >= 0)
+ sfxge_ev_qfini(sc, index);
+
+ return (rc);
+}
diff --git a/sys/dev/sfxge/sfxge_intr.c b/sys/dev/sfxge/sfxge_intr.c
new file mode 100644
index 0000000..c8f4c3d
--- /dev/null
+++ b/sys/dev/sfxge/sfxge_intr.c
@@ -0,0 +1,556 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/smp.h>
+#include <sys/syslog.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "common/efx.h"
+
+#include "sfxge.h"
+
+static int
+sfxge_intr_line_filter(void *arg)
+{
+ struct sfxge_evq *evq;
+ struct sfxge_softc *sc;
+ efx_nic_t *enp;
+ struct sfxge_intr *intr;
+ boolean_t fatal;
+ uint32_t qmask;
+
+ evq = (struct sfxge_evq *)arg;
+ sc = evq->sc;
+ enp = sc->enp;
+ intr = &sc->intr;
+
+ KASSERT(intr != NULL, ("intr == NULL"));
+ KASSERT(intr->type == EFX_INTR_LINE,
+ ("intr->type != EFX_INTR_LINE"));
+
+ if (intr->state != SFXGE_INTR_STARTED)
+ return FILTER_STRAY;
+
+ (void)efx_intr_status_line(enp, &fatal, &qmask);
+
+ if (fatal) {
+ (void) efx_intr_disable(enp);
+ (void) efx_intr_fatal(enp);
+ return FILTER_HANDLED;
+ }
+
+ if (qmask != 0) {
+ intr->zero_count = 0;
+ return FILTER_SCHEDULE_THREAD;
+ }
+
+ /* SF bug 15783: If the function is not asserting its IRQ and
+ * we read the queue mask on the cycle before a flag is added
+ * to the mask, this inhibits the function from asserting the
+ * IRQ even though we don't see the flag set. To work around
+ * this, we must re-prime all event queues and report the IRQ
+ * as handled when we see a mask of zero. To allow for shared
+ * IRQs, we don't repeat this if we see a mask of zero twice
+ * or more in a row.
+ */
+ if (intr->zero_count++ == 0) {
+ if (evq->init_state == SFXGE_EVQ_STARTED) {
+ if (efx_ev_qpending(evq->common, evq->read_ptr))
+ return FILTER_SCHEDULE_THREAD;
+ efx_ev_qprime(evq->common, evq->read_ptr);
+ return FILTER_HANDLED;
+ }
+ }
+
+ return FILTER_STRAY;
+}
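+
+/*
+ * The filter above runs in primary interrupt context; when it returns
+ * FILTER_SCHEDULE_THREAD, the interrupt thread calls sfxge_intr_line()
+ * below, which performs the actual event processing via
+ * sfxge_ev_qpoll().
+ */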
+
+static void
+sfxge_intr_line(void *arg)
+{
+ struct sfxge_evq *evq = arg;
+ struct sfxge_softc *sc = evq->sc;
+
+ (void)sfxge_ev_qpoll(sc, 0);
+}
+
+static void
+sfxge_intr_message(void *arg)
+{
+ struct sfxge_evq *evq;
+ struct sfxge_softc *sc;
+ efx_nic_t *enp;
+ struct sfxge_intr *intr;
+ unsigned int index;
+ boolean_t fatal;
+
+ evq = (struct sfxge_evq *)arg;
+ sc = evq->sc;
+ enp = sc->enp;
+ intr = &sc->intr;
+ index = evq->index;
+
+ KASSERT(intr != NULL, ("intr == NULL"));
+ KASSERT(intr->type == EFX_INTR_MESSAGE,
+ ("intr->type != EFX_INTR_MESSAGE"));
+
+ if (intr->state != SFXGE_INTR_STARTED)
+ return;
+
+ (void)efx_intr_status_message(enp, index, &fatal);
+
+ if (fatal) {
+ (void)efx_intr_disable(enp);
+ (void)efx_intr_fatal(enp);
+ return;
+ }
+
+ (void)sfxge_ev_qpoll(sc, index);
+}
+
+static int
+sfxge_intr_bus_enable(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ struct sfxge_intr_hdl *table;
+ driver_filter_t *filter;
+ driver_intr_t *handler;
+ int index;
+ int err;
+
+ intr = &sc->intr;
+ table = intr->table;
+
+ switch (intr->type) {
+ case EFX_INTR_MESSAGE:
+ filter = NULL; /* not shared */
+ handler = sfxge_intr_message;
+ break;
+
+ case EFX_INTR_LINE:
+ filter = sfxge_intr_line_filter;
+ handler = sfxge_intr_line;
+ break;
+
+ default:
+ KASSERT(0, ("Invalid interrupt type"));
+ return EINVAL;
+ }
+
+ /* Try to add the handlers */
+ for (index = 0; index < intr->n_alloc; index++) {
+ if ((err = bus_setup_intr(sc->dev, table[index].eih_res,
+ INTR_MPSAFE|INTR_TYPE_NET, filter, handler,
+ sc->evq[index], &table[index].eih_tag)) != 0) {
+ goto fail;
+ }
+#ifdef SFXGE_HAVE_DESCRIBE_INTR
+ if (intr->n_alloc > 1)
+ bus_describe_intr(sc->dev, table[index].eih_res,
+ table[index].eih_tag, "%d", index);
+#endif
+ bus_bind_intr(sc->dev, table[index].eih_res, index);
+ }
+
+ return (0);
+
+fail:
+ /* Remove remaining handlers */
+ while (--index >= 0)
+ bus_teardown_intr(sc->dev, table[index].eih_res,
+ table[index].eih_tag);
+
+ return (err);
+}
+
+static void
+sfxge_intr_bus_disable(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ struct sfxge_intr_hdl *table;
+ int i;
+
+ intr = &sc->intr;
+ table = intr->table;
+
+ /* Remove all handlers */
+ for (i = 0; i < intr->n_alloc; i++)
+ bus_teardown_intr(sc->dev, table[i].eih_res,
+ table[i].eih_tag);
+}
+
+static int
+sfxge_intr_alloc(struct sfxge_softc *sc, int count)
+{
+ device_t dev;
+ struct sfxge_intr_hdl *table;
+ struct sfxge_intr *intr;
+ struct resource *res;
+ int rid;
+ int error;
+ int i;
+
+ dev = sc->dev;
+ intr = &sc->intr;
+ error = 0;
+
+ table = malloc(count * sizeof(struct sfxge_intr_hdl),
+ M_SFXGE, M_WAITOK);
+ intr->table = table;
+
+ for (i = 0; i < count; i++) {
+ rid = i + 1;
+ res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (res == NULL) {
+ device_printf(dev, "Couldn't allocate interrupts for "
+ "message %d\n", rid);
+ error = ENOMEM;
+ break;
+ }
+ table[i].eih_rid = rid;
+ table[i].eih_res = res;
+ }
+
+ if (error) {
+ /* Release the interrupts that were successfully allocated. */
+ count = i;
+ for (i = 0; i < count; i++)
+ bus_release_resource(dev, SYS_RES_IRQ,
+ table[i].eih_rid, table[i].eih_res);
+ }
+
+ return (error);
+}
+
+static void
+sfxge_intr_teardown_msix(struct sfxge_softc *sc)
+{
+ device_t dev;
+ struct resource *resp;
+ int rid;
+
+ dev = sc->dev;
+ resp = sc->intr.msix_res;
+
+ rid = rman_get_rid(resp);
+ bus_release_resource(dev, SYS_RES_MEMORY, rid, resp);
+}
+
+static int
+sfxge_intr_setup_msix(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ struct resource *resp;
+ device_t dev;
+ int count;
+ int rid;
+
+ dev = sc->dev;
+ intr = &sc->intr;
+
+ /* Check if MSI-X is available. */
+ count = pci_msix_count(dev);
+ if (count == 0)
+ return (EINVAL);
+
+ /* Limit the number of interrupts to the number of CPUs. */
+ if (count > mp_ncpus)
+ count = mp_ncpus;
+
+ /* Limit the number of interrupts to the RSS channel maximum. */
+ if (count > EFX_MAXRSS)
+ count = EFX_MAXRSS;
+
+ rid = PCIR_BAR(4);
+ resp = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (resp == NULL)
+ return (ENOMEM);
+
+ if (pci_alloc_msix(dev, &count) != 0) {
+ bus_release_resource(dev, SYS_RES_MEMORY, rid, resp);
+ return (ENOMEM);
+ }
+
+ /* Allocate interrupt handlers. */
+ if (sfxge_intr_alloc(sc, count) != 0) {
+ bus_release_resource(dev, SYS_RES_MEMORY, rid, resp);
+ pci_release_msi(dev);
+ return (ENOMEM);
+ }
+
+ intr->type = EFX_INTR_MESSAGE;
+ intr->n_alloc = count;
+ intr->msix_res = resp;
+
+ return (0);
+}
+
+static int
+sfxge_intr_setup_msi(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ device_t dev;
+ int count;
+
+ dev = sc->dev;
+ intr = &sc->intr;
+
+ /*
+ * Check if MSI is available. All messages must be written to
+ * the same address and on x86 this means the IRQs have the
+ * same CPU affinity. So we only ever allocate 1.
+ */
+ count = pci_msi_count(dev) ? 1 : 0;
+ if (count == 0)
+ return (EINVAL);
+
+ if (pci_alloc_msi(dev, &count) != 0)
+ return (ENOMEM);
+
+ /* Allocate interrupt handler. */
+ if (sfxge_intr_alloc(sc, count) != 0) {
+ pci_release_msi(dev);
+ return (ENOMEM);
+ }
+
+ intr->type = EFX_INTR_MESSAGE;
+ intr->n_alloc = count;
+
+ return (0);
+}
+
+static int
+sfxge_intr_setup_fixed(struct sfxge_softc *sc)
+{
+ struct sfxge_intr_hdl *table;
+ struct sfxge_intr *intr;
+ struct resource *res;
+ device_t dev;
+ int rid;
+
+ dev = sc->dev;
+ intr = &sc->intr;
+
+ rid = 0;
+ res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (res == NULL)
+ return (ENOMEM);
+
+ table = malloc(sizeof(struct sfxge_intr_hdl), M_SFXGE, M_WAITOK);
+ table[0].eih_rid = rid;
+ table[0].eih_res = res;
+
+ intr->type = EFX_INTR_LINE;
+ intr->n_alloc = 1;
+ intr->table = table;
+
+ return (0);
+}
+
+static const char *const __sfxge_err[] = {
+ "",
+ "SRAM out-of-bounds",
+ "Buffer ID out-of-bounds",
+ "Internal memory parity",
+ "Receive buffer ownership",
+ "Transmit buffer ownership",
+ "Receive descriptor ownership",
+ "Transmit descriptor ownership",
+ "Event queue ownership",
+ "Event queue FIFO overflow",
+ "Illegal address",
+ "SRAM parity"
+};
+
+void
+sfxge_err(efsys_identifier_t *arg, unsigned int code, uint32_t dword0,
+ uint32_t dword1)
+{
+ struct sfxge_softc *sc = (struct sfxge_softc *)arg;
+ device_t dev = sc->dev;
+
+ log(LOG_WARNING, "[%s%d] FATAL ERROR: %s (0x%08x%08x)",
+ device_get_name(dev), device_get_unit(dev),
+ __sfxge_err[code], dword1, dword0);
+}
+
+void
+sfxge_intr_stop(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+
+ intr = &sc->intr;
+
+ KASSERT(intr->state == SFXGE_INTR_STARTED,
+ ("Interrupts not started"));
+
+ intr->state = SFXGE_INTR_INITIALIZED;
+
+ /* Disable interrupts at the NIC */
+ efx_intr_disable(sc->enp);
+
+ /* Disable interrupts at the bus */
+ sfxge_intr_bus_disable(sc);
+
+ /* Tear down common code interrupt bits. */
+ efx_intr_fini(sc->enp);
+}
+
+int
+sfxge_intr_start(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ efsys_mem_t *esmp;
+ int rc;
+
+ intr = &sc->intr;
+ esmp = &intr->status;
+
+ KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
+ ("Interrupts not initialized"));
+
+ /* Zero the memory. */
+ (void)memset(esmp->esm_base, 0, EFX_INTR_SIZE);
+
+ /* Initialize common code interrupt bits. */
+ (void)efx_intr_init(sc->enp, intr->type, esmp);
+
+ /* Enable interrupts at the bus */
+ if ((rc = sfxge_intr_bus_enable(sc)) != 0)
+ goto fail;
+
+ intr->state = SFXGE_INTR_STARTED;
+
+ /* Enable interrupts at the NIC */
+ efx_intr_enable(sc->enp);
+
+ return (0);
+
+fail:
+ /* Tear down common code interrupt bits. */
+ efx_intr_fini(sc->enp);
+
+ intr->state = SFXGE_INTR_INITIALIZED;
+
+ return (rc);
+}
+
+void
+sfxge_intr_fini(struct sfxge_softc *sc)
+{
+ struct sfxge_intr_hdl *table;
+ struct sfxge_intr *intr;
+ efsys_mem_t *esmp;
+ device_t dev;
+ int i;
+
+ dev = sc->dev;
+ intr = &sc->intr;
+ esmp = &intr->status;
+ table = intr->table;
+
+ KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
+ ("intr->state != SFXGE_INTR_INITIALIZED"));
+
+ /* Free DMA memory. */
+ sfxge_dma_free(esmp);
+
+ /* Free interrupt handles. */
+ for (i = 0; i < intr->n_alloc; i++)
+ bus_release_resource(dev, SYS_RES_IRQ,
+ table[i].eih_rid, table[i].eih_res);
+
+ if (table[0].eih_rid != 0)
+ pci_release_msi(dev);
+
+ if (intr->msix_res != NULL)
+ sfxge_intr_teardown_msix(sc);
+
+ /* Free the handle table */
+ free(table, M_SFXGE);
+ intr->table = NULL;
+ intr->n_alloc = 0;
+
+ /* Clear the interrupt type */
+ intr->type = EFX_INTR_INVALID;
+
+ intr->state = SFXGE_INTR_UNINITIALIZED;
+}
+
+int
+sfxge_intr_init(struct sfxge_softc *sc)
+{
+ device_t dev;
+ struct sfxge_intr *intr;
+ efsys_mem_t *esmp;
+ int rc;
+
+ dev = sc->dev;
+ intr = &sc->intr;
+ esmp = &intr->status;
+
+ KASSERT(intr->state == SFXGE_INTR_UNINITIALIZED,
+ ("Interrupts already initialized"));
+
+ /* Try to set up MSI-X, MSI or fixed interrupts, in that order. */
+ if ((rc = sfxge_intr_setup_msix(sc)) == 0) {
+ device_printf(dev, "Using MSI-X interrupts\n");
+ } else if ((rc = sfxge_intr_setup_msi(sc)) == 0) {
+ device_printf(dev, "Using MSI interrupts\n");
+ } else if ((rc = sfxge_intr_setup_fixed(sc)) == 0) {
+ device_printf(dev, "Using fixed interrupts\n");
+ } else {
+ device_printf(dev, "Couldn't set up interrupts\n");
+ return (ENOMEM);
+ }
+
+ /* Set up DMA for interrupts. */
+ if ((rc = sfxge_dma_alloc(sc, EFX_INTR_SIZE, esmp)) != 0)
+ return (ENOMEM);
+
+ intr->state = SFXGE_INTR_INITIALIZED;
+
+ return (0);
+}
diff --git a/sys/dev/sfxge/sfxge_mcdi.c b/sys/dev/sfxge/sfxge_mcdi.c
new file mode 100644
index 0000000..6368ab4
--- /dev/null
+++ b/sys/dev/sfxge/sfxge_mcdi.c
@@ -0,0 +1,250 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/condvar.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/syslog.h>
+#include <sys/taskqueue.h>
+
+#include "common/efx.h"
+#include "common/efx_mcdi.h"
+#include "common/efx_regs_mcdi.h"
+
+#include "sfxge.h"
+
+#define SFXGE_MCDI_POLL_INTERVAL_MIN 10 /* 10us in 1us units */
+#define SFXGE_MCDI_POLL_INTERVAL_MAX 100000 /* 100ms in 1us units */
+#define SFXGE_MCDI_WATCHDOG_INTERVAL 10000000 /* 10s in 1us units */
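+
+/*
+ * sfxge_mcdi_poll() below polls with exponential backoff: the first
+ * check comes after the minimum interval, each subsequent delay
+ * doubles up to the maximum interval, and the request is aborted (and
+ * a reset scheduled) once the accumulated delay exceeds the watchdog
+ * interval.
+ */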
+
+/* Acquire exclusive access to MCDI for the duration of a request. */
+static void
+sfxge_mcdi_acquire(struct sfxge_mcdi *mcdi)
+{
+
+ mtx_lock(&mcdi->lock);
+ KASSERT(mcdi->state != SFXGE_MCDI_UNINITIALIZED,
+ ("MCDI not initialized"));
+
+ while (mcdi->state != SFXGE_MCDI_INITIALIZED)
+ (void)cv_wait_sig(&mcdi->cv, &mcdi->lock);
+ mcdi->state = SFXGE_MCDI_BUSY;
+
+ mtx_unlock(&mcdi->lock);
+}
+
+/* Release ownership of MCDI on request completion. */
+static void
+sfxge_mcdi_release(struct sfxge_mcdi *mcdi)
+{
+
+ mtx_lock(&mcdi->lock);
+ KASSERT((mcdi->state == SFXGE_MCDI_BUSY ||
+ mcdi->state == SFXGE_MCDI_COMPLETED),
+ ("MCDI not busy or task not completed"));
+
+ mcdi->state = SFXGE_MCDI_INITIALIZED;
+ cv_broadcast(&mcdi->cv);
+
+ mtx_unlock(&mcdi->lock);
+}
+
+static void
+sfxge_mcdi_timeout(struct sfxge_softc *sc)
+{
+ device_t dev = sc->dev;
+
+ log(LOG_WARNING, "[%s%d] MC_TIMEOUT", device_get_name(dev),
+ device_get_unit(dev));
+
+ EFSYS_PROBE(mcdi_timeout);
+ sfxge_schedule_reset(sc);
+}
+
+static void
+sfxge_mcdi_poll(struct sfxge_softc *sc)
+{
+ efx_nic_t *enp;
+ clock_t delay_total;
+ clock_t delay_us;
+ boolean_t aborted;
+
+ delay_total = 0;
+ delay_us = SFXGE_MCDI_POLL_INTERVAL_MIN;
+ enp = sc->enp;
+
+ do {
+ if (efx_mcdi_request_poll(enp)) {
+ EFSYS_PROBE1(mcdi_delay, clock_t, delay_total);
+ return;
+ }
+
+ if (delay_total > SFXGE_MCDI_WATCHDOG_INTERVAL) {
+ aborted = efx_mcdi_request_abort(enp);
+ KASSERT(aborted, ("abort failed"));
+ sfxge_mcdi_timeout(sc);
+ return;
+ }
+
+ /* Spin or block depending on delay interval. */
+ if (delay_us < 1000000)
+ DELAY(delay_us);
+ else
+ pause("mcdi wait", delay_us * hz / 1000000);
+
+ delay_total += delay_us;
+
+ /* Exponentially back off the poll frequency. */
+ delay_us = delay_us * 2;
+ if (delay_us > SFXGE_MCDI_POLL_INTERVAL_MAX)
+ delay_us = SFXGE_MCDI_POLL_INTERVAL_MAX;
+
+ } while (1);
+}
+
+static void
+sfxge_mcdi_execute(void *arg, efx_mcdi_req_t *emrp)
+{
+ struct sfxge_softc *sc;
+ struct sfxge_mcdi *mcdi;
+
+ sc = (struct sfxge_softc *)arg;
+ mcdi = &sc->mcdi;
+
+ sfxge_mcdi_acquire(mcdi);
+
+ /* Issue request and poll for completion. */
+ efx_mcdi_request_start(sc->enp, emrp, B_FALSE);
+ sfxge_mcdi_poll(sc);
+
+ sfxge_mcdi_release(mcdi);
+}
+
+static void
+sfxge_mcdi_ev_cpl(void *arg)
+{
+ struct sfxge_softc *sc;
+ struct sfxge_mcdi *mcdi;
+
+ sc = (struct sfxge_softc *)arg;
+ mcdi = &sc->mcdi;
+
+ mtx_lock(&mcdi->lock);
+ KASSERT(mcdi->state == SFXGE_MCDI_BUSY, ("MCDI not busy"));
+ mcdi->state = SFXGE_MCDI_COMPLETED;
+ cv_broadcast(&mcdi->cv);
+ mtx_unlock(&mcdi->lock);
+}
+
+static void
+sfxge_mcdi_exception(void *arg, efx_mcdi_exception_t eme)
+{
+ struct sfxge_softc *sc;
+ device_t dev;
+
+ sc = (struct sfxge_softc *)arg;
+ dev = sc->dev;
+
+ log(LOG_WARNING, "[%s%d] MC_%s", device_get_name(dev),
+ device_get_unit(dev),
+ (eme == EFX_MCDI_EXCEPTION_MC_REBOOT)
+ ? "REBOOT"
+ : (eme == EFX_MCDI_EXCEPTION_MC_BADASSERT)
+ ? "BADASSERT" : "UNKNOWN");
+
+ EFSYS_PROBE(mcdi_exception);
+
+ sfxge_schedule_reset(sc);
+}
+
+int
+sfxge_mcdi_init(struct sfxge_softc *sc)
+{
+ efx_nic_t *enp;
+ struct sfxge_mcdi *mcdi;
+ efx_mcdi_transport_t *emtp;
+ int rc;
+
+ enp = sc->enp;
+ mcdi = &sc->mcdi;
+ emtp = &mcdi->transport;
+
+ KASSERT(mcdi->state == SFXGE_MCDI_UNINITIALIZED,
+ ("MCDI already initialized"));
+
+ mtx_init(&mcdi->lock, "sfxge_mcdi", NULL, MTX_DEF);
+
+ mcdi->state = SFXGE_MCDI_INITIALIZED;
+
+ emtp->emt_context = sc;
+ emtp->emt_execute = sfxge_mcdi_execute;
+ emtp->emt_ev_cpl = sfxge_mcdi_ev_cpl;
+ emtp->emt_exception = sfxge_mcdi_exception;
+
+ cv_init(&mcdi->cv, "sfxge_mcdi");
+
+ if ((rc = efx_mcdi_init(enp, emtp)) != 0)
+ goto fail;
+
+ return (0);
+
+fail:
+ mtx_destroy(&mcdi->lock);
+ mcdi->state = SFXGE_MCDI_UNINITIALIZED;
+ return (rc);
+}
+
+void
+sfxge_mcdi_fini(struct sfxge_softc *sc)
+{
+ struct sfxge_mcdi *mcdi;
+ efx_nic_t *enp;
+ efx_mcdi_transport_t *emtp;
+
+ enp = sc->enp;
+ mcdi = &sc->mcdi;
+ emtp = &mcdi->transport;
+
+ mtx_lock(&mcdi->lock);
+ KASSERT(mcdi->state == SFXGE_MCDI_INITIALIZED,
+ ("MCDI not initialized"));
+
+ efx_mcdi_fini(enp);
+ bzero(emtp, sizeof(*emtp));
+
+ cv_destroy(&mcdi->cv);
+ mtx_unlock(&mcdi->lock);
+
+ mtx_destroy(&mcdi->lock);
+}
diff --git a/sys/dev/sfxge/sfxge_port.c b/sys/dev/sfxge/sfxge_port.c
new file mode 100644
index 0000000..a38f40a
--- /dev/null
+++ b/sys/dev/sfxge/sfxge_port.c
@@ -0,0 +1,789 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/limits.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+
+#include "common/efx.h"
+
+#include "sfxge.h"
+
+static int
+sfxge_mac_stat_update(struct sfxge_softc *sc)
+{
+ struct sfxge_port *port = &sc->port;
+ efsys_mem_t *esmp = &(port->mac_stats.dma_buf);
+ clock_t now;
+ unsigned int count;
+ int rc;
+
+ mtx_lock(&port->lock);
+
+ if (port->init_state != SFXGE_PORT_STARTED) {
+ rc = 0;
+ goto out;
+ }
+
+ now = ticks;
+ if (now - port->mac_stats.update_time < hz) {
+ rc = 0;
+ goto out;
+ }
+
+ port->mac_stats.update_time = now;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us). */
+ for (count = 0; count < 100; ++count) {
+ EFSYS_PROBE1(wait, unsigned int, count);
+
+ /* Synchronize the DMA memory for reading */
+ bus_dmamap_sync(esmp->esm_tag, esmp->esm_map,
+ BUS_DMASYNC_POSTREAD);
+
+ /* Try to update the cached counters */
+ if ((rc = efx_mac_stats_update(sc->enp, esmp,
+ port->mac_stats.decode_buf, NULL)) != EAGAIN)
+ goto out;
+
+ DELAY(100);
+ }
+
+ rc = ETIMEDOUT;
+out:
+ mtx_unlock(&port->lock);
+ return rc;
+}
+
+static int
+sfxge_mac_stat_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc = arg1;
+ unsigned int id = arg2;
+ int rc;
+
+ if ((rc = sfxge_mac_stat_update(sc)) != 0)
+ return rc;
+
+ return SYSCTL_OUT(req,
+ (uint64_t *)sc->port.mac_stats.decode_buf + id,
+ sizeof(uint64_t));
+}
+
+static void
+sfxge_mac_stat_init(struct sfxge_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
+ struct sysctl_oid_list *stat_list;
+ unsigned int id;
+ const char *name;
+
+ stat_list = SYSCTL_CHILDREN(sc->stats_node);
+
+ /* Initialise the named stats */
+ for (id = 0; id < EFX_MAC_NSTATS; id++) {
+ name = efx_mac_stat_name(sc->enp, id);
+ SYSCTL_ADD_PROC(
+ ctx, stat_list,
+ OID_AUTO, name, CTLTYPE_U64|CTLFLAG_RD,
+ sc, id, sfxge_mac_stat_handler, "Q",
+ "");
+ }
+}
+
+#ifdef SFXGE_HAVE_PAUSE_MEDIAOPTS
+
+static unsigned int
+sfxge_port_wanted_fc(struct sfxge_softc *sc)
+{
+ struct ifmedia_entry *ifm = sc->media.ifm_cur;
+
+ if (ifm->ifm_media == (IFM_ETHER | IFM_AUTO))
+ return EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ return ((ifm->ifm_media & IFM_ETH_RXPAUSE) ? EFX_FCNTL_RESPOND : 0) |
+ ((ifm->ifm_media & IFM_ETH_TXPAUSE) ? EFX_FCNTL_GENERATE : 0);
+}
+
+static unsigned int
+sfxge_port_link_fc_ifm(struct sfxge_softc *sc)
+{
+ unsigned int wanted_fc, link_fc;
+
+ efx_mac_fcntl_get(sc->enp, &wanted_fc, &link_fc);
+ return ((link_fc & EFX_FCNTL_RESPOND) ? IFM_ETH_RXPAUSE : 0) |
+ ((link_fc & EFX_FCNTL_GENERATE) ? IFM_ETH_TXPAUSE : 0);
+}
+
+#else /* !SFXGE_HAVE_PAUSE_MEDIAOPTS */
+
+static unsigned int
+sfxge_port_wanted_fc(struct sfxge_softc *sc)
+{
+ return sc->port.wanted_fc;
+}
+
+static unsigned int
+sfxge_port_link_fc_ifm(struct sfxge_softc *sc)
+{
+ return 0;
+}
+
+static int
+sfxge_port_wanted_fc_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc;
+ struct sfxge_port *port;
+ unsigned int fcntl;
+ int error;
+
+ sc = arg1;
+ port = &sc->port;
+
+ mtx_lock(&port->lock);
+
+ if (req->newptr) {
+ if ((error = SYSCTL_IN(req, &fcntl, sizeof(fcntl))) != 0)
+ goto out;
+
+ if (port->wanted_fc == fcntl)
+ goto out;
+
+ port->wanted_fc = fcntl;
+
+ if (port->init_state != SFXGE_PORT_STARTED)
+ goto out;
+
+ error = efx_mac_fcntl_set(sc->enp, port->wanted_fc, B_TRUE);
+ } else {
+ error = SYSCTL_OUT(req, &port->wanted_fc,
+ sizeof(port->wanted_fc));
+ }
+
+out:
+ mtx_unlock(&port->lock);
+
+ return (error);
+}
+
+static int
+sfxge_port_link_fc_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc;
+ struct sfxge_port *port;
+ unsigned int wanted_fc, link_fc;
+ int error;
+
+ sc = arg1;
+ port = &sc->port;
+
+ mtx_lock(&port->lock);
+ if (port->init_state == SFXGE_PORT_STARTED && SFXGE_LINK_UP(sc))
+ efx_mac_fcntl_get(sc->enp, &wanted_fc, &link_fc);
+ else
+ link_fc = 0;
+ error = SYSCTL_OUT(req, &link_fc, sizeof(link_fc));
+ mtx_unlock(&port->lock);
+
+ return (error);
+}
+
+#endif /* SFXGE_HAVE_PAUSE_MEDIAOPTS */
+
+static const u_long sfxge_link_baudrate[EFX_LINK_NMODES] = {
+ [EFX_LINK_10HDX] = IF_Mbps(10),
+ [EFX_LINK_10FDX] = IF_Mbps(10),
+ [EFX_LINK_100HDX] = IF_Mbps(100),
+ [EFX_LINK_100FDX] = IF_Mbps(100),
+ [EFX_LINK_1000HDX] = IF_Gbps(1),
+ [EFX_LINK_1000FDX] = IF_Gbps(1),
+ [EFX_LINK_10000FDX] = MIN(IF_Gbps(10ULL), ULONG_MAX),
+};
+
+void
+sfxge_mac_link_update(struct sfxge_softc *sc, efx_link_mode_t mode)
+{
+ struct sfxge_port *port;
+ int link_state;
+
+ port = &sc->port;
+
+ if (port->link_mode == mode)
+ return;
+
+ port->link_mode = mode;
+
+ /* Push link state update to the OS */
+ link_state = (port->link_mode != EFX_LINK_DOWN ?
+ LINK_STATE_UP : LINK_STATE_DOWN);
+ sc->ifnet->if_baudrate = sfxge_link_baudrate[port->link_mode];
+ if_link_state_change(sc->ifnet, link_state);
+}
+
+static void
+sfxge_mac_poll_work(void *arg, int npending)
+{
+ struct sfxge_softc *sc;
+ efx_nic_t *enp;
+ struct sfxge_port *port;
+ efx_link_mode_t mode;
+
+ sc = (struct sfxge_softc *)arg;
+ enp = sc->enp;
+ port = &sc->port;
+
+ mtx_lock(&port->lock);
+
+ if (port->init_state != SFXGE_PORT_STARTED)
+ goto done;
+
+ /* This may sleep waiting for MCDI completion */
+ (void)efx_port_poll(enp, &mode);
+ sfxge_mac_link_update(sc, mode);
+
+done:
+ mtx_unlock(&port->lock);
+}
+
+static int
+sfxge_mac_filter_set_locked(struct sfxge_softc *sc)
+{
+ unsigned int bucket[EFX_MAC_HASH_BITS];
+ struct ifnet *ifp = sc->ifnet;
+ struct ifmultiaddr *ifma;
+ struct sockaddr_dl *sa;
+ efx_nic_t *enp = sc->enp;
+ unsigned int index;
+ int rc;
+
+ /* Set promisc-unicast and broadcast filter bits */
+ if ((rc = efx_mac_filter_set(enp, !!(ifp->if_flags & IFF_PROMISC),
+ B_TRUE)) != 0)
+ return rc;
+
+ /* Set multicast hash filter */
+ if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ for (index = 0; index < EFX_MAC_HASH_BITS; index++)
+ bucket[index] = 1;
+ } else {
+ /* Broadcast frames also go through the multicast
+ * filter, and the broadcast address hashes to
+ * 0xff. */
+ bucket[0xff] = 1;
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family == AF_LINK) {
+ sa = (struct sockaddr_dl *)ifma->ifma_addr;
+ index = ether_crc32_le(LLADDR(sa), 6) & 0xff;
+ bucket[index] = 1;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+ return efx_mac_hash_set(enp, bucket);
+}
+
+int
+sfxge_mac_filter_set(struct sfxge_softc *sc)
+{
+ struct sfxge_port *port = &sc->port;
+ int rc;
+
+ KASSERT(port->init_state == SFXGE_PORT_STARTED, ("port not started"));
+
+ mtx_lock(&port->lock);
+ rc = sfxge_mac_filter_set_locked(sc);
+ mtx_unlock(&port->lock);
+ return rc;
+}
+
+void
+sfxge_port_stop(struct sfxge_softc *sc)
+{
+ struct sfxge_port *port;
+ efx_nic_t *enp;
+
+ port = &sc->port;
+ enp = sc->enp;
+
+ mtx_lock(&port->lock);
+
+ KASSERT(port->init_state == SFXGE_PORT_STARTED,
+ ("port not started"));
+
+ port->init_state = SFXGE_PORT_INITIALIZED;
+
+ port->mac_stats.update_time = 0;
+
+ /* This may call MCDI */
+ (void)efx_mac_drain(enp, B_TRUE);
+
+ (void)efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf, 0, B_FALSE);
+
+ port->link_mode = EFX_LINK_UNKNOWN;
+
+ /* Destroy the common code port object. */
+ efx_port_fini(sc->enp);
+
+ mtx_unlock(&port->lock);
+}
+
+int
+sfxge_port_start(struct sfxge_softc *sc)
+{
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ struct ifnet *ifp = sc->ifnet;
+ struct sfxge_port *port;
+ efx_nic_t *enp;
+ size_t pdu;
+ int rc;
+
+ port = &sc->port;
+ enp = sc->enp;
+
+ mtx_lock(&port->lock);
+
+ KASSERT(port->init_state == SFXGE_PORT_INITIALIZED,
+ ("port not initialized"));
+
+ /* Initialize the port object in the common code. */
+ if ((rc = efx_port_init(sc->enp)) != 0)
+ goto fail;
+
+ /* Set the SDU */
+ pdu = EFX_MAC_PDU(ifp->if_mtu);
+ if ((rc = efx_mac_pdu_set(enp, pdu)) != 0)
+ goto fail2;
+
+ if ((rc = efx_mac_fcntl_set(enp, sfxge_port_wanted_fc(sc), B_TRUE))
+ != 0)
+ goto fail2;
+
+ /* Set the unicast address */
+ IF_ADDR_LOCK(ifp);
+ bcopy(LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr),
+ mac_addr, sizeof(mac_addr));
+ IF_ADDR_UNLOCK(ifp);
+	if ((rc = efx_mac_addr_set(enp, mac_addr)) != 0)
+		goto fail2;
+
+	if ((rc = sfxge_mac_filter_set_locked(sc)) != 0)
+		goto fail2;
+
+ /* Update MAC stats by DMA every second */
+ if ((rc = efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf,
+ 1000, B_FALSE)) != 0)
+ goto fail2;
+
+ if ((rc = efx_mac_drain(enp, B_FALSE)) != 0)
+ goto fail3;
+
+ if ((rc = efx_phy_adv_cap_set(sc->enp, sc->media.ifm_cur->ifm_data))
+ != 0)
+ goto fail4;
+
+ port->init_state = SFXGE_PORT_STARTED;
+
+ /* Single poll in case there were missing initial events */
+ mtx_unlock(&port->lock);
+ sfxge_mac_poll_work(sc, 0);
+
+ return (0);
+
+fail4:
+ (void)efx_mac_drain(enp, B_TRUE);
+fail3:
+ (void)efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf,
+ 0, B_FALSE);
+fail2:
+ efx_port_fini(sc->enp);
+fail:
+ mtx_unlock(&port->lock);
+
+ return (rc);
+}
+
+static int
+sfxge_phy_stat_update(struct sfxge_softc *sc)
+{
+ struct sfxge_port *port = &sc->port;
+ efsys_mem_t *esmp = &port->phy_stats.dma_buf;
+ clock_t now;
+ unsigned int count;
+ int rc;
+
+ mtx_lock(&port->lock);
+
+ if (port->init_state != SFXGE_PORT_STARTED) {
+ rc = 0;
+ goto out;
+ }
+
+ now = ticks;
+ if (now - port->phy_stats.update_time < hz) {
+ rc = 0;
+ goto out;
+ }
+
+ port->phy_stats.update_time = now;
+
+	/* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us) */
+ for (count = 0; count < 100; ++count) {
+ EFSYS_PROBE1(wait, unsigned int, count);
+
+ /* Synchronize the DMA memory for reading */
+ bus_dmamap_sync(esmp->esm_tag, esmp->esm_map,
+ BUS_DMASYNC_POSTREAD);
+
+ /* Try to update the cached counters */
+ if ((rc = efx_phy_stats_update(sc->enp, esmp,
+ port->phy_stats.decode_buf)) != EAGAIN)
+ goto out;
+
+ DELAY(100);
+ }
+
+ rc = ETIMEDOUT;
+out:
+ mtx_unlock(&port->lock);
+ return rc;
+}
+
+static int
+sfxge_phy_stat_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc = arg1;
+ unsigned int id = arg2;
+ int rc;
+
+ if ((rc = sfxge_phy_stat_update(sc)) != 0)
+ return rc;
+
+ return SYSCTL_OUT(req,
+ (uint32_t *)sc->port.phy_stats.decode_buf + id,
+ sizeof(uint32_t));
+}
+
+static void
+sfxge_phy_stat_init(struct sfxge_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
+ struct sysctl_oid_list *stat_list;
+ unsigned int id;
+ const char *name;
+ uint64_t stat_mask = efx_nic_cfg_get(sc->enp)->enc_phy_stat_mask;
+
+ stat_list = SYSCTL_CHILDREN(sc->stats_node);
+
+ /* Initialise the named stats */
+ for (id = 0; id < EFX_PHY_NSTATS; id++) {
+ if (!(stat_mask & ((uint64_t)1 << id)))
+ continue;
+ name = efx_phy_stat_name(sc->enp, id);
+ SYSCTL_ADD_PROC(
+ ctx, stat_list,
+ OID_AUTO, name, CTLTYPE_UINT|CTLFLAG_RD,
+ sc, id, sfxge_phy_stat_handler,
+ id == EFX_PHY_STAT_OUI ? "IX" : "IU",
+ "");
+ }
+}
+
+void
+sfxge_port_fini(struct sfxge_softc *sc)
+{
+ struct sfxge_port *port;
+ efsys_mem_t *esmp;
+
+ port = &sc->port;
+ esmp = &port->mac_stats.dma_buf;
+
+ KASSERT(port->init_state == SFXGE_PORT_INITIALIZED,
+ ("Port not initialized"));
+
+ port->init_state = SFXGE_PORT_UNINITIALIZED;
+
+ port->link_mode = EFX_LINK_UNKNOWN;
+
+ /* Finish with PHY DMA memory */
+ sfxge_dma_free(&port->phy_stats.dma_buf);
+ free(port->phy_stats.decode_buf, M_SFXGE);
+
+ sfxge_dma_free(esmp);
+ free(port->mac_stats.decode_buf, M_SFXGE);
+
+ mtx_destroy(&port->lock);
+
+ port->sc = NULL;
+}
+
+int
+sfxge_port_init(struct sfxge_softc *sc)
+{
+ struct sfxge_port *port;
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+ efsys_mem_t *mac_stats_buf, *phy_stats_buf;
+ int rc;
+
+ port = &sc->port;
+ mac_stats_buf = &port->mac_stats.dma_buf;
+ phy_stats_buf = &port->phy_stats.dma_buf;
+
+ KASSERT(port->init_state == SFXGE_PORT_UNINITIALIZED,
+ ("Port already initialized"));
+
+ port->sc = sc;
+
+ mtx_init(&port->lock, "sfxge_port", NULL, MTX_DEF);
+
+ port->phy_stats.decode_buf = malloc(EFX_PHY_NSTATS * sizeof(uint32_t),
+ M_SFXGE, M_WAITOK | M_ZERO);
+ if ((rc = sfxge_dma_alloc(sc, EFX_PHY_STATS_SIZE, phy_stats_buf)) != 0)
+ goto fail;
+ bzero(phy_stats_buf->esm_base, phy_stats_buf->esm_size);
+ sfxge_phy_stat_init(sc);
+
+ sysctl_ctx = device_get_sysctl_ctx(sc->dev);
+ sysctl_tree = device_get_sysctl_tree(sc->dev);
+
+#ifndef SFXGE_HAVE_PAUSE_MEDIAOPTS
+ /* If flow control cannot be configured or reported through
+ * ifmedia, provide sysctls for it. */
+ port->wanted_fc = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "wanted_fc", CTLTYPE_UINT|CTLFLAG_RW, sc, 0,
+ sfxge_port_wanted_fc_handler, "IU", "wanted flow control mode");
+ SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "link_fc", CTLTYPE_UINT|CTLFLAG_RD, sc, 0,
+ sfxge_port_link_fc_handler, "IU", "link flow control mode");
+#endif
+
+ port->mac_stats.decode_buf = malloc(EFX_MAC_NSTATS * sizeof(uint64_t),
+ M_SFXGE, M_WAITOK | M_ZERO);
+ if ((rc = sfxge_dma_alloc(sc, EFX_MAC_STATS_SIZE, mac_stats_buf)) != 0)
+ goto fail2;
+ bzero(mac_stats_buf->esm_base, mac_stats_buf->esm_size);
+ sfxge_mac_stat_init(sc);
+
+ port->init_state = SFXGE_PORT_INITIALIZED;
+
+ return (0);
+
+fail2:
+ free(port->mac_stats.decode_buf, M_SFXGE);
+ sfxge_dma_free(phy_stats_buf);
+fail:
+ free(port->phy_stats.decode_buf, M_SFXGE);
+	mtx_destroy(&port->lock);
+ port->sc = NULL;
+ return rc;
+}
+
+static int sfxge_link_mode[EFX_PHY_MEDIA_NTYPES][EFX_LINK_NMODES] = {
+ [EFX_PHY_MEDIA_CX4] = {
+ [EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_CX4,
+ },
+ [EFX_PHY_MEDIA_KX4] = {
+ [EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_KX4,
+ },
+ [EFX_PHY_MEDIA_XFP] = {
+ /* Don't know the module type, but assume SR for now. */
+ [EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_SR,
+ },
+ [EFX_PHY_MEDIA_SFP_PLUS] = {
+ /* Don't know the module type, but assume SX/SR for now. */
+ [EFX_LINK_1000FDX] = IFM_ETHER | IFM_FDX | IFM_1000_SX,
+ [EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_SR,
+ },
+ [EFX_PHY_MEDIA_BASE_T] = {
+ [EFX_LINK_10HDX] = IFM_ETHER | IFM_HDX | IFM_10_T,
+ [EFX_LINK_10FDX] = IFM_ETHER | IFM_FDX | IFM_10_T,
+ [EFX_LINK_100HDX] = IFM_ETHER | IFM_HDX | IFM_100_TX,
+ [EFX_LINK_100FDX] = IFM_ETHER | IFM_FDX | IFM_100_TX,
+ [EFX_LINK_1000HDX] = IFM_ETHER | IFM_HDX | IFM_1000_T,
+ [EFX_LINK_1000FDX] = IFM_ETHER | IFM_FDX | IFM_1000_T,
+ [EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_T,
+ },
+};
+
+static void
+sfxge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct sfxge_softc *sc;
+ efx_phy_media_type_t medium_type;
+ efx_link_mode_t mode;
+
+ sc = ifp->if_softc;
+ sx_xlock(&sc->softc_lock);
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (SFXGE_RUNNING(sc) && SFXGE_LINK_UP(sc)) {
+ ifmr->ifm_status |= IFM_ACTIVE;
+
+ efx_phy_media_type_get(sc->enp, &medium_type);
+ mode = sc->port.link_mode;
+ ifmr->ifm_active |= sfxge_link_mode[medium_type][mode];
+ ifmr->ifm_active |= sfxge_port_link_fc_ifm(sc);
+ }
+
+ sx_xunlock(&sc->softc_lock);
+}
+
+static int
+sfxge_media_change(struct ifnet *ifp)
+{
+ struct sfxge_softc *sc;
+ struct ifmedia_entry *ifm;
+ int rc;
+
+ sc = ifp->if_softc;
+ ifm = sc->media.ifm_cur;
+
+ sx_xlock(&sc->softc_lock);
+
+ if (!SFXGE_RUNNING(sc)) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = efx_mac_fcntl_set(sc->enp, sfxge_port_wanted_fc(sc), B_TRUE);
+ if (rc != 0)
+ goto out;
+
+ rc = efx_phy_adv_cap_set(sc->enp, ifm->ifm_data);
+out:
+ sx_xunlock(&sc->softc_lock);
+
+ return rc;
+}
+
+int
+sfxge_port_ifmedia_init(struct sfxge_softc *sc)
+{
+ efx_phy_media_type_t medium_type;
+ uint32_t cap_mask, mode_cap_mask;
+ efx_link_mode_t mode;
+ int mode_ifm, best_mode_ifm = 0;
+ int rc;
+
+ /* We need port state to initialise the ifmedia list. */
+ if ((rc = efx_nic_init(sc->enp)) != 0)
+ goto out;
+ if ((rc = efx_port_init(sc->enp)) != 0)
+ goto out2;
+
+ /*
+ * Register ifconfig callbacks for querying and setting the
+ * link mode and link status.
+ */
+ ifmedia_init(&sc->media, IFM_IMASK, sfxge_media_change,
+ sfxge_media_status);
+
+ /*
+ * Map firmware medium type and capabilities to ifmedia types.
+ * ifmedia does not distinguish between forcing the link mode
+ * and disabling auto-negotiation. 1000BASE-T and 10GBASE-T
+ * require AN even if only one link mode is enabled, and for
+ * 100BASE-TX it is useful even if the link mode is forced.
+ * Therefore we never disable auto-negotiation.
+ *
+ * Also enable and advertise flow control by default.
+ */
+
+ efx_phy_media_type_get(sc->enp, &medium_type);
+ efx_phy_adv_cap_get(sc->enp, EFX_PHY_CAP_PERM, &cap_mask);
+
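+	/* The loop below relies on each link mode being numbered one
+	 * above the corresponding PHY capability bit. */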
+ EFX_STATIC_ASSERT(EFX_LINK_10HDX == EFX_PHY_CAP_10HDX + 1);
+ EFX_STATIC_ASSERT(EFX_LINK_10FDX == EFX_PHY_CAP_10FDX + 1);
+ EFX_STATIC_ASSERT(EFX_LINK_100HDX == EFX_PHY_CAP_100HDX + 1);
+ EFX_STATIC_ASSERT(EFX_LINK_100FDX == EFX_PHY_CAP_100FDX + 1);
+ EFX_STATIC_ASSERT(EFX_LINK_1000HDX == EFX_PHY_CAP_1000HDX + 1);
+ EFX_STATIC_ASSERT(EFX_LINK_1000FDX == EFX_PHY_CAP_1000FDX + 1);
+ EFX_STATIC_ASSERT(EFX_LINK_10000FDX == EFX_PHY_CAP_10000FDX + 1);
+
+ for (mode = EFX_LINK_10HDX; mode <= EFX_LINK_10000FDX; mode++) {
+ mode_cap_mask = 1 << (mode - 1);
+ mode_ifm = sfxge_link_mode[medium_type][mode];
+
+ if ((cap_mask & mode_cap_mask) && mode_ifm) {
+ mode_cap_mask |= cap_mask & (1 << EFX_PHY_CAP_AN);
+
+#ifdef SFXGE_HAVE_PAUSE_MEDIAOPTS
+ /* No flow-control */
+ ifmedia_add(&sc->media, mode_ifm, mode_cap_mask, NULL);
+
+ /* Respond-only. If using AN, we implicitly
+ * offer symmetric as well, but that doesn't
+ * mean we *have* to generate pause frames.
+ */
+ mode_cap_mask |= cap_mask & ((1 << EFX_PHY_CAP_PAUSE) |
+ (1 << EFX_PHY_CAP_ASYM));
+ mode_ifm |= IFM_ETH_RXPAUSE;
+ ifmedia_add(&sc->media, mode_ifm, mode_cap_mask, NULL);
+
+ /* Symmetric */
+ mode_cap_mask &= ~(1 << EFX_PHY_CAP_ASYM);
+ mode_ifm |= IFM_ETH_TXPAUSE;
+#else /* !SFXGE_HAVE_PAUSE_MEDIAOPTS */
+ mode_cap_mask |= cap_mask & (1 << EFX_PHY_CAP_PAUSE);
+#endif
+ ifmedia_add(&sc->media, mode_ifm, mode_cap_mask, NULL);
+
+ /* Link modes are numbered in order of speed,
+ * so assume the last one available is the best.
+ */
+ best_mode_ifm = mode_ifm;
+ }
+ }
+
+ if (cap_mask & (1 << EFX_PHY_CAP_AN)) {
+ /* Add autoselect mode. */
+ mode_ifm = IFM_ETHER | IFM_AUTO;
+ ifmedia_add(&sc->media, mode_ifm,
+ cap_mask & ~(1 << EFX_PHY_CAP_ASYM), NULL);
+ best_mode_ifm = mode_ifm;
+ }
+
+ if (best_mode_ifm)
+ ifmedia_set(&sc->media, best_mode_ifm);
+
+ /* Now discard port state until interface is started. */
+ efx_port_fini(sc->enp);
+out2:
+ efx_nic_fini(sc->enp);
+out:
+ return rc;
+}
diff --git a/sys/dev/sfxge/sfxge_rx.c b/sys/dev/sfxge/sfxge_rx.c
new file mode 100644
index 0000000..7dd5160
--- /dev/null
+++ b/sys/dev/sfxge/sfxge_rx.c
@@ -0,0 +1,1233 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/mbuf.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/limits.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+
+#include <machine/in_cksum.h>
+
+#include "common/efx.h"
+
+#include "sfxge.h"
+#include "sfxge_rx.h"
+
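+/* Refill the RX ring when its fill level drops below 90% of the limit */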
+#define RX_REFILL_THRESHOLD (EFX_RXQ_LIMIT(SFXGE_NDESCS) * 9 / 10)
+#define RX_REFILL_THRESHOLD_2 (RX_REFILL_THRESHOLD / 2)
+
+/* Size of the LRO hash table. Must be a power of 2. A larger table
+ * means we can accelerate a larger number of streams.
+ */
+static unsigned lro_table_size = 128;
+
+/* Maximum length of a hash chain. If chains get too long then the lookup
+ * time increases and may exceed the benefit of LRO.
+ */
+static unsigned lro_chain_max = 20;
+
+/* Maximum time (in ticks) that a connection can be idle before its LRO
+ * state is discarded.
+ */
+static unsigned lro_idle_ticks; /* initialised in sfxge_rx_init() */
+
+/* Number of packets with payload that must arrive in-order before a
+ * connection is eligible for LRO. The idea is we should avoid coalescing
+ * segments when the sender is in slow-start because reducing the ACK rate
+ * can damage performance.
+ */
+static int lro_slow_start_packets = 2000;
+
+/* Number of packets with payload that must arrive in-order following loss
+ * before a connection is eligible for LRO. The idea is we should avoid
+ * coalescing segments when the sender is recovering from loss, because
+ * reducing the ACK rate can damage performance.
+ */
+static int lro_loss_packets = 20;
+
+/* Flags for sfxge_lro_conn::l2_id; must not collide with EVL_VLID_MASK */
+#define SFXGE_LRO_L2_ID_VLAN 0x4000
+#define SFXGE_LRO_L2_ID_IPV6 0x8000
+#define SFXGE_LRO_CONN_IS_VLAN_ENCAP(c) ((c)->l2_id & SFXGE_LRO_L2_ID_VLAN)
+#define SFXGE_LRO_CONN_IS_TCPIPV4(c) (!((c)->l2_id & SFXGE_LRO_L2_ID_IPV6))
+
+/* Compare IPv6 addresses, avoiding conditional branches */
+static __inline unsigned long ipv6_addr_cmp(const struct in6_addr *left,
+ const struct in6_addr *right)
+{
+#if LONG_BIT == 64
+ const uint64_t *left64 = (const uint64_t *)left;
+ const uint64_t *right64 = (const uint64_t *)right;
+ return (left64[0] - right64[0]) | (left64[1] - right64[1]);
+#else
+ return (left->s6_addr32[0] - right->s6_addr32[0]) |
+ (left->s6_addr32[1] - right->s6_addr32[1]) |
+ (left->s6_addr32[2] - right->s6_addr32[2]) |
+ (left->s6_addr32[3] - right->s6_addr32[3]);
+#endif
+}
+
+void
+sfxge_rx_qflush_done(struct sfxge_rxq *rxq)
+{
+
+ rxq->flush_state = SFXGE_FLUSH_DONE;
+}
+
+void
+sfxge_rx_qflush_failed(struct sfxge_rxq *rxq)
+{
+
+ rxq->flush_state = SFXGE_FLUSH_FAILED;
+}
+
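+/* Toeplitz hash key: the default key from the Microsoft RSS specification */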
+static uint8_t toep_key[] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+};
+
+static void
+sfxge_rx_post_refill(void *arg)
+{
+ struct sfxge_rxq *rxq = arg;
+ struct sfxge_softc *sc;
+ unsigned int index;
+ struct sfxge_evq *evq;
+ uint16_t magic;
+
+ sc = rxq->sc;
+ index = rxq->index;
+ evq = sc->evq[index];
+
+ magic = SFXGE_MAGIC_RX_QREFILL | index;
+
+ /* This is guaranteed due to the start/stop order of rx and ev */
+ KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
+ ("evq not started"));
+ KASSERT(rxq->init_state == SFXGE_RXQ_STARTED,
+ ("rxq not started"));
+ efx_ev_qpost(evq->common, magic);
+}
+
+static void
+sfxge_rx_schedule_refill(struct sfxge_rxq *rxq, boolean_t retrying)
+{
+ /* Initially retry after 100 ms, but back off in case of
+ * repeated failures as we probably have to wait for the
+ * administrator to raise the pool limit. */
+ if (retrying)
+ rxq->refill_delay = min(rxq->refill_delay * 2, 10 * hz);
+ else
+ rxq->refill_delay = hz / 10;
+
+ callout_reset_curcpu(&rxq->refill_callout, rxq->refill_delay,
+ sfxge_rx_post_refill, rxq);
+}
+
+static inline struct mbuf *sfxge_rx_alloc_mbuf(struct sfxge_softc *sc)
+{
+ struct mb_args args;
+ struct mbuf *m;
+
+ /* Allocate mbuf structure */
+ args.flags = M_PKTHDR;
+ args.type = MT_DATA;
+ m = (struct mbuf *)uma_zalloc_arg(zone_mbuf, &args, M_DONTWAIT);
+
+ /* Allocate (and attach) packet buffer */
+ if (m && !uma_zalloc_arg(sc->rx_buffer_zone, m, M_DONTWAIT)) {
+ uma_zfree(zone_mbuf, m);
+ m = NULL;
+ }
+
+ return m;
+}
+
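+/* Maximum number of RX descriptors posted per efx_rx_qpost() call; this
+ * also bounds the on-stack DMA address array in sfxge_rx_qfill(). */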
+#define SFXGE_REFILL_BATCH 64
+
+static void
+sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
+{
+ struct sfxge_softc *sc;
+ unsigned int index;
+ struct sfxge_evq *evq;
+ unsigned int batch;
+ unsigned int rxfill;
+ unsigned int mblksize;
+ int ntodo;
+ efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];
+
+ sc = rxq->sc;
+ index = rxq->index;
+ evq = sc->evq[index];
+
+ prefetch_read_many(sc->enp);
+ prefetch_read_many(rxq->common);
+
+ mtx_assert(&evq->lock, MA_OWNED);
+
+ if (rxq->init_state != SFXGE_RXQ_STARTED)
+ return;
+
+ rxfill = rxq->added - rxq->completed;
+ KASSERT(rxfill <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
+ ("rxfill > EFX_RXQ_LIMIT(SFXGE_NDESCS)"));
+ ntodo = min(EFX_RXQ_LIMIT(SFXGE_NDESCS) - rxfill, target);
+ KASSERT(ntodo <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
+	    ("ntodo > EFX_RXQ_LIMIT(SFXGE_NDESCS)"));
+
+ if (ntodo == 0)
+ return;
+
+ batch = 0;
+ mblksize = sc->rx_buffer_size;
+ while (ntodo-- > 0) {
+ unsigned int id;
+ struct sfxge_rx_sw_desc *rx_desc;
+ bus_dma_segment_t seg;
+ struct mbuf *m;
+
+ id = (rxq->added + batch) & (SFXGE_NDESCS - 1);
+ rx_desc = &rxq->queue[id];
+ KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));
+
+ rx_desc->flags = EFX_DISCARD;
+ m = rx_desc->mbuf = sfxge_rx_alloc_mbuf(sc);
+ if (m == NULL)
+ break;
+ sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg);
+ addr[batch++] = seg.ds_addr;
+
+ if (batch == SFXGE_REFILL_BATCH) {
+ efx_rx_qpost(rxq->common, addr, mblksize, batch,
+ rxq->completed, rxq->added);
+ rxq->added += batch;
+ batch = 0;
+ }
+ }
+
+	/* ntodo is -1 after a complete fill, or >= 0 after an mbuf
+	 * allocation failure; reschedule only in the latter case. */
+	if (ntodo >= 0)
+		sfxge_rx_schedule_refill(rxq, retrying);
+
+ if (batch != 0) {
+ efx_rx_qpost(rxq->common, addr, mblksize, batch,
+ rxq->completed, rxq->added);
+ rxq->added += batch;
+ }
+
+ /* Make the descriptors visible to the hardware */
+ bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map,
+ BUS_DMASYNC_PREWRITE);
+
+ efx_rx_qpush(rxq->common, rxq->added);
+}
+
+void
+sfxge_rx_qrefill(struct sfxge_rxq *rxq)
+{
+
+ if (rxq->init_state != SFXGE_RXQ_STARTED)
+ return;
+
+ /* Make sure the queue is full */
+ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_TRUE);
+}
+
+static void __sfxge_rx_deliver(struct sfxge_softc *sc, struct mbuf *m)
+{
+ struct ifnet *ifp = sc->ifnet;
+
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.header = m->m_data;
+ m->m_pkthdr.csum_data = 0xffff;
+ ifp->if_input(ifp, m);
+}
+
+static void
+sfxge_rx_deliver(struct sfxge_softc *sc, struct sfxge_rx_sw_desc *rx_desc)
+{
+ struct mbuf *m = rx_desc->mbuf;
+ int csum_flags;
+
+ /* Convert checksum flags */
+ csum_flags = (rx_desc->flags & EFX_CKSUM_IPV4) ?
+ (CSUM_IP_CHECKED | CSUM_IP_VALID) : 0;
+ if (rx_desc->flags & EFX_CKSUM_TCPUDP)
+ csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+
+#ifdef SFXGE_HAVE_MQ
+ /* The hash covers a 4-tuple for TCP only */
+ if (rx_desc->flags & EFX_PKT_TCP) {
+ m->m_pkthdr.flowid = EFX_RX_HASH_VALUE(EFX_RX_HASHALG_TOEPLITZ,
+ mtod(m, uint8_t *));
+ m->m_flags |= M_FLOWID;
+ }
+#endif
+ m->m_data += sc->rx_prefix_size;
+ m->m_len = rx_desc->size - sc->rx_prefix_size;
+ m->m_pkthdr.len = m->m_len;
+ m->m_pkthdr.csum_flags = csum_flags;
+ __sfxge_rx_deliver(sc, rx_desc->mbuf);
+
+ rx_desc->flags = EFX_DISCARD;
+ rx_desc->mbuf = NULL;
+}
+
+static void
+sfxge_lro_deliver(struct sfxge_lro_state *st, struct sfxge_lro_conn *c)
+{
+ struct sfxge_softc *sc = st->sc;
+ struct mbuf *m = c->mbuf;
+ struct tcphdr *c_th;
+ int csum_flags;
+
+ KASSERT(m, ("no mbuf to deliver"));
+
+ ++st->n_bursts;
+
+ /* Finish off packet munging and recalculate IP header checksum. */
+ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
+ struct ip *iph = c->nh;
+ iph->ip_len = htons(iph->ip_len);
+ iph->ip_sum = 0;
+ iph->ip_sum = in_cksum_hdr(iph);
+ c_th = (struct tcphdr *)(iph + 1);
+ csum_flags = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
+ CSUM_IP_CHECKED | CSUM_IP_VALID);
+ } else {
+ struct ip6_hdr *iph = c->nh;
+ iph->ip6_plen = htons(iph->ip6_plen);
+ c_th = (struct tcphdr *)(iph + 1);
+ csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ }
+
+ c_th->th_win = c->th_last->th_win;
+ c_th->th_ack = c->th_last->th_ack;
+ if (c_th->th_off == c->th_last->th_off) {
+ /* Copy TCP options (take care to avoid going negative). */
+ int optlen = ((c_th->th_off - 5) & 0xf) << 2u;
+ memcpy(c_th + 1, c->th_last + 1, optlen);
+ }
+
+#ifdef SFXGE_HAVE_MQ
+ m->m_pkthdr.flowid = c->conn_hash;
+ m->m_flags |= M_FLOWID;
+#endif
+ m->m_pkthdr.csum_flags = csum_flags;
+ __sfxge_rx_deliver(sc, m);
+
+ c->mbuf = NULL;
+ c->delivered = 1;
+}
+
+/* Drop the given connection, and add it to the free list. */
+static void sfxge_lro_drop(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
+{
+ unsigned bucket;
+
+ KASSERT(!c->mbuf, ("found orphaned mbuf"));
+
+ if (c->next_buf.mbuf) {
+ sfxge_rx_deliver(rxq->sc, &c->next_buf);
+ LIST_REMOVE(c, active_link);
+ }
+
+ bucket = c->conn_hash & rxq->lro.conns_mask;
+ KASSERT(rxq->lro.conns_n[bucket] > 0, ("LRO: bucket fill level wrong"));
+ --rxq->lro.conns_n[bucket];
+ TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link);
+ TAILQ_INSERT_HEAD(&rxq->lro.free_conns, c, link);
+}
+
+/* Stop tracking connections that have gone idle in order to keep hash
+ * chains short.
+ */
+static void sfxge_lro_purge_idle(struct sfxge_rxq *rxq, unsigned now)
+{
+ struct sfxge_lro_conn *c;
+ unsigned i;
+
+ KASSERT(LIST_EMPTY(&rxq->lro.active_conns),
+ ("found active connections"));
+
+ rxq->lro.last_purge_ticks = now;
+ for (i = 0; i <= rxq->lro.conns_mask; ++i) {
+ if (TAILQ_EMPTY(&rxq->lro.conns[i]))
+ continue;
+
+ c = TAILQ_LAST(&rxq->lro.conns[i], sfxge_lro_tailq);
+ if (now - c->last_pkt_ticks > lro_idle_ticks) {
+ ++rxq->lro.n_drop_idle;
+ sfxge_lro_drop(rxq, c);
+ }
+ }
+}
+
+static void
+sfxge_lro_merge(struct sfxge_lro_state *st, struct sfxge_lro_conn *c,
+ struct mbuf *mbuf, struct tcphdr *th)
+{
+ struct tcphdr *c_th;
+
+ /* Tack the new mbuf onto the chain. */
+ KASSERT(!mbuf->m_next, ("mbuf already chained"));
+ c->mbuf_tail->m_next = mbuf;
+ c->mbuf_tail = mbuf;
+
+ /* Increase length appropriately */
+ c->mbuf->m_pkthdr.len += mbuf->m_len;
+
+ /* Update the connection state flags */
+ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
+ struct ip *iph = c->nh;
+ iph->ip_len += mbuf->m_len;
+ c_th = (struct tcphdr *)(iph + 1);
+ } else {
+ struct ip6_hdr *iph = c->nh;
+ iph->ip6_plen += mbuf->m_len;
+ c_th = (struct tcphdr *)(iph + 1);
+ }
+ c_th->th_flags |= (th->th_flags & TH_PUSH);
+ c->th_last = th;
+ ++st->n_merges;
+
+ /* Pass packet up now if another segment could overflow the IP
+ * length.
+ */
+ if (c->mbuf->m_pkthdr.len > 65536 - 9200)
+ sfxge_lro_deliver(st, c);
+}
+
+static void
+sfxge_lro_start(struct sfxge_lro_state *st, struct sfxge_lro_conn *c,
+ struct mbuf *mbuf, void *nh, struct tcphdr *th)
+{
+ /* Start the chain */
+ c->mbuf = mbuf;
+ c->mbuf_tail = c->mbuf;
+ c->nh = nh;
+ c->th_last = th;
+
+ mbuf->m_pkthdr.len = mbuf->m_len;
+
+ /* Mangle header fields for later processing */
+ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
+ struct ip *iph = nh;
+ iph->ip_len = ntohs(iph->ip_len);
+ } else {
+ struct ip6_hdr *iph = nh;
+ iph->ip6_plen = ntohs(iph->ip6_plen);
+ }
+}
+
+/* Try to merge or otherwise hold or deliver (as appropriate) the
+ * packet buffered for this connection (c->next_buf). Return a flag
+ * indicating whether the connection is still active for LRO purposes.
+ */
+static int
+sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
+{
+ struct sfxge_rx_sw_desc *rx_buf = &c->next_buf;
+ char *eh = c->next_eh;
+ int data_length, hdr_length, dont_merge;
+ unsigned th_seq, pkt_length;
+ struct tcphdr *th;
+ unsigned now;
+
+ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
+ struct ip *iph = c->next_nh;
+ th = (struct tcphdr *)(iph + 1);
+ pkt_length = ntohs(iph->ip_len) + (char *) iph - eh;
+ } else {
+ struct ip6_hdr *iph = c->next_nh;
+ th = (struct tcphdr *)(iph + 1);
+ pkt_length = ntohs(iph->ip6_plen) + (char *) th - eh;
+ }
+
+ hdr_length = (char *) th + th->th_off * 4 - eh;
+ data_length = (min(pkt_length, rx_buf->size - rxq->sc->rx_prefix_size) -
+ hdr_length);
+ th_seq = ntohl(th->th_seq);
+ dont_merge = ((data_length <= 0)
+ | (th->th_flags & (TH_URG | TH_SYN | TH_RST | TH_FIN)));
+
+ /* Check for options other than aligned timestamp. */
+ if (th->th_off != 5) {
+ const uint32_t *opt_ptr = (const uint32_t *) (th + 1);
+ if (th->th_off == 8 &&
+ opt_ptr[0] == ntohl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP)) {
+ /* timestamp option -- okay */
+ } else {
+ dont_merge = 1;
+ }
+ }
+
+ if (__predict_false(th_seq != c->next_seq)) {
+ /* Out-of-order, so start counting again. */
+ if (c->mbuf)
+ sfxge_lro_deliver(&rxq->lro, c);
+ c->n_in_order_pkts -= lro_loss_packets;
+ c->next_seq = th_seq + data_length;
+ ++rxq->lro.n_misorder;
+ goto deliver_buf_out;
+ }
+ c->next_seq = th_seq + data_length;
+
+ now = ticks;
+ if (now - c->last_pkt_ticks > lro_idle_ticks) {
+ ++rxq->lro.n_drop_idle;
+ if (c->mbuf)
+ sfxge_lro_deliver(&rxq->lro, c);
+ sfxge_lro_drop(rxq, c);
+ return 0;
+ }
+ c->last_pkt_ticks = ticks;
+
+ if (c->n_in_order_pkts < lro_slow_start_packets) {
+ /* May be in slow-start, so don't merge. */
+ ++rxq->lro.n_slow_start;
+ ++c->n_in_order_pkts;
+ goto deliver_buf_out;
+ }
+
+ if (__predict_false(dont_merge)) {
+ if (c->mbuf)
+ sfxge_lro_deliver(&rxq->lro, c);
+ if (th->th_flags & (TH_FIN | TH_RST)) {
+ ++rxq->lro.n_drop_closed;
+ sfxge_lro_drop(rxq, c);
+ return 0;
+ }
+ goto deliver_buf_out;
+ }
+
+ rx_buf->mbuf->m_data += rxq->sc->rx_prefix_size;
+
+ if (__predict_true(c->mbuf != NULL)) {
+ /* Remove headers and any padding */
+ rx_buf->mbuf->m_data += hdr_length;
+ rx_buf->mbuf->m_len = data_length;
+
+ sfxge_lro_merge(&rxq->lro, c, rx_buf->mbuf, th);
+ } else {
+ /* Remove any padding */
+ rx_buf->mbuf->m_len = pkt_length;
+
+ sfxge_lro_start(&rxq->lro, c, rx_buf->mbuf, c->next_nh, th);
+ }
+
+ rx_buf->mbuf = NULL;
+ return 1;
+
+ deliver_buf_out:
+ sfxge_rx_deliver(rxq->sc, rx_buf);
+ return 1;
+}
+
+static void sfxge_lro_new_conn(struct sfxge_lro_state *st, uint32_t conn_hash,
+ uint16_t l2_id, void *nh, struct tcphdr *th)
+{
+ unsigned bucket = conn_hash & st->conns_mask;
+ struct sfxge_lro_conn *c;
+
+ if (st->conns_n[bucket] >= lro_chain_max) {
+ ++st->n_too_many;
+ return;
+ }
+
+ if (!TAILQ_EMPTY(&st->free_conns)) {
+ c = TAILQ_FIRST(&st->free_conns);
+ TAILQ_REMOVE(&st->free_conns, c, link);
+ } else {
+ c = malloc(sizeof(*c), M_SFXGE, M_DONTWAIT);
+ if (c == NULL)
+ return;
+ c->mbuf = NULL;
+ c->next_buf.mbuf = NULL;
+ }
+
+ /* Create the connection tracking data */
+ ++st->conns_n[bucket];
+ TAILQ_INSERT_HEAD(&st->conns[bucket], c, link);
+ c->l2_id = l2_id;
+ c->conn_hash = conn_hash;
+ c->source = th->th_sport;
+ c->dest = th->th_dport;
+ c->n_in_order_pkts = 0;
+ c->last_pkt_ticks = *(volatile int *)&ticks;
+ c->delivered = 0;
+ ++st->n_new_stream;
+ /* NB. We don't initialise c->next_seq, and it doesn't matter what
+ * value it has. Most likely the next packet received for this
+ * connection will not match -- no harm done.
+ */
+}
+
+/* Process mbuf and decide whether to dispatch it to the stack now or
+ * later.
+ */
+static void
+sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf)
+{
+ struct sfxge_softc *sc = rxq->sc;
+ struct mbuf *m = rx_buf->mbuf;
+ struct ether_header *eh;
+ struct sfxge_lro_conn *c;
+ uint16_t l2_id;
+ uint16_t l3_proto;
+ void *nh;
+ struct tcphdr *th;
+ uint32_t conn_hash;
+ unsigned bucket;
+
+ /* Get the hardware hash */
+ conn_hash = EFX_RX_HASH_VALUE(EFX_RX_HASHALG_TOEPLITZ,
+ mtod(m, uint8_t *));
+
+ eh = (struct ether_header *)(m->m_data + sc->rx_prefix_size);
+ if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
+ struct ether_vlan_header *veh = (struct ether_vlan_header *)eh;
+ l2_id = EVL_VLANOFTAG(ntohs(veh->evl_tag)) |
+ SFXGE_LRO_L2_ID_VLAN;
+ l3_proto = veh->evl_proto;
+ nh = veh + 1;
+ } else {
+ l2_id = 0;
+ l3_proto = eh->ether_type;
+ nh = eh + 1;
+ }
+
+ /* Check whether this is a suitable packet (unfragmented
+ * TCP/IPv4 or TCP/IPv6). If so, find the TCP header and
+ * length, and compute a hash if necessary. If not, return.
+ */
+ if (l3_proto == htons(ETHERTYPE_IP)) {
+ struct ip *iph = nh;
+ if ((iph->ip_p - IPPROTO_TCP) |
+ (iph->ip_hl - (sizeof(*iph) >> 2u)) |
+ (iph->ip_off & htons(IP_MF | IP_OFFMASK)))
+ goto deliver_now;
+ th = (struct tcphdr *)(iph + 1);
+ } else if (l3_proto == htons(ETHERTYPE_IPV6)) {
+ struct ip6_hdr *iph = nh;
+ if (iph->ip6_nxt != IPPROTO_TCP)
+ goto deliver_now;
+ l2_id |= SFXGE_LRO_L2_ID_IPV6;
+ th = (struct tcphdr *)(iph + 1);
+ } else {
+ goto deliver_now;
+ }
+
+ bucket = conn_hash & rxq->lro.conns_mask;
+
+ TAILQ_FOREACH(c, &rxq->lro.conns[bucket], link) {
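+		/* Branch-free comparisons: each ORed difference is
+		 * non-zero unless all fields match. */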
+ if ((c->l2_id - l2_id) | (c->conn_hash - conn_hash))
+ continue;
+ if ((c->source - th->th_sport) | (c->dest - th->th_dport))
+ continue;
+ if (c->mbuf) {
+ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
+ struct ip *c_iph, *iph = nh;
+ c_iph = c->nh;
+ if ((c_iph->ip_src.s_addr - iph->ip_src.s_addr) |
+ (c_iph->ip_dst.s_addr - iph->ip_dst.s_addr))
+ continue;
+ } else {
+ struct ip6_hdr *c_iph, *iph = nh;
+ c_iph = c->nh;
+ if (ipv6_addr_cmp(&c_iph->ip6_src, &iph->ip6_src) |
+ ipv6_addr_cmp(&c_iph->ip6_dst, &iph->ip6_dst))
+ continue;
+ }
+ }
+
+ /* Re-insert at head of list to reduce lookup time. */
+ TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link);
+ TAILQ_INSERT_HEAD(&rxq->lro.conns[bucket], c, link);
+
+ if (c->next_buf.mbuf) {
+ if (!sfxge_lro_try_merge(rxq, c))
+ goto deliver_now;
+ } else {
+ LIST_INSERT_HEAD(&rxq->lro.active_conns, c,
+ active_link);
+ }
+ c->next_buf = *rx_buf;
+ c->next_eh = eh;
+ c->next_nh = nh;
+
+ rx_buf->mbuf = NULL;
+ rx_buf->flags = EFX_DISCARD;
+ return;
+ }
+
+ sfxge_lro_new_conn(&rxq->lro, conn_hash, l2_id, nh, th);
+ deliver_now:
+ sfxge_rx_deliver(sc, rx_buf);
+}
+
+static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq)
+{
+ struct sfxge_lro_state *st = &rxq->lro;
+ struct sfxge_lro_conn *c;
+ unsigned t;
+
+ while (!LIST_EMPTY(&st->active_conns)) {
+ c = LIST_FIRST(&st->active_conns);
+ if (!c->delivered && c->mbuf)
+ sfxge_lro_deliver(st, c);
+ if (sfxge_lro_try_merge(rxq, c)) {
+ if (c->mbuf)
+ sfxge_lro_deliver(st, c);
+ LIST_REMOVE(c, active_link);
+ }
+ c->delivered = 0;
+ }
+
+ t = *(volatile int *)&ticks;
+ if (__predict_false(t != st->last_purge_ticks))
+ sfxge_lro_purge_idle(rxq, t);
+}
+
+void
+sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop)
+{
+ struct sfxge_softc *sc = rxq->sc;
+ int lro_enabled = sc->ifnet->if_capenable & IFCAP_LRO;
+ unsigned int index;
+ struct sfxge_evq *evq;
+ unsigned int completed;
+ unsigned int level;
+ struct mbuf *m;
+ struct sfxge_rx_sw_desc *prev = NULL;
+
+ index = rxq->index;
+ evq = sc->evq[index];
+
+ mtx_assert(&evq->lock, MA_OWNED);
+
+ completed = rxq->completed;
+ while (completed != rxq->pending) {
+ unsigned int id;
+ struct sfxge_rx_sw_desc *rx_desc;
+
+ id = completed++ & (SFXGE_NDESCS - 1);
+ rx_desc = &rxq->queue[id];
+ m = rx_desc->mbuf;
+
+ if (rxq->init_state != SFXGE_RXQ_STARTED)
+ goto discard;
+
+ if (rx_desc->flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
+ goto discard;
+
+ prefetch_read_many(mtod(m, caddr_t));
+
+ /* Check for loopback packets */
+ if (!(rx_desc->flags & EFX_PKT_IPV4) &&
+ !(rx_desc->flags & EFX_PKT_IPV6)) {
+ struct ether_header *etherhp;
+
+ /*LINTED*/
+ etherhp = mtod(m, struct ether_header *);
+
+ if (etherhp->ether_type ==
+ htons(SFXGE_ETHERTYPE_LOOPBACK)) {
+ EFSYS_PROBE(loopback);
+
+ rxq->loopback++;
+ goto discard;
+ }
+ }
+
+		/* Pass the previous packet up the stack or into LRO.
+		 * Delivery runs one packet behind so that the prefetch
+		 * issued above can complete before the data is parsed. */
+ if (prev != NULL) {
+ if (lro_enabled)
+ sfxge_lro(rxq, prev);
+ else
+ sfxge_rx_deliver(sc, prev);
+ }
+ prev = rx_desc;
+ continue;
+
+discard:
+ /* Return the packet to the pool */
+ m_free(m);
+ rx_desc->mbuf = NULL;
+ }
+ rxq->completed = completed;
+
+ level = rxq->added - rxq->completed;
+
+ /* Pass last packet up the stack or into LRO */
+ if (prev != NULL) {
+ if (lro_enabled)
+ sfxge_lro(rxq, prev);
+ else
+ sfxge_rx_deliver(sc, prev);
+ }
+
+ /*
+ * If there are any pending flows and this is the end of the
+ * poll then they must be completed.
+ */
+ if (eop)
+ sfxge_lro_end_of_burst(rxq);
+
+ /* Top up the queue if necessary */
+ if (level < RX_REFILL_THRESHOLD)
+ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_FALSE);
+}
+
+static void
+sfxge_rx_qstop(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_rxq *rxq;
+ struct sfxge_evq *evq;
+ unsigned int count;
+
+ rxq = sc->rxq[index];
+ evq = sc->evq[index];
+
+ mtx_lock(&evq->lock);
+
+ KASSERT(rxq->init_state == SFXGE_RXQ_STARTED,
+ ("rxq not started"));
+
+ rxq->init_state = SFXGE_RXQ_INITIALIZED;
+
+ callout_stop(&rxq->refill_callout);
+
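+	/* Flush the hardware queue, restarting from scratch if the
+	 * flush is reported to have failed. */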
+again:
+ rxq->flush_state = SFXGE_FLUSH_PENDING;
+
+ /* Flush the receive queue */
+ efx_rx_qflush(rxq->common);
+
+ mtx_unlock(&evq->lock);
+
+ count = 0;
+ do {
+ /* Spin for 100 ms */
+ DELAY(100000);
+
+ if (rxq->flush_state != SFXGE_FLUSH_PENDING)
+ break;
+
+ } while (++count < 20);
+
+ mtx_lock(&evq->lock);
+
+ if (rxq->flush_state == SFXGE_FLUSH_FAILED)
+ goto again;
+
+ rxq->flush_state = SFXGE_FLUSH_DONE;
+
+ rxq->pending = rxq->added;
+ sfxge_rx_qcomplete(rxq, B_TRUE);
+
+ KASSERT(rxq->completed == rxq->pending,
+ ("rxq->completed != rxq->pending"));
+
+ rxq->added = 0;
+ rxq->pending = 0;
+ rxq->completed = 0;
+ rxq->loopback = 0;
+
+ /* Destroy the common code receive queue. */
+ efx_rx_qdestroy(rxq->common);
+
+ efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
+ EFX_RXQ_NBUFS(SFXGE_NDESCS));
+
+ mtx_unlock(&evq->lock);
+}
+
+static int
+sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_rxq *rxq;
+ efsys_mem_t *esmp;
+ struct sfxge_evq *evq;
+ int rc;
+
+ rxq = sc->rxq[index];
+ esmp = &rxq->mem;
+ evq = sc->evq[index];
+
+ KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED,
+ ("rxq->init_state != SFXGE_RXQ_INITIALIZED"));
+ KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
+ ("evq->init_state != SFXGE_EVQ_STARTED"));
+
+ /* Program the buffer table. */
+ if ((rc = efx_sram_buf_tbl_set(sc->enp, rxq->buf_base_id, esmp,
+ EFX_RXQ_NBUFS(SFXGE_NDESCS))) != 0)
+ return rc;
+
+ /* Create the common code receive queue. */
+ if ((rc = efx_rx_qcreate(sc->enp, index, index, EFX_RXQ_TYPE_DEFAULT,
+ esmp, SFXGE_NDESCS, rxq->buf_base_id, evq->common,
+ &rxq->common)) != 0)
+ goto fail;
+
+ mtx_lock(&evq->lock);
+
+ /* Enable the receive queue. */
+ efx_rx_qenable(rxq->common);
+
+ rxq->init_state = SFXGE_RXQ_STARTED;
+
+ /* Try to fill the queue from the pool. */
+ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_FALSE);
+
+ mtx_unlock(&evq->lock);
+
+ return (0);
+
+fail:
+ efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
+ EFX_RXQ_NBUFS(SFXGE_NDESCS));
+ return rc;
+}
+
+void
+sfxge_rx_stop(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ int index;
+
+ intr = &sc->intr;
+
+ /* Stop the receive queue(s) */
+ index = intr->n_alloc;
+ while (--index >= 0)
+ sfxge_rx_qstop(sc, index);
+
+ sc->rx_prefix_size = 0;
+ sc->rx_buffer_size = 0;
+
+ efx_rx_fini(sc->enp);
+}
+
+int
+sfxge_rx_start(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ int index;
+ int rc;
+
+ intr = &sc->intr;
+
+ /* Initialize the common code receive module. */
+ if ((rc = efx_rx_init(sc->enp)) != 0)
+ return (rc);
+
+ /* Calculate the receive packet buffer size. */
+ sc->rx_prefix_size = EFX_RX_PREFIX_SIZE;
+ sc->rx_buffer_size = (EFX_MAC_PDU(sc->ifnet->if_mtu) +
+ sc->rx_prefix_size);
+
+ /* Select zone for packet buffers */
+ if (sc->rx_buffer_size <= MCLBYTES)
+ sc->rx_buffer_zone = zone_clust;
+ else if (sc->rx_buffer_size <= MJUMPAGESIZE)
+ sc->rx_buffer_zone = zone_jumbop;
+ else if (sc->rx_buffer_size <= MJUM9BYTES)
+ sc->rx_buffer_zone = zone_jumbo9;
+ else
+ sc->rx_buffer_zone = zone_jumbo16;
+
+ /*
+ * Set up the scale table. Enable all hash types and hash insertion.
+ */
+ for (index = 0; index < SFXGE_RX_SCALE_MAX; index++)
+ sc->rx_indir_table[index] = index % sc->intr.n_alloc;
+ if ((rc = efx_rx_scale_tbl_set(sc->enp, sc->rx_indir_table,
+ SFXGE_RX_SCALE_MAX)) != 0)
+ goto fail;
+ (void)efx_rx_scale_mode_set(sc->enp, EFX_RX_HASHALG_TOEPLITZ,
+ (1 << EFX_RX_HASH_IPV4) | (1 << EFX_RX_HASH_TCPIPV4) |
+ (1 << EFX_RX_HASH_IPV6) | (1 << EFX_RX_HASH_TCPIPV6), B_TRUE);
+
+ if ((rc = efx_rx_scale_toeplitz_ipv4_key_set(sc->enp, toep_key,
+ sizeof(toep_key))) != 0)
+ goto fail;
+
+ /* Start the receive queue(s). */
+ for (index = 0; index < intr->n_alloc; index++) {
+ if ((rc = sfxge_rx_qstart(sc, index)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ while (--index >= 0)
+ sfxge_rx_qstop(sc, index);
+
+fail:
+ efx_rx_fini(sc->enp);
+
+ return (rc);
+}
+
+static void sfxge_lro_init(struct sfxge_rxq *rxq)
+{
+ struct sfxge_lro_state *st = &rxq->lro;
+ unsigned i;
+
+ st->conns_mask = lro_table_size - 1;
+ KASSERT(!((st->conns_mask + 1) & st->conns_mask),
+ ("lro_table_size must be a power of 2"));
+ st->sc = rxq->sc;
+ st->conns = malloc((st->conns_mask + 1) * sizeof(st->conns[0]),
+ M_SFXGE, M_WAITOK);
+ st->conns_n = malloc((st->conns_mask + 1) * sizeof(st->conns_n[0]),
+ M_SFXGE, M_WAITOK);
+ for (i = 0; i <= st->conns_mask; ++i) {
+ TAILQ_INIT(&st->conns[i]);
+ st->conns_n[i] = 0;
+ }
+ LIST_INIT(&st->active_conns);
+ TAILQ_INIT(&st->free_conns);
+}
+
+static void sfxge_lro_fini(struct sfxge_rxq *rxq)
+{
+ struct sfxge_lro_state *st = &rxq->lro;
+ struct sfxge_lro_conn *c;
+ unsigned i;
+
+ /* Return cleanly if sfxge_lro_init() has not been called. */
+ if (st->conns == NULL)
+ return;
+
+ KASSERT(LIST_EMPTY(&st->active_conns), ("found active connections"));
+
+ for (i = 0; i <= st->conns_mask; ++i) {
+ while (!TAILQ_EMPTY(&st->conns[i])) {
+ c = TAILQ_LAST(&st->conns[i], sfxge_lro_tailq);
+ sfxge_lro_drop(rxq, c);
+ }
+ }
+
+ while (!TAILQ_EMPTY(&st->free_conns)) {
+ c = TAILQ_FIRST(&st->free_conns);
+ TAILQ_REMOVE(&st->free_conns, c, link);
+ KASSERT(!c->mbuf, ("found orphaned mbuf"));
+ free(c, M_SFXGE);
+ }
+
+ free(st->conns_n, M_SFXGE);
+ free(st->conns, M_SFXGE);
+ st->conns = NULL;
+}
+
+static void
+sfxge_rx_qfini(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_rxq *rxq;
+
+ rxq = sc->rxq[index];
+
+ KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED,
+ ("rxq->init_state != SFXGE_RXQ_INITIALIZED"));
+
+ /* Free the context array and the flow table. */
+ free(rxq->queue, M_SFXGE);
+ sfxge_lro_fini(rxq);
+
+ /* Release DMA memory. */
+ sfxge_dma_free(&rxq->mem);
+
+ sc->rxq[index] = NULL;
+
+ free(rxq, M_SFXGE);
+}
+
+static int
+sfxge_rx_qinit(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_rxq *rxq;
+ struct sfxge_evq *evq;
+ efsys_mem_t *esmp;
+ int rc;
+
+ KASSERT(index < sc->intr.n_alloc, ("index >= %d", sc->intr.n_alloc));
+
+ rxq = malloc(sizeof(struct sfxge_rxq), M_SFXGE, M_ZERO | M_WAITOK);
+ rxq->sc = sc;
+ rxq->index = index;
+
+ sc->rxq[index] = rxq;
+ esmp = &rxq->mem;
+
+ evq = sc->evq[index];
+
+ /* Allocate and zero DMA space. */
+ if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
+ return (rc);
+ (void)memset(esmp->esm_base, 0, EFX_RXQ_SIZE(SFXGE_NDESCS));
+
+ /* Allocate buffer table entries. */
+ sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(SFXGE_NDESCS),
+ &rxq->buf_base_id);
+
+ /* Allocate the context array and the flow table. */
+ rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * SFXGE_NDESCS,
+ M_SFXGE, M_WAITOK | M_ZERO);
+ sfxge_lro_init(rxq);
+
+ callout_init(&rxq->refill_callout, B_TRUE);
+
+ rxq->init_state = SFXGE_RXQ_INITIALIZED;
+
+ return (0);
+}
+
+static const struct {
+ const char *name;
+ size_t offset;
+} sfxge_rx_stats[] = {
+#define SFXGE_RX_STAT(name, member) \
+ { #name, offsetof(struct sfxge_rxq, member) }
+ SFXGE_RX_STAT(lro_merges, lro.n_merges),
+ SFXGE_RX_STAT(lro_bursts, lro.n_bursts),
+ SFXGE_RX_STAT(lro_slow_start, lro.n_slow_start),
+ SFXGE_RX_STAT(lro_misorder, lro.n_misorder),
+ SFXGE_RX_STAT(lro_too_many, lro.n_too_many),
+ SFXGE_RX_STAT(lro_new_stream, lro.n_new_stream),
+ SFXGE_RX_STAT(lro_drop_idle, lro.n_drop_idle),
+ SFXGE_RX_STAT(lro_drop_closed, lro.n_drop_closed)
+};
+
+static int
+sfxge_rx_stat_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc = arg1;
+ unsigned int id = arg2;
+ unsigned int sum, index;
+
+ /* Sum across all RX queues */
+ sum = 0;
+ for (index = 0; index < sc->intr.n_alloc; index++)
+ sum += *(unsigned int *)((caddr_t)sc->rxq[index] +
+ sfxge_rx_stats[id].offset);
+
+ return SYSCTL_OUT(req, &sum, sizeof(sum));
+}
+
+static void
+sfxge_rx_stat_init(struct sfxge_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
+ struct sysctl_oid_list *stat_list;
+ unsigned int id;
+
+ stat_list = SYSCTL_CHILDREN(sc->stats_node);
+
+ for (id = 0;
+ id < sizeof(sfxge_rx_stats) / sizeof(sfxge_rx_stats[0]);
+ id++) {
+ SYSCTL_ADD_PROC(
+ ctx, stat_list,
+ OID_AUTO, sfxge_rx_stats[id].name,
+ CTLTYPE_UINT|CTLFLAG_RD,
+ sc, id, sfxge_rx_stat_handler, "IU",
+ "");
+ }
+}
+
+void
+sfxge_rx_fini(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ int index;
+
+ intr = &sc->intr;
+
+ index = intr->n_alloc;
+ while (--index >= 0)
+ sfxge_rx_qfini(sc, index);
+}
+
+int
+sfxge_rx_init(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ int index;
+ int rc;
+
+ if (lro_idle_ticks == 0)
+ lro_idle_ticks = hz / 10 + 1; /* 100 ms */
+
+ intr = &sc->intr;
+
+ KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
+ ("intr->state != SFXGE_INTR_INITIALIZED"));
+
+ /* Initialize the receive queue(s) - one per interrupt. */
+ for (index = 0; index < intr->n_alloc; index++) {
+ if ((rc = sfxge_rx_qinit(sc, index)) != 0)
+ goto fail;
+ }
+
+ sfxge_rx_stat_init(sc);
+
+ return (0);
+
+fail:
+ /* Tear down the receive queue(s). */
+ while (--index >= 0)
+ sfxge_rx_qfini(sc, index);
+
+ return (rc);
+}
diff --git a/sys/dev/sfxge/sfxge_rx.h b/sys/dev/sfxge/sfxge_rx.h
new file mode 100644
index 0000000..5a80fdb
--- /dev/null
+++ b/sys/dev/sfxge/sfxge_rx.h
@@ -0,0 +1,189 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SFXGE_RX_H
+#define _SFXGE_RX_H
+
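+/*
+ * Codes for driver-generated ("magic") events. Bit 15 distinguishes
+ * them from hardware events; the low SFXGE_MAGIC_DMAQ_LABEL_WIDTH bits
+ * carry a DMA queue label and the code itself sits above the label.
+ */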
+#define SFXGE_MAGIC_RESERVED 0x8000
+
+#define SFXGE_MAGIC_DMAQ_LABEL_WIDTH 6
+#define SFXGE_MAGIC_DMAQ_LABEL_MASK \
+ ((1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH) - 1)
+
+#define SFXGE_MAGIC_RX_QFLUSH_DONE \
+ (SFXGE_MAGIC_RESERVED | (1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+
+#define SFXGE_MAGIC_RX_QFLUSH_FAILED \
+ (SFXGE_MAGIC_RESERVED | (2 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+
+#define SFXGE_MAGIC_RX_QREFILL \
+ (SFXGE_MAGIC_RESERVED | (3 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+
+#define SFXGE_MAGIC_TX_QFLUSH_DONE \
+ (SFXGE_MAGIC_RESERVED | (4 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+
+#define SFXGE_RX_SCALE_MAX EFX_MAXRSS
+
+struct sfxge_rx_sw_desc {
+ struct mbuf *mbuf;
+ bus_dmamap_t map;
+ int flags;
+ int size;
+};
+
+/**
+ * struct sfxge_lro_conn - Connection state for software LRO
+ * @link: Link for hash table and free list.
+ * @active_link: Link for active_conns list
+ * @l2_id: Identifying information from layer 2
+ * @conn_hash: Hash of connection 4-tuple
+ * @nh: IP (v4 or v6) header of super-packet
+ * @source: Source TCP port number
+ * @dest: Destination TCP port number
+ * @n_in_order_pkts: Number of in-order packets with payload.
+ * @next_seq: Next in-order sequence number.
+ * @last_pkt_ticks: Time we last saw a packet on this connection.
+ * @mbuf: The mbuf we are currently holding.
+ * If %NULL, then all following fields are undefined.
+ * @mbuf_tail: The tail of the frag_list of mbufs we're holding.
+ * Only valid after at least one merge.
+ * @th_last: The TCP header of the last packet merged.
+ * @next_buf: The next RX buffer to process.
+ * @next_eh: Ethernet header of the next buffer.
+ * @next_nh: IP header of the next buffer.
+ * @delivered: True if we've delivered a payload packet up this interrupt.
+ */
+struct sfxge_lro_conn {
+ TAILQ_ENTRY(sfxge_lro_conn) link;
+ LIST_ENTRY(sfxge_lro_conn) active_link;
+ uint16_t l2_id;
+ uint32_t conn_hash;
+ void *nh;
+ uint16_t source, dest;
+ int n_in_order_pkts;
+ unsigned next_seq;
+ unsigned last_pkt_ticks;
+ struct mbuf *mbuf;
+ struct mbuf *mbuf_tail;
+ struct tcphdr *th_last;
+ struct sfxge_rx_sw_desc next_buf;
+ void *next_eh;
+ void *next_nh;
+ int delivered;
+};
+
+/**
+ * struct sfxge_lro_state - Port state for software LRO
+ * @sc: The associated NIC.
+ * @conns_mask: Number of hash buckets - 1.
+ * @conns: Hash buckets for tracked connections.
+ * @conns_n: Length of linked list for each hash bucket.
+ * @active_conns: Connections that are holding a packet.
+ * Connections are self-linked when not in this list.
+ * @free_conns: Free sfxge_lro_conn instances.
+ * @last_purge_ticks: The value of ticks last time we purged idle
+ * connections.
+ * @n_merges: Number of packets absorbed by LRO.
+ * @n_bursts: Number of bursts spotted by LRO.
+ * @n_slow_start: Number of packets not merged because connection may be in
+ * slow-start.
+ * @n_misorder: Number of out-of-order packets seen in tracked streams.
+ * @n_too_many: Incremented when we're trying to track too many streams.
+ * @n_new_stream: Number of distinct streams we've tracked.
+ * @n_drop_idle: Number of streams discarded because they went idle.
+ * @n_drop_closed: Number of streams that have seen a FIN or RST.
+ */
+struct sfxge_lro_state {
+ struct sfxge_softc *sc;
+ unsigned conns_mask;
+ TAILQ_HEAD(sfxge_lro_tailq, sfxge_lro_conn) *conns;
+ unsigned *conns_n;
+ LIST_HEAD(, sfxge_lro_conn) active_conns;
+ TAILQ_HEAD(, sfxge_lro_conn) free_conns;
+ unsigned last_purge_ticks;
+ unsigned n_merges;
+ unsigned n_bursts;
+ unsigned n_slow_start;
+ unsigned n_misorder;
+ unsigned n_too_many;
+ unsigned n_new_stream;
+ unsigned n_drop_idle;
+ unsigned n_drop_closed;
+};
+
+enum sfxge_flush_state {
+ SFXGE_FLUSH_DONE = 0,
+ SFXGE_FLUSH_PENDING,
+ SFXGE_FLUSH_FAILED
+};
+
+enum sfxge_rxq_state {
+ SFXGE_RXQ_UNINITIALIZED = 0,
+ SFXGE_RXQ_INITIALIZED,
+ SFXGE_RXQ_STARTED
+};
+
+#define SFXGE_RX_BATCH 128
+
+struct sfxge_rxq {
+ struct sfxge_softc *sc __aligned(CACHE_LINE_SIZE);
+ unsigned int index;
+ efsys_mem_t mem;
+ unsigned int buf_base_id;
+ enum sfxge_rxq_state init_state;
+
+ struct sfxge_rx_sw_desc *queue __aligned(CACHE_LINE_SIZE);
+ unsigned int added;
+ unsigned int pending;
+ unsigned int completed;
+ unsigned int loopback;
+ struct sfxge_lro_state lro;
+ struct callout refill_callout;
+ unsigned int refill_delay;
+
+ efx_rxq_t *common __aligned(CACHE_LINE_SIZE);
+ volatile enum sfxge_flush_state flush_state;
+};
+
+/*
+ * From sfxge_rx.c.
+ */
+extern int sfxge_rx_init(struct sfxge_softc *sc);
+extern void sfxge_rx_fini(struct sfxge_softc *sc);
+extern int sfxge_rx_start(struct sfxge_softc *sc);
+extern void sfxge_rx_stop(struct sfxge_softc *sc);
+extern void sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop);
+extern void sfxge_rx_qrefill(struct sfxge_rxq *rxq);
+extern void sfxge_rx_qflush_done(struct sfxge_rxq *rxq);
+extern void sfxge_rx_qflush_failed(struct sfxge_rxq *rxq);
+extern void sfxge_rx_scale_update(void *arg, int npending);
+
+#endif
diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c
new file mode 100644
index 0000000..801787a
--- /dev/null
+++ b/sys/dev/sfxge/sfxge_tx.c
@@ -0,0 +1,1491 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/mbuf.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+
+#include "common/efx.h"
+
+#include "sfxge.h"
+#include "sfxge_tx.h"
+
+/* Set the block level to ensure there is space to generate a
+ * large number of descriptors for TSO. With minimum MSS and
+ * maximum mbuf length we might need more than a ring-ful of
+ * descriptors, but this should not happen in practice except
+ * due to deliberate attack. In that case we will truncate
+ * the output at a packet boundary. Allow for a reasonable
+ * minimum MSS of 512.
+ */
+#define SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
+#define SFXGE_TXQ_BLOCK_LEVEL (SFXGE_NDESCS - SFXGE_TSO_MAX_DESC)
+
+/* Forward declarations. */
+static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
+static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
+static void sfxge_tx_qunblock(struct sfxge_txq *txq);
+static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
+ const bus_dma_segment_t *dma_seg, int n_dma_seg);
+
+void
+sfxge_tx_qcomplete(struct sfxge_txq *txq)
+{
+ struct sfxge_softc *sc;
+ struct sfxge_evq *evq;
+ unsigned int completed;
+
+ sc = txq->sc;
+ evq = sc->evq[txq->evq_index];
+
+ mtx_assert(&evq->lock, MA_OWNED);
+
+ completed = txq->completed;
+ while (completed != txq->pending) {
+ struct sfxge_tx_mapping *stmp;
+ unsigned int id;
+
+ id = completed++ & (SFXGE_NDESCS - 1);
+
+ stmp = &txq->stmp[id];
+ if (stmp->flags & TX_BUF_UNMAP) {
+ bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
+ if (stmp->flags & TX_BUF_MBUF) {
+ struct mbuf *m = stmp->u.mbuf;
+ do
+ m = m_free(m);
+ while (m != NULL);
+ } else {
+ free(stmp->u.heap_buf, M_SFXGE);
+ }
+ stmp->flags = 0;
+ }
+ }
+ txq->completed = completed;
+
+ /* Check whether we need to unblock the queue. */
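+	/* This barrier pairs with the barriers in sfxge_tx_qlist_post()
+	 * so that a newly blocked queue is reliably observed here. */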
+ mb();
+ if (txq->blocked) {
+ unsigned int level;
+
+ level = txq->added - txq->completed;
+ if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
+ sfxge_tx_qunblock(txq);
+ }
+}
+
+#ifdef SFXGE_HAVE_MQ
+
+/*
+ * Reorder the put list and append it to the get list.
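+ * Producers atomically push packets onto the head of the put list, so
+ * it holds them in reverse order; reversing restores submission order.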
+ */
+static void
+sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
+{
+ struct sfxge_tx_dpl *stdp;
+ struct mbuf *mbuf, *get_next, **get_tailp;
+ volatile uintptr_t *putp;
+ uintptr_t put;
+ unsigned int count;
+
+ mtx_assert(&txq->lock, MA_OWNED);
+
+ stdp = &txq->dpl;
+
+ /* Acquire the put list. */
+ putp = &stdp->std_put;
+ put = atomic_readandclear_ptr(putp);
+ mbuf = (void *)put;
+
+ if (mbuf == NULL)
+ return;
+
+ /* Reverse the put list. */
+ get_tailp = &mbuf->m_nextpkt;
+ get_next = NULL;
+
+ count = 0;
+ do {
+ struct mbuf *put_next;
+
+ put_next = mbuf->m_nextpkt;
+ mbuf->m_nextpkt = get_next;
+ get_next = mbuf;
+ mbuf = put_next;
+
+ count++;
+ } while (mbuf != NULL);
+
+ /* Append the reversed put list to the get list. */
+ KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
+ *stdp->std_getp = get_next;
+ stdp->std_getp = get_tailp;
+ stdp->std_count += count;
+}
+
+#endif /* SFXGE_HAVE_MQ */
+
+static void
+sfxge_tx_qreap(struct sfxge_txq *txq)
+{
+ mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);
+
+ txq->reaped = txq->completed;
+}
+
+static void
+sfxge_tx_qlist_post(struct sfxge_txq *txq)
+{
+ unsigned int old_added;
+ unsigned int level;
+ int rc;
+
+ mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);
+
+ KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
+ KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
+ ("txq->n_pend_desc too large"));
+ KASSERT(!txq->blocked, ("txq->blocked"));
+
+ old_added = txq->added;
+
+ /* Post the fragment list. */
+ rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc,
+ txq->reaped, &txq->added);
+ KASSERT(rc == 0, ("efx_tx_qpost() failed"));
+
+ /* If efx_tx_qpost() had to refragment, our information about
+ * buffers to free may be associated with the wrong
+ * descriptors.
+ */
+ KASSERT(txq->added - old_added == txq->n_pend_desc,
+ ("efx_tx_qpost() refragmented descriptors"));
+
+ level = txq->added - txq->reaped;
+ KASSERT(level <= SFXGE_NDESCS, ("overfilled TX queue"));
+
+ /* Clear the fragment list. */
+ txq->n_pend_desc = 0;
+
+ /* Have we reached the block level? */
+ if (level < SFXGE_TXQ_BLOCK_LEVEL)
+ return;
+
+ /* Reap, and check again */
+ sfxge_tx_qreap(txq);
+ level = txq->added - txq->reaped;
+ if (level < SFXGE_TXQ_BLOCK_LEVEL)
+ return;
+
+ txq->blocked = 1;
+
+ /*
+ * Avoid a race with completion interrupt handling that could leave
+ * the queue blocked.
+ */
+ mb();
+ sfxge_tx_qreap(txq);
+ level = txq->added - txq->reaped;
+ if (level < SFXGE_TXQ_BLOCK_LEVEL) {
+ mb();
+ txq->blocked = 0;
+ }
+}
+
+static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
+{
+ bus_dmamap_t *used_map;
+ bus_dmamap_t map;
+ bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
+ unsigned int id;
+ struct sfxge_tx_mapping *stmp;
+ efx_buffer_t *desc;
+ int n_dma_seg;
+ int rc;
+ int i;
+
+ KASSERT(!txq->blocked, ("txq->blocked"));
+
+ if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
+ prefetch_read_many(mbuf->m_data);
+
+ if (txq->init_state != SFXGE_TXQ_STARTED) {
+ rc = EINTR;
+ goto reject;
+ }
+
+ /* Load the packet for DMA. */
+ id = txq->added & (SFXGE_NDESCS - 1);
+ stmp = &txq->stmp[id];
+ rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
+ mbuf, dma_seg, &n_dma_seg, 0);
+ if (rc == EFBIG) {
+ /* Try again. */
+ struct mbuf *new_mbuf = m_collapse(mbuf, M_DONTWAIT,
+ SFXGE_TX_MAPPING_MAX_SEG);
+ if (new_mbuf == NULL)
+ goto reject;
+ ++txq->collapses;
+ mbuf = new_mbuf;
+ rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
+ stmp->map, mbuf,
+ dma_seg, &n_dma_seg, 0);
+ }
+ if (rc != 0)
+ goto reject;
+
+ /* Make the packet visible to the hardware. */
+ bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);
+
+ used_map = &stmp->map;
+
+ if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
+ rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg);
+ if (rc < 0)
+ goto reject_mapped;
+ stmp = &txq->stmp[rc];
+ } else {
+ /* Add the mapping to the fragment list, and set flags
+ * for the buffer.
+ */
+ i = 0;
+ for (;;) {
+ desc = &txq->pend_desc[i];
+ desc->eb_addr = dma_seg[i].ds_addr;
+ desc->eb_size = dma_seg[i].ds_len;
+ if (i == n_dma_seg - 1) {
+ desc->eb_eop = 1;
+ break;
+ }
+ desc->eb_eop = 0;
+ i++;
+
+ stmp->flags = 0;
+ if (__predict_false(stmp ==
+ &txq->stmp[SFXGE_NDESCS - 1]))
+ stmp = &txq->stmp[0];
+ else
+ stmp++;
+ }
+ txq->n_pend_desc = n_dma_seg;
+ }
+
+ /*
+ * If the mapping required more than one descriptor
+ * then we need to associate the DMA map with the last
+ * descriptor, not the first.
+ */
+ if (used_map != &stmp->map) {
+ map = stmp->map;
+ stmp->map = *used_map;
+ *used_map = map;
+ }
+
+ stmp->u.mbuf = mbuf;
+ stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;
+
+ /* Post the fragment list. */
+ sfxge_tx_qlist_post(txq);
+
+ return 0;
+
+reject_mapped:
+ bus_dmamap_unload(txq->packet_dma_tag, *used_map);
+reject:
+ /* Drop the packet on the floor. */
+ m_freem(mbuf);
+ ++txq->drops;
+
+ return rc;
+}
+
+#ifdef SFXGE_HAVE_MQ
+
+/*
+ * Drain the deferred packet list into the transmit queue.
+ */
+static void
+sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
+{
+ struct sfxge_softc *sc;
+ struct sfxge_tx_dpl *stdp;
+ struct mbuf *mbuf, *next;
+ unsigned int count;
+ unsigned int pushed;
+ int rc;
+
+ mtx_assert(&txq->lock, MA_OWNED);
+
+ sc = txq->sc;
+ stdp = &txq->dpl;
+ pushed = txq->added;
+
+ prefetch_read_many(sc->enp);
+ prefetch_read_many(txq->common);
+
+ mbuf = stdp->std_get;
+ count = stdp->std_count;
+
+ while (count != 0) {
+ KASSERT(mbuf != NULL, ("mbuf == NULL"));
+
+ next = mbuf->m_nextpkt;
+ mbuf->m_nextpkt = NULL;
+
+ ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */
+
+ if (next != NULL)
+ prefetch_read_many(next);
+
+ rc = sfxge_tx_queue_mbuf(txq, mbuf);
+ --count;
+ mbuf = next;
+ if (rc != 0)
+ continue;
+
+ if (txq->blocked)
+ break;
+
+ /* Push the fragments to the hardware in batches. */
+ if (txq->added - pushed >= SFXGE_TX_BATCH) {
+ efx_tx_qpush(txq->common, txq->added);
+ pushed = txq->added;
+ }
+ }
+
+ if (count == 0) {
+ KASSERT(mbuf == NULL, ("mbuf != NULL"));
+ stdp->std_get = NULL;
+ stdp->std_count = 0;
+ stdp->std_getp = &stdp->std_get;
+ } else {
+ stdp->std_get = mbuf;
+ stdp->std_count = count;
+ }
+
+ if (txq->added != pushed)
+ efx_tx_qpush(txq->common, txq->added);
+
+ KASSERT(txq->blocked || stdp->std_count == 0,
+ ("queue unblocked but count is non-zero"));
+}
+
+#define SFXGE_TX_QDPL_PENDING(_txq) \
+ ((_txq)->dpl.std_put != 0)
+
+/*
+ * Service the deferred packet list.
+ *
+ * NOTE: drops the txq mutex!
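+ *
+ * After dropping the mutex, iterate again if more packets were pushed
+ * onto the put list in the meantime and the lock can be re-taken
+ * without blocking.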
+ */
+static inline void
+sfxge_tx_qdpl_service(struct sfxge_txq *txq)
+{
+ mtx_assert(&txq->lock, MA_OWNED);
+
+ do {
+ if (SFXGE_TX_QDPL_PENDING(txq))
+ sfxge_tx_qdpl_swizzle(txq);
+
+ if (!txq->blocked)
+ sfxge_tx_qdpl_drain(txq);
+
+ mtx_unlock(&txq->lock);
+ } while (SFXGE_TX_QDPL_PENDING(txq) &&
+ mtx_trylock(&txq->lock));
+}
+
+/*
+ * Put a packet on the deferred packet list.
+ *
+ * If we are called with the txq lock held, we put the packet on the "get
+ * list", otherwise we atomically push it on the "put list". The swizzle
+ * function takes care of ordering.
+ *
+ * The length of the put list is bounded by SFXGE_TX_MAX_DEFERRED. We
+ * overload the csum_data field in the mbuf to keep track of this length
+ * because there is no cheap alternative to avoid races.
+ */
+static inline int
+sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
+{
+ struct sfxge_tx_dpl *stdp;
+
+ stdp = &txq->dpl;
+
+ KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));
+
+ if (locked) {
+ mtx_assert(&txq->lock, MA_OWNED);
+
+ sfxge_tx_qdpl_swizzle(txq);
+
+ *(stdp->std_getp) = mbuf;
+ stdp->std_getp = &mbuf->m_nextpkt;
+ stdp->std_count++;
+ } else {
+ volatile uintptr_t *putp;
+ uintptr_t old;
+ uintptr_t new;
+ unsigned old_len;
+
+ putp = &stdp->std_put;
+ new = (uintptr_t)mbuf;
+
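+		/*
+		 * Lock-free prepend: link this mbuf ahead of the current
+		 * head and retry the CAS until no other producer races
+		 * with us.  The deferred count travels in the head mbuf's
+		 * csum_data field, as described above.
+		 */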
+ do {
+ old = *putp;
+ if (old) {
+ struct mbuf *mp = (struct mbuf *)old;
+ old_len = mp->m_pkthdr.csum_data;
+ } else
+ old_len = 0;
+ if (old_len >= SFXGE_TX_MAX_DEFERRED)
+ return ENOBUFS;
+ mbuf->m_pkthdr.csum_data = old_len + 1;
+ mbuf->m_nextpkt = (void *)old;
+ } while (atomic_cmpset_ptr(putp, old, new) == 0);
+ }
+
+ return (0);
+}
+
+/*
+ * Called from if_transmit - will try to grab the txq lock and enqueue to the
+ * get list if it succeeds, otherwise will push onto the put list.
+ */
+int
+sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
+{
+ int locked;
+ int rc;
+
+ /*
+ * Try to grab the txq lock. If we are able to get the lock,
+ * the packet will be appended to the "get list" of the deferred
+ * packet list. Otherwise, it will be pushed on the "put list".
+ */
+ locked = mtx_trylock(&txq->lock);
+
+ /*
+ * Can only fail if we weren't able to get the lock.
+ */
+ if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
+ KASSERT(!locked,
+ ("sfxge_tx_qdpl_put() failed locked"));
+ rc = ENOBUFS;
+ goto fail;
+ }
+
+ /*
+ * Try to grab the lock again.
+ *
+ * If we are able to get the lock, we need to process the deferred
+ * packet list. If we are not able to get the lock, another thread
+ * is processing the list.
+ */
+ if (!locked)
+ locked = mtx_trylock(&txq->lock);
+
+ if (locked) {
+ /* Try to service the list. */
+ sfxge_tx_qdpl_service(txq);
+ /* Lock has been dropped. */
+ }
+
+ return (0);
+
+fail:
+ return (rc);
+}
+
+static void
+sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
+{
+ struct sfxge_tx_dpl *stdp = &txq->dpl;
+ struct mbuf *mbuf, *next;
+
+ mtx_lock(&txq->lock);
+
+ sfxge_tx_qdpl_swizzle(txq);
+ for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
+ next = mbuf->m_nextpkt;
+ m_freem(mbuf);
+ }
+ stdp->std_get = NULL;
+ stdp->std_count = 0;
+ stdp->std_getp = &stdp->std_get;
+
+ mtx_unlock(&txq->lock);
+}
+
+void
+sfxge_if_qflush(struct ifnet *ifp)
+{
+ struct sfxge_softc *sc;
+ int i;
+
+ sc = ifp->if_softc;
+
+ for (i = 0; i < SFXGE_TX_SCALE(sc); i++)
+ sfxge_tx_qdpl_flush(sc->txq[i]);
+}
+
+/*
+ * TX start -- called by the stack.
+ */
+int
+sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+ struct sfxge_softc *sc;
+ struct sfxge_txq *txq;
+ int rc;
+
+ sc = (struct sfxge_softc *)ifp->if_softc;
+
+ KASSERT(ifp->if_flags & IFF_UP, ("interface not up"));
+
+ if (!SFXGE_LINK_UP(sc)) {
+ m_freem(m);
+ return (0);
+ }
+
+ /* Pick the desired transmit queue. */
+ if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
+ int index = 0;
+
+ if (m->m_flags & M_FLOWID) {
+ uint32_t hash = m->m_pkthdr.flowid;
+
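+			/*
+			 * Using the RX indirection table here presumably
+			 * keeps a flow's transmit completions on the same
+			 * event queue/CPU as its receive processing.
+			 */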
+ index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
+ }
+ txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
+ } else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
+ txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
+ } else {
+ txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
+ }
+
+ rc = sfxge_tx_packet_add(txq, m);
+
+ return (rc);
+}
+
+#else /* !SFXGE_HAVE_MQ */
+
+static void sfxge_if_start_locked(struct ifnet *ifp)
+{
+ struct sfxge_softc *sc = ifp->if_softc;
+ struct sfxge_txq *txq;
+ struct mbuf *mbuf;
+ unsigned int pushed[SFXGE_TXQ_NTYPES];
+ unsigned int q_index;
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING)
+ return;
+
+ if (!sc->port.link_up)
+ return;
+
+ for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
+ txq = sc->txq[q_index];
+ pushed[q_index] = txq->added;
+ }
+
+ while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);
+ if (mbuf == NULL)
+ break;
+
+ ETHER_BPF_MTAP(ifp, mbuf); /* packet capture */
+
+ /* Pick the desired transmit queue. */
+ if (mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO))
+ q_index = SFXGE_TXQ_IP_TCP_UDP_CKSUM;
+ else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP)
+ q_index = SFXGE_TXQ_IP_CKSUM;
+ else
+ q_index = SFXGE_TXQ_NON_CKSUM;
+ txq = sc->txq[q_index];
+
+ if (sfxge_tx_queue_mbuf(txq, mbuf) != 0)
+ continue;
+
+ if (txq->blocked) {
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ break;
+ }
+
+ /* Push the fragments to the hardware in batches. */
+ if (txq->added - pushed[q_index] >= SFXGE_TX_BATCH) {
+ efx_tx_qpush(txq->common, txq->added);
+ pushed[q_index] = txq->added;
+ }
+ }
+
+ for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
+ txq = sc->txq[q_index];
+ if (txq->added != pushed[q_index])
+ efx_tx_qpush(txq->common, txq->added);
+ }
+}
+
+void sfxge_if_start(struct ifnet *ifp)
+{
+ struct sfxge_softc *sc = ifp->if_softc;
+
+ mtx_lock(&sc->tx_lock);
+ sfxge_if_start_locked(ifp);
+ mtx_unlock(&sc->tx_lock);
+}
+
+static inline void
+sfxge_tx_qdpl_service(struct sfxge_txq *txq)
+{
+ struct sfxge_softc *sc = txq->sc;
+ struct ifnet *ifp = sc->ifnet;
+
+ mtx_assert(&sc->tx_lock, MA_OWNED);
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ sfxge_if_start_locked(ifp);
+ mtx_unlock(&sc->tx_lock);
+}
+
+#endif /* SFXGE_HAVE_MQ */
+
+/*
+ * Software "TSO". Not quite as good as doing it in hardware, but
+ * still faster than segmenting in the stack.
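+ *
+ * Each output segment gets a freshly built header in a DMA-mapped
+ * buffer (sequence number and IP length updated per segment), while
+ * payload descriptors point directly at the original mbuf's DMA
+ * segments.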
+ */
+
+struct sfxge_tso_state {
+ /* Output position */
+ unsigned out_len; /* Remaining length in current segment */
+ unsigned seqnum; /* Current sequence number */
+ unsigned packet_space; /* Remaining space in current packet */
+
+ /* Input position */
+ unsigned dma_seg_i; /* Current DMA segment number */
+ uint64_t dma_addr; /* DMA address of current position */
+ unsigned in_len; /* Remaining length in current mbuf */
+
+ const struct mbuf *mbuf; /* Input mbuf (head of chain) */
+ u_short protocol; /* Network protocol (after VLAN decap) */
+ ssize_t nh_off; /* Offset of network header */
+ ssize_t tcph_off; /* Offset of TCP header */
+ unsigned header_len; /* Number of bytes of header */
+ int full_packet_size; /* Number of bytes to put in each outgoing
+ * segment */
+};
+
+static inline const struct ip *tso_iph(const struct sfxge_tso_state *tso)
+{
+ KASSERT(tso->protocol == htons(ETHERTYPE_IP),
+ ("tso_iph() in non-IPv4 state"));
+ return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
+}
+static inline const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
+{
+ KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
+ ("tso_ip6h() in non-IPv6 state"));
+ return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
+}
+static inline const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
+{
+ return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
+}
+
+/* Size of preallocated TSO header buffers. Larger blocks must be
+ * allocated from the heap.
+ */
+#define TSOH_STD_SIZE 128
+
+/* At most half the descriptors in the queue at any time will refer to
+ * a TSO header buffer, since they must always be followed by a
+ * payload descriptor referring to an mbuf.
+ */
+#define TSOH_COUNT (SFXGE_NDESCS / 2u)
+#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
+#define TSOH_PAGE_COUNT ((TSOH_COUNT + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
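+
+/* For example, assuming SFXGE_NDESCS == 1024 and PAGE_SIZE == 4096
+ * (illustrative values only), this gives TSOH_COUNT == 512,
+ * TSOH_PER_PAGE == 32 and TSOH_PAGE_COUNT == 16.
+ */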
+
+static int tso_init(struct sfxge_txq *txq)
+{
+ struct sfxge_softc *sc = txq->sc;
+ int i, rc;
+
+ /* Allocate TSO header buffers */
+ txq->tsoh_buffer = malloc(TSOH_PAGE_COUNT * sizeof(txq->tsoh_buffer[0]),
+ M_SFXGE, M_WAITOK);
+
+ for (i = 0; i < TSOH_PAGE_COUNT; i++) {
+ rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i-- > 0)
+ sfxge_dma_free(&txq->tsoh_buffer[i]);
+ free(txq->tsoh_buffer, M_SFXGE);
+ txq->tsoh_buffer = NULL;
+ return rc;
+}
+
+static void tso_fini(struct sfxge_txq *txq)
+{
+ int i;
+
+ if (txq->tsoh_buffer) {
+ for (i = 0; i < TSOH_PAGE_COUNT; i++)
+ sfxge_dma_free(&txq->tsoh_buffer[i]);
+ free(txq->tsoh_buffer, M_SFXGE);
+ }
+}
+
+static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf)
+{
+ struct ether_header *eh = mtod(mbuf, struct ether_header *);
+
+ tso->mbuf = mbuf;
+
+ /* Find network protocol and header */
+ tso->protocol = eh->ether_type;
+ if (tso->protocol == htons(ETHERTYPE_VLAN)) {
+ struct ether_vlan_header *veh =
+ mtod(mbuf, struct ether_vlan_header *);
+ tso->protocol = veh->evl_proto;
+ tso->nh_off = sizeof(*veh);
+ } else {
+ tso->nh_off = sizeof(*eh);
+ }
+
+ /* Find TCP header */
+ if (tso->protocol == htons(ETHERTYPE_IP)) {
+ KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
+ ("TSO required on non-TCP packet"));
+ tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
+ } else {
+ KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
+ ("TSO required on non-IP packet"));
+ KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
+ ("TSO required on non-TCP packet"));
+ tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
+ }
+
+ /* We assume all headers are linear in the head mbuf */
+ tso->header_len = tso->tcph_off + 4 * tso_tcph(tso)->th_off;
+ KASSERT(tso->header_len <= mbuf->m_len, ("packet headers fragmented"));
+ tso->full_packet_size = tso->header_len + mbuf->m_pkthdr.tso_segsz;
+
+ tso->seqnum = ntohl(tso_tcph(tso)->th_seq);
+
+	/* These flags must not be duplicated in every generated segment. */
+ KASSERT(!(tso_tcph(tso)->th_flags & (TH_URG | TH_SYN | TH_RST)),
+ ("incompatible TCP flag on TSO packet"));
+
+ tso->out_len = mbuf->m_pkthdr.len - tso->header_len;
+}
+
+/*
+ * tso_fill_packet_with_fragment - form descriptors for the current fragment
+ *
+ * Form descriptors for the current fragment, until we reach the end
+ * of the fragment or the end of the current output packet.
+ */
+static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
+ struct sfxge_tso_state *tso)
+{
+ efx_buffer_t *desc;
+ int n;
+
+ if (tso->in_len == 0 || tso->packet_space == 0)
+ return;
+
+ KASSERT(tso->in_len > 0, ("TSO input length went negative"));
+ KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));
+
+ n = min(tso->in_len, tso->packet_space);
+
+ tso->packet_space -= n;
+ tso->out_len -= n;
+ tso->in_len -= n;
+
+ desc = &txq->pend_desc[txq->n_pend_desc++];
+ desc->eb_addr = tso->dma_addr;
+ desc->eb_size = n;
+ desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0;
+
+ tso->dma_addr += n;
+}
+
+/* Callback from bus_dmamap_load() for long TSO headers. */
+static void tso_map_long_header(void *dma_addr_ret,
+ bus_dma_segment_t *segs, int nseg,
+ int error)
+{
+ *(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
+ __predict_true(nseg == 1)) ?
+ segs->ds_addr : 0);
+}
+
+/*
+ * tso_start_new_packet - generate a new header and prepare for the new packet
+ *
+ * Generate a new header and prepare for the new packet. Return 0 on
+ * success, or an error code if failed to alloc header.
+ */
+static int tso_start_new_packet(struct sfxge_txq *txq,
+ struct sfxge_tso_state *tso,
+ unsigned int id)
+{
+ struct sfxge_tx_mapping *stmp = &txq->stmp[id];
+ struct tcphdr *tsoh_th;
+ unsigned ip_length;
+ caddr_t header;
+ uint64_t dma_addr;
+ bus_dmamap_t map;
+ efx_buffer_t *desc;
+ int rc;
+
+ /* Allocate a DMA-mapped header buffer. */
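+	/*
+	 * At most every other descriptor can carry a header (see the
+	 * TSOH_COUNT definition), so id / 2 indexes the preallocated
+	 * buffers without collisions.
+	 */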
+ if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
+ unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
+ unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;
+
+ header = (txq->tsoh_buffer[page_index].esm_base +
+ buf_index * TSOH_STD_SIZE);
+ dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
+ buf_index * TSOH_STD_SIZE);
+ map = txq->tsoh_buffer[page_index].esm_map;
+
+ stmp->flags = 0;
+ } else {
+ /* We cannot use bus_dmamem_alloc() as that may sleep */
+ header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
+ if (__predict_false(!header))
+ return ENOMEM;
+ rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
+ header, tso->header_len,
+ tso_map_long_header, &dma_addr,
+ BUS_DMA_NOWAIT);
+ if (__predict_false(dma_addr == 0)) {
+ if (rc == 0) {
+ /* Succeeded but got >1 segment */
+ bus_dmamap_unload(txq->packet_dma_tag,
+ stmp->map);
+ rc = EINVAL;
+ }
+ free(header, M_SFXGE);
+ return rc;
+ }
+ map = stmp->map;
+
+ txq->tso_long_headers++;
+ stmp->u.heap_buf = header;
+ stmp->flags = TX_BUF_UNMAP;
+ }
+
+ tsoh_th = (struct tcphdr *)(header + tso->tcph_off);
+
+ /* Copy and update the headers. */
+ memcpy(header, tso->mbuf->m_data, tso->header_len);
+
+ tsoh_th->th_seq = htonl(tso->seqnum);
+ tso->seqnum += tso->mbuf->m_pkthdr.tso_segsz;
+ if (tso->out_len > tso->mbuf->m_pkthdr.tso_segsz) {
+ /* This packet will not finish the TSO burst. */
+ ip_length = tso->full_packet_size - tso->nh_off;
+ tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
+ } else {
+ /* This packet will be the last in the TSO burst. */
+ ip_length = tso->header_len - tso->nh_off + tso->out_len;
+ }
+
+ if (tso->protocol == htons(ETHERTYPE_IP)) {
+ struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
+ tsoh_iph->ip_len = htons(ip_length);
+ /* XXX We should increment ip_id, but FreeBSD doesn't
+ * currently allocate extra IDs for multiple segments.
+ */
+ } else {
+ struct ip6_hdr *tsoh_iph =
+ (struct ip6_hdr *)(header + tso->nh_off);
+ tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));
+ }
+
+ /* Make the header visible to the hardware. */
+ bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);
+
+ tso->packet_space = tso->mbuf->m_pkthdr.tso_segsz;
+ txq->tso_packets++;
+
+ /* Form a descriptor for this header. */
+ desc = &txq->pend_desc[txq->n_pend_desc++];
+ desc->eb_addr = dma_addr;
+ desc->eb_size = tso->header_len;
+ desc->eb_eop = 0;
+
+ return 0;
+}
+
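+/*
+ * Queue a TSO packet.  Returns the index of the last descriptor used,
+ * or -1 if no descriptors could be generated at all, in which case the
+ * caller unloads the DMA mapping and drops the packet.
+ */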
+static int
+sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
+ const bus_dma_segment_t *dma_seg, int n_dma_seg)
+{
+ struct sfxge_tso_state tso;
+ unsigned int id, next_id;
+
+ tso_start(&tso, mbuf);
+
+ /* Grab the first payload fragment. */
+ if (dma_seg->ds_len == tso.header_len) {
+ --n_dma_seg;
+ KASSERT(n_dma_seg, ("no payload found in TSO packet"));
+ ++dma_seg;
+ tso.in_len = dma_seg->ds_len;
+ tso.dma_addr = dma_seg->ds_addr;
+ } else {
+ tso.in_len = dma_seg->ds_len - tso.header_len;
+ tso.dma_addr = dma_seg->ds_addr + tso.header_len;
+ }
+
+ id = txq->added & (SFXGE_NDESCS - 1);
+ if (__predict_false(tso_start_new_packet(txq, &tso, id)))
+ return -1;
+
+ while (1) {
+ id = (id + 1) & (SFXGE_NDESCS - 1);
+ tso_fill_packet_with_fragment(txq, &tso);
+
+ /* Move onto the next fragment? */
+ if (tso.in_len == 0) {
+ --n_dma_seg;
+ if (n_dma_seg == 0)
+ break;
+ ++dma_seg;
+ tso.in_len = dma_seg->ds_len;
+ tso.dma_addr = dma_seg->ds_addr;
+ }
+
+ /* End of packet? */
+ if (tso.packet_space == 0) {
+ /* If the queue is now full due to tiny MSS,
+ * or we can't create another header, discard
+ * the remainder of the input mbuf but do not
+ * roll back the work we have done.
+ */
+ if (txq->n_pend_desc >
+ SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG))
+ break;
+ next_id = (id + 1) & (SFXGE_NDESCS - 1);
+ if (__predict_false(tso_start_new_packet(txq, &tso,
+ next_id)))
+ break;
+ id = next_id;
+ }
+ }
+
+ txq->tso_bursts++;
+ return id;
+}
+
+static void
+sfxge_tx_qunblock(struct sfxge_txq *txq)
+{
+ struct sfxge_softc *sc;
+ struct sfxge_evq *evq;
+
+ sc = txq->sc;
+ evq = sc->evq[txq->evq_index];
+
+ mtx_assert(&evq->lock, MA_OWNED);
+
+ if (txq->init_state != SFXGE_TXQ_STARTED)
+ return;
+
+ mtx_lock(SFXGE_TXQ_LOCK(txq));
+
+ if (txq->blocked) {
+ unsigned int level;
+
+ level = txq->added - txq->completed;
+ if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
+ txq->blocked = 0;
+ }
+
+ sfxge_tx_qdpl_service(txq);
+ /* note: lock has been dropped */
+}
+
+void
+sfxge_tx_qflush_done(struct sfxge_txq *txq)
+{
+
+ txq->flush_state = SFXGE_FLUSH_DONE;
+}
+
+static void
+sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_txq *txq;
+ struct sfxge_evq *evq;
+ unsigned int count;
+
+ txq = sc->txq[index];
+ evq = sc->evq[txq->evq_index];
+
+ mtx_lock(SFXGE_TXQ_LOCK(txq));
+
+ KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
+ ("txq->init_state != SFXGE_TXQ_STARTED"));
+
+ txq->init_state = SFXGE_TXQ_INITIALIZED;
+ txq->flush_state = SFXGE_FLUSH_PENDING;
+
+ /* Flush the transmit queue. */
+ efx_tx_qflush(txq->common);
+
+ mtx_unlock(SFXGE_TXQ_LOCK(txq));
+
+ count = 0;
+ do {
+ /* Spin for 100ms. */
+ DELAY(100000);
+
+ if (txq->flush_state != SFXGE_FLUSH_PENDING)
+ break;
+ } while (++count < 20);
+
+ mtx_lock(&evq->lock);
+ mtx_lock(SFXGE_TXQ_LOCK(txq));
+
+ KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
+ ("txq->flush_state == SFXGE_FLUSH_FAILED"));
+
+ txq->flush_state = SFXGE_FLUSH_DONE;
+
+ txq->blocked = 0;
+ txq->pending = txq->added;
+
+ sfxge_tx_qcomplete(txq);
+ KASSERT(txq->completed == txq->added,
+ ("txq->completed != txq->added"));
+
+ sfxge_tx_qreap(txq);
+ KASSERT(txq->reaped == txq->completed,
+ ("txq->reaped != txq->completed"));
+
+ txq->added = 0;
+ txq->pending = 0;
+ txq->completed = 0;
+ txq->reaped = 0;
+
+ /* Destroy the common code transmit queue. */
+ efx_tx_qdestroy(txq->common);
+ txq->common = NULL;
+
+ efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
+ EFX_TXQ_NBUFS(SFXGE_NDESCS));
+
+ mtx_unlock(&evq->lock);
+ mtx_unlock(SFXGE_TXQ_LOCK(txq));
+}
+
+static int
+sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_txq *txq;
+ efsys_mem_t *esmp;
+ uint16_t flags;
+ struct sfxge_evq *evq;
+ int rc;
+
+ txq = sc->txq[index];
+ esmp = &txq->mem;
+ evq = sc->evq[txq->evq_index];
+
+ KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
+ ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
+ KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
+ ("evq->init_state != SFXGE_EVQ_STARTED"));
+
+ /* Program the buffer table. */
+ if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
+ EFX_TXQ_NBUFS(SFXGE_NDESCS))) != 0)
+ return rc;
+
+ /* Determine the kind of queue we are creating. */
+ switch (txq->type) {
+ case SFXGE_TXQ_NON_CKSUM:
+ flags = 0;
+ break;
+ case SFXGE_TXQ_IP_CKSUM:
+ flags = EFX_CKSUM_IPV4;
+ break;
+ case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
+ flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
+ break;
+ default:
+ KASSERT(0, ("Impossible TX queue"));
+ flags = 0;
+ break;
+ }
+
+ /* Create the common code transmit queue. */
+ if ((rc = efx_tx_qcreate(sc->enp, index, index, esmp,
+ SFXGE_NDESCS, txq->buf_base_id, flags, evq->common,
+ &txq->common)) != 0)
+ goto fail;
+
+ mtx_lock(SFXGE_TXQ_LOCK(txq));
+
+ /* Enable the transmit queue. */
+ efx_tx_qenable(txq->common);
+
+ txq->init_state = SFXGE_TXQ_STARTED;
+
+ mtx_unlock(SFXGE_TXQ_LOCK(txq));
+
+ return (0);
+
+fail:
+ efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
+ EFX_TXQ_NBUFS(SFXGE_NDESCS));
+ return rc;
+}
+
+void
+sfxge_tx_stop(struct sfxge_softc *sc)
+{
+ int index;
+
+ index = SFXGE_TX_SCALE(sc);
+ while (--index >= 0)
+ sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
+
+ sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);
+
+ sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);
+
+ /* Tear down the transmit module */
+ efx_tx_fini(sc->enp);
+}
+
+int
+sfxge_tx_start(struct sfxge_softc *sc)
+{
+ int index;
+ int rc;
+
+ /* Initialize the common code transmit module. */
+ if ((rc = efx_tx_init(sc->enp)) != 0)
+ return (rc);
+
+ if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_NON_CKSUM)) != 0)
+ goto fail;
+
+ if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_CKSUM)) != 0)
+ goto fail2;
+
+ for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
+ if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM +
+ index)) != 0)
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ while (--index >= 0)
+ sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
+
+ sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);
+
+fail2:
+ sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);
+
+fail:
+ efx_tx_fini(sc->enp);
+
+ return (rc);
+}
+
+/*
+ * Destroy a transmit queue.
+ */
+static void
+sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
+{
+ struct sfxge_txq *txq;
+ unsigned int nmaps = SFXGE_NDESCS;
+
+ txq = sc->txq[index];
+
+ KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
+ ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
+
+ if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
+ tso_fini(txq);
+
+ /* Free the context arrays. */
+ free(txq->pend_desc, M_SFXGE);
+ while (nmaps--)
+ bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
+ free(txq->stmp, M_SFXGE);
+
+ /* Release DMA memory mapping. */
+ sfxge_dma_free(&txq->mem);
+
+ sc->txq[index] = NULL;
+
+#ifdef SFXGE_HAVE_MQ
+ mtx_destroy(&txq->lock);
+#endif
+
+ free(txq, M_SFXGE);
+}
+
+static int
+sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
+ enum sfxge_txq_type type, unsigned int evq_index)
+{
+ struct sfxge_txq *txq;
+ struct sfxge_evq *evq;
+#ifdef SFXGE_HAVE_MQ
+ struct sfxge_tx_dpl *stdp;
+#endif
+ efsys_mem_t *esmp;
+ unsigned int nmaps;
+ int rc;
+
+ txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
+ txq->sc = sc;
+
+ sc->txq[txq_index] = txq;
+ esmp = &txq->mem;
+
+ evq = sc->evq[evq_index];
+
+ /* Allocate and zero DMA space for the descriptor ring. */
+ if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
+ return (rc);
+ (void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(SFXGE_NDESCS));
+
+ /* Allocate buffer table entries. */
+ sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(SFXGE_NDESCS),
+ &txq->buf_base_id);
+
+ /* Create a DMA tag for packet mappings. */
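+	/*
+	 * The 0x11000 (68KB) maximum mapping size allows for a TSO
+	 * payload of just over 64KB plus headers; the 0x1000 boundary
+	 * and maximum segment size are assumed to match the hardware's
+	 * 4KB descriptor addressing limits.
+	 */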
+ if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
+ MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
+ NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
+ &txq->packet_dma_tag) != 0) {
+ device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
+ rc = ENOMEM;
+ goto fail;
+ }
+
+ /* Allocate pending descriptor array for batching writes. */
+ txq->pend_desc = malloc(sizeof(efx_buffer_t) * SFXGE_NDESCS,
+ M_SFXGE, M_ZERO | M_WAITOK);
+
+ /* Allocate and initialise mbuf DMA mapping array. */
+ txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * SFXGE_NDESCS,
+ M_SFXGE, M_ZERO | M_WAITOK);
+ for (nmaps = 0; nmaps < SFXGE_NDESCS; nmaps++) {
+ rc = bus_dmamap_create(txq->packet_dma_tag, 0,
+ &txq->stmp[nmaps].map);
+ if (rc != 0)
+ goto fail2;
+ }
+
+ if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
+ (rc = tso_init(txq)) != 0)
+ goto fail3;
+
+#ifdef SFXGE_HAVE_MQ
+ /* Initialize the deferred packet list. */
+ stdp = &txq->dpl;
+ stdp->std_getp = &stdp->std_get;
+
+ mtx_init(&txq->lock, "txq", NULL, MTX_DEF);
+#endif
+
+ txq->type = type;
+ txq->evq_index = evq_index;
+ txq->txq_index = txq_index;
+ txq->init_state = SFXGE_TXQ_INITIALIZED;
+
+ return (0);
+
+fail3:
+ free(txq->pend_desc, M_SFXGE);
+fail2:
+ while (nmaps--)
+ bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
+ free(txq->stmp, M_SFXGE);
+ bus_dma_tag_destroy(txq->packet_dma_tag);
+
+fail:
+ sfxge_dma_free(esmp);
+
+ return (rc);
+}
+
+static const struct {
+ const char *name;
+ size_t offset;
+} sfxge_tx_stats[] = {
+#define SFXGE_TX_STAT(name, member) \
+ { #name, offsetof(struct sfxge_txq, member) }
+ SFXGE_TX_STAT(tso_bursts, tso_bursts),
+ SFXGE_TX_STAT(tso_packets, tso_packets),
+ SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
+ SFXGE_TX_STAT(tx_collapses, collapses),
+ SFXGE_TX_STAT(tx_drops, drops),
+};
+
+static int
+sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sfxge_softc *sc = arg1;
+ unsigned int id = arg2;
+ unsigned long sum;
+ unsigned int index;
+
+ /* Sum across all TX queues */
+ sum = 0;
+ for (index = 0;
+ index < SFXGE_TXQ_IP_TCP_UDP_CKSUM + SFXGE_TX_SCALE(sc);
+ index++)
+ sum += *(unsigned long *)((caddr_t)sc->txq[index] +
+ sfxge_tx_stats[id].offset);
+
+ return SYSCTL_OUT(req, &sum, sizeof(sum));
+}
+
+static void
+sfxge_tx_stat_init(struct sfxge_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
+ struct sysctl_oid_list *stat_list;
+ unsigned int id;
+
+ stat_list = SYSCTL_CHILDREN(sc->stats_node);
+
+ for (id = 0;
+ id < sizeof(sfxge_tx_stats) / sizeof(sfxge_tx_stats[0]);
+ id++) {
+ SYSCTL_ADD_PROC(
+ ctx, stat_list,
+ OID_AUTO, sfxge_tx_stats[id].name,
+ CTLTYPE_ULONG|CTLFLAG_RD,
+ sc, id, sfxge_tx_stat_handler, "LU",
+ "");
+ }
+}
+
+void
+sfxge_tx_fini(struct sfxge_softc *sc)
+{
+ int index;
+
+ index = SFXGE_TX_SCALE(sc);
+ while (--index >= 0)
+ sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
+
+ sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
+ sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
+}
+
+int
+sfxge_tx_init(struct sfxge_softc *sc)
+{
+ struct sfxge_intr *intr;
+ int index;
+ int rc;
+
+ intr = &sc->intr;
+
+ KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
+ ("intr->state != SFXGE_INTR_INITIALIZED"));
+
+ /* Initialize the transmit queues */
+ if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
+ SFXGE_TXQ_NON_CKSUM, 0)) != 0)
+ goto fail;
+
+ if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
+ SFXGE_TXQ_IP_CKSUM, 0)) != 0)
+ goto fail2;
+
+ for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
+ if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index,
+ SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
+ goto fail3;
+ }
+
+ sfxge_tx_stat_init(sc);
+
+ return (0);
+
+fail3:
+ sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
+
+ while (--index >= 0)
+ sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
+
+fail2:
+ sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
+
+fail:
+ return (rc);
+}
diff --git a/sys/dev/sfxge/sfxge_tx.h b/sys/dev/sfxge/sfxge_tx.h
new file mode 100644
index 0000000..483a16a
--- /dev/null
+++ b/sys/dev/sfxge/sfxge_tx.h
@@ -0,0 +1,185 @@
+/*-
+ * Copyright (c) 2010-2011 Solarflare Communications, Inc.
+ * All rights reserved.
+ *
+ * This software was developed in part by Philip Paeps under contract for
+ * Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SFXGE_TX_H
+#define _SFXGE_TX_H
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+
+/* Maximum number of DMA segments needed to map an mbuf chain. With
+ * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
+ * clusters. (The chain could be longer than this initially, but can
+ * be shortened with m_collapse().)
+ */
+#define SFXGE_TX_MAPPING_MAX_SEG (64 / 2 + 1)
+
+/* Maximum number of DMA segments needed to map an output packet. It
+ * could overlap all mbufs in the chain and also require an extra
+ * segment for a TSO header.
+ */
+#define SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1)
+
+/*
+ * Buffer mapping flags.
+ *
+ * Buffers and DMA mappings must be freed when the last descriptor
+ * referring to them is completed. Set the TX_BUF_UNMAP and
+ * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
+ * chain. Set only the TX_BUF_UNMAP flag on a descriptor referring to
+ * a heap buffer.
+ */
+enum sfxge_tx_buf_flags {
+ TX_BUF_UNMAP = 1,
+ TX_BUF_MBUF = 2,
+};
+
+/*
+ * Buffer mapping information for descriptors in flight.
+ */
+struct sfxge_tx_mapping {
+ union {
+ struct mbuf *mbuf;
+ caddr_t heap_buf;
+ } u;
+ bus_dmamap_t map;
+ enum sfxge_tx_buf_flags flags;
+};
+
+#define SFXGE_TX_MAX_DEFERRED 64
+
+/*
+ * Deferred packet list.
+ */
+struct sfxge_tx_dpl {
+ uintptr_t std_put; /* Head of put list. */
+ struct mbuf *std_get; /* Head of get list. */
+ struct mbuf **std_getp; /* Tail of get list. */
+ unsigned int std_count; /* Count of packets. */
+};
+
+
+#define SFXGE_TX_BUFFER_SIZE 0x400
+#define SFXGE_TX_HEADER_SIZE 0x100
+#define SFXGE_TX_COPY_THRESHOLD 0x200
+
+enum sfxge_txq_state {
+ SFXGE_TXQ_UNINITIALIZED = 0,
+ SFXGE_TXQ_INITIALIZED,
+ SFXGE_TXQ_STARTED
+};
+
+enum sfxge_txq_type {
+ SFXGE_TXQ_NON_CKSUM = 0,
+ SFXGE_TXQ_IP_CKSUM,
+ SFXGE_TXQ_IP_TCP_UDP_CKSUM,
+ SFXGE_TXQ_NTYPES
+};
+
+#define SFXGE_TXQ_UNBLOCK_LEVEL (EFX_TXQ_LIMIT(SFXGE_NDESCS) / 4)
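+
+/* Unblocking only once the queue has drained to a quarter of its limit
+ * presumably provides hysteresis against rapid block/unblock cycling.
+ */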
+
+#define SFXGE_TX_BATCH 64
+
+#ifdef SFXGE_HAVE_MQ
+#define SFXGE_TXQ_LOCK(txq) (&(txq)->lock)
+#define SFXGE_TX_SCALE(sc) ((sc)->intr.n_alloc)
+#else
+#define SFXGE_TXQ_LOCK(txq) (&(txq)->sc->tx_lock)
+#define SFXGE_TX_SCALE(sc) 1
+#endif
+
+struct sfxge_txq {
+ /* The following fields should be written very rarely */
+ struct sfxge_softc *sc;
+ enum sfxge_txq_state init_state;
+ enum sfxge_flush_state flush_state;
+ enum sfxge_txq_type type;
+ unsigned int txq_index;
+ unsigned int evq_index;
+ efsys_mem_t mem;
+ unsigned int buf_base_id;
+
+ struct sfxge_tx_mapping *stmp; /* Packets in flight. */
+ bus_dma_tag_t packet_dma_tag;
+ efx_buffer_t *pend_desc;
+ efx_txq_t *common;
+ struct sfxge_txq *next;
+
+ efsys_mem_t *tsoh_buffer;
+
+ /* This field changes more often and is read regularly on both
+ * the initiation and completion paths
+ */
+ int blocked __aligned(CACHE_LINE_SIZE);
+
+ /* The following fields change more often, and are used mostly
+ * on the initiation path
+ */
+#ifdef SFXGE_HAVE_MQ
+ struct mtx lock __aligned(CACHE_LINE_SIZE);
+ struct sfxge_tx_dpl dpl; /* Deferred packet list. */
+ unsigned int n_pend_desc;
+#else
+ unsigned int n_pend_desc __aligned(CACHE_LINE_SIZE);
+#endif
+ unsigned int added;
+ unsigned int reaped;
+ /* Statistics */
+ unsigned long tso_bursts;
+ unsigned long tso_packets;
+ unsigned long tso_long_headers;
+ unsigned long collapses;
+ unsigned long drops;
+
+ /* The following fields change more often, and are used mostly
+ * on the completion path
+ */
+ unsigned int pending __aligned(CACHE_LINE_SIZE);
+ unsigned int completed;
+};
+
+extern int sfxge_tx_packet_add(struct sfxge_txq *, struct mbuf *);
+
+extern int sfxge_tx_init(struct sfxge_softc *sc);
+extern void sfxge_tx_fini(struct sfxge_softc *sc);
+extern int sfxge_tx_start(struct sfxge_softc *sc);
+extern void sfxge_tx_stop(struct sfxge_softc *sc);
+extern void sfxge_tx_qcomplete(struct sfxge_txq *txq);
+extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
+#ifdef SFXGE_HAVE_MQ
+extern void sfxge_if_qflush(struct ifnet *ifp);
+extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);
+#else
+extern void sfxge_if_start(struct ifnet *ifp);
+#endif
+
+#endif
diff --git a/sys/dev/siba/siba_core.c b/sys/dev/siba/siba_core.c
index 2b1a84f..61652ad 100644
--- a/sys/dev/siba/siba_core.c
+++ b/sys/dev/siba/siba_core.c
@@ -214,16 +214,8 @@ siba_core_attach(struct siba_softc *siba)
int
siba_core_detach(struct siba_softc *siba)
{
- device_t *devlistp;
- int devcnt, error = 0, i;
-
- error = device_get_children(siba->siba_dev, &devlistp, &devcnt);
- if (error != 0)
- return (0);
-
- for ( i = 0 ; i < devcnt ; i++)
- device_delete_child(siba->siba_dev, devlistp[i]);
- free(devlistp, M_TEMP);
+ /* detach & delete all children */
+ device_delete_all_children(siba->siba_dev);
return (0);
}
diff --git a/sys/dev/siis/siis.c b/sys/dev/siis/siis.c
index a7b018a..40c17cb 100644
--- a/sys/dev/siis/siis.c
+++ b/sys/dev/siis/siis.c
@@ -91,7 +91,7 @@ static void siis_process_request_sense(device_t dev, union ccb *ccb);
static void siisaction(struct cam_sim *sim, union ccb *ccb);
static void siispoll(struct cam_sim *sim);
-MALLOC_DEFINE(M_SIIS, "SIIS driver", "SIIS driver data buffers");
+static MALLOC_DEFINE(M_SIIS, "SIIS driver", "SIIS driver data buffers");
static struct {
uint32_t id;
@@ -205,15 +205,10 @@ static int
siis_detach(device_t dev)
{
struct siis_controller *ctlr = device_get_softc(dev);
- device_t *children;
- int nchildren, i;
/* Detach & delete all children */
- if (!device_get_children(dev, &children, &nchildren)) {
- for (i = 0; i < nchildren; i++)
- device_delete_child(dev, children[i]);
- free(children, M_TEMP);
- }
+ device_delete_all_children(dev);
+
/* Free interrupts. */
if (ctlr->irq.r_irq) {
bus_teardown_intr(dev, ctlr->irq.r_irq,
diff --git a/sys/dev/sio/sio_pci.c b/sys/dev/sio/sio_pci.c
index 3564194..a693536 100644
--- a/sys/dev/sio/sio_pci.c
+++ b/sys/dev/sio/sio_pci.c
@@ -76,7 +76,8 @@ static struct pci_ids pci_ids[] = {
{ 0x7101135e, "SeaLevel Ultra 530.PCI Single Port Serial", 0x18 },
{ 0x0000151f, "SmartLink 5634PCV SurfRider", 0x10 },
{ 0x0103115d, "Xircom Cardbus modem", 0x10 },
- { 0x432214e4, "Broadcom 802.11g/GPRS CardBus (Serial)", 0x10 },
+ { 0x432214e4, "Broadcom 802.11b/GPRS CardBus (Serial)", 0x10 },
+ { 0x434414e4, "Broadcom 802.11bg/EDGE/GPRS CardBus (Serial)", 0x10 },
{ 0x01c0135c, "Quatech SSCLP-200/300", 0x18
/*
* NB: You must mount the "SPAD" jumper to correctly detect
diff --git a/sys/dev/sis/if_sis.c b/sys/dev/sis/if_sis.c
index d0be783..630281d 100644
--- a/sys/dev/sis/if_sis.c
+++ b/sys/dev/sis/if_sis.c
@@ -90,6 +90,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
@@ -119,10 +120,13 @@ MODULE_DEPEND(sis, miibus, 1, 1, 1);
#define CSR_READ_2(sc, reg) bus_read_2(sc->sis_res[0], reg)
+#define CSR_BARRIER(sc, reg, length, flags) \
+ bus_barrier(sc->sis_res[0], reg, length, flags)
+
/*
* Various supported device vendors/types and their names.
*/
-static struct sis_type sis_devs[] = {
+static const struct sis_type sis_devs[] = {
{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
@@ -145,6 +149,8 @@ static void sis_init(void *);
static void sis_initl(struct sis_softc *);
static void sis_intr(void *);
static int sis_ioctl(struct ifnet *, u_long, caddr_t);
+static uint32_t sis_mii_bitbang_read(device_t);
+static void sis_mii_bitbang_write(device_t, uint32_t);
static int sis_newbuf(struct sis_softc *, struct sis_rxdesc *);
static int sis_resume(device_t);
static int sis_rxeof(struct sis_softc *);
@@ -159,6 +165,20 @@ static void sis_add_sysctls(struct sis_softc *);
static void sis_watchdog(struct sis_softc *);
static void sis_wol(struct sis_softc *);
+/*
+ * MII bit-bang glue
+ */
+static const struct mii_bitbang_ops sis_mii_bitbang_ops = {
+ sis_mii_bitbang_read,
+ sis_mii_bitbang_write,
+ {
+ SIS_MII_DATA, /* MII_BIT_MDO */
+ SIS_MII_DATA, /* MII_BIT_MDI */
+ SIS_MII_CLK, /* MII_BIT_MDC */
+ SIS_MII_DIR, /* MII_BIT_DIR_HOST_PHY */
+ 0, /* MII_BIT_DIR_PHY_HOST */
+ }
+};
static struct resource_spec sis_res_spec[] = {
#ifdef SIS_USEIOSPACE
@@ -412,179 +432,41 @@ sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
#endif
/*
- * Sync the PHYs by setting data bit and strobing the clock 32 times.
+ * Read the MII serial port for the MII bit-bang module.
*/
-static void
-sis_mii_sync(struct sis_softc *sc)
+static uint32_t
+sis_mii_bitbang_read(device_t dev)
{
- int i;
+ struct sis_softc *sc;
+ uint32_t val;
- SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
+ sc = device_get_softc(dev);
- for (i = 0; i < 32; i++) {
- SIO_SET(SIS_MII_CLK);
- DELAY(1);
- SIO_CLR(SIS_MII_CLK);
- DELAY(1);
- }
+ val = CSR_READ_4(sc, SIS_EECTL);
+ CSR_BARRIER(sc, SIS_EECTL, 4,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+ return (val);
}
/*
- * Clock a series of bits through the MII.
+ * Write the MII serial port for the MII bit-bang module.
*/
static void
-sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
-{
- int i;
-
- SIO_CLR(SIS_MII_CLK);
-
- for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
- if (bits & i) {
- SIO_SET(SIS_MII_DATA);
- } else {
- SIO_CLR(SIS_MII_DATA);
- }
- DELAY(1);
- SIO_CLR(SIS_MII_CLK);
- DELAY(1);
- SIO_SET(SIS_MII_CLK);
- }
-}
-
-/*
- * Read an PHY register through the MII.
- */
-static int
-sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
+sis_mii_bitbang_write(device_t dev, uint32_t val)
{
- int i, ack;
-
- /*
- * Set up frame for RX.
- */
- frame->mii_stdelim = SIS_MII_STARTDELIM;
- frame->mii_opcode = SIS_MII_READOP;
- frame->mii_turnaround = 0;
- frame->mii_data = 0;
-
- /*
- * Turn on data xmit.
- */
- SIO_SET(SIS_MII_DIR);
-
- sis_mii_sync(sc);
-
- /*
- * Send command/address info.
- */
- sis_mii_send(sc, frame->mii_stdelim, 2);
- sis_mii_send(sc, frame->mii_opcode, 2);
- sis_mii_send(sc, frame->mii_phyaddr, 5);
- sis_mii_send(sc, frame->mii_regaddr, 5);
-
- /* Idle bit */
- SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
- DELAY(1);
- SIO_SET(SIS_MII_CLK);
- DELAY(1);
-
- /* Turn off xmit. */
- SIO_CLR(SIS_MII_DIR);
-
- /* Check for ack */
- SIO_CLR(SIS_MII_CLK);
- DELAY(1);
- ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
- SIO_SET(SIS_MII_CLK);
- DELAY(1);
-
- /*
- * Now try reading data bits. If the ack failed, we still
- * need to clock through 16 cycles to keep the PHY(s) in sync.
- */
- if (ack) {
- for (i = 0; i < 16; i++) {
- SIO_CLR(SIS_MII_CLK);
- DELAY(1);
- SIO_SET(SIS_MII_CLK);
- DELAY(1);
- }
- goto fail;
- }
-
- for (i = 0x8000; i; i >>= 1) {
- SIO_CLR(SIS_MII_CLK);
- DELAY(1);
- if (!ack) {
- if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
- frame->mii_data |= i;
- DELAY(1);
- }
- SIO_SET(SIS_MII_CLK);
- DELAY(1);
- }
-
-fail:
-
- SIO_CLR(SIS_MII_CLK);
- DELAY(1);
- SIO_SET(SIS_MII_CLK);
- DELAY(1);
-
- if (ack)
- return (1);
- return (0);
-}
-
-/*
- * Write to a PHY register through the MII.
- */
-static int
-sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
-{
-
- /*
- * Set up frame for TX.
- */
-
- frame->mii_stdelim = SIS_MII_STARTDELIM;
- frame->mii_opcode = SIS_MII_WRITEOP;
- frame->mii_turnaround = SIS_MII_TURNAROUND;
-
- /*
- * Turn on data output.
- */
- SIO_SET(SIS_MII_DIR);
-
- sis_mii_sync(sc);
-
- sis_mii_send(sc, frame->mii_stdelim, 2);
- sis_mii_send(sc, frame->mii_opcode, 2);
- sis_mii_send(sc, frame->mii_phyaddr, 5);
- sis_mii_send(sc, frame->mii_regaddr, 5);
- sis_mii_send(sc, frame->mii_turnaround, 2);
- sis_mii_send(sc, frame->mii_data, 16);
-
- /* Idle bit. */
- SIO_SET(SIS_MII_CLK);
- DELAY(1);
- SIO_CLR(SIS_MII_CLK);
- DELAY(1);
+ struct sis_softc *sc;
- /*
- * Turn off xmit.
- */
- SIO_CLR(SIS_MII_DIR);
+ sc = device_get_softc(dev);
- return (0);
+ CSR_WRITE_4(sc, SIS_EECTL, val);
+ CSR_BARRIER(sc, SIS_EECTL, 4,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
static int
sis_miibus_readreg(device_t dev, int phy, int reg)
{
struct sis_softc *sc;
- struct sis_mii_frame frame;
sc = device_get_softc(dev);
@@ -628,7 +510,8 @@ sis_miibus_readreg(device_t dev, int phy, int reg)
}
if (i == SIS_TIMEOUT) {
- device_printf(sc->sis_dev, "PHY failed to come ready\n");
+ device_printf(sc->sis_dev,
+ "PHY failed to come ready\n");
return (0);
}
@@ -638,22 +521,15 @@ sis_miibus_readreg(device_t dev, int phy, int reg)
return (0);
return (val);
- } else {
- bzero((char *)&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- sis_mii_readreg(sc, &frame);
-
- return (frame.mii_data);
- }
+ } else
+ return (mii_bitbang_readreg(dev, &sis_mii_bitbang_ops, phy,
+ reg));
}
static int
sis_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct sis_softc *sc;
- struct sis_mii_frame frame;
sc = device_get_softc(dev);
@@ -686,15 +562,11 @@ sis_miibus_writereg(device_t dev, int phy, int reg, int data)
}
if (i == SIS_TIMEOUT)
- device_printf(sc->sis_dev, "PHY failed to come ready\n");
- } else {
- bzero((char *)&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- frame.mii_data = data;
- sis_mii_writereg(sc, &frame);
- }
+ device_printf(sc->sis_dev,
+ "PHY failed to come ready\n");
+ } else
+ mii_bitbang_writereg(dev, &sis_mii_bitbang_ops, phy, reg,
+ data);
return (0);
}
@@ -989,7 +861,7 @@ sis_reset(struct sis_softc *sc)
static int
sis_probe(device_t dev)
{
- struct sis_type *t;
+ const struct sis_type *t;
t = sis_devs;
diff --git a/sys/dev/sis/if_sisreg.h b/sys/dev/sis/if_sisreg.h
index c86a13d..b400806 100644
--- a/sys/dev/sis/if_sisreg.h
+++ b/sys/dev/sis/if_sisreg.h
@@ -433,26 +433,9 @@ struct sis_desc {
struct sis_type {
uint16_t sis_vid;
uint16_t sis_did;
- char *sis_name;
+ const char *sis_name;
};
-struct sis_mii_frame {
- uint8_t mii_stdelim;
- uint8_t mii_opcode;
- uint8_t mii_phyaddr;
- uint8_t mii_regaddr;
- uint8_t mii_turnaround;
- uint16_t mii_data;
-};
-
-/*
- * MII constants
- */
-#define SIS_MII_STARTDELIM 0x01
-#define SIS_MII_READOP 0x02
-#define SIS_MII_WRITEOP 0x01
-#define SIS_MII_TURNAROUND 0x02
-
#define SIS_TYPE_900 1
#define SIS_TYPE_7016 2
#define SIS_TYPE_83815 3
diff --git a/sys/dev/smc/if_smc.c b/sys/dev/smc/if_smc.c
index 4aa0396..9404f56 100644
--- a/sys/dev/smc/if_smc.c
+++ b/sys/dev/smc/if_smc.c
@@ -74,6 +74,7 @@ __FBSDID("$FreeBSD$");
#include <dev/smc/if_smcvar.h>
#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#define SMC_LOCK(sc) mtx_lock(&(sc)->smc_mtx)
@@ -123,11 +124,33 @@ static timeout_t smc_watchdog;
static poll_handler_t smc_poll;
#endif
+/*
+ * MII bit-bang glue
+ */
+static uint32_t smc_mii_bitbang_read(device_t);
+static void smc_mii_bitbang_write(device_t, uint32_t);
+
+static const struct mii_bitbang_ops smc_mii_bitbang_ops = {
+ smc_mii_bitbang_read,
+ smc_mii_bitbang_write,
+ {
+ MGMT_MDO, /* MII_BIT_MDO */
+ MGMT_MDI, /* MII_BIT_MDI */
+ MGMT_MCLK, /* MII_BIT_MDC */
+ MGMT_MDOE, /* MII_BIT_DIR_HOST_PHY */
+ 0, /* MII_BIT_DIR_PHY_HOST */
+ }
+};
+
static __inline void
smc_select_bank(struct smc_softc *sc, uint16_t bank)
{
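+	/* Fence the bank switch against surrounding register accesses. */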
+ bus_barrier(sc->smc_reg, BSR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
bus_write_2(sc->smc_reg, BSR, bank & BSR_BANK_MASK);
+ bus_barrier(sc->smc_reg, BSR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/* Never call this when not in bank 2. */
@@ -143,35 +166,35 @@ smc_mmu_wait(struct smc_softc *sc)
}
static __inline uint8_t
-smc_read_1(struct smc_softc *sc, bus_addr_t offset)
+smc_read_1(struct smc_softc *sc, bus_size_t offset)
{
return (bus_read_1(sc->smc_reg, offset));
}
static __inline void
-smc_write_1(struct smc_softc *sc, bus_addr_t offset, uint8_t val)
+smc_write_1(struct smc_softc *sc, bus_size_t offset, uint8_t val)
{
bus_write_1(sc->smc_reg, offset, val);
}
static __inline uint16_t
-smc_read_2(struct smc_softc *sc, bus_addr_t offset)
+smc_read_2(struct smc_softc *sc, bus_size_t offset)
{
return (bus_read_2(sc->smc_reg, offset));
}
static __inline void
-smc_write_2(struct smc_softc *sc, bus_addr_t offset, uint16_t val)
+smc_write_2(struct smc_softc *sc, bus_size_t offset, uint16_t val)
{
bus_write_2(sc->smc_reg, offset, val);
}
static __inline void
-smc_read_multi_2(struct smc_softc *sc, bus_addr_t offset, uint16_t *datap,
+smc_read_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap,
bus_size_t count)
{
@@ -179,13 +202,21 @@ smc_read_multi_2(struct smc_softc *sc, bus_addr_t offset, uint16_t *datap,
}
static __inline void
-smc_write_multi_2(struct smc_softc *sc, bus_addr_t offset, uint16_t *datap,
+smc_write_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap,
bus_size_t count)
{
bus_write_multi_2(sc->smc_reg, offset, datap, count);
}
+static __inline void
+smc_barrier(struct smc_softc *sc, bus_size_t offset, bus_size_t length,
+ int flags)
+{
+
+ bus_barrier(sc->smc_reg, offset, length, flags);
+}
+
int
smc_probe(device_t dev)
{
@@ -900,70 +931,43 @@ smc_task_intr(void *context, int pending)
SMC_UNLOCK(sc);
}
-static u_int
-smc_mii_readbits(struct smc_softc *sc, int nbits)
+static uint32_t
+smc_mii_bitbang_read(device_t dev)
{
- u_int mgmt, mask, val;
+ struct smc_softc *sc;
+ uint32_t val;
+
+ sc = device_get_softc(dev);
SMC_ASSERT_LOCKED(sc);
KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3,
- ("%s: smc_mii_readbits called with bank %d (!= 3)",
+ ("%s: smc_mii_bitbang_read called with bank %d (!= 3)",
device_get_nameunit(sc->smc_dev),
smc_read_2(sc, BSR) & BSR_BANK_MASK));
- /*
- * Set up the MGMT (aka MII) register.
- */
- mgmt = smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO);
- smc_write_2(sc, MGMT, mgmt);
-
- /*
- * Read the bits in.
- */
- for (mask = 1 << (nbits - 1), val = 0; mask; mask >>= 1) {
- if (smc_read_2(sc, MGMT) & MGMT_MDI)
- val |= mask;
-
- smc_write_2(sc, MGMT, mgmt);
- DELAY(1);
- smc_write_2(sc, MGMT, mgmt | MGMT_MCLK);
- DELAY(1);
- }
+ val = smc_read_2(sc, MGMT);
+ smc_barrier(sc, MGMT, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
return (val);
}
static void
-smc_mii_writebits(struct smc_softc *sc, u_int val, int nbits)
+smc_mii_bitbang_write(device_t dev, uint32_t val)
{
- u_int mgmt, mask;
+ struct smc_softc *sc;
+
+ sc = device_get_softc(dev);
SMC_ASSERT_LOCKED(sc);
KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3,
- ("%s: smc_mii_writebits called with bank %d (!= 3)",
+ ("%s: smc_mii_bitbang_write called with bank %d (!= 3)",
device_get_nameunit(sc->smc_dev),
smc_read_2(sc, BSR) & BSR_BANK_MASK));
- /*
- * Set up the MGMT (aka MII) register).
- */
- mgmt = smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO);
- mgmt |= MGMT_MDOE;
-
- /*
- * Push the bits out.
- */
- for (mask = 1 << (nbits - 1); mask; mask >>= 1) {
- if (val & mask)
- mgmt |= MGMT_MDO;
- else
- mgmt &= ~MGMT_MDO;
-
- smc_write_2(sc, MGMT, mgmt);
- DELAY(1);
- smc_write_2(sc, MGMT, mgmt | MGMT_MCLK);
- DELAY(1);
- }
+ smc_write_2(sc, MGMT, val);
+ smc_barrier(sc, MGMT, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
int
@@ -978,26 +982,7 @@ smc_miibus_readreg(device_t dev, int phy, int reg)
smc_select_bank(sc, 3);
- /*
- * Send out the idle pattern.
- */
- smc_mii_writebits(sc, 0xffffffff, 32);
-
- /*
- * Start code + read opcode + phy address + phy register
- */
- smc_mii_writebits(sc, 6 << 10 | phy << 5 | reg, 14);
-
- /*
- * Turnaround + data
- */
- val = smc_mii_readbits(sc, 18);
-
- /*
- * Reset the MDIO interface.
- */
- smc_write_2(sc, MGMT,
- smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO));
+ val = mii_bitbang_readreg(dev, &smc_mii_bitbang_ops, phy, reg);
SMC_UNLOCK(sc);
return (val);
@@ -1014,23 +999,7 @@ smc_miibus_writereg(device_t dev, int phy, int reg, int data)
smc_select_bank(sc, 3);
- /*
- * Send idle pattern.
- */
- smc_mii_writebits(sc, 0xffffffff, 32);
-
- /*
- * Start code + write opcode + phy address + phy register + turnaround
- * + data.
- */
- smc_mii_writebits(sc, 5 << 28 | phy << 23 | reg << 18 | 2 << 16 | data,
- 32);
-
- /*
- * Reset MDIO interface.
- */
- smc_write_2(sc, MGMT,
- smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO));
+ mii_bitbang_writereg(dev, &smc_mii_bitbang_ops, phy, reg, data);
SMC_UNLOCK(sc);
return (0);
diff --git a/sys/dev/sound/midi/midi.c b/sys/dev/sound/midi/midi.c
index 97492a7..964e1bd 100644
--- a/sys/dev/sound/midi/midi.c
+++ b/sys/dev/sound/midi/midi.c
@@ -239,7 +239,7 @@ static int midi_unload(void);
* Misc declr.
*/
SYSCTL_NODE(_hw, OID_AUTO, midi, CTLFLAG_RD, 0, "Midi driver");
-SYSCTL_NODE(_hw_midi, OID_AUTO, stat, CTLFLAG_RD, 0, "Status device");
+static SYSCTL_NODE(_hw_midi, OID_AUTO, stat, CTLFLAG_RD, 0, "Status device");
int midi_debug;
/* XXX: should this be moved into debug.midi? */
diff --git a/sys/dev/sound/pci/envy24.c b/sys/dev/sound/pci/envy24.c
index 57a8ed6..1c59765 100644
--- a/sys/dev/sound/pci/envy24.c
+++ b/sys/dev/sound/pci/envy24.c
@@ -41,7 +41,7 @@
SND_DECLARE_FILE("$FreeBSD$");
-MALLOC_DEFINE(M_ENVY24, "envy24", "envy24 audio");
+static MALLOC_DEFINE(M_ENVY24, "envy24", "envy24 audio");
/* -------------------------------------------------------------------- */
diff --git a/sys/dev/sound/pci/envy24ht.c b/sys/dev/sound/pci/envy24ht.c
index 0358625..25d2e3f 100644
--- a/sys/dev/sound/pci/envy24ht.c
+++ b/sys/dev/sound/pci/envy24ht.c
@@ -53,7 +53,7 @@
SND_DECLARE_FILE("$FreeBSD$");
-MALLOC_DEFINE(M_ENVY24HT, "envy24ht", "envy24ht audio");
+static MALLOC_DEFINE(M_ENVY24HT, "envy24ht", "envy24ht audio");
/* -------------------------------------------------------------------- */
diff --git a/sys/dev/sound/pci/maestro.c b/sys/dev/sound/pci/maestro.c
index 7c977ad..66567fc 100644
--- a/sys/dev/sound/pci/maestro.c
+++ b/sys/dev/sound/pci/maestro.c
@@ -191,7 +191,7 @@ static unsigned int powerstate_init = PCI_POWERSTATE_D2;
/* XXX: this should move to a device specific sysctl dev.pcm.X.debug.Y via
device_get_sysctl_*() as discussed on multimedia@ in msg-id
<861wujij2q.fsf@xps.des.no> */
-SYSCTL_NODE(_debug, OID_AUTO, maestro, CTLFLAG_RD, 0, "");
+static SYSCTL_NODE(_debug, OID_AUTO, maestro, CTLFLAG_RD, 0, "");
SYSCTL_UINT(_debug_maestro, OID_AUTO, powerstate_active, CTLFLAG_RW,
&powerstate_active, 0, "The Dx power state when active (0-1)");
SYSCTL_UINT(_debug_maestro, OID_AUTO, powerstate_idle, CTLFLAG_RW,
diff --git a/sys/dev/sound/pci/spicds.c b/sys/dev/sound/pci/spicds.c
index 3a4002c..3e1df4c 100644
--- a/sys/dev/sound/pci/spicds.c
+++ b/sys/dev/sound/pci/spicds.c
@@ -35,7 +35,7 @@
#include <dev/sound/pci/spicds.h>
-MALLOC_DEFINE(M_SPICDS, "spicds", "SPI codec");
+static MALLOC_DEFINE(M_SPICDS, "spicds", "SPI codec");
#define SPICDS_NAMELEN 16
struct spicds_info {
diff --git a/sys/dev/sound/pcm/ac97.c b/sys/dev/sound/pcm/ac97.c
index ad164d6..bc36948 100644
--- a/sys/dev/sound/pcm/ac97.c
+++ b/sys/dev/sound/pcm/ac97.c
@@ -38,7 +38,7 @@
SND_DECLARE_FILE("$FreeBSD$");
-MALLOC_DEFINE(M_AC97, "ac97", "ac97 codec");
+static MALLOC_DEFINE(M_AC97, "ac97", "ac97 codec");
struct ac97mixtable_entry {
int reg; /* register index */
diff --git a/sys/dev/sound/pcm/feeder.c b/sys/dev/sound/pcm/feeder.c
index 916b2d9..720f676 100644
--- a/sys/dev/sound/pcm/feeder.c
+++ b/sys/dev/sound/pcm/feeder.c
@@ -35,7 +35,7 @@
SND_DECLARE_FILE("$FreeBSD$");
-MALLOC_DEFINE(M_FEEDER, "feeder", "pcm feeder");
+static MALLOC_DEFINE(M_FEEDER, "feeder", "pcm feeder");
#define MAXFEEDERS 256
#undef FEEDER_DEBUG
diff --git a/sys/dev/sound/pcm/mixer.c b/sys/dev/sound/pcm/mixer.c
index 4c60c50..58b9326 100644
--- a/sys/dev/sound/pcm/mixer.c
+++ b/sys/dev/sound/pcm/mixer.c
@@ -37,7 +37,7 @@
SND_DECLARE_FILE("$FreeBSD$");
-MALLOC_DEFINE(M_MIXER, "mixer", "mixer");
+static MALLOC_DEFINE(M_MIXER, "mixer", "mixer");
static int mixer_bypass = 1;
TUNABLE_INT("hw.snd.vpc_mixer_bypass", &mixer_bypass);
diff --git a/sys/dev/sound/usb/uaudio.c b/sys/dev/sound/usb/uaudio.c
index 955b4c5..0445572 100644
--- a/sys/dev/sound/usb/uaudio.c
+++ b/sys/dev/sound/usb/uaudio.c
@@ -93,7 +93,7 @@ static int uaudio_default_channels = 0; /* use default */
#ifdef USB_DEBUG
static int uaudio_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uaudio, CTLFLAG_RW, 0, "USB uaudio");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uaudio, CTLFLAG_RW, 0, "USB uaudio");
SYSCTL_INT(_hw_usb_uaudio, OID_AUTO, debug, CTLFLAG_RW,
&uaudio_debug, 0, "uaudio debug level");
diff --git a/sys/dev/ste/if_ste.c b/sys/dev/ste/if_ste.c
index 846761c..a6fc00b 100644
--- a/sys/dev/ste/if_ste.c
+++ b/sys/dev/ste/if_ste.c
@@ -64,6 +64,7 @@ __FBSDID("$FreeBSD$");
#include <machine/resource.h>
#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
@@ -84,7 +85,7 @@ MODULE_DEPEND(ste, miibus, 1, 1, 1);
/*
* Various supported device vendors/types and their names.
*/
-static struct ste_type ste_devs[] = {
+static const struct ste_type ste_devs[] = {
{ ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" },
{ ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" },
{ DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" },
@@ -112,10 +113,8 @@ static int ste_init_rx_list(struct ste_softc *);
static void ste_init_tx_list(struct ste_softc *);
static void ste_intr(void *);
static int ste_ioctl(struct ifnet *, u_long, caddr_t);
-static int ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *);
-static void ste_mii_send(struct ste_softc *, uint32_t, int);
-static void ste_mii_sync(struct ste_softc *);
-static int ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *);
+static uint32_t ste_mii_bitbang_read(device_t);
+static void ste_mii_bitbang_write(device_t, uint32_t);
static int ste_miibus_readreg(device_t, int, int);
static void ste_miibus_statchg(device_t);
static int ste_miibus_writereg(device_t, int, int, int);
@@ -138,6 +137,21 @@ static void ste_txeof(struct ste_softc *);
static void ste_wait(struct ste_softc *);
static void ste_watchdog(struct ste_softc *);
+/*
+ * MII bit-bang glue
+ */
+static const struct mii_bitbang_ops ste_mii_bitbang_ops = {
+ ste_mii_bitbang_read,
+ ste_mii_bitbang_write,
+ {
+ STE_PHYCTL_MDATA, /* MII_BIT_MDO */
+ STE_PHYCTL_MDATA, /* MII_BIT_MDI */
+ STE_PHYCTL_MCLK, /* MII_BIT_MDC */
+ STE_PHYCTL_MDIR, /* MII_BIT_DIR_HOST_PHY */
+ 0, /* MII_BIT_DIR_PHY_HOST */
+ }
+};
+
static device_method_t ste_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ste_probe),
@@ -188,210 +202,51 @@ DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0);
#define STE_CLRBIT1(sc, reg, x) \
CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))
-
-#define MII_SET(x) STE_SETBIT1(sc, STE_PHYCTL, x)
-#define MII_CLR(x) STE_CLRBIT1(sc, STE_PHYCTL, x)
-
-/*
- * Sync the PHYs by setting data bit and strobing the clock 32 times.
- */
-static void
-ste_mii_sync(struct ste_softc *sc)
-{
- int i;
-
- MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
-
- for (i = 0; i < 32; i++) {
- MII_SET(STE_PHYCTL_MCLK);
- DELAY(1);
- MII_CLR(STE_PHYCTL_MCLK);
- DELAY(1);
- }
-}
-
-/*
- * Clock a series of bits through the MII.
- */
-static void
-ste_mii_send(struct ste_softc *sc, uint32_t bits, int cnt)
-{
- int i;
-
- MII_CLR(STE_PHYCTL_MCLK);
-
- for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
- if (bits & i) {
- MII_SET(STE_PHYCTL_MDATA);
- } else {
- MII_CLR(STE_PHYCTL_MDATA);
- }
- DELAY(1);
- MII_CLR(STE_PHYCTL_MCLK);
- DELAY(1);
- MII_SET(STE_PHYCTL_MCLK);
- }
-}
-
/*
- * Read an PHY register through the MII.
+ * Read the MII serial port for the MII bit-bang module.
*/
-static int
-ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
+static uint32_t
+ste_mii_bitbang_read(device_t dev)
{
- int i, ack;
-
- /*
- * Set up frame for RX.
- */
- frame->mii_stdelim = STE_MII_STARTDELIM;
- frame->mii_opcode = STE_MII_READOP;
- frame->mii_turnaround = 0;
- frame->mii_data = 0;
-
- CSR_WRITE_2(sc, STE_PHYCTL, 0);
- /*
- * Turn on data xmit.
- */
- MII_SET(STE_PHYCTL_MDIR);
-
- ste_mii_sync(sc);
-
- /*
- * Send command/address info.
- */
- ste_mii_send(sc, frame->mii_stdelim, 2);
- ste_mii_send(sc, frame->mii_opcode, 2);
- ste_mii_send(sc, frame->mii_phyaddr, 5);
- ste_mii_send(sc, frame->mii_regaddr, 5);
-
- /* Turn off xmit. */
- MII_CLR(STE_PHYCTL_MDIR);
-
- /* Idle bit */
- MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
- DELAY(1);
- MII_SET(STE_PHYCTL_MCLK);
- DELAY(1);
-
- /* Check for ack */
- MII_CLR(STE_PHYCTL_MCLK);
- DELAY(1);
- ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
- MII_SET(STE_PHYCTL_MCLK);
- DELAY(1);
-
- /*
- * Now try reading data bits. If the ack failed, we still
- * need to clock through 16 cycles to keep the PHY(s) in sync.
- */
- if (ack) {
- for (i = 0; i < 16; i++) {
- MII_CLR(STE_PHYCTL_MCLK);
- DELAY(1);
- MII_SET(STE_PHYCTL_MCLK);
- DELAY(1);
- }
- goto fail;
- }
-
- for (i = 0x8000; i; i >>= 1) {
- MII_CLR(STE_PHYCTL_MCLK);
- DELAY(1);
- if (!ack) {
- if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
- frame->mii_data |= i;
- DELAY(1);
- }
- MII_SET(STE_PHYCTL_MCLK);
- DELAY(1);
- }
+ struct ste_softc *sc;
+ uint32_t val;
-fail:
+ sc = device_get_softc(dev);
- MII_CLR(STE_PHYCTL_MCLK);
- DELAY(1);
- MII_SET(STE_PHYCTL_MCLK);
- DELAY(1);
+ val = CSR_READ_1(sc, STE_PHYCTL);
+ CSR_BARRIER(sc, STE_PHYCTL, 1,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- if (ack)
- return (1);
- return (0);
+ return (val);
}
/*
- * Write to a PHY register through the MII.
+ * Write the MII serial port for the MII bit-bang module.
*/
-static int
-ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
+static void
+ste_mii_bitbang_write(device_t dev, uint32_t val)
{
+ struct ste_softc *sc;
- /*
- * Set up frame for TX.
- */
-
- frame->mii_stdelim = STE_MII_STARTDELIM;
- frame->mii_opcode = STE_MII_WRITEOP;
- frame->mii_turnaround = STE_MII_TURNAROUND;
-
- /*
- * Turn on data output.
- */
- MII_SET(STE_PHYCTL_MDIR);
-
- ste_mii_sync(sc);
-
- ste_mii_send(sc, frame->mii_stdelim, 2);
- ste_mii_send(sc, frame->mii_opcode, 2);
- ste_mii_send(sc, frame->mii_phyaddr, 5);
- ste_mii_send(sc, frame->mii_regaddr, 5);
- ste_mii_send(sc, frame->mii_turnaround, 2);
- ste_mii_send(sc, frame->mii_data, 16);
-
- /* Idle bit. */
- MII_SET(STE_PHYCTL_MCLK);
- DELAY(1);
- MII_CLR(STE_PHYCTL_MCLK);
- DELAY(1);
-
- /*
- * Turn off xmit.
- */
- MII_CLR(STE_PHYCTL_MDIR);
+ sc = device_get_softc(dev);
- return (0);
+ CSR_WRITE_1(sc, STE_PHYCTL, val);
+ CSR_BARRIER(sc, STE_PHYCTL, 1,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
static int
ste_miibus_readreg(device_t dev, int phy, int reg)
{
- struct ste_softc *sc;
- struct ste_mii_frame frame;
-
- sc = device_get_softc(dev);
- bzero((char *)&frame, sizeof(frame));
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- ste_mii_readreg(sc, &frame);
-
- return (frame.mii_data);
+ return (mii_bitbang_readreg(dev, &ste_mii_bitbang_ops, phy, reg));
}
static int
ste_miibus_writereg(device_t dev, int phy, int reg, int data)
{
- struct ste_softc *sc;
- struct ste_mii_frame frame;
-
- sc = device_get_softc(dev);
- bzero((char *)&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- frame.mii_data = data;
- ste_mii_writereg(sc, &frame);
+ mii_bitbang_writereg(dev, &ste_mii_bitbang_ops, phy, reg, data);
return (0);
}
@@ -1027,7 +882,7 @@ ste_stats_update(struct ste_softc *sc)
static int
ste_probe(device_t dev)
{
- struct ste_type *t;
+ const struct ste_type *t;
t = ste_devs;
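
The heart of this conversion is the shared mii_bitbang(4) module: the driver now supplies only two raw accessors for its PHY control register plus a table mapping the abstract MII_BIT_* roles onto device bits, and mii_bitbang_readreg()/mii_bitbang_writereg() perform the preamble, start delimiter, opcode, turnaround and data clocking that ste_mii_sync()/ste_mii_send() hand-rolled. A condensed sketch of the contract, using a hypothetical foo(4) driver (the FOO_* register layout and foo_softc are illustrative):

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

/* Hypothetical register layout. */
#define	FOO_PHYCTL		0x4e	/* PHY control register */
#define	FOO_PHYCTL_MCLK		0x01	/* MDIO clock */
#define	FOO_PHYCTL_MDATA	0x02	/* MDIO data */
#define	FOO_PHYCTL_MDIR		0x04	/* host drives MDIO when set */

struct foo_softc {
	struct resource	*foo_res;	/* register window */
};

static uint32_t
foo_mii_bitbang_read(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);
	uint32_t val;

	val = bus_read_1(sc->foo_res, FOO_PHYCTL);
	/* Fence the access so MDIO edges are not reordered or merged. */
	bus_barrier(sc->foo_res, FOO_PHYCTL, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (val);
}

static void
foo_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct foo_softc *sc = device_get_softc(dev);

	bus_write_1(sc->foo_res, FOO_PHYCTL, val);
	bus_barrier(sc->foo_res, FOO_PHYCTL, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * Map the abstract MII_BIT_* roles onto device bits; the shared
 * module does all of the frame clocking against these accessors.
 */
static const struct mii_bitbang_ops foo_mii_bitbang_ops = {
	foo_mii_bitbang_read,
	foo_mii_bitbang_write,
	{
		FOO_PHYCTL_MDATA,	/* MII_BIT_MDO */
		FOO_PHYCTL_MDATA,	/* MII_BIT_MDI */
		FOO_PHYCTL_MCLK,	/* MII_BIT_MDC */
		FOO_PHYCTL_MDIR,	/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

static int
foo_miibus_readreg(device_t dev, int phy, int reg)
{

	return (mii_bitbang_readreg(dev, &foo_mii_bitbang_ops, phy, reg));
}
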
diff --git a/sys/dev/ste/if_stereg.h b/sys/dev/ste/if_stereg.h
index 840e0bf..e3aa51b 100644
--- a/sys/dev/ste/if_stereg.h
+++ b/sys/dev/ste/if_stereg.h
@@ -492,6 +492,9 @@ struct ste_desc_onefrag {
#define CSR_READ_1(sc, reg) \
bus_read_1((sc)->ste_res, reg)
+#define CSR_BARRIER(sc, reg, length, flags) \
+ bus_barrier((sc)->ste_res, reg, length, flags)
+
#define STE_DESC_ALIGN 8
#define STE_RX_LIST_CNT 128
#define STE_TX_LIST_CNT 128
@@ -519,7 +522,7 @@ struct ste_desc_onefrag {
struct ste_type {
uint16_t ste_vid;
uint16_t ste_did;
- char *ste_name;
+ const char *ste_name;
};
struct ste_list_data {
@@ -590,20 +593,3 @@ struct ste_softc {
#define STE_LOCK(_sc) mtx_lock(&(_sc)->ste_mtx)
#define STE_UNLOCK(_sc) mtx_unlock(&(_sc)->ste_mtx)
#define STE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->ste_mtx, MA_OWNED)
-
-struct ste_mii_frame {
- uint8_t mii_stdelim;
- uint8_t mii_opcode;
- uint8_t mii_phyaddr;
- uint8_t mii_regaddr;
- uint8_t mii_turnaround;
- uint16_t mii_data;
-};
-
-/*
- * MII constants
- */
-#define STE_MII_STARTDELIM 0x01
-#define STE_MII_READOP 0x02
-#define STE_MII_WRITEOP 0x01
-#define STE_MII_TURNAROUND 0x02
diff --git a/sys/dev/stge/if_stge.c b/sys/dev/stge/if_stge.c
index 5058a4d..b18da27 100644
--- a/sys/dev/stge/if_stge.c
+++ b/sys/dev/stge/if_stge.c
@@ -67,6 +67,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
@@ -86,11 +87,11 @@ MODULE_DEPEND(stge, miibus, 1, 1, 1);
/*
* Devices supported by this driver.
*/
-static struct stge_product {
+static const struct stge_product {
uint16_t stge_vendorid;
uint16_t stge_deviceid;
const char *stge_name;
} stge_products[] = {
{ VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST1023,
"Sundance ST-1023 Gigabit Ethernet" },
@@ -160,10 +161,6 @@ static int stge_newbuf(struct stge_softc *, int);
static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
#endif
-static void stge_mii_sync(struct stge_softc *);
-static void stge_mii_send(struct stge_softc *, uint32_t, int);
-static int stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
-static int stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
static int stge_miibus_readreg(device_t, int, int);
static int stge_miibus_writereg(device_t, int, int, int);
static void stge_miibus_statchg(device_t);
@@ -185,6 +182,24 @@ static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
+/*
+ * MII bit-bang glue
+ */
+static uint32_t stge_mii_bitbang_read(device_t);
+static void stge_mii_bitbang_write(device_t, uint32_t);
+
+static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
+ stge_mii_bitbang_read,
+ stge_mii_bitbang_write,
+ {
+ PC_MgmtData, /* MII_BIT_MDO */
+ PC_MgmtData, /* MII_BIT_MDI */
+ PC_MgmtClk, /* MII_BIT_MDC */
+ PC_MgmtDir, /* MII_BIT_DIR_HOST_PHY */
+ 0, /* MII_BIT_DIR_PHY_HOST */
+ }
+};
+
static device_method_t stge_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, stge_probe),
@@ -225,176 +240,40 @@ static struct resource_spec stge_res_spec_mem[] = {
{ -1, 0, 0 }
};
-#define MII_SET(x) \
- CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
-#define MII_CLR(x) \
- CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
-
/*
- * Sync the PHYs by setting data bit and strobing the clock 32 times.
+ * stge_mii_bitbang_read: [mii bit-bang interface function]
+ *
+ * Read the MII serial port for the MII bit-bang module.
*/
-static void
-stge_mii_sync(struct stge_softc *sc)
+static uint32_t
+stge_mii_bitbang_read(device_t dev)
{
- int i;
+ struct stge_softc *sc;
+ uint32_t val;
- MII_SET(PC_MgmtDir | PC_MgmtData);
+ sc = device_get_softc(dev);
- for (i = 0; i < 32; i++) {
- MII_SET(PC_MgmtClk);
- DELAY(1);
- MII_CLR(PC_MgmtClk);
- DELAY(1);
- }
+ val = CSR_READ_1(sc, STGE_PhyCtrl);
+ CSR_BARRIER(sc, STGE_PhyCtrl, 1,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+ return (val);
}
/*
- * Clock a series of bits through the MII.
+ * stge_mii_bitbang_write: [mii bit-bang interface function]
+ *
+ * Write the MII serial port for the MII bit-bang module.
*/
static void
-stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
-{
- int i;
-
- MII_CLR(PC_MgmtClk);
-
- for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
- if (bits & i)
- MII_SET(PC_MgmtData);
- else
- MII_CLR(PC_MgmtData);
- DELAY(1);
- MII_CLR(PC_MgmtClk);
- DELAY(1);
- MII_SET(PC_MgmtClk);
- }
-}
-
-/*
- * Read an PHY register through the MII.
- */
-static int
-stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
+stge_mii_bitbang_write(device_t dev, uint32_t val)
{
- int i, ack;
-
- /*
- * Set up frame for RX.
- */
- frame->mii_stdelim = STGE_MII_STARTDELIM;
- frame->mii_opcode = STGE_MII_READOP;
- frame->mii_turnaround = 0;
- frame->mii_data = 0;
-
- CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
- /*
- * Turn on data xmit.
- */
- MII_SET(PC_MgmtDir);
-
- stge_mii_sync(sc);
-
- /*
- * Send command/address info.
- */
- stge_mii_send(sc, frame->mii_stdelim, 2);
- stge_mii_send(sc, frame->mii_opcode, 2);
- stge_mii_send(sc, frame->mii_phyaddr, 5);
- stge_mii_send(sc, frame->mii_regaddr, 5);
-
- /* Turn off xmit. */
- MII_CLR(PC_MgmtDir);
-
- /* Idle bit */
- MII_CLR((PC_MgmtClk | PC_MgmtData));
- DELAY(1);
- MII_SET(PC_MgmtClk);
- DELAY(1);
-
- /* Check for ack */
- MII_CLR(PC_MgmtClk);
- DELAY(1);
- ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
- MII_SET(PC_MgmtClk);
- DELAY(1);
-
- /*
- * Now try reading data bits. If the ack failed, we still
- * need to clock through 16 cycles to keep the PHY(s) in sync.
- */
- if (ack) {
- for(i = 0; i < 16; i++) {
- MII_CLR(PC_MgmtClk);
- DELAY(1);
- MII_SET(PC_MgmtClk);
- DELAY(1);
- }
- goto fail;
- }
-
- for (i = 0x8000; i; i >>= 1) {
- MII_CLR(PC_MgmtClk);
- DELAY(1);
- if (!ack) {
- if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
- frame->mii_data |= i;
- DELAY(1);
- }
- MII_SET(PC_MgmtClk);
- DELAY(1);
- }
-
-fail:
- MII_CLR(PC_MgmtClk);
- DELAY(1);
- MII_SET(PC_MgmtClk);
- DELAY(1);
-
- if (ack)
- return(1);
- return(0);
-}
-
-/*
- * Write to a PHY register through the MII.
- */
-static int
-stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
-{
-
- /*
- * Set up frame for TX.
- */
- frame->mii_stdelim = STGE_MII_STARTDELIM;
- frame->mii_opcode = STGE_MII_WRITEOP;
- frame->mii_turnaround = STGE_MII_TURNAROUND;
-
- /*
- * Turn on data output.
- */
- MII_SET(PC_MgmtDir);
-
- stge_mii_sync(sc);
-
- stge_mii_send(sc, frame->mii_stdelim, 2);
- stge_mii_send(sc, frame->mii_opcode, 2);
- stge_mii_send(sc, frame->mii_phyaddr, 5);
- stge_mii_send(sc, frame->mii_regaddr, 5);
- stge_mii_send(sc, frame->mii_turnaround, 2);
- stge_mii_send(sc, frame->mii_data, 16);
-
- /* Idle bit. */
- MII_SET(PC_MgmtClk);
- DELAY(1);
- MII_CLR(PC_MgmtClk);
- DELAY(1);
+ struct stge_softc *sc;
- /*
- * Turn off xmit.
- */
- MII_CLR(PC_MgmtDir);
+ sc = device_get_softc(dev);
- return(0);
+ CSR_WRITE_1(sc, STGE_PhyCtrl, val);
+ CSR_BARRIER(sc, STGE_PhyCtrl, 1,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
/*
@@ -406,8 +285,7 @@ static int
stge_miibus_readreg(device_t dev, int phy, int reg)
{
struct stge_softc *sc;
- struct stge_mii_frame frame;
- int error;
+ int error, val;
sc = device_get_softc(dev);
@@ -418,21 +296,11 @@ stge_miibus_readreg(device_t dev, int phy, int reg)
STGE_MII_UNLOCK(sc);
return (error);
}
- bzero(&frame, sizeof(frame));
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
STGE_MII_LOCK(sc);
- error = stge_mii_readreg(sc, &frame);
+ val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
STGE_MII_UNLOCK(sc);
-
- if (error != 0) {
- /* Don't show errors for PHY probe request */
- if (reg != 1)
- device_printf(sc->sc_dev, "phy read fail\n");
- return (0);
- }
- return (frame.mii_data);
+ return (val);
}
/*
@@ -444,22 +312,12 @@ static int
stge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
struct stge_softc *sc;
- struct stge_mii_frame frame;
- int error;
sc = device_get_softc(dev);
- bzero(&frame, sizeof(frame));
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- frame.mii_data = val;
-
STGE_MII_LOCK(sc);
- error = stge_mii_writereg(sc, &frame);
+ mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val);
STGE_MII_UNLOCK(sc);
-
- if (error != 0)
- device_printf(sc->sc_dev, "phy write fail\n");
return (0);
}
@@ -550,7 +408,7 @@ stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
static int
stge_probe(device_t dev)
{
- struct stge_product *sp;
+ const struct stge_product *sp;
int i;
uint16_t vendor, devid;
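
Unlike ste(4), stge(4) keeps its existing STGE_MII_LOCK around the converted calls. Serialization matters for bit-banged MDIO because one shared control register is toggled across dozens of clock edges, and two concurrent frames would interleave and corrupt each other. The locked accessor shape, continuing the hypothetical foo(4) names from the earlier sketch (the foo_mii_mtx field is assumed, not from this patch):

#include <sys/lock.h>
#include <sys/mutex.h>

/* Assumes struct foo_softc gains: struct mtx foo_mii_mtx; */
static int
foo_miibus_readreg_locked(device_t dev, int phy, int reg)
{
	struct foo_softc *sc = device_get_softc(dev);
	int val;

	mtx_lock(&sc->foo_mii_mtx);
	val = mii_bitbang_readreg(dev, &foo_mii_bitbang_ops, phy, reg);
	mtx_unlock(&sc->foo_mii_mtx);
	return (val);
}
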
diff --git a/sys/dev/stge/if_stgereg.h b/sys/dev/stge/if_stgereg.h
index 6ecbdf9..f14142a 100644
--- a/sys/dev/stge/if_stgereg.h
+++ b/sys/dev/stge/if_stgereg.h
@@ -99,6 +99,9 @@
#define CSR_READ_1(_sc, reg) \
bus_read_1((_sc)->sc_res[0], (reg))
+#define CSR_BARRIER(_sc, reg, length, flags) \
+ bus_barrier((_sc)->sc_res[0], reg, length, flags)
+
/*
* TC9021 buffer fragment descriptor.
*/
@@ -677,23 +680,6 @@ do { \
#define STGE_TIMEOUT 1000
-struct stge_mii_frame {
- uint8_t mii_stdelim;
- uint8_t mii_opcode;
- uint8_t mii_phyaddr;
- uint8_t mii_regaddr;
- uint8_t mii_turnaround;
- uint16_t mii_data;
-};
-
-/*
- * MII constants
- */
-#define STGE_MII_STARTDELIM 0x01
-#define STGE_MII_READOP 0x02
-#define STGE_MII_WRITEOP 0x01
-#define STGE_MII_TURNAROUND 0x02
-
#define STGE_RESET_NONE 0x00
#define STGE_RESET_TX 0x01
#define STGE_RESET_RX 0x02
diff --git a/sys/dev/syscons/scterm-teken.c b/sys/dev/syscons/scterm-teken.c
index 725f9f5..6762388 100644
--- a/sys/dev/syscons/scterm-teken.c
+++ b/sys/dev/syscons/scterm-teken.c
@@ -424,10 +424,18 @@ static const struct unicp437 cp437table[] = {
{ 0x00b6, 0x14, 0x00 }, { 0x00b7, 0xfa, 0x00 },
{ 0x00ba, 0xa7, 0x00 }, { 0x00bb, 0xaf, 0x00 },
{ 0x00bc, 0xac, 0x00 }, { 0x00bd, 0xab, 0x00 },
- { 0x00bf, 0xa8, 0x00 }, { 0x00c4, 0x8e, 0x01 },
- { 0x00c6, 0x92, 0x00 }, { 0x00c7, 0x80, 0x00 },
- { 0x00c9, 0x90, 0x00 }, { 0x00d1, 0xa5, 0x00 },
- { 0x00d6, 0x99, 0x00 }, { 0x00dc, 0x9a, 0x00 },
+ { 0x00bf, 0xa8, 0x00 }, { 0x00c0, 0x41, 0x00 },
+ { 0x00c1, 0x41, 0x00 }, { 0x00c2, 0x41, 0x00 },
+ { 0x00c4, 0x8e, 0x01 }, { 0x00c6, 0x92, 0x00 },
+ { 0x00c7, 0x80, 0x00 }, { 0x00c8, 0x45, 0x00 },
+ { 0x00c9, 0x90, 0x00 }, { 0x00ca, 0x45, 0x00 },
+ { 0x00cb, 0x45, 0x00 }, { 0x00cc, 0x49, 0x00 },
+ { 0x00cd, 0x49, 0x00 }, { 0x00ce, 0x49, 0x00 },
+ { 0x00cf, 0x49, 0x00 }, { 0x00d1, 0xa5, 0x00 },
+ { 0x00d2, 0x4f, 0x00 }, { 0x00d3, 0x4f, 0x00 },
+ { 0x00d4, 0x4f, 0x00 }, { 0x00d6, 0x99, 0x00 },
+ { 0x00d9, 0x55, 0x00 }, { 0x00da, 0x55, 0x00 },
+ { 0x00db, 0x55, 0x00 }, { 0x00dc, 0x9a, 0x00 },
{ 0x00df, 0xe1, 0x00 }, { 0x00e0, 0x85, 0x00 },
{ 0x00e1, 0xa0, 0x00 }, { 0x00e2, 0x83, 0x00 },
{ 0x00e4, 0x84, 0x00 }, { 0x00e5, 0x86, 0x00 },
@@ -442,6 +450,7 @@ static const struct unicp437 cp437table[] = {
{ 0x00f8, 0xed, 0x00 }, { 0x00f9, 0x97, 0x00 },
{ 0x00fa, 0xa3, 0x00 }, { 0x00fb, 0x96, 0x00 },
{ 0x00fc, 0x81, 0x00 }, { 0x00ff, 0x98, 0x00 },
+ { 0x013f, 0x4c, 0x00 }, { 0x0140, 0x6c, 0x00 },
{ 0x0192, 0x9f, 0x00 }, { 0x0393, 0xe2, 0x00 },
{ 0x0398, 0xe9, 0x00 }, { 0x03a3, 0xe4, 0x00 },
{ 0x03a6, 0xe8, 0x00 }, { 0x03a9, 0xea, 0x00 },
@@ -490,7 +499,8 @@ static const struct unicp437 cp437table[] = {
{ 0x2584, 0xdc, 0x00 }, { 0x2588, 0xdb, 0x00 },
{ 0x258c, 0xdd, 0x00 }, { 0x2590, 0xde, 0x00 },
{ 0x2591, 0xb0, 0x02 }, { 0x25a0, 0xfe, 0x00 },
- { 0x25ac, 0x16, 0x00 }, { 0x25b2, 0x1e, 0x00 },
+ { 0x25ac, 0x16, 0x00 },
+ { 0x25ae, 0xdb, 0x00 }, { 0x25b2, 0x1e, 0x00 },
{ 0x25ba, 0x10, 0x00 }, { 0x25bc, 0x1f, 0x00 },
{ 0x25c4, 0x11, 0x00 }, { 0x25cb, 0x09, 0x00 },
{ 0x25d8, 0x08, 0x00 }, { 0x25d9, 0x0a, 0x00 },
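
Each row of cp437table pairs a Unicode code point with a CP437 glyph, and the third column extends the entry across a run of consecutive code points, so { 0x2591, 0xb0, 0x02 } covers U+2591..U+2593 and yields 0xb0..0xb2. A lookup over such a sorted table might look like the following sketch (struct and field names are hypothetical; teken's actual conversion routine may differ):

#include <sys/types.h>

/* Hypothetical mirror of a table entry. */
struct unicp437 {
	uint16_t	base;	/* first Unicode code point of the run */
	uint8_t		cp437;	/* CP437 glyph for the first code point */
	uint8_t		len;	/* extra consecutive code points covered */
};

static uint8_t
unicode_to_cp437(const struct unicp437 *tbl, int n, uint16_t c)
{
	int lo, hi, mid;

	lo = 0;
	hi = n - 1;
	while (lo <= hi) {
		mid = (lo + hi) / 2;
		if (c < tbl[mid].base)
			hi = mid - 1;
		else if (c > tbl[mid].base + tbl[mid].len)
			lo = mid + 1;
		else
			/* Offset into the run gives the glyph. */
			return (tbl[mid].cp437 + (c - tbl[mid].base));
	}
	return ('?');		/* no CP437 equivalent */
}
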
diff --git a/sys/dev/syscons/syscons.c b/sys/dev/syscons/syscons.c
index 227350e..3928d91 100644
--- a/sys/dev/syscons/syscons.c
+++ b/sys/dev/syscons/syscons.c
@@ -136,8 +136,8 @@ static int sc_no_suspend_vtswitch = 0;
#endif
static int sc_susp_scr;
-SYSCTL_NODE(_hw, OID_AUTO, syscons, CTLFLAG_RD, 0, "syscons");
-SYSCTL_NODE(_hw_syscons, OID_AUTO, saver, CTLFLAG_RD, 0, "saver");
+static SYSCTL_NODE(_hw, OID_AUTO, syscons, CTLFLAG_RD, 0, "syscons");
+static SYSCTL_NODE(_hw_syscons, OID_AUTO, saver, CTLFLAG_RD, 0, "saver");
SYSCTL_INT(_hw_syscons_saver, OID_AUTO, keybonly, CTLFLAG_RW,
&sc_saver_keyb_only, 0, "screen saver interrupted by input only");
SYSCTL_INT(_hw_syscons, OID_AUTO, bell, CTLFLAG_RW, &enable_bell,
diff --git a/sys/dev/tdfx/tdfx_pci.c b/sys/dev/tdfx/tdfx_pci.c
index 27308a9..0992e5f 100644
--- a/sys/dev/tdfx/tdfx_pci.c
+++ b/sys/dev/tdfx/tdfx_pci.c
@@ -89,7 +89,7 @@ static device_method_t tdfx_methods[] = {
{ 0, 0 }
};
-MALLOC_DEFINE(M_TDFX,"tdfx_driver","3DFX Graphics[/2D]/3D Accelerator(s)");
+static MALLOC_DEFINE(M_TDFX,"tdfx_driver","3DFX Graphics[/2D]/3D Accelerators");
/* Char. Dev. file operations structure */
static struct cdevsw tdfx_cdev = {
diff --git a/sys/dev/ti/if_ti.c b/sys/dev/ti/if_ti.c
index 8468565..d57be53 100644
--- a/sys/dev/ti/if_ti.c
+++ b/sys/dev/ti/if_ti.c
@@ -112,8 +112,7 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/rman.h>
-/* #define TI_PRIVATE_JUMBOS */
-#ifndef TI_PRIVATE_JUMBOS
+#ifdef TI_SF_BUF_JUMBO
#include <vm/vm.h>
#include <vm/vm_page.h>
#endif
@@ -126,21 +125,22 @@ __FBSDID("$FreeBSD$");
#include <dev/ti/ti_fw.h>
#include <dev/ti/ti_fw2.h>
+#include <sys/sysctl.h>
+
#define TI_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
/*
* We can only turn on header splitting if we're using extended receive
* BDs.
*/
-#if defined(TI_JUMBO_HDRSPLIT) && defined(TI_PRIVATE_JUMBOS)
-#error "options TI_JUMBO_HDRSPLIT and TI_PRIVATE_JUMBOS are mutually exclusive"
-#endif /* TI_JUMBO_HDRSPLIT && TI_JUMBO_HDRSPLIT */
+#if defined(TI_JUMBO_HDRSPLIT) && !defined(TI_SF_BUF_JUMBO)
+#error "options TI_JUMBO_HDRSPLIT requires TI_SF_BUF_JUMBO"
+#endif /* TI_JUMBO_HDRSPLIT && !TI_SF_BUF_JUMBO */
typedef enum {
TI_SWAP_HTON,
TI_SWAP_NTOH
} ti_swap_type;
-
/*
* Various supported device vendors/types and their names.
*/
@@ -197,36 +197,38 @@ static void ti_stop(struct ti_softc *);
static void ti_watchdog(void *);
static int ti_shutdown(device_t);
static int ti_ifmedia_upd(struct ifnet *);
+static int ti_ifmedia_upd_locked(struct ti_softc *);
static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *);
-static u_int32_t ti_eeprom_putbyte(struct ti_softc *, int);
-static u_int8_t ti_eeprom_getbyte(struct ti_softc *, int, u_int8_t *);
+static uint32_t ti_eeprom_putbyte(struct ti_softc *, int);
+static uint8_t ti_eeprom_getbyte(struct ti_softc *, int, uint8_t *);
static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int);
static void ti_add_mcast(struct ti_softc *, struct ether_addr *);
static void ti_del_mcast(struct ti_softc *, struct ether_addr *);
static void ti_setmulti(struct ti_softc *);
-static void ti_mem_read(struct ti_softc *, u_int32_t, u_int32_t, void *);
-static void ti_mem_write(struct ti_softc *, u_int32_t, u_int32_t, void *);
-static void ti_mem_zero(struct ti_softc *, u_int32_t, u_int32_t);
-static int ti_copy_mem(struct ti_softc *, u_int32_t, u_int32_t, caddr_t, int, int);
-static int ti_copy_scratch(struct ti_softc *, u_int32_t, u_int32_t, caddr_t,
- int, int, int);
+static void ti_mem_read(struct ti_softc *, uint32_t, uint32_t, void *);
+static void ti_mem_write(struct ti_softc *, uint32_t, uint32_t, void *);
+static void ti_mem_zero(struct ti_softc *, uint32_t, uint32_t);
+static int ti_copy_mem(struct ti_softc *, uint32_t, uint32_t, caddr_t, int,
+ int);
+static int ti_copy_scratch(struct ti_softc *, uint32_t, uint32_t, caddr_t,
+ int, int, int);
static int ti_bcopy_swap(const void *, void *, size_t, ti_swap_type);
static void ti_loadfw(struct ti_softc *);
static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *);
static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, caddr_t, int);
static void ti_handle_events(struct ti_softc *);
-static int ti_alloc_dmamaps(struct ti_softc *);
-static void ti_free_dmamaps(struct ti_softc *);
-static int ti_alloc_jumbo_mem(struct ti_softc *);
-#ifdef TI_PRIVATE_JUMBOS
-static void *ti_jalloc(struct ti_softc *);
-static void ti_jfree(void *, void *);
-#endif /* TI_PRIVATE_JUMBOS */
-static int ti_newbuf_std(struct ti_softc *, int, struct mbuf *);
-static int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *);
+static void ti_dma_map_addr(void *, bus_dma_segment_t *, int, int);
+static int ti_dma_alloc(struct ti_softc *);
+static void ti_dma_free(struct ti_softc *);
+static int ti_dma_ring_alloc(struct ti_softc *, bus_size_t, bus_size_t,
+ bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
+static void ti_dma_ring_free(struct ti_softc *, bus_dma_tag_t *, uint8_t **,
+ bus_dmamap_t *);
+static int ti_newbuf_std(struct ti_softc *, int);
+static int ti_newbuf_mini(struct ti_softc *, int);
static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *);
static int ti_init_rx_ring_std(struct ti_softc *);
static void ti_free_rx_ring_std(struct ti_softc *);
@@ -236,16 +238,23 @@ static int ti_init_rx_ring_mini(struct ti_softc *);
static void ti_free_rx_ring_mini(struct ti_softc *);
static void ti_free_tx_ring(struct ti_softc *);
static int ti_init_tx_ring(struct ti_softc *);
+static void ti_discard_std(struct ti_softc *, int);
+#ifndef TI_SF_BUF_JUMBO
+static void ti_discard_jumbo(struct ti_softc *, int);
+#endif
+static void ti_discard_mini(struct ti_softc *, int);
static int ti_64bitslot_war(struct ti_softc *);
static int ti_chipinit(struct ti_softc *);
static int ti_gibinit(struct ti_softc *);
#ifdef TI_JUMBO_HDRSPLIT
-static __inline void ti_hdr_split (struct mbuf *top, int hdr_len,
- int pkt_len, int idx);
+static __inline void ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len,
+ int idx);
#endif /* TI_JUMBO_HDRSPLIT */
+static void ti_sysctl_node(struct ti_softc *);
+
static device_method_t ti_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ti_probe),
@@ -270,11 +279,10 @@ MODULE_DEPEND(ti, ether, 1, 1, 1);
/*
* Send an instruction or address to the EEPROM, check for ACK.
*/
-static u_int32_t ti_eeprom_putbyte(sc, byte)
- struct ti_softc *sc;
- int byte;
+static uint32_t
+ti_eeprom_putbyte(struct ti_softc *sc, int byte)
{
- int i, ack = 0;
+ int i, ack = 0;
/*
* Make sure we're in TX mode.
@@ -316,13 +324,11 @@ static u_int32_t ti_eeprom_putbyte(sc, byte)
* We have to send two address bytes since the EEPROM can hold
* more than 256 bytes of data.
*/
-static u_int8_t ti_eeprom_getbyte(sc, addr, dest)
- struct ti_softc *sc;
- int addr;
- u_int8_t *dest;
+static uint8_t
+ti_eeprom_getbyte(struct ti_softc *sc, int addr, uint8_t *dest)
{
- int i;
- u_int8_t byte = 0;
+ int i;
+ uint8_t byte = 0;
EEPROM_START;
@@ -393,14 +399,10 @@ static u_int8_t ti_eeprom_getbyte(sc, addr, dest)
* Read a sequence of bytes from the EEPROM.
*/
static int
-ti_read_eeprom(sc, dest, off, cnt)
- struct ti_softc *sc;
- caddr_t dest;
- int off;
- int cnt;
+ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt)
{
- int err = 0, i;
- u_int8_t byte = 0;
+ int err = 0, i;
+ uint8_t byte = 0;
for (i = 0; i < cnt; i++) {
err = ti_eeprom_getbyte(sc, off + i, &byte);
@@ -417,13 +419,10 @@ ti_read_eeprom(sc, dest, off, cnt)
* Can be used to copy data from NIC local memory.
*/
static void
-ti_mem_read(sc, addr, len, buf)
- struct ti_softc *sc;
- u_int32_t addr, len;
- void *buf;
+ti_mem_read(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
{
- int segptr, segsize, cnt;
- char *ptr;
+ int segptr, segsize, cnt;
+ char *ptr;
segptr = addr;
cnt = len;
@@ -436,7 +435,7 @@ ti_mem_read(sc, addr, len, buf)
segsize = TI_WINLEN - (segptr % TI_WINLEN);
CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
- TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr,
+ TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
segsize / 4);
ptr += segsize;
segptr += segsize;
@@ -450,13 +449,10 @@ ti_mem_read(sc, addr, len, buf)
* Can be used to copy data into NIC local memory.
*/
static void
-ti_mem_write(sc, addr, len, buf)
- struct ti_softc *sc;
- u_int32_t addr, len;
- void *buf;
+ti_mem_write(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
{
- int segptr, segsize, cnt;
- char *ptr;
+ int segptr, segsize, cnt;
+ char *ptr;
segptr = addr;
cnt = len;
@@ -469,7 +465,7 @@ ti_mem_write(sc, addr, len, buf)
segsize = TI_WINLEN - (segptr % TI_WINLEN);
CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
- TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr,
+ TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
segsize / 4);
ptr += segsize;
segptr += segsize;
@@ -482,11 +478,9 @@ ti_mem_write(sc, addr, len, buf)
* Can be used to clear a section of NIC local memory.
*/
static void
-ti_mem_zero(sc, addr, len)
- struct ti_softc *sc;
- u_int32_t addr, len;
+ti_mem_zero(struct ti_softc *sc, uint32_t addr, uint32_t len)
{
- int segptr, segsize, cnt;
+ int segptr, segsize, cnt;
segptr = addr;
cnt = len;
@@ -505,18 +499,14 @@ ti_mem_zero(sc, addr, len)
}
static int
-ti_copy_mem(sc, tigon_addr, len, buf, useraddr, readdata)
- struct ti_softc *sc;
- u_int32_t tigon_addr, len;
- caddr_t buf;
- int useraddr, readdata;
-{
- int segptr, segsize, cnt;
- caddr_t ptr;
- u_int32_t origwin;
- u_int8_t tmparray[TI_WINLEN], tmparray2[TI_WINLEN];
- int resid, segresid;
- int first_pass;
+ti_copy_mem(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
+ caddr_t buf, int useraddr, int readdata)
+{
+ int segptr, segsize, cnt;
+ caddr_t ptr;
+ uint32_t origwin;
+ int resid, segresid;
+ int first_pass;
TI_LOCK_ASSERT(sc);
@@ -524,8 +514,7 @@ ti_copy_mem(sc, tigon_addr, len, buf, useraddr, readdata)
* At the moment, we don't handle non-aligned cases, we just bail.
* If this proves to be a problem, it will be fixed.
*/
- if ((readdata == 0)
- && (tigon_addr & 0x3)) {
+ if (readdata == 0 && (tigon_addr & 0x3) != 0) {
device_printf(sc->ti_dev, "%s: tigon address %#x isn't "
"word-aligned\n", __func__, tigon_addr);
device_printf(sc->ti_dev, "%s: unaligned writes aren't "
@@ -574,58 +563,54 @@ ti_copy_mem(sc, tigon_addr, len, buf, useraddr, readdata)
ti_offset = TI_WINDOW + (segptr & (TI_WINLEN -1));
if (readdata) {
-
- bus_space_read_region_4(sc->ti_btag,
- sc->ti_bhandle, ti_offset,
- (u_int32_t *)tmparray,
- segsize >> 2);
+ bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
+ ti_offset, (uint32_t *)sc->ti_membuf, segsize >> 2);
if (useraddr) {
/*
* Yeah, this is a little on the kludgy
* side, but at least this code is only
* used for debugging.
*/
- ti_bcopy_swap(tmparray, tmparray2, segsize,
- TI_SWAP_NTOH);
+ ti_bcopy_swap(sc->ti_membuf, sc->ti_membuf2,
+ segsize, TI_SWAP_NTOH);
TI_UNLOCK(sc);
if (first_pass) {
- copyout(&tmparray2[segresid], ptr,
- segsize - segresid);
+ copyout(&sc->ti_membuf2[segresid], ptr,
+ segsize - segresid);
first_pass = 0;
} else
- copyout(tmparray2, ptr, segsize);
+ copyout(sc->ti_membuf2, ptr, segsize);
TI_LOCK(sc);
} else {
if (first_pass) {
- ti_bcopy_swap(tmparray, tmparray2,
- segsize, TI_SWAP_NTOH);
+ ti_bcopy_swap(sc->ti_membuf,
+ sc->ti_membuf2, segsize,
+ TI_SWAP_NTOH);
TI_UNLOCK(sc);
- bcopy(&tmparray2[segresid], ptr,
- segsize - segresid);
+ bcopy(&sc->ti_membuf2[segresid], ptr,
+ segsize - segresid);
TI_LOCK(sc);
first_pass = 0;
} else
- ti_bcopy_swap(tmparray, ptr, segsize,
- TI_SWAP_NTOH);
+ ti_bcopy_swap(sc->ti_membuf, ptr,
+ segsize, TI_SWAP_NTOH);
}
} else {
if (useraddr) {
TI_UNLOCK(sc);
- copyin(ptr, tmparray2, segsize);
+ copyin(ptr, sc->ti_membuf2, segsize);
TI_LOCK(sc);
- ti_bcopy_swap(tmparray2, tmparray, segsize,
- TI_SWAP_HTON);
+ ti_bcopy_swap(sc->ti_membuf2, sc->ti_membuf,
+ segsize, TI_SWAP_HTON);
} else
- ti_bcopy_swap(ptr, tmparray, segsize,
- TI_SWAP_HTON);
+ ti_bcopy_swap(ptr, sc->ti_membuf, segsize,
+ TI_SWAP_HTON);
- bus_space_write_region_4(sc->ti_btag,
- sc->ti_bhandle, ti_offset,
- (u_int32_t *)tmparray,
- segsize >> 2);
+ bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
+ ti_offset, (uint32_t *)sc->ti_membuf, segsize >> 2);
}
segptr += segsize;
ptr += segsize;
@@ -636,8 +621,8 @@ ti_copy_mem(sc, tigon_addr, len, buf, useraddr, readdata)
* Handle leftover, non-word-aligned bytes.
*/
if (resid != 0) {
- u_int32_t tmpval, tmpval2;
- bus_size_t ti_offset;
+ uint32_t tmpval, tmpval2;
+ bus_size_t ti_offset;
/*
* Set the segment pointer.
@@ -652,7 +637,7 @@ ti_copy_mem(sc, tigon_addr, len, buf, useraddr, readdata)
* writes, since we'll be doing read/modify/write.
*/
bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
- ti_offset, &tmpval, 1);
+ ti_offset, &tmpval, 1);
/*
* Next, translate this from little-endian to big-endian
@@ -694,7 +679,7 @@ ti_copy_mem(sc, tigon_addr, len, buf, useraddr, readdata)
tmpval = htonl(tmpval2);
bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
- ti_offset, &tmpval, 1);
+ ti_offset, &tmpval, 1);
}
}
@@ -704,17 +689,13 @@ ti_copy_mem(sc, tigon_addr, len, buf, useraddr, readdata)
}
static int
-ti_copy_scratch(sc, tigon_addr, len, buf, useraddr, readdata, cpu)
- struct ti_softc *sc;
- u_int32_t tigon_addr, len;
- caddr_t buf;
- int useraddr, readdata;
- int cpu;
-{
- u_int32_t segptr;
- int cnt;
- u_int32_t tmpval, tmpval2;
- caddr_t ptr;
+ti_copy_scratch(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
+ caddr_t buf, int useraddr, int readdata, int cpu)
+{
+ uint32_t segptr;
+ int cnt;
+ uint32_t tmpval, tmpval2;
+ caddr_t ptr;
TI_LOCK_ASSERT(sc);
@@ -802,19 +783,14 @@ ti_copy_scratch(sc, tigon_addr, len, buf, useraddr, readdata, cpu)
}
static int
-ti_bcopy_swap(src, dst, len, swap_type)
- const void *src;
- void *dst;
- size_t len;
- ti_swap_type swap_type;
-{
- const u_int8_t *tmpsrc;
- u_int8_t *tmpdst;
+ti_bcopy_swap(const void *src, void *dst, size_t len, ti_swap_type swap_type)
+{
+ const uint8_t *tmpsrc;
+ uint8_t *tmpdst;
size_t tmplen;
if (len & 0x3) {
- printf("ti_bcopy_swap: length %zd isn't 32-bit aligned\n",
- len);
+ printf("ti_bcopy_swap: length %zd isn't 32-bit aligned\n", len);
return (-1);
}
@@ -824,12 +800,9 @@ ti_bcopy_swap(src, dst, len, swap_type)
while (tmplen) {
if (swap_type == TI_SWAP_NTOH)
- *(u_int32_t *)tmpdst =
- ntohl(*(const u_int32_t *)tmpsrc);
+ *(uint32_t *)tmpdst = ntohl(*(const uint32_t *)tmpsrc);
else
- *(u_int32_t *)tmpdst =
- htonl(*(const u_int32_t *)tmpsrc);
-
+ *(uint32_t *)tmpdst = htonl(*(const uint32_t *)tmpsrc);
tmpsrc += 4;
tmpdst += 4;
tmplen -= 4;
@@ -844,8 +817,7 @@ ti_bcopy_swap(src, dst, len, swap_type)
* Tigon 2.
*/
static void
-ti_loadfw(sc)
- struct ti_softc *sc;
+ti_loadfw(struct ti_softc *sc)
{
TI_LOCK_ASSERT(sc);
@@ -902,14 +874,12 @@ ti_loadfw(sc)
* Send the NIC a command via the command ring.
*/
static void
-ti_cmd(sc, cmd)
- struct ti_softc *sc;
- struct ti_cmd_desc *cmd;
+ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
{
- int index;
+ int index;
index = sc->ti_cmd_saved_prodidx;
- CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
+ CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd));
TI_INC(index, TI_CMD_RING_CNT);
CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
sc->ti_cmd_saved_prodidx = index;
@@ -920,21 +890,17 @@ ti_cmd(sc, cmd)
* number of command slots to include after the initial command.
*/
static void
-ti_cmd_ext(sc, cmd, arg, len)
- struct ti_softc *sc;
- struct ti_cmd_desc *cmd;
- caddr_t arg;
- int len;
+ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg, int len)
{
- int index;
- int i;
+ int index;
+ int i;
index = sc->ti_cmd_saved_prodidx;
- CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
+ CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd));
TI_INC(index, TI_CMD_RING_CNT);
for (i = 0; i < len; i++) {
CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
- *(u_int32_t *)(&arg[i * 4]));
+ *(uint32_t *)(&arg[i * 4]));
TI_INC(index, TI_CMD_RING_CNT);
}
CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
@@ -945,25 +911,40 @@ ti_cmd_ext(sc, cmd, arg, len)
* Handle events that have triggered interrupts.
*/
static void
-ti_handle_events(sc)
- struct ti_softc *sc;
+ti_handle_events(struct ti_softc *sc)
{
- struct ti_event_desc *e;
+ struct ti_event_desc *e;
- if (sc->ti_rdata->ti_event_ring == NULL)
+ if (sc->ti_rdata.ti_event_ring == NULL)
return;
+ bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag,
+ sc->ti_cdata.ti_event_ring_map, BUS_DMASYNC_POSTREAD);
while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
- e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
+ e = &sc->ti_rdata.ti_event_ring[sc->ti_ev_saved_considx];
switch (TI_EVENT_EVENT(e)) {
case TI_EV_LINKSTAT_CHANGED:
sc->ti_linkstat = TI_EVENT_CODE(e);
- if (sc->ti_linkstat == TI_EV_CODE_LINK_UP)
- device_printf(sc->ti_dev, "10/100 link up\n");
- else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP)
- device_printf(sc->ti_dev, "gigabit link up\n");
- else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN)
- device_printf(sc->ti_dev, "link down\n");
+ if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
+ if_link_state_change(sc->ti_ifp, LINK_STATE_UP);
+ sc->ti_ifp->if_baudrate = IF_Mbps(100);
+ if (bootverbose)
+ device_printf(sc->ti_dev,
+ "10/100 link up\n");
+ } else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
+ if_link_state_change(sc->ti_ifp, LINK_STATE_UP);
+ sc->ti_ifp->if_baudrate = IF_Gbps(1UL);
+ if (bootverbose)
+ device_printf(sc->ti_dev,
+ "gigabit link up\n");
+ } else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
+ if_link_state_change(sc->ti_ifp,
+ LINK_STATE_DOWN);
+ sc->ti_ifp->if_baudrate = 0;
+ if (bootverbose)
+ device_printf(sc->ti_dev,
+ "link down\n");
+ }
break;
case TI_EV_ERROR:
if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
@@ -992,298 +973,453 @@ ti_handle_events(sc)
TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
}
+ bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag,
+ sc->ti_cdata.ti_event_ring_map, BUS_DMASYNC_PREREAD);
+}
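
The new bus_dmamap_sync() calls bracket the consumer loop: POSTREAD before the CPU inspects device-written descriptors, PREREAD once it is done and the memory goes back to the NIC. The generic shape, as a sketch (ring_consume is illustrative, not part of the patch):

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

static void
ring_consume(bus_dma_tag_t tag, bus_dmamap_t map,
    void (*walk)(void *arg), void *arg)
{
	/* Make the device's descriptor writes visible to the CPU. */
	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
	walk(arg);	/* consume descriptors up to the producer index */
	/* Hand the memory back for further device DMA. */
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
}
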
+
+struct ti_dmamap_arg {
+ bus_addr_t ti_busaddr;
+};
+
+static void
+ti_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ struct ti_dmamap_arg *ctx;
+
+ if (error)
+ return;
+
+ KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
+
+ ctx = arg;
+ ctx->ti_busaddr = segs->ds_addr;
}
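
ti_dma_map_addr() is the stock single-segment load callback. Because the loads below pass BUS_DMA_NOWAIT, bus_dmamap_load() invokes the callback synchronously and never defers with EINPROGRESS, so the captured address is valid the moment the call succeeds. A sketch of that property in use (load_one_seg is illustrative, not part of the patch):

static int
load_one_seg(bus_dma_tag_t tag, bus_dmamap_t map, void *buf,
    bus_size_t len, bus_addr_t *paddr)
{
	struct ti_dmamap_arg ctx;
	int error;

	ctx.ti_busaddr = 0;
	/* BUS_DMA_NOWAIT: the callback runs before this call returns. */
	error = bus_dmamap_load(tag, map, buf, len, ti_dma_map_addr,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);
	*paddr = ctx.ti_busaddr;
	return (0);
}
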
static int
-ti_alloc_dmamaps(struct ti_softc *sc)
+ti_dma_ring_alloc(struct ti_softc *sc, bus_size_t alignment, bus_size_t maxsize,
+ bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
+ const char *msg)
{
- int i;
+ struct ti_dmamap_arg ctx;
+ int error;
- for (i = 0; i < TI_TX_RING_CNT; i++) {
- sc->ti_cdata.ti_txdesc[i].tx_m = NULL;
- sc->ti_cdata.ti_txdesc[i].tx_dmamap = 0;
- if (bus_dmamap_create(sc->ti_mbuftx_dmat, 0,
- &sc->ti_cdata.ti_txdesc[i].tx_dmamap))
- return (ENOBUFS);
- }
- for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
- if (bus_dmamap_create(sc->ti_mbufrx_dmat, 0,
- &sc->ti_cdata.ti_rx_std_maps[i]))
- return (ENOBUFS);
+ error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag,
+ alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
+ NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
+ if (error != 0) {
+ device_printf(sc->ti_dev,
+ "could not create %s dma tag\n", msg);
+ return (error);
}
-
- for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
- if (bus_dmamap_create(sc->ti_jumbo_dmat, 0,
- &sc->ti_cdata.ti_rx_jumbo_maps[i]))
- return (ENOBUFS);
+ /* Allocate DMA'able memory for ring. */
+ error = bus_dmamem_alloc(*tag, (void **)ring,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
+ if (error != 0) {
+ device_printf(sc->ti_dev,
+ "could not allocate DMA'able memory for %s\n", msg);
+ return (error);
}
- for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
- if (bus_dmamap_create(sc->ti_mbufrx_dmat, 0,
- &sc->ti_cdata.ti_rx_mini_maps[i]))
- return (ENOBUFS);
+ /* Load the address of the ring. */
+ ctx.ti_busaddr = 0;
+ error = bus_dmamap_load(*tag, *map, *ring, maxsize, ti_dma_map_addr,
+ &ctx, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ device_printf(sc->ti_dev,
+ "could not load DMA'able memory for %s\n", msg);
+ return (error);
}
-
+ *paddr = ctx.ti_busaddr;
return (0);
}
static void
-ti_free_dmamaps(struct ti_softc *sc)
+ti_dma_ring_free(struct ti_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
+ bus_dmamap_t *map)
{
- int i;
- if (sc->ti_mbuftx_dmat)
- for (i = 0; i < TI_TX_RING_CNT; i++)
- if (sc->ti_cdata.ti_txdesc[i].tx_dmamap) {
- bus_dmamap_destroy(sc->ti_mbuftx_dmat,
- sc->ti_cdata.ti_txdesc[i].tx_dmamap);
- sc->ti_cdata.ti_txdesc[i].tx_dmamap = 0;
- }
+ if (*map != NULL)
+ bus_dmamap_unload(*tag, *map);
+ if (*map != NULL && *ring != NULL) {
+ bus_dmamem_free(*tag, *ring, *map);
+ *ring = NULL;
+ *map = NULL;
+ }
+ if (*tag) {
+ bus_dma_tag_destroy(*tag);
+ *tag = NULL;
+ }
+}
- if (sc->ti_mbufrx_dmat)
- for (i = 0; i < TI_STD_RX_RING_CNT; i++)
- if (sc->ti_cdata.ti_rx_std_maps[i]) {
- bus_dmamap_destroy(sc->ti_mbufrx_dmat,
- sc->ti_cdata.ti_rx_std_maps[i]);
- sc->ti_cdata.ti_rx_std_maps[i] = 0;
- }
+static int
+ti_dma_alloc(struct ti_softc *sc)
+{
+ bus_addr_t lowaddr;
+ int i, error;
+
+ lowaddr = BUS_SPACE_MAXADDR;
+ if (sc->ti_dac == 0)
+ lowaddr = BUS_SPACE_MAXADDR_32BIT;
+
+ error = bus_dma_tag_create(bus_get_dma_tag(sc->ti_dev), 1, 0, lowaddr,
+ BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
+ BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
+ &sc->ti_cdata.ti_parent_tag);
+ if (error != 0) {
+ device_printf(sc->ti_dev,
+ "could not allocate parent dma tag\n");
+ return (ENOMEM);
+ }
- if (sc->ti_jumbo_dmat)
- for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++)
- if (sc->ti_cdata.ti_rx_jumbo_maps[i]) {
- bus_dmamap_destroy(sc->ti_jumbo_dmat,
- sc->ti_cdata.ti_rx_jumbo_maps[i]);
- sc->ti_cdata.ti_rx_jumbo_maps[i] = 0;
- }
- if (sc->ti_mbufrx_dmat)
- for (i = 0; i < TI_MINI_RX_RING_CNT; i++)
- if (sc->ti_cdata.ti_rx_mini_maps[i]) {
- bus_dmamap_destroy(sc->ti_mbufrx_dmat,
- sc->ti_cdata.ti_rx_mini_maps[i]);
- sc->ti_cdata.ti_rx_mini_maps[i] = 0;
- }
-}
+ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, sizeof(struct ti_gib),
+ &sc->ti_cdata.ti_gib_tag, (uint8_t **)&sc->ti_rdata.ti_info,
+ &sc->ti_cdata.ti_gib_map, &sc->ti_rdata.ti_info_paddr, "GIB");
+ if (error)
+ return (error);
-#ifdef TI_PRIVATE_JUMBOS
+ /* Producer/consumer status */
+ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, sizeof(struct ti_status),
+ &sc->ti_cdata.ti_status_tag, (uint8_t **)&sc->ti_rdata.ti_status,
+ &sc->ti_cdata.ti_status_map, &sc->ti_rdata.ti_status_paddr,
+ "event ring");
+ if (error)
+ return (error);
-/*
- * Memory management for the jumbo receive ring is a pain in the
- * butt. We need to allocate at least 9018 bytes of space per frame,
- * _and_ it has to be contiguous (unless you use the extended
- * jumbo descriptor format). Using malloc() all the time won't
- * work: malloc() allocates memory in powers of two, which means we
- * would end up wasting a considerable amount of space by allocating
- * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have
- * to do our own memory management.
- *
- * The driver needs to allocate a contiguous chunk of memory at boot
- * time. We then chop this up ourselves into 9K pieces and use them
- * as external mbuf storage.
- *
- * One issue here is how much memory to allocate. The jumbo ring has
- * 256 slots in it, but at 9K per slot than can consume over 2MB of
- * RAM. This is a bit much, especially considering we also need
- * RAM for the standard ring and mini ring (on the Tigon 2). To
- * save space, we only actually allocate enough memory for 64 slots
- * by default, which works out to between 500 and 600K. This can
- * be tuned by changing a #define in if_tireg.h.
- */
+ /* Event ring */
+ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_EVENT_RING_SZ,
+ &sc->ti_cdata.ti_event_ring_tag,
+ (uint8_t **)&sc->ti_rdata.ti_event_ring,
+ &sc->ti_cdata.ti_event_ring_map, &sc->ti_rdata.ti_event_ring_paddr,
+ "event ring");
+ if (error)
+ return (error);
-static int
-ti_alloc_jumbo_mem(sc)
- struct ti_softc *sc;
-{
- caddr_t ptr;
- int i;
- struct ti_jpool_entry *entry;
+ /* Command ring lives in shared memory so no need to create DMA area. */
+ /* Standard RX ring */
+ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_STD_RX_RING_SZ,
+ &sc->ti_cdata.ti_rx_std_ring_tag,
+ (uint8_t **)&sc->ti_rdata.ti_rx_std_ring,
+ &sc->ti_cdata.ti_rx_std_ring_map,
+ &sc->ti_rdata.ti_rx_std_ring_paddr, "RX ring");
+ if (error)
+ return (error);
+
+ /* Jumbo RX ring */
+ error = ti_dma_ring_alloc(sc, TI_JUMBO_RING_ALIGN, TI_JUMBO_RX_RING_SZ,
+ &sc->ti_cdata.ti_rx_jumbo_ring_tag,
+ (uint8_t **)&sc->ti_rdata.ti_rx_jumbo_ring,
+ &sc->ti_cdata.ti_rx_jumbo_ring_map,
+ &sc->ti_rdata.ti_rx_jumbo_ring_paddr, "jumbo RX ring");
+ if (error)
+ return (error);
+
+ /* RX return ring */
+ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_RX_RETURN_RING_SZ,
+ &sc->ti_cdata.ti_rx_return_ring_tag,
+ (uint8_t **)&sc->ti_rdata.ti_rx_return_ring,
+ &sc->ti_cdata.ti_rx_return_ring_map,
+ &sc->ti_rdata.ti_rx_return_ring_paddr, "RX return ring");
+ if (error)
+ return (error);
+
+ /* Create DMA tag for standard RX mbufs. */
+ error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
+ MCLBYTES, 0, NULL, NULL, &sc->ti_cdata.ti_rx_std_tag);
+ if (error) {
+ device_printf(sc->ti_dev, "could not allocate RX dma tag\n");
+ return (error);
+ }
+
+ /* Create DMA tag for jumbo RX mbufs. */
+#ifdef TI_SF_BUF_JUMBO
/*
- * Grab a big chunk o' storage. Since we are chopping this pool up
- * into ~9k chunks, there doesn't appear to be a need to use page
- * alignment.
+ * The VM system will take care of providing aligned pages. Alignment
+ * is set to 1 here so that busdma resources won't be wasted.
*/
- if (bus_dma_tag_create(sc->ti_parent_dmat, /* parent */
- 1, 0, /* algnmnt, boundary */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- TI_JMEM, /* maxsize */
- 1, /* nsegments */
- TI_JMEM, /* maxsegsize */
- 0, /* flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->ti_jumbo_dmat) != 0) {
- device_printf(sc->ti_dev, "Failed to allocate jumbo dmat\n");
- return (ENOBUFS);
+ error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE * 4, 4,
+ PAGE_SIZE, 0, NULL, NULL, &sc->ti_cdata.ti_rx_jumbo_tag);
+#else
+ error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, 1,
+ MJUM9BYTES, 0, NULL, NULL, &sc->ti_cdata.ti_rx_jumbo_tag);
+#endif
+ if (error) {
+ device_printf(sc->ti_dev,
+ "could not allocate jumbo RX dma tag\n");
+ return (error);
}
- if (bus_dmamem_alloc(sc->ti_jumbo_dmat,
- (void**)&sc->ti_cdata.ti_jumbo_buf,
- BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
- &sc->ti_jumbo_dmamap) != 0) {
- device_printf(sc->ti_dev, "Failed to allocate jumbo memory\n");
- return (ENOBUFS);
+ /* Create DMA tag for TX mbufs. */
+ error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1,
+ 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ MCLBYTES * TI_MAXTXSEGS, TI_MAXTXSEGS, MCLBYTES, 0, NULL, NULL,
+ &sc->ti_cdata.ti_tx_tag);
+ if (error) {
+ device_printf(sc->ti_dev, "could not allocate TX dma tag\n");
+ return (ENOMEM);
}
- SLIST_INIT(&sc->ti_jfree_listhead);
- SLIST_INIT(&sc->ti_jinuse_listhead);
+ /* Create DMA maps for RX buffers. */
+ for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
+ error = bus_dmamap_create(sc->ti_cdata.ti_rx_std_tag, 0,
+ &sc->ti_cdata.ti_rx_std_maps[i]);
+ if (error) {
+ device_printf(sc->ti_dev,
+ "could not create DMA map for RX\n");
+ return (error);
+ }
+ }
+ error = bus_dmamap_create(sc->ti_cdata.ti_rx_std_tag, 0,
+ &sc->ti_cdata.ti_rx_std_sparemap);
+ if (error) {
+ device_printf(sc->ti_dev,
+ "could not create spare DMA map for RX\n");
+ return (error);
+ }
- /*
- * Now divide it up into 9K pieces and save the addresses
- * in an array.
- */
- ptr = sc->ti_cdata.ti_jumbo_buf;
- for (i = 0; i < TI_JSLOTS; i++) {
- sc->ti_cdata.ti_jslots[i] = ptr;
- ptr += TI_JLEN;
- entry = malloc(sizeof(struct ti_jpool_entry),
- M_DEVBUF, M_NOWAIT);
- if (entry == NULL) {
- device_printf(sc->ti_dev, "no memory for jumbo "
- "buffer queue!\n");
- return (ENOBUFS);
+ /* Create DMA maps for jumbo RX buffers. */
+ for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
+ error = bus_dmamap_create(sc->ti_cdata.ti_rx_jumbo_tag, 0,
+ &sc->ti_cdata.ti_rx_jumbo_maps[i]);
+ if (error) {
+ device_printf(sc->ti_dev,
+ "could not create DMA map for jumbo RX\n");
+ return (error);
}
- entry->slot = i;
- SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
+ }
+ error = bus_dmamap_create(sc->ti_cdata.ti_rx_jumbo_tag, 0,
+ &sc->ti_cdata.ti_rx_jumbo_sparemap);
+ if (error) {
+ device_printf(sc->ti_dev,
+ "could not create spare DMA map for jumbo RX\n");
+ return (error);
}
- return (0);
-}
+ /* Create DMA maps for TX buffers. */
+ for (i = 0; i < TI_TX_RING_CNT; i++) {
+ error = bus_dmamap_create(sc->ti_cdata.ti_tx_tag, 0,
+ &sc->ti_cdata.ti_txdesc[i].tx_dmamap);
+ if (error) {
+ device_printf(sc->ti_dev,
+ "could not create DMA map for TX\n");
+ return (ENOMEM);
+ }
+ }
-/*
- * Allocate a jumbo buffer.
- */
-static void *ti_jalloc(sc)
- struct ti_softc *sc;
-{
- struct ti_jpool_entry *entry;
+ /* Mini ring and TX ring are not available on Tigon 1. */
+ if (sc->ti_hwrev == TI_HWREV_TIGON)
+ return (0);
- entry = SLIST_FIRST(&sc->ti_jfree_listhead);
+ /* TX ring */
+ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_TX_RING_SZ,
+ &sc->ti_cdata.ti_tx_ring_tag, (uint8_t **)&sc->ti_rdata.ti_tx_ring,
+ &sc->ti_cdata.ti_tx_ring_map, &sc->ti_rdata.ti_tx_ring_paddr,
+ "TX ring");
+ if (error)
+ return (error);
+
+ /* Mini RX ring */
+ error = ti_dma_ring_alloc(sc, TI_RING_ALIGN, TI_MINI_RX_RING_SZ,
+ &sc->ti_cdata.ti_rx_mini_ring_tag,
+ (uint8_t **)&sc->ti_rdata.ti_rx_mini_ring,
+ &sc->ti_cdata.ti_rx_mini_ring_map,
+ &sc->ti_rdata.ti_rx_mini_ring_paddr, "mini RX ring");
+ if (error)
+ return (error);
+
+ /* Create DMA tag for mini RX mbufs. */
+ error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, 1, 0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
+ MHLEN, 0, NULL, NULL, &sc->ti_cdata.ti_rx_mini_tag);
+ if (error) {
+ device_printf(sc->ti_dev,
+ "could not allocate mini RX dma tag\n");
+ return (error);
+ }
- if (entry == NULL) {
- device_printf(sc->ti_dev, "no free jumbo buffers\n");
- return (NULL);
+ /* Create DMA maps for mini RX buffers. */
+ for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
+ error = bus_dmamap_create(sc->ti_cdata.ti_rx_mini_tag, 0,
+ &sc->ti_cdata.ti_rx_mini_maps[i]);
+ if (error) {
+ device_printf(sc->ti_dev,
+ "could not create DMA map for mini RX\n");
+ return (error);
+ }
+ }
+ error = bus_dmamap_create(sc->ti_cdata.ti_rx_mini_tag, 0,
+ &sc->ti_cdata.ti_rx_mini_sparemap);
+ if (error) {
+ device_printf(sc->ti_dev,
+ "could not create spare DMA map for mini RX\n");
+ return (error);
}
- SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
- SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
- return (sc->ti_cdata.ti_jslots[entry->slot]);
+ return (0);
}
-/*
- * Release a jumbo buffer.
- */
static void
-ti_jfree(buf, args)
- void *buf;
- void *args;
+ti_dma_free(struct ti_softc *sc)
{
- struct ti_softc *sc;
- int i;
- struct ti_jpool_entry *entry;
-
- /* Extract the softc struct pointer. */
- sc = (struct ti_softc *)args;
-
- if (sc == NULL)
- panic("ti_jfree: didn't get softc pointer!");
-
- /* calculate the slot this buffer belongs to */
- i = ((vm_offset_t)buf
- - (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
-
- if ((i < 0) || (i >= TI_JSLOTS))
- panic("ti_jfree: asked to free buffer that we don't manage!");
-
- entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
- if (entry == NULL)
- panic("ti_jfree: buffer not in use!");
- entry->slot = i;
- SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
- SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
-}
+ int i;
-#else
+ /* Destroy DMA maps for RX buffers. */
+ for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
+ if (sc->ti_cdata.ti_rx_std_maps[i]) {
+ bus_dmamap_destroy(sc->ti_cdata.ti_rx_std_tag,
+ sc->ti_cdata.ti_rx_std_maps[i]);
+ sc->ti_cdata.ti_rx_std_maps[i] = NULL;
+ }
+ }
+ if (sc->ti_cdata.ti_rx_std_sparemap) {
+ bus_dmamap_destroy(sc->ti_cdata.ti_rx_std_tag,
+ sc->ti_cdata.ti_rx_std_sparemap);
+ sc->ti_cdata.ti_rx_std_sparemap = NULL;
+ }
+ if (sc->ti_cdata.ti_rx_std_tag) {
+ bus_dma_tag_destroy(sc->ti_cdata.ti_rx_std_tag);
+ sc->ti_cdata.ti_rx_std_tag = NULL;
+ }
-static int
-ti_alloc_jumbo_mem(sc)
- struct ti_softc *sc;
-{
+ /* Destroy DMA maps for jumbo RX buffers. */
+ for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
+ if (sc->ti_cdata.ti_rx_jumbo_maps[i]) {
+ bus_dmamap_destroy(sc->ti_cdata.ti_rx_jumbo_tag,
+ sc->ti_cdata.ti_rx_jumbo_maps[i]);
+ sc->ti_cdata.ti_rx_jumbo_maps[i] = NULL;
+ }
+ }
+ if (sc->ti_cdata.ti_rx_jumbo_sparemap) {
+ bus_dmamap_destroy(sc->ti_cdata.ti_rx_jumbo_tag,
+ sc->ti_cdata.ti_rx_jumbo_sparemap);
+ sc->ti_cdata.ti_rx_jumbo_sparemap = NULL;
+ }
+ if (sc->ti_cdata.ti_rx_jumbo_tag) {
+ bus_dma_tag_destroy(sc->ti_cdata.ti_rx_jumbo_tag);
+ sc->ti_cdata.ti_rx_jumbo_tag = NULL;
+ }
- /*
- * The VM system will take care of providing aligned pages. Alignment
- * is set to 1 here so that busdma resources won't be wasted.
- */
- if (bus_dma_tag_create(sc->ti_parent_dmat, /* parent */
- 1, 0, /* algnmnt, boundary */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- PAGE_SIZE * 4 /*XXX*/, /* maxsize */
- 4, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
- 0, /* flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->ti_jumbo_dmat) != 0) {
- device_printf(sc->ti_dev, "Failed to allocate jumbo dmat\n");
- return (ENOBUFS);
+ /* Destroy DMA maps for mini RX buffers. */
+ for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
+ if (sc->ti_cdata.ti_rx_mini_maps[i]) {
+ bus_dmamap_destroy(sc->ti_cdata.ti_rx_mini_tag,
+ sc->ti_cdata.ti_rx_mini_maps[i]);
+ sc->ti_cdata.ti_rx_mini_maps[i] = NULL;
+ }
+ }
+ if (sc->ti_cdata.ti_rx_mini_sparemap) {
+ bus_dmamap_destroy(sc->ti_cdata.ti_rx_mini_tag,
+ sc->ti_cdata.ti_rx_mini_sparemap);
+ sc->ti_cdata.ti_rx_mini_sparemap = NULL;
+ }
+ if (sc->ti_cdata.ti_rx_mini_tag) {
+ bus_dma_tag_destroy(sc->ti_cdata.ti_rx_mini_tag);
+ sc->ti_cdata.ti_rx_mini_tag = NULL;
}
- return (0);
+ /* Destroy DMA maps for TX buffers. */
+ for (i = 0; i < TI_TX_RING_CNT; i++) {
+ if (sc->ti_cdata.ti_txdesc[i].tx_dmamap) {
+ bus_dmamap_destroy(sc->ti_cdata.ti_tx_tag,
+ sc->ti_cdata.ti_txdesc[i].tx_dmamap);
+ sc->ti_cdata.ti_txdesc[i].tx_dmamap = NULL;
+ }
+ }
+ if (sc->ti_cdata.ti_tx_tag) {
+ bus_dma_tag_destroy(sc->ti_cdata.ti_tx_tag);
+ sc->ti_cdata.ti_tx_tag = NULL;
+ }
+
+ /* Destroy standard RX ring. */
+ ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_std_ring_tag,
+ (void *)&sc->ti_rdata.ti_rx_std_ring,
+ &sc->ti_cdata.ti_rx_std_ring_map);
+ /* Destroy jumbo RX ring. */
+ ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_jumbo_ring_tag,
+ (void *)&sc->ti_rdata.ti_rx_jumbo_ring,
+ &sc->ti_cdata.ti_rx_jumbo_ring_map);
+ /* Destroy mini RX ring. */
+ ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_mini_ring_tag,
+ (void *)&sc->ti_rdata.ti_rx_mini_ring,
+ &sc->ti_cdata.ti_rx_mini_ring_map);
+ /* Destroy RX return ring. */
+ ti_dma_ring_free(sc, &sc->ti_cdata.ti_rx_return_ring_tag,
+ (void *)&sc->ti_rdata.ti_rx_return_ring,
+ &sc->ti_cdata.ti_rx_return_ring_map);
+ /* Destroy TX ring. */
+ ti_dma_ring_free(sc, &sc->ti_cdata.ti_tx_ring_tag,
+ (void *)&sc->ti_rdata.ti_tx_ring, &sc->ti_cdata.ti_tx_ring_map);
+ /* Destroy status block. */
+ ti_dma_ring_free(sc, &sc->ti_cdata.ti_status_tag,
+ (void *)&sc->ti_rdata.ti_status, &sc->ti_cdata.ti_status_map);
+ /* Destroy event ring. */
+ ti_dma_ring_free(sc, &sc->ti_cdata.ti_event_ring_tag,
+ (void *)&sc->ti_rdata.ti_event_ring,
+ &sc->ti_cdata.ti_event_ring_map);
+ /* Destroy GIB */
+ ti_dma_ring_free(sc, &sc->ti_cdata.ti_gib_tag,
+ (void *)&sc->ti_rdata.ti_info, &sc->ti_cdata.ti_gib_map);
+
+ /* Destroy the parent tag. */
+ if (sc->ti_cdata.ti_parent_tag) {
+ bus_dma_tag_destroy(sc->ti_cdata.ti_parent_tag);
+ sc->ti_cdata.ti_parent_tag = NULL;
+ }
}
-#endif /* TI_PRIVATE_JUMBOS */
-
/*
* Initialize a standard receive ring descriptor.
*/
static int
-ti_newbuf_std(sc, i, m)
- struct ti_softc *sc;
- int i;
- struct mbuf *m;
-{
- bus_dmamap_t map;
- bus_dma_segment_t segs;
- struct mbuf *m_new = NULL;
- struct ti_rx_desc *r;
- int nsegs;
-
- nsegs = 0;
- if (m == NULL) {
- MGETHDR(m_new, M_DONTWAIT, MT_DATA);
- if (m_new == NULL)
- return (ENOBUFS);
+ti_newbuf_std(struct ti_softc *sc, int i)
+{
+ bus_dmamap_t map;
+ bus_dma_segment_t segs[1];
+ struct mbuf *m;
+ struct ti_rx_desc *r;
+ int error, nsegs;
+
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ return (ENOBUFS);
+ m->m_len = m->m_pkthdr.len = MCLBYTES;
+ m_adj(m, ETHER_ALIGN);
- MCLGET(m_new, M_DONTWAIT);
- if (!(m_new->m_flags & M_EXT)) {
- m_freem(m_new);
- return (ENOBUFS);
- }
- m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
- } else {
- m_new = m;
- m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
- m_new->m_data = m_new->m_ext.ext_buf;
+ error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_std_tag,
+ sc->ti_cdata.ti_rx_std_sparemap, m, segs, &nsegs, 0);
+ if (error != 0) {
+ m_freem(m);
+ return (error);
+ }
+ KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
+
+ if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag,
+ sc->ti_cdata.ti_rx_std_maps[i], BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->ti_cdata.ti_rx_std_tag,
+ sc->ti_cdata.ti_rx_std_maps[i]);
}
- m_adj(m_new, ETHER_ALIGN);
- sc->ti_cdata.ti_rx_std_chain[i] = m_new;
- r = &sc->ti_rdata->ti_rx_std_ring[i];
map = sc->ti_cdata.ti_rx_std_maps[i];
- if (bus_dmamap_load_mbuf_sg(sc->ti_mbufrx_dmat, map, m_new, &segs,
- &nsegs, 0))
- return (ENOBUFS);
- if (nsegs != 1)
- return (ENOBUFS);
- ti_hostaddr64(&r->ti_addr, segs.ds_addr);
- r->ti_len = segs.ds_len;
+ sc->ti_cdata.ti_rx_std_maps[i] = sc->ti_cdata.ti_rx_std_sparemap;
+ sc->ti_cdata.ti_rx_std_sparemap = map;
+ sc->ti_cdata.ti_rx_std_chain[i] = m;
+
+ r = &sc->ti_rdata.ti_rx_std_ring[i];
+ ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
+ r->ti_len = segs[0].ds_len;
r->ti_type = TI_BDTYPE_RECV_BD;
r->ti_flags = 0;
- if (sc->ti_ifp->if_hwassist)
+ r->ti_vlan_tag = 0;
+ r->ti_tcp_udp_cksum = 0;
+ if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
r->ti_idx = i;
- bus_dmamap_sync(sc->ti_mbufrx_dmat, map, BUS_DMASYNC_PREREAD);
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag,
+ sc->ti_cdata.ti_rx_std_maps[i], BUS_DMASYNC_PREREAD);
return (0);
}
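
ti_newbuf_std() now loads the replacement mbuf into a dedicated spare map before touching the ring slot, so a failed allocation or load leaves the old, still-mapped buffer in place; only on success are the slot map and spare map swapped. A condensed sketch of that refill discipline (rx_refill is illustrative; the patch's function also programs the ring descriptor):

static int
rx_refill(struct ti_softc *sc, int i)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct mbuf *m;
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	/* Load into the spare map first; failure leaves the slot intact. */
	error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_std_tag,
	    sc->ti_cdata.ti_rx_std_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);	/* old, still-mapped buffer stays in use */
		return (error);
	}

	/* Success: retire the old mapping and swap spare and slot maps. */
	if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
		bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag,
		    sc->ti_cdata.ti_rx_std_maps[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ti_cdata.ti_rx_std_tag,
		    sc->ti_cdata.ti_rx_std_maps[i]);
	}
	map = sc->ti_cdata.ti_rx_std_maps[i];
	sc->ti_cdata.ti_rx_std_maps[i] = sc->ti_cdata.ti_rx_std_sparemap;
	sc->ti_cdata.ti_rx_std_sparemap = map;
	sc->ti_cdata.ti_rx_std_chain[i] = m;
	return (0);
}
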
@@ -1292,117 +1428,112 @@ ti_newbuf_std(sc, i, m)
* the Tigon 2.
*/
static int
-ti_newbuf_mini(sc, i, m)
- struct ti_softc *sc;
- int i;
- struct mbuf *m;
-{
- bus_dma_segment_t segs;
- bus_dmamap_t map;
- struct mbuf *m_new = NULL;
- struct ti_rx_desc *r;
- int nsegs;
-
- nsegs = 0;
- if (m == NULL) {
- MGETHDR(m_new, M_DONTWAIT, MT_DATA);
- if (m_new == NULL) {
- return (ENOBUFS);
- }
- m_new->m_len = m_new->m_pkthdr.len = MHLEN;
- } else {
- m_new = m;
- m_new->m_data = m_new->m_pktdat;
- m_new->m_len = m_new->m_pkthdr.len = MHLEN;
+ti_newbuf_mini(struct ti_softc *sc, int i)
+{
+ bus_dmamap_t map;
+ bus_dma_segment_t segs[1];
+ struct mbuf *m;
+ struct ti_rx_desc *r;
+ int error, nsegs;
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (ENOBUFS);
+ m->m_len = m->m_pkthdr.len = MHLEN;
+ m_adj(m, ETHER_ALIGN);
+
+ error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_mini_tag,
+ sc->ti_cdata.ti_rx_mini_sparemap, m, segs, &nsegs, 0);
+ if (error != 0) {
+ m_freem(m);
+ return (error);
+ }
+ KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
+
+ if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag,
+ sc->ti_cdata.ti_rx_mini_maps[i], BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->ti_cdata.ti_rx_mini_tag,
+ sc->ti_cdata.ti_rx_mini_maps[i]);
}
- m_adj(m_new, ETHER_ALIGN);
- r = &sc->ti_rdata->ti_rx_mini_ring[i];
- sc->ti_cdata.ti_rx_mini_chain[i] = m_new;
map = sc->ti_cdata.ti_rx_mini_maps[i];
- if (bus_dmamap_load_mbuf_sg(sc->ti_mbufrx_dmat, map, m_new, &segs,
- &nsegs, 0))
- return (ENOBUFS);
- if (nsegs != 1)
- return (ENOBUFS);
- ti_hostaddr64(&r->ti_addr, segs.ds_addr);
- r->ti_len = segs.ds_len;
+ sc->ti_cdata.ti_rx_mini_maps[i] = sc->ti_cdata.ti_rx_mini_sparemap;
+ sc->ti_cdata.ti_rx_mini_sparemap = map;
+ sc->ti_cdata.ti_rx_mini_chain[i] = m;
+
+ r = &sc->ti_rdata.ti_rx_mini_ring[i];
+ ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
+ r->ti_len = segs[0].ds_len;
r->ti_type = TI_BDTYPE_RECV_BD;
r->ti_flags = TI_BDFLAG_MINI_RING;
- if (sc->ti_ifp->if_hwassist)
+ r->ti_vlan_tag = 0;
+ r->ti_tcp_udp_cksum = 0;
+ if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
r->ti_idx = i;
- bus_dmamap_sync(sc->ti_mbufrx_dmat, map, BUS_DMASYNC_PREREAD);
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag,
+ sc->ti_cdata.ti_rx_mini_maps[i], BUS_DMASYNC_PREREAD);
return (0);
}
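Both refill paths call m_adj(m, ETHER_ALIGN) before loading the buffer; the point is alignment arithmetic. Skipping 2 bytes ahead of the 14-byte Ethernet header leaves the IP header on a 4-byte boundary, which is also why the mini descriptor advertises MHLEN - ETHER_ALIGN. A worked check (constants as in the standard headers; sketch only):

#include <assert.h>

#define ETHER_ALIGN	2	/* bytes skipped at the head of the buffer */
#define ETHER_HDR_LEN	14	/* dst(6) + src(6) + type(2) */

int
main(void)
{
	/* Payload offset from the (aligned) start of the cluster. */
	int ip_off = ETHER_ALIGN + ETHER_HDR_LEN;	/* 2 + 14 = 16 */

	assert(ip_off % 4 == 0);	/* IP header lands 4-byte aligned */
	return (0);
}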
-#ifdef TI_PRIVATE_JUMBOS
+#ifndef TI_SF_BUF_JUMBO
/*
* Initialize a jumbo receive ring descriptor. This allocates
 * a 9K mbuf cluster straight from the mbuf allocator.
*/
static int
-ti_newbuf_jumbo(sc, i, m)
- struct ti_softc *sc;
- int i;
- struct mbuf *m;
+ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *dummy)
{
- bus_dmamap_t map;
- struct mbuf *m_new = NULL;
- struct ti_rx_desc *r;
- int nsegs;
- bus_dma_segment_t segs;
+ bus_dmamap_t map;
+ bus_dma_segment_t segs[1];
+ struct mbuf *m;
+ struct ti_rx_desc *r;
+ int error, nsegs;
- if (m == NULL) {
- caddr_t *buf = NULL;
+ (void)dummy;
- /* Allocate the mbuf. */
- MGETHDR(m_new, M_DONTWAIT, MT_DATA);
- if (m_new == NULL) {
- return (ENOBUFS);
- }
+ m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
+ if (m == NULL)
+ return (ENOBUFS);
+ m->m_len = m->m_pkthdr.len = MJUM9BYTES;
+ m_adj(m, ETHER_ALIGN);
- /* Allocate the jumbo buffer */
- buf = ti_jalloc(sc);
- if (buf == NULL) {
- m_freem(m_new);
- device_printf(sc->ti_dev, "jumbo allocation failed "
- "-- packet dropped!\n");
- return (ENOBUFS);
- }
+ error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_jumbo_tag,
+ sc->ti_cdata.ti_rx_jumbo_sparemap, m, segs, &nsegs, 0);
+ if (error != 0) {
+ m_freem(m);
+ return (error);
+ }
+ KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
- /* Attach the buffer to the mbuf. */
- m_new->m_data = (void *) buf;
- m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
- MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, ti_jfree,
- (struct ti_softc *)sc, 0, EXT_NET_DRV);
- } else {
- m_new = m;
- m_new->m_data = m_new->m_ext.ext_buf;
- m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
+ if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag,
+ sc->ti_cdata.ti_rx_jumbo_maps[i], BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag,
+ sc->ti_cdata.ti_rx_jumbo_maps[i]);
}
- m_adj(m_new, ETHER_ALIGN);
- /* Set up the descriptor. */
- r = &sc->ti_rdata->ti_rx_jumbo_ring[i];
- sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new;
map = sc->ti_cdata.ti_rx_jumbo_maps[i];
- if (bus_dmamap_load_mbuf_sg(sc->ti_jumbo_dmat, map, m_new, &segs,
- &nsegs, 0))
- return (ENOBUFS);
- if (nsegs != 1)
- return (ENOBUFS);
- ti_hostaddr64(&r->ti_addr, segs.ds_addr);
- r->ti_len = segs.ds_len;
+ sc->ti_cdata.ti_rx_jumbo_maps[i] = sc->ti_cdata.ti_rx_jumbo_sparemap;
+ sc->ti_cdata.ti_rx_jumbo_sparemap = map;
+ sc->ti_cdata.ti_rx_jumbo_chain[i] = m;
+
+ r = &sc->ti_rdata.ti_rx_jumbo_ring[i];
+ ti_hostaddr64(&r->ti_addr, segs[0].ds_addr);
+ r->ti_len = segs[0].ds_len;
r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
r->ti_flags = TI_BDFLAG_JUMBO_RING;
- if (sc->ti_ifp->if_hwassist)
+ r->ti_vlan_tag = 0;
+ r->ti_tcp_udp_cksum = 0;
+ if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
r->ti_idx = i;
- bus_dmamap_sync(sc->ti_jumbo_dmat, map, BUS_DMASYNC_PREREAD);
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag,
+ sc->ti_cdata.ti_rx_jumbo_maps[i], BUS_DMASYNC_PREREAD);
return (0);
}
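The KASSERT(nsegs == 1) checks in the refill functions hold only if the RX tags are created with nsegments = 1 and maxsegsize equal to the buffer size; that setup lives in ti_dma_alloc(), outside this hunk. A hypothetical sketch of such a tag creation (not the commit's actual code):

	int error;

	/*
	 * With nsegments = 1 and maxsegsize == maxsize, a load of a
	 * single contiguous cluster can never return more than one
	 * segment, so the KASSERT can't fire.
	 */
	error = bus_dma_tag_create(sc->ti_cdata.ti_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM9BYTES,			/* maxsize */
	    1,				/* nsegments */
	    MJUM9BYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ti_cdata.ti_rx_jumbo_tag);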
@@ -1417,30 +1548,26 @@ ti_newbuf_jumbo(sc, i, m)
#define TCP_HDR_LEN (52 + sizeof(struct ether_header))
#define UDP_HDR_LEN (28 + sizeof(struct ether_header))
#define NFS_HDR_LEN (UDP_HDR_LEN)
-static int HDR_LEN = TCP_HDR_LEN;
-
+static int HDR_LEN = TCP_HDR_LEN;
/*
* Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer backed by VM pages wrapped in sf_buf(9) headers.
*/
static int
-ti_newbuf_jumbo(sc, idx, m_old)
- struct ti_softc *sc;
- int idx;
- struct mbuf *m_old;
-{
- bus_dmamap_t map;
- struct mbuf *cur, *m_new = NULL;
- struct mbuf *m[3] = {NULL, NULL, NULL};
- struct ti_rx_desc_ext *r;
- vm_page_t frame;
- static int color;
- /* 1 extra buf to make nobufs easy*/
- struct sf_buf *sf[3] = {NULL, NULL, NULL};
- int i;
- bus_dma_segment_t segs[4];
- int nsegs;
+ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old)
+{
+ bus_dmamap_t map;
+ struct mbuf *cur, *m_new = NULL;
+ struct mbuf *m[3] = {NULL, NULL, NULL};
+ struct ti_rx_desc_ext *r;
+ vm_page_t frame;
+ static int color;
+ /* 1 extra buf to make nobufs easy */
+ struct sf_buf *sf[3] = {NULL, NULL, NULL};
+ int i;
+ bus_dma_segment_t segs[4];
+ int nsegs;
if (m_old != NULL) {
m_new = m_old;
@@ -1517,11 +1644,11 @@ ti_newbuf_jumbo(sc, idx, m_old)
}
/* Set up the descriptor. */
- r = &sc->ti_rdata->ti_rx_jumbo_ring[idx];
+ r = &sc->ti_rdata.ti_rx_jumbo_ring[idx];
sc->ti_cdata.ti_rx_jumbo_chain[idx] = m_new;
map = sc->ti_cdata.ti_rx_jumbo_maps[i];
- if (bus_dmamap_load_mbuf_sg(sc->ti_jumbo_dmat, map, m_new, segs,
- &nsegs, 0))
+ if (bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_rx_jumbo_tag, map, m_new,
+ segs, &nsegs, 0))
return (ENOBUFS);
if ((nsegs < 1) || (nsegs > 4))
return (ENOBUFS);
@@ -1544,12 +1671,12 @@ ti_newbuf_jumbo(sc, idx, m_old)
r->ti_flags = TI_BDFLAG_JUMBO_RING|TI_RCB_FLAG_USE_EXT_RX_BD;
- if (sc->ti_ifp->if_hwassist)
+ if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM;
r->ti_idx = idx;
- bus_dmamap_sync(sc->ti_jumbo_dmat, map, BUS_DMASYNC_PREREAD);
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map, BUS_DMASYNC_PREREAD);
return (0);
nobufs:
@@ -1573,8 +1700,6 @@ nobufs:
}
#endif
-
-
/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
@@ -1582,151 +1707,151 @@ nobufs:
 * the NIC.
*/
static int
-ti_init_rx_ring_std(sc)
- struct ti_softc *sc;
+ti_init_rx_ring_std(struct ti_softc *sc)
{
- int i;
- struct ti_cmd_desc cmd;
+ int i;
+ struct ti_cmd_desc cmd;
- for (i = 0; i < TI_SSLOTS; i++) {
- if (ti_newbuf_std(sc, i, NULL) == ENOBUFS)
+ for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
+ if (ti_newbuf_std(sc, i) != 0)
return (ENOBUFS);
};
- TI_UPDATE_STDPROD(sc, i - 1);
- sc->ti_std = i - 1;
+ sc->ti_std = TI_STD_RX_RING_CNT - 1;
+ TI_UPDATE_STDPROD(sc, TI_STD_RX_RING_CNT - 1);
return (0);
}
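The sizing remark in the comment above is simple arithmetic: 512 descriptors, each backed by a 2 KB mbuf cluster, pin down 1 MB of receive buffers. Note that the rewritten loop now fills all TI_STD_RX_RING_CNT slots, not just the first 256 the old comment describes. A quick check:

#include <stdio.h>

#define TI_STD_RX_RING_CNT	512	/* descriptors in the standard ring */
#define MCLBYTES		2048	/* bytes per mbuf cluster */

int
main(void)
{
	/* 512 * 2048 = 1048576 bytes = 1024 KB = 1 MB. */
	printf("%d KB\n", TI_STD_RX_RING_CNT * MCLBYTES / 1024);
	return (0);
}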
static void
-ti_free_rx_ring_std(sc)
- struct ti_softc *sc;
+ti_free_rx_ring_std(struct ti_softc *sc)
{
- bus_dmamap_t map;
- int i;
+ bus_dmamap_t map;
+ int i;
for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
map = sc->ti_cdata.ti_rx_std_maps[i];
- bus_dmamap_sync(sc->ti_mbufrx_dmat, map,
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_std_tag, map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->ti_mbufrx_dmat, map);
+ bus_dmamap_unload(sc->ti_cdata.ti_rx_std_tag, map);
m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
sc->ti_cdata.ti_rx_std_chain[i] = NULL;
}
- bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i],
- sizeof(struct ti_rx_desc));
}
+ bzero(sc->ti_rdata.ti_rx_std_ring, TI_STD_RX_RING_SZ);
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag,
+ sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
}
static int
-ti_init_rx_ring_jumbo(sc)
- struct ti_softc *sc;
+ti_init_rx_ring_jumbo(struct ti_softc *sc)
{
- int i;
- struct ti_cmd_desc cmd;
+ struct ti_cmd_desc cmd;
+ int i;
for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
- if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
+ if (ti_newbuf_jumbo(sc, i, NULL) != 0)
return (ENOBUFS);
};
- TI_UPDATE_JUMBOPROD(sc, i - 1);
- sc->ti_jumbo = i - 1;
+ sc->ti_jumbo = TI_JUMBO_RX_RING_CNT - 1;
+ TI_UPDATE_JUMBOPROD(sc, TI_JUMBO_RX_RING_CNT - 1);
return (0);
}
static void
-ti_free_rx_ring_jumbo(sc)
- struct ti_softc *sc;
+ti_free_rx_ring_jumbo(struct ti_softc *sc)
{
- bus_dmamap_t map;
- int i;
+ bus_dmamap_t map;
+ int i;
for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
map = sc->ti_cdata.ti_rx_jumbo_maps[i];
- bus_dmamap_sync(sc->ti_jumbo_dmat, map,
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->ti_jumbo_dmat, map);
+ bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag, map);
m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
}
- bzero((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i],
- sizeof(struct ti_rx_desc));
}
+ bzero(sc->ti_rdata.ti_rx_jumbo_ring, TI_JUMBO_RX_RING_SZ);
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag,
+ sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
}
static int
-ti_init_rx_ring_mini(sc)
- struct ti_softc *sc;
+ti_init_rx_ring_mini(struct ti_softc *sc)
{
- int i;
+ int i;
- for (i = 0; i < TI_MSLOTS; i++) {
- if (ti_newbuf_mini(sc, i, NULL) == ENOBUFS)
+ for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
+ if (ti_newbuf_mini(sc, i) != 0)
return (ENOBUFS);
};
- TI_UPDATE_MINIPROD(sc, i - 1);
- sc->ti_mini = i - 1;
+ sc->ti_mini = TI_MINI_RX_RING_CNT - 1;
+ TI_UPDATE_MINIPROD(sc, TI_MINI_RX_RING_CNT - 1);
return (0);
}
static void
-ti_free_rx_ring_mini(sc)
- struct ti_softc *sc;
+ti_free_rx_ring_mini(struct ti_softc *sc)
{
- bus_dmamap_t map;
- int i;
+ bus_dmamap_t map;
+ int i;
+
+ if (sc->ti_rdata.ti_rx_mini_ring == NULL)
+ return;
for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
map = sc->ti_cdata.ti_rx_mini_maps[i];
- bus_dmamap_sync(sc->ti_mbufrx_dmat, map,
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_tag, map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->ti_mbufrx_dmat, map);
+ bus_dmamap_unload(sc->ti_cdata.ti_rx_mini_tag, map);
m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
}
- bzero((char *)&sc->ti_rdata->ti_rx_mini_ring[i],
- sizeof(struct ti_rx_desc));
}
+ bzero(sc->ti_rdata.ti_rx_mini_ring, TI_MINI_RX_RING_SZ);
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag,
+ sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_PREWRITE);
}
static void
-ti_free_tx_ring(sc)
- struct ti_softc *sc;
+ti_free_tx_ring(struct ti_softc *sc)
{
- struct ti_txdesc *txd;
- int i;
+ struct ti_txdesc *txd;
+ int i;
- if (sc->ti_rdata->ti_tx_ring == NULL)
+ if (sc->ti_rdata.ti_tx_ring == NULL)
return;
for (i = 0; i < TI_TX_RING_CNT; i++) {
txd = &sc->ti_cdata.ti_txdesc[i];
if (txd->tx_m != NULL) {
- bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
+ bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);
+ bus_dmamap_unload(sc->ti_cdata.ti_tx_tag,
+ txd->tx_dmamap);
m_freem(txd->tx_m);
txd->tx_m = NULL;
}
- bzero((char *)&sc->ti_rdata->ti_tx_ring[i],
- sizeof(struct ti_tx_desc));
}
+ bzero(sc->ti_rdata.ti_tx_ring, TI_TX_RING_SZ);
+ bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
+ sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE);
}
static int
-ti_init_tx_ring(sc)
- struct ti_softc *sc;
+ti_init_tx_ring(struct ti_softc *sc)
{
- struct ti_txdesc *txd;
- int i;
+ struct ti_txdesc *txd;
+ int i;
STAILQ_INIT(&sc->ti_cdata.ti_txfreeq);
STAILQ_INIT(&sc->ti_cdata.ti_txbusyq);
@@ -1747,15 +1872,13 @@ ti_init_tx_ring(sc)
* work.
*/
static void
-ti_add_mcast(sc, addr)
- struct ti_softc *sc;
- struct ether_addr *addr;
+ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr)
{
- struct ti_cmd_desc cmd;
- u_int16_t *m;
- u_int32_t ext[2] = {0, 0};
+ struct ti_cmd_desc cmd;
+ uint16_t *m;
+ uint32_t ext[2] = {0, 0};
- m = (u_int16_t *)&addr->octet[0];
+ m = (uint16_t *)&addr->octet[0];
switch (sc->ti_hwrev) {
case TI_HWREV_TIGON:
@@ -1775,15 +1898,13 @@ ti_add_mcast(sc, addr)
}
static void
-ti_del_mcast(sc, addr)
- struct ti_softc *sc;
- struct ether_addr *addr;
+ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr)
{
- struct ti_cmd_desc cmd;
- u_int16_t *m;
- u_int32_t ext[2] = {0, 0};
+ struct ti_cmd_desc cmd;
+ uint16_t *m;
+ uint32_t ext[2] = {0, 0};
- m = (u_int16_t *)&addr->octet[0];
+ m = (uint16_t *)&addr->octet[0];
switch (sc->ti_hwrev) {
case TI_HWREV_TIGON:
@@ -1817,14 +1938,13 @@ ti_del_mcast(sc, addr)
* any given time.
*/
static void
-ti_setmulti(sc)
- struct ti_softc *sc;
+ti_setmulti(struct ti_softc *sc)
{
- struct ifnet *ifp;
- struct ifmultiaddr *ifma;
- struct ti_cmd_desc cmd;
- struct ti_mc_entry *mc;
- u_int32_t intrs;
+ struct ifnet *ifp;
+ struct ifmultiaddr *ifma;
+ struct ti_cmd_desc cmd;
+ struct ti_mc_entry *mc;
+ uint32_t intrs;
TI_LOCK_ASSERT(sc);
@@ -1877,9 +1997,10 @@ ti_setmulti(sc)
* around it on the Tigon 2 by setting a bit in the PCI state register,
* but for the Tigon 1 we must give up and abort the interface attach.
*/
-static int ti_64bitslot_war(sc)
- struct ti_softc *sc;
+static int
+ti_64bitslot_war(struct ti_softc *sc)
{
+
if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) {
CSR_WRITE_4(sc, 0x600, 0);
CSR_WRITE_4(sc, 0x604, 0);
@@ -1903,21 +2024,15 @@ static int ti_64bitslot_war(sc)
* self-test results.
*/
static int
-ti_chipinit(sc)
- struct ti_softc *sc;
+ti_chipinit(struct ti_softc *sc)
{
- u_int32_t cacheline;
- u_int32_t pci_writemax = 0;
- u_int32_t hdrsplit;
+ uint32_t cacheline;
+ uint32_t pci_writemax = 0;
+ uint32_t hdrsplit;
/* Initialize link to down state. */
sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;
- if (sc->ti_ifp->if_capenable & IFCAP_HWCSUM)
- sc->ti_ifp->if_hwassist = TI_CSUM_FEATURES;
- else
- sc->ti_ifp->if_hwassist = 0;
-
/* Set endianness before we access any non-PCI registers. */
#if 0 && BYTE_ORDER == BIG_ENDIAN
CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
@@ -2036,7 +2151,7 @@ ti_chipinit(sc)
* the firmware racks up lots of nicDmaReadRingFull
* errors. This is not compatible with hardware checksums.
*/
- if (sc->ti_ifp->if_hwassist == 0)
+ if ((sc->ti_ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_RXCSUM)) == 0)
TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE);
/* Recommended settings from Tigon manual. */
@@ -2057,30 +2172,24 @@ ti_chipinit(sc)
* start the CPU(s) running.
*/
static int
-ti_gibinit(sc)
- struct ti_softc *sc;
+ti_gibinit(struct ti_softc *sc)
{
- struct ti_rcb *rcb;
- int i;
- struct ifnet *ifp;
- uint32_t rdphys;
+ struct ifnet *ifp;
+ struct ti_rcb *rcb;
+ int i;
TI_LOCK_ASSERT(sc);
ifp = sc->ti_ifp;
- rdphys = sc->ti_rdata_phys;
/* Disable interrupts for now. */
CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
- /*
- * Tell the chip where to find the general information block.
- * While this struct could go into >4GB memory, we allocate it in a
- * single slab with the other descriptors, and those don't seem to
- * support being located in a 64-bit region.
- */
- CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
- CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, rdphys + TI_RD_OFF(ti_info));
+ /* Tell the chip where to find the general information block. */
+ CSR_WRITE_4(sc, TI_GCR_GENINFO_HI,
+ (uint64_t)sc->ti_rdata.ti_info_paddr >> 32);
+ CSR_WRITE_4(sc, TI_GCR_GENINFO_LO,
+ sc->ti_rdata.ti_info_paddr & 0xFFFFFFFF);
/* Load the firmware into SRAM. */
ti_loadfw(sc);
@@ -2088,20 +2197,20 @@ ti_gibinit(sc)
/* Set up the contents of the general info and ring control blocks. */
/* Set up the event ring and producer pointer. */
- rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;
-
- TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_event_ring);
+ bzero(sc->ti_rdata.ti_event_ring, TI_EVENT_RING_SZ);
+ rcb = &sc->ti_rdata.ti_info->ti_ev_rcb;
+ ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_event_ring_paddr);
rcb->ti_flags = 0;
- TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) =
- rdphys + TI_RD_OFF(ti_ev_prodidx_r);
+ ti_hostaddr64(&sc->ti_rdata.ti_info->ti_ev_prodidx_ptr,
+ sc->ti_rdata.ti_status_paddr +
+ offsetof(struct ti_status, ti_ev_prodidx_r));
sc->ti_ev_prodidx.ti_idx = 0;
CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
sc->ti_ev_saved_considx = 0;
/* Set up the command ring and producer mailbox. */
- rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;
-
- TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING);
+ rcb = &sc->ti_rdata.ti_info->ti_cmd_rcb;
+ ti_hostaddr64(&rcb->ti_hostaddr, TI_GCR_NIC_ADDR(TI_GCR_CMDRING));
rcb->ti_flags = 0;
rcb->ti_max_len = 0;
for (i = 0; i < TI_CMD_RING_CNT; i++) {
@@ -2116,61 +2225,66 @@ ti_gibinit(sc)
* We re-use the current stats buffer for this to
* conserve memory.
*/
- TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) =
- rdphys + TI_RD_OFF(ti_info.ti_stats);
+ bzero(&sc->ti_rdata.ti_info->ti_stats, sizeof(struct ti_stats));
+ ti_hostaddr64(&sc->ti_rdata.ti_info->ti_refresh_stats_ptr,
+ sc->ti_rdata.ti_info_paddr + offsetof(struct ti_gib, ti_stats));
/* Set up the standard receive ring. */
- rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
- TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_std_ring);
+ rcb = &sc->ti_rdata.ti_info->ti_std_rx_rcb;
+ ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_std_ring_paddr);
rcb->ti_max_len = TI_FRAMELEN;
rcb->ti_flags = 0;
- if (sc->ti_ifp->if_hwassist)
+ if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
- rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
+ if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
/* Set up the jumbo receive ring. */
- rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
- TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_jumbo_ring);
+ rcb = &sc->ti_rdata.ti_info->ti_jumbo_rx_rcb;
+ ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_jumbo_ring_paddr);
-#ifdef TI_PRIVATE_JUMBOS
- rcb->ti_max_len = TI_JUMBO_FRAMELEN;
+#ifndef TI_SF_BUF_JUMBO
+ rcb->ti_max_len = MJUM9BYTES - ETHER_ALIGN;
rcb->ti_flags = 0;
#else
rcb->ti_max_len = PAGE_SIZE;
rcb->ti_flags = TI_RCB_FLAG_USE_EXT_RX_BD;
#endif
- if (sc->ti_ifp->if_hwassist)
+ if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
- rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
+ if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
/*
* Set up the mini ring. Only activated on the
* Tigon 2 but the slot in the config block is
* still there on the Tigon 1.
*/
- rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
- TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_mini_ring);
+ rcb = &sc->ti_rdata.ti_info->ti_mini_rx_rcb;
+ ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_mini_ring_paddr);
rcb->ti_max_len = MHLEN - ETHER_ALIGN;
if (sc->ti_hwrev == TI_HWREV_TIGON)
rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
else
rcb->ti_flags = 0;
- if (sc->ti_ifp->if_hwassist)
+ if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
- rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
+ if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
/*
* Set up the receive return ring.
*/
- rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
- TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_return_ring);
+ rcb = &sc->ti_rdata.ti_info->ti_return_rcb;
+ ti_hostaddr64(&rcb->ti_hostaddr, sc->ti_rdata.ti_rx_return_ring_paddr);
rcb->ti_flags = 0;
rcb->ti_max_len = TI_RETURN_RING_CNT;
- TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) =
- rdphys + TI_RD_OFF(ti_return_prodidx_r);
+ ti_hostaddr64(&sc->ti_rdata.ti_info->ti_return_prodidx_ptr,
+ sc->ti_rdata.ti_status_paddr +
+ offsetof(struct ti_status, ti_return_prodidx_r));
/*
* Set up the tx ring. Note: for the Tigon 2, we have the option
 * of putting the transmit ring in the host's address space and
 * letting the chip DMA it instead of leaving the ring in the NIC's
 * memory and accessing it through the shared memory region. We
 * set up the tx ring here, but we don't actually start transmission
 * yet. Note that the host-based ring cannot be used with
@@ -2182,31 +2296,42 @@ ti_gibinit(sc)
 * a Tigon 1 chip.
 */
CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
- bzero((char *)sc->ti_rdata->ti_tx_ring,
- TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
- rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
+ if (sc->ti_rdata.ti_tx_ring != NULL)
+ bzero(sc->ti_rdata.ti_tx_ring, TI_TX_RING_SZ);
+ rcb = &sc->ti_rdata.ti_info->ti_tx_rcb;
if (sc->ti_hwrev == TI_HWREV_TIGON)
rcb->ti_flags = 0;
else
rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
- rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
- if (sc->ti_ifp->if_hwassist)
+ if (sc->ti_ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
+ if (sc->ti_ifp->if_capenable & IFCAP_TXCSUM)
rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
rcb->ti_max_len = TI_TX_RING_CNT;
if (sc->ti_hwrev == TI_HWREV_TIGON)
- TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE;
+ ti_hostaddr64(&rcb->ti_hostaddr, TI_TX_RING_BASE);
else
- TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_tx_ring);
- TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) =
- rdphys + TI_RD_OFF(ti_tx_considx_r);
-
- bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-
- /* Set up tuneables */
+ ti_hostaddr64(&rcb->ti_hostaddr,
+ sc->ti_rdata.ti_tx_ring_paddr);
+ ti_hostaddr64(&sc->ti_rdata.ti_info->ti_tx_considx_ptr,
+ sc->ti_rdata.ti_status_paddr +
+ offsetof(struct ti_status, ti_tx_considx_r));
+
+ bus_dmamap_sync(sc->ti_cdata.ti_gib_tag, sc->ti_cdata.ti_gib_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->ti_cdata.ti_status_tag, sc->ti_cdata.ti_status_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->ti_cdata.ti_event_ring_tag,
+ sc->ti_cdata.ti_event_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ if (sc->ti_rdata.ti_tx_ring != NULL)
+ bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
+ sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE);
+
+ /* Set up tunables */
#if 0
- if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
+ if (ifp->if_mtu > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)
CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
(sc->ti_rx_coal_ticks / 10));
else
@@ -2228,32 +2353,14 @@ ti_gibinit(sc)
return (0);
}
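ti_gibinit() now hands the chip full 64-bit bus addresses, either split across the GENINFO_HI/LO register pair or packed into descriptors by ti_hostaddr64(). The split itself is plain shift-and-mask; a self-contained sketch:

#include <stdint.h>

/* Split a 64-bit DMA address into the two 32-bit register halves. */
static void
addr64_split(uint64_t paddr, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(paddr >> 32);
	*lo = (uint32_t)(paddr & 0xffffffffUL);
}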
-static void
-ti_rdata_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
-{
- struct ti_softc *sc;
-
- sc = arg;
- if (error || nseg != 1)
- return;
-
- /*
- * All of the Tigon data structures need to live at <4GB. This
- * cast is fine since busdma was told about this constraint.
- */
- sc->ti_rdata_phys = segs[0].ds_addr;
- return;
-}
-
/*
* Probe for a Tigon chip. Check the PCI vendor and device IDs
* against our list and return its name if we find a match.
*/
static int
-ti_probe(dev)
- device_t dev;
+ti_probe(device_t dev)
{
- const struct ti_type *t;
+ const struct ti_type *t;
t = ti_devs;
@@ -2270,16 +2377,14 @@ ti_probe(dev)
}
static int
-ti_attach(dev)
- device_t dev;
+ti_attach(device_t dev)
{
- struct ifnet *ifp;
- struct ti_softc *sc;
- int error = 0, rid;
- u_char eaddr[6];
+ struct ifnet *ifp;
+ struct ti_softc *sc;
+ int error = 0, rid;
+ u_char eaddr[6];
sc = device_get_softc(dev);
- sc->ti_unit = device_get_unit(dev);
sc->ti_dev = dev;
mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
@@ -2292,8 +2397,8 @@ ti_attach(dev)
error = ENOSPC;
goto fail;
}
- sc->ti_ifp->if_capabilities = IFCAP_HWCSUM |
- IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+ sc->ti_ifp->if_hwassist = TI_CSUM_FEATURES;
+ sc->ti_ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
sc->ti_ifp->if_capenable = sc->ti_ifp->if_capabilities;
/*
@@ -2301,7 +2406,7 @@ ti_attach(dev)
*/
pci_enable_busmaster(dev);
- rid = TI_PCI_LOMEM;
+ rid = PCIR_BAR(0);
sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
@@ -2349,108 +2454,23 @@ ti_attach(dev)
* the NIC). This means the MAC address is actually preceded
* by two zero bytes. We need to skip over those.
*/
- if (ti_read_eeprom(sc, eaddr,
- TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
+ if (ti_read_eeprom(sc, eaddr, TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
device_printf(dev, "failed to read station address\n");
error = ENXIO;
goto fail;
}
- /* Allocate the general information block and ring buffers. */
- if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
- 1, 0, /* algnmnt, boundary */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
- 0, /* nsegments */
- BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
- 0, /* flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->ti_parent_dmat) != 0) {
- device_printf(dev, "Failed to allocate parent dmat\n");
- error = ENOMEM;
- goto fail;
- }
-
- if (bus_dma_tag_create(sc->ti_parent_dmat, /* parent */
- PAGE_SIZE, 0, /* algnmnt, boundary */
- BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- sizeof(struct ti_ring_data), /* maxsize */
- 1, /* nsegments */
- sizeof(struct ti_ring_data), /* maxsegsize */
- 0, /* flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->ti_rdata_dmat) != 0) {
- device_printf(dev, "Failed to allocate rdata dmat\n");
- error = ENOMEM;
- goto fail;
- }
-
- if (bus_dmamem_alloc(sc->ti_rdata_dmat, (void**)&sc->ti_rdata,
- BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
- &sc->ti_rdata_dmamap) != 0) {
- device_printf(dev, "Failed to allocate rdata memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- if (bus_dmamap_load(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
- sc->ti_rdata, sizeof(struct ti_ring_data),
- ti_rdata_cb, sc, BUS_DMA_NOWAIT) != 0) {
- device_printf(dev, "Failed to load rdata segments\n");
- error = ENOMEM;
- goto fail;
- }
-
- bzero(sc->ti_rdata, sizeof(struct ti_ring_data));
-
- /* Try to allocate memory for jumbo buffers. */
- if (ti_alloc_jumbo_mem(sc)) {
- device_printf(dev, "jumbo buffer allocation failed\n");
- error = ENXIO;
- goto fail;
- }
-
- if (bus_dma_tag_create(sc->ti_parent_dmat, /* parent */
- 1, 0, /* algnmnt, boundary */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MCLBYTES * TI_MAXTXSEGS,/* maxsize */
- TI_MAXTXSEGS, /* nsegments */
- MCLBYTES, /* maxsegsize */
- 0, /* flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->ti_mbuftx_dmat) != 0) {
- device_printf(dev, "Failed to allocate rdata dmat\n");
+ /* Allocate working area for memory dump. */
+ sc->ti_membuf = malloc(sizeof(uint8_t) * TI_WINLEN, M_DEVBUF, M_NOWAIT);
+ sc->ti_membuf2 = malloc(sizeof(uint8_t) * TI_WINLEN, M_DEVBUF,
+ M_NOWAIT);
+ if (sc->ti_membuf == NULL || sc->ti_membuf2 == NULL) {
+ device_printf(dev, "cannot allocate memory buffer\n");
error = ENOMEM;
goto fail;
}
-
- if (bus_dma_tag_create(sc->ti_parent_dmat, /* parent */
- 1, 0, /* algnmnt, boundary */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MCLBYTES, /* maxsize */
- 1, /* nsegments */
- MCLBYTES, /* maxsegsize */
- 0, /* flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->ti_mbufrx_dmat) != 0) {
- device_printf(dev, "Failed to allocate rdata dmat\n");
- error = ENOMEM;
- goto fail;
- }
-
- if (ti_alloc_dmamaps(sc)) {
- device_printf(dev, "dma map creation failed\n");
- error = ENXIO;
+ if ((error = ti_dma_alloc(sc)) != 0)
goto fail;
- }
/*
* We really need a better way to tell a 1000baseTX card
@@ -2467,19 +2487,8 @@ ti_attach(dev)
pci_get_device(dev) == NG_DEVICEID_GA620T)
sc->ti_copper = 1;
- /* Set default tuneable values. */
- sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
-#if 0
- sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000;
-#endif
- sc->ti_rx_coal_ticks = 170;
- sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
- sc->ti_rx_max_coal_bds = 64;
-#if 0
- sc->ti_tx_max_coal_bds = 128;
-#endif
- sc->ti_tx_max_coal_bds = 32;
- sc->ti_tx_buf_ratio = 21;
+ /* Set default tunable values. */
+ ti_sysctl_node(sc);
/* Set up ifnet structure */
ifp->if_softc = sc;
@@ -2488,8 +2497,10 @@ ti_attach(dev)
ifp->if_ioctl = ti_ioctl;
ifp->if_start = ti_start;
ifp->if_init = ti_init;
- ifp->if_baudrate = 1000000000;
- ifp->if_snd.ifq_maxlen = TI_TX_RING_CNT - 1;
+ ifp->if_baudrate = IF_Gbps(1UL);
+ ifp->if_snd.ifq_drv_maxlen = TI_TX_RING_CNT - 1;
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+ IFQ_SET_READY(&ifp->if_snd);
/* Set up ifmedia support. */
if (sc->ti_copper) {
@@ -2526,8 +2537,8 @@ ti_attach(dev)
*/
/* Register the device */
- sc->dev = make_dev(&ti_cdevsw, sc->ti_unit, UID_ROOT, GID_OPERATOR,
- 0600, "ti%d", sc->ti_unit);
+ sc->dev = make_dev(&ti_cdevsw, device_get_unit(dev), UID_ROOT,
+ GID_OPERATOR, 0600, "ti%d", device_get_unit(dev));
sc->dev->si_drv1 = sc;
/*
@@ -2535,6 +2546,17 @@ ti_attach(dev)
*/
ether_ifattach(ifp, eaddr);
+ /* VLAN capability setup. */
+ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM |
+ IFCAP_VLAN_HWTAGGING;
+ ifp->if_capenable = ifp->if_capabilities;
+ /* Tell the upper layer we support VLAN over-sized frames. */
+ ifp->if_hdrlen = sizeof(struct ether_vlan_header);
+
+ /* Driver supports link state tracking. */
+ ifp->if_capabilities |= IFCAP_LINKSTATE;
+ ifp->if_capenable |= IFCAP_LINKSTATE;
+
/* Hook interrupt last to avoid having to lock softc */
error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET|INTR_MPSAFE,
NULL, ti_intr, sc, &sc->ti_intrhand);
@@ -2559,11 +2581,10 @@ fail:
* allocated.
*/
static int
-ti_detach(dev)
- device_t dev;
+ti_detach(device_t dev)
{
- struct ti_softc *sc;
- struct ifnet *ifp;
+ struct ti_softc *sc;
+ struct ifnet *ifp;
sc = device_get_softc(dev);
if (sc->dev)
@@ -2580,37 +2601,23 @@ ti_detach(dev)
/* These should only be active if attach succeeded */
callout_drain(&sc->ti_watchdog);
bus_generic_detach(dev);
- ti_free_dmamaps(sc);
+ ti_dma_free(sc);
ifmedia_removeall(&sc->ifmedia);
-#ifdef TI_PRIVATE_JUMBOS
- if (sc->ti_cdata.ti_jumbo_buf)
- bus_dmamem_free(sc->ti_jumbo_dmat, sc->ti_cdata.ti_jumbo_buf,
- sc->ti_jumbo_dmamap);
-#endif
- if (sc->ti_jumbo_dmat)
- bus_dma_tag_destroy(sc->ti_jumbo_dmat);
- if (sc->ti_mbuftx_dmat)
- bus_dma_tag_destroy(sc->ti_mbuftx_dmat);
- if (sc->ti_mbufrx_dmat)
- bus_dma_tag_destroy(sc->ti_mbufrx_dmat);
- if (sc->ti_rdata)
- bus_dmamem_free(sc->ti_rdata_dmat, sc->ti_rdata,
- sc->ti_rdata_dmamap);
- if (sc->ti_rdata_dmat)
- bus_dma_tag_destroy(sc->ti_rdata_dmat);
- if (sc->ti_parent_dmat)
- bus_dma_tag_destroy(sc->ti_parent_dmat);
if (sc->ti_intrhand)
bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
if (sc->ti_irq)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
if (sc->ti_res) {
- bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM,
+ bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
sc->ti_res);
}
if (ifp)
if_free(ifp);
+ if (sc->ti_membuf)
+ free(sc->ti_membuf, M_DEVBUF);
+ if (sc->ti_membuf2)
+ free(sc->ti_membuf2, M_DEVBUF);
mtx_destroy(&sc->ti_mtx);
@@ -2674,6 +2681,59 @@ ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len, int idx)
}
#endif /* TI_JUMBO_HDRSPLIT */
+static void
+ti_discard_std(struct ti_softc *sc, int i)
+{
+ struct ti_rx_desc *r;
+
+ r = &sc->ti_rdata.ti_rx_std_ring[i];
+ r->ti_len = MCLBYTES - ETHER_ALIGN;
+ r->ti_type = TI_BDTYPE_RECV_BD;
+ r->ti_flags = 0;
+ r->ti_vlan_tag = 0;
+ r->ti_tcp_udp_cksum = 0;
+ if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
+ r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
+ r->ti_idx = i;
+}
+
+static void
+ti_discard_mini(struct ti_softc *sc, int i)
+{
+ struct ti_rx_desc *r;
+
+ r = &sc->ti_rdata.ti_rx_mini_ring[i];
+ r->ti_len = MHLEN - ETHER_ALIGN;
+ r->ti_type = TI_BDTYPE_RECV_BD;
+ r->ti_flags = TI_BDFLAG_MINI_RING;
+ r->ti_vlan_tag = 0;
+ r->ti_tcp_udp_cksum = 0;
+ if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
+ r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
+ r->ti_idx = i;
+}
+
+#ifndef TI_SF_BUF_JUMBO
+static void
+ti_discard_jumbo(struct ti_softc *sc, int i)
+{
+ struct ti_rx_desc *r;
+
+ r = &sc->ti_rdata.ti_rx_jumbo_ring[i];
+ r->ti_len = MJUM9BYTES - ETHER_ALIGN;
+ r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
+ r->ti_flags = TI_BDFLAG_JUMBO_RING;
+ r->ti_vlan_tag = 0;
+ r->ti_tcp_udp_cksum = 0;
+ if (sc->ti_ifp->if_capenable & IFCAP_RXCSUM)
+ r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
+ r->ti_idx = i;
+}
+#endif
+
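The ti_discard_*() helpers give ti_rxeof() a cheap drop path: on a bad frame, or when no replacement mbuf can be allocated, the old buffer stays loaded and only the descriptor is re-armed, saving a DMA unload/reload per dropped frame. The per-frame decision, as a generic sketch (all names hypothetical):

struct ring;					/* hypothetical ring state */
int	refill(struct ring *, int);		/* load a fresh buffer; may fail */
void	recycle_descriptor(struct ring *, int);	/* re-arm BD, keep old buffer */
void	deliver(struct ring *, int);		/* pass the filled mbuf up */

static void
rx_one(struct ring *r, int idx, int frame_bad)
{
	if (frame_bad || refill(r, idx) != 0) {
		recycle_descriptor(r, idx);	/* frame is dropped */
		return;
	}
	deliver(r, idx);
}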
/*
* Frame reception handling. This is called if there's a frame
* on the receive return list.
 *
 * Note: we have to be able to handle three possibilities here:
 * 1) the frame is from the mini receive ring (can only happen on
 *    Tigon 2 boards)
 * 2) the frame is from the jumbo receive ring
 * 3) the frame is from the standard receive ring
@@ -2686,114 +2746,136 @@ ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len, int idx)
 */
static void
-ti_rxeof(sc)
- struct ti_softc *sc;
+ti_rxeof(struct ti_softc *sc)
{
- bus_dmamap_t map;
- struct ifnet *ifp;
- struct ti_cmd_desc cmd;
+ struct ifnet *ifp;
+#ifdef TI_SF_BUF_JUMBO
+ bus_dmamap_t map;
+#endif
+ struct ti_cmd_desc cmd;
+ int jumbocnt, minicnt, stdcnt, ti_len;
TI_LOCK_ASSERT(sc);
ifp = sc->ti_ifp;
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag,
+ sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
+ if (ifp->if_mtu > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag,
+ sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
+ if (sc->ti_rdata.ti_rx_mini_ring != NULL)
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag,
+ sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_return_ring_tag,
+ sc->ti_cdata.ti_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
+
+ jumbocnt = minicnt = stdcnt = 0;
while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
- struct ti_rx_desc *cur_rx;
- u_int32_t rxidx;
- struct mbuf *m = NULL;
- u_int16_t vlan_tag = 0;
- int have_tag = 0;
+ struct ti_rx_desc *cur_rx;
+ uint32_t rxidx;
+ struct mbuf *m = NULL;
+ uint16_t vlan_tag = 0;
+ int have_tag = 0;
cur_rx =
- &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx];
+ &sc->ti_rdata.ti_rx_return_ring[sc->ti_rx_saved_considx];
rxidx = cur_rx->ti_idx;
+ ti_len = cur_rx->ti_len;
TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);
if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
have_tag = 1;
- vlan_tag = cur_rx->ti_vlan_tag & 0xfff;
+ vlan_tag = cur_rx->ti_vlan_tag;
}
if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {
-
+ jumbocnt++;
TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
+#ifndef TI_SF_BUF_JUMBO
+ if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
+ ifp->if_ierrors++;
+ ti_discard_jumbo(sc, rxidx);
+ continue;
+ }
+ if (ti_newbuf_jumbo(sc, rxidx, NULL) != 0) {
+ ifp->if_iqdrops++;
+ ti_discard_jumbo(sc, rxidx);
+ continue;
+ }
+ m->m_len = ti_len;
+#else /* !TI_SF_BUF_JUMBO */
sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
map = sc->ti_cdata.ti_rx_jumbo_maps[rxidx];
- bus_dmamap_sync(sc->ti_jumbo_dmat, map,
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_tag, map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->ti_jumbo_dmat, map);
+ bus_dmamap_unload(sc->ti_cdata.ti_rx_jumbo_tag, map);
if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
ifp->if_ierrors++;
ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
continue;
}
if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) {
- ifp->if_ierrors++;
+ ifp->if_iqdrops++;
ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
continue;
}
-#ifdef TI_PRIVATE_JUMBOS
- m->m_len = cur_rx->ti_len;
-#else /* TI_PRIVATE_JUMBOS */
#ifdef TI_JUMBO_HDRSPLIT
if (sc->ti_hdrsplit)
ti_hdr_split(m, TI_HOSTADDR(cur_rx->ti_addr),
- cur_rx->ti_len, rxidx);
+ ti_len, rxidx);
else
#endif /* TI_JUMBO_HDRSPLIT */
- m_adj(m, cur_rx->ti_len - m->m_pkthdr.len);
-#endif /* TI_PRIVATE_JUMBOS */
+ m_adj(m, ti_len - m->m_pkthdr.len);
+#endif /* TI_SF_BUF_JUMBO */
} else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
+ minicnt++;
TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
- sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL;
- map = sc->ti_cdata.ti_rx_mini_maps[rxidx];
- bus_dmamap_sync(sc->ti_mbufrx_dmat, map,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->ti_mbufrx_dmat, map);
if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
ifp->if_ierrors++;
- ti_newbuf_mini(sc, sc->ti_mini, m);
+ ti_discard_mini(sc, rxidx);
continue;
}
- if (ti_newbuf_mini(sc, sc->ti_mini, NULL) == ENOBUFS) {
- ifp->if_ierrors++;
- ti_newbuf_mini(sc, sc->ti_mini, m);
+ if (ti_newbuf_mini(sc, rxidx) != 0) {
+ ifp->if_iqdrops++;
+ ti_discard_mini(sc, rxidx);
continue;
}
- m->m_len = cur_rx->ti_len;
+ m->m_len = ti_len;
} else {
+ stdcnt++;
TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
m = sc->ti_cdata.ti_rx_std_chain[rxidx];
- sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL;
- map = sc->ti_cdata.ti_rx_std_maps[rxidx];
- bus_dmamap_sync(sc->ti_mbufrx_dmat, map,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->ti_mbufrx_dmat, map);
if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
ifp->if_ierrors++;
- ti_newbuf_std(sc, sc->ti_std, m);
+ ti_discard_std(sc, rxidx);
continue;
}
- if (ti_newbuf_std(sc, sc->ti_std, NULL) == ENOBUFS) {
- ifp->if_ierrors++;
- ti_newbuf_std(sc, sc->ti_std, m);
+ if (ti_newbuf_std(sc, rxidx) != 0) {
+ ifp->if_iqdrops++;
+ ti_discard_std(sc, rxidx);
continue;
}
- m->m_len = cur_rx->ti_len;
+ m->m_len = ti_len;
}
- m->m_pkthdr.len = cur_rx->ti_len;
+ m->m_pkthdr.len = ti_len;
ifp->if_ipackets++;
m->m_pkthdr.rcvif = ifp;
- if (ifp->if_hwassist) {
- m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
- CSUM_DATA_VALID;
- if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
- m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
- m->m_pkthdr.csum_data = cur_rx->ti_tcp_udp_cksum;
+ if (ifp->if_capenable & IFCAP_RXCSUM) {
+ if (cur_rx->ti_flags & TI_BDFLAG_IP_CKSUM) {
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+ if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
+ m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ }
+ if (cur_rx->ti_flags & TI_BDFLAG_TCP_UDP_CKSUM) {
+ m->m_pkthdr.csum_data =
+ cur_rx->ti_tcp_udp_cksum;
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
+ }
}
/*
@@ -2809,31 +2891,48 @@ ti_rxeof(sc)
TI_LOCK(sc);
}
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_return_ring_tag,
+ sc->ti_cdata.ti_rx_return_ring_map, BUS_DMASYNC_PREREAD);
/* Only necessary on the Tigon 1. */
if (sc->ti_hwrev == TI_HWREV_TIGON)
CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX,
sc->ti_rx_saved_considx);
- TI_UPDATE_STDPROD(sc, sc->ti_std);
- TI_UPDATE_MINIPROD(sc, sc->ti_mini);
- TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);
+ if (stdcnt > 0) {
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_std_ring_tag,
+ sc->ti_cdata.ti_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
+ TI_UPDATE_STDPROD(sc, sc->ti_std);
+ }
+ if (minicnt > 0) {
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_mini_ring_tag,
+ sc->ti_cdata.ti_rx_mini_ring_map, BUS_DMASYNC_PREWRITE);
+ TI_UPDATE_MINIPROD(sc, sc->ti_mini);
+ }
+ if (jumbocnt > 0) {
+ bus_dmamap_sync(sc->ti_cdata.ti_rx_jumbo_ring_tag,
+ sc->ti_cdata.ti_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
+ TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);
+ }
}
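ti_rxeof() now brackets the ring walk with explicit bus_dmamap_sync() calls and pokes a producer mailbox only for rings that actually turned over (stdcnt, minicnt, jumbocnt). The discipline for host-written, device-read descriptor memory, sketched with tag/map as stand-ins for a ring's real pair:

	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE); /* close previous epoch */
	/* ... host inspects and rewrites descriptors ... */
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);  /* flush host writes */
	/* only now bump the producer index the chip watches */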
static void
-ti_txeof(sc)
- struct ti_softc *sc;
+ti_txeof(struct ti_softc *sc)
{
- struct ti_txdesc *txd;
- struct ti_tx_desc txdesc;
- struct ti_tx_desc *cur_tx = NULL;
- struct ifnet *ifp;
- int idx;
+ struct ti_txdesc *txd;
+ struct ti_tx_desc txdesc;
+ struct ti_tx_desc *cur_tx = NULL;
+ struct ifnet *ifp;
+ int idx;
ifp = sc->ti_ifp;
txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq);
if (txd == NULL)
return;
+
+ if (sc->ti_rdata.ti_tx_ring != NULL)
+ bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
+ sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_POSTWRITE);
/*
* Go through our tx ring and free mbufs for those
* frames that have been sent.
@@ -2845,14 +2944,14 @@ ti_txeof(sc)
sizeof(txdesc), &txdesc);
cur_tx = &txdesc;
} else
- cur_tx = &sc->ti_rdata->ti_tx_ring[idx];
+ cur_tx = &sc->ti_rdata.ti_tx_ring[idx];
sc->ti_txcnt--;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if ((cur_tx->ti_flags & TI_BDFLAG_END) == 0)
continue;
- bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
+ bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);
+ bus_dmamap_unload(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap);
ifp->if_opackets++;
m_freem(txd->tx_m);
@@ -2862,72 +2961,73 @@ ti_txeof(sc)
txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq);
}
sc->ti_tx_saved_considx = idx;
-
- sc->ti_timer = sc->ti_txcnt > 0 ? 5 : 0;
+ if (sc->ti_txcnt == 0)
+ sc->ti_timer = 0;
}
static void
-ti_intr(xsc)
- void *xsc;
+ti_intr(void *xsc)
{
- struct ti_softc *sc;
- struct ifnet *ifp;
+ struct ti_softc *sc;
+ struct ifnet *ifp;
sc = xsc;
TI_LOCK(sc);
ifp = sc->ti_ifp;
-/*#ifdef notdef*/
- /* Avoid this for now -- checking this register is expensive. */
/* Make sure this is really our interrupt. */
if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) {
TI_UNLOCK(sc);
return;
}
-/*#endif*/
 /* Ack interrupt and stop others from occurring. */
CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ bus_dmamap_sync(sc->ti_cdata.ti_status_tag,
+ sc->ti_cdata.ti_status_map, BUS_DMASYNC_POSTREAD);
/* Check RX return ring producer/consumer */
ti_rxeof(sc);
/* Check TX ring producer/consumer */
ti_txeof(sc);
+ bus_dmamap_sync(sc->ti_cdata.ti_status_tag,
+ sc->ti_cdata.ti_status_map, BUS_DMASYNC_PREREAD);
}
ti_handle_events(sc);
- /* Re-enable interrupts. */
- CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
- ifp->if_snd.ifq_head != NULL)
- ti_start_locked(ifp);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ /* Re-enable interrupts. */
+ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ ti_start_locked(ifp);
+ }
TI_UNLOCK(sc);
}
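The Tigon masks and acks interrupts through the TI_MB_HOSTINTR mailbox: writing 1 blocks further interrupts, writing 0 re-enables them, and the rewritten handler re-enables only while the interface is still running. Condensed skeleton of the flow above (fragment, not compilable on its own):

	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);	/* ack and mask */
	/* ... ti_rxeof(), ti_txeof(), ti_handle_events() ... */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);	/* unmask */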
static void
-ti_stats_update(sc)
- struct ti_softc *sc;
+ti_stats_update(struct ti_softc *sc)
{
- struct ifnet *ifp;
+ struct ifnet *ifp;
+ struct ti_stats *s;
ifp = sc->ti_ifp;
- bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
+ if (sc->ti_stat_ticks == 0)
+ return;
+ bus_dmamap_sync(sc->ti_cdata.ti_gib_tag, sc->ti_cdata.ti_gib_map,
BUS_DMASYNC_POSTREAD);
- ifp->if_collisions +=
- (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames +
- sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames +
- sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions +
- sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) -
- ifp->if_collisions;
+ s = &sc->ti_rdata.ti_info->ti_stats;
+ ifp->if_collisions += (s->dot3StatsSingleCollisionFrames +
+ s->dot3StatsMultipleCollisionFrames +
+ s->dot3StatsExcessiveCollisions + s->dot3StatsLateCollisions) -
+ ifp->if_collisions;
- bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
+ bus_dmamap_sync(sc->ti_cdata.ti_gib_tag, sc->ti_cdata.ti_gib_map,
BUS_DMASYNC_PREREAD);
}
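The if_collisions update reads oddly but is delta accounting against an absolute hardware total; with a single statistics source it collapses to an assignment, and the += form merely matches how the other counters are kept. The identity, as a sketch:

#include <stdint.h>

/* Advance 'counted' by exactly the not-yet-accounted part of the total. */
static uint64_t
update(uint64_t counted, uint64_t nic_total)
{
	counted += nic_total - counted;	/* same result as: counted = nic_total */
	return (counted);
}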
@@ -2936,22 +3036,20 @@ ti_stats_update(sc)
* pointers to descriptors.
*/
static int
-ti_encap(sc, m_head)
- struct ti_softc *sc;
- struct mbuf **m_head;
-{
- struct ti_txdesc *txd;
- struct ti_tx_desc *f;
- struct ti_tx_desc txdesc;
- struct mbuf *m;
- bus_dma_segment_t txsegs[TI_MAXTXSEGS];
- u_int16_t csum_flags;
- int error, frag, i, nseg;
+ti_encap(struct ti_softc *sc, struct mbuf **m_head)
+{
+ struct ti_txdesc *txd;
+ struct ti_tx_desc *f;
+ struct ti_tx_desc txdesc;
+ struct mbuf *m;
+ bus_dma_segment_t txsegs[TI_MAXTXSEGS];
+ uint16_t csum_flags;
+ int error, frag, i, nseg;
if ((txd = STAILQ_FIRST(&sc->ti_cdata.ti_txfreeq)) == NULL)
return (ENOBUFS);
- error = bus_dmamap_load_mbuf_sg(sc->ti_mbuftx_dmat, txd->tx_dmamap,
+ error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
*m_head, txsegs, &nseg, 0);
if (error == EFBIG) {
m = m_defrag(*m_head, M_DONTWAIT);
@@ -2961,7 +3059,7 @@ ti_encap(sc, m_head)
return (ENOMEM);
}
*m_head = m;
- error = bus_dmamap_load_mbuf_sg(sc->ti_mbuftx_dmat,
+ error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_tx_tag,
txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
if (error) {
m_freem(*m_head);
@@ -2977,9 +3075,11 @@ ti_encap(sc, m_head)
}
if (sc->ti_txcnt + nseg >= TI_TX_RING_CNT) {
- bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);
+ bus_dmamap_unload(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap);
return (ENOBUFS);
}
+ bus_dmamap_sync(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
+ BUS_DMASYNC_PREWRITE);
m = *m_head;
csum_flags = 0;
@@ -2994,24 +3094,19 @@ ti_encap(sc, m_head)
csum_flags |= TI_BDFLAG_IP_FRAG;
}
- bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
- BUS_DMASYNC_PREWRITE);
- bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
- BUS_DMASYNC_PREWRITE);
-
frag = sc->ti_tx_saved_prodidx;
for (i = 0; i < nseg; i++) {
if (sc->ti_hwrev == TI_HWREV_TIGON) {
bzero(&txdesc, sizeof(txdesc));
f = &txdesc;
} else
- f = &sc->ti_rdata->ti_tx_ring[frag];
+ f = &sc->ti_rdata.ti_tx_ring[frag];
ti_hostaddr64(&f->ti_addr, txsegs[i].ds_addr);
f->ti_len = txsegs[i].ds_len;
f->ti_flags = csum_flags;
if (m->m_flags & M_VLANTAG) {
f->ti_flags |= TI_BDFLAG_VLAN_TAG;
- f->ti_vlan_tag = m->m_pkthdr.ether_vtag & 0xfff;
+ f->ti_vlan_tag = m->m_pkthdr.ether_vtag;
} else {
f->ti_vlan_tag = 0;
}
@@ -3030,7 +3125,7 @@ ti_encap(sc, m_head)
ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
sizeof(txdesc), &txdesc);
} else
- sc->ti_rdata->ti_tx_ring[frag].ti_flags |= TI_BDFLAG_END;
+ sc->ti_rdata.ti_tx_ring[frag].ti_flags |= TI_BDFLAG_END;
STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txfreeq, tx_q);
STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txbusyq, txd, tx_q);
@@ -3041,10 +3136,9 @@ ti_encap(sc, m_head)
}
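ti_encap() follows the standard busdma TX pattern: attempt bus_dmamap_load_mbuf_sg(), and on EFBIG (more segments than the tag allows) compact the chain with m_defrag() and retry exactly once. The retry skeleton, condensed from the function above (tag, map, segs and nseg stand in for the driver's variables):

	error = bus_dmamap_load_mbuf_sg(tag, map, *m_head, segs, &nseg, 0);
	if (error == EFBIG) {
		m = m_defrag(*m_head, M_DONTWAIT);	/* compact the chain */
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(tag, map, *m_head, segs,
		    &nseg, 0);
	}
	if (error != 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (error);
	}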
static void
-ti_start(ifp)
- struct ifnet *ifp;
+ti_start(struct ifnet *ifp)
{
- struct ti_softc *sc;
+ struct ti_softc *sc;
sc = ifp->if_softc;
TI_LOCK(sc);
@@ -3057,18 +3151,17 @@ ti_start(ifp)
* to the mbuf data regions directly in the transmit descriptors.
*/
static void
-ti_start_locked(ifp)
- struct ifnet *ifp;
+ti_start_locked(struct ifnet *ifp)
{
- struct ti_softc *sc;
- struct mbuf *m_head = NULL;
- int enq = 0;
+ struct ti_softc *sc;
+ struct mbuf *m_head = NULL;
+ int enq = 0;
sc = ifp->if_softc;
- for (; ifp->if_snd.ifq_head != NULL &&
+ for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
sc->ti_txcnt < (TI_TX_RING_CNT - 16);) {
- IF_DEQUEUE(&ifp->if_snd, m_head);
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
@@ -3084,7 +3177,7 @@ ti_start_locked(ifp)
m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
if ((TI_TX_RING_CNT - sc->ti_txcnt) <
m_head->m_pkthdr.csum_data + 16) {
- IF_PREPEND(&ifp->if_snd, m_head);
+ IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
}
@@ -3098,7 +3191,7 @@ ti_start_locked(ifp)
if (ti_encap(sc, &m_head)) {
if (m_head == NULL)
break;
- IF_PREPEND(&ifp->if_snd, m_head);
+ IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
}
@@ -3112,6 +3205,9 @@ ti_start_locked(ifp)
}
if (enq > 0) {
+ if (sc->ti_rdata.ti_tx_ring != NULL)
+ bus_dmamap_sync(sc->ti_cdata.ti_tx_ring_tag,
+ sc->ti_cdata.ti_tx_ring_map, BUS_DMASYNC_PREWRITE);
/* Transmit */
CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, sc->ti_tx_saved_prodidx);
@@ -3123,10 +3219,9 @@ ti_start_locked(ifp)
}
static void
-ti_init(xsc)
- void *xsc;
+ti_init(void *xsc)
{
- struct ti_softc *sc;
+ struct ti_softc *sc;
sc = xsc;
TI_LOCK(sc);
@@ -3135,10 +3230,12 @@ ti_init(xsc)
}
static void
-ti_init_locked(xsc)
- void *xsc;
+ti_init_locked(void *xsc)
{
- struct ti_softc *sc = xsc;
+ struct ti_softc *sc = xsc;
+
+ if (sc->ti_ifp->if_drv_flags & IFF_DRV_RUNNING)
+ return;
/* Cancel pending I/O and flush buffers. */
ti_stop(sc);
@@ -3150,21 +3247,20 @@ ti_init_locked(xsc)
}
}
-static void ti_init2(sc)
- struct ti_softc *sc;
+static void
+ti_init2(struct ti_softc *sc)
{
- struct ti_cmd_desc cmd;
- struct ifnet *ifp;
- u_int8_t *ea;
- struct ifmedia *ifm;
- int tmp;
+ struct ti_cmd_desc cmd;
+ struct ifnet *ifp;
+ uint8_t *ea;
+ struct ifmedia *ifm;
+ int tmp;
TI_LOCK_ASSERT(sc);
ifp = sc->ti_ifp;
/* Specify MTU and interface index. */
- CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->ti_unit);
+ CSR_WRITE_4(sc, TI_GCR_IFINDEX, device_get_unit(sc->ti_dev));
CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu +
ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);
@@ -3195,18 +3291,34 @@ static void ti_init2(sc)
}
/* Init RX ring. */
- ti_init_rx_ring_std(sc);
+ if (ti_init_rx_ring_std(sc) != 0) {
+ /* XXX */
+ device_printf(sc->ti_dev, "no memory for std Rx buffers.\n");
+ return;
+ }
/* Init jumbo RX ring. */
- if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
- ti_init_rx_ring_jumbo(sc);
+ if (ifp->if_mtu > ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) {
+ if (ti_init_rx_ring_jumbo(sc) != 0) {
+ /* XXX */
+ device_printf(sc->ti_dev,
+ "no memory for jumbo Rx buffers.\n");
+ return;
+ }
+ }
/*
* If this is a Tigon 2, we can also configure the
* mini ring.
*/
- if (sc->ti_hwrev == TI_HWREV_TIGON_II)
- ti_init_rx_ring_mini(sc);
+ if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
+ if (ti_init_rx_ring_mini(sc) != 0) {
+ /* XXX */
+ device_printf(sc->ti_dev,
+ "no memory for mini Rx buffers.\n");
+ return;
+ }
+ }
CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
sc->ti_rx_saved_considx = 0;
@@ -3233,7 +3345,7 @@ static void ti_init2(sc)
ifm = &sc->ifmedia;
tmp = ifm->ifm_media;
ifm->ifm_media = ifm->ifm_cur->ifm_media;
- ti_ifmedia_upd(ifp);
+ ti_ifmedia_upd_locked(sc);
ifm->ifm_media = tmp;
}
@@ -3241,15 +3353,26 @@ static void ti_init2(sc)
* Set media options.
*/
static int
-ti_ifmedia_upd(ifp)
- struct ifnet *ifp;
+ti_ifmedia_upd(struct ifnet *ifp)
{
- struct ti_softc *sc;
- struct ifmedia *ifm;
- struct ti_cmd_desc cmd;
- u_int32_t flowctl;
+ struct ti_softc *sc;
+ int error;
sc = ifp->if_softc;
+ TI_LOCK(sc);
+ error = ti_ifmedia_upd_locked(sc);
+ TI_UNLOCK(sc);
+
+ return (error);
+}
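The media-change entry point is now a thin locking wrapper around ti_ifmedia_upd_locked(); the wrapper must call the _locked worker rather than itself, or it would recurse. The general shape of the pattern, with hypothetical names:

static int
foo_ifmedia_upd(struct ifnet *ifp)		/* hypothetical driver */
{
	struct foo_softc *sc = ifp->if_softc;
	int error;

	FOO_LOCK(sc);
	error = foo_ifmedia_upd_locked(sc);	/* does the real work */
	FOO_UNLOCK(sc);
	return (error);
}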
+
+static int
+ti_ifmedia_upd_locked(struct ti_softc *sc)
+{
+ struct ifmedia *ifm;
+ struct ti_cmd_desc cmd;
+ uint32_t flowctl;
+
ifm = &sc->ifmedia;
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
@@ -3343,20 +3466,22 @@ ti_ifmedia_upd(ifp)
* Report current media status.
*/
static void
-ti_ifmedia_sts(ifp, ifmr)
- struct ifnet *ifp;
- struct ifmediareq *ifmr;
+ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
- struct ti_softc *sc;
- u_int32_t media = 0;
+ struct ti_softc *sc;
+ uint32_t media = 0;
sc = ifp->if_softc;
+ TI_LOCK(sc);
+
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
- if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN)
+ if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
+ TI_UNLOCK(sc);
return;
+ }
ifmr->ifm_status |= IFM_ACTIVE;
@@ -3388,27 +3513,28 @@ ti_ifmedia_sts(ifp, ifmr)
if (media & TI_LNK_HALF_DUPLEX)
ifmr->ifm_active |= IFM_HDX;
}
+ TI_UNLOCK(sc);
}
static int
-ti_ioctl(ifp, command, data)
- struct ifnet *ifp;
- u_long command;
- caddr_t data;
+ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
- struct ti_softc *sc = ifp->if_softc;
- struct ifreq *ifr = (struct ifreq *) data;
- int mask, error = 0;
- struct ti_cmd_desc cmd;
+ struct ti_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *) data;
+ struct ti_cmd_desc cmd;
+ int mask, error = 0;
switch (command) {
case SIOCSIFMTU:
TI_LOCK(sc);
- if (ifr->ifr_mtu > TI_JUMBO_MTU)
+ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > TI_JUMBO_MTU)
error = EINVAL;
else {
ifp->if_mtu = ifr->ifr_mtu;
- ti_init_locked(sc);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ ti_init_locked(sc);
+ }
}
TI_UNLOCK(sc);
break;
@@ -3457,15 +3583,32 @@ ti_ioctl(ifp, command, data)
case SIOCSIFCAP:
TI_LOCK(sc);
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
- if (mask & IFCAP_HWCSUM) {
- if (IFCAP_HWCSUM & ifp->if_capenable)
- ifp->if_capenable &= ~IFCAP_HWCSUM;
- else
- ifp->if_capenable |= IFCAP_HWCSUM;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if ((mask & IFCAP_TXCSUM) != 0 &&
+ (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
+ ifp->if_capenable ^= IFCAP_TXCSUM;
+ if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
+ ifp->if_hwassist |= TI_CSUM_FEATURES;
+ else
+ ifp->if_hwassist &= ~TI_CSUM_FEATURES;
+ }
+ if ((mask & IFCAP_RXCSUM) != 0 &&
+ (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
+ (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
+ ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
+ if ((mask & (IFCAP_TXCSUM | IFCAP_RXCSUM |
+ IFCAP_VLAN_HWTAGGING)) != 0) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
ti_init_locked(sc);
+ }
}
TI_UNLOCK(sc);
+ VLAN_CAPABILITIES(ifp);
break;
default:
error = ether_ioctl(ifp, command, data);
@@ -3514,8 +3657,8 @@ static int
ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td)
{
- int error;
struct ti_softc *sc;
+ int error;
sc = dev->si_drv1;
if (sc == NULL)
@@ -3531,14 +3674,16 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
outstats = (struct ti_stats *)addr;
TI_LOCK(sc);
- bcopy(&sc->ti_rdata->ti_info.ti_stats, outstats,
- sizeof(struct ti_stats));
+ bus_dmamap_sync(sc->ti_cdata.ti_gib_tag,
+ sc->ti_cdata.ti_gib_map, BUS_DMASYNC_POSTREAD);
+ bcopy(&sc->ti_rdata.ti_info->ti_stats, outstats,
+ sizeof(struct ti_stats));
TI_UNLOCK(sc);
break;
}
case TIIOCGETPARAMS:
{
- struct ti_params *params;
+ struct ti_params *params;
params = (struct ti_params *)addr;
@@ -3551,9 +3696,6 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
params->ti_tx_buf_ratio = sc->ti_tx_buf_ratio;
params->param_mask = TI_PARAM_ALL;
TI_UNLOCK(sc);
-
- error = 0;
-
break;
}
case TIIOCSETPARAMS:
@@ -3598,13 +3740,10 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
sc->ti_tx_buf_ratio);
}
TI_UNLOCK(sc);
-
- error = 0;
-
break;
}
case TIIOCSETTRACE: {
- ti_trace_type trace_type;
+ ti_trace_type trace_type;
trace_type = *(ti_trace_type *)addr;
@@ -3613,15 +3752,14 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
* this register to 0 should have the effect of disabling
* tracing.
*/
+ TI_LOCK(sc);
CSR_WRITE_4(sc, TI_GCR_NIC_TRACING, trace_type);
-
- error = 0;
-
+ TI_UNLOCK(sc);
break;
}
case TIIOCGETTRACE: {
- struct ti_trace_buf *trace_buf;
- u_int32_t trace_start, cur_trace_ptr, trace_len;
+ struct ti_trace_buf *trace_buf;
+ uint32_t trace_start, cur_trace_ptr, trace_len;
trace_buf = (struct ti_trace_buf *)addr;
@@ -3629,7 +3767,6 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
trace_start = CSR_READ_4(sc, TI_GCR_NICTRACE_START);
cur_trace_ptr = CSR_READ_4(sc, TI_GCR_NICTRACE_PTR);
trace_len = CSR_READ_4(sc, TI_GCR_NICTRACE_LEN);
-
#if 0
if_printf(sc->ti_ifp, "trace_start = %#x, cur_trace_ptr = %#x, "
"trace_len = %d\n", trace_start,
@@ -3637,24 +3774,20 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if_printf(sc->ti_ifp, "trace_buf->buf_len = %d\n",
trace_buf->buf_len);
#endif
-
error = ti_copy_mem(sc, trace_start, min(trace_len,
- trace_buf->buf_len),
- (caddr_t)trace_buf->buf, 1, 1);
-
+ trace_buf->buf_len), (caddr_t)trace_buf->buf, 1, 1);
if (error == 0) {
trace_buf->fill_len = min(trace_len,
- trace_buf->buf_len);
+ trace_buf->buf_len);
if (cur_trace_ptr < trace_start)
trace_buf->cur_trace_ptr =
- trace_start - cur_trace_ptr;
+ trace_start - cur_trace_ptr;
else
trace_buf->cur_trace_ptr =
- cur_trace_ptr - trace_start;
+ cur_trace_ptr - trace_start;
} else
trace_buf->fill_len = 0;
TI_UNLOCK(sc);
-
break;
}
@@ -3676,13 +3809,12 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
* you're interested in every ioctl, you'll only be
* able to debug one board at a time.
*/
- error = 0;
break;
case ALT_READ_TG_MEM:
case ALT_WRITE_TG_MEM:
{
struct tg_mem *mem_param;
- u_int32_t sram_end, scratch_end;
+ uint32_t sram_end, scratch_end;
mem_param = (struct tg_mem *)addr;
@@ -3699,25 +3831,22 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
* nothing else.
*/
TI_LOCK(sc);
- if ((mem_param->tgAddr >= TI_BEG_SRAM)
- && ((mem_param->tgAddr + mem_param->len) <= sram_end)) {
+ if (mem_param->tgAddr >= TI_BEG_SRAM &&
+ mem_param->tgAddr + mem_param->len <= sram_end) {
/*
* In this instance, we always copy to/from user
* space, so the user space argument is set to 1.
*/
error = ti_copy_mem(sc, mem_param->tgAddr,
- mem_param->len,
- mem_param->userAddr, 1,
- (cmd == ALT_READ_TG_MEM) ? 1 : 0);
- } else if ((mem_param->tgAddr >= TI_BEG_SCRATCH)
- && (mem_param->tgAddr <= scratch_end)) {
+ mem_param->len, mem_param->userAddr, 1,
+ cmd == ALT_READ_TG_MEM ? 1 : 0);
+ } else if (mem_param->tgAddr >= TI_BEG_SCRATCH &&
+ mem_param->tgAddr <= scratch_end) {
error = ti_copy_scratch(sc, mem_param->tgAddr,
- mem_param->len,
- mem_param->userAddr, 1,
- (cmd == ALT_READ_TG_MEM) ?
- 1 : 0, TI_PROCESSOR_A);
- } else if ((mem_param->tgAddr >= TI_BEG_SCRATCH_B_DEBUG)
- && (mem_param->tgAddr <= TI_BEG_SCRATCH_B_DEBUG)) {
+ mem_param->len, mem_param->userAddr, 1,
+ cmd == ALT_READ_TG_MEM ? 1 : 0, TI_PROCESSOR_A);
+ } else if (mem_param->tgAddr >= TI_BEG_SCRATCH_B_DEBUG &&
+ mem_param->tgAddr <= TI_BEG_SCRATCH_B_DEBUG) {
if (sc->ti_hwrev == TI_HWREV_TIGON) {
if_printf(sc->ti_ifp,
"invalid memory range for Tigon I\n");
@@ -3725,11 +3854,9 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
error = ti_copy_scratch(sc, mem_param->tgAddr -
- TI_SCRATCH_DEBUG_OFF,
- mem_param->len,
- mem_param->userAddr, 1,
- (cmd == ALT_READ_TG_MEM) ?
- 1 : 0, TI_PROCESSOR_B);
+ TI_SCRATCH_DEBUG_OFF, mem_param->len,
+ mem_param->userAddr, 1,
+ cmd == ALT_READ_TG_MEM ? 1 : 0, TI_PROCESSOR_B);
} else {
if_printf(sc->ti_ifp, "memory address %#x len %d is "
"out of supported range\n",
@@ -3737,14 +3864,13 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
error = EINVAL;
}
TI_UNLOCK(sc);
-
break;
}
case ALT_READ_TG_REG:
case ALT_WRITE_TG_REG:
{
- struct tg_reg *regs;
- u_int32_t tmpval;
+ struct tg_reg *regs;
+ uint32_t tmpval;
regs = (struct tg_reg *)addr;
@@ -3758,7 +3884,7 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
TI_LOCK(sc);
if (cmd == ALT_READ_TG_REG) {
bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
- regs->addr, &tmpval, 1);
+ regs->addr, &tmpval, 1);
regs->data = ntohl(tmpval);
#if 0
if ((regs->addr == TI_CPU_STATE)
@@ -3770,10 +3896,9 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
} else {
tmpval = htonl(regs->data);
bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
- regs->addr, &tmpval, 1);
+ regs->addr, &tmpval, 1);
}
TI_UNLOCK(sc);
-
break;
}
default:
@@ -3786,8 +3911,8 @@ ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
static void
ti_watchdog(void *arg)
{
- struct ti_softc *sc;
- struct ifnet *ifp;
+ struct ti_softc *sc;
+ struct ifnet *ifp;
sc = arg;
TI_LOCK_ASSERT(sc);
@@ -3805,7 +3930,7 @@ ti_watchdog(void *arg)
ifp = sc->ti_ifp;
if_printf(ifp, "watchdog timeout -- resetting\n");
- ti_stop(sc);
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
ti_init_locked(sc);
ifp->if_oerrors++;
@@ -3816,11 +3941,10 @@ ti_watchdog(void *arg)
* RX and TX lists.
*/
static void
-ti_stop(sc)
- struct ti_softc *sc;
+ti_stop(struct ti_softc *sc)
{
- struct ifnet *ifp;
- struct ti_cmd_desc cmd;
+ struct ifnet *ifp;
+ struct ti_cmd_desc cmd;
TI_LOCK_ASSERT(sc);
@@ -3834,11 +3958,11 @@ ti_stop(sc)
TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);
/* Halt and reinitialize. */
- if (ti_chipinit(sc) != 0)
- return;
- ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);
- if (ti_chipinit(sc) != 0)
- return;
+ if (ti_chipinit(sc) == 0) {
+ ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);
+ /* XXX ignore init errors. */
+ ti_chipinit(sc);
+ }
/* Free the RX lists. */
ti_free_rx_ring_std(sc);
@@ -3866,10 +3990,9 @@ ti_stop(sc)
* get confused by errant DMAs when rebooting.
*/
static int
-ti_shutdown(dev)
- device_t dev;
+ti_shutdown(device_t dev)
{
- struct ti_softc *sc;
+ struct ti_softc *sc;
sc = device_get_softc(dev);
TI_LOCK(sc);
@@ -3878,3 +4001,65 @@ ti_shutdown(dev)
return (0);
}
+
+static void
+ti_sysctl_node(struct ti_softc *sc)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid_list *child;
+ char tname[32];
+
+ ctx = device_get_sysctl_ctx(sc->ti_dev);
+ child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ti_dev));
+
+ /* Use DAC */
+ sc->ti_dac = 1;
+ snprintf(tname, sizeof(tname), "dev.ti.%d.dac",
+ device_get_unit(sc->ti_dev));
+ TUNABLE_INT_FETCH(tname, &sc->ti_dac);
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_coal_ticks", CTLFLAG_RW,
+	    &sc->ti_rx_coal_ticks, 0, "Receive coalesced ticks");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_max_coal_bds", CTLFLAG_RW,
+	    &sc->ti_rx_max_coal_bds, 0, "Receive max coalesced BDs");
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_coal_ticks", CTLFLAG_RW,
+	    &sc->ti_tx_coal_ticks, 0, "Send coalesced ticks");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_max_coal_bds", CTLFLAG_RW,
+	    &sc->ti_tx_max_coal_bds, 0, "Send max coalesced BDs");
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_buf_ratio", CTLFLAG_RW,
+ &sc->ti_tx_buf_ratio, 0,
+ "Ratio of NIC memory devoted to TX buffer");
+
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "stat_ticks", CTLFLAG_RW,
+ &sc->ti_stat_ticks, 0,
+ "Number of clock ticks for statistics update interval");
+
+ /* Pull in device tunables. */
+ sc->ti_rx_coal_ticks = 170;
+ resource_int_value(device_get_name(sc->ti_dev),
+ device_get_unit(sc->ti_dev), "rx_coal_ticks",
+ &sc->ti_rx_coal_ticks);
+ sc->ti_rx_max_coal_bds = 64;
+ resource_int_value(device_get_name(sc->ti_dev),
+ device_get_unit(sc->ti_dev), "rx_max_coal_bds",
+ &sc->ti_rx_max_coal_bds);
+
+ sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
+ resource_int_value(device_get_name(sc->ti_dev),
+ device_get_unit(sc->ti_dev), "tx_coal_ticks",
+ &sc->ti_tx_coal_ticks);
+ sc->ti_tx_max_coal_bds = 32;
+ resource_int_value(device_get_name(sc->ti_dev),
+ device_get_unit(sc->ti_dev), "tx_max_coal_bds",
+ &sc->ti_tx_max_coal_bds);
+ sc->ti_tx_buf_ratio = 21;
+ resource_int_value(device_get_name(sc->ti_dev),
+ device_get_unit(sc->ti_dev), "tx_buf_ratio",
+ &sc->ti_tx_buf_ratio);
+
+ sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
+ resource_int_value(device_get_name(sc->ti_dev),
+ device_get_unit(sc->ti_dev), "stat_ticks",
+ &sc->ti_stat_ticks);
+}
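
For reference, the default/hint/sysctl sequence above is a common FreeBSD
driver pattern: program a compiled-in default, let a device hint override
it, and expose the live variable through the device's sysctl tree. A
minimal sketch with a hypothetical "foo" driver (names illustrative, not
from this patch; assumes struct foo_softc carries an int foo_ticks):

static void
foo_sysctl_node(device_t dev, struct foo_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Compiled-in default, overridable via hint.foo.0.ticks. */
	sc->foo_ticks = 170;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ticks", &sc->foo_ticks);

	/* Expose the live value as the dev.foo.0.ticks sysctl. */
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ticks", CTLFLAG_RW,
	    &sc->foo_ticks, 0, "Coalescing ticks");
}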
diff --git a/sys/dev/ti/if_tireg.h b/sys/dev/ti/if_tireg.h
index 22259b5..63b93f4 100644
--- a/sys/dev/ti/if_tireg.h
+++ b/sys/dev/ti/if_tireg.h
@@ -400,6 +400,8 @@
#define TI_RETURN_RING_CNT 2048
#define TI_MAXTXSEGS 32
+#define TI_RING_ALIGN 32
+#define TI_JUMBO_RING_ALIGN 64
/*
* Possible TX ring sizes.
@@ -477,8 +479,8 @@
*/
typedef struct {
- u_int32_t ti_addr_hi;
- u_int32_t ti_addr_lo;
+ uint32_t ti_addr_hi;
+ uint32_t ti_addr_lo;
} ti_hostaddr;
#define TI_HOSTADDR(x) x.ti_addr_lo
@@ -516,13 +518,13 @@ ti_hostaddr64(ti_hostaddr *x, bus_addr_t addr)
struct ti_rcb {
ti_hostaddr ti_hostaddr;
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_max_len;
- u_int16_t ti_flags;
+ uint16_t ti_max_len;
+ uint16_t ti_flags;
#else
- u_int16_t ti_flags;
- u_int16_t ti_max_len;
+ uint16_t ti_flags;
+ uint16_t ti_max_len;
#endif
- u_int32_t ti_unused;
+ uint32_t ti_unused;
};
#define TI_RCB_FLAG_TCP_UDP_CKSUM 0x00000001
@@ -536,8 +538,8 @@ struct ti_rcb {
#define TI_RCB_FLAG_RING_DISABLED 0x00000200
struct ti_producer {
- u_int32_t ti_idx;
- u_int32_t ti_unused;
+ uint32_t ti_idx;
+ uint32_t ti_unused;
};
/*
@@ -571,109 +573,123 @@ struct ti_gib {
struct ti_rx_desc {
ti_hostaddr ti_addr;
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_idx;
- u_int16_t ti_len;
+ uint16_t ti_idx;
+ uint16_t ti_len;
#else
- u_int16_t ti_len;
- u_int16_t ti_idx;
+ uint16_t ti_len;
+ uint16_t ti_idx;
#endif
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_type;
- u_int16_t ti_flags;
+ uint16_t ti_type;
+ uint16_t ti_flags;
#else
- u_int16_t ti_flags;
- u_int16_t ti_type;
+ uint16_t ti_flags;
+ uint16_t ti_type;
#endif
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_ip_cksum;
- u_int16_t ti_tcp_udp_cksum;
+ uint16_t ti_ip_cksum;
+ uint16_t ti_tcp_udp_cksum;
#else
- u_int16_t ti_tcp_udp_cksum;
- u_int16_t ti_ip_cksum;
+ uint16_t ti_tcp_udp_cksum;
+ uint16_t ti_ip_cksum;
#endif
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_error_flags;
- u_int16_t ti_vlan_tag;
+ uint16_t ti_error_flags;
+ uint16_t ti_vlan_tag;
#else
- u_int16_t ti_vlan_tag;
- u_int16_t ti_error_flags;
+ uint16_t ti_vlan_tag;
+ uint16_t ti_error_flags;
#endif
- u_int32_t ti_rsvd;
- u_int32_t ti_opaque;
+ uint32_t ti_rsvd;
+ uint32_t ti_opaque;
};
+#define TI_STD_RX_RING_SZ (sizeof(struct ti_rx_desc) * TI_STD_RX_RING_CNT)
+#define TI_MINI_RX_RING_SZ (sizeof(struct ti_rx_desc) * TI_MINI_RX_RING_CNT)
+#define TI_RX_RETURN_RING_SZ (sizeof(struct ti_rx_desc) * TI_RETURN_RING_CNT)
+
struct ti_rx_desc_ext {
ti_hostaddr ti_addr1;
ti_hostaddr ti_addr2;
ti_hostaddr ti_addr3;
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_len1;
- u_int16_t ti_len2;
+ uint16_t ti_len1;
+ uint16_t ti_len2;
#else
- u_int16_t ti_len2;
- u_int16_t ti_len1;
+ uint16_t ti_len2;
+ uint16_t ti_len1;
#endif
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_len3;
- u_int16_t ti_rsvd0;
+ uint16_t ti_len3;
+ uint16_t ti_rsvd0;
#else
- u_int16_t ti_rsvd0;
- u_int16_t ti_len3;
+ uint16_t ti_rsvd0;
+ uint16_t ti_len3;
#endif
ti_hostaddr ti_addr0;
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_idx;
- u_int16_t ti_len0;
+ uint16_t ti_idx;
+ uint16_t ti_len0;
#else
- u_int16_t ti_len0;
- u_int16_t ti_idx;
+ uint16_t ti_len0;
+ uint16_t ti_idx;
#endif
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_type;
- u_int16_t ti_flags;
+ uint16_t ti_type;
+ uint16_t ti_flags;
#else
- u_int16_t ti_flags;
- u_int16_t ti_type;
+ uint16_t ti_flags;
+ uint16_t ti_type;
#endif
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_ip_cksum;
- u_int16_t ti_tcp_udp_cksum;
+ uint16_t ti_ip_cksum;
+ uint16_t ti_tcp_udp_cksum;
#else
- u_int16_t ti_tcp_udp_cksum;
- u_int16_t ti_ip_cksum;
+ uint16_t ti_tcp_udp_cksum;
+ uint16_t ti_ip_cksum;
#endif
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_error_flags;
- u_int16_t ti_vlan_tag;
+ uint16_t ti_error_flags;
+ uint16_t ti_vlan_tag;
#else
- u_int16_t ti_vlan_tag;
- u_int16_t ti_error_flags;
+ uint16_t ti_vlan_tag;
+ uint16_t ti_error_flags;
#endif
- u_int32_t ti_rsvd1;
- u_int32_t ti_opaque;
+ uint32_t ti_rsvd1;
+ uint32_t ti_opaque;
};
+#ifdef TI_SF_BUF_JUMBO
+#define TI_JUMBO_RX_RING_SZ \
+ (sizeof(struct ti_rx_desc_ext) * TI_JUMBO_RX_RING_CNT)
+#else
+#define TI_JUMBO_RX_RING_SZ \
+ (sizeof(struct ti_rx_desc) * TI_JUMBO_RX_RING_CNT)
+#endif
+
/*
* Transmit descriptors are, mercifully, very small.
*/
struct ti_tx_desc {
ti_hostaddr ti_addr;
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_len;
- u_int16_t ti_flags;
+ uint16_t ti_len;
+ uint16_t ti_flags;
#else
- u_int16_t ti_flags;
- u_int16_t ti_len;
+ uint16_t ti_flags;
+ uint16_t ti_len;
#endif
#if BYTE_ORDER == BIG_ENDIAN
- u_int16_t ti_rsvd;
- u_int16_t ti_vlan_tag;
+ uint16_t ti_rsvd;
+ uint16_t ti_vlan_tag;
#else
- u_int16_t ti_vlan_tag;
- u_int16_t ti_rsvd;
+ uint16_t ti_vlan_tag;
+ uint16_t ti_rsvd;
#endif
};
+#define TI_TX_RING_SZ (sizeof(struct ti_tx_desc) * TI_TX_RING_CNT)
+
/*
* NOTE! On the Alpha, we have an alignment constraint.
* The first thing in the packet is a 14-byte Ethernet header.
@@ -753,7 +769,7 @@ struct ti_tx_desc {
* Tigon command structure.
*/
struct ti_cmd_desc {
- u_int32_t ti_cmdx;
+ uint32_t ti_cmdx;
};
#define TI_CMD_CMD(cmd) (((((cmd)->ti_cmdx)) >> 24) & 0xff)
@@ -842,9 +858,10 @@ struct ti_cmd_desc {
* Tigon event structure.
*/
struct ti_event_desc {
- u_int32_t ti_eventx;
- u_int32_t ti_rsvd;
+ uint32_t ti_eventx;
+ uint32_t ti_rsvd;
};
+#define TI_EVENT_RING_SZ (sizeof(struct ti_event_desc) * TI_EVENT_RING_CNT)
#define TI_EVENT_EVENT(e) (((((e)->ti_eventx)) >> 24) & 0xff)
#define TI_EVENT_CODE(e) (((((e)->ti_eventx)) >> 12) & 0xfff)
@@ -887,23 +904,6 @@ struct ti_event_desc {
#define TI_CLRBIT(sc, reg, x) \
CSR_WRITE_4((sc), (reg), (CSR_READ_4((sc), (reg)) & ~(x)))
-/*
- * Memory management stuff. Note: the SSLOTS, MSLOTS and JSLOTS
- * values are tuneable. They control the actual amount of buffers
- * allocated for the standard, mini and jumbo receive rings.
- */
-
-#define TI_SSLOTS 256
-#define TI_MSLOTS 256
-#define TI_JSLOTS 256
-
-#define TI_JRAWLEN (TI_JUMBO_FRAMELEN + ETHER_ALIGN)
-#define TI_JLEN (TI_JRAWLEN + (sizeof(u_int64_t) - \
- (TI_JRAWLEN % sizeof(u_int64_t))))
-#define TI_JPAGESZ PAGE_SIZE
-#define TI_RESID (TI_JPAGESZ - (TI_JLEN * TI_JSLOTS) % TI_JPAGESZ)
-#define TI_JMEM ((TI_JLEN * TI_JSLOTS) + TI_RESID)
-
struct ti_txdesc {
struct mbuf *tx_m;
bus_dmamap_t tx_dmamap;
@@ -912,6 +912,21 @@ struct ti_txdesc {
STAILQ_HEAD(ti_txdq, ti_txdesc);
+struct ti_status {
+ /*
+ * Make sure producer structures are aligned on 32-byte cache
+	 * line boundaries. We could create a separate DMA area for
+	 * each producer/consumer, but it would not gain much since
+	 * the driver uses a single global lock.
+ */
+ struct ti_producer ti_ev_prodidx_r;
+ uint32_t ti_pad0[6];
+ struct ti_producer ti_return_prodidx_r;
+ uint32_t ti_pad1[6];
+ struct ti_producer ti_tx_considx_r;
+ uint32_t ti_pad2[6];
+};
+
/*
* Ring structures. Most of these reside in host memory and we tell
* the NIC where they are via the ring control blocks. The exceptions
@@ -919,54 +934,72 @@ STAILQ_HEAD(ti_txdq, ti_txdesc);
* we access via the shared memory window.
*/
struct ti_ring_data {
- struct ti_rx_desc ti_rx_std_ring[TI_STD_RX_RING_CNT];
-#ifdef TI_PRIVATE_JUMBOS
- struct ti_rx_desc ti_rx_jumbo_ring[TI_JUMBO_RX_RING_CNT];
+ struct ti_gib *ti_info;
+ bus_addr_t ti_info_paddr;
+ struct ti_status *ti_status;
+ bus_addr_t ti_status_paddr;
+ struct ti_rx_desc *ti_rx_std_ring;
+ bus_addr_t ti_rx_std_ring_paddr;
+#ifdef TI_SF_BUF_JUMBO
+ struct ti_rx_desc_ext *ti_rx_jumbo_ring;
#else
- struct ti_rx_desc_ext ti_rx_jumbo_ring[TI_JUMBO_RX_RING_CNT];
+ struct ti_rx_desc *ti_rx_jumbo_ring;
#endif
- struct ti_rx_desc ti_rx_mini_ring[TI_MINI_RX_RING_CNT];
- struct ti_rx_desc ti_rx_return_ring[TI_RETURN_RING_CNT];
- struct ti_event_desc ti_event_ring[TI_EVENT_RING_CNT];
- struct ti_tx_desc ti_tx_ring[TI_TX_RING_CNT];
- /*
- * Make sure producer structures are aligned on 32-byte cache
- * line boundaries.
- */
- struct ti_producer ti_ev_prodidx_r;
- u_int32_t ti_pad0[6];
- struct ti_producer ti_return_prodidx_r;
- u_int32_t ti_pad1[6];
- struct ti_producer ti_tx_considx_r;
- u_int32_t ti_pad2[6];
- struct ti_gib ti_info;
+ bus_addr_t ti_rx_jumbo_ring_paddr;
+ struct ti_rx_desc *ti_rx_mini_ring;
+ bus_addr_t ti_rx_mini_ring_paddr;
+ struct ti_rx_desc *ti_rx_return_ring;
+ bus_addr_t ti_rx_return_ring_paddr;
+ struct ti_event_desc *ti_event_ring;
+ bus_addr_t ti_event_ring_paddr;
+ struct ti_tx_desc *ti_tx_ring;
+ bus_addr_t ti_tx_ring_paddr;
};
-#define TI_RD_OFF(x) offsetof(struct ti_ring_data, x)
-
/*
* Mbuf pointers. We need these to keep track of the virtual addresses
* of our mbuf chains since we can only convert from physical to virtual,
* not the other way around.
*/
struct ti_chain_data {
+ bus_dma_tag_t ti_parent_tag;
+ bus_dma_tag_t ti_gib_tag;
+ bus_dmamap_t ti_gib_map;
+ bus_dma_tag_t ti_event_ring_tag;
+ bus_dmamap_t ti_event_ring_map;
+ bus_dma_tag_t ti_status_tag;
+ bus_dmamap_t ti_status_map;
+ bus_dma_tag_t ti_tx_ring_tag;
+ bus_dmamap_t ti_tx_ring_map;
+ bus_dma_tag_t ti_tx_tag;
struct ti_txdesc ti_txdesc[TI_TX_RING_CNT];
struct ti_txdq ti_txfreeq;
struct ti_txdq ti_txbusyq;
+ bus_dma_tag_t ti_rx_return_ring_tag;
+ bus_dmamap_t ti_rx_return_ring_map;
+ bus_dma_tag_t ti_rx_std_ring_tag;
+ bus_dmamap_t ti_rx_std_ring_map;
+ bus_dma_tag_t ti_rx_std_tag;
struct mbuf *ti_rx_std_chain[TI_STD_RX_RING_CNT];
bus_dmamap_t ti_rx_std_maps[TI_STD_RX_RING_CNT];
+ bus_dmamap_t ti_rx_std_sparemap;
+ bus_dma_tag_t ti_rx_jumbo_ring_tag;
+ bus_dmamap_t ti_rx_jumbo_ring_map;
+ bus_dma_tag_t ti_rx_jumbo_tag;
struct mbuf *ti_rx_jumbo_chain[TI_JUMBO_RX_RING_CNT];
bus_dmamap_t ti_rx_jumbo_maps[TI_JUMBO_RX_RING_CNT];
+ bus_dmamap_t ti_rx_jumbo_sparemap;
+ bus_dma_tag_t ti_rx_mini_ring_tag;
+ bus_dmamap_t ti_rx_mini_ring_map;
+ bus_dma_tag_t ti_rx_mini_tag;
struct mbuf *ti_rx_mini_chain[TI_MINI_RX_RING_CNT];
bus_dmamap_t ti_rx_mini_maps[TI_MINI_RX_RING_CNT];
- /* Stick the jumbo mem management stuff here too. */
- caddr_t ti_jslots[TI_JSLOTS];
- void *ti_jumbo_buf;
+ bus_dmamap_t ti_rx_mini_sparemap;
};
struct ti_type {
- u_int16_t ti_vid;
- u_int16_t ti_did;
+ uint16_t ti_vid;
+ uint16_t ti_did;
const char *ti_name;
};
@@ -980,11 +1013,6 @@ struct ti_mc_entry {
SLIST_ENTRY(ti_mc_entry) mc_entries;
};
-struct ti_jpool_entry {
- int slot;
- SLIST_ENTRY(ti_jpool_entry) jpool_entries;
-};
-
typedef enum {
TI_FLAG_NONE = 0x00,
TI_FLAG_DEBUGING = 0x01,
@@ -1000,24 +1028,16 @@ struct ti_softc {
struct resource *ti_irq;
struct resource *ti_res;
struct ifmedia ifmedia; /* media info */
- u_int8_t ti_unit; /* interface number */
- u_int8_t ti_hwrev; /* Tigon rev (1 or 2) */
- u_int8_t ti_copper; /* 1000baseTX card */
- u_int8_t ti_linkstat; /* Link state */
+ uint8_t ti_hwrev; /* Tigon rev (1 or 2) */
+ uint8_t ti_copper; /* 1000baseTX card */
+ uint8_t ti_linkstat; /* Link state */
int ti_hdrsplit; /* enable header splitting */
- bus_dma_tag_t ti_parent_dmat;
- bus_dma_tag_t ti_jumbo_dmat;
- bus_dmamap_t ti_jumbo_dmamap;
- bus_dma_tag_t ti_mbuftx_dmat;
- bus_dma_tag_t ti_mbufrx_dmat;
- bus_dma_tag_t ti_rdata_dmat;
- bus_dmamap_t ti_rdata_dmamap;
- bus_addr_t ti_rdata_phys;
- struct ti_ring_data *ti_rdata; /* rings */
+ int ti_dac;
+ struct ti_ring_data ti_rdata; /* rings */
struct ti_chain_data ti_cdata; /* mbufs */
-#define ti_ev_prodidx ti_rdata->ti_ev_prodidx_r
-#define ti_return_prodidx ti_rdata->ti_return_prodidx_r
-#define ti_tx_considx ti_rdata->ti_tx_considx_r
+#define ti_ev_prodidx ti_rdata.ti_status->ti_ev_prodidx_r
+#define ti_return_prodidx ti_rdata.ti_status->ti_return_prodidx_r
+#define ti_tx_considx ti_rdata.ti_status->ti_tx_considx_r
int ti_tx_saved_prodidx;
int ti_tx_saved_considx;
int ti_rx_saved_considx;
@@ -1027,20 +1047,20 @@ struct ti_softc {
int ti_mini; /* current mini ring head */
 	int ti_jumbo;			/* current jumbo ring head */
SLIST_HEAD(__ti_mchead, ti_mc_entry) ti_mc_listhead;
- SLIST_HEAD(__ti_jfreehead, ti_jpool_entry) ti_jfree_listhead;
- SLIST_HEAD(__ti_jinusehead, ti_jpool_entry) ti_jinuse_listhead;
- u_int32_t ti_stat_ticks;
- u_int32_t ti_rx_coal_ticks;
- u_int32_t ti_tx_coal_ticks;
- u_int32_t ti_rx_max_coal_bds;
- u_int32_t ti_tx_max_coal_bds;
- u_int32_t ti_tx_buf_ratio;
+ uint32_t ti_stat_ticks;
+ uint32_t ti_rx_coal_ticks;
+ uint32_t ti_tx_coal_ticks;
+ uint32_t ti_rx_max_coal_bds;
+ uint32_t ti_tx_max_coal_bds;
+ uint32_t ti_tx_buf_ratio;
int ti_if_flags;
int ti_txcnt;
struct mtx ti_mtx;
struct callout ti_watchdog;
int ti_timer;
ti_flag_vals ti_flags;
+ uint8_t *ti_membuf;
+ uint8_t *ti_membuf2;
struct cdev *dev;
};
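
As a cross-check on the ti_status layout introduced above: struct
ti_producer is two uint32_t fields (8 bytes), so each uint32_t pad[6]
(24 bytes) rounds a producer slot up to exactly 32 bytes. A standalone
sketch (hypothetical local copies of the structures, compilable with any
C11 compiler):

#include <stddef.h>
#include <stdint.h>

struct producer {			/* mirrors struct ti_producer */
	uint32_t idx;
	uint32_t unused;
};

struct status {				/* mirrors struct ti_status */
	struct producer ev_prodidx;	/* offset 0 */
	uint32_t pad0[6];		/* 8 + 24 = 32 */
	struct producer return_prodidx;	/* offset 32 */
	uint32_t pad1[6];
	struct producer tx_considx;	/* offset 64 */
	uint32_t pad2[6];
};

/* Each producer/consumer index starts a fresh 32-byte cache line. */
_Static_assert(offsetof(struct status, return_prodidx) == 32, "pad0");
_Static_assert(offsetof(struct status, tx_considx) == 64, "pad1");
_Static_assert(sizeof(struct status) == 96, "pad2");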
diff --git a/sys/dev/tl/if_tl.c b/sys/dev/tl/if_tl.c
index a256552..b5ffc2c 100644
--- a/sys/dev/tl/if_tl.c
+++ b/sys/dev/tl/if_tl.c
@@ -203,6 +203,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
@@ -228,7 +229,7 @@ MODULE_DEPEND(tl, miibus, 1, 1, 1);
* Various supported device vendors/types and their names.
*/
-static struct tl_type tl_devs[] = {
+static const struct tl_type tl_devs[] = {
{ TI_VENDORID, TI_DEVICEID_THUNDERLAN,
"Texas Instruments ThunderLAN" },
{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
@@ -290,10 +291,6 @@ static u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
static u_int8_t tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *);
static int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);
-static void tl_mii_sync(struct tl_softc *);
-static void tl_mii_send(struct tl_softc *, u_int32_t, int);
-static int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *);
-static int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *);
static int tl_miibus_readreg(device_t, int, int);
static int tl_miibus_writereg(device_t, int, int, int);
static void tl_miibus_statchg(device_t);
@@ -318,6 +315,24 @@ static void tl_dio_clrbit(struct tl_softc *, int, int);
static void tl_dio_setbit16(struct tl_softc *, int, int);
static void tl_dio_clrbit16(struct tl_softc *, int, int);
+/*
+ * MII bit-bang glue
+ */
+static uint32_t tl_mii_bitbang_read(device_t);
+static void tl_mii_bitbang_write(device_t, uint32_t);
+
+static const struct mii_bitbang_ops tl_mii_bitbang_ops = {
+ tl_mii_bitbang_read,
+ tl_mii_bitbang_write,
+ {
+ TL_SIO_MDATA, /* MII_BIT_MDO */
+ TL_SIO_MDATA, /* MII_BIT_MDI */
+ TL_SIO_MCLK, /* MII_BIT_MDC */
+ TL_SIO_MTXEN, /* MII_BIT_DIR_HOST_PHY */
+ 0, /* MII_BIT_DIR_PHY_HOST */
+ }
+};
+
#ifdef TL_USEIOSPACE
#define TL_RES SYS_RES_IOPORT
#define TL_RID TL_PCI_LOIO
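
Two details of the tl_mii_bitbang_ops table above are worth noting
(inferred from the shared dev/mii/mii_bitbang.c module, not spelled out
in this patch): MII_BIT_MDO and MII_BIT_MDI both map to TL_SIO_MDATA
because the ThunderLAN has a single bidirectional MDIO data bit whose
direction is steered by TL_SIO_MTXEN, and MII_BIT_DIR_PHY_HOST is 0
because clearing MTXEN already releases the pin. With the table in
place, the framing (preamble, opcode, turnaround, data) lives entirely
in the shared module and the per-driver glue reduces to a sketch like:

static int
example_readreg(device_t dev, int phy, int reg)
{

	return (mii_bitbang_readreg(dev, &tl_mii_bitbang_ops, phy, reg));
}

static void
example_writereg(device_t dev, int phy, int reg, int val)
{

	mii_bitbang_writereg(dev, &tl_mii_bitbang_ops, phy, reg, val);
}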
@@ -360,7 +375,12 @@ static u_int8_t tl_dio_read8(sc, reg)
struct tl_softc *sc;
int reg;
{
+
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
}
@@ -368,7 +388,12 @@ static u_int16_t tl_dio_read16(sc, reg)
struct tl_softc *sc;
int reg;
{
+
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
}
@@ -376,7 +401,12 @@ static u_int32_t tl_dio_read32(sc, reg)
struct tl_softc *sc;
int reg;
{
+
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
}
@@ -385,9 +415,13 @@ static void tl_dio_write8(sc, reg, val)
int reg;
int val;
{
+
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
- return;
}
static void tl_dio_write16(sc, reg, val)
@@ -395,9 +429,13 @@ static void tl_dio_write16(sc, reg, val)
int reg;
int val;
{
+
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
- return;
}
static void tl_dio_write32(sc, reg, val)
@@ -405,9 +443,13 @@ static void tl_dio_write32(sc, reg, val)
int reg;
int val;
{
+
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
- return;
}
static void
@@ -418,12 +460,16 @@ tl_dio_setbit(sc, reg, bit)
{
u_int8_t f;
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
f |= bit;
+ CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 1,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
-
- return;
}
static void
@@ -434,12 +480,16 @@ tl_dio_clrbit(sc, reg, bit)
{
u_int8_t f;
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
f &= ~bit;
+ CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 1,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
-
- return;
}
static void tl_dio_setbit16(sc, reg, bit)
@@ -449,12 +499,16 @@ static void tl_dio_setbit16(sc, reg, bit)
{
u_int16_t f;
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
f |= bit;
+ CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
-
- return;
}
static void tl_dio_clrbit16(sc, reg, bit)
@@ -464,12 +518,16 @@ static void tl_dio_clrbit16(sc, reg, bit)
{
u_int16_t f;
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
+ CSR_BARRIER(sc, TL_DIO_ADDR, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
f &= ~bit;
+ CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
-
- return;
}
/*
@@ -608,61 +666,55 @@ tl_read_eeprom(sc, dest, off, cnt)
return(err ? 1 : 0);
}
-static void
-tl_mii_sync(sc)
- struct tl_softc *sc;
+#define TL_SIO_MII (TL_SIO_MCLK | TL_SIO_MDATA | TL_SIO_MTXEN)
+
+/*
+ * Read the MII serial port for the MII bit-bang module.
+ */
+static uint32_t
+tl_mii_bitbang_read(device_t dev)
{
- register int i;
+ struct tl_softc *sc;
+ uint32_t val;
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
+ sc = device_get_softc(dev);
- for (i = 0; i < 32; i++) {
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
- }
+ val = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MII;
+ CSR_BARRIER(sc, TL_NETSIO, 1,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- return;
+ return (val);
}
+/*
+ * Write the MII serial port for the MII bit-bang module.
+ */
static void
-tl_mii_send(sc, bits, cnt)
- struct tl_softc *sc;
- u_int32_t bits;
- int cnt;
+tl_mii_bitbang_write(device_t dev, uint32_t val)
{
- int i;
+ struct tl_softc *sc;
- for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
- if (bits & i) {
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
- } else {
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
- }
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
- }
+ sc = device_get_softc(dev);
+
+ val = (tl_dio_read8(sc, TL_NETSIO) & ~TL_SIO_MII) | val;
+ CSR_BARRIER(sc, TL_NETSIO, 1,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+ tl_dio_write8(sc, TL_NETSIO, val);
+ CSR_BARRIER(sc, TL_NETSIO, 1,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
static int
-tl_mii_readreg(sc, frame)
- struct tl_softc *sc;
- struct tl_mii_frame *frame;
-
+tl_miibus_readreg(dev, phy, reg)
+ device_t dev;
+ int phy, reg;
{
- int i, ack;
- int minten = 0;
+ struct tl_softc *sc;
+ int minten, val;
- tl_mii_sync(sc);
+ sc = device_get_softc(dev);
/*
- * Set up frame for RX.
- */
- frame->mii_stdelim = TL_MII_STARTDELIM;
- frame->mii_opcode = TL_MII_READOP;
- frame->mii_turnaround = 0;
- frame->mii_data = 0;
-
- /*
* Turn off MII interrupt by forcing MINTEN low.
*/
minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
@@ -670,89 +722,26 @@ tl_mii_readreg(sc, frame)
tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
}
- /*
- * Turn on data xmit.
- */
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
-
- /*
- * Send command/address info.
- */
- tl_mii_send(sc, frame->mii_stdelim, 2);
- tl_mii_send(sc, frame->mii_opcode, 2);
- tl_mii_send(sc, frame->mii_phyaddr, 5);
- tl_mii_send(sc, frame->mii_regaddr, 5);
-
- /*
- * Turn off xmit.
- */
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
-
- /* Idle bit */
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
-
- /* Check for ack */
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
- ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;
-
- /* Complete the cycle */
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
-
- /*
- * Now try reading data bits. If the ack failed, we still
- * need to clock through 16 cycles to keep the PHYs in sync.
- */
- if (ack) {
- for(i = 0; i < 16; i++) {
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
- }
- goto fail;
- }
-
- for (i = 0x8000; i; i >>= 1) {
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
- if (!ack) {
- if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
- frame->mii_data |= i;
- }
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
- }
-
-fail:
-
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
+ val = mii_bitbang_readreg(dev, &tl_mii_bitbang_ops, phy, reg);
- /* Reenable interrupts */
+ /* Reenable interrupts. */
if (minten) {
tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
}
- if (ack)
- return(1);
- return(0);
+ return (val);
}
static int
-tl_mii_writereg(sc, frame)
- struct tl_softc *sc;
- struct tl_mii_frame *frame;
-
+tl_miibus_writereg(dev, phy, reg, data)
+ device_t dev;
+ int phy, reg, data;
{
+ struct tl_softc *sc;
int minten;
- tl_mii_sync(sc);
-
- /*
- * Set up frame for TX.
- */
+ sc = device_get_softc(dev);
- frame->mii_stdelim = TL_MII_STARTDELIM;
- frame->mii_opcode = TL_MII_WRITEOP;
- frame->mii_turnaround = TL_MII_TURNAROUND;
-
/*
* Turn off MII interrupt by forcing MINTEN low.
*/
@@ -761,67 +750,12 @@ tl_mii_writereg(sc, frame)
tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
}
- /*
- * Turn on data output.
- */
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
-
- tl_mii_send(sc, frame->mii_stdelim, 2);
- tl_mii_send(sc, frame->mii_opcode, 2);
- tl_mii_send(sc, frame->mii_phyaddr, 5);
- tl_mii_send(sc, frame->mii_regaddr, 5);
- tl_mii_send(sc, frame->mii_turnaround, 2);
- tl_mii_send(sc, frame->mii_data, 16);
-
- tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
-
- /*
- * Turn off xmit.
- */
- tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
+ mii_bitbang_writereg(dev, &tl_mii_bitbang_ops, phy, reg, data);
- /* Reenable interrupts */
- if (minten)
+ /* Reenable interrupts. */
+ if (minten) {
tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
-
- return(0);
-}
-
-static int
-tl_miibus_readreg(dev, phy, reg)
- device_t dev;
- int phy, reg;
-{
- struct tl_softc *sc;
- struct tl_mii_frame frame;
-
- sc = device_get_softc(dev);
- bzero((char *)&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- tl_mii_readreg(sc, &frame);
-
- return(frame.mii_data);
-}
-
-static int
-tl_miibus_writereg(dev, phy, reg, data)
- device_t dev;
- int phy, reg, data;
-{
- struct tl_softc *sc;
- struct tl_mii_frame frame;
-
- sc = device_get_softc(dev);
- bzero((char *)&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- frame.mii_data = data;
-
- tl_mii_writereg(sc, &frame);
+ }
return(0);
}
@@ -841,8 +775,6 @@ tl_miibus_statchg(dev)
} else {
tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
}
-
- return;
}
/*
@@ -865,8 +797,6 @@ tl_setmode(sc, media)
tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
}
}
-
- return;
}
/*
@@ -909,8 +839,6 @@ tl_setfilt(sc, addr, slot)
for (i = 0; i < ETHER_ADDR_LEN; i++)
tl_dio_write8(sc, regaddr + i, *(addr + i));
-
- return;
}
/*
@@ -980,8 +908,6 @@ tl_setmulti(sc)
tl_dio_write32(sc, TL_HASH1, hashes[0]);
tl_dio_write32(sc, TL_HASH2, hashes[1]);
-
- return;
}
/*
@@ -994,13 +920,10 @@ static void
tl_hardreset(dev)
device_t dev;
{
- struct tl_softc *sc;
int i;
u_int16_t flags;
- sc = device_get_softc(dev);
-
- tl_mii_sync(sc);
+ mii_bitbang_sync(dev, &tl_mii_bitbang_ops);
flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
@@ -1010,11 +933,10 @@ tl_hardreset(dev)
tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
DELAY(50000);
tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
- tl_mii_sync(sc);
+ mii_bitbang_sync(dev, &tl_mii_bitbang_ops);
while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
DELAY(50000);
- return;
}
static void
@@ -1072,8 +994,6 @@ tl_softreset(sc, internal)
/* Wait for things to settle down a little. */
DELAY(500);
-
- return;
}
/*
@@ -1084,7 +1004,7 @@ static int
tl_probe(dev)
device_t dev;
{
- struct tl_type *t;
+ const struct tl_type *t;
t = tl_devs;
@@ -1105,7 +1025,7 @@ tl_attach(dev)
device_t dev;
{
u_int16_t did, vid;
- struct tl_type *t;
+ const struct tl_type *t;
struct ifnet *ifp;
struct tl_softc *sc;
int error, flags, i, rid, unit;
@@ -1415,9 +1335,9 @@ static int
tl_list_rx_init(sc)
struct tl_softc *sc;
{
- struct tl_chain_data *cd;
- struct tl_list_data *ld;
- int i;
+ struct tl_chain_data *cd;
+ struct tl_list_data *ld;
+ int i;
cd = &sc->tl_cdata;
ld = sc->tl_ldata;
@@ -1783,8 +1703,6 @@ tl_intr(xsc)
tl_start_locked(ifp);
TL_UNLOCK(sc);
-
- return;
}
static void
@@ -1843,8 +1761,6 @@ tl_stats_update(xsc)
mii = device_get_softc(sc->tl_miibus);
mii_tick(mii);
}
-
- return;
}
/*
@@ -2046,8 +1962,6 @@ tl_start_locked(ifp)
* Set a timeout in case the chip goes out to lunch.
*/
sc->tl_timer = 5;
-
- return;
}
static void
@@ -2143,8 +2057,6 @@ tl_init_locked(sc)
/* Start the stats update counter */
callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
-
- return;
}
/*
@@ -2204,8 +2116,6 @@ tl_ifmedia_sts(ifp, ifmr)
ifmr->ifm_status = mii->mii_media_status;
}
TL_UNLOCK(sc);
-
- return;
}
static int
@@ -2284,8 +2194,6 @@ tl_watchdog(sc)
tl_softreset(sc, 1);
tl_init_locked(sc);
-
- return;
}
/*
@@ -2351,8 +2259,6 @@ tl_stop(sc)
sizeof(sc->tl_ldata->tl_tx_list));
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-
- return;
}
/*
diff --git a/sys/dev/tl/if_tlreg.h b/sys/dev/tl/if_tlreg.h
index 4e340b1..4a82cc5 100644
--- a/sys/dev/tl/if_tlreg.h
+++ b/sys/dev/tl/if_tlreg.h
@@ -32,11 +32,10 @@
* $FreeBSD$
*/
-
struct tl_type {
u_int16_t tl_vid;
u_int16_t tl_did;
- char *tl_name;
+ const char *tl_name;
};
/*
@@ -203,6 +202,7 @@ struct tl_softc {
#define TL_INT_MASK 0x001C
#define TL_VEC_MASK 0x1FE0
+
/*
* Host command register bits
*/
@@ -390,36 +390,6 @@ struct tl_softc {
#define TL_MASK_MASK5 0x20
#define TL_MASK_MASK4 0x10
-/*
- * MII frame format
- */
-#ifdef ANSI_DOESNT_ALLOW_BITFIELDS
-struct tl_mii_frame {
- u_int16_t mii_stdelim:2,
- mii_opcode:2,
- mii_phyaddr:5,
- mii_regaddr:5,
- mii_turnaround:2;
- u_int16_t mii_data;
-};
-#else
-struct tl_mii_frame {
- u_int8_t mii_stdelim;
- u_int8_t mii_opcode;
- u_int8_t mii_phyaddr;
- u_int8_t mii_regaddr;
- u_int8_t mii_turnaround;
- u_int16_t mii_data;
-};
-#endif
-/*
- * MII constants
- */
-#define TL_MII_STARTDELIM 0x01
-#define TL_MII_READOP 0x02
-#define TL_MII_WRITEOP 0x01
-#define TL_MII_TURNAROUND 0x02
-
#define TL_LAST_FRAG 0x80000000
#define TL_CSTAT_UNUSED 0x8000
#define TL_CSTAT_FRAMECMP 0x4000
@@ -499,6 +469,9 @@ struct tl_stats {
#define CSR_READ_2(sc, reg) bus_read_2(sc->tl_res, reg)
#define CSR_READ_1(sc, reg) bus_read_1(sc->tl_res, reg)
+#define CSR_BARRIER(sc, reg, length, flags) \
+ bus_barrier(sc->tl_res, reg, length, flags)
+
#define CMD_PUT(sc, x) CSR_WRITE_4(sc, TL_HOSTCMD, x)
#define CMD_SET(sc, x) \
CSR_WRITE_4(sc, TL_HOSTCMD, CSR_READ_4(sc, TL_HOSTCMD) | (x))
diff --git a/sys/dev/twa/tw_osl_freebsd.c b/sys/dev/twa/tw_osl_freebsd.c
index caf18f1..5651b7f 100644
--- a/sys/dev/twa/tw_osl_freebsd.c
+++ b/sys/dev/twa/tw_osl_freebsd.c
@@ -54,7 +54,7 @@ TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
#endif /* TW_OSL_DEBUG */
-MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
+static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
static d_open_t twa_open;
diff --git a/sys/dev/twe/twe_freebsd.c b/sys/dev/twe/twe_freebsd.c
index 0328599..a7b9c91 100644
--- a/sys/dev/twe/twe_freebsd.c
+++ b/sys/dev/twe/twe_freebsd.c
@@ -872,7 +872,7 @@ twed_detach(device_t dev)
/********************************************************************************
* Allocate a command buffer
*/
-MALLOC_DEFINE(TWE_MALLOC_CLASS, "twe_commands", "twe commands");
+static MALLOC_DEFINE(TWE_MALLOC_CLASS, "twe_commands", "twe commands");
struct twe_request *
twe_allocate_request(struct twe_softc *sc, int tag)
diff --git a/sys/dev/tws/tws_services.c b/sys/dev/tws/tws_services.c
index 07200b5..d2a52cd 100644
--- a/sys/dev/tws/tws_services.c
+++ b/sys/dev/tws/tws_services.c
@@ -53,7 +53,7 @@ struct tws_sense *tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);
-struct error_desc array[] = {
+static struct error_desc array[] = {
{ "Cannot add sysctl tree node", 0x2000, ERROR,
"%s: (0x%02X: 0x%04X): %s:\n", "ERROR" },
{ "Register window not available", 0x2001, ERROR,
diff --git a/sys/dev/tws/tws_services.h b/sys/dev/tws/tws_services.h
index 643d720..f9016da 100644
--- a/sys/dev/tws/tws_services.h
+++ b/sys/dev/tws/tws_services.h
@@ -114,7 +114,6 @@ struct error_desc {
char *error_str;
};
-extern struct error_desc array[];
/* ----------- q services ------------- */
#define TWS_FREE_Q 0
diff --git a/sys/dev/uart/uart_core.c b/sys/dev/uart/uart_core.c
index 9260d06..fb1bb64 100644
--- a/sys/dev/uart/uart_core.c
+++ b/sys/dev/uart/uart_core.c
@@ -56,7 +56,7 @@ char uart_driver_name[] = "uart";
SLIST_HEAD(uart_devinfo_list, uart_devinfo) uart_sysdevs =
SLIST_HEAD_INITIALIZER(uart_sysdevs);
-MALLOC_DEFINE(M_UART, "UART", "UART driver");
+static MALLOC_DEFINE(M_UART, "UART", "UART driver");
void
uart_add_sysdev(struct uart_devinfo *di)
diff --git a/sys/dev/uart/uart_dev_ns8250.c b/sys/dev/uart/uart_dev_ns8250.c
index 489be29..5d56a68 100644
--- a/sys/dev/uart/uart_dev_ns8250.c
+++ b/sys/dev/uart/uart_dev_ns8250.c
@@ -582,9 +582,11 @@ static int
ns8250_bus_ipend(struct uart_softc *sc)
{
struct uart_bas *bas;
+ struct ns8250_softc *ns8250;
int ipend;
uint8_t iir, lsr;
+ ns8250 = (struct ns8250_softc *)sc;
bas = &sc->sc_bas;
uart_lock(sc->sc_hwmtx);
iir = uart_getreg(bas, REG_IIR);
@@ -602,9 +604,10 @@ ns8250_bus_ipend(struct uart_softc *sc)
if (lsr & LSR_RXRDY)
ipend |= SER_INT_RXREADY;
} else {
- if (iir & IIR_TXRDY)
+ if (iir & IIR_TXRDY) {
ipend |= SER_INT_TXIDLE;
- else
+ uart_setreg(bas, REG_IER, ns8250->ier);
+ } else
ipend |= SER_INT_SIGCHG;
}
if (ipend == 0)
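
A note on the uart_setreg() call added above; the rationale below is
assumed from standard 16550 behaviour, not stated in the patch:

/*
 * The THRE (TX holding register empty) interrupt is cleared by the
 * IIR read at the top of this function and does not reassert on its
 * own while the transmitter stays empty.  Rewriting IER from the
 * driver's soft copy (ns8250->ier) with the TX interrupt still
 * enabled re-arms THRE, so a transmit-idle indication that races
 * the IIR read is not lost.
 */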
diff --git a/sys/dev/ubsec/ubsec.c b/sys/dev/ubsec/ubsec.c
index 1ea150d..49e9dad 100644
--- a/sys/dev/ubsec/ubsec.c
+++ b/sys/dev/ubsec/ubsec.c
@@ -177,7 +177,8 @@ static int ubsec_ksigbits(struct crparam *);
static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
-SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0, "Broadcom driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0,
+ "Broadcom driver parameters");
#ifdef UBSEC_DEBUG
static void ubsec_dump_pb(volatile struct ubsec_pktbuf *);
diff --git a/sys/dev/usb/controller/at91dci.c b/sys/dev/usb/controller/at91dci.c
index 449aaa3..f831115 100644
--- a/sys/dev/usb/controller/at91dci.c
+++ b/sys/dev/usb/controller/at91dci.c
@@ -91,7 +91,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int at91dcidebug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, at91dci, CTLFLAG_RW, 0, "USB at91dci");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, at91dci, CTLFLAG_RW, 0, "USB at91dci");
SYSCTL_INT(_hw_usb_at91dci, OID_AUTO, debug, CTLFLAG_RW,
&at91dcidebug, 0, "at91dci debug level");
#endif
@@ -2123,7 +2123,7 @@ tr_handle_get_port_status:
if (sc->sc_flags.status_vbus &&
sc->sc_flags.status_bus_reset) {
/* reset endpoint flags */
- bzero(sc->sc_ep_flags, sizeof(sc->sc_ep_flags));
+ memset(sc->sc_ep_flags, 0, sizeof(sc->sc_ep_flags));
}
}
if (sc->sc_flags.change_suspend) {
diff --git a/sys/dev/usb/controller/atmegadci.c b/sys/dev/usb/controller/atmegadci.c
index a3134d8..ad53fc3 100644
--- a/sys/dev/usb/controller/atmegadci.c
+++ b/sys/dev/usb/controller/atmegadci.c
@@ -83,7 +83,8 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int atmegadci_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, atmegadci, CTLFLAG_RW, 0, "USB ATMEGA DCI");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, atmegadci, CTLFLAG_RW, 0,
+ "USB ATMEGA DCI");
SYSCTL_INT(_hw_usb_atmegadci, OID_AUTO, debug, CTLFLAG_RW,
&atmegadci_debug, 0, "ATMEGA DCI debug level");
#endif
diff --git a/sys/dev/usb/controller/avr32dci.c b/sys/dev/usb/controller/avr32dci.c
index d8b3934..26785a3 100644
--- a/sys/dev/usb/controller/avr32dci.c
+++ b/sys/dev/usb/controller/avr32dci.c
@@ -83,7 +83,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int avr32dci_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, avr32dci, CTLFLAG_RW, 0, "USB AVR32 DCI");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, avr32dci, CTLFLAG_RW, 0, "USB AVR32 DCI");
SYSCTL_INT(_hw_usb_avr32dci, OID_AUTO, debug, CTLFLAG_RW,
&avr32dci_debug, 0, "AVR32 DCI debug level");
#endif
@@ -415,12 +415,11 @@ repeat:
buf_res.length = count;
}
/* receive data */
- bcopy(sc->physdata +
+ memcpy(buf_res.buffer, sc->physdata +
(AVR32_EPTSTA_CURRENT_BANK(temp) << td->bank_shift) +
- (td->ep_no << 16) + (td->offset % td->max_packet_size),
- buf_res.buffer, buf_res.length)
+ (td->ep_no << 16) + (td->offset % td->max_packet_size), buf_res.length);
/* update counters */
- count -= buf_res.length;
+ count -= buf_res.length;
td->offset += buf_res.length;
td->remainder -= buf_res.length;
}
@@ -491,12 +490,12 @@ repeat:
buf_res.length = count;
}
/* transmit data */
- bcopy(buf_res.buffer, sc->physdata +
+ memcpy(sc->physdata +
(AVR32_EPTSTA_CURRENT_BANK(temp) << td->bank_shift) +
(td->ep_no << 16) + (td->offset % td->max_packet_size),
- buf_res.length)
+ buf_res.buffer, buf_res.length);
/* update counters */
- count -= buf_res.length;
+ count -= buf_res.length;
td->offset += buf_res.length;
td->remainder -= buf_res.length;
}
diff --git a/sys/dev/usb/controller/ehci.c b/sys/dev/usb/controller/ehci.c
index b7cd3cd..7e682f4 100644
--- a/sys/dev/usb/controller/ehci.c
+++ b/sys/dev/usb/controller/ehci.c
@@ -94,7 +94,7 @@ static int ehcinohighspeed = 0;
static int ehciiaadbug = 0;
static int ehcilostintrbug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ehci, CTLFLAG_RW, 0, "USB ehci");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ehci, CTLFLAG_RW, 0, "USB ehci");
SYSCTL_INT(_hw_usb_ehci, OID_AUTO, debug, CTLFLAG_RW,
&ehcidebug, 0, "Debug level");
SYSCTL_INT(_hw_usb_ehci, OID_AUTO, no_hs, CTLFLAG_RW,
@@ -3369,7 +3369,7 @@ ehci_roothub_exec(struct usb_device *udev,
break;
case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
len = 16;
- bzero(sc->sc_hub_desc.temp, 16);
+ memset(sc->sc_hub_desc.temp, 0, 16);
break;
case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
DPRINTFN(9, "get port status i=%d\n",
diff --git a/sys/dev/usb/controller/musb_otg.c b/sys/dev/usb/controller/musb_otg.c
index 7c9b02e..be32b2b 100644
--- a/sys/dev/usb/controller/musb_otg.c
+++ b/sys/dev/usb/controller/musb_otg.c
@@ -85,7 +85,7 @@
#ifdef USB_DEBUG
static int musbotgdebug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, musbotg, CTLFLAG_RW, 0, "USB musbotg");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, musbotg, CTLFLAG_RW, 0, "USB musbotg");
SYSCTL_INT(_hw_usb_musbotg, OID_AUTO, debug, CTLFLAG_RW,
&musbotgdebug, 0, "Debug level");
#endif
diff --git a/sys/dev/usb/controller/ohci.c b/sys/dev/usb/controller/ohci.c
index 0d63a6c..fa2d607 100644
--- a/sys/dev/usb/controller/ohci.c
+++ b/sys/dev/usb/controller/ohci.c
@@ -80,7 +80,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int ohcidebug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ohci, CTLFLAG_RW, 0, "USB ohci");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ohci, CTLFLAG_RW, 0, "USB ohci");
SYSCTL_INT(_hw_usb_ohci, OID_AUTO, debug, CTLFLAG_RW,
&ohcidebug, 0, "ohci debug level");
@@ -2347,7 +2347,7 @@ ohci_roothub_exec(struct usb_device *udev,
case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
len = 16;
- bzero(sc->sc_hub_desc.temp, 16);
+ memset(sc->sc_hub_desc.temp, 0, 16);
break;
case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
DPRINTFN(9, "get port status i=%d\n",
diff --git a/sys/dev/usb/controller/uhci.c b/sys/dev/usb/controller/uhci.c
index b78342f..c365e01 100644
--- a/sys/dev/usb/controller/uhci.c
+++ b/sys/dev/usb/controller/uhci.c
@@ -85,7 +85,7 @@ __FBSDID("$FreeBSD$");
static int uhcidebug = 0;
static int uhcinoloop = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uhci, CTLFLAG_RW, 0, "USB uhci");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uhci, CTLFLAG_RW, 0, "USB uhci");
SYSCTL_INT(_hw_usb_uhci, OID_AUTO, debug, CTLFLAG_RW,
&uhcidebug, 0, "uhci debug level");
SYSCTL_INT(_hw_usb_uhci, OID_AUTO, loop, CTLFLAG_RW,
@@ -2702,7 +2702,7 @@ uhci_roothub_exec(struct usb_device *udev,
break;
case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
len = 16;
- bzero(sc->sc_hub_desc.temp, 16);
+ memset(sc->sc_hub_desc.temp, 0, 16);
break;
case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
if (index == 1)
diff --git a/sys/dev/usb/controller/usb_controller.c b/sys/dev/usb/controller/usb_controller.c
index dade0ad..7ff0bc0 100644
--- a/sys/dev/usb/controller/usb_controller.c
+++ b/sys/dev/usb/controller/usb_controller.c
@@ -75,7 +75,7 @@ static void usb_attach_sub(device_t, struct usb_bus *);
#ifdef USB_DEBUG
static int usb_ctrl_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ctrl, CTLFLAG_RW, 0, "USB controller");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ctrl, CTLFLAG_RW, 0, "USB controller");
SYSCTL_INT(_hw_usb_ctrl, OID_AUTO, debug, CTLFLAG_RW, &usb_ctrl_debug, 0,
"Debug level");
#endif
diff --git a/sys/dev/usb/controller/uss820dci.c b/sys/dev/usb/controller/uss820dci.c
index b524529..b98043e 100644
--- a/sys/dev/usb/controller/uss820dci.c
+++ b/sys/dev/usb/controller/uss820dci.c
@@ -79,7 +79,8 @@
#ifdef USB_DEBUG
static int uss820dcidebug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uss820dci, CTLFLAG_RW, 0, "USB uss820dci");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uss820dci, CTLFLAG_RW, 0,
+ "USB uss820dci");
SYSCTL_INT(_hw_usb_uss820dci, OID_AUTO, debug, CTLFLAG_RW,
&uss820dcidebug, 0, "uss820dci debug level");
#endif
diff --git a/sys/dev/usb/controller/xhci.c b/sys/dev/usb/controller/xhci.c
index 0df4c4d..9bc7d21 100644
--- a/sys/dev/usb/controller/xhci.c
+++ b/sys/dev/usb/controller/xhci.c
@@ -86,7 +86,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int xhcidebug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, xhci, CTLFLAG_RW, 0, "USB XHCI");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, xhci, CTLFLAG_RW, 0, "USB XHCI");
SYSCTL_INT(_hw_usb_xhci, OID_AUTO, debug, CTLFLAG_RW,
&xhcidebug, 0, "Debug level");
@@ -292,7 +292,7 @@ xhci_start_controller(struct xhci_softc *sc)
XWRITE4(sc, oper, XHCI_USBCMD, XHCI_CMD_HCRST);
for (i = 0; i != 100; i++) {
- usb_pause_mtx(NULL, hz / 1000);
+ usb_pause_mtx(NULL, hz / 100);
temp = XREAD4(sc, oper, XHCI_USBCMD) &
(XHCI_CMD_HCRST | XHCI_STS_CNR);
if (!temp)
@@ -453,7 +453,7 @@ xhci_start_controller(struct xhci_softc *sc)
XHCI_CMD_INTE | XHCI_CMD_HSEE);
for (i = 0; i != 100; i++) {
- usb_pause_mtx(NULL, hz / 1000);
+ usb_pause_mtx(NULL, hz / 100);
temp = XREAD4(sc, oper, XHCI_USBSTS) & XHCI_STS_HCH;
if (!temp)
break;
@@ -487,7 +487,7 @@ xhci_halt_controller(struct xhci_softc *sc)
XWRITE4(sc, oper, XHCI_USBCMD, 0);
for (i = 0; i != 100; i++) {
- usb_pause_mtx(NULL, hz / 1000);
+ usb_pause_mtx(NULL, hz / 100);
temp = XREAD4(sc, oper, XHCI_USBSTS) & XHCI_STS_HCH;
if (temp)
break;
@@ -1110,7 +1110,7 @@ xhci_cmd_nop(struct xhci_softc *sc)
trb.dwTrb3 = htole32(temp);
- return (xhci_do_command(sc, &trb, 50 /* ms */));
+ return (xhci_do_command(sc, &trb, 100 /* ms */));
}
#endif
@@ -1127,7 +1127,7 @@ xhci_cmd_enable_slot(struct xhci_softc *sc, uint8_t *pslot)
trb.dwTrb2 = 0;
trb.dwTrb3 = htole32(XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT));
- err = xhci_do_command(sc, &trb, 50 /* ms */);
+ err = xhci_do_command(sc, &trb, 100 /* ms */);
if (err)
goto done;
@@ -1154,7 +1154,7 @@ xhci_cmd_disable_slot(struct xhci_softc *sc, uint8_t slot_id)
trb.dwTrb3 = htole32(temp);
- return (xhci_do_command(sc, &trb, 50 /* ms */));
+ return (xhci_do_command(sc, &trb, 100 /* ms */));
}
static usb_error_t
@@ -1310,7 +1310,7 @@ xhci_cmd_configure_ep(struct xhci_softc *sc, uint64_t input_ctx,
trb.dwTrb3 = htole32(temp);
- return (xhci_do_command(sc, &trb, 50 /* ms */));
+ return (xhci_do_command(sc, &trb, 100 /* ms */));
}
static usb_error_t
@@ -1328,7 +1328,7 @@ xhci_cmd_evaluate_ctx(struct xhci_softc *sc, uint64_t input_ctx,
XHCI_TRB_3_SLOT_SET(slot_id);
trb.dwTrb3 = htole32(temp);
- return (xhci_do_command(sc, &trb, 50 /* ms */));
+ return (xhci_do_command(sc, &trb, 100 /* ms */));
}
static usb_error_t
@@ -1351,7 +1351,7 @@ xhci_cmd_reset_ep(struct xhci_softc *sc, uint8_t preserve,
trb.dwTrb3 = htole32(temp);
- return (xhci_do_command(sc, &trb, 50 /* ms */));
+ return (xhci_do_command(sc, &trb, 100 /* ms */));
}
static usb_error_t
@@ -1373,7 +1373,7 @@ xhci_cmd_set_tr_dequeue_ptr(struct xhci_softc *sc, uint64_t dequeue_ptr,
XHCI_TRB_3_EP_SET(ep_id);
trb.dwTrb3 = htole32(temp);
- return (xhci_do_command(sc, &trb, 50 /* ms */));
+ return (xhci_do_command(sc, &trb, 100 /* ms */));
}
static usb_error_t
@@ -1396,7 +1396,7 @@ xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t suspend,
trb.dwTrb3 = htole32(temp);
- return (xhci_do_command(sc, &trb, 50 /* ms */));
+ return (xhci_do_command(sc, &trb, 100 /* ms */));
}
static usb_error_t
@@ -1414,7 +1414,7 @@ xhci_cmd_reset_dev(struct xhci_softc *sc, uint8_t slot_id)
trb.dwTrb3 = htole32(temp);
- return (xhci_do_command(sc, &trb, 50 /* ms */));
+ return (xhci_do_command(sc, &trb, 100 /* ms */));
}
/*------------------------------------------------------------------------*
@@ -2831,7 +2831,7 @@ struct xhci_bos_desc xhci_bosd = {
.bLength = sizeof(xhci_bosd.usb2extd),
.bDescriptorType = 1,
.bDevCapabilityType = 2,
- .bmAttributes = 2,
+ .bmAttributes[0] = 2,
},
.usbdcd = {
.bLength = sizeof(xhci_bosd.usbdcd),
@@ -2841,7 +2841,8 @@ struct xhci_bos_desc xhci_bosd = {
HSETW(.wSpeedsSupported, 0x000C),
.bFunctionalitySupport = 8,
.bU1DevExitLat = 255, /* dummy - not used */
- .bU2DevExitLat = 255, /* dummy - not used */
+ .wU2DevExitLat[0] = 0x00,
+ .wU2DevExitLat[1] = 0x08,
},
.cidd = {
.bLength = sizeof(xhci_bosd.cidd),
diff --git a/sys/dev/usb/input/atp.c b/sys/dev/usb/input/atp.c
index 9a17950..6cfba78 100644
--- a/sys/dev/usb/input/atp.c
+++ b/sys/dev/usb/input/atp.c
@@ -114,7 +114,7 @@ __FBSDID("$FreeBSD$");
/* Tunables */
-SYSCTL_NODE(_hw_usb, OID_AUTO, atp, CTLFLAG_RW, 0, "USB atp");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, atp, CTLFLAG_RW, 0, "USB atp");
#ifdef USB_DEBUG
enum atp_log_level {
diff --git a/sys/dev/usb/input/uep.c b/sys/dev/usb/input/uep.c
index e90298b..e534b27 100644
--- a/sys/dev/usb/input/uep.c
+++ b/sys/dev/usb/input/uep.c
@@ -57,7 +57,7 @@
#ifdef USB_DEBUG
static int uep_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uep, CTLFLAG_RW, 0, "USB uep");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uep, CTLFLAG_RW, 0, "USB uep");
SYSCTL_INT(_hw_usb_uep, OID_AUTO, debug, CTLFLAG_RW,
&uep_debug, 0, "Debug level");
#endif
diff --git a/sys/dev/usb/input/uhid.c b/sys/dev/usb/input/uhid.c
index 4723142..929227b 100644
--- a/sys/dev/usb/input/uhid.c
+++ b/sys/dev/usb/input/uhid.c
@@ -78,7 +78,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int uhid_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uhid, CTLFLAG_RW, 0, "USB uhid");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uhid, CTLFLAG_RW, 0, "USB uhid");
SYSCTL_INT(_hw_usb_uhid, OID_AUTO, debug, CTLFLAG_RW,
&uhid_debug, 0, "Debug level");
#endif
diff --git a/sys/dev/usb/input/ukbd.c b/sys/dev/usb/input/ukbd.c
index bf3ecd7..688f17b 100644
--- a/sys/dev/usb/input/ukbd.c
+++ b/sys/dev/usb/input/ukbd.c
@@ -93,7 +93,7 @@ __FBSDID("$FreeBSD$");
static int ukbd_debug = 0;
static int ukbd_no_leds = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ukbd, CTLFLAG_RW, 0, "USB ukbd");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ukbd, CTLFLAG_RW, 0, "USB ukbd");
SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, debug, CTLFLAG_RW,
&ukbd_debug, 0, "Debug level");
SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, no_leds, CTLFLAG_RW,
diff --git a/sys/dev/usb/input/ums.c b/sys/dev/usb/input/ums.c
index 12d9e89..a910df4 100644
--- a/sys/dev/usb/input/ums.c
+++ b/sys/dev/usb/input/ums.c
@@ -76,7 +76,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int ums_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ums, CTLFLAG_RW, 0, "USB ums");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ums, CTLFLAG_RW, 0, "USB ums");
SYSCTL_INT(_hw_usb_ums, OID_AUTO, debug, CTLFLAG_RW,
&ums_debug, 0, "Debug level");
#endif
diff --git a/sys/dev/usb/misc/udbp.c b/sys/dev/usb/misc/udbp.c
index 1a72cae..150985e 100644
--- a/sys/dev/usb/misc/udbp.c
+++ b/sys/dev/usb/misc/udbp.c
@@ -96,7 +96,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int udbp_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, udbp, CTLFLAG_RW, 0, "USB udbp");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, udbp, CTLFLAG_RW, 0, "USB udbp");
SYSCTL_INT(_hw_usb_udbp, OID_AUTO, debug, CTLFLAG_RW,
&udbp_debug, 0, "udbp debug level");
#endif
diff --git a/sys/dev/usb/net/if_aue.c b/sys/dev/usb/net/if_aue.c
index adf47f4..a14f233 100644
--- a/sys/dev/usb/net/if_aue.c
+++ b/sys/dev/usb/net/if_aue.c
@@ -102,7 +102,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int aue_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, aue, CTLFLAG_RW, 0, "USB aue");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, aue, CTLFLAG_RW, 0, "USB aue");
SYSCTL_INT(_hw_usb_aue, OID_AUTO, debug, CTLFLAG_RW, &aue_debug, 0,
"Debug level");
#endif
diff --git a/sys/dev/usb/net/if_axe.c b/sys/dev/usb/net/if_axe.c
index 62adda4..0d18f2b 100644
--- a/sys/dev/usb/net/if_axe.c
+++ b/sys/dev/usb/net/if_axe.c
@@ -133,7 +133,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int axe_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, axe, CTLFLAG_RW, 0, "USB axe");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, axe, CTLFLAG_RW, 0, "USB axe");
SYSCTL_INT(_hw_usb_axe, OID_AUTO, debug, CTLFLAG_RW, &axe_debug, 0,
"Debug level");
#endif
diff --git a/sys/dev/usb/net/if_cdce.c b/sys/dev/usb/net/if_cdce.c
index 12e6f67..eabf9c6 100644
--- a/sys/dev/usb/net/if_cdce.c
+++ b/sys/dev/usb/net/if_cdce.c
@@ -111,7 +111,7 @@ static uint32_t cdce_m_crc32(struct mbuf *, uint32_t, uint32_t);
static int cdce_debug = 0;
static int cdce_tx_interval = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, cdce, CTLFLAG_RW, 0, "USB CDC-Ethernet");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, cdce, CTLFLAG_RW, 0, "USB CDC-Ethernet");
SYSCTL_INT(_hw_usb_cdce, OID_AUTO, debug, CTLFLAG_RW, &cdce_debug, 0,
"Debug level");
SYSCTL_INT(_hw_usb_cdce, OID_AUTO, interval, CTLFLAG_RW, &cdce_tx_interval, 0,
diff --git a/sys/dev/usb/net/if_cue.c b/sys/dev/usb/net/if_cue.c
index 90a18f3..7466ba9 100644
--- a/sys/dev/usb/net/if_cue.c
+++ b/sys/dev/usb/net/if_cue.c
@@ -124,7 +124,7 @@ static void cue_reset(struct cue_softc *);
#ifdef USB_DEBUG
static int cue_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, cue, CTLFLAG_RW, 0, "USB cue");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, cue, CTLFLAG_RW, 0, "USB cue");
SYSCTL_INT(_hw_usb_cue, OID_AUTO, debug, CTLFLAG_RW, &cue_debug, 0,
"Debug level");
#endif
diff --git a/sys/dev/usb/net/if_ipheth.c b/sys/dev/usb/net/if_ipheth.c
index d666835..ad4b6f1 100644
--- a/sys/dev/usb/net/if_ipheth.c
+++ b/sys/dev/usb/net/if_ipheth.c
@@ -81,7 +81,7 @@ static uether_fn_t ipheth_setpromisc;
#ifdef USB_DEBUG
static int ipheth_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ipheth, CTLFLAG_RW, 0, "USB iPhone ethernet");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ipheth, CTLFLAG_RW, 0, "USB iPhone ethernet");
SYSCTL_INT(_hw_usb_ipheth, OID_AUTO, debug, CTLFLAG_RW, &ipheth_debug, 0, "Debug level");
#endif
diff --git a/sys/dev/usb/net/if_kue.c b/sys/dev/usb/net/if_kue.c
index 5480a5d..2d3fc42 100644
--- a/sys/dev/usb/net/if_kue.c
+++ b/sys/dev/usb/net/if_kue.c
@@ -165,7 +165,7 @@ static void kue_reset(struct kue_softc *);
#ifdef USB_DEBUG
static int kue_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, kue, CTLFLAG_RW, 0, "USB kue");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, kue, CTLFLAG_RW, 0, "USB kue");
SYSCTL_INT(_hw_usb_kue, OID_AUTO, debug, CTLFLAG_RW, &kue_debug, 0,
"Debug level");
#endif
@@ -380,8 +380,9 @@ kue_setmulti(struct usb_ether *ue)
*/
if (i == KUE_MCFILTCNT(sc))
break;
- bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
- KUE_MCFILT(sc, i), ETHER_ADDR_LEN);
+ memcpy(KUE_MCFILT(sc, i),
+ LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+ ETHER_ADDR_LEN);
i++;
}
if_maddr_runlock(ifp);
diff --git a/sys/dev/usb/net/if_mos.c b/sys/dev/usb/net/if_mos.c
index 5fbbb39..11cb962 100644
--- a/sys/dev/usb/net/if_mos.c
+++ b/sys/dev/usb/net/if_mos.c
@@ -132,7 +132,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int mos_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, mos, CTLFLAG_RW, 0, "USB mos");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, mos, CTLFLAG_RW, 0, "USB mos");
SYSCTL_INT(_hw_usb_mos, OID_AUTO, debug, CTLFLAG_RW, &mos_debug, 0,
"Debug level");
#endif
diff --git a/sys/dev/usb/net/if_rue.c b/sys/dev/usb/net/if_rue.c
index c874c1d..e4fd035 100644
--- a/sys/dev/usb/net/if_rue.c
+++ b/sys/dev/usb/net/if_rue.c
@@ -99,7 +99,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int rue_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, rue, CTLFLAG_RW, 0, "USB rue");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, rue, CTLFLAG_RW, 0, "USB rue");
SYSCTL_INT(_hw_usb_rue, OID_AUTO, debug, CTLFLAG_RW,
&rue_debug, 0, "Debug level");
#endif
diff --git a/sys/dev/usb/net/if_udav.c b/sys/dev/usb/net/if_udav.c
index e9a0504..3fa3d15 100644
--- a/sys/dev/usb/net/if_udav.c
+++ b/sys/dev/usb/net/if_udav.c
@@ -187,7 +187,7 @@ static const struct usb_ether_methods udav_ue_methods = {
#ifdef USB_DEBUG
static int udav_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, udav, CTLFLAG_RW, 0, "USB udav");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, udav, CTLFLAG_RW, 0, "USB udav");
SYSCTL_INT(_hw_usb_udav, OID_AUTO, debug, CTLFLAG_RW, &udav_debug, 0,
"Debug level");
#endif
diff --git a/sys/dev/usb/net/if_usie.c b/sys/dev/usb/net/if_usie.c
index cda1d3d..ed9054f 100644
--- a/sys/dev/usb/net/if_usie.c
+++ b/sys/dev/usb/net/if_usie.c
@@ -77,7 +77,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int usie_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, usie, CTLFLAG_RW, 0, "sierra USB modem");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, usie, CTLFLAG_RW, 0, "sierra USB modem");
SYSCTL_INT(_hw_usb_usie, OID_AUTO, debug, CTLFLAG_RW, &usie_debug, 0,
"usie debug level");
#endif
diff --git a/sys/dev/usb/net/uhso.c b/sys/dev/usb/net/uhso.c
index 6b42de8..8211170 100644
--- a/sys/dev/usb/net/uhso.c
+++ b/sys/dev/usb/net/uhso.c
@@ -283,7 +283,7 @@ static const STRUCT_USB_HOST_ID uhso_devs[] = {
#undef UHSO_DEV
};
-SYSCTL_NODE(_hw_usb, OID_AUTO, uhso, CTLFLAG_RW, 0, "USB uhso");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uhso, CTLFLAG_RW, 0, "USB uhso");
static int uhso_autoswitch = 1;
SYSCTL_INT(_hw_usb_uhso, OID_AUTO, auto_switch, CTLFLAG_RW,
&uhso_autoswitch, 0, "Automatically switch to modem mode");
@@ -1153,7 +1153,7 @@ uhso_mux_read_callback(struct usb_xfer *xfer, usb_error_t error)
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
- bzero(&req, sizeof(struct usb_device_request));
+ memset(&req, 0, sizeof(struct usb_device_request));
req.bmRequestType = UT_READ_CLASS_INTERFACE;
req.bRequest = UCDC_GET_ENCAPSULATED_RESPONSE;
USETW(req.wValue, 0);
@@ -1206,7 +1206,7 @@ uhso_mux_write_callback(struct usb_xfer *xfer, usb_error_t error)
usbd_get_page(pc, 0, &res);
- bzero(&req, sizeof(struct usb_device_request));
+ memset(&req, 0, sizeof(struct usb_device_request));
req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
req.bRequest = UCDC_SEND_ENCAPSULATED_COMMAND;
USETW(req.wValue, 0);
@@ -1731,7 +1731,7 @@ uhso_if_rxflush(void *arg)
* copy the IP-packet into it.
*/
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- bcopy(mtod(m0, uint8_t *), mtod(m, uint8_t *), iplen);
+ memcpy(mtod(m, uint8_t *), mtod(m0, uint8_t *), iplen);
m->m_pkthdr.len = m->m_len = iplen;
/* Adjust the size of the original mbuf */
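
One caveat around the hunk above: m_getcl() called with M_DONTWAIT can return NULL under memory pressure, and the copy that follows would then dereference it. A defensive variant, assuming dropping the remaining packets is an acceptable policy (the recovery choice here is hypothetical, not from this commit):

m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
	break;	/* hypothetical policy: no cluster, stop flushing */
memcpy(mtod(m, uint8_t *), mtod(m0, uint8_t *), iplen);
m->m_pkthdr.len = m->m_len = iplen;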
diff --git a/sys/dev/usb/net/usb_ethernet.c b/sys/dev/usb/net/usb_ethernet.c
index bc73d70..e405ae8 100644
--- a/sys/dev/usb/net/usb_ethernet.c
+++ b/sys/dev/usb/net/usb_ethernet.c
@@ -57,7 +57,8 @@ __FBSDID("$FreeBSD$");
#include <dev/usb/usb_process.h>
#include <dev/usb/net/usb_ethernet.h>
-SYSCTL_NODE(_net, OID_AUTO, ue, CTLFLAG_RD, 0, "USB Ethernet parameters");
+static SYSCTL_NODE(_net, OID_AUTO, ue, CTLFLAG_RD, 0,
+ "USB Ethernet parameters");
#define UE_LOCK(_ue) mtx_lock((_ue)->ue_mtx)
#define UE_UNLOCK(_ue) mtx_unlock((_ue)->ue_mtx)
diff --git a/sys/dev/usb/quirk/usb_quirk.c b/sys/dev/usb/quirk/usb_quirk.c
index 128fe2a..b0d7a7d 100644
--- a/sys/dev/usb/quirk/usb_quirk.c
+++ b/sys/dev/usb/quirk/usb_quirk.c
@@ -148,10 +148,6 @@ static struct usb_quirk_entry usb_quirks[USB_DEV_QUIRKS_MAX] = {
UQ_MSC_FORCE_PROTO_SCSI),
USB_QUIRK(AIPTEK, POCKETCAM3M, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(ALCOR, SDCR_6335, 0x0000, 0xffff, UQ_MSC_NO_TEST_UNIT_READY,
- UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(ALCOR, SDCR_6362, 0x0000, 0xffff, UQ_MSC_NO_TEST_UNIT_READY,
- UQ_MSC_NO_SYNC_CACHE),
USB_QUIRK(ALCOR, UMCR_9361, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
USB_QUIRK(ALCOR, TRANSCEND, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
@@ -462,6 +458,7 @@ static struct usb_quirk_entry usb_quirks[USB_DEV_QUIRKS_MAX] = {
 * Quirks for manufacturers whose USB devices do not respond
 * after being issued unsupported commands:
*/
+ USB_QUIRK(ALCOR, DUMMY, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_TEST_UNIT_READY, UQ_MATCH_VENDOR_ONLY),
USB_QUIRK(FEIYA, DUMMY, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE, UQ_MATCH_VENDOR_ONLY),
USB_QUIRK(REALTEK, DUMMY, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE, UQ_MATCH_VENDOR_ONLY),
USB_QUIRK(INITIO, DUMMY, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE, UQ_MATCH_VENDOR_ONLY),
diff --git a/sys/dev/usb/serial/u3g.c b/sys/dev/usb/serial/u3g.c
index 46a153f..6db2b55 100644
--- a/sys/dev/usb/serial/u3g.c
+++ b/sys/dev/usb/serial/u3g.c
@@ -66,7 +66,7 @@
#ifdef USB_DEBUG
static int u3g_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, u3g, CTLFLAG_RW, 0, "USB 3g");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, u3g, CTLFLAG_RW, 0, "USB 3g");
SYSCTL_INT(_hw_usb_u3g, OID_AUTO, debug, CTLFLAG_RW,
&u3g_debug, 0, "Debug level");
#endif
diff --git a/sys/dev/usb/serial/ubsa.c b/sys/dev/usb/serial/ubsa.c
index 6afe05b..c29d75d 100644
--- a/sys/dev/usb/serial/ubsa.c
+++ b/sys/dev/usb/serial/ubsa.c
@@ -95,7 +95,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int ubsa_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ubsa, CTLFLAG_RW, 0, "USB ubsa");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ubsa, CTLFLAG_RW, 0, "USB ubsa");
SYSCTL_INT(_hw_usb_ubsa, OID_AUTO, debug, CTLFLAG_RW,
&ubsa_debug, 0, "ubsa debug level");
#endif
diff --git a/sys/dev/usb/serial/ubser.c b/sys/dev/usb/serial/ubser.c
index 0d70ba9..86278cc 100644
--- a/sys/dev/usb/serial/ubser.c
+++ b/sys/dev/usb/serial/ubser.c
@@ -116,7 +116,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int ubser_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ubser, CTLFLAG_RW, 0, "USB ubser");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ubser, CTLFLAG_RW, 0, "USB ubser");
SYSCTL_INT(_hw_usb_ubser, OID_AUTO, debug, CTLFLAG_RW,
&ubser_debug, 0, "ubser debug level");
#endif
diff --git a/sys/dev/usb/serial/uchcom.c b/sys/dev/usb/serial/uchcom.c
index 3f69c4d..2b12029 100644
--- a/sys/dev/usb/serial/uchcom.c
+++ b/sys/dev/usb/serial/uchcom.c
@@ -103,7 +103,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int uchcom_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uchcom, CTLFLAG_RW, 0, "USB uchcom");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uchcom, CTLFLAG_RW, 0, "USB uchcom");
SYSCTL_INT(_hw_usb_uchcom, OID_AUTO, debug, CTLFLAG_RW,
&uchcom_debug, 0, "uchcom debug level");
#endif
diff --git a/sys/dev/usb/serial/ufoma.c b/sys/dev/usb/serial/ufoma.c
index 31be85c..7233f90 100644
--- a/sys/dev/usb/serial/ufoma.c
+++ b/sys/dev/usb/serial/ufoma.c
@@ -438,7 +438,7 @@ ufoma_attach(device_t dev)
goto detach;
}
sc->sc_modetable[0] = (elements + 1);
- bcopy(mad->bMode, &sc->sc_modetable[1], elements);
+ memcpy(&sc->sc_modetable[1], mad->bMode, elements);
sc->sc_currentmode = UMCPC_ACM_MODE_UNLINKED;
sc->sc_modetoactivate = mad->bMode[0];
@@ -968,7 +968,7 @@ ufoma_cfg_param(struct ucom_softc *ucom, struct termios *t)
}
DPRINTF("\n");
- bzero(&ls, sizeof(ls));
+ memset(&ls, 0, sizeof(ls));
USETDW(ls.dwDTERate, t->c_ospeed);
diff --git a/sys/dev/usb/serial/uftdi.c b/sys/dev/usb/serial/uftdi.c
index 1c88063..843a56a 100644
--- a/sys/dev/usb/serial/uftdi.c
+++ b/sys/dev/usb/serial/uftdi.c
@@ -75,7 +75,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int uftdi_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uftdi, CTLFLAG_RW, 0, "USB uftdi");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uftdi, CTLFLAG_RW, 0, "USB uftdi");
SYSCTL_INT(_hw_usb_uftdi, OID_AUTO, debug, CTLFLAG_RW,
&uftdi_debug, 0, "Debug level");
#endif
@@ -560,7 +560,7 @@ static int
uftdi_set_parm_soft(struct termios *t,
struct uftdi_param_config *cfg, uint8_t type)
{
- bzero(cfg, sizeof(*cfg));
+ memset(cfg, 0, sizeof(*cfg));
switch (type) {
case UFTDI_TYPE_SIO:
diff --git a/sys/dev/usb/serial/ulpt.c b/sys/dev/usb/serial/ulpt.c
index def2ae5..063e297 100644
--- a/sys/dev/usb/serial/ulpt.c
+++ b/sys/dev/usb/serial/ulpt.c
@@ -74,7 +74,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int ulpt_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ulpt, CTLFLAG_RW, 0, "USB ulpt");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ulpt, CTLFLAG_RW, 0, "USB ulpt");
SYSCTL_INT(_hw_usb_ulpt, OID_AUTO, debug, CTLFLAG_RW,
&ulpt_debug, 0, "Debug level");
#endif
diff --git a/sys/dev/usb/serial/umcs.c b/sys/dev/usb/serial/umcs.c
index 94ed4d9..a2b7852 100644
--- a/sys/dev/usb/serial/umcs.c
+++ b/sys/dev/usb/serial/umcs.c
@@ -79,7 +79,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int umcs_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, umcs, CTLFLAG_RW, 0, "USB umcs quadport serial adapter");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, umcs, CTLFLAG_RW, 0, "USB umcs quadport serial adapter");
SYSCTL_INT(_hw_usb_umcs, OID_AUTO, debug, CTLFLAG_RW, &umcs_debug, 0, "Debug level");
#endif /* USB_DEBUG */
diff --git a/sys/dev/usb/serial/umodem.c b/sys/dev/usb/serial/umodem.c
index ed5162f..9289eec 100644
--- a/sys/dev/usb/serial/umodem.c
+++ b/sys/dev/usb/serial/umodem.c
@@ -118,7 +118,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int umodem_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, umodem, CTLFLAG_RW, 0, "USB umodem");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, umodem, CTLFLAG_RW, 0, "USB umodem");
SYSCTL_INT(_hw_usb_umodem, OID_AUTO, debug, CTLFLAG_RW,
&umodem_debug, 0, "Debug level");
#endif
@@ -127,7 +127,11 @@ static const STRUCT_USB_HOST_ID umodem_devs[] = {
/* Generic Modem class match */
{USB_IFACE_CLASS(UICLASS_CDC),
USB_IFACE_SUBCLASS(UISUBCLASS_ABSTRACT_CONTROL_MODEL),
- USB_IFACE_PROTOCOL(UIPROTO_CDC_AT)},
+ USB_IFACE_PROTOCOL(UIPROTO_CDC_AT)},
+ /* Huawei Modem class match */
+ {USB_IFACE_CLASS(UICLASS_CDC),
+ USB_IFACE_SUBCLASS(UISUBCLASS_ABSTRACT_CONTROL_MODEL),
+ USB_IFACE_PROTOCOL(0xFF)},
/* Kyocera AH-K3001V */
{USB_VPI(USB_VENDOR_KYOCERA, USB_PRODUCT_KYOCERA_AHK3001V, 1)},
{USB_VPI(USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC5720, 1)},
@@ -536,7 +540,7 @@ umodem_cfg_param(struct ucom_softc *ucom, struct termios *t)
DPRINTF("sc=%p\n", sc);
- bzero(&ls, sizeof(ls));
+ memset(&ls, 0, sizeof(ls));
USETDW(ls.dwDTERate, t->c_ospeed);
diff --git a/sys/dev/usb/serial/umoscom.c b/sys/dev/usb/serial/umoscom.c
index c346ae6..eef2f47 100644
--- a/sys/dev/usb/serial/umoscom.c
+++ b/sys/dev/usb/serial/umoscom.c
@@ -50,7 +50,7 @@
#ifdef USB_DEBUG
static int umoscom_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, umoscom, CTLFLAG_RW, 0, "USB umoscom");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, umoscom, CTLFLAG_RW, 0, "USB umoscom");
SYSCTL_INT(_hw_usb_umoscom, OID_AUTO, debug, CTLFLAG_RW,
&umoscom_debug, 0, "Debug level");
#endif
diff --git a/sys/dev/usb/serial/uplcom.c b/sys/dev/usb/serial/uplcom.c
index 4af0537..f868a53 100644
--- a/sys/dev/usb/serial/uplcom.c
+++ b/sys/dev/usb/serial/uplcom.c
@@ -118,7 +118,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int uplcom_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uplcom, CTLFLAG_RW, 0, "USB uplcom");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uplcom, CTLFLAG_RW, 0, "USB uplcom");
SYSCTL_INT(_hw_usb_uplcom, OID_AUTO, debug, CTLFLAG_RW,
&uplcom_debug, 0, "Debug level");
#endif
@@ -659,7 +659,7 @@ uplcom_cfg_param(struct ucom_softc *ucom, struct termios *t)
DPRINTF("sc = %p\n", sc);
- bzero(&ls, sizeof(ls));
+ memset(&ls, 0, sizeof(ls));
USETDW(ls.dwDTERate, t->c_ospeed);
diff --git a/sys/dev/usb/serial/usb_serial.c b/sys/dev/usb/serial/usb_serial.c
index 61885ea..a3e6ffb 100644
--- a/sys/dev/usb/serial/usb_serial.c
+++ b/sys/dev/usb/serial/usb_serial.c
@@ -101,7 +101,7 @@ __FBSDID("$FreeBSD$");
#include "opt_gdb.h"
-SYSCTL_NODE(_hw_usb, OID_AUTO, ucom, CTLFLAG_RW, 0, "USB ucom");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ucom, CTLFLAG_RW, 0, "USB ucom");
#ifdef USB_DEBUG
static int ucom_debug = 0;
diff --git a/sys/dev/usb/serial/uslcom.c b/sys/dev/usb/serial/uslcom.c
index 6eaec83..848f2d5 100644
--- a/sys/dev/usb/serial/uslcom.c
+++ b/sys/dev/usb/serial/uslcom.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
+#include <dev/usb/usb_ioctl.h>
#include "usbdevs.h"
#define USB_DEBUG_VAR uslcom_debug
@@ -52,7 +53,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int uslcom_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uslcom, CTLFLAG_RW, 0, "USB uslcom");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uslcom, CTLFLAG_RW, 0, "USB uslcom");
SYSCTL_INT(_hw_usb_uslcom, OID_AUTO, debug, CTLFLAG_RW,
&uslcom_debug, 0, "Debug level");
#endif
@@ -63,49 +64,72 @@ SYSCTL_INT(_hw_usb_uslcom, OID_AUTO, debug, CTLFLAG_RW,
#define USLCOM_SET_DATA_BITS(x) ((x) << 8)
+/* Request types */
#define USLCOM_WRITE 0x41
#define USLCOM_READ 0xc1
+/* Request codes */
#define USLCOM_UART 0x00
#define USLCOM_BAUD_RATE 0x01
#define USLCOM_DATA 0x03
#define USLCOM_BREAK 0x05
#define USLCOM_CTRL 0x07
+#define USLCOM_RCTRL 0x08
+#define USLCOM_SET_FLOWCTRL 0x13
+#define USLCOM_VENDOR_SPECIFIC 0xff
+/* USLCOM_UART values */
#define USLCOM_UART_DISABLE 0x00
#define USLCOM_UART_ENABLE 0x01
+/* USLCOM_CTRL/USLCOM_RCTRL values */
#define USLCOM_CTRL_DTR_ON 0x0001
#define USLCOM_CTRL_DTR_SET 0x0100
#define USLCOM_CTRL_RTS_ON 0x0002
#define USLCOM_CTRL_RTS_SET 0x0200
#define USLCOM_CTRL_CTS 0x0010
#define USLCOM_CTRL_DSR 0x0020
+#define USLCOM_CTRL_RI 0x0040
#define USLCOM_CTRL_DCD 0x0080
+/* USLCOM_BAUD_RATE values */
#define USLCOM_BAUD_REF 0x384000
+/* USLCOM_DATA values */
#define USLCOM_STOP_BITS_1 0x00
#define USLCOM_STOP_BITS_2 0x02
-
#define USLCOM_PARITY_NONE 0x00
#define USLCOM_PARITY_ODD 0x10
#define USLCOM_PARITY_EVEN 0x20
-#define USLCOM_PORT_NO 0xFFFF /* XXX think this should be 0 --hps */
+#define USLCOM_PORT_NO 0x0000
+/* USLCOM_BREAK values */
#define USLCOM_BREAK_OFF 0x00
#define USLCOM_BREAK_ON 0x01
+/* USLCOM_SET_FLOWCTRL values - 1st word */
+#define USLCOM_FLOW_DTR_ON 0x00000001 /* DTR static active */
+#define USLCOM_FLOW_CTS_HS 0x00000008 /* CTS handshake */
+/* USLCOM_SET_FLOWCTRL values - 2nd word */
+#define USLCOM_FLOW_RTS_ON 0x00000040 /* RTS static active */
+#define USLCOM_FLOW_RTS_HS 0x00000080 /* RTS handshake */
+
+/* USLCOM_VENDOR_SPECIFIC values */
+#define USLCOM_WRITE_LATCH 0x37E1
+#define USLCOM_READ_LATCH 0x00C2
+
enum {
USLCOM_BULK_DT_WR,
USLCOM_BULK_DT_RD,
+ USLCOM_CTRL_DT_RD,
USLCOM_N_TRANSFER,
};
struct uslcom_softc {
struct ucom_super_softc sc_super_ucom;
struct ucom_softc sc_ucom;
+ struct usb_callout sc_watchdog;
struct usb_xfer *sc_xfer[USLCOM_N_TRANSFER];
struct usb_device *sc_udev;
@@ -121,12 +145,15 @@ static device_detach_t uslcom_detach;
static usb_callback_t uslcom_write_callback;
static usb_callback_t uslcom_read_callback;
+static usb_callback_t uslcom_control_callback;
static void uslcom_open(struct ucom_softc *);
static void uslcom_close(struct ucom_softc *);
static void uslcom_set_dtr(struct ucom_softc *, uint8_t);
static void uslcom_set_rts(struct ucom_softc *, uint8_t);
static void uslcom_set_break(struct ucom_softc *, uint8_t);
+static int uslcom_ioctl(struct ucom_softc *, uint32_t, caddr_t, int,
+ struct thread *);
static int uslcom_pre_param(struct ucom_softc *, struct termios *);
static void uslcom_param(struct ucom_softc *, struct termios *);
static void uslcom_get_status(struct ucom_softc *, uint8_t *, uint8_t *);
@@ -143,7 +170,7 @@ static const struct usb_config uslcom_config[USLCOM_N_TRANSFER] = {
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.bufsize = USLCOM_BULK_BUF_SIZE,
- .flags = {.pipe_bof = 1,.force_short_xfer = 1,},
+ .flags = {.pipe_bof = 1,},
.callback = &uslcom_write_callback,
},
@@ -155,6 +182,15 @@ static const struct usb_config uslcom_config[USLCOM_N_TRANSFER] = {
.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
.callback = &uslcom_read_callback,
},
+ [USLCOM_CTRL_DT_RD] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00,
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request) + 8,
+ .flags = {.pipe_bof = 1,},
+ .callback = &uslcom_control_callback,
+ .timeout = 1000, /* 1 second timeout */
+ },
};
static struct ucom_callback uslcom_callback = {
@@ -164,6 +200,7 @@ static struct ucom_callback uslcom_callback = {
.ucom_cfg_set_dtr = &uslcom_set_dtr,
.ucom_cfg_set_rts = &uslcom_set_rts,
.ucom_cfg_set_break = &uslcom_set_break,
+ .ucom_ioctl = &uslcom_ioctl,
.ucom_cfg_param = &uslcom_param,
.ucom_pre_param = &uslcom_pre_param,
.ucom_start_read = &uslcom_start_read,
@@ -280,6 +317,19 @@ MODULE_DEPEND(uslcom, ucom, 1, 1, 1);
MODULE_DEPEND(uslcom, usb, 1, 1, 1);
MODULE_VERSION(uslcom, 1);
+static void
+uslcom_watchdog(void *arg)
+{
+ struct uslcom_softc *sc = arg;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ usbd_transfer_start(sc->sc_xfer[USLCOM_CTRL_DT_RD]);
+
+ usb_callout_reset(&sc->sc_watchdog,
+ hz / 4, &uslcom_watchdog, sc);
+}
+
static int
uslcom_probe(device_t dev)
{
@@ -310,6 +360,7 @@ uslcom_attach(device_t dev)
device_set_usb_desc(dev);
mtx_init(&sc->sc_mtx, "uslcom", NULL, MTX_DEF);
+ usb_callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
sc->sc_udev = uaa->device;
@@ -350,6 +401,8 @@ uslcom_detach(device_t dev)
ucom_detach(&sc->sc_super_ucom, &sc->sc_ucom);
usbd_transfer_unsetup(sc->sc_xfer, USLCOM_N_TRANSFER);
+
+ usb_callout_drain(&sc->sc_watchdog);
mtx_destroy(&sc->sc_mtx);
return (0);
@@ -371,6 +424,9 @@ uslcom_open(struct ucom_softc *ucom)
&req, NULL, 0, 1000)) {
DPRINTF("UART enable failed (ignored)\n");
}
+
+ /* start polling status */
+ uslcom_watchdog(sc);
}
static void
@@ -379,13 +435,16 @@ uslcom_close(struct ucom_softc *ucom)
struct uslcom_softc *sc = ucom->sc_parent;
struct usb_device_request req;
+ /* stop polling status */
+ usb_callout_stop(&sc->sc_watchdog);
+
req.bmRequestType = USLCOM_WRITE;
req.bRequest = USLCOM_UART;
USETW(req.wValue, USLCOM_UART_DISABLE);
USETW(req.wIndex, USLCOM_PORT_NO);
USETW(req.wLength, 0);
- if (ucom_cfg_do_request(sc->sc_udev, &sc->sc_ucom,
+ if (ucom_cfg_do_request(sc->sc_udev, &sc->sc_ucom,
&req, NULL, 0, 1000)) {
DPRINTF("UART disable failed (ignored)\n");
}
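
The watchdog added above follows the standard FreeBSD callout lifecycle: initialize against the softc mutex at attach, arm on open, stop on close, and drain at detach so no callback remains in flight. Condensed into one sketch (driver names hypothetical):

static void
mydev_watchdog(void *arg)
{
	struct mydev_softc *sc = arg;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	/* do the periodic work, then re-arm roughly 250 ms later */
	usb_callout_reset(&sc->sc_watchdog, hz / 4, &mydev_watchdog, sc);
}

/* attach: usb_callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0); */
/* open:   mydev_watchdog(sc);          arms the timer, mutex held */
/* close:  usb_callout_stop(&sc->sc_watchdog);                     */
/* detach: usb_callout_drain(&sc->sc_watchdog);  after unsetup     */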
@@ -452,6 +511,7 @@ uslcom_param(struct ucom_softc *ucom, struct termios *t)
{
struct uslcom_softc *sc = ucom->sc_parent;
struct usb_device_request req;
+ uint32_t flowctrl[4];
uint16_t data;
DPRINTF("\n");
@@ -503,7 +563,28 @@ uslcom_param(struct ucom_softc *ucom, struct termios *t)
&req, NULL, 0, 1000)) {
DPRINTF("Set format failed (ignored)\n");
}
- return;
+
+ if (t->c_cflag & CRTSCTS) {
+ flowctrl[0] = htole32(USLCOM_FLOW_DTR_ON | USLCOM_FLOW_CTS_HS);
+ flowctrl[1] = htole32(USLCOM_FLOW_RTS_HS);
+ flowctrl[2] = 0;
+ flowctrl[3] = 0;
+ } else {
+ flowctrl[0] = htole32(USLCOM_FLOW_DTR_ON);
+ flowctrl[1] = htole32(USLCOM_FLOW_RTS_ON);
+ flowctrl[2] = 0;
+ flowctrl[3] = 0;
+ }
+ req.bmRequestType = USLCOM_WRITE;
+ req.bRequest = USLCOM_SET_FLOWCTRL;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, USLCOM_PORT_NO);
+ USETW(req.wLength, sizeof(flowctrl));
+
+ if (ucom_cfg_do_request(sc->sc_udev, &sc->sc_ucom,
+ &req, flowctrl, 0, 1000)) {
+ DPRINTF("Set flowcontrol failed (ignored)\n");
+ }
}
static void
@@ -536,6 +617,55 @@ uslcom_set_break(struct ucom_softc *ucom, uint8_t onoff)
}
}
+static int
+uslcom_ioctl(struct ucom_softc *ucom, uint32_t cmd, caddr_t data,
+ int flag, struct thread *td)
+{
+ struct uslcom_softc *sc = ucom->sc_parent;
+ struct usb_device_request req;
+ int error = 0;
+ uint8_t latch;
+
+ DPRINTF("cmd=0x%08x\n", cmd);
+
+ switch (cmd) {
+ case USB_GET_GPIO:
+ req.bmRequestType = USLCOM_READ;
+ req.bRequest = USLCOM_VENDOR_SPECIFIC;
+ USETW(req.wValue, USLCOM_READ_LATCH);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, sizeof(latch));
+
+ if (ucom_cfg_do_request(sc->sc_udev, &sc->sc_ucom,
+ &req, &latch, 0, 1000)) {
+ DPRINTF("Get LATCH failed\n");
+ error = EIO;
+ }
+ *(int *)data = latch;
+ break;
+
+ case USB_SET_GPIO:
+ req.bmRequestType = USLCOM_WRITE;
+ req.bRequest = USLCOM_VENDOR_SPECIFIC;
+ USETW(req.wValue, USLCOM_WRITE_LATCH);
+ USETW(req.wIndex, (*(int *)data));
+ USETW(req.wLength, 0);
+
+ if (ucom_cfg_do_request(sc->sc_udev, &sc->sc_ucom,
+ &req, NULL, 0, 1000)) {
+ DPRINTF("Set LATCH failed\n");
+ error = EIO;
+ }
+ break;
+
+ default:
+ DPRINTF("Unknown IOCTL\n");
+ error = ENOIOCTL;
+ break;
+ }
+ return (error);
+}
+
static void
uslcom_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
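With .ucom_ioctl wired up, the new latch requests can be driven from userland through the tty device node. Note that the driver stores latch into the caller's buffer even when the read request fails, so callers should check the ioctl return value. A hedged usage sketch; the device path, unit number, and GPIO bit are illustrative only:

#include <sys/ioctl.h>
#include <dev/usb/usb_ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd, latch;

	fd = open("/dev/ttyU0", O_RDWR);	/* hypothetical unit */
	if (fd < 0)
		return (1);
	if (ioctl(fd, USB_GET_GPIO, &latch) == 0)
		printf("latch = 0x%02x\n", latch);
	latch ^= 0x01;				/* toggle GPIO.0 */
	(void)ioctl(fd, USB_SET_GPIO, &latch);
	close(fd);
	return (0);
}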
@@ -599,6 +729,59 @@ tr_setup:
}
static void
+uslcom_control_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct uslcom_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc;
+ struct usb_device_request req;
+ uint8_t msr = 0;
+ uint8_t buf;
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ pc = usbd_xfer_get_frame(xfer, 1);
+ usbd_copy_out(pc, 0, &buf, sizeof(buf));
+ if (buf & USLCOM_CTRL_CTS)
+ msr |= SER_CTS;
+ if (buf & USLCOM_CTRL_DSR)
+ msr |= SER_DSR;
+ if (buf & USLCOM_CTRL_RI)
+ msr |= SER_RI;
+ if (buf & USLCOM_CTRL_DCD)
+ msr |= SER_DCD;
+
+ if (msr != sc->sc_msr) {
+ DPRINTF("status change msr=0x%02x "
+ "(was 0x%02x)\n", msr, sc->sc_msr);
+ sc->sc_msr = msr;
+ ucom_status_change(&sc->sc_ucom);
+ }
+ break;
+
+ case USB_ST_SETUP:
+ req.bmRequestType = USLCOM_READ;
+ req.bRequest = USLCOM_RCTRL;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, USLCOM_PORT_NO);
+ USETW(req.wLength, sizeof(buf));
+
+ usbd_xfer_set_frames(xfer, 2);
+ usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
+ usbd_xfer_set_frame_len(xfer, 1, sizeof(buf));
+
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_in(pc, 0, &req, sizeof(req));
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* error */
+ if (error != USB_ERR_CANCELLED)
+ DPRINTF("error=%s\n", usbd_errstr(error));
+ break;
+ }
+}
+
+static void
uslcom_start_read(struct ucom_softc *ucom)
{
struct uslcom_softc *sc = ucom->sc_parent;
diff --git a/sys/dev/usb/serial/uvisor.c b/sys/dev/usb/serial/uvisor.c
index 976ea19..df96958 100644
--- a/sys/dev/usb/serial/uvisor.c
+++ b/sys/dev/usb/serial/uvisor.c
@@ -80,7 +80,7 @@
#ifdef USB_DEBUG
static int uvisor_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uvisor, CTLFLAG_RW, 0, "USB uvisor");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uvisor, CTLFLAG_RW, 0, "USB uvisor");
SYSCTL_INT(_hw_usb_uvisor, OID_AUTO, debug, CTLFLAG_RW,
&uvisor_debug, 0, "Debug level");
#endif
@@ -311,8 +311,9 @@ uvisor_attach(device_t dev)
int error;
DPRINTF("sc=%p\n", sc);
- bcopy(uvisor_config, uvisor_config_copy,
+ memcpy(uvisor_config_copy, uvisor_config,
sizeof(uvisor_config_copy));
+
device_set_usb_desc(dev);
mtx_init(&sc->sc_mtx, "uvisor", NULL, MTX_DEF);
diff --git a/sys/dev/usb/serial/uvscom.c b/sys/dev/usb/serial/uvscom.c
index 52e02ad..f5dc2d5 100644
--- a/sys/dev/usb/serial/uvscom.c
+++ b/sys/dev/usb/serial/uvscom.c
@@ -70,7 +70,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int uvscom_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uvscom, CTLFLAG_RW, 0, "USB uvscom");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uvscom, CTLFLAG_RW, 0, "USB uvscom");
SYSCTL_INT(_hw_usb_uvscom, OID_AUTO, debug, CTLFLAG_RW,
&uvscom_debug, 0, "Debug level");
#endif
diff --git a/sys/dev/usb/storage/umass.c b/sys/dev/usb/storage/umass.c
index b932cfa..3f2bd11 100644
--- a/sys/dev/usb/storage/umass.c
+++ b/sys/dev/usb/storage/umass.c
@@ -173,7 +173,7 @@ __FBSDID("$FreeBSD$");
#define UDMASS_ALL 0xffff0000 /* all of the above */
static int umass_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, umass, CTLFLAG_RW, 0, "USB umass");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, umass, CTLFLAG_RW, 0, "USB umass");
SYSCTL_INT(_hw_usb_umass, OID_AUTO, debug, CTLFLAG_RW,
&umass_debug, 0, "umass debug level");
@@ -891,7 +891,7 @@ umass_attach(device_t dev)
int32_t err;
/*
- * NOTE: the softc struct is bzero-ed in device_set_driver.
+ * NOTE: the softc struct is cleared in device_set_driver.
* We can safely call umass_detach without specifically
* initializing the struct.
*/
@@ -1305,11 +1305,13 @@ umass_t_bbb_command_callback(struct usb_xfer *xfer, usb_error_t error)
}
sc->cbw.bCDBLength = sc->sc_transfer.cmd_len;
- bcopy(sc->sc_transfer.cmd_data, sc->cbw.CBWCDB,
+ memcpy(sc->cbw.CBWCDB, sc->sc_transfer.cmd_data,
sc->sc_transfer.cmd_len);
- bzero(sc->sc_transfer.cmd_data + sc->sc_transfer.cmd_len,
- sizeof(sc->cbw.CBWCDB) - sc->sc_transfer.cmd_len);
+ memset(sc->sc_transfer.cmd_data +
+ sc->sc_transfer.cmd_len, 0,
+ sizeof(sc->cbw.CBWCDB) -
+ sc->sc_transfer.cmd_len);
DIF(UDMASS_BBB, umass_bbb_dump_cbw(sc, &sc->cbw));
@@ -1480,9 +1482,9 @@ umass_t_bbb_status_callback(struct usb_xfer *xfer, usb_error_t error)
/* Zero missing parts of the CSW: */
- if (actlen < sizeof(sc->csw)) {
- bzero(&sc->csw, sizeof(sc->csw));
- }
+ if (actlen < sizeof(sc->csw))
+ memset(&sc->csw, 0, sizeof(sc->csw));
+
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_out(pc, 0, &sc->csw, actlen);
@@ -2755,7 +2757,7 @@ umass_scsi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
if (sc->sc_quirks & NO_TEST_UNIT_READY) {
DPRINTF(sc, UDMASS_SCSI, "Converted TEST_UNIT_READY "
"to START_UNIT\n");
- bzero(sc->sc_transfer.cmd_data, cmd_len);
+ memset(sc->sc_transfer.cmd_data, 0, cmd_len);
sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
sc->sc_transfer.cmd_data[4] = SSS_START;
return (1);
@@ -2768,14 +2770,14 @@ umass_scsi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
* information.
*/
if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
- bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+ memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
return (1);
}
break;
}
- bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+ memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
return (1);
}
@@ -2810,10 +2812,11 @@ umass_rbc_transform(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len)
case REQUEST_SENSE:
case PREVENT_ALLOW:
- bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+ memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
if ((sc->sc_quirks & RBC_PAD_TO_12) && (cmd_len < 12)) {
- bzero(sc->sc_transfer.cmd_data + cmd_len, 12 - cmd_len);
+ memset(sc->sc_transfer.cmd_data + cmd_len,
+ 0, 12 - cmd_len);
cmd_len = 12;
}
sc->sc_transfer.cmd_len = cmd_len;
@@ -2841,7 +2844,7 @@ umass_ufi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
sc->sc_transfer.cmd_len = UFI_COMMAND_LENGTH;
/* Zero the command data */
- bzero(sc->sc_transfer.cmd_data, UFI_COMMAND_LENGTH);
+ memset(sc->sc_transfer.cmd_data, 0, UFI_COMMAND_LENGTH);
switch (cmd_ptr[0]) {
/*
@@ -2898,7 +2901,7 @@ umass_ufi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
return (0); /* failure */
}
- bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+ memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
return (1); /* success */
}
@@ -2919,7 +2922,7 @@ umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
sc->sc_transfer.cmd_len = ATAPI_COMMAND_LENGTH;
/* Zero the command data */
- bzero(sc->sc_transfer.cmd_data, ATAPI_COMMAND_LENGTH);
+ memset(sc->sc_transfer.cmd_data, 0, ATAPI_COMMAND_LENGTH);
switch (cmd_ptr[0]) {
/*
@@ -2933,7 +2936,7 @@ umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
* information.
*/
if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
- bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+ memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
return (1);
@@ -2994,7 +2997,7 @@ umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
break;
}
- bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+ memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
return (1); /* success */
}
diff --git a/sys/dev/usb/storage/urio.c b/sys/dev/usb/storage/urio.c
index 6687173..e79ae87 100644
--- a/sys/dev/usb/storage/urio.c
+++ b/sys/dev/usb/storage/urio.c
@@ -80,7 +80,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int urio_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, urio, CTLFLAG_RW, 0, "USB urio");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, urio, CTLFLAG_RW, 0, "USB urio");
SYSCTL_INT(_hw_usb_urio, OID_AUTO, debug, CTLFLAG_RW,
&urio_debug, 0, "urio debug level");
#endif
@@ -440,7 +440,7 @@ urio_ioctl(struct usb_fifo *fifo, u_long cmd, void *addr,
error = EPERM;
goto done;
}
- bzero(&ur, sizeof(ur));
+ memset(&ur, 0, sizeof(ur));
rio_cmd = addr;
ur.ucr_request.bmRequestType =
rio_cmd->requesttype | UT_READ_VENDOR_DEVICE;
@@ -451,7 +451,7 @@ urio_ioctl(struct usb_fifo *fifo, u_long cmd, void *addr,
error = EPERM;
goto done;
}
- bzero(&ur, sizeof(ur));
+ memset(&ur, 0, sizeof(ur));
rio_cmd = addr;
ur.ucr_request.bmRequestType =
rio_cmd->requesttype | UT_WRITE_VENDOR_DEVICE;
diff --git a/sys/dev/usb/storage/ustorage_fs.c b/sys/dev/usb/storage/ustorage_fs.c
index dbf025a..dd6b413 100644
--- a/sys/dev/usb/storage/ustorage_fs.c
+++ b/sys/dev/usb/storage/ustorage_fs.c
@@ -355,7 +355,7 @@ ustorage_fs_attach(device_t dev)
int unit;
/*
- * NOTE: the softc struct is bzero-ed in device_set_driver.
+ * NOTE: the softc struct is cleared in device_set_driver.
* We can safely call ustorage_fs_detach without specifically
* initializing the struct.
*/
@@ -364,6 +364,9 @@ ustorage_fs_attach(device_t dev)
sc->sc_udev = uaa->device;
unit = device_get_unit(dev);
+ /* enable power saving mode */
+ usbd_set_power_mode(uaa->device, USB_POWER_MODE_SAVE);
+
if (unit == 0) {
if (ustorage_fs_ramdisk == NULL) {
/*
@@ -371,7 +374,9 @@ ustorage_fs_attach(device_t dev)
* further
*/
ustorage_fs_ramdisk =
- malloc(USTORAGE_FS_RAM_SECT << 9, M_USB, M_ZERO | M_WAITOK);
+ malloc(USTORAGE_FS_RAM_SECT << 9, M_USB,
+ M_ZERO | M_WAITOK);
+
if (ustorage_fs_ramdisk == NULL) {
return (ENOMEM);
}
diff --git a/sys/dev/usb/template/usb_template.c b/sys/dev/usb/template/usb_template.c
index cf97482..209016a 100644
--- a/sys/dev/usb/template/usb_template.c
+++ b/sys/dev/usb/template/usb_template.c
@@ -913,7 +913,7 @@ usb_hw_ep_resolve(struct usb_device *udev,
}
ues = udev->bus->scratch[0].hw_ep_scratch;
- bzero(ues, sizeof(*ues));
+ memset(ues, 0, sizeof(*ues));
ues->ep_max = ues->ep;
ues->cd = (void *)desc;
@@ -1240,7 +1240,7 @@ usb_temp_setup(struct usb_device *udev,
}
uts = udev->bus->scratch[0].temp_setup;
- bzero(uts, sizeof(*uts));
+ memset(uts, 0, sizeof(*uts));
uts->usb_speed = udev->speed;
uts->self_powered = udev->flags.self_powered;
diff --git a/sys/dev/usb/usb.h b/sys/dev/usb/usb.h
index 6a9b126..fb1c8e6 100644
--- a/sys/dev/usb/usb.h
+++ b/sys/dev/usb/usb.h
@@ -323,7 +323,7 @@ struct usb_devcap_usb2ext_descriptor {
uByte bLength;
uByte bDescriptorType;
uByte bDevCapabilityType;
- uByte bmAttributes;
+ uDWord bmAttributes;
#define USB_V2EXT_LPM 0x02
} __packed;
typedef struct usb_devcap_usb2ext_descriptor usb_devcap_usb2ext_descriptor_t;
@@ -336,7 +336,7 @@ struct usb_devcap_ss_descriptor {
uWord wSpeedsSupported;
uByte bFunctionalitySupport;
uByte bU1DevExitLat;
- uByte bU2DevExitLat;
+ uWord wU2DevExitLat;
} __packed;
typedef struct usb_devcap_ss_descriptor usb_devcap_ss_descriptor_t;
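
Both corrections above track the USB 3.0 specification: bmAttributes of the USB 2.0 Extension capability is a 32-bit field and the U2 device exit latency is 16 bits, so the struct fields widen accordingly. A sketch of reading the widened field with the matching accessor (the descriptor pointer is hypothetical):

/* desc points at a parsed usb_devcap_usb2ext_descriptor */
uint32_t attrs = UGETDW(desc->bmAttributes);
int lpm_capable = (attrs & USB_V2EXT_LPM) != 0;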
diff --git a/sys/dev/usb/usb_busdma.c b/sys/dev/usb/usb_busdma.c
index f550dd0..d31aeec 100644
--- a/sys/dev/usb/usb_busdma.c
+++ b/sys/dev/usb/usb_busdma.c
@@ -80,9 +80,9 @@ void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
struct usb_page_search *res)
{
+#if USB_HAVE_BUSDMA
struct usb_page *page;
-#if USB_HAVE_BUSDMA
if (pc->page_start) {
/* Case 1 - something has been loaded into DMA */
@@ -145,7 +145,7 @@ usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
if (buf_res.length > len) {
buf_res.length = len;
}
- bcopy(ptr, buf_res.buffer, buf_res.length);
+ memcpy(buf_res.buffer, ptr, buf_res.length);
offset += buf_res.length;
len -= buf_res.length;
@@ -267,7 +267,7 @@ usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
if (res.length > len) {
res.length = len;
}
- bcopy(res.buffer, ptr, res.length);
+ memcpy(ptr, res.buffer, res.length);
offset += res.length;
len -= res.length;
@@ -325,7 +325,7 @@ usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
if (res.length > len) {
res.length = len;
}
- bzero(res.buffer, res.length);
+ memset(res.buffer, 0, res.length);
offset += res.length;
len -= res.length;
@@ -560,7 +560,7 @@ usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
bus_dmamem_free(utag->tag, ptr, map);
goto error;
}
- bzero(ptr, size);
+ memset(ptr, 0, size);
usb_pc_cpu_flush(pc);
@@ -797,7 +797,7 @@ usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
struct mtx *mtx, usb_dma_callback_t *func,
uint8_t ndmabits, uint8_t nudt)
{
- bzero(udpt, sizeof(*udpt));
+ memset(udpt, 0, sizeof(*udpt));
/* sanity checking */
if ((nudt == 0) ||
@@ -818,7 +818,7 @@ usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
udpt->dma_bits = ndmabits;
while (nudt--) {
- bzero(udt, sizeof(*udt));
+ memset(udt, 0, sizeof(*udt));
udt->tag_parent = udpt;
udt++;
}
diff --git a/sys/dev/usb/usb_compat_linux.c b/sys/dev/usb/usb_compat_linux.c
index 8e20d20..2f78d9f 100644
--- a/sys/dev/usb/usb_compat_linux.c
+++ b/sys/dev/usb/usb_compat_linux.c
@@ -564,7 +564,7 @@ usb_clear_halt(struct usb_device *dev, struct usb_host_endpoint *uhe)
type = uhe->desc.bmAttributes & UE_XFERTYPE;
addr = uhe->desc.bEndpointAddress;
- bzero(cfg, sizeof(cfg));
+ memset(cfg, 0, sizeof(cfg));
cfg[0].type = type;
cfg[0].endpoint = addr & UE_ADDR;
@@ -709,12 +709,12 @@ usb_control_msg(struct usb_device *dev, struct usb_host_endpoint *uhe,
urb->dev = dev;
urb->endpoint = uhe;
- bcopy(&req, urb->setup_packet, sizeof(req));
+ memcpy(urb->setup_packet, &req, sizeof(req));
if (size && (!(req.bmRequestType & UT_READ))) {
/* move the data to a real buffer */
- bcopy(data, USB_ADD_BYTES(urb->setup_packet,
- sizeof(req)), size);
+ memcpy(USB_ADD_BYTES(urb->setup_packet, sizeof(req)),
+ data, size);
}
err = usb_start_wait_urb(urb, timeout, &actlen);
@@ -789,7 +789,7 @@ usb_setup_endpoint(struct usb_device *dev,
if (bufsize == 0) {
return (0);
}
- bzero(cfg, sizeof(cfg));
+ memset(cfg, 0, sizeof(cfg));
if (type == UE_ISOCHRONOUS) {
@@ -1251,7 +1251,7 @@ usb_init_urb(struct urb *urb)
if (urb == NULL) {
return;
}
- bzero(urb, sizeof(*urb));
+ memset(urb, 0, sizeof(*urb));
}
/*------------------------------------------------------------------------*
diff --git a/sys/dev/usb/usb_dev.c b/sys/dev/usb/usb_dev.c
index e5d98fe..e2c934e 100644
--- a/sys/dev/usb/usb_dev.c
+++ b/sys/dev/usb/usb_dev.c
@@ -81,7 +81,7 @@
#ifdef USB_DEBUG
static int usb_fifo_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, dev, CTLFLAG_RW, 0, "USB device");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, dev, CTLFLAG_RW, 0, "USB device");
SYSCTL_INT(_hw_usb_dev, OID_AUTO, debug, CTLFLAG_RW,
&usb_fifo_debug, 0, "Debug Level");
@@ -1809,8 +1809,8 @@ usb_fifo_free_buffer(struct usb_fifo *f)
}
/* reset queues */
- bzero(&f->free_q, sizeof(f->free_q));
- bzero(&f->used_q, sizeof(f->used_q));
+ memset(&f->free_q, 0, sizeof(f->free_q));
+ memset(&f->used_q, 0, sizeof(f->used_q));
}
void
@@ -1909,7 +1909,7 @@ usb_fifo_put_data_linear(struct usb_fifo *f, void *ptr,
io_len = MIN(len, m->cur_data_len);
- bcopy(ptr, m->cur_data_ptr, io_len);
+ memcpy(m->cur_data_ptr, ptr, io_len);
m->cur_data_len = io_len;
ptr = USB_ADD_BYTES(ptr, io_len);
@@ -2052,7 +2052,7 @@ usb_fifo_get_data_linear(struct usb_fifo *f, void *ptr,
io_len = MIN(len, m->cur_data_len);
- bcopy(m->cur_data_ptr, ptr, io_len);
+ memcpy(ptr, m->cur_data_ptr, io_len);
len -= io_len;
ptr = USB_ADD_BYTES(ptr, io_len);
diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c
index 7e0de33..726d7a7 100644
--- a/sys/dev/usb/usb_device.c
+++ b/sys/dev/usb/usb_device.c
@@ -1851,7 +1851,8 @@ repeat_set_config:
}
}
if (set_config_failed == 0 && config_index == 0 &&
- usb_test_quirk(&uaa, UQ_MSC_NO_SYNC_CACHE) == 0) {
+ usb_test_quirk(&uaa, UQ_MSC_NO_SYNC_CACHE) == 0 &&
+ usb_test_quirk(&uaa, UQ_MSC_NO_GETMAXLUN) == 0) {
/*
* Try to figure out if there are any MSC quirks we
diff --git a/sys/dev/usb/usb_generic.c b/sys/dev/usb/usb_generic.c
index d62f8f9..f175eb9 100644
--- a/sys/dev/usb/usb_generic.c
+++ b/sys/dev/usb/usb_generic.c
@@ -126,7 +126,7 @@ struct usb_fifo_methods usb_ugen_methods = {
#ifdef USB_DEBUG
static int ugen_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ugen, CTLFLAG_RW, 0, "USB generic");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ugen, CTLFLAG_RW, 0, "USB generic");
SYSCTL_INT(_hw_usb_ugen, OID_AUTO, debug, CTLFLAG_RW, &ugen_debug,
0, "Debug level");
@@ -240,7 +240,7 @@ ugen_open_pipe_write(struct usb_fifo *f)
/* transfers are already opened */
return (0);
}
- bzero(usb_config, sizeof(usb_config));
+ memset(usb_config, 0, sizeof(usb_config));
usb_config[1].type = UE_CONTROL;
usb_config[1].endpoint = 0;
@@ -308,7 +308,7 @@ ugen_open_pipe_read(struct usb_fifo *f)
/* transfers are already opened */
return (0);
}
- bzero(usb_config, sizeof(usb_config));
+ memset(usb_config, 0, sizeof(usb_config));
usb_config[1].type = UE_CONTROL;
usb_config[1].endpoint = 0;
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index 5795d56..8652661 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -76,7 +76,7 @@
#ifdef USB_DEBUG
static int uhub_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, uhub, CTLFLAG_RW, 0, "USB HUB");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uhub, CTLFLAG_RW, 0, "USB HUB");
SYSCTL_INT(_hw_usb_uhub, OID_AUTO, debug, CTLFLAG_RW, &uhub_debug, 0,
"Debug level");
diff --git a/sys/dev/usb/usb_ioctl.h b/sys/dev/usb/usb_ioctl.h
index d35fa10..9af6ee5 100644
--- a/sys/dev/usb/usb_ioctl.h
+++ b/sys/dev/usb/usb_ioctl.h
@@ -289,6 +289,10 @@ struct usb_gen_quirk {
#define USB_GET_CM_OVER_DATA _IOR ('U', 180, int)
#define USB_SET_CM_OVER_DATA _IOW ('U', 181, int)
+/* GPIO control */
+#define USB_GET_GPIO _IOR ('U', 182, int)
+#define USB_SET_GPIO _IOW ('U', 183, int)
+
/* USB file system interface */
#define USB_FS_START _IOW ('U', 192, struct usb_fs_start)
#define USB_FS_STOP _IOW ('U', 193, struct usb_fs_stop)
diff --git a/sys/dev/usb/usb_msctest.c b/sys/dev/usb/usb_msctest.c
index 0355653..7f33014 100644
--- a/sys/dev/usb/usb_msctest.c
+++ b/sys/dev/usb/usb_msctest.c
@@ -475,8 +475,8 @@ bbb_command_start(struct bbb_transfer *sc, uint8_t dir, uint8_t lun,
sc->data_timeout = (data_timeout + USB_MS_HZ);
sc->actlen = 0;
sc->cmd_len = cmd_len;
- bzero(&sc->cbw.CBWCDB, sizeof(sc->cbw.CBWCDB));
- bcopy(cmd_ptr, &sc->cbw.CBWCDB, cmd_len);
+ memset(&sc->cbw.CBWCDB, 0, sizeof(sc->cbw.CBWCDB));
+ memcpy(&sc->cbw.CBWCDB, cmd_ptr, cmd_len);
DPRINTFN(1, "SCSI cmd = %*D\n", (int)cmd_len, &sc->cbw.CBWCDB, ":");
mtx_lock(&sc->mtx);
@@ -603,6 +603,29 @@ usb_iface_is_cdrom(struct usb_device *udev, uint8_t iface_index)
return (is_cdrom);
}
+static uint8_t
+usb_msc_get_max_lun(struct usb_device *udev, uint8_t iface_index)
+{
+ struct usb_device_request req;
+ usb_error_t err;
+ uint8_t buf = 0;
+
+ /* The Get Max Lun command is a class-specific request. */
+ req.bmRequestType = UT_READ_CLASS_INTERFACE;
+ req.bRequest = 0xFE; /* GET_MAX_LUN */
+ USETW(req.wValue, 0);
+ req.wIndex[0] = iface_index;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 1);
+
+ err = usbd_do_request(udev, NULL, &req, &buf);
+ if (err)
+ buf = 0;
+
+ return (buf);
+}
+
usb_error_t
usb_msc_auto_quirk(struct usb_device *udev, uint8_t iface_index)
{
@@ -622,6 +645,11 @@ usb_msc_auto_quirk(struct usb_device *udev, uint8_t iface_index)
*/
usb_pause_mtx(NULL, hz);
+ if (usb_msc_get_max_lun(udev, iface_index) == 0) {
+ DPRINTF("Device has only got one LUN.\n");
+ usbd_add_dynamic_quirk(udev, UQ_MSC_NO_GETMAXLUN);
+ }
+
is_no_direct = 1;
for (timeout = 4; timeout; timeout--) {
err = bbb_command_start(sc, DIR_IN, 0, sc->buffer,
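
GET_MAX_LUN (class request 0xFE) returns the highest LUN index, not a count, so a reply of 0 means a single logical unit; a device that stalls the request is treated the same way here, because the error path forces buf back to 0. In other words (hypothetical caller):

uint8_t maxlun = usb_msc_get_max_lun(udev, iface_index);
uint8_t nluns  = maxlun + 1;	/* reply is the highest index */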
diff --git a/sys/dev/usb/usb_process.c b/sys/dev/usb/usb_process.c
index 051ded9..ab579f2 100644
--- a/sys/dev/usb/usb_process.c
+++ b/sys/dev/usb/usb_process.c
@@ -67,11 +67,17 @@ static int usb_pcount;
#define USB_THREAD_CREATE(f, s, p, ...) \
kproc_kthread_add((f), (s), &usbproc, (p), RFHIGHPID, \
0, "usb", __VA_ARGS__)
+#if (__FreeBSD_version >= 900000)
+#define USB_THREAD_SUSPEND_CHECK() kthread_suspend_check()
+#else
+#define USB_THREAD_SUSPEND_CHECK() kthread_suspend_check(curthread)
+#endif
#define USB_THREAD_SUSPEND(p) kthread_suspend(p,0)
#define USB_THREAD_EXIT(err) kthread_exit()
#else
#define USB_THREAD_CREATE(f, s, p, ...) \
kthread_create((f), (s), (p), RFHIGHPID, 0, __VA_ARGS__)
+#define USB_THREAD_SUSPEND_CHECK() kthread_suspend_check(curproc)
#define USB_THREAD_SUSPEND(p) kthread_suspend(p,0)
#define USB_THREAD_EXIT(err) kthread_exit(err)
#endif
@@ -79,7 +85,7 @@ static int usb_pcount;
#ifdef USB_DEBUG
static int usb_proc_debug;
-SYSCTL_NODE(_hw_usb, OID_AUTO, proc, CTLFLAG_RW, 0, "USB process");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, proc, CTLFLAG_RW, 0, "USB process");
SYSCTL_INT(_hw_usb_proc, OID_AUTO, debug, CTLFLAG_RW, &usb_proc_debug, 0,
"Debug level");
@@ -98,6 +104,9 @@ usb_process(void *arg)
struct usb_proc_msg *pm;
struct thread *td;
+ /* in case of attach error, check for suspended */
+ USB_THREAD_SUSPEND_CHECK();
+
/* adjust priority */
td = curthread;
thread_lock(td);
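
The suspend check added at thread start covers the window where an attach error suspends the freshly created USB process before it has ever run; without the check, the suspender would block forever. The general shape of the pattern (thread body hypothetical):

static void
mydev_thread(void *arg)
{
	/* honor a suspension requested before we first ran */
	USB_THREAD_SUSPEND_CHECK();

	for (;;) {
		/* ... process queued messages ... */
	}
}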
diff --git a/sys/dev/usb/usb_request.c b/sys/dev/usb/usb_request.c
index 347f946..d692b93 100644
--- a/sys/dev/usb/usb_request.c
+++ b/sys/dev/usb/usb_request.c
@@ -517,7 +517,7 @@ usbd_do_request_flags(struct usb_device *udev, struct mtx *mtx,
}
} else
#endif
- bcopy(desc, data, length);
+ memcpy(data, desc, length);
}
goto done; /* success */
}
diff --git a/sys/dev/usb/usb_transfer.c b/sys/dev/usb/usb_transfer.c
index cd7b2e2..6f4b678 100644
--- a/sys/dev/usb/usb_transfer.c
+++ b/sys/dev/usb/usb_transfer.c
@@ -858,7 +858,7 @@ usbd_transfer_setup(struct usb_device *udev,
if (parm.err) {
goto done;
}
- bzero(&parm, sizeof(parm));
+ memset(&parm, 0, sizeof(parm));
parm.udev = udev;
parm.speed = usbd_get_speed(udev);
@@ -982,7 +982,7 @@ usbd_transfer_setup(struct usb_device *udev,
* memory:
*/
xfer = &dummy;
- bzero(&dummy, sizeof(dummy));
+ memset(&dummy, 0, sizeof(dummy));
refcount++;
}
diff --git a/sys/dev/usb/usb_util.c b/sys/dev/usb/usb_util.c
index 1c357a3..4fe79c5 100644
--- a/sys/dev/usb/usb_util.c
+++ b/sys/dev/usb/usb_util.c
@@ -58,31 +58,6 @@
#include <dev/usb/usb_bus.h>
/*------------------------------------------------------------------------*
- * device_delete_all_children - delete all children of a device
- *------------------------------------------------------------------------*/
-#ifndef device_delete_all_children
-int
-device_delete_all_children(device_t dev)
-{
- device_t *devlist;
- int devcount;
- int error;
-
- error = device_get_children(dev, &devlist, &devcount);
- if (error == 0) {
- while (devcount-- > 0) {
- error = device_delete_child(dev, devlist[devcount]);
- if (error) {
- break;
- }
- }
- free(devlist, M_TEMP);
- }
- return (error);
-}
-#endif
-
-/*------------------------------------------------------------------------*
* device_set_usb_desc
*
* This function can be called at probe or attach to set the USB
@@ -140,33 +115,21 @@ device_set_usb_desc(device_t dev)
*
* This function will delay the code by the passed number of system
* ticks. The passed mutex "mtx" will be dropped while waiting, if
- * "mtx" is not NULL.
+ * "mtx" is different from NULL.
*------------------------------------------------------------------------*/
void
-usb_pause_mtx(struct mtx *mtx, int _ticks)
+usb_pause_mtx(struct mtx *mtx, int timo)
{
if (mtx != NULL)
mtx_unlock(mtx);
- if (cold) {
- /* convert to milliseconds */
- _ticks = (_ticks * 1000) / hz;
- /* convert to microseconds, rounded up */
- _ticks = (_ticks + 1) * 1000;
- DELAY(_ticks);
-
- } else {
+ /*
+ * Add one tick to the timeout so that we don't return too
+ * early! Note that pause() will assert that the passed
+ * timeout is positive and non-zero!
+ */
+ pause("USBWAIT", timo + 1);
- /*
- * Add one to the number of ticks so that we don't return
- * too early!
- */
- _ticks++;
-
- if (pause("USBWAIT", _ticks)) {
- /* ignore */
- }
- }
if (mtx != NULL)
mtx_lock(mtx);
}
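
After the rewrite, usb_pause_mtx() is a thin wrapper around pause(9) that drops an optional mutex for the duration of the sleep. A typical call sleeping roughly 100 ms with the softc lock held (the caller is hypothetical):

static void
mydev_settle(struct mydev_softc *sc)
{
	/* sc_mtx is released while sleeping and retaken afterwards */
	usb_pause_mtx(&sc->sc_mtx, hz / 10);
}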
diff --git a/sys/dev/usb/usb_util.h b/sys/dev/usb/usb_util.h
index 35abedd..7e52404 100644
--- a/sys/dev/usb/usb_util.h
+++ b/sys/dev/usb/usb_util.h
@@ -27,7 +27,6 @@
#ifndef _USB_UTIL_H_
#define _USB_UTIL_H_
-int device_delete_all_children(device_t dev);
uint8_t usb_make_str_desc(void *ptr, uint16_t max_len, const char *s);
void usb_printbcd(char *p, uint16_t p_len, uint16_t bcd);
void usb_trim_spaces(char *p);
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index 5a4d3f4..4e3af74 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -664,7 +664,7 @@ vendor LINKSYS3 0x1915 Linksys
vendor QUALCOMMINC 0x19d2 Qualcomm, Incorporated
vendor WCH2 0x1a86 QinHeng Electronics
vendor STELERA 0x1a8d Stelera Wireless
-vendor MATRIXORBITAL 0x1b3d Matrix Orbital
+vendor MATRIXORBITAL 0x1b3d Matrix Orbital
vendor OVISLINK 0x1b75 OvisLink
vendor TCTMOBILE 0x1bbb TCT Mobile
vendor WAGO 0x1be3 WAGO Kontakttechnik GmbH.
@@ -915,8 +915,10 @@ product ALCATEL OT535 0x02df One Touch 535/735
/* Alcor Micro, Inc. products */
product ALCOR2 KBD_HUB 0x2802 Kbd Hub
+product ALCOR DUMMY 0x0000 Dummy product
product ALCOR SDCR_6335 0x6335 SD/MMC Card Reader
product ALCOR SDCR_6362 0x6362 SD/MMC Card Reader
+product ALCOR SDCR_6366 0x6366 SD/MMC Card Reader
product ALCOR TRANSCEND 0x6387 Transcend JetFlash Drive
product ALCOR MA_KBD_HUB 0x9213 MacAlly Kbd Hub
product ALCOR AU9814 0x9215 AU9814 Hub
@@ -2084,11 +2086,13 @@ product LINKSYS4 RT3070 0x0078 RT3070
product LINKSYS4 WUSB600NV2 0x0079 WUSB600N v2
/* Logitech products */
+product LOGITECH LANW300NU2 0x0166 LAN-W300N/U2
product LOGITECH M2452 0x0203 M2452 keyboard
product LOGITECH M4848 0x0301 M4848 mouse
product LOGITECH PAGESCAN 0x040f PageScan
product LOGITECH QUICKCAMWEB 0x0801 QuickCam Web
product LOGITECH QUICKCAMPRO 0x0810 QuickCam Pro
+product LOGITECH WEBCAMC100 0x0817 Webcam C100
product LOGITECH QUICKCAMEXP 0x0840 QuickCam Express
product LOGITECH QUICKCAM 0x0850 QuickCam
product LOGITECH QUICKCAMPRO3 0x0990 QuickCam Pro 9000
@@ -2134,7 +2138,7 @@ product MACALLY MOUSE1 0x0101 mouse
/* Marvell Technology Group, Ltd. products */
product MARVELL SHEEVAPLUG 0x9e8f SheevaPlug serial interface
-
+
/* Matrix Orbital products */
product MATRIXORBITAL MOUA 0x0153 Matrix Orbital MOU-Axxxx LCD displays
@@ -2179,6 +2183,7 @@ product MELCO RT2870_1 0x0148 RT2870
product MELCO RT2870_2 0x0150 RT2870
product MELCO WLIUCGN 0x015d WLI-UC-GN
product MELCO WLIUCG301N 0x016f WLI-UC-G301N
+product MELCO WLIUCGNM 0x01a2 WLI-UC-GNM
/* Merlin products */
product MERLIN V620 0x1110 Merlin V620
diff --git a/sys/dev/usb/wlan/if_rum.c b/sys/dev/usb/wlan/if_rum.c
index c51d485..8cf9f47 100644
--- a/sys/dev/usb/wlan/if_rum.c
+++ b/sys/dev/usb/wlan/if_rum.c
@@ -80,7 +80,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int rum_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, rum, CTLFLAG_RW, 0, "USB rum");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, rum, CTLFLAG_RW, 0, "USB rum");
SYSCTL_INT(_hw_usb_rum, OID_AUTO, debug, CTLFLAG_RW, &rum_debug, 0,
"Debug level");
#endif
diff --git a/sys/dev/usb/wlan/if_run.c b/sys/dev/usb/wlan/if_run.c
index a0ac26a..ad6c736 100644
--- a/sys/dev/usb/wlan/if_run.c
+++ b/sys/dev/usb/wlan/if_run.c
@@ -82,7 +82,7 @@ __FBSDID("$FreeBSD$");
#ifdef RUN_DEBUG
int run_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, run, CTLFLAG_RW, 0, "USB run");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, run, CTLFLAG_RW, 0, "USB run");
SYSCTL_INT(_hw_usb_run, OID_AUTO, debug, CTLFLAG_RW, &run_debug, 0,
"run debug level");
#endif
@@ -208,12 +208,14 @@ static const STRUCT_USB_HOST_ID run_devs[] = {
RUN_DEV(LOGITEC, RT2870_1),
RUN_DEV(LOGITEC, RT2870_2),
RUN_DEV(LOGITEC, RT2870_3),
+ RUN_DEV(LOGITECH, LANW300NU2),
RUN_DEV(MELCO, RT2870_1),
RUN_DEV(MELCO, RT2870_2),
RUN_DEV(MELCO, WLIUCAG300N),
RUN_DEV(MELCO, WLIUCG300N),
RUN_DEV(MELCO, WLIUCG301N),
RUN_DEV(MELCO, WLIUCGN),
+ RUN_DEV(MELCO, WLIUCGNM),
RUN_DEV(MOTOROLA4, RT2770),
RUN_DEV(MOTOROLA4, RT3070),
RUN_DEV(MSI, RT3070_1),
diff --git a/sys/dev/usb/wlan/if_uath.c b/sys/dev/usb/wlan/if_uath.c
index 328dc4f..8bc821b 100644
--- a/sys/dev/usb/wlan/if_uath.c
+++ b/sys/dev/usb/wlan/if_uath.c
@@ -111,7 +111,7 @@ __FBSDID("$FreeBSD$");
#include <dev/usb/wlan/if_uathreg.h>
#include <dev/usb/wlan/if_uathvar.h>
-SYSCTL_NODE(_hw_usb, OID_AUTO, uath, CTLFLAG_RW, 0, "USB Atheros");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, uath, CTLFLAG_RW, 0, "USB Atheros");
static int uath_countrycode = CTRY_DEFAULT; /* country code */
SYSCTL_INT(_hw_usb_uath, OID_AUTO, countrycode, CTLFLAG_RW, &uath_countrycode,
@@ -710,12 +710,12 @@ uath_cmdsend(struct uath_softc *sc, uint32_t code, const void *idata, int ilen,
cmd->buflen = roundup2(sizeof(struct uath_cmd_hdr) + ilen, 4);
hdr = (struct uath_cmd_hdr *)cmd->buf;
- bzero(hdr, sizeof (struct uath_cmd_hdr)); /* XXX not needed */
+ memset(hdr, 0, sizeof(struct uath_cmd_hdr));
hdr->len = htobe32(cmd->buflen);
hdr->code = htobe32(code);
hdr->msgid = cmd->msgid; /* don't care about endianness */
hdr->magic = htobe32((cmd->flags & UATH_CMD_FLAG_MAGIC) ? 1 << 24 : 0);
- bcopy(idata, (uint8_t *)(hdr + 1), ilen);
+ memcpy((uint8_t *)(hdr + 1), idata, ilen);
#ifdef UATH_DEBUG
if (sc->sc_debug & UATH_DEBUG_CMDS) {
@@ -1403,7 +1403,7 @@ uath_dataflush(struct uath_softc *sc)
chunk->flags = UATH_CFLAGS_FINAL;
chunk->length = htobe16(sizeof (struct uath_tx_desc));
- bzero(desc, sizeof(struct uath_tx_desc));
+ memset(desc, 0, sizeof(struct uath_tx_desc));
desc->msglen = htobe32(sizeof(struct uath_tx_desc));
desc->msgid = (sc->sc_msgid++) + 1; /* don't care about endianness */
desc->type = htobe32(WDCMSG_FLUSH);
@@ -1482,7 +1482,7 @@ uath_set_chan(struct uath_softc *sc, struct ieee80211_channel *c)
#endif
struct uath_cmd_reset reset;
- bzero(&reset, sizeof reset);
+ memset(&reset, 0, sizeof(reset));
if (IEEE80211_IS_CHAN_2GHZ(c))
reset.flags |= htobe32(UATH_CHAN_2GHZ);
if (IEEE80211_IS_CHAN_5GHZ(c))
@@ -1971,7 +1971,7 @@ uath_create_connection(struct uath_softc *sc, uint32_t connid)
struct uath_cmd_create_connection create;
ni = ieee80211_ref_node(vap->iv_bss);
- bzero(&create, sizeof create);
+ memset(&create, 0, sizeof(create));
create.connid = htobe32(connid);
create.bssid = htobe32(0);
/* XXX packed or not? */
@@ -2000,7 +2000,7 @@ uath_set_rates(struct uath_softc *sc, const struct ieee80211_rateset *rs)
{
struct uath_cmd_rates rates;
- bzero(&rates, sizeof rates);
+ memset(&rates, 0, sizeof(rates));
rates.connid = htobe32(UATH_ID_BSS); /* XXX */
rates.size = htobe32(sizeof(struct uath_cmd_rateset));
/* XXX bounds check rs->rs_nrates */
@@ -2022,7 +2022,7 @@ uath_write_associd(struct uath_softc *sc)
struct uath_cmd_set_associd associd;
ni = ieee80211_ref_node(vap->iv_bss);
- bzero(&associd, sizeof associd);
+ memset(&associd, 0, sizeof(associd));
associd.defaultrateix = htobe32(1); /* XXX */
associd.associd = htobe32(ni->ni_associd);
associd.timoffset = htobe32(0x3b); /* XXX */
@@ -2168,7 +2168,7 @@ uath_set_key(struct uath_softc *sc, const struct ieee80211_key *wk,
struct uath_cmd_crypto crypto;
int i;
- bzero(&crypto, sizeof crypto);
+ memset(&crypto, 0, sizeof(crypto));
crypto.keyidx = htobe32(index);
crypto.magic1 = htobe32(1);
crypto.size = htobe32(368);
@@ -2176,7 +2176,7 @@ uath_set_key(struct uath_softc *sc, const struct ieee80211_key *wk,
crypto.flags = htobe32(0x80000068);
if (index != UATH_DEFAULT_KEY)
crypto.flags |= htobe32(index << 16);
- memset(crypto.magic2, 0xff, sizeof crypto.magic2);
+ memset(crypto.magic2, 0xff, sizeof(crypto.magic2));
/*
* Each byte of the key must be XOR'ed with 10101010 before being
diff --git a/sys/dev/usb/wlan/if_upgt.c b/sys/dev/usb/wlan/if_upgt.c
index b9381a2..78ac2d9 100644
--- a/sys/dev/usb/wlan/if_upgt.c
+++ b/sys/dev/usb/wlan/if_upgt.c
@@ -70,7 +70,7 @@
* Sebastien Bourdeauducq <lekernel@prism54.org>.
*/
-SYSCTL_NODE(_hw, OID_AUTO, upgt, CTLFLAG_RD, 0,
+static SYSCTL_NODE(_hw, OID_AUTO, upgt, CTLFLAG_RD, 0,
"USB PrismGT GW3887 driver parameters");
#ifdef UPGT_DEBUG
@@ -432,7 +432,7 @@ upgt_get_stats(struct upgt_softc *sc)
/*
* Transmit the URB containing the CMD data.
*/
- bzero(data_cmd->buf, MCLBYTES);
+ memset(data_cmd->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data_cmd->buf;
mem->addr = htole32(sc->sc_memaddr_frame_start +
@@ -540,7 +540,7 @@ upgt_set_led(struct upgt_softc *sc, int action)
/*
* Transmit the URB containing the CMD data.
*/
- bzero(data_cmd->buf, MCLBYTES);
+ memset(data_cmd->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data_cmd->buf;
mem->addr = htole32(sc->sc_memaddr_frame_start +
@@ -670,7 +670,7 @@ upgt_set_macfilter(struct upgt_softc *sc, uint8_t state)
/*
* Transmit the URB containing the CMD data.
*/
- bzero(data_cmd->buf, MCLBYTES);
+ memset(data_cmd->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data_cmd->buf;
mem->addr = htole32(sc->sc_memaddr_frame_start +
@@ -785,11 +785,11 @@ upgt_setup_rates(struct ieee80211vap *vap, struct ieee80211com *ic)
* will pickup a rate.
*/
if (ic->ic_curmode == IEEE80211_MODE_11B)
- bcopy(rateset_auto_11b, sc->sc_cur_rateset,
+ memcpy(sc->sc_cur_rateset, rateset_auto_11b,
sizeof(sc->sc_cur_rateset));
if (ic->ic_curmode == IEEE80211_MODE_11G ||
ic->ic_curmode == IEEE80211_MODE_AUTO)
- bcopy(rateset_auto_11g, sc->sc_cur_rateset,
+ memcpy(sc->sc_cur_rateset, rateset_auto_11g,
sizeof(sc->sc_cur_rateset));
} else {
/* set a fixed rate */
@@ -975,7 +975,7 @@ upgt_set_chan(struct upgt_softc *sc, struct ieee80211_channel *c)
/*
* Transmit the URB containing the CMD data.
*/
- bzero(data_cmd->buf, MCLBYTES);
+ memset(data_cmd->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data_cmd->buf;
mem->addr = htole32(sc->sc_memaddr_frame_start +
@@ -998,11 +998,11 @@ upgt_set_chan(struct upgt_softc *sc, struct ieee80211_channel *c)
chan->settings = sc->sc_eeprom_freq6_settings;
chan->unknown3 = UPGT_CHANNEL_UNKNOWN3;
- bcopy(&sc->sc_eeprom_freq3[channel].data, chan->freq3_1,
+ memcpy(chan->freq3_1, &sc->sc_eeprom_freq3[channel].data,
sizeof(chan->freq3_1));
- bcopy(&sc->sc_eeprom_freq4[channel], chan->freq4,
+ memcpy(chan->freq4, &sc->sc_eeprom_freq4[channel],
sizeof(sc->sc_eeprom_freq4[channel]));
- bcopy(&sc->sc_eeprom_freq3[channel].data, chan->freq3_2,
+ memcpy(chan->freq3_2, &sc->sc_eeprom_freq3[channel].data,
sizeof(chan->freq3_2));
data_cmd->buflen = sizeof(*mem) + sizeof(*chan);
@@ -1331,7 +1331,7 @@ upgt_eeprom_read(struct upgt_softc *sc)
/*
* Transmit the URB containing the CMD data.
*/
- bzero(data_cmd->buf, MCLBYTES);
+ memset(data_cmd->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data_cmd->buf;
mem->addr = htole32(sc->sc_memaddr_frame_start +
@@ -1423,8 +1423,9 @@ upgt_rxeof(struct usb_xfer *xfer, struct upgt_data *data, int *rssi)
"received EEPROM block (offset=%d, len=%d)\n",
eeprom_offset, eeprom_len);
- bcopy(data->buf + sizeof(struct upgt_lmac_eeprom) + 4,
- sc->sc_eeprom + eeprom_offset, eeprom_len);
+ memcpy(sc->sc_eeprom + eeprom_offset,
+ data->buf + sizeof(struct upgt_lmac_eeprom) + 4,
+ eeprom_len);
/* EEPROM data has arrived in time, wakeup. */
wakeup(sc);
@@ -1498,7 +1499,7 @@ upgt_rx(struct upgt_softc *sc, uint8_t *data, int pkglen, int *rssi)
return (NULL);
}
m_adj(m, ETHER_ALIGN);
- bcopy(rxdesc->data, mtod(m, char *), pkglen);
+ memcpy(mtod(m, char *), rxdesc->data, pkglen);
/* trim FCS */
m->m_len = m->m_pkthdr.len = pkglen - IEEE80211_CRC_LEN;
m->m_pkthdr.rcvif = ifp;
@@ -1620,7 +1621,7 @@ upgt_fw_load(struct upgt_softc *sc)
goto fail;
}
data_cmd->buflen = sizeof(start_fwload_cmd);
- bcopy(start_fwload_cmd, data_cmd->buf, data_cmd->buflen);
+ memcpy(data_cmd->buf, start_fwload_cmd, data_cmd->buflen);
upgt_bulk_tx(sc, data_cmd);
/* send X2 header */
@@ -1631,7 +1632,7 @@ upgt_fw_load(struct upgt_softc *sc)
}
data_cmd->buflen = sizeof(struct upgt_fw_x2_header);
x2 = (struct upgt_fw_x2_header *)data_cmd->buf;
- bcopy(UPGT_X2_SIGNATURE, x2->signature, UPGT_X2_SIGNATURE_SIZE);
+ memcpy(x2->signature, UPGT_X2_SIGNATURE, UPGT_X2_SIGNATURE_SIZE);
x2->startaddr = htole32(UPGT_MEMADDR_FIRMWARE_START);
x2->len = htole32(fw->datasize);
x2->crc = upgt_crc32_le((uint8_t *)data_cmd->buf +
@@ -1925,7 +1926,7 @@ upgt_device_reset(struct upgt_softc *sc)
UPGT_UNLOCK(sc);
return (ENOBUFS);
}
- bcopy(init_cmd, data->buf, sizeof(init_cmd));
+ memcpy(data->buf, init_cmd, sizeof(init_cmd));
data->buflen = sizeof(init_cmd);
upgt_bulk_tx(sc, data);
usb_pause_mtx(&sc->sc_mtx, 100);
@@ -2178,7 +2179,7 @@ upgt_tx_start(struct upgt_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
}
/* Transmit the URB containing the TX data. */
- bzero(data->buf, MCLBYTES);
+ memset(data->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data->buf;
mem->addr = htole32(data->addr);
txdesc = (struct upgt_lmac_tx_desc *)(mem + 1);
@@ -2192,7 +2193,7 @@ upgt_tx_start(struct upgt_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
} else {
/* data frames */
txdesc->header1.flags = UPGT_H1_FLAGS_TX_DATA;
- bcopy(sc->sc_cur_rateset, txdesc->rates, sizeof(txdesc->rates));
+ memcpy(txdesc->rates, sc->sc_cur_rateset, sizeof(txdesc->rates));
}
txdesc->header1.type = UPGT_H1_TYPE_TX_DATA;
txdesc->header1.len = htole16(m->m_pkthdr.len);
diff --git a/sys/dev/usb/wlan/if_ural.c b/sys/dev/usb/wlan/if_ural.c
index 048392d..a72da83 100644
--- a/sys/dev/usb/wlan/if_ural.c
+++ b/sys/dev/usb/wlan/if_ural.c
@@ -81,7 +81,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int ural_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, ural, CTLFLAG_RW, 0, "USB ural");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, ural, CTLFLAG_RW, 0, "USB ural");
SYSCTL_INT(_hw_usb_ural, OID_AUTO, debug, CTLFLAG_RW, &ural_debug, 0,
"Debug level");
#endif
diff --git a/sys/dev/usb/wlan/if_urtw.c b/sys/dev/usb/wlan/if_urtw.c
index 6ae7e16..2a52a8c 100644
--- a/sys/dev/usb/wlan/if_urtw.c
+++ b/sys/dev/usb/wlan/if_urtw.c
@@ -61,7 +61,7 @@ __FBSDID("$FreeBSD$");
#include <dev/usb/wlan/if_urtwreg.h>
#include <dev/usb/wlan/if_urtwvar.h>
-SYSCTL_NODE(_hw_usb, OID_AUTO, urtw, CTLFLAG_RW, 0, "USB Realtek 8187L");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, urtw, CTLFLAG_RW, 0, "USB Realtek 8187L");
#ifdef URTW_DEBUG
int urtw_debug = 0;
SYSCTL_INT(_hw_usb_urtw, OID_AUTO, debug, CTLFLAG_RW, &urtw_debug, 0,
@@ -1745,7 +1745,7 @@ urtw_tx_start(struct urtw_softc *sc, struct ieee80211_node *ni, struct mbuf *m0,
if ((0 == xferlen % 64) || (0 == xferlen % 512))
xferlen += 1;
- bzero(data->buf, URTW_TX_MAXSIZE);
+ memset(data->buf, 0, URTW_TX_MAXSIZE);
flags = m0->m_pkthdr.len & 0xfff;
flags |= URTW_TX_FLAG_NO_ENC;
if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
diff --git a/sys/dev/usb/wlan/if_zyd.c b/sys/dev/usb/wlan/if_zyd.c
index f8d905e..835cdc5 100644
--- a/sys/dev/usb/wlan/if_zyd.c
+++ b/sys/dev/usb/wlan/if_zyd.c
@@ -78,7 +78,7 @@ __FBSDID("$FreeBSD$");
#ifdef USB_DEBUG
static int zyd_debug = 0;
-SYSCTL_NODE(_hw_usb, OID_AUTO, zyd, CTLFLAG_RW, 0, "USB zyd");
+static SYSCTL_NODE(_hw_usb, OID_AUTO, zyd, CTLFLAG_RW, 0, "USB zyd");
SYSCTL_INT(_hw_usb_zyd, OID_AUTO, debug, CTLFLAG_RW, &zyd_debug, 0,
"zyd debug level");
@@ -683,7 +683,7 @@ zyd_intr_read_callback(struct usb_xfer *xfer, usb_error_t error)
if (i != cnt)
continue;
/* copy answer into caller-supplied buffer */
- bcopy(cmd->data, rqp->odata, rqp->olen);
+ memcpy(rqp->odata, cmd->data, rqp->olen);
DPRINTF(sc, ZYD_DEBUG_CMD,
"command %p complete, data = %*D \n",
rqp, rqp->olen, rqp->odata, ":");
@@ -783,7 +783,7 @@ zyd_cmd(struct zyd_softc *sc, uint16_t code, const void *idata, int ilen,
return (EINVAL);
cmd.code = htole16(code);
- bcopy(idata, cmd.data, ilen);
+ memcpy(cmd.data, idata, ilen);
DPRINTF(sc, ZYD_DEBUG_CMD, "sending cmd %p = %*D\n",
&rq, ilen, idata, ":");
diff --git a/sys/dev/virtio/balloon/virtio_balloon.c b/sys/dev/virtio/balloon/virtio_balloon.c
new file mode 100644
index 0000000..ef7aca9
--- /dev/null
+++ b/sys/dev/virtio/balloon/virtio_balloon.c
@@ -0,0 +1,569 @@
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Driver for VirtIO memory balloon devices. */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/endian.h>
+#include <sys/kthread.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/sglist.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/virtio/virtio.h>
+#include <dev/virtio/virtqueue.h>
+#include <dev/virtio/balloon/virtio_balloon.h>
+
+#include "virtio_if.h"
+
+struct vtballoon_softc {
+ device_t vtballoon_dev;
+ struct mtx vtballoon_mtx;
+ uint64_t vtballoon_features;
+ uint32_t vtballoon_flags;
+#define VTBALLOON_FLAG_DETACH 0x01
+
+ struct virtqueue *vtballoon_inflate_vq;
+ struct virtqueue *vtballoon_deflate_vq;
+
+ uint32_t vtballoon_desired_npages;
+ uint32_t vtballoon_current_npages;
+ TAILQ_HEAD(,vm_page) vtballoon_pages;
+
+ struct proc *vtballoon_kproc;
+ uint32_t *vtballoon_page_frames;
+ int vtballoon_timeout;
+};
+
+static struct virtio_feature_desc vtballoon_feature_desc[] = {
+ { VIRTIO_BALLOON_F_MUST_TELL_HOST, "MustTellHost" },
+ { VIRTIO_BALLOON_F_STATS_VQ, "StatsVq" },
+
+ { 0, NULL }
+};
+
+static int vtballoon_probe(device_t);
+static int vtballoon_attach(device_t);
+static int vtballoon_detach(device_t);
+static int vtballoon_config_change(device_t);
+
+static void vtballoon_negotiate_features(struct vtballoon_softc *);
+static int vtballoon_alloc_virtqueues(struct vtballoon_softc *);
+
+static int vtballoon_vq_intr(void *);
+
+static void vtballoon_inflate(struct vtballoon_softc *, int);
+static void vtballoon_deflate(struct vtballoon_softc *, int);
+
+static void vtballoon_send_page_frames(struct vtballoon_softc *,
+ struct virtqueue *, int);
+
+static void vtballoon_pop(struct vtballoon_softc *);
+static void vtballoon_stop(struct vtballoon_softc *);
+
+static vm_page_t
+ vtballoon_alloc_page(struct vtballoon_softc *);
+static void vtballoon_free_page(struct vtballoon_softc *, vm_page_t);
+
+static int vtballoon_sleep(struct vtballoon_softc *);
+static void vtballoon_thread(void *);
+static void vtballoon_add_sysctl(struct vtballoon_softc *);
+
+/* Features desired/implemented by this driver. */
+#define VTBALLOON_FEATURES 0
+
+/* Timeout between retries when the balloon needs inflating. */
+#define VTBALLOON_LOWMEM_TIMEOUT hz
+
+/*
+ * Maximum number of pages we'll request to inflate or deflate
+ * the balloon in one virtqueue request. Both Linux and NetBSD
+ * have settled on 256, doing up to 1MB at a time.
+ */
+#define VTBALLOON_PAGES_PER_REQUEST 256
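+
+/*
+ * Worked out (assuming the 4KB balloon page size implied by the
+ * figure above): 256 pages * 4096 bytes = 1MB per request.
+ */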
+
+#define VTBALLOON_MTX(_sc) &(_sc)->vtballoon_mtx
+#define VTBALLOON_LOCK_INIT(_sc, _name) mtx_init(VTBALLOON_MTX((_sc)), _name, \
+ "VirtIO Balloon Lock", MTX_SPIN)
+#define VTBALLOON_LOCK(_sc) mtx_lock_spin(VTBALLOON_MTX((_sc)))
+#define VTBALLOON_UNLOCK(_sc) mtx_unlock_spin(VTBALLOON_MTX((_sc)))
+#define VTBALLOON_LOCK_DESTROY(_sc) mtx_destroy(VTBALLOON_MTX((_sc)))
+
+static device_method_t vtballoon_methods[] = {
+ /* Device methods. */
+ DEVMETHOD(device_probe, vtballoon_probe),
+ DEVMETHOD(device_attach, vtballoon_attach),
+ DEVMETHOD(device_detach, vtballoon_detach),
+
+ /* VirtIO methods. */
+ DEVMETHOD(virtio_config_change, vtballoon_config_change),
+
+ { 0, 0 }
+};
+
+static driver_t vtballoon_driver = {
+ "vtballoon",
+ vtballoon_methods,
+ sizeof(struct vtballoon_softc)
+};
+static devclass_t vtballoon_devclass;
+
+DRIVER_MODULE(virtio_balloon, virtio_pci, vtballoon_driver,
+ vtballoon_devclass, 0, 0);
+MODULE_VERSION(virtio_balloon, 1);
+MODULE_DEPEND(virtio_balloon, virtio, 1, 1, 1);
+
+static int
+vtballoon_probe(device_t dev)
+{
+
+ if (virtio_get_device_type(dev) != VIRTIO_ID_BALLOON)
+ return (ENXIO);
+
+ device_set_desc(dev, "VirtIO Balloon Adapter");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+vtballoon_attach(device_t dev)
+{
+ struct vtballoon_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->vtballoon_dev = dev;
+
+ VTBALLOON_LOCK_INIT(sc, device_get_nameunit(dev));
+ TAILQ_INIT(&sc->vtballoon_pages);
+
+ vtballoon_add_sysctl(sc);
+
+ virtio_set_feature_desc(dev, vtballoon_feature_desc);
+ vtballoon_negotiate_features(sc);
+
+ sc->vtballoon_page_frames = malloc(VTBALLOON_PAGES_PER_REQUEST *
+ sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (sc->vtballoon_page_frames == NULL) {
+ error = ENOMEM;
+ device_printf(dev,
+ "cannot allocate page frame request array\n");
+ goto fail;
+ }
+
+ error = vtballoon_alloc_virtqueues(sc);
+ if (error) {
+ device_printf(dev, "cannot allocate virtqueues\n");
+ goto fail;
+ }
+
+ error = virtio_setup_intr(dev, INTR_TYPE_MISC);
+ if (error) {
+ device_printf(dev, "cannot setup virtqueue interrupts\n");
+ goto fail;
+ }
+
+ error = kproc_create(vtballoon_thread, sc, &sc->vtballoon_kproc,
+ 0, 0, "virtio_balloon");
+ if (error) {
+ device_printf(dev, "cannot create balloon kproc\n");
+ goto fail;
+ }
+
+ virtqueue_enable_intr(sc->vtballoon_inflate_vq);
+ virtqueue_enable_intr(sc->vtballoon_deflate_vq);
+
+fail:
+ if (error)
+ vtballoon_detach(dev);
+
+ return (error);
+}
+
+static int
+vtballoon_detach(device_t dev)
+{
+ struct vtballoon_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->vtballoon_kproc != NULL) {
+ VTBALLOON_LOCK(sc);
+ sc->vtballoon_flags |= VTBALLOON_FLAG_DETACH;
+ wakeup_one(sc);
+ msleep_spin(sc->vtballoon_kproc, VTBALLOON_MTX(sc),
+ "vtbdth", 0);
+ VTBALLOON_UNLOCK(sc);
+
+ sc->vtballoon_kproc = NULL;
+ }
+
+ if (device_is_attached(dev)) {
+ vtballoon_pop(sc);
+ vtballoon_stop(sc);
+ }
+
+ if (sc->vtballoon_page_frames != NULL) {
+ free(sc->vtballoon_page_frames, M_DEVBUF);
+ sc->vtballoon_page_frames = NULL;
+ }
+
+ VTBALLOON_LOCK_DESTROY(sc);
+
+ return (0);
+}
+
+static int
+vtballoon_config_change(device_t dev)
+{
+ struct vtballoon_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ VTBALLOON_LOCK(sc);
+ wakeup_one(sc);
+ VTBALLOON_UNLOCK(sc);
+
+ return (1);
+}
+
+static void
+vtballoon_negotiate_features(struct vtballoon_softc *sc)
+{
+ device_t dev;
+ uint64_t features;
+
+ dev = sc->vtballoon_dev;
+ features = virtio_negotiate_features(dev, VTBALLOON_FEATURES);
+ sc->vtballoon_features = features;
+}
+
+static int
+vtballoon_alloc_virtqueues(struct vtballoon_softc *sc)
+{
+ device_t dev;
+ struct vq_alloc_info vq_info[2];
+ int nvqs;
+
+ dev = sc->vtballoon_dev;
+ nvqs = 2;
+
+ VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtballoon_vq_intr, sc,
+ &sc->vtballoon_inflate_vq, "%s inflate", device_get_nameunit(dev));
+
+ VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtballoon_vq_intr, sc,
+ &sc->vtballoon_deflate_vq, "%s deflate", device_get_nameunit(dev));
+
+ return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
+}
+
+static int
+vtballoon_vq_intr(void *xsc)
+{
+ struct vtballoon_softc *sc;
+
+ sc = xsc;
+
+ VTBALLOON_LOCK(sc);
+ wakeup_one(sc);
+ VTBALLOON_UNLOCK(sc);
+
+ return (1);
+}
+
+static void
+vtballoon_inflate(struct vtballoon_softc *sc, int npages)
+{
+ struct virtqueue *vq;
+ vm_page_t m;
+ int i;
+
+ vq = sc->vtballoon_inflate_vq;
+ m = NULL;
+
+ if (npages > VTBALLOON_PAGES_PER_REQUEST)
+ npages = VTBALLOON_PAGES_PER_REQUEST;
+ KASSERT(npages > 0, ("balloon doesn't need inflating?"));
+
+ for (i = 0; i < npages; i++) {
+ if ((m = vtballoon_alloc_page(sc)) == NULL)
+ break;
+
+ sc->vtballoon_page_frames[i] =
+ VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
+
+ KASSERT(m->queue == PQ_NONE, ("allocated page on queue"));
+ TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, pageq);
+ }
+
+ if (i > 0)
+ vtballoon_send_page_frames(sc, vq, i);
+
+ if (m == NULL)
+ sc->vtballoon_timeout = VTBALLOON_LOWMEM_TIMEOUT;
+}
+
+static void
+vtballoon_deflate(struct vtballoon_softc *sc, int npages)
+{
+ TAILQ_HEAD(, vm_page) free_pages;
+ struct virtqueue *vq;
+ vm_page_t m;
+ int i;
+
+ vq = sc->vtballoon_deflate_vq;
+ TAILQ_INIT(&free_pages);
+
+ if (npages > VTBALLOON_PAGES_PER_REQUEST)
+ npages = VTBALLOON_PAGES_PER_REQUEST;
+ KASSERT(npages > 0, ("balloon doesn't need deflating?"));
+
+ for (i = 0; i < npages; i++) {
+ m = TAILQ_FIRST(&sc->vtballoon_pages);
+ KASSERT(m != NULL, ("no more pages to deflate"));
+
+ sc->vtballoon_page_frames[i] =
+ VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
+
+ TAILQ_REMOVE(&sc->vtballoon_pages, m, pageq);
+ TAILQ_INSERT_TAIL(&free_pages, m, pageq);
+ }
+
+ if (i > 0) {
+ /* Always tell host first before freeing the pages. */
+ vtballoon_send_page_frames(sc, vq, i);
+
+ while ((m = TAILQ_FIRST(&free_pages)) != NULL) {
+ TAILQ_REMOVE(&free_pages, m, pageq);
+ vtballoon_free_page(sc, m);
+ }
+ }
+
+ KASSERT((TAILQ_EMPTY(&sc->vtballoon_pages) &&
+ sc->vtballoon_current_npages == 0) ||
+ (!TAILQ_EMPTY(&sc->vtballoon_pages) &&
+ sc->vtballoon_current_npages != 0), ("balloon empty?"));
+}
+
+static void
+vtballoon_send_page_frames(struct vtballoon_softc *sc, struct virtqueue *vq,
+ int npages)
+{
+ struct sglist sg;
+ struct sglist_seg segs[1];
+ void *c;
+ int error;
+
+ sglist_init(&sg, 1, segs);
+
+ error = sglist_append(&sg, sc->vtballoon_page_frames,
+ npages * sizeof(uint32_t));
+ KASSERT(error == 0, ("error adding page frames to sglist"));
+
+ error = virtqueue_enqueue(vq, vq, &sg, 1, 0);
+ KASSERT(error == 0, ("error enqueuing page frames to virtqueue"));
+
+ /*
+ * Inflate and deflate operations are done synchronously. The
+ * interrupt handler will wake us up.
+ */
+ VTBALLOON_LOCK(sc);
+ virtqueue_notify(vq);
+
+ while ((c = virtqueue_dequeue(vq, NULL)) == NULL)
+ msleep_spin(sc, VTBALLOON_MTX(sc), "vtbspf", 0);
+ VTBALLOON_UNLOCK(sc);
+
+ KASSERT(c == vq, ("unexpected balloon operation response"));
+}
+
+static void
+vtballoon_pop(struct vtballoon_softc *sc)
+{
+
+ while (!TAILQ_EMPTY(&sc->vtballoon_pages))
+ vtballoon_deflate(sc, sc->vtballoon_current_npages);
+}
+
+static void
+vtballoon_stop(struct vtballoon_softc *sc)
+{
+
+ virtqueue_disable_intr(sc->vtballoon_inflate_vq);
+ virtqueue_disable_intr(sc->vtballoon_deflate_vq);
+
+ virtio_stop(sc->vtballoon_dev);
+}
+
+static vm_page_t
+vtballoon_alloc_page(struct vtballoon_softc *sc)
+{
+ vm_page_t m;
+
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
+ VM_ALLOC_NOOBJ);
+ if (m != NULL)
+ sc->vtballoon_current_npages++;
+
+ return (m);
+}
+
+static void
+vtballoon_free_page(struct vtballoon_softc *sc, vm_page_t m)
+{
+
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
+ sc->vtballoon_current_npages--;
+}
+
+static uint32_t
+vtballoon_desired_size(struct vtballoon_softc *sc)
+{
+ uint32_t desired;
+
+ desired = virtio_read_dev_config_4(sc->vtballoon_dev,
+ offsetof(struct virtio_balloon_config, num_pages));
+
+ return (le32toh(desired));
+}
+
+static void
+vtballoon_update_size(struct vtballoon_softc *sc)
+{
+
+ virtio_write_dev_config_4(sc->vtballoon_dev,
+ offsetof(struct virtio_balloon_config, actual),
+ htole32(sc->vtballoon_current_npages));
+}
+
+static int
+vtballoon_sleep(struct vtballoon_softc *sc)
+{
+ int rc, timeout;
+ uint32_t current, desired;
+
+ rc = 0;
+ current = sc->vtballoon_current_npages;
+
+ VTBALLOON_LOCK(sc);
+ for (;;) {
+ if (sc->vtballoon_flags & VTBALLOON_FLAG_DETACH) {
+ rc = 1;
+ break;
+ }
+
+ desired = vtballoon_desired_size(sc);
+ sc->vtballoon_desired_npages = desired;
+
+ /*
+ * Use the non-zero timeout, if one was given, on the first
+ * pass through the loop. On subsequent passes the timeout
+ * will be zero, so we will reevaluate the desired size of
+ * the balloon and break out to retry if needed.
+ */
+ timeout = sc->vtballoon_timeout;
+ sc->vtballoon_timeout = 0;
+
+ if (current > desired)
+ break;
+ if (current < desired && timeout == 0)
+ break;
+
+ msleep_spin(sc, VTBALLOON_MTX(sc), "vtbslp", timeout);
+ }
+ VTBALLOON_UNLOCK(sc);
+
+ return (rc);
+}
+
+static void
+vtballoon_thread(void *xsc)
+{
+ struct vtballoon_softc *sc;
+ uint32_t current, desired;
+
+ sc = xsc;
+
+ for (;;) {
+ if (vtballoon_sleep(sc) != 0)
+ break;
+
+ current = sc->vtballoon_current_npages;
+ desired = sc->vtballoon_desired_npages;
+
+ if (desired != current) {
+ if (desired > current)
+ vtballoon_inflate(sc, desired - current);
+ else
+ vtballoon_deflate(sc, current - desired);
+
+ vtballoon_update_size(sc);
+ }
+ }
+
+ kproc_exit(0);
+}
+
+static void
+vtballoon_add_sysctl(struct vtballoon_softc *sc)
+{
+ device_t dev;
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *tree;
+ struct sysctl_oid_list *child;
+
+ dev = sc->vtballoon_dev;
+ ctx = device_get_sysctl_ctx(dev);
+ tree = device_get_sysctl_tree(dev);
+ child = SYSCTL_CHILDREN(tree);
+
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "desired",
+ CTLFLAG_RD, &sc->vtballoon_desired_npages, sizeof(uint32_t),
+ "Desired balloon size in pages");
+
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "current",
+ CTLFLAG_RD, &sc->vtballoon_current_npages, sizeof(uint32_t),
+ "Current balloon size in pages");
+}
diff --git a/sys/dev/virtio/balloon/virtio_balloon.h b/sys/dev/virtio/balloon/virtio_balloon.h
new file mode 100644
index 0000000..cea84ba
--- /dev/null
+++ b/sys/dev/virtio/balloon/virtio_balloon.h
@@ -0,0 +1,41 @@
+/*
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VIRTIO_BALLOON_H
+#define _VIRTIO_BALLOON_H
+
+#include <sys/types.h>
+
+/* Feature bits. */
+#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0x1 /* Tell before reclaiming pages */
+#define VIRTIO_BALLOON_F_STATS_VQ 0x2 /* Memory stats virtqueue */
+
+/* Size of a PFN in the balloon interface. */
+#define VIRTIO_BALLOON_PFN_SHIFT 12
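+
+/*
+ * Illustrative sketch: a guest physical address is converted to a
+ * balloon PFN by shifting, as the balloon driver does for each page:
+ *
+ *	pfn = VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
+ */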
+
+struct virtio_balloon_config {
+ /* Number of pages host wants Guest to give up. */
+ uint32_t num_pages;
+
+ /* Number of pages we've actually got in balloon. */
+ uint32_t actual;
+};
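+
+/*
+ * For example (illustrative), the driver reads the host's inflation
+ * target with:
+ *
+ *	desired = virtio_read_dev_config_4(dev,
+ *	    offsetof(struct virtio_balloon_config, num_pages));
+ */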
+
+#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
+#define VIRTIO_BALLOON_S_SWAP_OUT 1 /* Amount of memory swapped out */
+#define VIRTIO_BALLOON_S_MAJFLT 2 /* Number of major faults */
+#define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */
+#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */
+#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
+#define VIRTIO_BALLOON_S_NR 6
+
+struct virtio_balloon_stat {
+ uint16_t tag;
+ uint64_t val;
+} __packed;
+
+#endif /* _VIRTIO_BALLOON_H */
diff --git a/sys/dev/virtio/block/virtio_blk.c b/sys/dev/virtio/block/virtio_blk.c
new file mode 100644
index 0000000..09783a8
--- /dev/null
+++ b/sys/dev/virtio/block/virtio_blk.c
@@ -0,0 +1,1149 @@
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Driver for VirtIO block devices. */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bio.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/sglist.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+
+#include <geom/geom_disk.h>
+#include <vm/uma.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/virtio/virtio.h>
+#include <dev/virtio/virtqueue.h>
+#include <dev/virtio/block/virtio_blk.h>
+
+#include "virtio_if.h"
+
+struct vtblk_request {
+ struct virtio_blk_outhdr vbr_hdr;
+ struct bio *vbr_bp;
+ uint8_t vbr_ack;
+
+ TAILQ_ENTRY(vtblk_request) vbr_link;
+};
+
+struct vtblk_softc {
+ device_t vtblk_dev;
+ struct mtx vtblk_mtx;
+ uint64_t vtblk_features;
+ uint32_t vtblk_flags;
+#define VTBLK_FLAG_INDIRECT 0x0001
+#define VTBLK_FLAG_READONLY 0x0002
+#define VTBLK_FLAG_DETACHING 0x0004
+#define VTBLK_FLAG_SUSPENDED 0x0008
+#define VTBLK_FLAG_DUMPING 0x0010
+
+ struct virtqueue *vtblk_vq;
+ struct sglist *vtblk_sglist;
+ struct disk *vtblk_disk;
+
+ struct bio_queue_head vtblk_bioq;
+ TAILQ_HEAD(, vtblk_request)
+ vtblk_req_free;
+ TAILQ_HEAD(, vtblk_request)
+ vtblk_req_ready;
+
+ struct taskqueue *vtblk_tq;
+ struct task vtblk_intr_task;
+
+ int vtblk_sector_size;
+ int vtblk_max_nsegs;
+ int vtblk_unit;
+ int vtblk_request_count;
+
+ struct vtblk_request vtblk_dump_request;
+};
+
+static struct virtio_feature_desc vtblk_feature_desc[] = {
+ { VIRTIO_BLK_F_BARRIER, "HostBarrier" },
+ { VIRTIO_BLK_F_SIZE_MAX, "MaxSegSize" },
+ { VIRTIO_BLK_F_SEG_MAX, "MaxNumSegs" },
+ { VIRTIO_BLK_F_GEOMETRY, "DiskGeometry" },
+ { VIRTIO_BLK_F_RO, "ReadOnly" },
+ { VIRTIO_BLK_F_BLK_SIZE, "BlockSize" },
+ { VIRTIO_BLK_F_SCSI, "SCSICmds" },
+ { VIRTIO_BLK_F_FLUSH, "FlushCmd" },
+ { VIRTIO_BLK_F_TOPOLOGY, "Topology" },
+
+ { 0, NULL }
+};
+
+static int vtblk_modevent(module_t, int, void *);
+
+static int vtblk_probe(device_t);
+static int vtblk_attach(device_t);
+static int vtblk_detach(device_t);
+static int vtblk_suspend(device_t);
+static int vtblk_resume(device_t);
+static int vtblk_shutdown(device_t);
+
+static void vtblk_negotiate_features(struct vtblk_softc *);
+static int vtblk_maximum_segments(struct vtblk_softc *,
+ struct virtio_blk_config *);
+static int vtblk_alloc_virtqueue(struct vtblk_softc *);
+static void vtblk_alloc_disk(struct vtblk_softc *,
+ struct virtio_blk_config *);
+static void vtblk_create_disk(struct vtblk_softc *);
+
+static int vtblk_open(struct disk *);
+static int vtblk_close(struct disk *);
+static int vtblk_ioctl(struct disk *, u_long, void *, int,
+ struct thread *);
+static int vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
+static void vtblk_strategy(struct bio *);
+
+static void vtblk_startio(struct vtblk_softc *);
+static struct vtblk_request * vtblk_bio_request(struct vtblk_softc *);
+static int vtblk_execute_request(struct vtblk_softc *,
+ struct vtblk_request *);
+
+static int vtblk_vq_intr(void *);
+static void vtblk_intr_task(void *, int);
+
+static void vtblk_stop(struct vtblk_softc *);
+
+static void vtblk_get_ident(struct vtblk_softc *);
+static void vtblk_prepare_dump(struct vtblk_softc *);
+static int vtblk_write_dump(struct vtblk_softc *, void *, off_t, size_t);
+static int vtblk_flush_dump(struct vtblk_softc *);
+static int vtblk_poll_request(struct vtblk_softc *,
+ struct vtblk_request *);
+
+static void vtblk_drain_vq(struct vtblk_softc *, int);
+static void vtblk_drain(struct vtblk_softc *);
+
+static int vtblk_alloc_requests(struct vtblk_softc *);
+static void vtblk_free_requests(struct vtblk_softc *);
+static struct vtblk_request * vtblk_dequeue_request(struct vtblk_softc *);
+static void vtblk_enqueue_request(struct vtblk_softc *,
+ struct vtblk_request *);
+
+static struct vtblk_request * vtblk_dequeue_ready(struct vtblk_softc *);
+static void vtblk_enqueue_ready(struct vtblk_softc *,
+ struct vtblk_request *);
+
+static void vtblk_bio_error(struct bio *, int);
+
+/* Tunables. */
+static int vtblk_no_ident = 0;
+TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
+
+/* Features desired/implemented by this driver. */
+#define VTBLK_FEATURES \
+ (VIRTIO_BLK_F_BARRIER | \
+ VIRTIO_BLK_F_SIZE_MAX | \
+ VIRTIO_BLK_F_SEG_MAX | \
+ VIRTIO_BLK_F_GEOMETRY | \
+ VIRTIO_BLK_F_RO | \
+ VIRTIO_BLK_F_BLK_SIZE | \
+ VIRTIO_BLK_F_FLUSH | \
+ VIRTIO_RING_F_INDIRECT_DESC)
+
+#define VTBLK_MTX(_sc) &(_sc)->vtblk_mtx
+#define VTBLK_LOCK_INIT(_sc, _name) \
+ mtx_init(VTBLK_MTX((_sc)), (_name), \
+ "VTBLK Lock", MTX_DEF)
+#define VTBLK_LOCK(_sc) mtx_lock(VTBLK_MTX((_sc)))
+#define VTBLK_TRYLOCK(_sc) mtx_trylock(VTBLK_MTX((_sc)))
+#define VTBLK_UNLOCK(_sc) mtx_unlock(VTBLK_MTX((_sc)))
+#define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc)))
+#define VTBLK_LOCK_ASSERT(_sc) mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
+#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
+ mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)
+
+#define VTBLK_BIO_SEGMENTS(_bp) sglist_count((_bp)->bio_data, (_bp)->bio_bcount)
+
+#define VTBLK_DISK_NAME "vtbd"
+
+/*
+ * Each block request uses at least two segments - one for the header
+ * and one for the status.
+ */
+#define VTBLK_MIN_SEGMENTS 2
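+
+/*
+ * For illustration (a sketch of what vtblk_execute_request() builds
+ * below): a BIO_READ uses a readable header segment, writable data
+ * segments the host fills in, and a final writable status byte:
+ *
+ *	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));
+ *	sglist_append(sg, bp->bio_data, bp->bio_bcount);
+ *	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
+ */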
+
+static uma_zone_t vtblk_req_zone;
+
+static device_method_t vtblk_methods[] = {
+ /* Device methods. */
+ DEVMETHOD(device_probe, vtblk_probe),
+ DEVMETHOD(device_attach, vtblk_attach),
+ DEVMETHOD(device_detach, vtblk_detach),
+ DEVMETHOD(device_suspend, vtblk_suspend),
+ DEVMETHOD(device_resume, vtblk_resume),
+ DEVMETHOD(device_shutdown, vtblk_shutdown),
+
+ { 0, 0 }
+};
+
+static driver_t vtblk_driver = {
+ "vtblk",
+ vtblk_methods,
+ sizeof(struct vtblk_softc)
+};
+static devclass_t vtblk_devclass;
+
+DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
+ vtblk_modevent, 0);
+MODULE_VERSION(virtio_blk, 1);
+MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);
+
+static int
+vtblk_modevent(module_t mod, int type, void *unused)
+{
+ int error;
+
+ error = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ vtblk_req_zone = uma_zcreate("vtblk_request",
+ sizeof(struct vtblk_request),
+ NULL, NULL, NULL, NULL, 0, 0);
+ break;
+ case MOD_QUIESCE:
+ case MOD_UNLOAD:
+ if (uma_zone_get_cur(vtblk_req_zone) > 0)
+ error = EBUSY;
+ else if (type == MOD_UNLOAD) {
+ uma_zdestroy(vtblk_req_zone);
+ vtblk_req_zone = NULL;
+ }
+ break;
+ case MOD_SHUTDOWN:
+ break;
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+
+ return (error);
+}
+
+static int
+vtblk_probe(device_t dev)
+{
+
+ if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
+ return (ENXIO);
+
+ device_set_desc(dev, "VirtIO Block Adapter");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+vtblk_attach(device_t dev)
+{
+ struct vtblk_softc *sc;
+ struct virtio_blk_config blkcfg;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->vtblk_dev = dev;
+ sc->vtblk_unit = device_get_unit(dev);
+
+ VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
+
+ bioq_init(&sc->vtblk_bioq);
+ TAILQ_INIT(&sc->vtblk_req_free);
+ TAILQ_INIT(&sc->vtblk_req_ready);
+
+ virtio_set_feature_desc(dev, vtblk_feature_desc);
+ vtblk_negotiate_features(sc);
+
+ if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
+ sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
+
+ if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
+ sc->vtblk_flags |= VTBLK_FLAG_READONLY;
+
+ /* Get local copy of config. */
+ if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) == 0) {
+ bzero(&blkcfg, sizeof(struct virtio_blk_config));
+ virtio_read_device_config(dev, 0, &blkcfg,
+ offsetof(struct virtio_blk_config, physical_block_exp));
+ } else
+ virtio_read_device_config(dev, 0, &blkcfg,
+ sizeof(struct virtio_blk_config));
+
+ /*
+ * With the current sglist(9) implementation, it is not easy
+ * for us to support a maximum segment size as adjacent
+ * segments are coalesced. For now, just make sure it's larger
+ * than the maximum supported transfer size.
+ */
+ if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
+ if (blkcfg.size_max < MAXPHYS) {
+ error = ENOTSUP;
+ device_printf(dev, "host requires unsupported "
+ "maximum segment size feature\n");
+ goto fail;
+ }
+ }
+
+ sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
+
+ /*
+ * Allocate working sglist. The number of segments may be too
+ * large to safely store on the stack.
+ */
+ sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
+ if (sc->vtblk_sglist == NULL) {
+ error = ENOMEM;
+ device_printf(dev, "cannot allocate sglist\n");
+ goto fail;
+ }
+
+ error = vtblk_alloc_virtqueue(sc);
+ if (error) {
+ device_printf(dev, "cannot allocate virtqueue\n");
+ goto fail;
+ }
+
+ error = vtblk_alloc_requests(sc);
+ if (error) {
+ device_printf(dev, "cannot preallocate requests\n");
+ goto fail;
+ }
+
+ vtblk_alloc_disk(sc, &blkcfg);
+
+ TASK_INIT(&sc->vtblk_intr_task, 0, vtblk_intr_task, sc);
+ sc->vtblk_tq = taskqueue_create_fast("vtblk_taskq", M_NOWAIT,
+ taskqueue_thread_enqueue, &sc->vtblk_tq);
+ if (sc->vtblk_tq == NULL) {
+ error = ENOMEM;
+ device_printf(dev, "cannot allocate taskqueue\n");
+ goto fail;
+ }
+ taskqueue_start_threads(&sc->vtblk_tq, 1, PI_DISK, "%s taskq",
+ device_get_nameunit(dev));
+
+ error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
+ if (error) {
+ device_printf(dev, "cannot setup virtqueue interrupt\n");
+ goto fail;
+ }
+
+ vtblk_create_disk(sc);
+
+ virtqueue_enable_intr(sc->vtblk_vq);
+
+fail:
+ if (error)
+ vtblk_detach(dev);
+
+ return (error);
+}
+
+static int
+vtblk_detach(device_t dev)
+{
+ struct vtblk_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ VTBLK_LOCK(sc);
+ sc->vtblk_flags |= VTBLK_FLAG_DETACHING;
+ if (device_is_attached(dev))
+ vtblk_stop(sc);
+ VTBLK_UNLOCK(sc);
+
+ if (sc->vtblk_tq != NULL) {
+ taskqueue_drain(sc->vtblk_tq, &sc->vtblk_intr_task);
+ taskqueue_free(sc->vtblk_tq);
+ sc->vtblk_tq = NULL;
+ }
+
+ vtblk_drain(sc);
+
+ if (sc->vtblk_disk != NULL) {
+ disk_destroy(sc->vtblk_disk);
+ sc->vtblk_disk = NULL;
+ }
+
+ if (sc->vtblk_sglist != NULL) {
+ sglist_free(sc->vtblk_sglist);
+ sc->vtblk_sglist = NULL;
+ }
+
+ VTBLK_LOCK_DESTROY(sc);
+
+ return (0);
+}
+
+static int
+vtblk_suspend(device_t dev)
+{
+ struct vtblk_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ VTBLK_LOCK(sc);
+ sc->vtblk_flags |= VTBLK_FLAG_SUSPENDED;
+ /* TODO Wait for any inflight IO to complete? */
+ VTBLK_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+vtblk_resume(device_t dev)
+{
+ struct vtblk_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ VTBLK_LOCK(sc);
+ sc->vtblk_flags &= ~VTBLK_FLAG_SUSPENDED;
+ /* TODO Resume IO? */
+ VTBLK_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+vtblk_shutdown(device_t dev)
+{
+
+ return (0);
+}
+
+static int
+vtblk_open(struct disk *dp)
+{
+ struct vtblk_softc *sc;
+
+ if ((sc = dp->d_drv1) == NULL)
+ return (ENXIO);
+
+ return (sc->vtblk_flags & VTBLK_FLAG_DETACHING ? ENXIO : 0);
+}
+
+static int
+vtblk_close(struct disk *dp)
+{
+ struct vtblk_softc *sc;
+
+ if ((sc = dp->d_drv1) == NULL)
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
+vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
+ struct thread *td)
+{
+ struct vtblk_softc *sc;
+
+ if ((sc = dp->d_drv1) == NULL)
+ return (ENXIO);
+
+ return (ENOTTY);
+}
+
+static int
+vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
+ size_t length)
+{
+ struct disk *dp;
+ struct vtblk_softc *sc;
+ int error;
+
+ dp = arg;
+ error = 0;
+
+ if ((sc = dp->d_drv1) == NULL)
+ return (ENXIO);
+
+ if (VTBLK_TRYLOCK(sc) == 0) {
+ device_printf(sc->vtblk_dev,
+ "softc already locked, cannot dump...\n");
+ return (EBUSY);
+ }
+
+ if ((sc->vtblk_flags & VTBLK_FLAG_DUMPING) == 0) {
+ vtblk_prepare_dump(sc);
+ sc->vtblk_flags |= VTBLK_FLAG_DUMPING;
+ }
+
+ if (length > 0)
+ error = vtblk_write_dump(sc, virtual, offset, length);
+ else if (virtual == NULL && offset == 0)
+ error = vtblk_flush_dump(sc);
+
+ VTBLK_UNLOCK(sc);
+
+ return (error);
+}
+
+static void
+vtblk_strategy(struct bio *bp)
+{
+ struct vtblk_softc *sc;
+
+ if ((sc = bp->bio_disk->d_drv1) == NULL) {
+ vtblk_bio_error(bp, EINVAL);
+ return;
+ }
+
+ /*
+ * Fail any write if the device is read-only. Unfortunately, there
+ * does not seem to be a better way to report our read-only status
+ * to GEOM above.
+ */
+ if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
+ (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
+ vtblk_bio_error(bp, EROFS);
+ return;
+ }
+
+ /*
+ * Prevent read/write buffers spanning too many segments from
+ * getting into the queue. This should only trip if d_maxsize
+ * was incorrectly set.
+ */
+ if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
+ KASSERT(VTBLK_BIO_SEGMENTS(bp) <= sc->vtblk_max_nsegs -
+ VTBLK_MIN_SEGMENTS,
+ ("bio spanned too many segments: %d, max: %d",
+ VTBLK_BIO_SEGMENTS(bp),
+ sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS));
+ }
+
+ VTBLK_LOCK(sc);
+ if ((sc->vtblk_flags & VTBLK_FLAG_DETACHING) == 0) {
+ bioq_disksort(&sc->vtblk_bioq, bp);
+ vtblk_startio(sc);
+ } else
+ vtblk_bio_error(bp, ENXIO);
+ VTBLK_UNLOCK(sc);
+}
+
+static void
+vtblk_negotiate_features(struct vtblk_softc *sc)
+{
+ device_t dev;
+ uint64_t features;
+
+ dev = sc->vtblk_dev;
+ features = VTBLK_FEATURES;
+
+ sc->vtblk_features = virtio_negotiate_features(dev, features);
+}
+
+static int
+vtblk_maximum_segments(struct vtblk_softc *sc,
+ struct virtio_blk_config *blkcfg)
+{
+ device_t dev;
+ int nsegs;
+
+ dev = sc->vtblk_dev;
+ nsegs = VTBLK_MIN_SEGMENTS;
+
+ if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
+ nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
+ if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
+ nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
+ } else
+ nsegs += 1;
+
+ return (nsegs);
+}
+
+static int
+vtblk_alloc_virtqueue(struct vtblk_softc *sc)
+{
+ device_t dev;
+ struct vq_alloc_info vq_info;
+
+ dev = sc->vtblk_dev;
+
+ VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
+ vtblk_vq_intr, sc, &sc->vtblk_vq,
+ "%s request", device_get_nameunit(dev));
+
+ return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
+}
+
+static void
+vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
+{
+ device_t dev;
+ struct disk *dp;
+
+ dev = sc->vtblk_dev;
+
+ sc->vtblk_disk = dp = disk_alloc();
+ dp->d_open = vtblk_open;
+ dp->d_close = vtblk_close;
+ dp->d_ioctl = vtblk_ioctl;
+ dp->d_strategy = vtblk_strategy;
+ dp->d_name = VTBLK_DISK_NAME;
+ dp->d_unit = sc->vtblk_unit;
+ dp->d_drv1 = sc;
+
+ if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
+ dp->d_dump = vtblk_dump;
+
+ /* Capacity is always in 512-byte units. */
+ dp->d_mediasize = blkcfg->capacity * 512;
+
+ if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
+ sc->vtblk_sector_size = blkcfg->blk_size;
+ else
+ sc->vtblk_sector_size = 512;
+ dp->d_sectorsize = sc->vtblk_sector_size;
+
+ /*
+ * The VirtIO maximum I/O size is given in terms of segments.
+ * However, FreeBSD limits I/O size by logical buffer size, not
+ * by physically contiguous pages. Therefore, we have to assume
+ * no pages are contiguous. This may impose an artificially low
+ * maximum I/O size. But in practice, since QEMU advertises 128
+ * segments, this gives us a maximum I/O size of 125 * PAGE_SIZE,
+ * which is typically greater than MAXPHYS. Eventually we should
+ * just advertise MAXPHYS and split buffers that are too big.
+ *
+ * Note we must subtract one additional segment in case of
+ * non-page-aligned buffers.
+ */
+ dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
+ PAGE_SIZE;
+ if (dp->d_maxsize < PAGE_SIZE)
+ dp->d_maxsize = PAGE_SIZE; /* XXX */
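+
+ /*
+ * Worked example (assuming 4KB pages and following the figures in
+ * the comment above): (128 - VTBLK_MIN_SEGMENTS - 1) * PAGE_SIZE =
+ * 125 * 4096 = 512000 bytes.
+ */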
+
+ if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
+ dp->d_fwsectors = blkcfg->geometry.sectors;
+ dp->d_fwheads = blkcfg->geometry.heads;
+ }
+
+ if (virtio_with_feature(dev, VIRTIO_BLK_F_FLUSH))
+ dp->d_flags |= DISKFLAG_CANFLUSHCACHE;
+}
+
+static void
+vtblk_create_disk(struct vtblk_softc *sc)
+{
+ struct disk *dp;
+
+ dp = sc->vtblk_disk;
+
+ /*
+ * Retrieving the identification string must be done after
+ * the virtqueue interrupt is set up; otherwise it will hang.
+ */
+ vtblk_get_ident(sc);
+
+ device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
+ (uintmax_t) dp->d_mediasize >> 20,
+ (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
+ dp->d_sectorsize);
+
+ disk_create(dp, DISK_VERSION);
+}
+
+static void
+vtblk_startio(struct vtblk_softc *sc)
+{
+ struct virtqueue *vq;
+ struct vtblk_request *req;
+ int enq;
+
+ vq = sc->vtblk_vq;
+ enq = 0;
+
+ VTBLK_LOCK_ASSERT(sc);
+
+ if (sc->vtblk_flags & VTBLK_FLAG_SUSPENDED)
+ return;
+
+ while (!virtqueue_full(vq)) {
+ if ((req = vtblk_dequeue_ready(sc)) == NULL)
+ req = vtblk_bio_request(sc);
+ if (req == NULL)
+ break;
+
+ if (vtblk_execute_request(sc, req) != 0) {
+ vtblk_enqueue_ready(sc, req);
+ break;
+ }
+
+ enq++;
+ }
+
+ if (enq > 0)
+ virtqueue_notify(vq);
+}
+
+static struct vtblk_request *
+vtblk_bio_request(struct vtblk_softc *sc)
+{
+ struct bio_queue_head *bioq;
+ struct vtblk_request *req;
+ struct bio *bp;
+
+ bioq = &sc->vtblk_bioq;
+
+ if (bioq_first(bioq) == NULL)
+ return (NULL);
+
+ req = vtblk_dequeue_request(sc);
+ if (req == NULL)
+ return (NULL);
+
+ bp = bioq_takefirst(bioq);
+ req->vbr_bp = bp;
+ req->vbr_ack = -1;
+ req->vbr_hdr.ioprio = 1;
+
+ switch (bp->bio_cmd) {
+ case BIO_FLUSH:
+ req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
+ break;
+ case BIO_READ:
+ req->vbr_hdr.type = VIRTIO_BLK_T_IN;
+ req->vbr_hdr.sector = bp->bio_offset / 512;
+ break;
+ case BIO_WRITE:
+ req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
+ req->vbr_hdr.sector = bp->bio_offset / 512;
+ break;
+ default:
+ KASSERT(0, ("bio with unhandled cmd: %d", bp->bio_cmd));
+ req->vbr_hdr.type = -1;
+ break;
+ }
+
+ if (bp->bio_flags & BIO_ORDERED)
+ req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
+
+ return (req);
+}
+
+static int
+vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request *req)
+{
+ struct sglist *sg;
+ struct bio *bp;
+ int writable, error;
+
+ sg = sc->vtblk_sglist;
+ bp = req->vbr_bp;
+ writable = 0;
+
+ VTBLK_LOCK_ASSERT(sc);
+
+ sglist_reset(sg);
+ error = sglist_append(sg, &req->vbr_hdr,
+ sizeof(struct virtio_blk_outhdr));
+ KASSERT(error == 0, ("error adding header to sglist"));
+ KASSERT(sg->sg_nseg == 1,
+ ("header spanned multiple segments: %d", sg->sg_nseg));
+
+ if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
+ error = sglist_append(sg, bp->bio_data, bp->bio_bcount);
+ KASSERT(error == 0, ("error adding buffer to sglist"));
+
+ /* BIO_READ means the host writes into our buffer. */
+ if (bp->bio_cmd == BIO_READ)
+ writable += sg->sg_nseg - 1;
+ }
+
+ error = sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
+ KASSERT(error == 0, ("error adding ack to sglist"));
+ writable++;
+
+ KASSERT(sg->sg_nseg >= VTBLK_MIN_SEGMENTS,
+ ("fewer than min segments: %d", sg->sg_nseg));
+
+ error = virtqueue_enqueue(sc->vtblk_vq, req, sg,
+ sg->sg_nseg - writable, writable);
+
+ return (error);
+}
+
+static int
+vtblk_vq_intr(void *xsc)
+{
+ struct vtblk_softc *sc;
+
+ sc = xsc;
+
+ virtqueue_disable_intr(sc->vtblk_vq);
+ taskqueue_enqueue_fast(sc->vtblk_tq, &sc->vtblk_intr_task);
+
+ return (1);
+}
+
+static void
+vtblk_intr_task(void *arg, int pending)
+{
+ struct vtblk_softc *sc;
+ struct vtblk_request *req;
+ struct virtqueue *vq;
+ struct bio *bp;
+
+ sc = arg;
+ vq = sc->vtblk_vq;
+
+ VTBLK_LOCK(sc);
+ if (sc->vtblk_flags & VTBLK_FLAG_DETACHING) {
+ VTBLK_UNLOCK(sc);
+ return;
+ }
+
+ while ((req = virtqueue_dequeue(vq, NULL)) != NULL) {
+ bp = req->vbr_bp;
+
+ if (req->vbr_ack == VIRTIO_BLK_S_OK)
+ bp->bio_resid = 0;
+ else {
+ bp->bio_flags |= BIO_ERROR;
+ if (req->vbr_ack == VIRTIO_BLK_S_UNSUPP)
+ bp->bio_error = ENOTSUP;
+ else
+ bp->bio_error = EIO;
+ }
+
+ biodone(bp);
+ vtblk_enqueue_request(sc, req);
+ }
+
+ vtblk_startio(sc);
+
+ if (virtqueue_enable_intr(vq) != 0) {
+ virtqueue_disable_intr(vq);
+ VTBLK_UNLOCK(sc);
+ taskqueue_enqueue_fast(sc->vtblk_tq,
+ &sc->vtblk_intr_task);
+ return;
+ }
+
+ VTBLK_UNLOCK(sc);
+}
+
+static void
+vtblk_stop(struct vtblk_softc *sc)
+{
+
+ virtqueue_disable_intr(sc->vtblk_vq);
+ virtio_stop(sc->vtblk_dev);
+}
+
+static void
+vtblk_get_ident(struct vtblk_softc *sc)
+{
+ struct bio buf;
+ struct disk *dp;
+ struct vtblk_request *req;
+ int len, error;
+
+ dp = sc->vtblk_disk;
+ len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);
+
+ if (vtblk_no_ident != 0)
+ return;
+
+ req = vtblk_dequeue_request(sc);
+ if (req == NULL)
+ return;
+
+ req->vbr_ack = -1;
+ req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
+ req->vbr_hdr.ioprio = 1;
+ req->vbr_hdr.sector = 0;
+
+ req->vbr_bp = &buf;
+ bzero(&buf, sizeof(struct bio));
+
+ buf.bio_cmd = BIO_READ;
+ buf.bio_data = dp->d_ident;
+ buf.bio_bcount = len;
+
+ VTBLK_LOCK(sc);
+ error = vtblk_poll_request(sc, req);
+ vtblk_enqueue_request(sc, req);
+ VTBLK_UNLOCK(sc);
+
+ if (error) {
+ device_printf(sc->vtblk_dev,
+ "error getting device identifier: %d\n", error);
+ }
+}
+
+static void
+vtblk_prepare_dump(struct vtblk_softc *sc)
+{
+ device_t dev;
+ struct virtqueue *vq;
+
+ dev = sc->vtblk_dev;
+ vq = sc->vtblk_vq;
+
+ vtblk_stop(sc);
+
+ /*
+ * Drain all requests caught in-flight in the virtqueue,
+ * skipping biodone(). When dumping, only one request is
+ * outstanding at a time, and we just poll the virtqueue
+ * for the response.
+ */
+ vtblk_drain_vq(sc, 1);
+
+ if (virtio_reinit(dev, sc->vtblk_features) != 0)
+ panic("cannot reinit VirtIO block device during dump");
+
+ virtqueue_disable_intr(vq);
+ virtio_reinit_complete(dev);
+}
+
+static int
+vtblk_write_dump(struct vtblk_softc *sc, void *virtual, off_t offset,
+ size_t length)
+{
+ struct bio buf;
+ struct vtblk_request *req;
+
+ req = &sc->vtblk_dump_request;
+ req->vbr_ack = -1;
+ req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
+ req->vbr_hdr.ioprio = 1;
+ req->vbr_hdr.sector = offset / 512;
+
+ req->vbr_bp = &buf;
+ bzero(&buf, sizeof(struct bio));
+
+ buf.bio_cmd = BIO_WRITE;
+ buf.bio_data = virtual;
+ buf.bio_bcount = length;
+
+ return (vtblk_poll_request(sc, req));
+}
+
+static int
+vtblk_flush_dump(struct vtblk_softc *sc)
+{
+ struct bio buf;
+ struct vtblk_request *req;
+
+ req = &sc->vtblk_dump_request;
+ req->vbr_ack = -1;
+ req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
+ req->vbr_hdr.ioprio = 1;
+ req->vbr_hdr.sector = 0;
+
+ req->vbr_bp = &buf;
+ bzero(&buf, sizeof(struct bio));
+
+ buf.bio_cmd = BIO_FLUSH;
+
+ return (vtblk_poll_request(sc, req));
+}
+
+static int
+vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
+{
+ device_t dev;
+ struct virtqueue *vq;
+ struct vtblk_request *r;
+ int error;
+
+ dev = sc->vtblk_dev;
+ vq = sc->vtblk_vq;
+
+ if (!virtqueue_empty(vq))
+ return (EBUSY);
+
+ error = vtblk_execute_request(sc, req);
+ if (error)
+ return (error);
+
+ virtqueue_notify(vq);
+
+ r = virtqueue_poll(vq, NULL);
+ KASSERT(r == req, ("unexpected request response"));
+
+ if (req->vbr_ack != VIRTIO_BLK_S_OK) {
+ error = req->vbr_ack == VIRTIO_BLK_S_UNSUPP ? ENOTSUP : EIO;
+ if (bootverbose)
+ device_printf(dev,
+ "vtblk_poll_request: IO error: %d\n", error);
+ }
+
+ return (error);
+}
+
+static void
+vtblk_drain_vq(struct vtblk_softc *sc, int skip_done)
+{
+ struct virtqueue *vq;
+ struct vtblk_request *req;
+ int last;
+
+ vq = sc->vtblk_vq;
+ last = 0;
+
+ while ((req = virtqueue_drain(vq, &last)) != NULL) {
+ if (!skip_done)
+ vtblk_bio_error(req->vbr_bp, ENXIO);
+
+ vtblk_enqueue_request(sc, req);
+ }
+
+ KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
+}
+
+static void
+vtblk_drain(struct vtblk_softc *sc)
+{
+ struct bio_queue_head *bioq;
+ struct vtblk_request *req;
+ struct bio *bp;
+
+ bioq = &sc->vtblk_bioq;
+
+ if (sc->vtblk_vq != NULL)
+ vtblk_drain_vq(sc, 0);
+
+ while ((req = vtblk_dequeue_ready(sc)) != NULL) {
+ vtblk_bio_error(req->vbr_bp, ENXIO);
+ vtblk_enqueue_request(sc, req);
+ }
+
+ while (bioq_first(bioq) != NULL) {
+ bp = bioq_takefirst(bioq);
+ vtblk_bio_error(bp, ENXIO);
+ }
+
+ vtblk_free_requests(sc);
+}
+
+static int
+vtblk_alloc_requests(struct vtblk_softc *sc)
+{
+ struct vtblk_request *req;
+ int i, size;
+
+ size = virtqueue_size(sc->vtblk_vq);
+
+ /*
+ * Preallocate sufficient requests to keep the virtqueue full. Each
+ * request consumes VTBLK_MIN_SEGMENTS or more descriptors, so reduce
+ * the number allocated when indirect descriptors are not available.
+ */
+ if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
+ size /= VTBLK_MIN_SEGMENTS;
+
+ for (i = 0; i < size; i++) {
+ req = uma_zalloc(vtblk_req_zone, M_NOWAIT);
+ if (req == NULL)
+ return (ENOMEM);
+
+ sc->vtblk_request_count++;
+ vtblk_enqueue_request(sc, req);
+ }
+
+ return (0);
+}
+
+static void
+vtblk_free_requests(struct vtblk_softc *sc)
+{
+ struct vtblk_request *req;
+
+ while ((req = vtblk_dequeue_request(sc)) != NULL) {
+ sc->vtblk_request_count--;
+ uma_zfree(vtblk_req_zone, req);
+ }
+
+ KASSERT(sc->vtblk_request_count == 0, ("leaked requests"));
+}
+
+static struct vtblk_request *
+vtblk_dequeue_request(struct vtblk_softc *sc)
+{
+ struct vtblk_request *req;
+
+ req = TAILQ_FIRST(&sc->vtblk_req_free);
+ if (req != NULL)
+ TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
+
+ return (req);
+}
+
+static void
+vtblk_enqueue_request(struct vtblk_softc *sc, struct vtblk_request *req)
+{
+
+ bzero(req, sizeof(struct vtblk_request));
+ TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
+}
+
+static struct vtblk_request *
+vtblk_dequeue_ready(struct vtblk_softc *sc)
+{
+ struct vtblk_request *req;
+
+ req = TAILQ_FIRST(&sc->vtblk_req_ready);
+ if (req != NULL)
+ TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);
+
+ return (req);
+}
+
+static void
+vtblk_enqueue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
+{
+
+ TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
+}
+
+static void
+vtblk_bio_error(struct bio *bp, int error)
+{
+
+ biofinish(bp, NULL, error);
+}
diff --git a/sys/dev/virtio/block/virtio_blk.h b/sys/dev/virtio/block/virtio_blk.h
new file mode 100644
index 0000000..4fb32e0
--- /dev/null
+++ b/sys/dev/virtio/block/virtio_blk.h
@@ -0,0 +1,106 @@
+/*
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VIRTIO_BLK_H
+#define _VIRTIO_BLK_H
+
+#include <sys/types.h>
+
+/* Feature bits */
+#define VIRTIO_BLK_F_BARRIER 0x0001 /* Does host support barriers? */
+#define VIRTIO_BLK_F_SIZE_MAX 0x0002 /* Indicates maximum segment size */
+#define VIRTIO_BLK_F_SEG_MAX 0x0004 /* Indicates maximum # of segments */
+#define VIRTIO_BLK_F_GEOMETRY 0x0010 /* Legacy geometry available */
+#define VIRTIO_BLK_F_RO 0x0020 /* Disk is read-only */
+#define VIRTIO_BLK_F_BLK_SIZE 0x0040 /* Block size of disk is available */
+#define VIRTIO_BLK_F_SCSI 0x0080 /* Supports scsi command passthru */
+#define VIRTIO_BLK_F_FLUSH 0x0200 /* Cache flush command support */
+#define VIRTIO_BLK_F_TOPOLOGY 0x0400 /* Topology information is available */
+
+#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */
+
+struct virtio_blk_config {
+ /* The capacity (in 512-byte sectors). */
+ uint64_t capacity;
+ /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
+ uint32_t size_max;
+ /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
+ uint32_t seg_max;
+ /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
+ struct virtio_blk_geometry {
+ uint16_t cylinders;
+ uint8_t heads;
+ uint8_t sectors;
+ } geometry;
+
+ /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
+ uint32_t blk_size;
+
+ /* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */
+ /* exponent for physical block per logical block. */
+ uint8_t physical_block_exp;
+ /* alignment offset in logical blocks. */
+ uint8_t alignment_offset;
+ /* minimum I/O size without performance penalty in logical blocks. */
+ uint16_t min_io_size;
+ /* optimal sustained I/O size in logical blocks. */
+ uint32_t opt_io_size;
+} __packed;
+
+/*
+ * Command types
+ *
+ * Usage is a bit tricky as some bits are used as flags and some are not.
+ *
+ * Rules:
+ * VIRTIO_BLK_T_OUT may be combined with VIRTIO_BLK_T_SCSI_CMD or
+ * VIRTIO_BLK_T_BARRIER. VIRTIO_BLK_T_FLUSH is a command of its own
+ * and may not be combined with any of the other flags.
+ */
+
+/* These two define direction. */
+#define VIRTIO_BLK_T_IN 0
+#define VIRTIO_BLK_T_OUT 1
+
+/* This bit says it's a scsi command, not an actual read or write. */
+#define VIRTIO_BLK_T_SCSI_CMD 2
+
+/* Cache flush command */
+#define VIRTIO_BLK_T_FLUSH 4
+
+/* Get device ID command */
+#define VIRTIO_BLK_T_GET_ID 8
+
+/* Barrier before this op. */
+#define VIRTIO_BLK_T_BARRIER 0x80000000
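+
+/*
+ * For example (illustrative only), an ordered write combines the
+ * direction bit with the barrier flag, as vtblk_bio_request() does
+ * in the driver:
+ *
+ *	hdr.type = VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER;
+ */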
+
+/* This is the first element of the read scatter-gather list. */
+struct virtio_blk_outhdr {
+ /* VIRTIO_BLK_T* */
+ uint32_t type;
+ /* io priority. */
+ uint32_t ioprio;
+ /* Sector (i.e., 512-byte offset) */
+ uint64_t sector;
+};
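+
+/*
+ * For example (illustrative), the vtblk driver fills this header for
+ * a write at byte offset 'off' as:
+ *
+ *	hdr.type = VIRTIO_BLK_T_OUT;
+ *	hdr.ioprio = 1;
+ *	hdr.sector = off / 512;
+ */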
+
+struct virtio_scsi_inhdr {
+ uint32_t errors;
+ uint32_t data_len;
+ uint32_t sense_len;
+ uint32_t residual;
+};
+
+/* And this is the final byte of the write scatter-gather list. */
+#define VIRTIO_BLK_S_OK 0
+#define VIRTIO_BLK_S_IOERR 1
+#define VIRTIO_BLK_S_UNSUPP 2
+
+#endif /* _VIRTIO_BLK_H */
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
new file mode 100644
index 0000000..22becb1
--- /dev/null
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -0,0 +1,2746 @@
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Driver for VirtIO network devices. */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/random.h>
+#include <sys/sglist.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <vm/uma.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/if_media.h>
+#include <net/if_vlan_var.h>
+
+#include <net/bpf.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/udp.h>
+#include <netinet/tcp.h>
+#include <netinet/sctp.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/virtio/virtio.h>
+#include <dev/virtio/virtqueue.h>
+#include <dev/virtio/network/virtio_net.h>
+#include <dev/virtio/network/if_vtnetvar.h>
+
+#include "virtio_if.h"
+
+static int vtnet_modevent(module_t, int, void *);
+
+static int vtnet_probe(device_t);
+static int vtnet_attach(device_t);
+static int vtnet_detach(device_t);
+static int vtnet_suspend(device_t);
+static int vtnet_resume(device_t);
+static int vtnet_shutdown(device_t);
+static int vtnet_config_change(device_t);
+
+static void vtnet_negotiate_features(struct vtnet_softc *);
+static int vtnet_alloc_virtqueues(struct vtnet_softc *);
+static void vtnet_get_hwaddr(struct vtnet_softc *);
+static void vtnet_set_hwaddr(struct vtnet_softc *);
+static int vtnet_is_link_up(struct vtnet_softc *);
+static void vtnet_update_link_status(struct vtnet_softc *);
+static void vtnet_watchdog(struct vtnet_softc *);
+static void vtnet_config_change_task(void *, int);
+static int vtnet_change_mtu(struct vtnet_softc *, int);
+static int vtnet_ioctl(struct ifnet *, u_long, caddr_t);
+
+static int vtnet_init_rx_vq(struct vtnet_softc *);
+static void vtnet_free_rx_mbufs(struct vtnet_softc *);
+static void vtnet_free_tx_mbufs(struct vtnet_softc *);
+static void vtnet_free_ctrl_vq(struct vtnet_softc *);
+
+#ifdef DEVICE_POLLING
+static poll_handler_t vtnet_poll;
+#endif
+
+static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
+ struct mbuf **);
+static int vtnet_replace_rxbuf(struct vtnet_softc *,
+ struct mbuf *, int);
+static int vtnet_newbuf(struct vtnet_softc *);
+static void vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
+static void vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
+static int vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
+static void vtnet_vlan_tag_remove(struct mbuf *);
+static int vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
+ struct virtio_net_hdr *);
+static int vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
+static int vtnet_rxeof(struct vtnet_softc *, int, int *);
+static void vtnet_rx_intr_task(void *, int);
+static int vtnet_rx_vq_intr(void *);
+
+static void vtnet_txeof(struct vtnet_softc *);
+static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
+ struct virtio_net_hdr *);
+static int vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
+ struct vtnet_tx_header *);
+static int vtnet_encap(struct vtnet_softc *, struct mbuf **);
+static void vtnet_start_locked(struct ifnet *);
+static void vtnet_start(struct ifnet *);
+static void vtnet_tick(void *);
+static void vtnet_tx_intr_task(void *, int);
+static int vtnet_tx_vq_intr(void *);
+
+static void vtnet_stop(struct vtnet_softc *);
+static int vtnet_reinit(struct vtnet_softc *);
+static void vtnet_init_locked(struct vtnet_softc *);
+static void vtnet_init(void *);
+
+static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
+ struct sglist *, int, int);
+
+static void vtnet_rx_filter(struct vtnet_softc *sc);
+static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
+static int vtnet_set_promisc(struct vtnet_softc *, int);
+static int vtnet_set_allmulti(struct vtnet_softc *, int);
+static void vtnet_rx_filter_mac(struct vtnet_softc *);
+
+static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
+static void vtnet_rx_filter_vlan(struct vtnet_softc *);
+static void vtnet_set_vlan_filter(struct vtnet_softc *, int, uint16_t);
+static void vtnet_register_vlan(void *, struct ifnet *, uint16_t);
+static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
+
+static int vtnet_ifmedia_upd(struct ifnet *);
+static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+
+static void vtnet_add_statistics(struct vtnet_softc *);
+
+static int vtnet_enable_rx_intr(struct vtnet_softc *);
+static int vtnet_enable_tx_intr(struct vtnet_softc *);
+static void vtnet_disable_rx_intr(struct vtnet_softc *);
+static void vtnet_disable_tx_intr(struct vtnet_softc *);
+
+/* Tunables. */
+static int vtnet_csum_disable = 0;
+TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
+static int vtnet_tso_disable = 0;
+TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
+static int vtnet_lro_disable = 0;
+TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
+
+/*
+ * Reducing the number of transmit completed interrupts can
+ * improve performance. To do so, the define below keeps the
+ * Tx vq interrupt disabled and adds calls to vtnet_txeof()
+ * in the start and watchdog paths. The price to pay is that
+ * the m_free'ing of transmitted mbufs may be delayed until
+ * the watchdog fires.
+ */
+#define VTNET_TX_INTR_MODERATION
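+
+/*
+ * Simplified sketch of what the comment above describes (not the
+ * literal code): with the Tx interrupt left disabled, the start path
+ * reclaims completed buffers itself before enqueueing more frames:
+ *
+ *	vtnet_txeof(sc);
+ *	... enqueue new frames, then virtqueue_notify() ...
+ */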
+
+static uma_zone_t vtnet_tx_header_zone;
+
+static struct virtio_feature_desc vtnet_feature_desc[] = {
+ { VIRTIO_NET_F_CSUM, "TxChecksum" },
+ { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
+ { VIRTIO_NET_F_MAC, "MacAddress" },
+ { VIRTIO_NET_F_GSO, "TxAllGSO" },
+ { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" },
+ { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" },
+ { VIRTIO_NET_F_GUEST_ECN, "RxECN" },
+ { VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
+ { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
+ { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
+ { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
+ { VIRTIO_NET_F_HOST_UFO, "TxUFO" },
+ { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
+ { VIRTIO_NET_F_STATUS, "Status" },
+ { VIRTIO_NET_F_CTRL_VQ, "ControlVq" },
+ { VIRTIO_NET_F_CTRL_RX, "RxMode" },
+ { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" },
+ { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" },
+
+ { 0, NULL }
+};
+
+static device_method_t vtnet_methods[] = {
+ /* Device methods. */
+ DEVMETHOD(device_probe, vtnet_probe),
+ DEVMETHOD(device_attach, vtnet_attach),
+ DEVMETHOD(device_detach, vtnet_detach),
+ DEVMETHOD(device_suspend, vtnet_suspend),
+ DEVMETHOD(device_resume, vtnet_resume),
+ DEVMETHOD(device_shutdown, vtnet_shutdown),
+
+ /* VirtIO methods. */
+ DEVMETHOD(virtio_config_change, vtnet_config_change),
+
+ { 0, 0 }
+};
+
+static driver_t vtnet_driver = {
+ "vtnet",
+ vtnet_methods,
+ sizeof(struct vtnet_softc)
+};
+static devclass_t vtnet_devclass;
+
+DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
+ vtnet_modevent, 0);
+MODULE_VERSION(vtnet, 1);
+MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
+
+static int
+vtnet_modevent(module_t mod, int type, void *unused)
+{
+ int error;
+
+ error = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
+ sizeof(struct vtnet_tx_header),
+ NULL, NULL, NULL, NULL, 0, 0);
+ break;
+ case MOD_QUIESCE:
+ case MOD_UNLOAD:
+ if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
+ error = EBUSY;
+ else if (type == MOD_UNLOAD) {
+ uma_zdestroy(vtnet_tx_header_zone);
+ vtnet_tx_header_zone = NULL;
+ }
+ break;
+ case MOD_SHUTDOWN:
+ break;
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+
+ return (error);
+}
+
+static int
+vtnet_probe(device_t dev)
+{
+
+ if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
+ return (ENXIO);
+
+ device_set_desc(dev, "VirtIO Networking Adapter");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+vtnet_attach(device_t dev)
+{
+ struct vtnet_softc *sc;
+ struct ifnet *ifp;
+ int tx_size, error;
+
+ sc = device_get_softc(dev);
+ sc->vtnet_dev = dev;
+
+ VTNET_LOCK_INIT(sc);
+ callout_init_mtx(&sc->vtnet_tick_ch, VTNET_MTX(sc), 0);
+
+ ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
+ vtnet_ifmedia_sts);
+ ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
+ ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
+
+ vtnet_add_statistics(sc);
+
+ virtio_set_feature_desc(dev, vtnet_feature_desc);
+ vtnet_negotiate_features(sc);
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
+ sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
+ sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ } else
+ sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
+
+ sc->vtnet_rx_mbuf_size = MCLBYTES;
+ sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
+ sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
+ sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
+ if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
+ sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
+ }
+
+ vtnet_get_hwaddr(sc);
+
+ error = vtnet_alloc_virtqueues(sc);
+ if (error) {
+ device_printf(dev, "cannot allocate virtqueues\n");
+ goto fail;
+ }
+
+ ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "cannot allocate ifnet structure\n");
+ error = ENOSPC;
+ goto fail;
+ }
+
+ ifp->if_softc = sc;
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_init = vtnet_init;
+ ifp->if_start = vtnet_start;
+ ifp->if_ioctl = vtnet_ioctl;
+
+ sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq);
+ sc->vtnet_rx_process_limit = sc->vtnet_rx_size;
+
+ tx_size = virtqueue_size(sc->vtnet_tx_vq);
+ sc->vtnet_tx_size = tx_size;
+ IFQ_SET_MAXLEN(&ifp->if_snd, tx_size - 1);
+ ifp->if_snd.ifq_drv_maxlen = tx_size - 1;
+ IFQ_SET_READY(&ifp->if_snd);
+
+ ether_ifattach(ifp, sc->vtnet_hwaddr);
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
+ ifp->if_capabilities |= IFCAP_LINKSTATE;
+
+ /* Tell the upper layer(s) we support long frames. */
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+ ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
+ ifp->if_capabilities |= IFCAP_TXCSUM;
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
+ ifp->if_capabilities |= IFCAP_TSO4;
+ if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
+ ifp->if_capabilities |= IFCAP_TSO6;
+ if (ifp->if_capabilities & IFCAP_TSO)
+ ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
+ sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
+ }
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
+ ifp->if_capabilities |= IFCAP_RXCSUM;
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
+ ifp->if_capabilities |= IFCAP_LRO;
+ }
+
+ if (ifp->if_capabilities & IFCAP_HWCSUM) {
+ /*
+ * VirtIO does not support VLAN tagging, but we can fake
+ * it by inserting and removing the 802.1Q header during
+ * transmit and receive. We are then able to do checksum
+ * offloading of VLAN frames.
+ */
+ ifp->if_capabilities |=
+ IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
+ }
+
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /*
+ * Capabilities after here are not enabled by default.
+ */
+
+ if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
+ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+ sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+ vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
+ sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+ vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
+ }
+
+#ifdef DEVICE_POLLING
+ ifp->if_capabilities |= IFCAP_POLLING;
+#endif
+
+ TASK_INIT(&sc->vtnet_rx_intr_task, 0, vtnet_rx_intr_task, sc);
+ TASK_INIT(&sc->vtnet_tx_intr_task, 0, vtnet_tx_intr_task, sc);
+ TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);
+
+ sc->vtnet_tq = taskqueue_create_fast("vtnet_taskq", M_NOWAIT,
+ taskqueue_thread_enqueue, &sc->vtnet_tq);
+ if (sc->vtnet_tq == NULL) {
+ error = ENOMEM;
+ device_printf(dev, "cannot allocate taskqueue\n");
+ ether_ifdetach(ifp);
+ goto fail;
+ }
+ taskqueue_start_threads(&sc->vtnet_tq, 1, PI_NET, "%s taskq",
+ device_get_nameunit(dev));
+
+ error = virtio_setup_intr(dev, INTR_TYPE_NET);
+ if (error) {
+ device_printf(dev, "cannot setup virtqueue interrupts\n");
+ taskqueue_free(sc->vtnet_tq);
+ sc->vtnet_tq = NULL;
+ ether_ifdetach(ifp);
+ goto fail;
+ }
+
+ /*
+ * Device defaults to promiscuous mode for backwards
+ * compatibility. Turn it off if possible.
+ */
+ if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
+ VTNET_LOCK(sc);
+ if (vtnet_set_promisc(sc, 0) != 0) {
+ ifp->if_flags |= IFF_PROMISC;
+ device_printf(dev,
+ "cannot disable promiscuous mode\n");
+ }
+ VTNET_UNLOCK(sc);
+ } else
+ ifp->if_flags |= IFF_PROMISC;
+
+fail:
+ if (error)
+ vtnet_detach(dev);
+
+ return (error);
+}
+
+static int
+vtnet_detach(device_t dev)
+{
+ struct vtnet_softc *sc;
+ struct ifnet *ifp;
+
+ sc = device_get_softc(dev);
+ ifp = sc->vtnet_ifp;
+
+ KASSERT(mtx_initialized(VTNET_MTX(sc)),
+ ("vtnet mutex not initialized"));
+
+#ifdef DEVICE_POLLING
+ if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
+ ether_poll_deregister(ifp);
+#endif
+
+ if (device_is_attached(dev)) {
+ VTNET_LOCK(sc);
+ vtnet_stop(sc);
+ VTNET_UNLOCK(sc);
+
+ callout_drain(&sc->vtnet_tick_ch);
+ taskqueue_drain(taskqueue_fast, &sc->vtnet_cfgchg_task);
+
+ ether_ifdetach(ifp);
+ }
+
+ if (sc->vtnet_tq != NULL) {
+ taskqueue_drain(sc->vtnet_tq, &sc->vtnet_rx_intr_task);
+ taskqueue_drain(sc->vtnet_tq, &sc->vtnet_tx_intr_task);
+ taskqueue_free(sc->vtnet_tq);
+ sc->vtnet_tq = NULL;
+ }
+
+ if (sc->vtnet_vlan_attach != NULL) {
+ EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
+ sc->vtnet_vlan_attach = NULL;
+ }
+ if (sc->vtnet_vlan_detach != NULL) {
+ EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
+ sc->vtnet_vlan_detach = NULL;
+ }
+
+ if (ifp) {
+ if_free(ifp);
+ sc->vtnet_ifp = NULL;
+ }
+
+ if (sc->vtnet_rx_vq != NULL)
+ vtnet_free_rx_mbufs(sc);
+ if (sc->vtnet_tx_vq != NULL)
+ vtnet_free_tx_mbufs(sc);
+ if (sc->vtnet_ctrl_vq != NULL)
+ vtnet_free_ctrl_vq(sc);
+
+ ifmedia_removeall(&sc->vtnet_media);
+ VTNET_LOCK_DESTROY(sc);
+
+ return (0);
+}
+
+static int
+vtnet_suspend(device_t dev)
+{
+ struct vtnet_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ VTNET_LOCK(sc);
+ vtnet_stop(sc);
+ sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
+ VTNET_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+vtnet_resume(device_t dev)
+{
+ struct vtnet_softc *sc;
+ struct ifnet *ifp;
+
+ sc = device_get_softc(dev);
+ ifp = sc->vtnet_ifp;
+
+ VTNET_LOCK(sc);
+ if (ifp->if_flags & IFF_UP)
+ vtnet_init_locked(sc);
+ sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
+ VTNET_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+vtnet_shutdown(device_t dev)
+{
+
+ /*
+ * Suspend already does all of what we need to
+ * do here; we just never expect to be resumed.
+ */
+ return (vtnet_suspend(dev));
+}
+
+static int
+vtnet_config_change(device_t dev)
+{
+ struct vtnet_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ taskqueue_enqueue_fast(taskqueue_fast, &sc->vtnet_cfgchg_task);
+
+ return (1);
+}
+
+static void
+vtnet_negotiate_features(struct vtnet_softc *sc)
+{
+ device_t dev;
+ uint64_t mask, features;
+
+ dev = sc->vtnet_dev;
+ mask = 0;
+
+ if (vtnet_csum_disable)
+ mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
+
+ /*
+ * TSO and LRO are only available when their corresponding
+ * checksum offload feature is also negotiated.
+ */
+
+ if (vtnet_csum_disable || vtnet_tso_disable)
+ mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
+ VIRTIO_NET_F_HOST_ECN;
+
+ if (vtnet_csum_disable || vtnet_lro_disable)
+ mask |= VTNET_LRO_FEATURES;
+
+ features = VTNET_FEATURES & ~mask;
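+ /*
+ * With Tx interrupt moderation, VIRTIO_F_NOTIFY_ON_EMPTY asks the
+ * host to interrupt when it empties the Tx virtqueue, even while
+ * interrupts are otherwise suppressed.
+ */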
+#ifdef VTNET_TX_INTR_MODERATION
+ features |= VIRTIO_F_NOTIFY_ON_EMPTY;
+#endif
+ sc->vtnet_features = virtio_negotiate_features(dev, features);
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0 &&
+ virtio_with_feature(dev, VTNET_LRO_FEATURES)) {
+ /*
+ * LRO without mergeable buffers requires special care. This
+ * is not ideal because every receive buffer must be large
+ * enough to hold the maximum TCP packet, the Ethernet header,
+ * and the vtnet_rx_header. This requires up to 34 descriptors
+ * when using MCLBYTES clusters. If we do not have indirect
+ * descriptors, LRO is disabled since the virtqueue will not
+ * be able to contain very many receive buffers.
+ */
+ if (virtio_with_feature(dev,
+ VIRTIO_RING_F_INDIRECT_DESC) == 0) {
+ device_printf(dev,
+ "LRO disabled due to lack of both mergeable "
+ "buffers and indirect descriptors\n");
+
+ sc->vtnet_features = virtio_negotiate_features(dev,
+ features & ~VTNET_LRO_FEATURES);
+ } else
+ sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
+ }
+}
+
+static int
+vtnet_alloc_virtqueues(struct vtnet_softc *sc)
+{
+ device_t dev;
+ struct vq_alloc_info vq_info[3];
+ int nvqs, rxsegs;
+
+ dev = sc->vtnet_dev;
+ nvqs = 2;
+
+ /*
+ * Indirect descriptors are not needed for the Rx
+ * virtqueue when mergeable buffers are negotiated.
+ * The header is placed inline with the data, not
+ * in a separate descriptor, and mbuf clusters are
+ * always physically contiguous.
+ */
+ if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
+ rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ?
+ VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
+ } else
+ rxsegs = 0;
+
+ VQ_ALLOC_INFO_INIT(&vq_info[0], rxsegs,
+ vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
+ "%s receive", device_get_nameunit(dev));
+
+ VQ_ALLOC_INFO_INIT(&vq_info[1], VTNET_MAX_TX_SEGS,
+ vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
+ "%s transmit", device_get_nameunit(dev));
+
+ if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
+ nvqs++;
+
+ VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
+ &sc->vtnet_ctrl_vq, "%s control",
+ device_get_nameunit(dev));
+ }
+
+ return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
+}
+
+static void
+vtnet_get_hwaddr(struct vtnet_softc *sc)
+{
+ device_t dev;
+
+ dev = sc->vtnet_dev;
+
+ if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
+ virtio_read_device_config(dev,
+ offsetof(struct virtio_net_config, mac),
+ sc->vtnet_hwaddr, ETHER_ADDR_LEN);
+ } else {
+ /* Generate random locally administered unicast address. */
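+ /* 0xB2: locally administered bit set, multicast bit clear. */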
+ sc->vtnet_hwaddr[0] = 0xB2;
+ arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
+
+ vtnet_set_hwaddr(sc);
+ }
+}
+
+static void
+vtnet_set_hwaddr(struct vtnet_softc *sc)
+{
+ device_t dev;
+
+ dev = sc->vtnet_dev;
+
+ virtio_write_device_config(dev,
+ offsetof(struct virtio_net_config, mac),
+ sc->vtnet_hwaddr, ETHER_ADDR_LEN);
+}
+
+static int
+vtnet_is_link_up(struct vtnet_softc *sc)
+{
+ device_t dev;
+ struct ifnet *ifp;
+ uint16_t status;
+
+ dev = sc->vtnet_dev;
+ ifp = sc->vtnet_ifp;
+
+ VTNET_LOCK_ASSERT(sc);
+
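+ /* Without the status feature, the link is assumed always up. */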
+ if ((ifp->if_capenable & IFCAP_LINKSTATE) == 0)
+ return (1);
+
+ status = virtio_read_dev_config_2(dev,
+ offsetof(struct virtio_net_config, status));
+
+ return ((status & VIRTIO_NET_S_LINK_UP) != 0);
+}
+
+static void
+vtnet_update_link_status(struct vtnet_softc *sc)
+{
+ device_t dev;
+ struct ifnet *ifp;
+ int link;
+
+ dev = sc->vtnet_dev;
+ ifp = sc->vtnet_ifp;
+
+ link = vtnet_is_link_up(sc);
+
+ if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
+ sc->vtnet_flags |= VTNET_FLAG_LINK;
+ if (bootverbose)
+ device_printf(dev, "Link is up\n");
+
+ if_link_state_change(ifp, LINK_STATE_UP);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ vtnet_start_locked(ifp);
+ } else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
+ sc->vtnet_flags &= ~VTNET_FLAG_LINK;
+ if (bootverbose)
+ device_printf(dev, "Link is down\n");
+
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ }
+}
+
+static void
+vtnet_watchdog(struct vtnet_softc *sc)
+{
+ struct ifnet *ifp;
+
+ ifp = sc->vtnet_ifp;
+
+#ifdef VTNET_TX_INTR_MODERATION
+ vtnet_txeof(sc);
+#endif
+
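+ /*
+ * The timer is armed when frames are enqueued and cleared by
+ * vtnet_txeof() once the Tx virtqueue drains, so only time out
+ * when an armed timer counts down to zero.
+ */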
+ if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
+ return;
+
+ if_printf(ifp, "watchdog timeout -- resetting\n");
+#ifdef VTNET_DEBUG
+ virtqueue_dump(sc->vtnet_tx_vq);
+#endif
+ ifp->if_oerrors++;
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ vtnet_init_locked(sc);
+}
+
+static void
+vtnet_config_change_task(void *arg, int pending)
+{
+ struct vtnet_softc *sc;
+
+ sc = arg;
+
+ VTNET_LOCK(sc);
+ vtnet_update_link_status(sc);
+ VTNET_UNLOCK(sc);
+}
+
+static int
+vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct vtnet_softc *sc;
+ struct ifreq *ifr;
+ int reinit, mask, error;
+
+ sc = ifp->if_softc;
+ ifr = (struct ifreq *) data;
+ reinit = 0;
+ error = 0;
+
+ switch (cmd) {
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
+ error = EINVAL;
+ else if (ifp->if_mtu != ifr->ifr_mtu) {
+ VTNET_LOCK(sc);
+ error = vtnet_change_mtu(sc, ifr->ifr_mtu);
+ VTNET_UNLOCK(sc);
+ }
+ break;
+
+ case SIOCSIFFLAGS:
+ VTNET_LOCK(sc);
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ vtnet_stop(sc);
+ } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if ((ifp->if_flags ^ sc->vtnet_if_flags) &
+ (IFF_PROMISC | IFF_ALLMULTI)) {
+ if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
+ vtnet_rx_filter(sc);
+ else
+ error = ENOTSUP;
+ }
+ } else
+ vtnet_init_locked(sc);
+
+ if (error == 0)
+ sc->vtnet_if_flags = ifp->if_flags;
+ VTNET_UNLOCK(sc);
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ VTNET_LOCK(sc);
+ if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING))
+ vtnet_rx_filter_mac(sc);
+ VTNET_UNLOCK(sc);
+ break;
+
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
+ break;
+
+ case SIOCSIFCAP:
+ mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+
+#ifdef DEVICE_POLLING
+ if (mask & IFCAP_POLLING) {
+ if (ifr->ifr_reqcap & IFCAP_POLLING) {
+ error = ether_poll_register(vtnet_poll, ifp);
+ if (error)
+ break;
+
+ VTNET_LOCK(sc);
+ vtnet_disable_rx_intr(sc);
+ vtnet_disable_tx_intr(sc);
+ ifp->if_capenable |= IFCAP_POLLING;
+ VTNET_UNLOCK(sc);
+ } else {
+ error = ether_poll_deregister(ifp);
+
+ /* Enable interrupts even in error case. */
+ VTNET_LOCK(sc);
+ vtnet_enable_tx_intr(sc);
+ vtnet_enable_rx_intr(sc);
+ ifp->if_capenable &= ~IFCAP_POLLING;
+ VTNET_UNLOCK(sc);
+ }
+ }
+#endif
+ VTNET_LOCK(sc);
+
+ if (mask & IFCAP_TXCSUM) {
+ ifp->if_capenable ^= IFCAP_TXCSUM;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
+ else
+ ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
+ }
+
+ if (mask & IFCAP_TSO4) {
+ ifp->if_capenable ^= IFCAP_TSO4;
+ if (ifp->if_capenable & IFCAP_TSO4)
+ ifp->if_hwassist |= CSUM_TSO;
+ else
+ ifp->if_hwassist &= ~CSUM_TSO;
+ }
+
+ if (mask & IFCAP_RXCSUM) {
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ reinit = 1;
+ }
+
+ if (mask & IFCAP_LRO) {
+ ifp->if_capenable ^= IFCAP_LRO;
+ reinit = 1;
+ }
+
+ if (mask & IFCAP_VLAN_HWFILTER) {
+ ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+ reinit = 1;
+ }
+
+ if (mask & IFCAP_VLAN_HWTSO)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+
+ if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ vtnet_init_locked(sc);
+ }
+ VLAN_CAPABILITIES(ifp);
+
+ VTNET_UNLOCK(sc);
+ break;
+
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+
+ VTNET_LOCK_ASSERT_NOTOWNED(sc);
+
+ return (error);
+}
+
+static int
+vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
+{
+ struct ifnet *ifp;
+ int new_frame_size, clsize;
+
+ ifp = sc->vtnet_ifp;
+
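+ /*
+ * Without mergeable buffers, the entire frame must fit within one
+ * mbuf chain of equal sized clusters, so size the cluster to the
+ * frame, up to 9K jumbo clusters. With mergeable buffers, a frame
+ * may span multiple buffers, so page sized clusters suffice.
+ */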
+ if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
+ new_frame_size = sizeof(struct vtnet_rx_header) +
+ sizeof(struct ether_vlan_header) + new_mtu;
+
+ if (new_frame_size > MJUM9BYTES)
+ return (EINVAL);
+
+ if (new_frame_size <= MCLBYTES)
+ clsize = MCLBYTES;
+ else
+ clsize = MJUM9BYTES;
+ } else {
+ new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
+ sizeof(struct ether_vlan_header) + new_mtu;
+
+ if (new_frame_size <= MCLBYTES)
+ clsize = MCLBYTES;
+ else
+ clsize = MJUMPAGESIZE;
+ }
+
+ sc->vtnet_rx_mbuf_size = clsize;
+ sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
+ KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
+ ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));
+
+ ifp->if_mtu = new_mtu;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ vtnet_init_locked(sc);
+ }
+
+ return (0);
+}
+
+static int
+vtnet_init_rx_vq(struct vtnet_softc *sc)
+{
+ struct virtqueue *vq;
+ int nbufs, error;
+
+ vq = sc->vtnet_rx_vq;
+ nbufs = 0;
+ error = ENOSPC;
+
+ while (!virtqueue_full(vq)) {
+ if ((error = vtnet_newbuf(sc)) != 0)
+ break;
+ nbufs++;
+ }
+
+ if (nbufs > 0) {
+ virtqueue_notify(vq);
+
+ /*
+ * EMSGSIZE signifies the virtqueue did not have enough
+ * entries available to hold the last mbuf. This is not
+ * an error. We should not get ENOSPC since we check if
+ * the virtqueue is full before attempting to add a
+ * buffer.
+ */
+ if (error == EMSGSIZE)
+ error = 0;
+ }
+
+ return (error);
+}
+
+static void
+vtnet_free_rx_mbufs(struct vtnet_softc *sc)
+{
+ struct virtqueue *vq;
+ struct mbuf *m;
+ int last;
+
+ vq = sc->vtnet_rx_vq;
+ last = 0;
+
+ while ((m = virtqueue_drain(vq, &last)) != NULL)
+ m_freem(m);
+
+ KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
+}
+
+static void
+vtnet_free_tx_mbufs(struct vtnet_softc *sc)
+{
+ struct virtqueue *vq;
+ struct vtnet_tx_header *txhdr;
+ int last;
+
+ vq = sc->vtnet_tx_vq;
+ last = 0;
+
+ while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
+ m_freem(txhdr->vth_mbuf);
+ uma_zfree(vtnet_tx_header_zone, txhdr);
+ }
+
+ KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
+}
+
+static void
+vtnet_free_ctrl_vq(struct vtnet_softc *sc)
+{
+
+ /*
+ * The control virtqueue is only polled, therefore
+ * it should already be empty.
+ */
+ KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
+ ("Ctrl Vq not empty"));
+}
+
+#ifdef DEVICE_POLLING
+static int
+vtnet_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+{
+ struct vtnet_softc *sc;
+ int rx_done;
+
+ sc = ifp->if_softc;
+ rx_done = 0;
+
+ VTNET_LOCK(sc);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (cmd == POLL_AND_CHECK_STATUS)
+ vtnet_update_link_status(sc);
+
+ if (virtqueue_nused(sc->vtnet_rx_vq) > 0)
+ vtnet_rxeof(sc, count, &rx_done);
+
+ vtnet_txeof(sc);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ vtnet_start_locked(ifp);
+ }
+ VTNET_UNLOCK(sc);
+
+ return (rx_done);
+}
+#endif /* DEVICE_POLLING */
+
+static struct mbuf *
+vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
+{
+ struct mbuf *m_head, *m_tail, *m;
+ int i, clsize;
+
+ clsize = sc->vtnet_rx_mbuf_size;
+
+ m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
+ if (m_head == NULL)
+ goto fail;
+
+ m_head->m_len = clsize;
+ m_tail = m_head;
+
+ if (nbufs > 1) {
+ KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
+ ("chained Rx mbuf requested without LRO_NOMRG"));
+
+ for (i = 0; i < nbufs - 1; i++) {
+ m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
+ if (m == NULL)
+ goto fail;
+
+ m->m_len = clsize;
+ m_tail->m_next = m;
+ m_tail = m;
+ }
+ }
+
+ if (m_tailp != NULL)
+ *m_tailp = m_tail;
+
+ return (m_head);
+
+fail:
+ sc->vtnet_stats.mbuf_alloc_failed++;
+ m_freem(m_head);
+
+ return (NULL);
+}
+
+static int
+vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
+{
+ struct mbuf *m, *m_prev;
+ struct mbuf *m_new, *m_tail;
+ int len, clsize, nreplace, error;
+
+ m = m0;
+ m_prev = NULL;
+ len = len0;
+
+ m_tail = NULL;
+ clsize = sc->vtnet_rx_mbuf_size;
+ nreplace = 0;
+
+ if (m->m_next != NULL)
+ KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
+ ("chained Rx mbuf without LRO_NOMRG"));
+
+ /*
+ * Since LRO_NOMRG mbuf chains are so large, we want to avoid
+ * allocating an entire chain for each received frame. When
+ * the received frame's length is less than that of the chain,
+ * the unused mbufs are reassigned to the new chain.
+ */
+ while (len > 0) {
+ /*
+ * Something is seriously wrong if we received
+ * a frame larger than the mbuf chain. Drop it.
+ */
+ if (m == NULL) {
+ sc->vtnet_stats.rx_frame_too_large++;
+ return (EMSGSIZE);
+ }
+
+ KASSERT(m->m_len == clsize,
+ ("mbuf length not expected cluster size: %d",
+ m->m_len));
+
+ m->m_len = MIN(m->m_len, len);
+ len -= m->m_len;
+
+ m_prev = m;
+ m = m->m_next;
+ nreplace++;
+ }
+
+ KASSERT(m_prev != NULL, ("m_prev == NULL"));
+ KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
+ ("too many replacement mbufs: %d/%d", nreplace,
+ sc->vtnet_rx_mbuf_count));
+
+ m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
+ if (m_new == NULL) {
+ m_prev->m_len = clsize;
+ return (ENOBUFS);
+ }
+
+ /*
+ * Move unused mbufs, if any, from the original chain
+ * onto the end of the new chain.
+ */
+ if (m_prev->m_next != NULL) {
+ m_tail->m_next = m_prev->m_next;
+ m_prev->m_next = NULL;
+ }
+
+ error = vtnet_enqueue_rxbuf(sc, m_new);
+ if (error) {
+ /*
+ * BAD! We could not enqueue the replacement mbuf chain. We
+ * must restore the m0 chain to the original state if it was
+ * modified so we can subsequently discard it.
+ *
+ * NOTE: The replacement is supposed to be an identical copy
+ * of the one just dequeued, so this is an unexpected error.
+ */
+ sc->vtnet_stats.rx_enq_replacement_failed++;
+
+ if (m_tail->m_next != NULL) {
+ m_prev->m_next = m_tail->m_next;
+ m_tail->m_next = NULL;
+ }
+
+ m_prev->m_len = clsize;
+ m_freem(m_new);
+ }
+
+ return (error);
+}
+
+static int
+vtnet_newbuf(struct vtnet_softc *sc)
+{
+ struct mbuf *m;
+ int error;
+
+ m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
+ if (m == NULL)
+ return (ENOBUFS);
+
+ error = vtnet_enqueue_rxbuf(sc, m);
+ if (error)
+ m_freem(m);
+
+ return (error);
+}
+
+static void
+vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
+{
+ struct virtqueue *vq;
+ struct mbuf *m;
+
+ vq = sc->vtnet_rx_vq;
+
+ while (--nbufs > 0) {
+ if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
+ break;
+ vtnet_discard_rxbuf(sc, m);
+ }
+}
+
+static void
+vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
+{
+ int error;
+
+ /*
+ * Requeue the discarded mbuf. This should always be
+ * successful since it was just dequeued.
+ */
+ error = vtnet_enqueue_rxbuf(sc, m);
+ KASSERT(error == 0, ("cannot requeue discarded mbuf"));
+}
+
+static int
+vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
+{
+ struct sglist sg;
+ struct sglist_seg segs[VTNET_MAX_RX_SEGS];
+ struct vtnet_rx_header *rxhdr;
+ struct virtio_net_hdr *hdr;
+ uint8_t *mdata;
+ int offset, error;
+
+ VTNET_LOCK_ASSERT(sc);
+ if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
+ KASSERT(m->m_next == NULL, ("chained Rx mbuf"));
+
+ sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);
+
+ mdata = mtod(m, uint8_t *);
+ offset = 0;
+
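+ /*
+ * Without mergeable buffers, the virtio_net_hdr inside the leading
+ * vtnet_rx_header is added as its own segment; the frame data then
+ * begins past the header pad within the same cluster.
+ */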
+ if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
+ rxhdr = (struct vtnet_rx_header *) mdata;
+ hdr = &rxhdr->vrh_hdr;
+ offset += sizeof(struct vtnet_rx_header);
+
+ error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
+ KASSERT(error == 0, ("cannot add header to sglist"));
+ }
+
+ error = sglist_append(&sg, mdata + offset, m->m_len - offset);
+ if (error)
+ return (error);
+
+ if (m->m_next != NULL) {
+ error = sglist_append_mbuf(&sg, m->m_next);
+ if (error)
+ return (error);
+ }
+
+ return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
+}
+
+static void
+vtnet_vlan_tag_remove(struct mbuf *m)
+{
+ struct ether_vlan_header *evl;
+
+ evl = mtod(m, struct ether_vlan_header *);
+
+ m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
+ m->m_flags |= M_VLANTAG;
+
+ /* Strip the 802.1Q header. */
+ bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
+ ETHER_HDR_LEN - ETHER_TYPE_LEN);
+ m_adj(m, ETHER_VLAN_ENCAP_LEN);
+}
+
+#ifdef notyet
+static int
+vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
+ struct virtio_net_hdr *hdr)
+{
+ struct ether_header *eh;
+ struct ether_vlan_header *evh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct udphdr *udp;
+ int ip_offset, csum_start, csum_offset, hlen;
+ uint16_t eth_type;
+ uint8_t ip_proto;
+
+ /*
+ * Convert the VirtIO checksum interface to FreeBSD's interface.
+ * The host only provides us with the offset at which to start
+ * checksumming, and the offset from that to place the completed
+ * checksum. While this maps well to how Linux does checksums, on
+ * FreeBSD we must parse the received packet in order to set the
+ * appropriate CSUM_* flags.
+ */
+
+ /*
+ * Every mbuf added to the receive virtqueue is always at least
+ * MCLBYTES big, so assume something is amiss if the first mbuf
+ * does not contain both the Ethernet and protocol headers.
+ */
+ ip_offset = sizeof(struct ether_header);
+ if (m->m_len < ip_offset)
+ return (1);
+
+ eh = mtod(m, struct ether_header *);
+ eth_type = ntohs(eh->ether_type);
+ if (eth_type == ETHERTYPE_VLAN) {
+ ip_offset = sizeof(struct ether_vlan_header);
+ if (m->m_len < ip_offset)
+ return (1);
+ evh = mtod(m, struct ether_vlan_header *);
+ eth_type = ntohs(evh->evl_proto);
+ }
+
+ switch (eth_type) {
+ case ETHERTYPE_IP:
+ if (m->m_len < ip_offset + sizeof(struct ip))
+ return (1);
+
+ ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
+ /* Sanity check the IP header. */
+ if (ip->ip_v != IPVERSION)
+ return (1);
+ hlen = ip->ip_hl << 2;
+ if (hlen < sizeof(struct ip))
+ return (1);
+ if (ntohs(ip->ip_len) < hlen)
+ return (1);
+ if (ntohs(ip->ip_len) != (m->m_pkthdr.len - ip_offset))
+ return (1);
+
+ ip_proto = ip->ip_p;
+ csum_start = ip_offset + hlen;
+ break;
+
+ case ETHERTYPE_IPV6:
+ if (m->m_len < ip_offset + sizeof(struct ip6_hdr))
+ return (1);
+
+ /*
+ * XXX FreeBSD does not handle any IPv6 checksum offloading
+ * at the moment.
+ */
+
+ ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
+ /* XXX Assume no extension headers are present. */
+ ip_proto = ip6->ip6_nxt;
+ csum_start = ip_offset + sizeof(struct ip6_hdr);
+ break;
+
+ default:
+ sc->vtnet_stats.rx_csum_bad_ethtype++;
+ return (1);
+ }
+
+ /* Assume checksum begins right after the IP header. */
+ if (hdr->csum_start != csum_start) {
+ sc->vtnet_stats.rx_csum_bad_start++;
+ return (1);
+ }
+
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ csum_offset = offsetof(struct tcphdr, th_sum);
+ break;
+
+ case IPPROTO_UDP:
+ csum_offset = offsetof(struct udphdr, uh_sum);
+ break;
+
+ case IPPROTO_SCTP:
+ csum_offset = offsetof(struct sctphdr, checksum);
+ break;
+
+ default:
+ sc->vtnet_stats.rx_csum_bad_ipproto++;
+ return (1);
+ }
+
+ if (hdr->csum_offset != csum_offset) {
+ sc->vtnet_stats.rx_csum_bad_offset++;
+ return (1);
+ }
+
+ /*
+ * The IP header checksum is almost certainly valid but I'm
+ * uncertain if that is guaranteed.
+ *
+ * m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
+ */
+
+ switch (ip_proto) {
+ case IPPROTO_UDP:
+ if (m->m_len < csum_start + sizeof(struct udphdr))
+ return (1);
+
+ udp = (struct udphdr *)(mtod(m, uint8_t *) + csum_start);
+ if (udp->uh_sum == 0)
+ return (0);
+
+ /* FALLTHROUGH */
+
+ case IPPROTO_TCP:
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+ break;
+
+ case IPPROTO_SCTP:
+ m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
+ break;
+ }
+
+ sc->vtnet_stats.rx_csum_offloaded++;
+
+ return (0);
+}
+#endif
+
+/*
+ * Alternative method of doing receive checksum offloading. Rather
+ * than parsing the received frame down to the IP header, use the
+ * csum_offset to determine which CSUM_* flags are appropriate. We
+ * can get by with doing this only because the checksum offsets are
+ * unique for the things we care about.
+ */
+static int
+vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
+ struct virtio_net_hdr *hdr)
+{
+ struct ether_header *eh;
+ struct ether_vlan_header *evh;
+ struct udphdr *udp;
+ int csum_len;
+ uint16_t eth_type;
+
+ csum_len = hdr->csum_start + hdr->csum_offset;
+
+ if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
+ return (1);
+ if (m->m_len < csum_len)
+ return (1);
+
+ eh = mtod(m, struct ether_header *);
+ eth_type = ntohs(eh->ether_type);
+ if (eth_type == ETHERTYPE_VLAN) {
+ evh = mtod(m, struct ether_vlan_header *);
+ eth_type = ntohs(evh->evl_proto);
+ }
+
+ if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
+ sc->vtnet_stats.rx_csum_bad_ethtype++;
+ return (1);
+ }
+
+ /* Use the offset to determine the appropriate CSUM_* flags. */
+ switch (hdr->csum_offset) {
+ case offsetof(struct udphdr, uh_sum):
+ if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
+ return (1);
+ udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
+ if (udp->uh_sum == 0)
+ return (0);
+
+ /* FALLTHROUGH */
+
+ case offsetof(struct tcphdr, th_sum):
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+ break;
+
+ case offsetof(struct sctphdr, checksum):
+ m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
+ break;
+
+ default:
+ sc->vtnet_stats.rx_csum_bad_offset++;
+ return (1);
+ }
+
+ sc->vtnet_stats.rx_csum_offloaded++;
+
+ return (0);
+}
+
+static int
+vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
+{
+ struct ifnet *ifp;
+ struct virtqueue *vq;
+ struct mbuf *m, *m_tail;
+ int len;
+
+ ifp = sc->vtnet_ifp;
+ vq = sc->vtnet_rx_vq;
+ m_tail = m_head;
+
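+ /*
+ * Dequeue the remaining buffers of this frame, chaining each onto
+ * the tail; only the first mbuf keeps its packet header.
+ */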
+ while (--nbufs > 0) {
+ m = virtqueue_dequeue(vq, &len);
+ if (m == NULL) {
+ ifp->if_ierrors++;
+ goto fail;
+ }
+
+ if (vtnet_newbuf(sc) != 0) {
+ ifp->if_iqdrops++;
+ vtnet_discard_rxbuf(sc, m);
+ if (nbufs > 1)
+ vtnet_discard_merged_rxbuf(sc, nbufs);
+ goto fail;
+ }
+
+ if (m->m_len < len)
+ len = m->m_len;
+
+ m->m_len = len;
+ m->m_flags &= ~M_PKTHDR;
+
+ m_head->m_pkthdr.len += len;
+ m_tail->m_next = m;
+ m_tail = m;
+ }
+
+ return (0);
+
+fail:
+ sc->vtnet_stats.rx_mergeable_failed++;
+ m_freem(m_head);
+
+ return (1);
+}
+
+static int
+vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
+{
+ struct virtio_net_hdr lhdr;
+ struct ifnet *ifp;
+ struct virtqueue *vq;
+ struct mbuf *m;
+ struct ether_header *eh;
+ struct virtio_net_hdr *hdr;
+ struct virtio_net_hdr_mrg_rxbuf *mhdr;
+ int len, deq, nbufs, adjsz, rx_npkts;
+
+ ifp = sc->vtnet_ifp;
+ vq = sc->vtnet_rx_vq;
+ hdr = &lhdr;
+ deq = 0;
+ rx_npkts = 0;
+
+ VTNET_LOCK_ASSERT(sc);
+
+ while (--count >= 0) {
+ m = virtqueue_dequeue(vq, &len);
+ if (m == NULL)
+ break;
+ deq++;
+
+ if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
+ ifp->if_ierrors++;
+ vtnet_discard_rxbuf(sc, m);
+ continue;
+ }
+
+ if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
+ nbufs = 1;
+ adjsz = sizeof(struct vtnet_rx_header);
+ /*
+ * Account for our pad between the header and
+ * the actual start of the frame.
+ */
+ len += VTNET_RX_HEADER_PAD;
+ } else {
+ mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
+ nbufs = mhdr->num_buffers;
+ adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ }
+
+ if (vtnet_replace_rxbuf(sc, m, len) != 0) {
+ ifp->if_iqdrops++;
+ vtnet_discard_rxbuf(sc, m);
+ if (nbufs > 1)
+ vtnet_discard_merged_rxbuf(sc, nbufs);
+ continue;
+ }
+
+ m->m_pkthdr.len = len;
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.csum_flags = 0;
+
+ if (nbufs > 1) {
+ if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
+ continue;
+ }
+
+ ifp->if_ipackets++;
+
+ /*
+ * Save a copy of the header before we strip it. For both
+ * mergeable and non-mergeable, the VirtIO header is placed
+ * first in the mbuf's data. We no longer need num_buffers,
+ * so always use a virtio_net_hdr.
+ */
+ memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
+ m_adj(m, adjsz);
+
+ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
+ eh = mtod(m, struct ether_header *);
+ if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
+ vtnet_vlan_tag_remove(m);
+
+ /*
+ * With the 802.1Q header removed, update the
+ * checksum starting location accordingly.
+ */
+ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
+ hdr->csum_start -=
+ ETHER_VLAN_ENCAP_LEN;
+ }
+ }
+
+ if (ifp->if_capenable & IFCAP_RXCSUM &&
+ hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ if (vtnet_rx_csum(sc, m, hdr) != 0)
+ sc->vtnet_stats.rx_csum_failed++;
+ }
+
+ VTNET_UNLOCK(sc);
+ rx_npkts++;
+ (*ifp->if_input)(ifp, m);
+ VTNET_LOCK(sc);
+
+ /*
+ * The interface may have been stopped while we were
+ * passing the packet up the network stack.
+ */
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+ }
+
+ virtqueue_notify(vq);
+
+ if (rx_npktsp != NULL)
+ *rx_npktsp = rx_npkts;
+
+ return (count > 0 ? 0 : EAGAIN);
+}
+
+static void
+vtnet_rx_intr_task(void *arg, int pending)
+{
+ struct vtnet_softc *sc;
+ struct ifnet *ifp;
+ int more;
+
+ sc = arg;
+ ifp = sc->vtnet_ifp;
+
+ VTNET_LOCK(sc);
+
+#ifdef DEVICE_POLLING
+ if (ifp->if_capenable & IFCAP_POLLING) {
+ VTNET_UNLOCK(sc);
+ return;
+ }
+#endif
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ vtnet_enable_rx_intr(sc);
+ VTNET_UNLOCK(sc);
+ return;
+ }
+
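+ /*
+ * Process up to the limit, then re-enable the interrupt. A nonzero
+ * return from vtnet_enable_rx_intr() indicates more buffers arrived
+ * while interrupts were disabled, so disable them again and
+ * reschedule ourselves to avoid losing the race.
+ */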
+ more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
+ if (!more && vtnet_enable_rx_intr(sc) != 0) {
+ vtnet_disable_rx_intr(sc);
+ more = 1;
+ }
+
+ VTNET_UNLOCK(sc);
+
+ if (more) {
+ sc->vtnet_stats.rx_task_rescheduled++;
+ taskqueue_enqueue_fast(sc->vtnet_tq,
+ &sc->vtnet_rx_intr_task);
+ }
+}
+
+static int
+vtnet_rx_vq_intr(void *xsc)
+{
+ struct vtnet_softc *sc;
+
+ sc = xsc;
+
+ vtnet_disable_rx_intr(sc);
+ taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_rx_intr_task);
+
+ return (1);
+}
+
+static void
+vtnet_txeof(struct vtnet_softc *sc)
+{
+ struct virtqueue *vq;
+ struct ifnet *ifp;
+ struct vtnet_tx_header *txhdr;
+ int deq;
+
+ vq = sc->vtnet_tx_vq;
+ ifp = sc->vtnet_ifp;
+ deq = 0;
+
+ VTNET_LOCK_ASSERT(sc);
+
+ while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
+ deq++;
+ ifp->if_opackets++;
+ m_freem(txhdr->vth_mbuf);
+ uma_zfree(vtnet_tx_header_zone, txhdr);
+ }
+
+ if (deq > 0) {
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ if (virtqueue_empty(vq))
+ sc->vtnet_watchdog_timer = 0;
+ }
+}
+
+static struct mbuf *
+vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
+ struct virtio_net_hdr *hdr)
+{
+ struct ifnet *ifp;
+ struct ether_header *eh;
+ struct ether_vlan_header *evh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *tcp;
+ int ip_offset;
+ uint16_t eth_type, csum_start;
+ uint8_t ip_proto, gso_type;
+
+ ifp = sc->vtnet_ifp;
+ M_ASSERTPKTHDR(m);
+
+ ip_offset = sizeof(struct ether_header);
+ if (m->m_len < ip_offset) {
+ if ((m = m_pullup(m, ip_offset)) == NULL)
+ return (NULL);
+ }
+
+ eh = mtod(m, struct ether_header *);
+ eth_type = ntohs(eh->ether_type);
+ if (eth_type == ETHERTYPE_VLAN) {
+ ip_offset = sizeof(struct ether_vlan_header);
+ if (m->m_len < ip_offset) {
+ if ((m = m_pullup(m, ip_offset)) == NULL)
+ return (NULL);
+ }
+ evh = mtod(m, struct ether_vlan_header *);
+ eth_type = ntohs(evh->evl_proto);
+ }
+
+ switch (eth_type) {
+ case ETHERTYPE_IP:
+ if (m->m_len < ip_offset + sizeof(struct ip)) {
+ m = m_pullup(m, ip_offset + sizeof(struct ip));
+ if (m == NULL)
+ return (NULL);
+ }
+
+ ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
+ ip_proto = ip->ip_p;
+ csum_start = ip_offset + (ip->ip_hl << 2);
+ gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ break;
+
+ case ETHERTYPE_IPV6:
+ if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
+ m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
+ if (m == NULL)
+ return (NULL);
+ }
+
+ ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
+ /*
+ * XXX Assume no extension headers are present. Presently,
+ * this will always be true in the case of TSO, and FreeBSD
+ * does not perform checksum offloading of IPv6 yet.
+ */
+ ip_proto = ip6->ip6_nxt;
+ csum_start = ip_offset + sizeof(struct ip6_hdr);
+ gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ break;
+
+ default:
+ return (m);
+ }
+
+ if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
+ hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ hdr->csum_start = csum_start;
+ hdr->csum_offset = m->m_pkthdr.csum_data;
+
+ sc->vtnet_stats.tx_csum_offloaded++;
+ }
+
+ if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+ if (ip_proto != IPPROTO_TCP)
+ return (m);
+
+ if (m->m_len < csum_start + sizeof(struct tcphdr)) {
+ m = m_pullup(m, csum_start + sizeof(struct tcphdr));
+ if (m == NULL)
+ return (NULL);
+ }
+
+ tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
+ hdr->gso_type = gso_type;
+ hdr->hdr_len = csum_start + (tcp->th_off << 2);
+ hdr->gso_size = m->m_pkthdr.tso_segsz;
+
+ if (tcp->th_flags & TH_CWR) {
+ /*
+ * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
+ * ECN support is only configurable globally with the
+ * net.inet.tcp.ecn.enable sysctl knob.
+ */
+ if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
+ if_printf(ifp, "TSO with ECN not supported "
+ "by host\n");
+ m_freem(m);
+ return (NULL);
+ }
+
+ hdr->flags |= VIRTIO_NET_HDR_GSO_ECN;
+ }
+
+ sc->vtnet_stats.tx_tso_offloaded++;
+ }
+
+ return (m);
+}
+
+static int
+vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
+ struct vtnet_tx_header *txhdr)
+{
+ struct sglist sg;
+ struct sglist_seg segs[VTNET_MAX_TX_SEGS];
+ struct virtqueue *vq;
+ struct mbuf *m;
+ int collapsed, error;
+
+ vq = sc->vtnet_tx_vq;
+ m = *m_head;
+ collapsed = 0;
+
+ sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
+ error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
+ KASSERT(error == 0 && sg.sg_nseg == 1,
+ ("cannot add header to sglist"));
+
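+ /*
+ * If the mbuf chain has more segments than the sglist can hold,
+ * collapse the chain into fewer mbufs and retry once; drop the
+ * frame if it still does not fit.
+ */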
+again:
+ error = sglist_append_mbuf(&sg, m);
+ if (error) {
+ if (collapsed)
+ goto fail;
+
+ m = m_collapse(m, M_DONTWAIT, VTNET_MAX_TX_SEGS - 1);
+ if (m == NULL)
+ goto fail;
+
+ *m_head = m;
+ collapsed = 1;
+ goto again;
+ }
+
+ txhdr->vth_mbuf = m;
+
+ return (virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0));
+
+fail:
+ m_freem(*m_head);
+ *m_head = NULL;
+
+ return (ENOBUFS);
+}
+
+static int
+vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
+{
+ struct vtnet_tx_header *txhdr;
+ struct virtio_net_hdr *hdr;
+ struct mbuf *m;
+ int error;
+
+ txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO);
+ if (txhdr == NULL)
+ return (ENOMEM);
+
+ /*
+ * Always use the non-mergeable header to simplify things. When
+ * the mergeable feature is negotiated, the num_buffers field
+ * must be set to zero. We rely on vtnet_hdr_size when enqueuing
+ * so the host receives the header size it expects.
+ */
+ hdr = &txhdr->vth_uhdr.hdr;
+ m = *m_head;
+
+ error = ENOBUFS;
+
+ if (m->m_flags & M_VLANTAG) {
+ m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
+ if ((*m_head = m) == NULL)
+ goto fail;
+ m->m_flags &= ~M_VLANTAG;
+ }
+
+ if (m->m_pkthdr.csum_flags != 0) {
+ m = vtnet_tx_offload(sc, m, hdr);
+ if ((*m_head = m) == NULL)
+ goto fail;
+ }
+
+ error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
+fail:
+ if (error)
+ uma_zfree(vtnet_tx_header_zone, txhdr);
+
+ return (error);
+}
+
+static void
+vtnet_start(struct ifnet *ifp)
+{
+ struct vtnet_softc *sc;
+
+ sc = ifp->if_softc;
+
+ VTNET_LOCK(sc);
+ vtnet_start_locked(ifp);
+ VTNET_UNLOCK(sc);
+}
+
+static void
+vtnet_start_locked(struct ifnet *ifp)
+{
+ struct vtnet_softc *sc;
+ struct virtqueue *vq;
+ struct mbuf *m0;
+ int enq;
+
+ sc = ifp->if_softc;
+ vq = sc->vtnet_tx_vq;
+ enq = 0;
+
+ VTNET_LOCK_ASSERT(sc);
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0))
+ return;
+
+#ifdef VTNET_TX_INTR_MODERATION
+ if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
+ vtnet_txeof(sc);
+#endif
+
+ while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ if (virtqueue_full(vq)) {
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ break;
+ }
+
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
+ if (m0 == NULL)
+ break;
+
+ if (vtnet_encap(sc, &m0) != 0) {
+ if (m0 == NULL)
+ break;
+ IFQ_DRV_PREPEND(&ifp->if_snd, m0);
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ break;
+ }
+
+ enq++;
+ ETHER_BPF_MTAP(ifp, m0);
+ }
+
+ if (enq > 0) {
+ virtqueue_notify(vq);
+ sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
+ }
+}
+
+static void
+vtnet_tick(void *xsc)
+{
+ struct vtnet_softc *sc;
+
+ sc = xsc;
+
+ VTNET_LOCK_ASSERT(sc);
+#ifdef VTNET_DEBUG
+ virtqueue_dump(sc->vtnet_rx_vq);
+ virtqueue_dump(sc->vtnet_tx_vq);
+#endif
+
+ vtnet_watchdog(sc);
+ callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
+}
+
+static void
+vtnet_tx_intr_task(void *arg, int pending)
+{
+ struct vtnet_softc *sc;
+ struct ifnet *ifp;
+
+ sc = arg;
+ ifp = sc->vtnet_ifp;
+
+ VTNET_LOCK(sc);
+
+#ifdef DEVICE_POLLING
+ if (ifp->if_capenable & IFCAP_POLLING) {
+ VTNET_UNLOCK(sc);
+ return;
+ }
+#endif
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ vtnet_enable_tx_intr(sc);
+ VTNET_UNLOCK(sc);
+ return;
+ }
+
+ vtnet_txeof(sc);
+
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ vtnet_start_locked(ifp);
+
+ if (vtnet_enable_tx_intr(sc) != 0) {
+ vtnet_disable_tx_intr(sc);
+ sc->vtnet_stats.tx_task_rescheduled++;
+ VTNET_UNLOCK(sc);
+ taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_tx_intr_task);
+ return;
+ }
+
+ VTNET_UNLOCK(sc);
+}
+
+static int
+vtnet_tx_vq_intr(void *xsc)
+{
+ struct vtnet_softc *sc;
+
+ sc = xsc;
+
+ vtnet_disable_tx_intr(sc);
+ taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_tx_intr_task);
+
+ return (1);
+}
+
+static void
+vtnet_stop(struct vtnet_softc *sc)
+{
+ device_t dev;
+ struct ifnet *ifp;
+
+ dev = sc->vtnet_dev;
+ ifp = sc->vtnet_ifp;
+
+ VTNET_LOCK_ASSERT(sc);
+
+ sc->vtnet_watchdog_timer = 0;
+ callout_stop(&sc->vtnet_tick_ch);
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ vtnet_disable_rx_intr(sc);
+ vtnet_disable_tx_intr(sc);
+
+ /*
+ * Stop the host VirtIO adapter. Note this will reset the host
+ * adapter's state back to the pre-initialized state, so in
+ * order to make the device usable again, we must drive it
+ * through virtio_reinit() and virtio_reinit_complete().
+ */
+ virtio_stop(dev);
+
+ sc->vtnet_flags &= ~VTNET_FLAG_LINK;
+
+ vtnet_free_rx_mbufs(sc);
+ vtnet_free_tx_mbufs(sc);
+}
+
+static int
+vtnet_reinit(struct vtnet_softc *sc)
+{
+ struct ifnet *ifp;
+ uint64_t features;
+
+ ifp = sc->vtnet_ifp;
+ features = sc->vtnet_features;
+
+ /*
+ * Re-negotiate with the host, removing any disabled receive
+ * features. Transmit features are disabled only on our side
+ * via if_capenable and if_hwassist.
+ */
+
+ if (ifp->if_capabilities & IFCAP_RXCSUM) {
+ if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
+ features &= ~VIRTIO_NET_F_GUEST_CSUM;
+ }
+
+ if (ifp->if_capabilities & IFCAP_LRO) {
+ if ((ifp->if_capenable & IFCAP_LRO) == 0)
+ features &= ~VTNET_LRO_FEATURES;
+ }
+
+ if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
+ if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
+ features &= ~VIRTIO_NET_F_CTRL_VLAN;
+ }
+
+ return (virtio_reinit(sc->vtnet_dev, features));
+}
+
+static void
+vtnet_init_locked(struct vtnet_softc *sc)
+{
+ device_t dev;
+ struct ifnet *ifp;
+ int error;
+
+ dev = sc->vtnet_dev;
+ ifp = sc->vtnet_ifp;
+
+ VTNET_LOCK_ASSERT(sc);
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ return;
+
+ /* Stop host's adapter, cancel any pending I/O. */
+ vtnet_stop(sc);
+
+ /* Reinitialize the host device. */
+ error = vtnet_reinit(sc);
+ if (error) {
+ device_printf(dev,
+ "reinitialization failed, stopping device...\n");
+ vtnet_stop(sc);
+ return;
+ }
+
+ /* Update host with assigned MAC address. */
+ bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
+ vtnet_set_hwaddr(sc);
+
+ ifp->if_hwassist = 0;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
+ if (ifp->if_capenable & IFCAP_TSO4)
+ ifp->if_hwassist |= CSUM_TSO;
+
+ error = vtnet_init_rx_vq(sc);
+ if (error) {
+ device_printf(dev,
+ "cannot allocate mbufs for Rx virtqueue\n");
+ vtnet_stop(sc);
+ return;
+ }
+
+ if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
+ if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
+ /* Restore promiscuous and all-multicast modes. */
+ vtnet_rx_filter(sc);
+
+ /* Restore filtered MAC addresses. */
+ vtnet_rx_filter_mac(sc);
+ }
+
+ /* Restore VLAN filters. */
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+ vtnet_rx_filter_vlan(sc);
+ }
+
+#ifdef DEVICE_POLLING
+ if (ifp->if_capenable & IFCAP_POLLING) {
+ vtnet_disable_rx_intr(sc);
+ vtnet_disable_tx_intr(sc);
+ } else
+#endif
+ {
+ vtnet_enable_rx_intr(sc);
+ vtnet_enable_tx_intr(sc);
+ }
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ virtio_reinit_complete(dev);
+
+ vtnet_update_link_status(sc);
+ callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
+}
+
+static void
+vtnet_init(void *xsc)
+{
+ struct vtnet_softc *sc;
+
+ sc = xsc;
+
+ VTNET_LOCK(sc);
+ vtnet_init_locked(sc);
+ VTNET_UNLOCK(sc);
+}
+
+static void
+vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
+ struct sglist *sg, int readable, int writable)
+{
+ struct virtqueue *vq;
+ void *c;
+
+ vq = sc->vtnet_ctrl_vq;
+
+ VTNET_LOCK_ASSERT(sc);
+ KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
+ ("no control virtqueue"));
+ KASSERT(virtqueue_empty(vq),
+ ("control command already enqueued"));
+
+ if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
+ return;
+
+ virtqueue_notify(vq);
+
+ /*
+ * Poll until the command is complete. Previously, we would
+ * sleep until the control virtqueue interrupt handler woke
+ * us up, but dropping the VTNET_MTX leads to serialization
+ * difficulties.
+ *
+ * Furthermore, it appears QEMU/KVM only allocates three MSIX
+ * vectors. Two of those vectors are needed for the Rx and Tx
+ * virtqueues. We do not support sharing both a Vq and config
+ * changed notification on the same MSIX vector.
+ */
+ c = virtqueue_poll(vq, NULL);
+ KASSERT(c == cookie, ("unexpected control command response"));
+}
+
+static void
+vtnet_rx_filter(struct vtnet_softc *sc)
+{
+ device_t dev;
+ struct ifnet *ifp;
+
+ dev = sc->vtnet_dev;
+ ifp = sc->vtnet_ifp;
+
+ VTNET_LOCK_ASSERT(sc);
+ KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
+ ("CTRL_RX feature not negotiated"));
+
+ if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
+ device_printf(dev, "cannot %s promiscuous mode\n",
+ ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
+
+ if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
+ device_printf(dev, "cannot %s all-multicast mode\n",
+ ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
+}
+
+static int
+vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
+{
+ struct virtio_net_ctrl_hdr hdr;
+ struct sglist_seg segs[3];
+ struct sglist sg;
+ uint8_t onoff, ack;
+ int error;
+
+ if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
+ return (ENOTSUP);
+
+ error = 0;
+
+ hdr.class = VIRTIO_NET_CTRL_RX;
+ hdr.cmd = cmd;
+ onoff = !!on;
+ ack = VIRTIO_NET_ERR;
+
+ sglist_init(&sg, 3, segs);
+ error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
+ error |= sglist_append(&sg, &onoff, sizeof(uint8_t));
+ error |= sglist_append(&sg, &ack, sizeof(uint8_t));
+ KASSERT(error == 0 && sg.sg_nseg == 3,
+ ("error adding Rx filter message to sglist"));
+
+ vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
+
+ return (ack == VIRTIO_NET_OK ? 0 : EIO);
+}
+
+static int
+vtnet_set_promisc(struct vtnet_softc *sc, int on)
+{
+
+ return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
+}
+
+static int
+vtnet_set_allmulti(struct vtnet_softc *sc, int on)
+{
+
+ return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
+}
+
+static void
+vtnet_rx_filter_mac(struct vtnet_softc *sc)
+{
+ struct virtio_net_ctrl_hdr hdr;
+ struct vtnet_mac_filter *filter;
+ struct sglist_seg segs[4];
+ struct sglist sg;
+ struct ifnet *ifp;
+ struct ifaddr *ifa;
+ struct ifmultiaddr *ifma;
+ int ucnt, mcnt, promisc, allmulti, error;
+ uint8_t ack;
+
+ ifp = sc->vtnet_ifp;
+ ucnt = 0;
+ mcnt = 0;
+ promisc = 0;
+ allmulti = 0;
+ error = 0;
+
+ VTNET_LOCK_ASSERT(sc);
+ KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
+ ("CTRL_RX feature not negotiated"));
+
+ /*
+ * Allocate the MAC filtering table. Note we could do this
+ * at attach time, but it is probably not worth keeping it
+ * around for an infrequent occurrence.
+ */
+ filter = malloc(sizeof(struct vtnet_mac_filter), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (filter == NULL) {
+ device_printf(sc->vtnet_dev,
+ "cannot allocate MAC address filtering table\n");
+ return;
+ }
+
+ /* Unicast MAC addresses: */
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+ else if (ucnt == VTNET_MAX_MAC_ENTRIES)
+ break;
+
+ bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
+ &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
+ ucnt++;
+ }
+ if_addr_runlock(ifp);
+
+ if (ucnt >= VTNET_MAX_MAC_ENTRIES) {
+ promisc = 1;
+ filter->vmf_unicast.nentries = 0;
+
+ if_printf(ifp, "more than %d MAC addresses assigned, "
+ "falling back to promiscuous mode\n",
+ VTNET_MAX_MAC_ENTRIES);
+ } else
+ filter->vmf_unicast.nentries = ucnt;
+
+ /* Multicast MAC addresses: */
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ else if (mcnt == VTNET_MAX_MAC_ENTRIES)
+ break;
+
+ bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+ &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+
+ if (mcnt >= VTNET_MAX_MAC_ENTRIES) {
+ allmulti = 1;
+ filter->vmf_multicast.nentries = 0;
+
+ if_printf(ifp, "more than %d multicast MAC addresses "
+ "assigned, falling back to all-multicast mode\n",
+ VTNET_MAX_MAC_ENTRIES);
+ } else
+ filter->vmf_multicast.nentries = mcnt;
+
+ if (promisc && allmulti)
+ goto out;
+
+ hdr.class = VIRTIO_NET_CTRL_MAC;
+ hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
+ ack = VIRTIO_NET_ERR;
+
+ sglist_init(&sg, 4, segs);
+ error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
+ error |= sglist_append(&sg, &filter->vmf_unicast,
+ sizeof(struct vtnet_mac_table));
+ error |= sglist_append(&sg, &filter->vmf_multicast,
+ sizeof(struct vtnet_mac_table));
+ error |= sglist_append(&sg, &ack, sizeof(uint8_t));
+ KASSERT(error == 0 && sg.sg_nseg == 4,
+ ("error adding MAC filtering message to sglist"));
+
+ vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
+
+ if (ack != VIRTIO_NET_OK)
+ if_printf(ifp, "error setting host MAC filter table\n");
+
+out:
+ free(filter, M_DEVBUF);
+
+ if (promisc)
+ if (vtnet_set_promisc(sc, 1) != 0)
+ if_printf(ifp, "cannot enable promiscuous mode\n");
+ if (allmulti)
+ if (vtnet_set_allmulti(sc, 1) != 0)
+ if_printf(ifp, "cannot enable all-multicast mode\n");
+}
+
+static int
+vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
+{
+ struct virtio_net_ctrl_hdr hdr;
+ struct sglist_seg segs[3];
+ struct sglist sg;
+ uint8_t ack;
+ int error;
+
+ hdr.class = VIRTIO_NET_CTRL_VLAN;
+ hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
+ ack = VIRTIO_NET_ERR;
+ error = 0;
+
+ sglist_init(&sg, 3, segs);
+ error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
+ error |= sglist_append(&sg, &tag, sizeof(uint16_t));
+ error |= sglist_append(&sg, &ack, sizeof(uint8_t));
+ KASSERT(error == 0 && sg.sg_nseg == 3,
+ ("error adding VLAN control message to sglist"));
+
+ vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
+
+ return (ack == VIRTIO_NET_OK ? 0 : EIO);
+}
+
+static void
+vtnet_rx_filter_vlan(struct vtnet_softc *sc)
+{
+ device_t dev;
+ uint32_t w, mask;
+ uint16_t tag;
+ int i, nvlans, error;
+
+ VTNET_LOCK_ASSERT(sc);
+ KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
+ ("VLAN_FILTER feature not negotiated"));
+
+ dev = sc->vtnet_dev;
+ nvlans = sc->vtnet_nvlans;
+ error = 0;
+
+ /* Enable filtering for each configured VLAN. */
+ for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
+ w = sc->vtnet_vlan_shadow[i];
+ for (mask = 1, tag = i * 32; w != 0; mask <<= 1, tag++) {
+ if ((w & mask) != 0) {
+ w &= ~mask;
+ nvlans--;
+ if (vtnet_exec_vlan_filter(sc, 1, tag) != 0)
+ error++;
+ }
+ }
+ }
+
+ KASSERT(nvlans == 0, ("VLAN count incorrect"));
+ if (error)
+ device_printf(dev, "cannot restore VLAN filter table\n");
+}
+
+static void
+vtnet_set_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
+{
+ struct ifnet *ifp;
+ int idx, bit;
+
+ KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
+ ("VLAN_FILTER feature not negotiated"));
+
+ if ((tag == 0) || (tag > 4095))
+ return;
+
+ ifp = sc->vtnet_ifp;
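+ /* One bit per VLAN ID: shadow word tag / 32, bit tag % 32. */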
+ idx = (tag >> 5) & 0x7F;
+ bit = tag & 0x1F;
+
+ VTNET_LOCK(sc);
+
+ /* Update shadow VLAN table. */
+ if (add) {
+ sc->vtnet_nvlans++;
+ sc->vtnet_vlan_shadow[idx] |= (1 << bit);
+ } else {
+ sc->vtnet_nvlans--;
+ sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
+ }
+
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
+ if (vtnet_exec_vlan_filter(sc, add, tag) != 0) {
+ device_printf(sc->vtnet_dev,
+ "cannot %s VLAN %d %s the host filter table\n",
+ add ? "add" : "remove", tag,
+ add ? "to" : "from");
+ }
+ }
+
+ VTNET_UNLOCK(sc);
+}
+
+static void
+vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
+{
+
+ if (ifp->if_softc != arg)
+ return;
+
+ vtnet_set_vlan_filter(arg, 1, tag);
+}
+
+static void
+vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
+{
+
+ if (ifp->if_softc != arg)
+ return;
+
+ vtnet_set_vlan_filter(arg, 0, tag);
+}
+
+static int
+vtnet_ifmedia_upd(struct ifnet *ifp)
+{
+ struct vtnet_softc *sc;
+ struct ifmedia *ifm;
+
+ sc = ifp->if_softc;
+ ifm = &sc->vtnet_media;
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ return (0);
+}
+
+static void
+vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct vtnet_softc *sc;
+
+ sc = ifp->if_softc;
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ VTNET_LOCK(sc);
+ if (vtnet_is_link_up(sc) != 0) {
+ ifmr->ifm_status |= IFM_ACTIVE;
+ ifmr->ifm_active |= VTNET_MEDIATYPE;
+ } else
+ ifmr->ifm_active |= IFM_NONE;
+ VTNET_UNLOCK(sc);
+}
+
+static void
+vtnet_add_statistics(struct vtnet_softc *sc)
+{
+ device_t dev;
+ struct vtnet_statistics *stats;
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *tree;
+ struct sysctl_oid_list *child;
+
+ dev = sc->vtnet_dev;
+ stats = &sc->vtnet_stats;
+ ctx = device_get_sysctl_ctx(dev);
+ tree = device_get_sysctl_tree(dev);
+ child = SYSCTL_CHILDREN(tree);
+
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_failed",
+ CTLFLAG_RD, &stats->mbuf_alloc_failed,
+ "Mbuf cluster allocation failures");
+
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_frame_too_large",
+ CTLFLAG_RD, &stats->rx_frame_too_large,
+ "Received frame larger than the mbuf chain");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
+ CTLFLAG_RD, &stats->rx_enq_replacement_failed,
+ "Enqueuing the replacement receive mbuf failed");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_mergeable_failed",
+ CTLFLAG_RD, &stats->rx_mergeable_failed,
+ "Mergeable buffers receive failures");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
+ CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
+ "Received checksum offloaded buffer with unsupported "
+ "Ethernet type");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_start",
+ CTLFLAG_RD, &stats->rx_csum_bad_start,
+ "Received checksum offloaded buffer with incorrect start offset");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
+ CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
+ "Received checksum offloaded buffer with incorrect IP protocol");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_offset",
+ CTLFLAG_RD, &stats->rx_csum_bad_offset,
+ "Received checksum offloaded buffer with incorrect offset");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_failed",
+ CTLFLAG_RD, &stats->rx_csum_failed,
+ "Received buffer checksum offload failed");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_offloaded",
+ CTLFLAG_RD, &stats->rx_csum_offloaded,
+ "Received buffer checksum offload succeeded");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_task_rescheduled",
+ CTLFLAG_RD, &stats->rx_task_rescheduled,
+ "Times the receive interrupt task rescheduled itself");
+
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_offloaded",
+ CTLFLAG_RD, &stats->tx_csum_offloaded,
+ "Offloaded checksum of transmitted buffer");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_offloaded",
+ CTLFLAG_RD, &stats->tx_tso_offloaded,
+ "Segmentation offload of transmitted buffer");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
+ CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
+ "Aborted transmit of checksum offloaded buffer with unknown "
+ "Ethernet type");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
+ CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
+ "Aborted transmit of TSO buffer with unknown Ethernet type");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_task_rescheduled",
+ CTLFLAG_RD, &stats->tx_task_rescheduled,
+ "Times the transmit interrupt task rescheduled itself");
+}
+
+static int
+vtnet_enable_rx_intr(struct vtnet_softc *sc)
+{
+
+ return (virtqueue_enable_intr(sc->vtnet_rx_vq));
+}
+
+static void
+vtnet_disable_rx_intr(struct vtnet_softc *sc)
+{
+
+ virtqueue_disable_intr(sc->vtnet_rx_vq);
+}
+
+static int
+vtnet_enable_tx_intr(struct vtnet_softc *sc)
+{
+
+#ifdef VTNET_TX_INTR_MODERATION
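+	/*
+	 * With TX interrupt moderation, leave the interrupt disabled;
+	 * completed transmit buffers are presumably reclaimed by other
+	 * paths, such as subsequent transmits or the periodic tick.
+	 */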
+ return (0);
+#else
+ return (virtqueue_enable_intr(sc->vtnet_tx_vq));
+#endif
+}
+
+static void
+vtnet_disable_tx_intr(struct vtnet_softc *sc)
+{
+
+ virtqueue_disable_intr(sc->vtnet_tx_vq);
+}
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
new file mode 100644
index 0000000..613b2b0
--- /dev/null
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -0,0 +1,240 @@
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IF_VTNETVAR_H
+#define _IF_VTNETVAR_H
+
+struct vtnet_statistics {
+ unsigned long mbuf_alloc_failed;
+
+ unsigned long rx_frame_too_large;
+ unsigned long rx_enq_replacement_failed;
+ unsigned long rx_mergeable_failed;
+ unsigned long rx_csum_bad_ethtype;
+ unsigned long rx_csum_bad_start;
+ unsigned long rx_csum_bad_ipproto;
+ unsigned long rx_csum_bad_offset;
+ unsigned long rx_csum_failed;
+ unsigned long rx_csum_offloaded;
+ unsigned long rx_task_rescheduled;
+
+ unsigned long tx_csum_offloaded;
+ unsigned long tx_tso_offloaded;
+ unsigned long tx_csum_bad_ethtype;
+ unsigned long tx_tso_bad_ethtype;
+ unsigned long tx_task_rescheduled;
+};
+
+struct vtnet_softc {
+ device_t vtnet_dev;
+ struct ifnet *vtnet_ifp;
+ struct mtx vtnet_mtx;
+
+ uint32_t vtnet_flags;
+#define VTNET_FLAG_LINK 0x0001
+#define VTNET_FLAG_SUSPENDED 0x0002
+#define VTNET_FLAG_CTRL_VQ 0x0004
+#define VTNET_FLAG_CTRL_RX 0x0008
+#define VTNET_FLAG_VLAN_FILTER 0x0010
+#define VTNET_FLAG_TSO_ECN 0x0020
+#define VTNET_FLAG_MRG_RXBUFS 0x0040
+#define VTNET_FLAG_LRO_NOMRG 0x0080
+
+ struct virtqueue *vtnet_rx_vq;
+ struct virtqueue *vtnet_tx_vq;
+ struct virtqueue *vtnet_ctrl_vq;
+
+ int vtnet_hdr_size;
+ int vtnet_tx_size;
+ int vtnet_rx_size;
+ int vtnet_rx_process_limit;
+ int vtnet_rx_mbuf_size;
+ int vtnet_rx_mbuf_count;
+ int vtnet_if_flags;
+ int vtnet_watchdog_timer;
+ uint64_t vtnet_features;
+
+ struct taskqueue *vtnet_tq;
+ struct task vtnet_rx_intr_task;
+ struct task vtnet_tx_intr_task;
+ struct task vtnet_cfgchg_task;
+
+ struct vtnet_statistics vtnet_stats;
+
+ struct callout vtnet_tick_ch;
+
+ eventhandler_tag vtnet_vlan_attach;
+ eventhandler_tag vtnet_vlan_detach;
+
+ struct ifmedia vtnet_media;
+ /*
+ * Fake media type; the host does not provide us with
+ * any real media information.
+ */
+#define VTNET_MEDIATYPE (IFM_ETHER | IFM_1000_T | IFM_FDX)
+ char vtnet_hwaddr[ETHER_ADDR_LEN];
+
+ /*
+ * During reset, the host's VLAN filtering table is lost. The
+ * array below is used to restore all the VLANs configured on
+ * this interface after a reset.
+ */
+#define VTNET_VLAN_SHADOW_SIZE (4096 / 32)
+ int vtnet_nvlans;
+ uint32_t vtnet_vlan_shadow[VTNET_VLAN_SHADOW_SIZE];
+
+ char vtnet_mtx_name[16];
+};
+
+/*
+ * When mergeable buffers are not negotiated, the vtnet_rx_header structure
+ * below is placed at the beginning of the mbuf data. Four bytes of pad are
+ * used both to keep the VirtIO header and the data non-contiguous and to
+ * keep the frame's payload 4 byte aligned.
+ *
+ * When mergeable buffers are negotiated, the host puts the VirtIO header in
+ * the beginning of the first mbuf's data.
+ */
+#define VTNET_RX_HEADER_PAD 4
+struct vtnet_rx_header {
+ struct virtio_net_hdr vrh_hdr;
+ char vrh_pad[VTNET_RX_HEADER_PAD];
+} __packed;
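+
+/*
+ * For example: sizeof(struct virtio_net_hdr) is 10 bytes, so with the 4
+ * byte pad the Ethernet frame begins at offset 14 within the mbuf data,
+ * and the payload after the 14 byte Ethernet header lands at offset 28,
+ * which is 4 byte aligned.
+ */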
+
+/*
+ * For each outgoing frame, the vtnet_tx_header below is allocated from
+ * the vtnet_tx_header_zone.
+ */
+struct vtnet_tx_header {
+ union {
+ struct virtio_net_hdr hdr;
+ struct virtio_net_hdr_mrg_rxbuf mhdr;
+ } vth_uhdr;
+
+ struct mbuf *vth_mbuf;
+};
+
+/*
+ * The VirtIO specification does not place a limit on the number of MAC
+ * addresses the guest driver may request to be filtered. In practice,
+ * the host is constrained by available resources. To simplify this driver,
+ * impose a reasonably high limit on the number of MAC addresses we will
+ * filter before falling back to promiscuous or all-multicast modes.
+ */
+#define VTNET_MAX_MAC_ENTRIES 128
+
+struct vtnet_mac_table {
+ uint32_t nentries;
+ uint8_t macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
+} __packed;
+
+struct vtnet_mac_filter {
+ struct vtnet_mac_table vmf_unicast;
+ uint32_t vmf_pad; /* Make tables non-contiguous. */
+ struct vtnet_mac_table vmf_multicast;
+};
+
+/*
+ * The MAC filter table is malloc(9)'d when needed. Ensure it will
+ * always fit in one segment.
+ */
+CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
+
+#define VTNET_WATCHDOG_TIMEOUT 5
+#define VTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP)
+
+/* Features desired/implemented by this driver. */
+#define VTNET_FEATURES \
+ (VIRTIO_NET_F_MAC | \
+ VIRTIO_NET_F_STATUS | \
+ VIRTIO_NET_F_CTRL_VQ | \
+ VIRTIO_NET_F_CTRL_RX | \
+ VIRTIO_NET_F_CTRL_VLAN | \
+ VIRTIO_NET_F_CSUM | \
+ VIRTIO_NET_F_HOST_TSO4 | \
+ VIRTIO_NET_F_HOST_TSO6 | \
+ VIRTIO_NET_F_HOST_ECN | \
+ VIRTIO_NET_F_GUEST_CSUM | \
+ VIRTIO_NET_F_GUEST_TSO4 | \
+ VIRTIO_NET_F_GUEST_TSO6 | \
+ VIRTIO_NET_F_GUEST_ECN | \
+ VIRTIO_NET_F_MRG_RXBUF | \
+ VIRTIO_RING_F_INDIRECT_DESC)
+
+/*
+ * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
+ * frames larger than 1514 bytes. We do not yet support software LRO
+ * via tcp_lro_rx().
+ */
+#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
+ VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
+
+#define VTNET_MAX_MTU 65536
+#define VTNET_MAX_RX_SIZE 65550
+
+/*
+ * Used to preallocate the VQ indirect descriptors. The first segment
+ * is reserved for the header.
+ */
+#define VTNET_MIN_RX_SEGS 2
+#define VTNET_MAX_RX_SEGS 34
+#define VTNET_MAX_TX_SEGS 34
+
+/*
+ * Assert we can receive and transmit the maximum with regular
+ * size clusters.
+ */
+CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
+CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
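+
+/*
+ * For example, on a configuration where MCLBYTES is 2048:
+ * (34 - 1) * 2048 == 67584, which covers both VTNET_MAX_RX_SIZE (65550)
+ * and VTNET_MAX_MTU (65536).
+ */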
+
+/*
+ * Determine how many mbufs are in each receive buffer. For LRO without
+ * mergeable descriptors, we must allocate an mbuf chain large enough to
+ * hold both the vtnet_rx_header and the maximum receivable data.
+ */
+#define VTNET_NEEDED_RX_MBUFS(_sc) \
+ ((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 : \
+ howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE, \
+ (_sc)->vtnet_rx_mbuf_size)
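+
+/*
+ * For example, with VTNET_FLAG_LRO_NOMRG set and 2048 byte clusters,
+ * this is howmany(14 + 65550, 2048) == 33 mbufs per receive buffer,
+ * which together with the separate header segment fits within
+ * VTNET_MAX_RX_SEGS (34).
+ */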
+
+#define VTNET_MTX(_sc) &(_sc)->vtnet_mtx
+#define VTNET_LOCK(_sc) mtx_lock(VTNET_MTX((_sc)))
+#define VTNET_UNLOCK(_sc) mtx_unlock(VTNET_MTX((_sc)))
+#define VTNET_LOCK_DESTROY(_sc) mtx_destroy(VTNET_MTX((_sc)))
+#define VTNET_LOCK_ASSERT(_sc) mtx_assert(VTNET_MTX((_sc)), MA_OWNED)
+#define VTNET_LOCK_ASSERT_NOTOWNED(_sc) \
+ mtx_assert(VTNET_MTX((_sc)), MA_NOTOWNED)
+
+#define VTNET_LOCK_INIT(_sc) do { \
+ snprintf((_sc)->vtnet_mtx_name, sizeof((_sc)->vtnet_mtx_name), \
+ "%s", device_get_nameunit((_sc)->vtnet_dev)); \
+ mtx_init(VTNET_MTX((_sc)), (_sc)->vtnet_mtx_name, \
+ "VTNET Core Lock", MTX_DEF); \
+} while (0)
+
+#endif /* _IF_VTNETVAR_H */
diff --git a/sys/dev/virtio/network/virtio_net.h b/sys/dev/virtio/network/virtio_net.h
new file mode 100644
index 0000000..7361aa1
--- /dev/null
+++ b/sys/dev/virtio/network/virtio_net.h
@@ -0,0 +1,138 @@
+/*
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VIRTIO_NET_H
+#define _VIRTIO_NET_H
+
+#include <sys/types.h>
+
+/* The feature bitmap for virtio net */
+#define VIRTIO_NET_F_CSUM 0x00001 /* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM 0x00002 /* Guest handles pkts w/ partial csum*/
+#define VIRTIO_NET_F_MAC 0x00020 /* Host has given MAC address. */
+#define VIRTIO_NET_F_GSO 0x00040 /* Host handles pkts w/ any GSO type */
+#define VIRTIO_NET_F_GUEST_TSO4 0x00080 /* Guest can handle TSOv4 in. */
+#define VIRTIO_NET_F_GUEST_TSO6 0x00100 /* Guest can handle TSOv6 in. */
+#define VIRTIO_NET_F_GUEST_ECN 0x00200 /* Guest can handle TSO[6] w/ ECN in.*/
+#define VIRTIO_NET_F_GUEST_UFO 0x00400 /* Guest can handle UFO in. */
+#define VIRTIO_NET_F_HOST_TSO4 0x00800 /* Host can handle TSOv4 in. */
+#define VIRTIO_NET_F_HOST_TSO6 0x01000 /* Host can handle TSOv6 in. */
+#define VIRTIO_NET_F_HOST_ECN 0x02000 /* Host can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_HOST_UFO 0x04000 /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF 0x08000 /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_STATUS 0x10000 /* virtio_net_config.status available*/
+#define VIRTIO_NET_F_CTRL_VQ 0x20000 /* Control channel available */
+#define VIRTIO_NET_F_CTRL_RX 0x40000 /* Control channel RX mode support */
+#define VIRTIO_NET_F_CTRL_VLAN 0x80000 /* Control channel VLAN filtering */
+#define VIRTIO_NET_F_CTRL_RX_EXTRA 0x100000 /* Extra RX mode control support */
+
+#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
+
+struct virtio_net_config {
+ /* The config defining mac address (if VIRTIO_NET_F_MAC) */
+ uint8_t mac[ETHER_ADDR_LEN];
+ /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
+ uint16_t status;
+} __packed;
+
+/*
+ * This is the first element of the scatter-gather list. If you don't
+ * specify GSO or CSUM features, you can simply ignore the header.
+ */
+struct virtio_net_hdr {
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start,csum_offset*/
+ uint8_t flags;
+#define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */
+#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
+#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
+#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
+ uint8_t gso_type;
+ uint16_t hdr_len; /* Ethernet + IP + tcp/udp hdrs */
+ uint16_t gso_size; /* Bytes to append to hdr_len per frame */
+ uint16_t csum_start; /* Position to start checksumming from */
+ uint16_t csum_offset; /* Offset after that to place checksum */
+};
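+
+/*
+ * For example, for a TCP/IPv4 frame using checksum offload, csum_start
+ * is the offset of the TCP header within the frame and csum_offset is
+ * 16, the position of the checksum field within the TCP header (6 for
+ * UDP).
+ */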
+
+/*
+ * This is the version of the header to use when the MRG_RXBUF
+ * feature has been negotiated.
+ */
+struct virtio_net_hdr_mrg_rxbuf {
+ struct virtio_net_hdr hdr;
+ uint16_t num_buffers; /* Number of merged rx buffers */
+};
+
+/*
+ * Control virtqueue data structures
+ *
+ * The control virtqueue expects a header in the first sg entry
+ * and an ack/status response in the last entry. Data for the
+ * command goes in between.
+ */
+struct virtio_net_ctrl_hdr {
+ uint8_t class;
+ uint8_t cmd;
+} __packed;
+
+typedef uint8_t virtio_net_ctrl_ack;
+
+#define VIRTIO_NET_OK 0
+#define VIRTIO_NET_ERR 1
+
+/*
+ * Control the RX mode, i.e. promiscuous, allmulti, etc.
+ * All commands require an "out" sg entry containing a 1 byte
+ * state value, zero = disable, non-zero = enable. Commands
+ * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
+ * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
+ */
+#define VIRTIO_NET_CTRL_RX 0
+#define VIRTIO_NET_CTRL_RX_PROMISC 0
+#define VIRTIO_NET_CTRL_RX_ALLMULTI 1
+#define VIRTIO_NET_CTRL_RX_ALLUNI 2
+#define VIRTIO_NET_CTRL_RX_NOMULTI 3
+#define VIRTIO_NET_CTRL_RX_NOUNI 4
+#define VIRTIO_NET_CTRL_RX_NOBCAST 5
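+
+/*
+ * For example, enabling promiscuous mode queues three descriptors: a
+ * read-only virtio_net_ctrl_hdr with class VIRTIO_NET_CTRL_RX and cmd
+ * VIRTIO_NET_CTRL_RX_PROMISC, a read-only 1 byte payload of 1, and a
+ * writable 1 byte ack the host sets to VIRTIO_NET_OK on success.
+ */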
+
+/*
+ * Control the MAC filter table.
+ *
+ * The MAC filter table is managed by the hypervisor; the guest should
+ * assume the size is infinite. Filtering should be considered
+ * non-perfect, i.e. depending on hypervisor resources, the guest may
+ * receive packets from sources not specified in the filter list.
+ *
+ * In addition to the class/cmd header, the TABLE_SET command requires
+ * two out scatterlists. Each contains a 4 byte count of entries followed
+ * by a concatenated byte stream of ETHER_ADDR_LEN byte MAC addresses. The
+ * first sg list contains unicast addresses, the second is for multicast.
+ * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
+ * is available.
+ */
+struct virtio_net_ctrl_mac {
+ uint32_t entries;
+ uint8_t macs[][ETHER_ADDR_LEN];
+} __packed;
+
+#define VIRTIO_NET_CTRL_MAC 1
+#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
+
+/*
+ * Control VLAN filtering
+ *
+ * The VLAN filter table is controlled via a simple ADD/DEL interface.
+ * VLAN IDs not added may be filtered by the hypervisor. Del is the
+ * opposite of add. Both commands expect an out entry containing a 2
+ * byte VLAN ID. VLAN filtering is available with the
+ * VIRTIO_NET_F_CTRL_VLAN feature bit.
+ */
+#define VIRTIO_NET_CTRL_VLAN 2
+#define VIRTIO_NET_CTRL_VLAN_ADD 0
+#define VIRTIO_NET_CTRL_VLAN_DEL 1
+
+#endif /* _VIRTIO_NET_H */
diff --git a/sys/dev/virtio/pci/virtio_pci.c b/sys/dev/virtio/pci/virtio_pci.c
new file mode 100644
index 0000000..dd348a5
--- /dev/null
+++ b/sys/dev/virtio/pci/virtio_pci.c
@@ -0,0 +1,1081 @@
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Driver for the VirtIO PCI interface. */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include <dev/virtio/virtio.h>
+#include <dev/virtio/virtqueue.h>
+#include <dev/virtio/pci/virtio_pci.h>
+
+#include "virtio_bus_if.h"
+#include "virtio_if.h"
+
+struct vtpci_softc {
+ device_t vtpci_dev;
+ struct resource *vtpci_res;
+ struct resource *vtpci_msix_res;
+ uint64_t vtpci_features;
+ uint32_t vtpci_flags;
+#define VIRTIO_PCI_FLAG_NO_MSI 0x0001
+#define VIRTIO_PCI_FLAG_MSI 0x0002
+#define VIRTIO_PCI_FLAG_NO_MSIX 0x0010
+#define VIRTIO_PCI_FLAG_MSIX 0x0020
+#define VIRTIO_PCI_FLAG_SHARED_MSIX 0x0040
+
+ device_t vtpci_child_dev;
+ struct virtio_feature_desc *vtpci_child_feat_desc;
+
+ /*
+ * Ideally, each virtqueue that the driver provides a callback for
+ * will receive its own MSIX vector. If there are not sufficient
+ * vectors available, we will then attempt to have all the VQs
+ * share one vector. Note that when using MSIX, the configuration
+	 * change notifications must be on their own vector.
+ *
+ * If MSIX is not available, we will attempt to have the whole
+ * device share one MSI vector, and then, finally, one legacy
+ * interrupt.
+ */
+ int vtpci_nvqs;
+ struct vtpci_virtqueue {
+ struct virtqueue *vq;
+
+		/* Index into vtpci_intr_res[] below; -1 if unused. */
+ int ires_idx;
+ } vtpci_vqx[VIRTIO_MAX_VIRTQUEUES];
+
+ /*
+ * When using MSIX interrupts, the first element of vtpci_intr_res[]
+	 * is always used for configuration change notifications. The remaining
+ * element(s) are used for the virtqueues.
+ *
+ * With MSI and legacy interrupts, only the first element of
+ * vtpci_intr_res[] is used.
+ */
+ int vtpci_nintr_res;
+ struct vtpci_intr_resource {
+ struct resource *irq;
+ int rid;
+ void *intrhand;
+ } vtpci_intr_res[1 + VIRTIO_MAX_VIRTQUEUES];
+};
+
+static int vtpci_probe(device_t);
+static int vtpci_attach(device_t);
+static int vtpci_detach(device_t);
+static int vtpci_suspend(device_t);
+static int vtpci_resume(device_t);
+static int vtpci_shutdown(device_t);
+static void vtpci_driver_added(device_t, driver_t *);
+static void vtpci_child_detached(device_t, device_t);
+static int vtpci_read_ivar(device_t, device_t, int, uintptr_t *);
+static int vtpci_write_ivar(device_t, device_t, int, uintptr_t);
+
+static uint64_t vtpci_negotiate_features(device_t, uint64_t);
+static int vtpci_with_feature(device_t, uint64_t);
+static int vtpci_alloc_virtqueues(device_t, int, int,
+ struct vq_alloc_info *);
+static int vtpci_setup_intr(device_t, enum intr_type);
+static void vtpci_stop(device_t);
+static int vtpci_reinit(device_t, uint64_t);
+static void vtpci_reinit_complete(device_t);
+static void vtpci_notify_virtqueue(device_t, uint16_t);
+static uint8_t vtpci_get_status(device_t);
+static void vtpci_set_status(device_t, uint8_t);
+static void vtpci_read_dev_config(device_t, bus_size_t, void *, int);
+static void vtpci_write_dev_config(device_t, bus_size_t, void *, int);
+
+static void vtpci_describe_features(struct vtpci_softc *, const char *,
+ uint64_t);
+static void vtpci_probe_and_attach_child(struct vtpci_softc *);
+
+static int vtpci_alloc_interrupts(struct vtpci_softc *, int, int,
+ struct vq_alloc_info *);
+static int vtpci_alloc_intr_resources(struct vtpci_softc *, int,
+ struct vq_alloc_info *);
+static int vtpci_alloc_msi(struct vtpci_softc *);
+static int vtpci_alloc_msix(struct vtpci_softc *, int);
+static int vtpci_register_msix_vector(struct vtpci_softc *, int, int);
+
+static void vtpci_free_interrupts(struct vtpci_softc *);
+static void vtpci_free_virtqueues(struct vtpci_softc *);
+static void vtpci_release_child_resources(struct vtpci_softc *);
+static void vtpci_reset(struct vtpci_softc *);
+
+static int vtpci_legacy_intr(void *);
+static int vtpci_vq_shared_intr(void *);
+static int vtpci_vq_intr(void *);
+static int vtpci_config_intr(void *);
+
+/*
+ * I/O port read/write wrappers.
+ */
+#define vtpci_read_config_1(sc, o) bus_read_1((sc)->vtpci_res, (o))
+#define vtpci_read_config_2(sc, o) bus_read_2((sc)->vtpci_res, (o))
+#define vtpci_read_config_4(sc, o) bus_read_4((sc)->vtpci_res, (o))
+#define vtpci_write_config_1(sc, o, v) bus_write_1((sc)->vtpci_res, (o), (v))
+#define vtpci_write_config_2(sc, o, v) bus_write_2((sc)->vtpci_res, (o), (v))
+#define vtpci_write_config_4(sc, o, v) bus_write_4((sc)->vtpci_res, (o), (v))
+
+/* Tunables. */
+static int vtpci_disable_msix = 0;
+TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
+
+static device_method_t vtpci_methods[] = {
+ /* Device interface. */
+ DEVMETHOD(device_probe, vtpci_probe),
+ DEVMETHOD(device_attach, vtpci_attach),
+ DEVMETHOD(device_detach, vtpci_detach),
+ DEVMETHOD(device_suspend, vtpci_suspend),
+ DEVMETHOD(device_resume, vtpci_resume),
+ DEVMETHOD(device_shutdown, vtpci_shutdown),
+
+ /* Bus interface. */
+ DEVMETHOD(bus_driver_added, vtpci_driver_added),
+ DEVMETHOD(bus_child_detached, vtpci_child_detached),
+ DEVMETHOD(bus_read_ivar, vtpci_read_ivar),
+ DEVMETHOD(bus_write_ivar, vtpci_write_ivar),
+
+ /* VirtIO bus interface. */
+ DEVMETHOD(virtio_bus_negotiate_features, vtpci_negotiate_features),
+ DEVMETHOD(virtio_bus_with_feature, vtpci_with_feature),
+ DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_alloc_virtqueues),
+ DEVMETHOD(virtio_bus_setup_intr, vtpci_setup_intr),
+ DEVMETHOD(virtio_bus_stop, vtpci_stop),
+ DEVMETHOD(virtio_bus_reinit, vtpci_reinit),
+ DEVMETHOD(virtio_bus_reinit_complete, vtpci_reinit_complete),
+ DEVMETHOD(virtio_bus_notify_vq, vtpci_notify_virtqueue),
+ DEVMETHOD(virtio_bus_read_device_config, vtpci_read_dev_config),
+ DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config),
+
+ { 0, 0 }
+};
+
+static driver_t vtpci_driver = {
+ "virtio_pci",
+ vtpci_methods,
+ sizeof(struct vtpci_softc)
+};
+
+devclass_t vtpci_devclass;
+
+DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0);
+MODULE_VERSION(virtio_pci, 1);
+MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
+MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
+
+static int
+vtpci_probe(device_t dev)
+{
+ char desc[36];
+ const char *name;
+
+ if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
+ return (ENXIO);
+
+ if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
+ pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX)
+ return (ENXIO);
+
+ if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
+ return (ENXIO);
+
+ name = virtio_device_name(pci_get_subdevice(dev));
+ if (name == NULL)
+ name = "Unknown";
+
+ snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name);
+ device_set_desc_copy(dev, desc);
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+vtpci_attach(device_t dev)
+{
+ struct vtpci_softc *sc;
+ device_t child;
+ int rid;
+
+ sc = device_get_softc(dev);
+ sc->vtpci_dev = dev;
+
+ pci_enable_busmaster(dev);
+
+ rid = PCIR_BAR(0);
+ sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
+ RF_ACTIVE);
+ if (sc->vtpci_res == NULL) {
+ device_printf(dev, "cannot map I/O space\n");
+ return (ENXIO);
+ }
+
+ if (pci_find_extcap(dev, PCIY_MSI, NULL) != 0)
+ sc->vtpci_flags |= VIRTIO_PCI_FLAG_NO_MSI;
+
+ if (pci_find_extcap(dev, PCIY_MSIX, NULL) == 0) {
+ rid = PCIR_BAR(1);
+ sc->vtpci_msix_res = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ }
+
+ if (sc->vtpci_msix_res == NULL)
+ sc->vtpci_flags |= VIRTIO_PCI_FLAG_NO_MSIX;
+
+ vtpci_reset(sc);
+
+ /* Tell the host we've noticed this device. */
+ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
+
+ if ((child = device_add_child(dev, NULL, -1)) == NULL) {
+ device_printf(dev, "cannot create child device\n");
+ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
+ vtpci_detach(dev);
+ return (ENOMEM);
+ }
+
+ sc->vtpci_child_dev = child;
+ vtpci_probe_and_attach_child(sc);
+
+ return (0);
+}
+
+static int
+vtpci_detach(device_t dev)
+{
+ struct vtpci_softc *sc;
+ device_t child;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ if ((child = sc->vtpci_child_dev) != NULL) {
+ error = device_delete_child(dev, child);
+ if (error)
+ return (error);
+ sc->vtpci_child_dev = NULL;
+ }
+
+ vtpci_reset(sc);
+
+ if (sc->vtpci_msix_res != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1),
+ sc->vtpci_msix_res);
+ sc->vtpci_msix_res = NULL;
+ }
+
+ if (sc->vtpci_res != NULL) {
+ bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
+ sc->vtpci_res);
+ sc->vtpci_res = NULL;
+ }
+
+ return (0);
+}
+
+static int
+vtpci_suspend(device_t dev)
+{
+
+ return (bus_generic_suspend(dev));
+}
+
+static int
+vtpci_resume(device_t dev)
+{
+
+ return (bus_generic_resume(dev));
+}
+
+static int
+vtpci_shutdown(device_t dev)
+{
+
+ (void) bus_generic_shutdown(dev);
+ /* Forcibly stop the host device. */
+ vtpci_stop(dev);
+
+ return (0);
+}
+
+static void
+vtpci_driver_added(device_t dev, driver_t *driver)
+{
+ struct vtpci_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ vtpci_probe_and_attach_child(sc);
+}
+
+static void
+vtpci_child_detached(device_t dev, device_t child)
+{
+ struct vtpci_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ vtpci_reset(sc);
+ vtpci_release_child_resources(sc);
+}
+
+static int
+vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
+{
+ struct vtpci_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->vtpci_child_dev != child)
+ return (ENOENT);
+
+ switch (index) {
+ case VIRTIO_IVAR_DEVTYPE:
+ *result = pci_get_subdevice(dev);
+ break;
+ default:
+ return (ENOENT);
+ }
+
+ return (0);
+}
+
+static int
+vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
+{
+ struct vtpci_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->vtpci_child_dev != child)
+ return (ENOENT);
+
+ switch (index) {
+ case VIRTIO_IVAR_FEATURE_DESC:
+ sc->vtpci_child_feat_desc = (void *) value;
+ break;
+ default:
+ return (ENOENT);
+ }
+
+ return (0);
+}
+
+static uint64_t
+vtpci_negotiate_features(device_t dev, uint64_t child_features)
+{
+ struct vtpci_softc *sc;
+ uint64_t host_features, features;
+
+ sc = device_get_softc(dev);
+
+ host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES);
+ vtpci_describe_features(sc, "host", host_features);
+
+ /*
+ * Limit negotiated features to what the driver, virtqueue, and
+ * host all support.
+ */
+ features = host_features & child_features;
+ features = virtqueue_filter_features(features);
+ sc->vtpci_features = features;
+
+ vtpci_describe_features(sc, "negotiated", features);
+ vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
+
+ return (features);
+}
+
+static int
+vtpci_with_feature(device_t dev, uint64_t feature)
+{
+ struct vtpci_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ return ((sc->vtpci_features & feature) != 0);
+}
+
+static int
+vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs,
+ struct vq_alloc_info *vq_info)
+{
+ struct vtpci_softc *sc;
+ struct vtpci_virtqueue *vqx;
+ struct vq_alloc_info *info;
+ int queue, error;
+ uint16_t vq_size;
+
+ sc = device_get_softc(dev);
+
+ if (sc->vtpci_nvqs != 0 || nvqs <= 0 ||
+ nvqs > VIRTIO_MAX_VIRTQUEUES)
+ return (EINVAL);
+
+ error = vtpci_alloc_interrupts(sc, flags, nvqs, vq_info);
+ if (error) {
+ device_printf(dev, "cannot allocate interrupts\n");
+ return (error);
+ }
+
+ if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
+ error = vtpci_register_msix_vector(sc,
+ VIRTIO_MSI_CONFIG_VECTOR, 0);
+ if (error)
+ return (error);
+ }
+
+ for (queue = 0; queue < nvqs; queue++) {
+ vqx = &sc->vtpci_vqx[queue];
+ info = &vq_info[queue];
+
+ vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, queue);
+
+ vq_size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
+ error = virtqueue_alloc(dev, queue, vq_size,
+ VIRTIO_PCI_VRING_ALIGN, 0xFFFFFFFFUL, info, &vqx->vq);
+ if (error)
+ return (error);
+
+ if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
+ error = vtpci_register_msix_vector(sc,
+ VIRTIO_MSI_QUEUE_VECTOR, vqx->ires_idx);
+ if (error)
+ return (error);
+ }
+
+ vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
+ virtqueue_paddr(vqx->vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
+
+ *info->vqai_vq = vqx->vq;
+ sc->vtpci_nvqs++;
+ }
+
+ return (0);
+}
+
+static int
+vtpci_setup_intr(device_t dev, enum intr_type type)
+{
+ struct vtpci_softc *sc;
+ struct vtpci_intr_resource *ires;
+ struct vtpci_virtqueue *vqx;
+ int i, flags, error;
+
+ sc = device_get_softc(dev);
+ flags = type | INTR_MPSAFE;
+ ires = &sc->vtpci_intr_res[0];
+
+ if ((sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) == 0) {
+ error = bus_setup_intr(dev, ires->irq, flags,
+ vtpci_legacy_intr, NULL, sc, &ires->intrhand);
+
+ return (error);
+ }
+
+ error = bus_setup_intr(dev, ires->irq, flags, vtpci_config_intr,
+ NULL, sc, &ires->intrhand);
+ if (error)
+ return (error);
+
+ if (sc->vtpci_flags & VIRTIO_PCI_FLAG_SHARED_MSIX) {
+ ires = &sc->vtpci_intr_res[1];
+ error = bus_setup_intr(dev, ires->irq, flags,
+ vtpci_vq_shared_intr, NULL, sc, &ires->intrhand);
+
+ return (error);
+ }
+
+ /* Setup an interrupt handler for each virtqueue. */
+ for (i = 0; i < sc->vtpci_nvqs; i++) {
+ vqx = &sc->vtpci_vqx[i];
+ if (vqx->ires_idx < 1)
+ continue;
+
+ ires = &sc->vtpci_intr_res[vqx->ires_idx];
+ error = bus_setup_intr(dev, ires->irq, flags,
+ vtpci_vq_intr, NULL, vqx->vq, &ires->intrhand);
+ if (error)
+ return (error);
+ }
+
+ return (0);
+}
+
+static void
+vtpci_stop(device_t dev)
+{
+
+ vtpci_reset(device_get_softc(dev));
+}
+
+static int
+vtpci_reinit(device_t dev, uint64_t features)
+{
+ struct vtpci_softc *sc;
+ struct vtpci_virtqueue *vqx;
+ struct virtqueue *vq;
+ int queue, error;
+ uint16_t vq_size;
+
+ sc = device_get_softc(dev);
+
+ /*
+ * Redrive the device initialization. This is a bit of an abuse
+ * of the specification, but both VirtualBox and QEMU/KVM seem
+ * to play nice. We do not allow the host device to change from
+ * what was originally negotiated beyond what the guest driver
+ * changed (MSIX state should not change, number of virtqueues
+ * and their size remain the same, etc).
+ */
+
+ if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
+ vtpci_stop(dev);
+
+ /*
+ * Quickly drive the status through ACK and DRIVER. The device
+ * does not become usable again until vtpci_reinit_complete().
+ */
+ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
+ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
+
+ vtpci_negotiate_features(dev, features);
+
+ if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
+ error = vtpci_register_msix_vector(sc,
+ VIRTIO_MSI_CONFIG_VECTOR, 0);
+ if (error)
+ return (error);
+ }
+
+ for (queue = 0; queue < sc->vtpci_nvqs; queue++) {
+ vqx = &sc->vtpci_vqx[queue];
+ vq = vqx->vq;
+
+ KASSERT(vq != NULL, ("vq %d not allocated", queue));
+ vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, queue);
+
+ vq_size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
+ error = virtqueue_reinit(vq, vq_size);
+ if (error)
+ return (error);
+
+ if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
+ error = vtpci_register_msix_vector(sc,
+ VIRTIO_MSI_QUEUE_VECTOR, vqx->ires_idx);
+ if (error)
+ return (error);
+ }
+
+ vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
+ virtqueue_paddr(vqx->vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
+ }
+
+ return (0);
+}
+
+static void
+vtpci_reinit_complete(device_t dev)
+{
+
+ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
+static void
+vtpci_notify_virtqueue(device_t dev, uint16_t queue)
+{
+ struct vtpci_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue);
+}
+
+static uint8_t
+vtpci_get_status(device_t dev)
+{
+ struct vtpci_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS));
+}
+
+static void
+vtpci_set_status(device_t dev, uint8_t status)
+{
+ struct vtpci_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (status != VIRTIO_CONFIG_STATUS_RESET)
+ status |= vtpci_get_status(dev);
+
+ vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status);
+}
+
+static void
+vtpci_read_dev_config(device_t dev, bus_size_t offset,
+ void *dst, int length)
+{
+ struct vtpci_softc *sc;
+ bus_size_t off;
+ uint8_t *d;
+ int size;
+
+ sc = device_get_softc(dev);
+ off = VIRTIO_PCI_CONFIG(sc) + offset;
+
+ for (d = dst; length > 0; d += size, off += size, length -= size) {
+ if (length >= 4) {
+ size = 4;
+ *(uint32_t *)d = vtpci_read_config_4(sc, off);
+ } else if (length >= 2) {
+ size = 2;
+ *(uint16_t *)d = vtpci_read_config_2(sc, off);
+ } else {
+ size = 1;
+ *d = vtpci_read_config_1(sc, off);
+ }
+ }
+}
+
+static void
+vtpci_write_dev_config(device_t dev, bus_size_t offset,
+ void *src, int length)
+{
+ struct vtpci_softc *sc;
+ bus_size_t off;
+ uint8_t *s;
+ int size;
+
+ sc = device_get_softc(dev);
+ off = VIRTIO_PCI_CONFIG(sc) + offset;
+
+ for (s = src; length > 0; s += size, off += size, length -= size) {
+ if (length >= 4) {
+ size = 4;
+ vtpci_write_config_4(sc, off, *(uint32_t *)s);
+ } else if (length >= 2) {
+ size = 2;
+ vtpci_write_config_2(sc, off, *(uint16_t *)s);
+ } else {
+ size = 1;
+ vtpci_write_config_1(sc, off, *s);
+ }
+ }
+}
+
+static void
+vtpci_describe_features(struct vtpci_softc *sc, const char *msg,
+ uint64_t features)
+{
+ device_t dev, child;
+
+ dev = sc->vtpci_dev;
+ child = sc->vtpci_child_dev;
+
+ if (device_is_attached(child) && bootverbose == 0)
+ return;
+
+ virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc);
+}
+
+static void
+vtpci_probe_and_attach_child(struct vtpci_softc *sc)
+{
+ device_t dev, child;
+
+ dev = sc->vtpci_dev;
+ child = sc->vtpci_child_dev;
+
+ if (child == NULL)
+ return;
+
+ if (device_get_state(child) != DS_NOTPRESENT)
+ return;
+
+ if (device_probe(child) != 0)
+ return;
+
+ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
+ if (device_attach(child) != 0) {
+ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
+ vtpci_reset(sc);
+ vtpci_release_child_resources(sc);
+
+ /* Reset status for future attempt. */
+ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
+ } else
+ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
+static int
+vtpci_alloc_interrupts(struct vtpci_softc *sc, int flags, int nvqs,
+ struct vq_alloc_info *vq_info)
+{
+ int i, nvectors, error;
+
+ /*
+ * Only allocate a vector for virtqueues that are actually
+ * expecting an interrupt.
+ */
+ for (nvectors = 0, i = 0; i < nvqs; i++)
+ if (vq_info[i].vqai_intr != NULL)
+ nvectors++;
+
+ if (vtpci_disable_msix != 0 ||
+ sc->vtpci_flags & VIRTIO_PCI_FLAG_NO_MSIX ||
+ flags & VIRTIO_ALLOC_VQS_DISABLE_MSIX ||
+ vtpci_alloc_msix(sc, nvectors) != 0) {
+ /*
+		 * Use MSI interrupts if available. Otherwise, we fall back
+ * to legacy interrupts.
+ */
+ if ((sc->vtpci_flags & VIRTIO_PCI_FLAG_NO_MSI) == 0 &&
+ vtpci_alloc_msi(sc) == 0)
+ sc->vtpci_flags |= VIRTIO_PCI_FLAG_MSI;
+
+ sc->vtpci_nintr_res = 1;
+ }
+
+ error = vtpci_alloc_intr_resources(sc, nvqs, vq_info);
+
+ return (error);
+}
+
+static int
+vtpci_alloc_intr_resources(struct vtpci_softc *sc, int nvqs,
+ struct vq_alloc_info *vq_info)
+{
+ device_t dev;
+ struct resource *irq;
+ struct vtpci_virtqueue *vqx;
+ int i, rid, flags, res_idx;
+
+ dev = sc->vtpci_dev;
+ flags = RF_ACTIVE;
+
+ if ((sc->vtpci_flags &
+ (VIRTIO_PCI_FLAG_MSI | VIRTIO_PCI_FLAG_MSIX)) == 0) {
+ rid = 0;
+ flags |= RF_SHAREABLE;
+ } else
+ rid = 1;
+
+ for (i = 0; i < sc->vtpci_nintr_res; i++) {
+ irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, flags);
+ if (irq == NULL)
+ return (ENXIO);
+
+ sc->vtpci_intr_res[i].irq = irq;
+ sc->vtpci_intr_res[i].rid = rid++;
+ }
+
+ /*
+	 * Map each virtqueue into the correct index in vtpci_intr_res[]. Note
+	 * the first index is reserved for configuration change notifications.
+ */
+ for (i = 0, res_idx = 1; i < nvqs; i++) {
+ vqx = &sc->vtpci_vqx[i];
+
+ if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
+ if (vq_info[i].vqai_intr == NULL)
+ vqx->ires_idx = -1;
+ else if (sc->vtpci_flags & VIRTIO_PCI_FLAG_SHARED_MSIX)
+ vqx->ires_idx = res_idx;
+ else
+ vqx->ires_idx = res_idx++;
+ } else
+ vqx->ires_idx = -1;
+ }
+
+ return (0);
+}
+
+static int
+vtpci_alloc_msi(struct vtpci_softc *sc)
+{
+ device_t dev;
+ int nmsi, cnt;
+
+ dev = sc->vtpci_dev;
+ nmsi = pci_msi_count(dev);
+
+ if (nmsi < 1)
+ return (1);
+
+ cnt = 1;
+ if (pci_alloc_msi(dev, &cnt) == 0 && cnt == 1)
+ return (0);
+
+ return (1);
+}
+
+static int
+vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors)
+{
+ device_t dev;
+ int nmsix, cnt, required;
+
+ dev = sc->vtpci_dev;
+
+ nmsix = pci_msix_count(dev);
+ if (nmsix < 1)
+ return (1);
+
+ /* An additional vector is needed for the config changes. */
+ required = nvectors + 1;
+ if (nmsix >= required) {
+ cnt = required;
+ if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required)
+ goto out;
+
+ pci_release_msi(dev);
+ }
+
+ /* Attempt shared MSIX configuration. */
+ required = 2;
+ if (nmsix >= required) {
+ cnt = required;
+ if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
+ sc->vtpci_flags |= VIRTIO_PCI_FLAG_SHARED_MSIX;
+ goto out;
+ }
+
+ pci_release_msi(dev);
+ }
+
+ return (1);
+
+out:
+ sc->vtpci_nintr_res = required;
+ sc->vtpci_flags |= VIRTIO_PCI_FLAG_MSIX;
+
+ if (bootverbose) {
+ if (sc->vtpci_flags & VIRTIO_PCI_FLAG_SHARED_MSIX)
+ device_printf(dev, "using shared virtqueue MSIX\n");
+ else
+ device_printf(dev, "using per virtqueue MSIX\n");
+ }
+
+ return (0);
+}
+
+static int
+vtpci_register_msix_vector(struct vtpci_softc *sc, int offset, int res_idx)
+{
+ device_t dev;
+ uint16_t vector;
+
+ dev = sc->vtpci_dev;
+
+ if (offset != VIRTIO_MSI_CONFIG_VECTOR &&
+ offset != VIRTIO_MSI_QUEUE_VECTOR)
+ return (EINVAL);
+
+ if (res_idx != -1) {
+ /* Map from rid to host vector. */
+ vector = sc->vtpci_intr_res[res_idx].rid - 1;
+ } else
+ vector = VIRTIO_MSI_NO_VECTOR;
+
+ /* The first resource is special; make sure it is used correctly. */
+ if (res_idx == 0) {
+ KASSERT(vector == 0, ("unexpected config vector"));
+ KASSERT(offset == VIRTIO_MSI_CONFIG_VECTOR,
+ ("unexpected config offset"));
+ }
+
+ vtpci_write_config_2(sc, offset, vector);
+
+ if (vtpci_read_config_2(sc, offset) != vector) {
+ device_printf(dev, "insufficient host resources for "
+ "MSIX interrupts\n");
+ return (ENODEV);
+ }
+
+ return (0);
+}
+
+static void
+vtpci_free_interrupts(struct vtpci_softc *sc)
+{
+ device_t dev;
+ struct vtpci_intr_resource *ires;
+ int i;
+
+ dev = sc->vtpci_dev;
+ sc->vtpci_nintr_res = 0;
+
+ if (sc->vtpci_flags & (VIRTIO_PCI_FLAG_MSI | VIRTIO_PCI_FLAG_MSIX)) {
+ pci_release_msi(dev);
+ sc->vtpci_flags &= ~(VIRTIO_PCI_FLAG_MSI |
+ VIRTIO_PCI_FLAG_MSIX | VIRTIO_PCI_FLAG_SHARED_MSIX);
+ }
+
+ for (i = 0; i < 1 + VIRTIO_MAX_VIRTQUEUES; i++) {
+ ires = &sc->vtpci_intr_res[i];
+
+ if (ires->intrhand != NULL) {
+ bus_teardown_intr(dev, ires->irq, ires->intrhand);
+ ires->intrhand = NULL;
+ }
+
+ if (ires->irq != NULL) {
+ bus_release_resource(dev, SYS_RES_IRQ, ires->rid,
+ ires->irq);
+ ires->irq = NULL;
+ }
+
+ ires->rid = -1;
+ }
+}
+
+static void
+vtpci_free_virtqueues(struct vtpci_softc *sc)
+{
+ struct vtpci_virtqueue *vqx;
+ int i;
+
+ sc->vtpci_nvqs = 0;
+
+ for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
+ vqx = &sc->vtpci_vqx[i];
+
+ if (vqx->vq != NULL) {
+ virtqueue_free(vqx->vq);
+ vqx->vq = NULL;
+ }
+ }
+}
+
+static void
+vtpci_release_child_resources(struct vtpci_softc *sc)
+{
+
+ vtpci_free_interrupts(sc);
+ vtpci_free_virtqueues(sc);
+}
+
+static void
+vtpci_reset(struct vtpci_softc *sc)
+{
+
+ /*
+	 * Setting the status to RESET returns the host device to
+	 * its original, uninitialized state.
+ */
+ vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET);
+}
+
+static int
+vtpci_legacy_intr(void *xsc)
+{
+ struct vtpci_softc *sc;
+ struct vtpci_virtqueue *vqx;
+ int i;
+ uint8_t isr;
+
+ sc = xsc;
+ vqx = &sc->vtpci_vqx[0];
+
+ /* Reading the ISR also clears it. */
+ isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR);
+
+ if (isr & VIRTIO_PCI_ISR_CONFIG)
+ vtpci_config_intr(sc);
+
+ if (isr & VIRTIO_PCI_ISR_INTR)
+ for (i = 0; i < sc->vtpci_nvqs; i++, vqx++)
+ virtqueue_intr(vqx->vq);
+
+ return (isr ? FILTER_HANDLED : FILTER_STRAY);
+}
+
+static int
+vtpci_vq_shared_intr(void *xsc)
+{
+ struct vtpci_softc *sc;
+ struct vtpci_virtqueue *vqx;
+ int i, rc;
+
+ rc = 0;
+ sc = xsc;
+ vqx = &sc->vtpci_vqx[0];
+
+ for (i = 0; i < sc->vtpci_nvqs; i++, vqx++)
+ rc |= virtqueue_intr(vqx->vq);
+
+ return (rc ? FILTER_HANDLED : FILTER_STRAY);
+}
+
+static int
+vtpci_vq_intr(void *xvq)
+{
+ struct virtqueue *vq;
+ int rc;
+
+ vq = xvq;
+ rc = virtqueue_intr(vq);
+
+ return (rc ? FILTER_HANDLED : FILTER_STRAY);
+}
+
+static int
+vtpci_config_intr(void *xsc)
+{
+ struct vtpci_softc *sc;
+ device_t child;
+ int rc;
+
+ rc = 0;
+ sc = xsc;
+ child = sc->vtpci_child_dev;
+
+ if (child != NULL)
+ rc = VIRTIO_CONFIG_CHANGE(child);
+
+ return (rc ? FILTER_HANDLED : FILTER_STRAY);
+}
diff --git a/sys/dev/virtio/pci/virtio_pci.h b/sys/dev/virtio/pci/virtio_pci.h
new file mode 100644
index 0000000..6ebfdd5
--- /dev/null
+++ b/sys/dev/virtio/pci/virtio_pci.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright IBM Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VIRTIO_PCI_H
+#define _VIRTIO_PCI_H
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_PCI_VENDORID 0x1AF4
+#define VIRTIO_PCI_DEVICEID_MIN 0x1000
+#define VIRTIO_PCI_DEVICEID_MAX 0x103F
+
+/* VirtIO ABI version, this must match exactly. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/*
+ * VirtIO Header, located in BAR 0.
+ */
+#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/
+#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
+ * also clears the register (8, RO) */
+/* Only if MSIX is enabled: */
+#define VIRTIO_MSI_CONFIG_VECTOR 20 /* configuration change vector (16, RW) */
+#define VIRTIO_MSI_QUEUE_VECTOR 22 /* vector for selected VQ notifications
+ (16, RW) */
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR 0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define VIRTIO_PCI_CONFIG(sc) \
+ (((sc)->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) ? 24 : 20)
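+
+/*
+ * With MSIX enabled, the two 16 bit vector registers above occupy bytes
+ * 20 through 23, so the device specific configuration begins at offset
+ * 24; otherwise it begins at offset 20.
+ */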
+
+/*
+ * How many bits to shift the physical queue address written to QUEUE_PFN.
+ * The value 12 is historical, stemming from the x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
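+
+/*
+ * For example, a virtqueue ring at guest physical address 0x12f000 is
+ * programmed by writing 0x12f (0x12f000 >> 12) to QUEUE_PFN.
+ */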
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+#endif /* _VIRTIO_PCI_H */
diff --git a/sys/dev/virtio/virtio.c b/sys/dev/virtio/virtio.c
new file mode 100644
index 0000000..e385575
--- /dev/null
+++ b/sys/dev/virtio/virtio.c
@@ -0,0 +1,283 @@
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/sbuf.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/_inttypes.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/virtio/virtio.h>
+#include <dev/virtio/virtqueue.h>
+
+#include "virtio_bus_if.h"
+
+static int virtio_modevent(module_t, int, void *);
+static const char *virtio_feature_name(uint64_t, struct virtio_feature_desc *);
+
+static struct virtio_ident {
+ uint16_t devid;
+ char *name;
+} virtio_ident_table[] = {
+ { VIRTIO_ID_NETWORK, "Network" },
+ { VIRTIO_ID_BLOCK, "Block" },
+ { VIRTIO_ID_CONSOLE, "Console" },
+ { VIRTIO_ID_ENTROPY, "Entropy" },
+ { VIRTIO_ID_BALLOON, "Balloon" },
+ { VIRTIO_ID_IOMEMORY, "IOMemory" },
+ { VIRTIO_ID_9P, "9P Transport" },
+
+ { 0, NULL }
+};
+
+/* Device independent features. */
+static struct virtio_feature_desc virtio_common_feature_desc[] = {
+ { VIRTIO_F_NOTIFY_ON_EMPTY, "NotifyOnEmpty" },
+ { VIRTIO_RING_F_INDIRECT_DESC, "RingIndirect" },
+ { VIRTIO_RING_F_EVENT_IDX, "EventIdx" },
+ { VIRTIO_F_BAD_FEATURE, "BadFeature" },
+
+ { 0, NULL }
+};
+
+const char *
+virtio_device_name(uint16_t devid)
+{
+ struct virtio_ident *ident;
+
+ for (ident = virtio_ident_table; ident->name != NULL; ident++) {
+ if (ident->devid == devid)
+ return (ident->name);
+ }
+
+ return (NULL);
+}
+
+int
+virtio_get_device_type(device_t dev)
+{
+ uintptr_t devtype;
+
+ devtype = -1;
+
+ BUS_READ_IVAR(device_get_parent(dev), dev,
+ VIRTIO_IVAR_DEVTYPE, &devtype);
+
+ return ((int) devtype);
+}
+
+void
+virtio_set_feature_desc(device_t dev,
+ struct virtio_feature_desc *feature_desc)
+{
+
+ BUS_WRITE_IVAR(device_get_parent(dev), dev,
+ VIRTIO_IVAR_FEATURE_DESC, (uintptr_t) feature_desc);
+}
+
+void
+virtio_describe(device_t dev, const char *msg,
+ uint64_t features, struct virtio_feature_desc *feature_desc)
+{
+ struct sbuf sb;
+ uint64_t val;
+ char *buf;
+ const char *name;
+ int n;
+
+ if ((buf = malloc(512, M_TEMP, M_NOWAIT)) == NULL) {
+ device_printf(dev, "%s features: 0x%"PRIx64"\n", msg,
+ features);
+ return;
+ }
+
+ sbuf_new(&sb, buf, 512, SBUF_FIXEDLEN);
+ sbuf_printf(&sb, "%s features: 0x%"PRIx64, msg, features);
+
+ for (n = 0, val = 1ULL << 63; val != 0; val >>= 1) {
+ /*
+ * BAD_FEATURE is used to detect broken Linux clients
+ * and therefore is not applicable to FreeBSD.
+ */
+ if (((features & val) == 0) || val == VIRTIO_F_BAD_FEATURE)
+ continue;
+
+ if (n++ == 0)
+ sbuf_cat(&sb, " <");
+ else
+ sbuf_cat(&sb, ",");
+
+ name = NULL;
+ if (feature_desc != NULL)
+ name = virtio_feature_name(val, feature_desc);
+ if (name == NULL)
+ name = virtio_feature_name(val,
+ virtio_common_feature_desc);
+
+ if (name == NULL)
+ sbuf_printf(&sb, "0x%"PRIx64, val);
+ else
+ sbuf_cat(&sb, name);
+ }
+
+ if (n > 0)
+ sbuf_cat(&sb, ">");
+
+#if __FreeBSD_version < 900020
+ sbuf_finish(&sb);
+ if (sbuf_overflowed(&sb) == 0)
+#else
+ if (sbuf_finish(&sb) == 0)
+#endif
+ device_printf(dev, "%s\n", sbuf_data(&sb));
+
+ sbuf_delete(&sb);
+ free(buf, M_TEMP);
+}
+
+static const char *
+virtio_feature_name(uint64_t val, struct virtio_feature_desc *feature_desc)
+{
+ int i;
+
+ for (i = 0; feature_desc[i].vfd_val != 0; i++)
+ if (val == feature_desc[i].vfd_val)
+ return (feature_desc[i].vfd_str);
+
+ return (NULL);
+}
+
+/*
+ * VirtIO bus method wrappers.
+ */
+
+uint64_t
+virtio_negotiate_features(device_t dev, uint64_t child_features)
+{
+
+ return (VIRTIO_BUS_NEGOTIATE_FEATURES(device_get_parent(dev),
+ child_features));
+}
+
+int
+virtio_alloc_virtqueues(device_t dev, int flags, int nvqs,
+ struct vq_alloc_info *info)
+{
+
+ return (VIRTIO_BUS_ALLOC_VIRTQUEUES(device_get_parent(dev), flags,
+ nvqs, info));
+}
+
+int
+virtio_setup_intr(device_t dev, enum intr_type type)
+{
+
+ return (VIRTIO_BUS_SETUP_INTR(device_get_parent(dev), type));
+}
+
+int
+virtio_with_feature(device_t dev, uint64_t feature)
+{
+
+ return (VIRTIO_BUS_WITH_FEATURE(device_get_parent(dev), feature));
+}
+
+void
+virtio_stop(device_t dev)
+{
+
+ VIRTIO_BUS_STOP(device_get_parent(dev));
+}
+
+int
+virtio_reinit(device_t dev, uint64_t features)
+{
+
+ return (VIRTIO_BUS_REINIT(device_get_parent(dev), features));
+}
+
+void
+virtio_reinit_complete(device_t dev)
+{
+
+ VIRTIO_BUS_REINIT_COMPLETE(device_get_parent(dev));
+}
+
+void
+virtio_read_device_config(device_t dev, bus_size_t offset, void *dst, int len)
+{
+
+ VIRTIO_BUS_READ_DEVICE_CONFIG(device_get_parent(dev),
+ offset, dst, len);
+}
+
+void
+virtio_write_device_config(device_t dev, bus_size_t offset, void *src, int len)
+{
+
+	VIRTIO_BUS_WRITE_DEVICE_CONFIG(device_get_parent(dev),
+	    offset, src, len);
+}
+
+static int
+virtio_modevent(module_t mod, int type, void *unused)
+{
+ int error;
+
+ error = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ case MOD_QUIESCE:
+ case MOD_UNLOAD:
+ case MOD_SHUTDOWN:
+ break;
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+
+ return (error);
+}
+
+static moduledata_t virtio_mod = {
+ "virtio",
+ virtio_modevent,
+ 0
+};
+
+DECLARE_MODULE(virtio, virtio_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(virtio, 1);
diff --git a/sys/dev/virtio/virtio.h b/sys/dev/virtio/virtio.h
new file mode 100644
index 0000000..ebd3c74
--- /dev/null
+++ b/sys/dev/virtio/virtio.h
@@ -0,0 +1,130 @@
+/*
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VIRTIO_H_
+#define _VIRTIO_H_
+
+#include <sys/types.h>
+
+struct vq_alloc_info;
+
+/* VirtIO device IDs. */
+#define VIRTIO_ID_NETWORK 0x01
+#define VIRTIO_ID_BLOCK 0x02
+#define VIRTIO_ID_CONSOLE 0x03
+#define VIRTIO_ID_ENTROPY 0x04
+#define VIRTIO_ID_BALLOON 0x05
+#define VIRTIO_ID_IOMEMORY 0x06
+#define VIRTIO_ID_9P 0x09
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET 0x00
+#define VIRTIO_CONFIG_STATUS_ACK 0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
+#define VIRTIO_CONFIG_STATUS_FAILED 0x80
+
+/*
+ * Generate interrupt when the virtqueue ring is
+ * completely used, even if we've suppressed them.
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
+
+/*
+ * The guest should never negotiate this feature; it
+ * is used to detect faulty drivers.
+ */
+#define VIRTIO_F_BAD_FEATURE (1 << 30)
+
+/*
+ * Some VirtIO feature bits (currently bits 28 through 31) are
+ * reserved for the transport being used (e.g. virtio_ring); the
+ * rest are per-device feature bits.
+ */
+#define VIRTIO_TRANSPORT_F_START 28
+#define VIRTIO_TRANSPORT_F_END 32
+
+/*
+ * Maximum number of virtqueues per device.
+ */
+#define VIRTIO_MAX_VIRTQUEUES 8
+
+/*
+ * Each virtqueue indirect descriptor list must be physically contiguous.
+ * To allow us to malloc(9) each list individually, limit the number
+ * supported to what will fit in one page. With 4KB pages, this is a limit
+ * of 256 descriptors. If there is ever a need for more, we can switch to
+ * contigmalloc(9) for the larger allocations, similar to what
+ * bus_dmamem_alloc(9) does.
+ *
+ * Note the sizeof(struct vring_desc) is 16 bytes.
+ */
+#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+
+/*
+ * VirtIO instance variables indices.
+ */
+#define VIRTIO_IVAR_DEVTYPE 1
+#define VIRTIO_IVAR_FEATURE_DESC 2
+
+struct virtio_feature_desc {
+ uint64_t vfd_val;
+ char *vfd_str;
+};
+
+const char *virtio_device_name(uint16_t devid);
+int virtio_get_device_type(device_t dev);
+void virtio_set_feature_desc(device_t dev,
+ struct virtio_feature_desc *feature_desc);
+void virtio_describe(device_t dev, const char *msg,
+ uint64_t features, struct virtio_feature_desc *feature_desc);
+
+/*
+ * VirtIO Bus Methods.
+ */
+uint64_t virtio_negotiate_features(device_t dev, uint64_t child_features);
+int virtio_alloc_virtqueues(device_t dev, int flags, int nvqs,
+ struct vq_alloc_info *info);
+int virtio_setup_intr(device_t dev, enum intr_type type);
+int virtio_with_feature(device_t dev, uint64_t feature);
+void virtio_stop(device_t dev);
+int virtio_reinit(device_t dev, uint64_t features);
+void virtio_reinit_complete(device_t dev);
+
+/*
+ * Read/write a variable amount from the device-specific (i.e., network)
+ * configuration region. This region is encoded in the same endianness
+ * as the guest.
+ */
+void virtio_read_device_config(device_t dev, bus_size_t offset,
+ void *dst, int length);
+void virtio_write_device_config(device_t dev, bus_size_t offset,
+ void *src, int length);
+
+/* Inlined device specific read/write functions for common lengths. */
+#define VIRTIO_RDWR_DEVICE_CONFIG(size, type) \
+static inline type \
+__CONCAT(virtio_read_dev_config_,size)(device_t dev, \
+ bus_size_t offset) \
+{ \
+ type val; \
+ virtio_read_device_config(dev, offset, &val, sizeof(type)); \
+ return (val); \
+} \
+ \
+static inline void \
+__CONCAT(virtio_write_dev_config_,size)(device_t dev, \
+ bus_size_t offset, type val) \
+{ \
+ virtio_write_device_config(dev, offset, &val, sizeof(type)); \
+}
+
+VIRTIO_RDWR_DEVICE_CONFIG(1, uint8_t);
+VIRTIO_RDWR_DEVICE_CONFIG(2, uint16_t);
+VIRTIO_RDWR_DEVICE_CONFIG(4, uint32_t);
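+
+/*
+ * For example (a hypothetical sketch), a driver could fetch its MAC
+ * address from the device specific region one byte at a time:
+ *
+ *	for (i = 0; i < ETHER_ADDR_LEN; i++)
+ *		mac[i] = virtio_read_dev_config_1(dev,
+ *		    offsetof(struct virtio_net_config, mac) + i);
+ */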
+
+#endif /* _VIRTIO_H_ */
diff --git a/sys/dev/virtio/virtio_bus_if.m b/sys/dev/virtio/virtio_bus_if.m
new file mode 100644
index 0000000..ec2029d
--- /dev/null
+++ b/sys/dev/virtio/virtio_bus_if.m
@@ -0,0 +1,92 @@
+#-
+# Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+
+#include <sys/bus.h>
+#include <machine/bus.h>
+
+INTERFACE virtio_bus;
+
+HEADER {
+struct vq_alloc_info;
+};
+
+METHOD uint64_t negotiate_features {
+ device_t dev;
+ uint64_t child_features;
+};
+
+METHOD int with_feature {
+ device_t dev;
+ uint64_t feature;
+};
+
+METHOD int alloc_virtqueues {
+ device_t dev;
+ int flags;
+ int nvqs;
+ struct vq_alloc_info *info;
+};
+HEADER {
+#define VIRTIO_ALLOC_VQS_DISABLE_MSIX 0x1
+};
+
+METHOD int setup_intr {
+ device_t dev;
+ enum intr_type type;
+};
+
+METHOD void stop {
+ device_t dev;
+};
+
+METHOD int reinit {
+ device_t dev;
+ uint64_t features;
+};
+
+METHOD void reinit_complete {
+ device_t dev;
+};
+
+METHOD void notify_vq {
+ device_t dev;
+ uint16_t queue;
+};
+
+METHOD void read_device_config {
+ device_t dev;
+ bus_size_t offset;
+ void *dst;
+ int len;
+};
+
+METHOD void write_device_config {
+ device_t dev;
+ bus_size_t offset;
+ void *src;
+ int len;
+};
diff --git a/sys/dev/virtio/virtio_if.m b/sys/dev/virtio/virtio_if.m
new file mode 100644
index 0000000..701678c
--- /dev/null
+++ b/sys/dev/virtio/virtio_if.m
@@ -0,0 +1,43 @@
+#-
+# Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+
+#include <sys/bus.h>
+
+INTERFACE virtio;
+
+CODE {
+ static int
+ virtio_default_config_change(device_t dev)
+ {
+ /* Return that we've handled the change. */
+ return (1);
+ }
+};
+
+METHOD int config_change {
+ device_t dev;
+} DEFAULT virtio_default_config_change;
diff --git a/sys/dev/virtio/virtio_ring.h b/sys/dev/virtio/virtio_ring.h
new file mode 100644
index 0000000..124cb4d
--- /dev/null
+++ b/sys/dev/virtio/virtio_ring.h
@@ -0,0 +1,119 @@
+/*
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers.
+ *
+ * Copyright Rusty Russell IBM Corporation 2007.
+ */
+/* $FreeBSD$ */
+
+#ifndef VIRTIO_RING_H
+#define VIRTIO_RING_H
+
+#include <sys/types.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer. It's unreliable, so it's simply an
+ * optimization. Guest will still kick if it's out of buffers. */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer. It's unreliable, so it's
+ * simply an optimization. */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next". */
+struct vring_desc {
+ /* Address (guest-physical). */
+ uint64_t addr;
+ /* Length. */
+ uint32_t len;
+ /* The flags as indicated above. */
+ uint16_t flags;
+ /* We chain unused descriptors via this, too. */
+ uint16_t next;
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0];
+};
+
+/* uint32_t is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ uint16_t idx;
+ struct vring_used_elem ring[0];
+};
+
+struct vring {
+ unsigned int num;
+
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a contiguous chunk of memory which
+ * looks like this. We assume num is a power of 2.
+ *
+ * struct vring {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * __u16 avail_flags;
+ * __u16 avail_idx;
+ * __u16 available[num];
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * __u16 used_flags;
+ * __u16 used_idx;
+ * struct vring_used_elem used[num];
+ * };
+ *
+ * NOTE: for VirtIO PCI, align is 4096.
+ */
+
+static inline int
+vring_size(unsigned int num, unsigned long align)
+{
+ int size;
+
+ size = num * sizeof(struct vring_desc);
+ size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
+ size = (size + align - 1) & ~(align - 1);
+ size += sizeof(struct vring_used) +
+ (num * sizeof(struct vring_used_elem));
+ return (size);
+}
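+
+/*
+ * Worked example of the computation above for a 256-entry ring with
+ * the VirtIO PCI alignment of 4096:
+ *
+ *	desc:  256 * 16 = 4096 bytes
+ *	avail: 4 + 256 * 2 = 516 bytes; 4096 + 516 rounded up to the
+ *	       4096 boundary gives 8192 bytes so far
+ *	used:  4 + 256 * 8 = 2052 bytes
+ *
+ * vring_size(256, 4096) therefore returns 10244, which callers such
+ * as virtqueue_alloc() round up to whole pages.
+ */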
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *) p;
+ vr->avail = (struct vring_avail *) (p +
+ num * sizeof(struct vring_desc));
+ vr->used = (void *)
+ (((unsigned long) &vr->avail->ring[num] + align-1) & ~(align-1));
+}
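+
+/*
+ * Illustrative sketch (the allocation parameters are hypothetical):
+ * laying out a 256-entry ring in a zeroed, physically contiguous
+ * buffer with the VirtIO PCI alignment.
+ *
+ *	uint8_t *p;
+ *	struct vring vr;
+ *
+ *	p = contigmalloc(round_page(vring_size(256, 4096)), M_DEVBUF,
+ *	    M_NOWAIT | M_ZERO, 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
+ *	if (p != NULL)
+ *		vring_init(&vr, 256, p, 4096);
+ */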
+#endif /* VIRTIO_RING_H */
diff --git a/sys/dev/virtio/virtqueue.c b/sys/dev/virtio/virtqueue.c
new file mode 100644
index 0000000..1fb182e
--- /dev/null
+++ b/sys/dev/virtio/virtqueue.c
@@ -0,0 +1,755 @@
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Implements the virtqueue interface as basically described
+ * in the original VirtIO paper.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/sglist.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/cpu.h>
+#include <machine/bus.h>
+#include <machine/atomic.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/virtio/virtio.h>
+#include <dev/virtio/virtqueue.h>
+#include <dev/virtio/virtio_ring.h>
+
+#include "virtio_bus_if.h"
+
+struct virtqueue {
+ device_t vq_dev;
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ uint16_t vq_queue_index;
+ uint16_t vq_nentries;
+ uint32_t vq_flags;
+#define VIRTQUEUE_FLAG_INDIRECT 0x0001
+
+ int vq_alignment;
+ int vq_ring_size;
+ void *vq_ring_mem;
+ int vq_max_indirect_size;
+ int vq_indirect_mem_size;
+ virtqueue_intr_t *vq_intrhand;
+ void *vq_intrhand_arg;
+
+ struct vring vq_ring;
+ uint16_t vq_free_cnt;
+ uint16_t vq_queued_cnt;
+ /*
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+ */
+ uint16_t vq_desc_head_idx;
+ /*
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+
+ struct vq_desc_extra {
+ void *cookie;
+ struct vring_desc *indirect;
+ vm_paddr_t indirect_paddr;
+ uint16_t ndescs;
+ } vq_descx[0];
+};
+
+/*
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+#define VQASSERT(_vq, _exp, _msg, ...) \
+ KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name, \
+ ##__VA_ARGS__))
+
+#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) \
+ VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, \
+ "invalid ring index: %d, max: %d", (_idx), \
+ (_vq)->vq_nentries)
+
+#define VQ_RING_ASSERT_CHAIN_TERM(_vq) \
+ VQASSERT((_vq), (_vq)->vq_desc_head_idx == \
+ VQ_RING_DESC_CHAIN_END, "full ring terminated " \
+ "incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)
+
+static int virtqueue_init_indirect(struct virtqueue *vq, int);
+static void virtqueue_free_indirect(struct virtqueue *vq);
+static void virtqueue_init_indirect_list(struct virtqueue *,
+ struct vring_desc *);
+
+static void vq_ring_init(struct virtqueue *);
+static void vq_ring_update_avail(struct virtqueue *, uint16_t);
+static uint16_t vq_ring_enqueue_segments(struct virtqueue *,
+ struct vring_desc *, uint16_t, struct sglist *, int, int);
+static int vq_ring_use_indirect(struct virtqueue *, int);
+static void vq_ring_enqueue_indirect(struct virtqueue *, void *,
+ struct sglist *, int, int);
+static void vq_ring_notify_host(struct virtqueue *, int);
+static void vq_ring_free_chain(struct virtqueue *, uint16_t);
+
+uint64_t
+virtqueue_filter_features(uint64_t features)
+{
+ uint64_t mask;
+
+ mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
+ mask |= VIRTIO_RING_F_INDIRECT_DESC;
+
+ return (features & mask);
+}
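+
+/*
+ * Assuming the legacy value VIRTIO_TRANSPORT_F_START == 28 (defined
+ * in virtio.h, not shown in this file), the mask computed above is
+ * ((1 << 28) - 1) | VIRTIO_RING_F_INDIRECT_DESC == 0x1fffffff: every
+ * device-specific feature bit below the transport range, plus the one
+ * transport feature this file implements.
+ */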
+
+int
+virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
+ vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
+{
+ struct virtqueue *vq;
+ int error;
+
+ *vqp = NULL;
+ error = 0;
+
+ if (size == 0) {
+ device_printf(dev,
+ "virtqueue %d (%s) does not exist (size is zero)\n",
+ queue, info->vqai_name);
+ return (ENODEV);
+ } else if (!powerof2(size)) {
+ device_printf(dev,
+ "virtqueue %d (%s) size is not a power of 2: %d\n",
+ queue, info->vqai_name, size);
+ return (ENXIO);
+ } else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
+ device_printf(dev, "virtqueue %d (%s) requested too many "
+ "indirect descriptors: %d, max %d\n",
+ queue, info->vqai_name, info->vqai_maxindirsz,
+ VIRTIO_MAX_INDIRECT);
+ return (EINVAL);
+ }
+
+ vq = malloc(sizeof(struct virtqueue) +
+ size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (vq == NULL) {
+ device_printf(dev, "cannot allocate virtqueue\n");
+ return (ENOMEM);
+ }
+
+ vq->vq_dev = dev;
+ strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
+ vq->vq_queue_index = queue;
+ vq->vq_alignment = align;
+ vq->vq_nentries = size;
+ vq->vq_free_cnt = size;
+ vq->vq_intrhand = info->vqai_intr;
+ vq->vq_intrhand_arg = info->vqai_intr_arg;
+
+ if (info->vqai_maxindirsz > 1) {
+ error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
+ if (error)
+ goto fail;
+ }
+
+ vq->vq_ring_size = round_page(vring_size(size, align));
+ vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
+ M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
+ if (vq->vq_ring_mem == NULL) {
+ device_printf(dev,
+ "cannot allocate memory for virtqueue ring\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ vq_ring_init(vq);
+ virtqueue_disable_intr(vq);
+
+ *vqp = vq;
+
+fail:
+ if (error)
+ virtqueue_free(vq);
+
+ return (error);
+}
+
+static int
+virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
+{
+ device_t dev;
+ struct vq_desc_extra *dxp;
+ int i, size;
+
+ dev = vq->vq_dev;
+
+ if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
+ /*
+ * Indirect descriptors requested by the driver but not
+ * negotiated. Return zero to keep the initialization
+ * going: we'll run fine without.
+ */
+ if (bootverbose)
+ device_printf(dev, "virtqueue %d (%s) requested "
+ "indirect descriptors but not negotiated\n",
+ vq->vq_queue_index, vq->vq_name);
+ return (0);
+ }
+
+ size = indirect_size * sizeof(struct vring_desc);
+ vq->vq_max_indirect_size = indirect_size;
+ vq->vq_indirect_mem_size = size;
+ vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;
+
+ for (i = 0; i < vq->vq_nentries; i++) {
+ dxp = &vq->vq_descx[i];
+
+ dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
+ if (dxp->indirect == NULL) {
+ device_printf(dev, "cannot allocate indirect list\n");
+ return (ENOMEM);
+ }
+
+ dxp->indirect_paddr = vtophys(dxp->indirect);
+ virtqueue_init_indirect_list(vq, dxp->indirect);
+ }
+
+ return (0);
+}
+
+static void
+virtqueue_free_indirect(struct virtqueue *vq)
+{
+ struct vq_desc_extra *dxp;
+ int i;
+
+ for (i = 0; i < vq->vq_nentries; i++) {
+ dxp = &vq->vq_descx[i];
+
+ if (dxp->indirect == NULL)
+ break;
+
+ free(dxp->indirect, M_DEVBUF);
+ dxp->indirect = NULL;
+ dxp->indirect_paddr = 0;
+ }
+
+ vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
+ vq->vq_indirect_mem_size = 0;
+}
+
+static void
+virtqueue_init_indirect_list(struct virtqueue *vq,
+ struct vring_desc *indirect)
+{
+ int i;
+
+ bzero(indirect, vq->vq_indirect_mem_size);
+
+ for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
+ indirect[i].next = i + 1;
+ indirect[i].next = VQ_RING_DESC_CHAIN_END;
+}
+
+int
+virtqueue_reinit(struct virtqueue *vq, uint16_t size)
+{
+ struct vq_desc_extra *dxp;
+ int i;
+
+ if (vq->vq_nentries != size) {
+ device_printf(vq->vq_dev,
+ "%s: '%s' changed size; old=%hu, new=%hu\n",
+ __func__, vq->vq_name, vq->vq_nentries, size);
+ return (EINVAL);
+ }
+
+ /* Warn if the virtqueue was not properly cleaned up. */
+ if (vq->vq_free_cnt != vq->vq_nentries) {
+ device_printf(vq->vq_dev,
+ "%s: warning, '%s' virtqueue not empty, "
+ "leaking %d entries\n", __func__, vq->vq_name,
+ vq->vq_nentries - vq->vq_free_cnt);
+ }
+
+ vq->vq_desc_head_idx = 0;
+ vq->vq_used_cons_idx = 0;
+ vq->vq_queued_cnt = 0;
+ vq->vq_free_cnt = vq->vq_nentries;
+
+ /* To be safe, reset all our allocated memory. */
+ bzero(vq->vq_ring_mem, vq->vq_ring_size);
+ for (i = 0; i < vq->vq_nentries; i++) {
+ dxp = &vq->vq_descx[i];
+ dxp->cookie = NULL;
+ dxp->ndescs = 0;
+ if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
+ virtqueue_init_indirect_list(vq, dxp->indirect);
+ }
+
+ vq_ring_init(vq);
+ virtqueue_disable_intr(vq);
+
+ return (0);
+}
+
+void
+virtqueue_free(struct virtqueue *vq)
+{
+
+ if (vq->vq_free_cnt != vq->vq_nentries) {
+ device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
+ "leaking %d entries\n", vq->vq_name,
+ vq->vq_nentries - vq->vq_free_cnt);
+ }
+
+ if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
+ virtqueue_free_indirect(vq);
+
+ if (vq->vq_ring_mem != NULL) {
+ contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
+ vq->vq_ring_size = 0;
+ vq->vq_ring_mem = NULL;
+ }
+
+ free(vq, M_DEVBUF);
+}
+
+vm_paddr_t
+virtqueue_paddr(struct virtqueue *vq)
+{
+
+ return (vtophys(vq->vq_ring_mem));
+}
+
+int
+virtqueue_size(struct virtqueue *vq)
+{
+
+ return (vq->vq_nentries);
+}
+
+int
+virtqueue_empty(struct virtqueue *vq)
+{
+
+ return (vq->vq_nentries == vq->vq_free_cnt);
+}
+
+int
+virtqueue_full(struct virtqueue *vq)
+{
+
+ return (vq->vq_free_cnt == 0);
+}
+
+void
+virtqueue_notify(struct virtqueue *vq)
+{
+
+ vq->vq_queued_cnt = 0;
+ vq_ring_notify_host(vq, 0);
+}
+
+int
+virtqueue_nused(struct virtqueue *vq)
+{
+ uint16_t used_idx, nused;
+
+ used_idx = vq->vq_ring.used->idx;
+ if (used_idx >= vq->vq_used_cons_idx)
+ nused = used_idx - vq->vq_used_cons_idx;
+ else
+ nused = UINT16_MAX - vq->vq_used_cons_idx +
+ used_idx + 1;
+ VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
+
+ return (nused);
+}
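+
+/*
+ * A worked example of the wrap handling above: with
+ * vq_used_cons_idx == 65534 and used->idx == 3 after the free-running
+ * 16-bit counter wrapped, nused == 65535 - 65534 + 3 + 1 == 5, which
+ * is exactly the modular difference (uint16_t)(3 - 65534).
+ */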
+
+int
+virtqueue_intr(struct virtqueue *vq)
+{
+
+ if (vq->vq_intrhand == NULL ||
+ vq->vq_used_cons_idx == vq->vq_ring.used->idx)
+ return (0);
+
+ vq->vq_intrhand(vq->vq_intrhand_arg);
+
+ return (1);
+}
+
+int
+virtqueue_enable_intr(struct virtqueue *vq)
+{
+
+ /*
+ * Enable interrupts, making sure we get the latest
+ * index of what's already been consumed.
+ */
+ vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+
+ mb();
+
+ /*
+ * Additional items may have been consumed between the time we
+ * last checked and the time we enabled interrupts above. Let
+ * our caller know so it processes the new entries.
+ */
+ if (vq->vq_used_cons_idx != vq->vq_ring.used->idx)
+ return (1);
+
+ return (0);
+}
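+
+/*
+ * Illustrative sketch of the expected caller pattern (the completion
+ * handler is hypothetical): drain with interrupts disabled, then use
+ * the return value above to catch entries that raced with re-enable.
+ *
+ *	void *cookie;
+ *	uint32_t len;
+ *
+ *	for (;;) {
+ *		while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
+ *			my_complete(cookie, len);
+ *		if (virtqueue_enable_intr(vq) == 0)
+ *			break;
+ *		virtqueue_disable_intr(vq);
+ *	}
+ */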
+
+void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+
+ /*
+ * Note this is only considered a hint to the host.
+ */
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+int
+virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
+ int readable, int writable)
+{
+ struct vq_desc_extra *dxp;
+ int needed;
+ uint16_t head_idx, idx;
+
+ needed = readable + writable;
+
+ VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
+ VQASSERT(vq, needed == sg->sg_nseg,
+ "segment count mismatch, %d, %d", needed, sg->sg_nseg);
+ VQASSERT(vq,
+ needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
+ "too many segments to enqueue: %d, %d/%d", needed,
+ vq->vq_nentries, vq->vq_max_indirect_size);
+
+ if (needed < 1)
+ return (EINVAL);
+ if (vq->vq_free_cnt == 0)
+ return (ENOSPC);
+
+ if (vq_ring_use_indirect(vq, needed)) {
+ vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
+ return (0);
+ } else if (vq->vq_free_cnt < needed)
+ return (EMSGSIZE);
+
+ head_idx = vq->vq_desc_head_idx;
+ VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
+ dxp = &vq->vq_descx[head_idx];
+
+ VQASSERT(vq, dxp->cookie == NULL,
+ "cookie already exists for index %d", head_idx);
+ dxp->cookie = cookie;
+ dxp->ndescs = needed;
+
+ idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
+ sg, readable, writable);
+
+ vq->vq_desc_head_idx = idx;
+ vq->vq_free_cnt -= needed;
+ if (vq->vq_free_cnt == 0)
+ VQ_RING_ASSERT_CHAIN_TERM(vq);
+ else
+ VQ_RING_ASSERT_VALID_IDX(vq, idx);
+
+ vq_ring_update_avail(vq, head_idx);
+
+ return (0);
+}
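+
+/*
+ * Illustrative sketch of an enqueue (the request structure is
+ * hypothetical): readable segments must be appended to the sglist
+ * first, then the writable ones, matching the counts passed in.
+ *
+ *	struct sglist_seg segs[2];
+ *	struct sglist sg;
+ *	int error;
+ *
+ *	sglist_init(&sg, 2, segs);
+ *	error = sglist_append(&sg, &req->hdr, sizeof(req->hdr));
+ *	error |= sglist_append(&sg, req->data, req->len);
+ *	if (error == 0)
+ *		error = virtqueue_enqueue(vq, req, &sg, 1, 1);
+ */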
+
+void *
+virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
+{
+ struct vring_used_elem *uep;
+ void *cookie;
+ uint16_t used_idx, desc_idx;
+
+ if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
+ return (NULL);
+
+ used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
+ uep = &vq->vq_ring.used->ring[used_idx];
+
+ mb();
+ desc_idx = (uint16_t) uep->id;
+ if (len != NULL)
+ *len = uep->len;
+
+ vq_ring_free_chain(vq, desc_idx);
+
+ cookie = vq->vq_descx[desc_idx].cookie;
+ VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
+ vq->vq_descx[desc_idx].cookie = NULL;
+
+ return (cookie);
+}
+
+void *
+virtqueue_poll(struct virtqueue *vq, uint32_t *len)
+{
+ void *cookie;
+
+ while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
+ cpu_spinwait();
+
+ return (cookie);
+}
+
+void *
+virtqueue_drain(struct virtqueue *vq, int *last)
+{
+ void *cookie;
+ int idx;
+
+ cookie = NULL;
+ idx = *last;
+
+ while (idx < vq->vq_nentries && cookie == NULL) {
+ if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
+ vq->vq_descx[idx].cookie = NULL;
+ /* Free chain to keep free count consistent. */
+ vq_ring_free_chain(vq, idx);
+ }
+ idx++;
+ }
+
+ *last = idx;
+
+ return (cookie);
+}
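+
+/*
+ * Illustrative detach-time pattern (the request free routine is
+ * hypothetical): recover every outstanding cookie before releasing
+ * the virtqueue itself.
+ *
+ *	int last = 0;
+ *	void *cookie;
+ *
+ *	while ((cookie = virtqueue_drain(vq, &last)) != NULL)
+ *		my_free_request(cookie);
+ *	virtqueue_free(vq);
+ */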
+
+void
+virtqueue_dump(struct virtqueue *vq)
+{
+
+ if (vq == NULL)
+ return;
+
+ printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
+ "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
+ "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
+ vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
+ virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
+ vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
+ vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
+ vq->vq_ring.used->flags);
+}
+
+static void
+vq_ring_init(struct virtqueue *vq)
+{
+ struct vring *vr;
+ char *ring_mem;
+ int i, size;
+
+ ring_mem = vq->vq_ring_mem;
+ size = vq->vq_nentries;
+ vr = &vq->vq_ring;
+
+ vring_init(vr, size, ring_mem, vq->vq_alignment);
+
+ for (i = 0; i < size - 1; i++)
+ vr->desc[i].next = i + 1;
+ vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+}
+
+static void
+vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t avail_idx;
+
+ /*
+ * Place the head of the descriptor chain into the next slot and make
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
+ */
+ avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+
+ mb();
+ vq->vq_ring.avail->idx++;
+
+ /* Keep pending count until virtqueue_notify() for debugging. */
+ vq->vq_queued_cnt++;
+}
+
+static uint16_t
+vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
+ uint16_t head_idx, struct sglist *sg, int readable, int writable)
+{
+ struct sglist_seg *seg;
+ struct vring_desc *dp;
+ int i, needed;
+ uint16_t idx;
+
+ needed = readable + writable;
+
+ for (i = 0, idx = head_idx, seg = sg->sg_segs;
+ i < needed;
+ i++, idx = dp->next, seg++) {
+ VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
+ "premature end of free desc chain");
+
+ dp = &desc[idx];
+ dp->addr = seg->ss_paddr;
+ dp->len = seg->ss_len;
+ dp->flags = 0;
+
+ if (i < needed - 1)
+ dp->flags |= VRING_DESC_F_NEXT;
+ if (i >= readable)
+ dp->flags |= VRING_DESC_F_WRITE;
+ }
+
+ return (idx);
+}
+
+static int
+vq_ring_use_indirect(struct virtqueue *vq, int needed)
+{
+
+ if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
+ return (0);
+
+ if (vq->vq_max_indirect_size < needed)
+ return (0);
+
+ if (needed < 2)
+ return (0);
+
+ return (1);
+}
+
+static void
+vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
+ struct sglist *sg, int readable, int writable)
+{
+ struct vring_desc *dp;
+ struct vq_desc_extra *dxp;
+ int needed;
+ uint16_t head_idx;
+
+ needed = readable + writable;
+ VQASSERT(vq, needed <= vq->vq_max_indirect_size,
+ "enqueuing too many indirect descriptors");
+
+ head_idx = vq->vq_desc_head_idx;
+ VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
+ dp = &vq->vq_ring.desc[head_idx];
+ dxp = &vq->vq_descx[head_idx];
+
+ VQASSERT(vq, dxp->cookie == NULL,
+ "cookie already exists for index %d", head_idx);
+ dxp->cookie = cookie;
+ dxp->ndescs = 1;
+
+ dp->addr = dxp->indirect_paddr;
+ dp->len = needed * sizeof(struct vring_desc);
+ dp->flags = VRING_DESC_F_INDIRECT;
+
+ vq_ring_enqueue_segments(vq, dxp->indirect, 0,
+ sg, readable, writable);
+
+ vq->vq_desc_head_idx = dp->next;
+ vq->vq_free_cnt--;
+ if (vq->vq_free_cnt == 0)
+ VQ_RING_ASSERT_CHAIN_TERM(vq);
+ else
+ VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);
+
+ vq_ring_update_avail(vq, head_idx);
+}
+
+static void
+vq_ring_notify_host(struct virtqueue *vq, int force)
+{
+
+ mb();
+
+ if (force ||
+ (vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0)
+ VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
+}
+
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+ struct vring_desc *dp;
+ struct vq_desc_extra *dxp;
+
+ VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
+ dp = &vq->vq_ring.desc[desc_idx];
+ dxp = &vq->vq_descx[desc_idx];
+
+ if (vq->vq_free_cnt == 0)
+ VQ_RING_ASSERT_CHAIN_TERM(vq);
+
+ vq->vq_free_cnt += dxp->ndescs;
+ dxp->ndescs--;
+
+ if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+ while (dp->flags & VRING_DESC_F_NEXT) {
+ VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
+ dp = &vq->vq_ring.desc[dp->next];
+ dxp->ndescs--;
+ }
+ }
+ VQASSERT(vq, dxp->ndescs == 0, "failed to free entire desc chain");
+
+ /*
+ * We must append the existing free chain, if any, to the end of
+ * the newly freed chain. If the virtqueue was completely used,
+ * then the head would be VQ_RING_DESC_CHAIN_END (asserted above).
+ */
+ dp->next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = desc_idx;
+}
diff --git a/sys/dev/virtio/virtqueue.h b/sys/dev/virtio/virtqueue.h
new file mode 100644
index 0000000..e790e65
--- /dev/null
+++ b/sys/dev/virtio/virtqueue.h
@@ -0,0 +1,98 @@
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VIRTIO_VIRTQUEUE_H
+#define _VIRTIO_VIRTQUEUE_H
+
+#include <sys/types.h>
+
+struct virtqueue;
+struct sglist;
+
+/* Support for indirect buffer descriptors. */
+#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
+
+/* The guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ * The host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX (1 << 29)
+
+/* Device callback for a virtqueue interrupt. */
+typedef int virtqueue_intr_t(void *);
+
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+/* One for each virtqueue the device wishes to allocate. */
+struct vq_alloc_info {
+ char vqai_name[VIRTQUEUE_MAX_NAME_SZ];
+ int vqai_maxindirsz;
+ virtqueue_intr_t *vqai_intr;
+ void *vqai_intr_arg;
+ struct virtqueue **vqai_vq;
+};
+
+#define VQ_ALLOC_INFO_INIT(_i,_nsegs,_intr,_arg,_vqp,_str,...) do { \
+ snprintf((_i)->vqai_name, VIRTQUEUE_MAX_NAME_SZ, _str, \
+ ##__VA_ARGS__); \
+ (_i)->vqai_maxindirsz = (_nsegs); \
+ (_i)->vqai_intr = (_intr); \
+ (_i)->vqai_intr_arg = (_arg); \
+ (_i)->vqai_vq = (_vqp); \
+} while (0)
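+
+/*
+ * Illustrative sketch of a driver filling in its allocation info
+ * (the virtio_alloc_virtqueues() bus wrapper and the softc fields
+ * are assumptions, not part of this header):
+ *
+ *	struct vq_alloc_info vq_info;
+ *
+ *	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtfoo_max_nsegs,
+ *	    vtfoo_vq_intr, sc, &sc->vtfoo_vq, "%s request",
+ *	    device_get_nameunit(dev));
+ *	error = virtio_alloc_virtqueues(dev, 0, 1, &vq_info);
+ */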
+
+uint64_t virtqueue_filter_features(uint64_t features);
+
+int virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
+ int align, vm_paddr_t highaddr, struct vq_alloc_info *info,
+ struct virtqueue **vqp);
+void *virtqueue_drain(struct virtqueue *vq, int *last);
+void virtqueue_free(struct virtqueue *vq);
+int virtqueue_reinit(struct virtqueue *vq, uint16_t size);
+
+int virtqueue_intr(struct virtqueue *vq);
+int virtqueue_enable_intr(struct virtqueue *vq);
+void virtqueue_disable_intr(struct virtqueue *vq);
+
+/* Get physical address of the virtqueue ring. */
+vm_paddr_t virtqueue_paddr(struct virtqueue *vq);
+
+int virtqueue_full(struct virtqueue *vq);
+int virtqueue_empty(struct virtqueue *vq);
+int virtqueue_size(struct virtqueue *vq);
+int virtqueue_nused(struct virtqueue *vq);
+void virtqueue_notify(struct virtqueue *vq);
+void virtqueue_dump(struct virtqueue *vq);
+
+int virtqueue_enqueue(struct virtqueue *vq, void *cookie,
+ struct sglist *sg, int readable, int writable);
+void *virtqueue_dequeue(struct virtqueue *vq, uint32_t *len);
+void *virtqueue_poll(struct virtqueue *vq, uint32_t *len);
+
+#endif /* _VIRTIO_VIRTQUEUE_H */
diff --git a/sys/dev/wb/if_wb.c b/sys/dev/wb/if_wb.c
index f924d3e..daac022 100644
--- a/sys/dev/wb/if_wb.c
+++ b/sys/dev/wb/if_wb.c
@@ -113,6 +113,7 @@ __FBSDID("$FreeBSD$");
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
/* "device miibus" required. See GENERIC if you get errors here. */
@@ -129,7 +130,7 @@ MODULE_DEPEND(wb, miibus, 1, 1, 1);
/*
* Various supported device vendors/types and their names.
*/
-static struct wb_type wb_devs[] = {
+static const struct wb_type wb_devs[] = {
{ WB_VENDORID, WB_DEVICEID_840F,
"Winbond W89C840F 10/100BaseTX" },
{ CP_VENDORID, CP_DEVICEID_RL100,
@@ -166,10 +167,6 @@ static void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void wb_eeprom_putbyte(struct wb_softc *, int);
static void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
static void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);
-static void wb_mii_sync(struct wb_softc *);
-static void wb_mii_send(struct wb_softc *, u_int32_t, int);
-static int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *);
-static int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *);
static void wb_setcfg(struct wb_softc *, u_int32_t);
static void wb_setmulti(struct wb_softc *);
@@ -182,6 +179,24 @@ static int wb_miibus_readreg(device_t, int, int);
static int wb_miibus_writereg(device_t, int, int, int);
static void wb_miibus_statchg(device_t);
+/*
+ * MII bit-bang glue
+ */
+static uint32_t wb_mii_bitbang_read(device_t);
+static void wb_mii_bitbang_write(device_t, uint32_t);
+
+static const struct mii_bitbang_ops wb_mii_bitbang_ops = {
+ wb_mii_bitbang_read,
+ wb_mii_bitbang_write,
+ {
+ WB_SIO_MII_DATAIN, /* MII_BIT_MDO */
+ WB_SIO_MII_DATAOUT, /* MII_BIT_MDI */
+ WB_SIO_MII_CLK, /* MII_BIT_MDC */
+ WB_SIO_MII_DIR, /* MII_BIT_DIR_HOST_PHY */
+ 0, /* MII_BIT_DIR_PHY_HOST */
+ }
+};
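+
+/*
+ * The WB_SIO bit names are from the PHY's point of view: the host
+ * drives WB_SIO_MII_DATAIN and samples WB_SIO_MII_DATAOUT, as the
+ * removed hand-rolled wb_mii_readreg()/wb_mii_writereg() below did,
+ * so MII_BIT_MDO maps to DATAIN and MII_BIT_MDI to DATAOUT.
+ */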
+
#ifdef WB_USEIOSPACE
#define WB_RES SYS_RES_IOPORT
#define WB_RID WB_PCI_LOIO
@@ -262,8 +277,6 @@ wb_eeprom_putbyte(sc, addr)
SIO_CLR(WB_SIO_EE_CLK);
DELAY(100);
}
-
- return;
}
/*
@@ -304,8 +317,6 @@ wb_eeprom_getword(sc, addr, dest)
CSR_WRITE_4(sc, WB_SIO, 0);
*dest = word;
-
- return;
}
/*
@@ -330,194 +341,39 @@ wb_read_eeprom(sc, dest, off, cnt, swap)
else
*ptr = word;
}
-
- return;
}
/*
- * Sync the PHYs by setting data bit and strobing the clock 32 times.
+ * Read the MII serial port for the MII bit-bang module.
*/
-static void
-wb_mii_sync(sc)
- struct wb_softc *sc;
+static uint32_t
+wb_mii_bitbang_read(device_t dev)
{
- register int i;
+ struct wb_softc *sc;
+ uint32_t val;
- SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN);
+ sc = device_get_softc(dev);
- for (i = 0; i < 32; i++) {
- SIO_SET(WB_SIO_MII_CLK);
- DELAY(1);
- SIO_CLR(WB_SIO_MII_CLK);
- DELAY(1);
- }
+ val = CSR_READ_4(sc, WB_SIO);
+ CSR_BARRIER(sc, WB_SIO, 4,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- return;
+ return (val);
}
/*
- * Clock a series of bits through the MII.
+ * Write the MII serial port for the MII bit-bang module.
*/
static void
-wb_mii_send(sc, bits, cnt)
- struct wb_softc *sc;
- u_int32_t bits;
- int cnt;
-{
- int i;
-
- SIO_CLR(WB_SIO_MII_CLK);
-
- for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
- if (bits & i) {
- SIO_SET(WB_SIO_MII_DATAIN);
- } else {
- SIO_CLR(WB_SIO_MII_DATAIN);
- }
- DELAY(1);
- SIO_CLR(WB_SIO_MII_CLK);
- DELAY(1);
- SIO_SET(WB_SIO_MII_CLK);
- }
-}
-
-/*
- * Read an PHY register through the MII.
- */
-static int
-wb_mii_readreg(sc, frame)
- struct wb_softc *sc;
- struct wb_mii_frame *frame;
-
+wb_mii_bitbang_write(device_t dev, uint32_t val)
{
- int i, ack;
-
- /*
- * Set up frame for RX.
- */
- frame->mii_stdelim = WB_MII_STARTDELIM;
- frame->mii_opcode = WB_MII_READOP;
- frame->mii_turnaround = 0;
- frame->mii_data = 0;
-
- CSR_WRITE_4(sc, WB_SIO, 0);
-
- /*
- * Turn on data xmit.
- */
- SIO_SET(WB_SIO_MII_DIR);
-
- wb_mii_sync(sc);
-
- /*
- * Send command/address info.
- */
- wb_mii_send(sc, frame->mii_stdelim, 2);
- wb_mii_send(sc, frame->mii_opcode, 2);
- wb_mii_send(sc, frame->mii_phyaddr, 5);
- wb_mii_send(sc, frame->mii_regaddr, 5);
-
- /* Idle bit */
- SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN));
- DELAY(1);
- SIO_SET(WB_SIO_MII_CLK);
- DELAY(1);
-
- /* Turn off xmit. */
- SIO_CLR(WB_SIO_MII_DIR);
- /* Check for ack */
- SIO_CLR(WB_SIO_MII_CLK);
- DELAY(1);
- ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT;
- SIO_SET(WB_SIO_MII_CLK);
- DELAY(1);
- SIO_CLR(WB_SIO_MII_CLK);
- DELAY(1);
- SIO_SET(WB_SIO_MII_CLK);
- DELAY(1);
-
- /*
- * Now try reading data bits. If the ack failed, we still
- * need to clock through 16 cycles to keep the PHY(s) in sync.
- */
- if (ack) {
- for(i = 0; i < 16; i++) {
- SIO_CLR(WB_SIO_MII_CLK);
- DELAY(1);
- SIO_SET(WB_SIO_MII_CLK);
- DELAY(1);
- }
- goto fail;
- }
-
- for (i = 0x8000; i; i >>= 1) {
- SIO_CLR(WB_SIO_MII_CLK);
- DELAY(1);
- if (!ack) {
- if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT)
- frame->mii_data |= i;
- DELAY(1);
- }
- SIO_SET(WB_SIO_MII_CLK);
- DELAY(1);
- }
-
-fail:
+ struct wb_softc *sc;
- SIO_CLR(WB_SIO_MII_CLK);
- DELAY(1);
- SIO_SET(WB_SIO_MII_CLK);
- DELAY(1);
-
- if (ack)
- return(1);
- return(0);
-}
-
-/*
- * Write to a PHY register through the MII.
- */
-static int
-wb_mii_writereg(sc, frame)
- struct wb_softc *sc;
- struct wb_mii_frame *frame;
-
-{
-
- /*
- * Set up frame for TX.
- */
-
- frame->mii_stdelim = WB_MII_STARTDELIM;
- frame->mii_opcode = WB_MII_WRITEOP;
- frame->mii_turnaround = WB_MII_TURNAROUND;
-
- /*
- * Turn on data output.
- */
- SIO_SET(WB_SIO_MII_DIR);
-
- wb_mii_sync(sc);
-
- wb_mii_send(sc, frame->mii_stdelim, 2);
- wb_mii_send(sc, frame->mii_opcode, 2);
- wb_mii_send(sc, frame->mii_phyaddr, 5);
- wb_mii_send(sc, frame->mii_regaddr, 5);
- wb_mii_send(sc, frame->mii_turnaround, 2);
- wb_mii_send(sc, frame->mii_data, 16);
-
- /* Idle bit. */
- SIO_SET(WB_SIO_MII_CLK);
- DELAY(1);
- SIO_CLR(WB_SIO_MII_CLK);
- DELAY(1);
-
- /*
- * Turn off xmit.
- */
- SIO_CLR(WB_SIO_MII_DIR);
+ sc = device_get_softc(dev);
- return(0);
+ CSR_WRITE_4(sc, WB_SIO, val);
+ CSR_BARRIER(sc, WB_SIO, 4,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
static int
@@ -525,18 +381,8 @@ wb_miibus_readreg(dev, phy, reg)
device_t dev;
int phy, reg;
{
- struct wb_softc *sc;
- struct wb_mii_frame frame;
-
- sc = device_get_softc(dev);
- bzero((char *)&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- wb_mii_readreg(sc, &frame);
-
- return(frame.mii_data);
+ return (mii_bitbang_readreg(dev, &wb_mii_bitbang_ops, phy, reg));
}
static int
@@ -544,18 +390,8 @@ wb_miibus_writereg(dev, phy, reg, data)
device_t dev;
int phy, reg, data;
{
- struct wb_softc *sc;
- struct wb_mii_frame frame;
- sc = device_get_softc(dev);
-
- bzero((char *)&frame, sizeof(frame));
-
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- frame.mii_data = data;
-
- wb_mii_writereg(sc, &frame);
+ mii_bitbang_writereg(dev, &wb_mii_bitbang_ops, phy, reg, data);
return(0);
}
@@ -570,8 +406,6 @@ wb_miibus_statchg(dev)
sc = device_get_softc(dev);
mii = device_get_softc(sc->wb_miibus);
wb_setcfg(sc, mii->mii_media_active);
-
- return;
}
/*
@@ -627,8 +461,6 @@ wb_setmulti(sc)
CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
-
- return;
}
/*
@@ -671,8 +503,6 @@ wb_setcfg(sc, media)
if (restart)
WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);
-
- return;
}
static void
@@ -742,7 +572,7 @@ static int
wb_probe(dev)
device_t dev;
{
- struct wb_type *t;
+ const struct wb_type *t;
t = wb_devs;
@@ -1001,7 +831,7 @@ wb_bfree(buf, args)
void *buf;
void *args;
{
- return;
+
}
/*
@@ -1127,8 +957,6 @@ wb_rxeoc(sc)
WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);
-
- return;
}
/*
@@ -1185,8 +1013,6 @@ wb_txeof(sc)
sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
}
-
- return;
}
/*
@@ -1212,8 +1038,6 @@ wb_txeoc(sc)
CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
}
}
-
- return;
}
static void
@@ -1300,8 +1124,6 @@ wb_intr(arg)
}
WB_UNLOCK(sc);
-
- return;
}
static void
@@ -1320,8 +1142,6 @@ wb_tick(xsc)
if (sc->wb_timer > 0 && --sc->wb_timer == 0)
wb_watchdog(sc);
callout_reset(&sc->wb_stat_callout, hz, wb_tick, sc);
-
- return;
}
/*
@@ -1520,8 +1340,6 @@ wb_start_locked(ifp)
* Set a timeout in case the chip goes out to lunch.
*/
sc->wb_timer = 5;
-
- return;
}
static void
@@ -1647,8 +1465,6 @@ wb_init_locked(sc)
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
callout_reset(&sc->wb_stat_callout, hz, wb_tick, sc);
-
- return;
}
/*
@@ -1690,8 +1506,6 @@ wb_ifmedia_sts(ifp, ifmr)
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
WB_UNLOCK(sc);
-
- return;
}
static int
@@ -1757,8 +1571,6 @@ wb_watchdog(sc)
if (ifp->if_snd.ifq_head != NULL)
wb_start_locked(ifp);
-
- return;
}
/*
@@ -1809,8 +1621,6 @@ wb_stop(sc)
sizeof(sc->wb_ldata->wb_tx_list));
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-
- return;
}
/*
diff --git a/sys/dev/wb/if_wbreg.h b/sys/dev/wb/if_wbreg.h
index 95d0a8e..16eb8a1 100644
--- a/sys/dev/wb/if_wbreg.h
+++ b/sys/dev/wb/if_wbreg.h
@@ -341,26 +341,9 @@ struct wb_chain_data {
struct wb_type {
u_int16_t wb_vid;
u_int16_t wb_did;
- char *wb_name;
+ const char *wb_name;
};
-struct wb_mii_frame {
- u_int8_t mii_stdelim;
- u_int8_t mii_opcode;
- u_int8_t mii_phyaddr;
- u_int8_t mii_regaddr;
- u_int8_t mii_turnaround;
- u_int16_t mii_data;
-};
-
-/*
- * MII constants
- */
-#define WB_MII_STARTDELIM 0x01
-#define WB_MII_READOP 0x02
-#define WB_MII_WRITEOP 0x01
-#define WB_MII_TURNAROUND 0x02
-
struct wb_softc {
struct ifnet *wb_ifp; /* interface info */
device_t wb_dev;
@@ -395,6 +378,9 @@ struct wb_softc {
#define CSR_READ_2(sc, reg) bus_read_2(sc->wb_res, reg)
#define CSR_READ_1(sc, reg) bus_read_1(sc->wb_res, reg)
+#define CSR_BARRIER(sc, reg, length, flags) \
+ bus_barrier(sc->wb_res, reg, length, flags)
+
#define WB_TIMEOUT 1000
/*
diff --git a/sys/dev/wi/if_wi.c b/sys/dev/wi/if_wi.c
index 69db6a7..c31ad7b 100644
--- a/sys/dev/wi/if_wi.c
+++ b/sys/dev/wi/if_wi.c
@@ -166,7 +166,8 @@ wi_write_val(struct wi_softc *sc, int rid, u_int16_t val)
return wi_write_rid(sc, rid, &val, sizeof(val));
}
-SYSCTL_NODE(_hw, OID_AUTO, wi, CTLFLAG_RD, 0, "Wireless driver parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, wi, CTLFLAG_RD, 0,
+ "Wireless driver parameters");
static struct timeval lasttxerror; /* time of last tx error msg */
static int curtxeps; /* current tx error msgs/sec */
diff --git a/sys/dev/xe/if_xe.c b/sys/dev/xe/if_xe.c
index 77b953a..57411ba 100644
--- a/sys/dev/xe/if_xe.c
+++ b/sys/dev/xe/if_xe.c
@@ -201,7 +201,7 @@ static void xe_reg_dump(struct xe_softc *scp);
#ifdef XE_DEBUG
/* sysctl vars */
-SYSCTL_NODE(_hw, OID_AUTO, xe, CTLFLAG_RD, 0, "if_xe parameters");
+static SYSCTL_NODE(_hw, OID_AUTO, xe, CTLFLAG_RD, 0, "if_xe parameters");
int xe_debug = 0;
SYSCTL_INT(_hw_xe, OID_AUTO, debug, CTLFLAG_RW, &xe_debug, 0,
diff --git a/sys/dev/xen/balloon/balloon.c b/sys/dev/xen/balloon/balloon.c
index 81d1094..6aa8c9b 100644
--- a/sys/dev/xen/balloon/balloon.c
+++ b/sys/dev/xen/balloon/balloon.c
@@ -49,7 +49,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/vm_page.h>
-MALLOC_DEFINE(M_BALLOON, "Balloon", "Xen Balloon Driver");
+static MALLOC_DEFINE(M_BALLOON, "Balloon", "Xen Balloon Driver");
struct mtx balloon_mutex;
@@ -84,7 +84,7 @@ static struct balloon_stats balloon_stats;
#define bs balloon_stats
SYSCTL_DECL(_dev_xen);
-SYSCTL_NODE(_dev_xen, OID_AUTO, balloon, CTLFLAG_RD, NULL, "Balloon");
+static SYSCTL_NODE(_dev_xen, OID_AUTO, balloon, CTLFLAG_RD, NULL, "Balloon");
SYSCTL_ULONG(_dev_xen_balloon, OID_AUTO, current, CTLFLAG_RD,
&bs.current_pages, 0, "Current allocation");
SYSCTL_ULONG(_dev_xen_balloon, OID_AUTO, target, CTLFLAG_RD,
@@ -298,8 +298,7 @@ decrease_reservation(unsigned long nr_pages)
nr_pages = ARRAY_SIZE(frame_list);
for (i = 0; i < nr_pages; i++) {
- int color = 0;
- if ((page = vm_page_alloc(NULL, color++,
+ if ((page = vm_page_alloc(NULL, 0,
VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
nr_pages = i;
diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c
index e52c342..c42bfd9 100644
--- a/sys/dev/xen/blkback/blkback.c
+++ b/sys/dev/xen/blkback/blkback.c
@@ -121,7 +121,7 @@ __FBSDID("$FreeBSD$");
/**
* Custom malloc type for all driver allocations.
*/
-MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");
+static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");
#ifdef XBB_DEBUG
#define DPRINTF(fmt, args...) \
diff --git a/sys/dev/xen/blkfront/blkfront.c b/sys/dev/xen/blkfront/blkfront.c
index fb530f3..954cf1f 100644
--- a/sys/dev/xen/blkfront/blkfront.c
+++ b/sys/dev/xen/blkfront/blkfront.c
@@ -81,7 +81,7 @@ static int blkif_completion(struct xb_command *);
static void blkif_free(struct xb_softc *);
static void blkif_queue_cb(void *, bus_dma_segment_t *, int, int);
-MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");
+static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");
#define GRANT_INVALID_REF 0
diff --git a/sys/dev/xl/if_xl.c b/sys/dev/xl/if_xl.c
index e09e434..b774e7f 100644
--- a/sys/dev/xl/if_xl.c
+++ b/sys/dev/xl/if_xl.c
@@ -127,6 +127,7 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <dev/mii/mii.h>
+#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
@@ -160,7 +161,7 @@ MODULE_DEPEND(xl, miibus, 1, 1, 1);
/*
* Various supported device vendors/types and their names.
*/
-static const struct xl_type xl_devs[] = {
+static const struct xl_type xl_devs[] = {
{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
"3Com 3c900-TPO Etherlink XL" },
{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
@@ -258,10 +259,6 @@ static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int xl_eeprom_wait(struct xl_softc *);
static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
-static void xl_mii_sync(struct xl_softc *);
-static void xl_mii_send(struct xl_softc *, u_int32_t, int);
-static int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
-static int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
static void xl_rxfilter(struct xl_softc *);
static void xl_rxfilter_90x(struct xl_softc *);
@@ -286,6 +283,24 @@ static int xl_miibus_writereg(device_t, int, int, int);
static void xl_miibus_statchg(device_t);
static void xl_miibus_mediainit(device_t);
+/*
+ * MII bit-bang glue
+ */
+static uint32_t xl_mii_bitbang_read(device_t);
+static void xl_mii_bitbang_write(device_t, uint32_t);
+
+static const struct mii_bitbang_ops xl_mii_bitbang_ops = {
+ xl_mii_bitbang_read,
+ xl_mii_bitbang_write,
+ {
+ XL_MII_DATA, /* MII_BIT_MDO */
+ XL_MII_DATA, /* MII_BIT_MDI */
+ XL_MII_CLK, /* MII_BIT_MDC */
+ XL_MII_DIR, /* MII_BIT_DIR_HOST_PHY */
+ 0, /* MII_BIT_DIR_PHY_HOST */
+ }
+};
+
static device_method_t xl_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, xl_probe),
@@ -359,194 +374,66 @@ xl_wait(struct xl_softc *sc)
* some chips/CPUs/processor speeds/bus speeds/etc but not
* with others.
*/
-#define MII_SET(x) \
- CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
- CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
-
-#define MII_CLR(x) \
- CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
- CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
-
-/*
- * Sync the PHYs by setting data bit and strobing the clock 32 times.
- */
-static void
-xl_mii_sync(struct xl_softc *sc)
-{
- register int i;
-
- XL_SEL_WIN(4);
- MII_SET(XL_MII_DIR|XL_MII_DATA);
-
- for (i = 0; i < 32; i++) {
- MII_SET(XL_MII_CLK);
- MII_SET(XL_MII_DATA);
- MII_SET(XL_MII_DATA);
- MII_CLR(XL_MII_CLK);
- MII_SET(XL_MII_DATA);
- MII_SET(XL_MII_DATA);
- }
-}
/*
- * Clock a series of bits through the MII.
+ * Read the MII serial port for the MII bit-bang module.
*/
-static void
-xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
+static uint32_t
+xl_mii_bitbang_read(device_t dev)
{
- int i;
-
- XL_SEL_WIN(4);
- MII_CLR(XL_MII_CLK);
-
- for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
- if (bits & i) {
- MII_SET(XL_MII_DATA);
- } else {
- MII_CLR(XL_MII_DATA);
- }
- MII_CLR(XL_MII_CLK);
- MII_SET(XL_MII_CLK);
- }
-}
-
-/*
- * Read an PHY register through the MII.
- */
-static int
-xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
-{
- int i, ack;
-
- /* Set up frame for RX. */
- frame->mii_stdelim = XL_MII_STARTDELIM;
- frame->mii_opcode = XL_MII_READOP;
- frame->mii_turnaround = 0;
- frame->mii_data = 0;
-
- /* Select register window 4. */
- XL_SEL_WIN(4);
-
- CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
- /* Turn on data xmit. */
- MII_SET(XL_MII_DIR);
-
- xl_mii_sync(sc);
-
- /* Send command/address info. */
- xl_mii_send(sc, frame->mii_stdelim, 2);
- xl_mii_send(sc, frame->mii_opcode, 2);
- xl_mii_send(sc, frame->mii_phyaddr, 5);
- xl_mii_send(sc, frame->mii_regaddr, 5);
-
- /* Idle bit */
- MII_CLR((XL_MII_CLK|XL_MII_DATA));
- MII_SET(XL_MII_CLK);
-
- /* Turn off xmit. */
- MII_CLR(XL_MII_DIR);
-
- /* Check for ack */
- MII_CLR(XL_MII_CLK);
- ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
- MII_SET(XL_MII_CLK);
-
- /*
- * Now try reading data bits. If the ack failed, we still
- * need to clock through 16 cycles to keep the PHY(s) in sync.
- */
- if (ack) {
- for (i = 0; i < 16; i++) {
- MII_CLR(XL_MII_CLK);
- MII_SET(XL_MII_CLK);
- }
- goto fail;
- }
+ struct xl_softc *sc;
+ uint32_t val;
- for (i = 0x8000; i; i >>= 1) {
- MII_CLR(XL_MII_CLK);
- if (!ack) {
- if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
- frame->mii_data |= i;
- }
- MII_SET(XL_MII_CLK);
- }
+ sc = device_get_softc(dev);
-fail:
- MII_CLR(XL_MII_CLK);
- MII_SET(XL_MII_CLK);
+ /* We're already in window 4. */
+ val = CSR_READ_2(sc, XL_W4_PHY_MGMT);
+ CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
- return (ack ? 1 : 0);
+ return (val);
}
/*
- * Write to a PHY register through the MII.
+ * Write the MII serial port for the MII bit-bang module.
*/
-static int
-xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
+static void
+xl_mii_bitbang_write(device_t dev, uint32_t val)
{
+ struct xl_softc *sc;
- /* Set up frame for TX. */
- frame->mii_stdelim = XL_MII_STARTDELIM;
- frame->mii_opcode = XL_MII_WRITEOP;
- frame->mii_turnaround = XL_MII_TURNAROUND;
-
- /* Select the window 4. */
- XL_SEL_WIN(4);
-
- /* Turn on data output. */
- MII_SET(XL_MII_DIR);
-
- xl_mii_sync(sc);
-
- xl_mii_send(sc, frame->mii_stdelim, 2);
- xl_mii_send(sc, frame->mii_opcode, 2);
- xl_mii_send(sc, frame->mii_phyaddr, 5);
- xl_mii_send(sc, frame->mii_regaddr, 5);
- xl_mii_send(sc, frame->mii_turnaround, 2);
- xl_mii_send(sc, frame->mii_data, 16);
-
- /* Idle bit. */
- MII_SET(XL_MII_CLK);
- MII_CLR(XL_MII_CLK);
-
- /* Turn off xmit. */
- MII_CLR(XL_MII_DIR);
+ sc = device_get_softc(dev);
- return (0);
+ /* We're already in window 4. */
+ CSR_WRITE_2(sc, XL_W4_PHY_MGMT, val);
+ CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2,
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
static int
xl_miibus_readreg(device_t dev, int phy, int reg)
{
struct xl_softc *sc;
- struct xl_mii_frame frame;
sc = device_get_softc(dev);
- bzero((char *)&frame, sizeof(frame));
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
-
- xl_mii_readreg(sc, &frame);
+ /* Select the window 4. */
+ XL_SEL_WIN(4);
- return (frame.mii_data);
+ return (mii_bitbang_readreg(dev, &xl_mii_bitbang_ops, phy, reg));
}
static int
xl_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct xl_softc *sc;
- struct xl_mii_frame frame;
sc = device_get_softc(dev);
- bzero((char *)&frame, sizeof(frame));
- frame.mii_phyaddr = phy;
- frame.mii_regaddr = reg;
- frame.mii_data = data;
+ /* Select the window 4. */
+ XL_SEL_WIN(4);
- xl_mii_writereg(sc, &frame);
+ mii_bitbang_writereg(dev, &xl_mii_bitbang_ops, phy, reg, data);
return (0);
}
diff --git a/sys/dev/xl/if_xlreg.h b/sys/dev/xl/if_xlreg.h
index f5494f5..b27e038 100644
--- a/sys/dev/xl/if_xlreg.h
+++ b/sys/dev/xl/if_xlreg.h
@@ -556,27 +556,10 @@ struct xl_chain_data {
struct xl_type {
u_int16_t xl_vid;
u_int16_t xl_did;
- char *xl_name;
-};
-
-struct xl_mii_frame {
- u_int8_t mii_stdelim;
- u_int8_t mii_opcode;
- u_int8_t mii_phyaddr;
- u_int8_t mii_regaddr;
- u_int8_t mii_turnaround;
- u_int16_t mii_data;
+ const char *xl_name;
};
/*
- * MII constants
- */
-#define XL_MII_STARTDELIM 0x01
-#define XL_MII_READOP 0x02
-#define XL_MII_WRITEOP 0x01
-#define XL_MII_TURNAROUND 0x02
-
-/*
* The 3C905B adapters implement a few features that we want to
* take advantage of, namely the multicast hash filter. With older
* chips, you only have the option of turning on reception of all
@@ -680,8 +663,17 @@ struct xl_stats {
#define CSR_READ_1(sc, reg) \
bus_space_read_1(sc->xl_btag, sc->xl_bhandle, reg)
-#define XL_SEL_WIN(x) \
- CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_WINSEL | x)
+#define CSR_BARRIER(sc, reg, length, flags) \
+ bus_space_barrier(sc->xl_btag, sc->xl_bhandle, reg, length, flags)
+
+#define XL_SEL_WIN(x) do { \
+ CSR_BARRIER(sc, XL_COMMAND, 2, \
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); \
+ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_WINSEL | x); \
+ CSR_BARRIER(sc, XL_COMMAND, 2, \
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); \
+} while (0)
+
#define XL_TIMEOUT 1000
/*