Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/acpica/Osd/OsdSchedule.c | 11
-rw-r--r--  sys/dev/acpica/Osd/OsdTable.c | 4
-rw-r--r--  sys/dev/acpica/acpi.c | 18
-rw-r--r--  sys/dev/acpica/acpi_pci.c | 2
-rw-r--r--  sys/dev/ahci/ahci.c | 10
-rw-r--r--  sys/dev/ath/ath_hal/ah.c | 7
-rw-r--r--  sys/dev/ath/ath_hal/ah.h | 4
-rw-r--r--  sys/dev/ath/ath_hal/ah_internal.h | 1
-rw-r--r--  sys/dev/ath/ath_hal/ar5210/ar5210_attach.c | 2
-rw-r--r--  sys/dev/ath/ath_hal/ar5211/ar5211_attach.c | 2
-rw-r--r--  sys/dev/ath/ath_hal/ar5212/ar5212_attach.c | 2
-rw-r--r--  sys/dev/ath/ath_hal/ar5312/ar5312_attach.c | 2
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_attach.c | 2
-rw-r--r--  sys/dev/ath/ath_hal/ar9001/ar9130_attach.c | 4
-rw-r--r--  sys/dev/ath/ath_hal/ar9001/ar9160_attach.c | 1
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9280_attach.c | 1
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9285_attach.c | 1
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287_attach.c | 1
-rw-r--r--  sys/dev/ath/if_ath.c | 89
-rw-r--r--  sys/dev/ath/if_ath_lna_div.c | 4
-rw-r--r--  sys/dev/ath/if_ath_pci.c | 98
-rw-r--r--  sys/dev/ath/if_ath_pci_devlist.h | 669
-rw-r--r--  sys/dev/ath/if_athvar.h | 22
-rw-r--r--  sys/dev/bge/if_bge.c | 2
-rw-r--r--  sys/dev/ce/if_ce.c | 7
-rw-r--r--  sys/dev/cp/if_cp.c | 2
-rw-r--r--  sys/dev/ctau/if_ct.c | 2
-rw-r--r--  sys/dev/cx/if_cx.c | 2
-rw-r--r--  sys/dev/cxgb/cxgb_adapter.h | 2
-rw-r--r--  sys/dev/cxgb/cxgb_main.c | 122
-rw-r--r--  sys/dev/cxgb/cxgb_sge.c | 4
-rw-r--r--  sys/dev/cxgbe/adapter.h | 7
-rw-r--r--  sys/dev/cxgbe/t4_main.c | 137
-rw-r--r--  sys/dev/dwc/if_dwc.c | 2
-rw-r--r--  sys/dev/e1000/if_em.c | 8
-rw-r--r--  sys/dev/e1000/if_lem.c | 8
-rw-r--r--  sys/dev/firewire/firewire.c | 675
-rw-r--r--  sys/dev/firewire/firewire.h | 20
-rw-r--r--  sys/dev/firewire/firewire_phy.h | 2
-rw-r--r--  sys/dev/firewire/firewirereg.h | 42
-rw-r--r--  sys/dev/firewire/fwcrom.c | 82
-rw-r--r--  sys/dev/firewire/fwdev.c | 138
-rw-r--r--  sys/dev/firewire/fwdma.c | 30
-rw-r--r--  sys/dev/firewire/fwdma.h | 16
-rw-r--r--  sys/dev/firewire/fwmem.c | 63
-rw-r--r--  sys/dev/firewire/fwohci.c | 524
-rw-r--r--  sys/dev/firewire/fwohci_pci.c | 21
-rw-r--r--  sys/dev/firewire/fwohcireg.h | 29
-rw-r--r--  sys/dev/firewire/fwohcivar.h | 2
-rw-r--r--  sys/dev/firewire/iec68113.h | 2
-rw-r--r--  sys/dev/firewire/if_fwe.c | 23
-rw-r--r--  sys/dev/firewire/if_fwip.c | 23
-rw-r--r--  sys/dev/firewire/sbp.c | 185
-rw-r--r--  sys/dev/firewire/sbp.h | 6
-rw-r--r--  sys/dev/firewire/sbp_targ.c | 72
-rw-r--r--  sys/dev/fxp/if_fxp.c | 8
-rw-r--r--  sys/dev/ie/if_ie.c | 9
-rw-r--r--  sys/dev/iscsi_initiator/isc_cam.c | 33
-rw-r--r--  sys/dev/ixgb/if_ixgb.c | 51
-rw-r--r--  sys/dev/ixgbe/ixgbe.c | 52
-rw-r--r--  sys/dev/ixgbe/ixv.c | 10
-rwxr-xr-x  sys/dev/ixl/i40e_osdep.h | 13
-rwxr-xr-x  sys/dev/ixl/if_ixl.c | 44
-rw-r--r--  sys/dev/ixl/if_ixlv.c | 5
-rw-r--r--  sys/dev/ixl/ixl.h | 43
-rwxr-xr-x  sys/dev/ixl/ixl_txrx.c | 45
-rw-r--r--  sys/dev/ixl/ixlvc.c | 37
-rw-r--r--  sys/dev/nfe/if_nfe.c | 4
-rw-r--r--  sys/dev/sfxge/common/efsys.h | 44
-rw-r--r--  sys/dev/sfxge/sfxge.c | 84
-rw-r--r--  sys/dev/sfxge/sfxge.h | 52
-rw-r--r--  sys/dev/sfxge/sfxge_dma.c | 13
-rw-r--r--  sys/dev/sfxge/sfxge_ev.c | 52
-rw-r--r--  sys/dev/sfxge/sfxge_intr.c | 20
-rw-r--r--  sys/dev/sfxge/sfxge_port.c | 56
-rw-r--r--  sys/dev/sfxge/sfxge_rx.c | 94
-rw-r--r--  sys/dev/sfxge/sfxge_rx.h | 29
-rw-r--r--  sys/dev/sfxge/sfxge_tx.c | 207
-rw-r--r--  sys/dev/sfxge/sfxge_tx.h | 30
-rw-r--r--  sys/dev/sound/usb/uaudio.c | 59
-rw-r--r--  sys/dev/sound/usb/uaudioreg.h | 8
-rw-r--r--  sys/dev/uart/uart.h | 1
-rw-r--r--  sys/dev/uart/uart_bus_fdt.c | 1
-rw-r--r--  sys/dev/uart/uart_dev_imx.c | 42
-rw-r--r--  sys/dev/uart/uart_dev_msm.c | 568
-rw-r--r--  sys/dev/uart/uart_dev_msm.h | 229
-rw-r--r--  sys/dev/usb/controller/xhci.c | 21
-rw-r--r--  sys/dev/usb/controller/xhci.h | 3
-rw-r--r--  sys/dev/usb/usb_dev.c | 21
-rw-r--r--  sys/dev/usb/usbdevs | 1
-rw-r--r--  sys/dev/usb/wlan/if_urtwn.c | 1
-rw-r--r--  sys/dev/vt/vt.h | 1
-rw-r--r--  sys/dev/vt/vt_core.c | 251
-rw-r--r--  sys/dev/xen/balloon/balloon.c | 90
-rw-r--r--  sys/dev/xen/blkback/blkback.c | 117
-rw-r--r--  sys/dev/xen/xenstore/xenstore.c | 1703
-rw-r--r--  sys/dev/xen/xenstore/xenstore_dev.c | 287
-rw-r--r--  sys/dev/xen/xenstore/xenstored_dev.c | 169
98 files changed, 6178 insertions(+), 1647 deletions(-)
diff --git a/sys/dev/acpica/Osd/OsdSchedule.c b/sys/dev/acpica/Osd/OsdSchedule.c
index d49f886..e998fee 100644
--- a/sys/dev/acpica/Osd/OsdSchedule.c
+++ b/sys/dev/acpica/Osd/OsdSchedule.c
@@ -60,6 +60,13 @@ SYSCTL_INT(_debug_acpi, OID_AUTO, max_tasks, CTLFLAG_RDTUN, &acpi_max_tasks,
0, "Maximum acpi tasks");
/*
+ * Track and report the system's demand for task slots.
+ */
+static int acpi_tasks_hiwater;
+SYSCTL_INT(_debug_acpi, OID_AUTO, tasks_hiwater, CTLFLAG_RD,
+ &acpi_tasks_hiwater, 1, "Peak demand for ACPI event task slots.");
+
+/*
* Allow the user to tune the number of task threads we start. It seems
* some systems have problems with increased parallelism.
*/
@@ -151,6 +158,10 @@ acpi_task_enqueue(int priority, ACPI_OSD_EXEC_CALLBACK Function, void *Context)
acpi_task_count++;
break;
}
+
+ if (i > acpi_tasks_hiwater)
+ atomic_cmpset_int(&acpi_tasks_hiwater, acpi_tasks_hiwater, i);
+
if (at == NULL) {
printf("AcpiOsExecute: failed to enqueue task, consider increasing "
"the debug.acpi.max_tasks tunable\n");
diff --git a/sys/dev/acpica/Osd/OsdTable.c b/sys/dev/acpica/Osd/OsdTable.c
index 1e738f8..006b2ce 100644
--- a/sys/dev/acpica/Osd/OsdTable.c
+++ b/sys/dev/acpica/Osd/OsdTable.c
@@ -91,8 +91,8 @@ AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
if (hdr == NULL || sz == 0)
return (AE_ERROR);
#ifndef notyet
- /* Assume SSDT is loaded with DSDT. */
- AcpiGbl_DisableSsdtTableLoad = TRUE;
+ /* Assume SSDT is installed with DSDT. */
+ AcpiGbl_DisableSsdtTableInstall = TRUE;
#endif
*NewTable = hdr;
return (AE_OK);
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 1913912..37c79a5 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -256,16 +256,6 @@ static char acpi_remove_interface[256];
TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface,
sizeof(acpi_remove_interface));
-/*
- * Allow override of whether methods execute in parallel or not.
- * Enable this for serial behavior, which fixes "AE_ALREADY_EXISTS"
- * errors for AML that really can't handle parallel method execution.
- * It is off by default since this breaks recursive methods and
- * some IBMs use such code.
- */
-static int acpi_serialize_methods;
-TUNABLE_INT("hw.acpi.serialize_methods", &acpi_serialize_methods);
-
/* Allow users to dump Debug objects without ACPI debugger. */
static int acpi_debug_objects;
TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects);
@@ -279,6 +269,12 @@ TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack);
SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN,
&acpi_interpreter_slack, 1, "Turn on interpreter slack mode.");
+/* Ignore register widths set by FADT and use default widths instead. */
+static int acpi_ignore_reg_width = 1;
+TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width);
+SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN,
+ &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT");
+
#ifdef __amd64__
/* Reset system clock while resuming. XXX Remove once tested. */
static int acpi_reset_clock = 1;
@@ -474,9 +470,9 @@ acpi_attach(device_t dev)
* Set the globals from our tunables. This is needed because ACPI-CA
* uses UINT8 for some values and we have no tunable_byte.
*/
- AcpiGbl_AllMethodsSerialized = acpi_serialize_methods ? TRUE : FALSE;
AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE;
AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
+ AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE;
#ifndef ACPI_DEBUG
/*
diff --git a/sys/dev/acpica/acpi_pci.c b/sys/dev/acpica/acpi_pci.c
index f4e794a..d94b6f0 100644
--- a/sys/dev/acpica/acpi_pci.c
+++ b/sys/dev/acpica/acpi_pci.c
@@ -282,7 +282,7 @@ acpi_pci_probe(device_t dev)
if (acpi_get_handle(dev) == NULL)
return (ENXIO);
device_set_desc(dev, "ACPI PCI bus");
- return (0);
+ return (BUS_PROBE_DEFAULT);
}
static int
diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c
index 0c3197a..69fa76b 100644
--- a/sys/dev/ahci/ahci.c
+++ b/sys/dev/ahci/ahci.c
@@ -1126,6 +1126,7 @@ ahci_ch_intr_direct(void *arg)
struct ahci_channel *ch = (struct ahci_channel *)arg;
struct ccb_hdr *ccb_h;
uint32_t istatus;
+ STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq);
/* Read interrupt statuses. */
istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
@@ -1136,9 +1137,14 @@ ahci_ch_intr_direct(void *arg)
ch->batch = 1;
ahci_ch_intr_main(ch, istatus);
ch->batch = 0;
+ /*
+ * Prevent the possibility of issues caused by processing the queue
+ * while unlocked below by moving the contents to a local queue.
+ */
+ STAILQ_CONCAT(&tmp_doneq, &ch->doneq);
mtx_unlock(&ch->mtx);
- while ((ccb_h = STAILQ_FIRST(&ch->doneq)) != NULL) {
- STAILQ_REMOVE_HEAD(&ch->doneq, sim_links.stqe);
+ while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) {
+ STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe);
xpt_done_direct((union ccb *)ccb_h);
}
}
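The change above drains ch->doneq into a stack-local list with STAILQ_CONCAT() while the channel mutex is still held, so completions can then be delivered unlocked without racing other threads on the shared queue. Below is a minimal userland sketch of that drain-under-lock pattern, assuming a BSD <sys/queue.h>; the struct and names are illustrative only, and a pthread mutex stands in for the kernel mtx.

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	int			id;
	STAILQ_ENTRY(work)	link;
};

static STAILQ_HEAD(workq, work) doneq = STAILQ_HEAD_INITIALIZER(doneq);
static pthread_mutex_t doneq_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
process_completions(void)
{
	struct workq tmp = STAILQ_HEAD_INITIALIZER(tmp);
	struct work *w;

	/* Move everything to a local list while the lock is held... */
	pthread_mutex_lock(&doneq_mtx);
	STAILQ_CONCAT(&tmp, &doneq);
	pthread_mutex_unlock(&doneq_mtx);

	/* ...then complete each item with the lock dropped. */
	while ((w = STAILQ_FIRST(&tmp)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp, link);
		printf("completing work item %d\n", w->id);
		free(w);
	}
}

int
main(void)
{
	for (int i = 0; i < 3; i++) {
		struct work *w = malloc(sizeof(*w));

		if (w == NULL)
			return (1);
		w->id = i;
		pthread_mutex_lock(&doneq_mtx);
		STAILQ_INSERT_TAIL(&doneq, w, link);
		pthread_mutex_unlock(&doneq_mtx);
	}
	process_completions();
	return (0);
}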
diff --git a/sys/dev/ath/ath_hal/ah.c b/sys/dev/ath/ath_hal/ah.c
index 750248e..2aefc89 100644
--- a/sys/dev/ath/ath_hal/ah.c
+++ b/sys/dev/ath/ath_hal/ah.c
@@ -55,7 +55,9 @@ ath_hal_probe(uint16_t vendorid, uint16_t devid)
*/
struct ath_hal*
ath_hal_attach(uint16_t devid, HAL_SOFTC sc,
- HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata, HAL_STATUS *error)
+ HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
+ HAL_OPS_CONFIG *ah_config,
+ HAL_STATUS *error)
{
struct ath_hal_chip * const *pchip;
@@ -66,7 +68,8 @@ ath_hal_attach(uint16_t devid, HAL_SOFTC sc,
/* XXX don't have vendorid, assume atheros one works */
if (chip->probe(ATHEROS_VENDOR_ID, devid) == AH_NULL)
continue;
- ah = chip->attach(devid, sc, st, sh, eepromdata, error);
+ ah = chip->attach(devid, sc, st, sh, eepromdata, ah_config,
+ error);
if (ah != AH_NULL) {
/* copy back private state to public area */
ah->ah_devid = AH_PRIVATE(ah)->ah_devid;
diff --git a/sys/dev/ath/ath_hal/ah.h b/sys/dev/ath/ath_hal/ah.h
index 4f2d3e9..239ebc8 100644
--- a/sys/dev/ath/ath_hal/ah.h
+++ b/sys/dev/ath/ath_hal/ah.h
@@ -1264,6 +1264,7 @@ typedef struct
int ath_hal_show_bb_panic;
int ath_hal_ant_ctrl_comm2g_switch_enable;
int ath_hal_ext_atten_margin_cfg;
+ int ath_hal_min_gainidx;
int ath_hal_war70c;
uint32_t ath_hal_mci_config;
} HAL_OPS_CONFIG;
@@ -1616,7 +1617,8 @@ extern const char *__ahdecl ath_hal_probe(uint16_t vendorid, uint16_t devid);
* be returned if the status parameter is non-zero.
*/
extern struct ath_hal * __ahdecl ath_hal_attach(uint16_t devid, HAL_SOFTC,
- HAL_BUS_TAG, HAL_BUS_HANDLE, uint16_t *eepromdata, HAL_STATUS* status);
+ HAL_BUS_TAG, HAL_BUS_HANDLE, uint16_t *eepromdata,
+ HAL_OPS_CONFIG *ah_config, HAL_STATUS* status);
extern const char *ath_hal_mac_name(struct ath_hal *);
extern const char *ath_hal_rf_name(struct ath_hal *);
diff --git a/sys/dev/ath/ath_hal/ah_internal.h b/sys/dev/ath/ath_hal/ah_internal.h
index 2e35148..0892a42 100644
--- a/sys/dev/ath/ath_hal/ah_internal.h
+++ b/sys/dev/ath/ath_hal/ah_internal.h
@@ -91,6 +91,7 @@ struct ath_hal_chip {
const char *(*probe)(uint16_t vendorid, uint16_t devid);
struct ath_hal *(*attach)(uint16_t devid, HAL_SOFTC,
HAL_BUS_TAG, HAL_BUS_HANDLE, uint16_t *eepromdata,
+ HAL_OPS_CONFIG *ah,
HAL_STATUS *error);
};
#ifndef AH_CHIP
diff --git a/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c b/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c
index ee11ce9..ceafa99 100644
--- a/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c
+++ b/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c
@@ -183,7 +183,7 @@ static HAL_BOOL ar5210FillCapabilityInfo(struct ath_hal *ah);
*/
static struct ath_hal *
ar5210Attach(uint16_t devid, HAL_SOFTC sc, HAL_BUS_TAG st, HAL_BUS_HANDLE sh,
- uint16_t *eepromdata, HAL_STATUS *status)
+ uint16_t *eepromdata, HAL_OPS_CONFIG *ah_config, HAL_STATUS *status)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
struct ath_hal_5210 *ahp;
diff --git a/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c b/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c
index 4549295..3416dc0 100644
--- a/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c
+++ b/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c
@@ -203,7 +203,7 @@ ar5211GetRadioRev(struct ath_hal *ah)
static struct ath_hal *
ar5211Attach(uint16_t devid, HAL_SOFTC sc,
HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
- HAL_STATUS *status)
+ HAL_OPS_CONFIG *ah_config, HAL_STATUS *status)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
struct ath_hal_5211 *ahp;
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c b/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
index e0af27c..a95f244 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
@@ -317,7 +317,7 @@ ar5212IsMacSupported(uint8_t macVersion, uint8_t macRev)
static struct ath_hal *
ar5212Attach(uint16_t devid, HAL_SOFTC sc,
HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
- HAL_STATUS *status)
+ HAL_OPS_CONFIG *ah_config, HAL_STATUS *status)
{
#define AH_EEPROM_PROTECT(ah) \
(AH_PRIVATE(ah)->ah_ispcie)? AR_EEPROM_PROTECT_PCIE : AR_EEPROM_PROTECT)
diff --git a/sys/dev/ath/ath_hal/ar5312/ar5312_attach.c b/sys/dev/ath/ath_hal/ar5312/ar5312_attach.c
index 4ca1a4d..5c84eb8 100644
--- a/sys/dev/ath/ath_hal/ar5312/ar5312_attach.c
+++ b/sys/dev/ath/ath_hal/ar5312/ar5312_attach.c
@@ -62,7 +62,7 @@ ar5312AniSetup(struct ath_hal *ah)
static struct ath_hal *
ar5312Attach(uint16_t devid, HAL_SOFTC sc,
HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
- HAL_STATUS *status)
+ HAL_OPS_CONFIG *ah_config, HAL_STATUS *status)
{
struct ath_hal_5212 *ahp = AH_NULL;
struct ath_hal *ah;
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c b/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
index bdc6111..a20499a 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
@@ -297,7 +297,7 @@ ar5416GetRadioRev(struct ath_hal *ah)
static struct ath_hal *
ar5416Attach(uint16_t devid, HAL_SOFTC sc,
HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
- HAL_STATUS *status)
+ HAL_OPS_CONFIG *ah_config, HAL_STATUS *status)
{
struct ath_hal_5416 *ahp5416;
struct ath_hal_5212 *ahp;
diff --git a/sys/dev/ath/ath_hal/ar9001/ar9130_attach.c b/sys/dev/ath/ath_hal/ar9001/ar9130_attach.c
index 4f478c0..c270bab 100644
--- a/sys/dev/ath/ath_hal/ar9001/ar9130_attach.c
+++ b/sys/dev/ath/ath_hal/ar9001/ar9130_attach.c
@@ -69,7 +69,9 @@ static HAL_BOOL ar9130FillCapabilityInfo(struct ath_hal *ah);
*/
static struct ath_hal *
ar9130Attach(uint16_t devid, HAL_SOFTC sc,
- HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata, HAL_STATUS *status)
+ HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
+ HAL_OPS_CONFIG *ah_config,
+ HAL_STATUS *status)
{
struct ath_hal_5416 *ahp5416;
struct ath_hal_5212 *ahp;
diff --git a/sys/dev/ath/ath_hal/ar9001/ar9160_attach.c b/sys/dev/ath/ath_hal/ar9001/ar9160_attach.c
index 979ba1a..5bda519 100644
--- a/sys/dev/ath/ath_hal/ar9001/ar9160_attach.c
+++ b/sys/dev/ath/ath_hal/ar9001/ar9160_attach.c
@@ -114,6 +114,7 @@ ar9160InitPLL(struct ath_hal *ah, const struct ieee80211_channel *chan)
static struct ath_hal *
ar9160Attach(uint16_t devid, HAL_SOFTC sc,
HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
+ HAL_OPS_CONFIG *ah_config,
HAL_STATUS *status)
{
struct ath_hal_5416 *ahp5416;
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c b/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c
index 2a67fe5..3be3e35 100644
--- a/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c
+++ b/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c
@@ -148,6 +148,7 @@ ar9280InitPLL(struct ath_hal *ah, const struct ieee80211_channel *chan)
static struct ath_hal *
ar9280Attach(uint16_t devid, HAL_SOFTC sc,
HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
+ HAL_OPS_CONFIG *ah_config,
HAL_STATUS *status)
{
struct ath_hal_9280 *ahp9280;
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9285_attach.c b/sys/dev/ath/ath_hal/ar9002/ar9285_attach.c
index edb6f26..eb3490a 100644
--- a/sys/dev/ath/ath_hal/ar9002/ar9285_attach.c
+++ b/sys/dev/ath/ath_hal/ar9002/ar9285_attach.c
@@ -133,6 +133,7 @@ ar9285_eeprom_print_diversity_settings(struct ath_hal *ah)
static struct ath_hal *
ar9285Attach(uint16_t devid, HAL_SOFTC sc,
HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
+ HAL_OPS_CONFIG *ah_config,
HAL_STATUS *status)
{
struct ath_hal_9285 *ahp9285;
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c b/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c
index 010e2c3..0ea565c 100644
--- a/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c
@@ -111,6 +111,7 @@ ar9287AniSetup(struct ath_hal *ah)
static struct ath_hal *
ar9287Attach(uint16_t devid, HAL_SOFTC sc,
HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
+ HAL_OPS_CONFIG *ah_config,
HAL_STATUS *status)
{
struct ath_hal_9287 *ahp9287;
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 898433e..af686f0 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -435,6 +435,81 @@ _ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line)
}
+/*
+ * Configure the initial HAL configuration values based on bus
+ * specific parameters.
+ *
+ * Some PCI IDs and other information may need tweaking.
+ *
+ * XXX TODO: ath9k and the Atheros HAL only program comm2g_switch_enable
+ * if BT antenna diversity isn't enabled.
+ *
+ * So, let's also figure out how to enable BT diversity for AR9485.
+ */
+static void
+ath_setup_hal_config(struct ath_softc *sc, HAL_OPS_CONFIG *ah_config)
+{
+ /* XXX TODO: only for PCI devices? */
+
+ if (sc->sc_pci_devinfo & (ATH_PCI_CUS198 | ATH_PCI_CUS230)) {
+ ah_config->ath_hal_ext_lna_ctl_gpio = 0x200; /* bit 9 */
+ ah_config->ath_hal_ext_atten_margin_cfg = AH_TRUE;
+ ah_config->ath_hal_min_gainidx = AH_TRUE;
+ ah_config->ath_hal_ant_ctrl_comm2g_switch_enable = 0x000bbb88;
+ /* XXX low_rssi_thresh */
+ /* XXX fast_div_bias */
+ device_printf(sc->sc_dev, "configuring for %s\n",
+ (sc->sc_pci_devinfo & ATH_PCI_CUS198) ?
+ "CUS198" : "CUS230");
+ }
+
+ if (sc->sc_pci_devinfo & ATH_PCI_CUS217)
+ device_printf(sc->sc_dev, "CUS217 card detected\n");
+
+ if (sc->sc_pci_devinfo & ATH_PCI_CUS252)
+ device_printf(sc->sc_dev, "CUS252 card detected\n");
+
+ if (sc->sc_pci_devinfo & ATH_PCI_AR9565_1ANT)
+ device_printf(sc->sc_dev, "WB335 1-ANT card detected\n");
+
+ if (sc->sc_pci_devinfo & ATH_PCI_AR9565_2ANT)
+ device_printf(sc->sc_dev, "WB335 2-ANT card detected\n");
+
+ if (sc->sc_pci_devinfo & ATH_PCI_KILLER)
+ device_printf(sc->sc_dev, "Killer Wireless card detected\n");
+
+#if 0
+ /*
+ * Some WB335 cards do not support antenna diversity. Since
+ * we use a hardcoded value for AR9565 instead of using the
+ * EEPROM/OTP data, remove the combining feature from
+ * the HW capabilities bitmap.
+ */
+ if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
+ if (!(sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV))
+ pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
+ }
+
+ if (sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV) {
+ pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
+ device_printf(sc->sc_dev, "Set BT/WLAN RX diversity capability\n");
+ }
+#endif
+
+ if (sc->sc_pci_devinfo & ATH_PCI_D3_L1_WAR) {
+ ah_config->ath_hal_pcie_waen = 0x0040473b;
+ device_printf(sc->sc_dev, "Enable WAR for ASPM D3/L1\n");
+ }
+
+#if 0
+ if (sc->sc_pci_devinfo & ATH9K_PCI_NO_PLL_PWRSAVE) {
+ ah->config.no_pll_pwrsave = true;
+ device_printf(sc->sc_dev, "Disable PLL PowerSave\n");
+ }
+#endif
+
+}
+
#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define HAL_MODE_HT40 \
(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
@@ -450,6 +525,7 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
u_int wmodes;
uint8_t macaddr[IEEE80211_ADDR_LEN];
int rx_chainmask, tx_chainmask;
+ HAL_OPS_CONFIG ah_config;
DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
@@ -468,8 +544,17 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
device_get_unit(sc->sc_dev));
CURVNET_RESTORE();
+ /*
+ * Configure the initial configuration data.
+ *
+ * This is stuff that may be needed early during attach
+ * rather than done via configuration calls later.
+ */
+ bzero(&ah_config, sizeof(ah_config));
+ ath_setup_hal_config(sc, &ah_config);
+
ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
- sc->sc_eepromdata, &status);
+ sc->sc_eepromdata, &ah_config, &status);
if (ah == NULL) {
if_printf(ifp, "unable to attach hardware; HAL status %u\n",
status);
@@ -7101,6 +7186,6 @@ ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m)
MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
-#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
+#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) || defined(ATH_DEBUG_ALQ)
MODULE_DEPEND(if_ath, alq, 1, 1, 1);
#endif
diff --git a/sys/dev/ath/if_ath_lna_div.c b/sys/dev/ath/if_ath_lna_div.c
index 961b834..f0a33a5 100644
--- a/sys/dev/ath/if_ath_lna_div.c
+++ b/sys/dev/ath/if_ath_lna_div.c
@@ -209,6 +209,10 @@ bad:
return (error);
}
+/*
+ * XXX need the low_rssi_thresh config from ath9k, to support CUS198
+ * antenna diversity correctly.
+ */
static HAL_BOOL
ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta, int mindelta,
int main_rssi_avg, int alt_rssi_avg, int pkt_count)
diff --git a/sys/dev/ath/if_ath_pci.c b/sys/dev/ath/if_ath_pci.c
index dbbc411..057ec2c 100644
--- a/sys/dev/ath/if_ath_pci.c
+++ b/sys/dev/ath/if_ath_pci.c
@@ -80,6 +80,98 @@ struct ath_pci_softc {
void *sc_ih; /* interrupt handler */
};
+/*
+ * XXX eventually this should be some system level definition
+ * so modules will hvae probe/attach information like USB.
+ * But for now..
+ */
+struct pci_device_id {
+ int vendor_id;
+ int device_id;
+
+ int sub_vendor_id;
+ int sub_device_id;
+
+ int driver_data;
+
+ int match_populated:1;
+ int match_vendor_id:1;
+ int match_device_id:1;
+ int match_sub_vendor_id:1;
+ int match_sub_device_id:1;
+};
+
+#define PCI_VDEVICE(v, s) \
+ .vendor_id = (v), \
+ .device_id = (s), \
+ .match_populated = 1, \
+ .match_vendor_id = 1, \
+ .match_device_id = 1
+
+#define PCI_DEVICE_SUB(v, d, dv, ds) \
+ .match_populated = 1, \
+ .vendor_id = (v), .match_vendor_id = 1, \
+ .device_id = (d), .match_device_id = 1, \
+ .sub_vendor_id = (dv), .match_sub_vendor_id = 1, \
+ .sub_device_id = (ds), .match_sub_device_id = 1
+
+#define PCI_VENDOR_ID_ATHEROS 0x168c
+#define PCI_VENDOR_ID_SAMSUNG 0x144d
+#define PCI_VENDOR_ID_AZWAVE 0x1a3b
+#define PCI_VENDOR_ID_FOXCONN 0x105b
+#define PCI_VENDOR_ID_ATTANSIC 0x1969
+#define PCI_VENDOR_ID_ASUSTEK 0x1043
+#define PCI_VENDOR_ID_DELL 0x1028
+#define PCI_VENDOR_ID_QMI 0x1a32
+#define PCI_VENDOR_ID_LENOVO 0x17aa
+#define PCI_VENDOR_ID_HP 0x103c
+
+#include "if_ath_pci_devlist.h"
+
+/*
+ * Attempt to find a match for the given device in
+ * the given device table.
+ *
+ * Returns the device structure or NULL if no matching
+ * PCI device is found.
+ */
+static const struct pci_device_id *
+ath_pci_probe_device(device_t dev, const struct pci_device_id *dev_table, int nentries)
+{
+ int i;
+ int vendor_id, device_id;
+ int sub_vendor_id, sub_device_id;
+
+ vendor_id = pci_get_vendor(dev);
+ device_id = pci_get_device(dev);
+ sub_vendor_id = pci_get_subvendor(dev);
+ sub_device_id = pci_get_subdevice(dev);
+
+ for (i = 0; i < nentries; i++) {
+ /* Don't match on non-populated (eg empty) entries */
+ if (! dev_table[i].match_populated)
+ continue;
+
+ if (dev_table[i].match_vendor_id &&
+ (dev_table[i].vendor_id != vendor_id))
+ continue;
+ if (dev_table[i].match_device_id &&
+ (dev_table[i].device_id != device_id))
+ continue;
+ if (dev_table[i].match_sub_vendor_id &&
+ (dev_table[i].sub_vendor_id != sub_vendor_id))
+ continue;
+ if (dev_table[i].match_sub_device_id &&
+ (dev_table[i].sub_device_id != sub_device_id))
+ continue;
+
+ /* Match */
+ return (&dev_table[i]);
+ }
+
+ return (NULL);
+}
+
#define BS_BAR 0x10
#define PCIR_RETRY_TIMEOUT 0x41
#define PCIR_CFG_PMCSR 0x48
@@ -150,9 +242,15 @@ ath_pci_attach(device_t dev)
const struct firmware *fw = NULL;
const char *buf;
#endif
+ const struct pci_device_id *pd;
sc->sc_dev = dev;
+ /* Do this lookup anyway; figure out what to do with it later */
+ pd = ath_pci_probe_device(dev, ath_pci_id_table, nitems(ath_pci_id_table));
+ if (pd)
+ sc->sc_pci_devinfo = pd->driver_data;
+
/*
* Enable bus mastering.
*/
diff --git a/sys/dev/ath/if_ath_pci_devlist.h b/sys/dev/ath/if_ath_pci_devlist.h
new file mode 100644
index 0000000..ae65909
--- /dev/null
+++ b/sys/dev/ath/if_ath_pci_devlist.h
@@ -0,0 +1,669 @@
+/*-
+ * Copyright (c) 2014 Qualcomm Atheros.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+
+static const struct pci_device_id ath_pci_id_table[] = {
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x0023) }, /* PCI */
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x0024) }, /* PCI-E */
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x0027) }, /* PCI */
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x0029) }, /* PCI */
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x002A) }, /* PCI-E */
+
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1C71),
+ .driver_data = ATH_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE01F),
+ .driver_data = ATH_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x11AD, /* LITEON */
+ 0x6632),
+ .driver_data = ATH_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x11AD, /* LITEON */
+ 0x6642),
+ .driver_data = ATH_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_QMI,
+ 0x0306),
+ .driver_data = ATH_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x185F, /* WNC */
+ 0x309D),
+ .driver_data = ATH_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x147C),
+ .driver_data = ATH_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x147D),
+ .driver_data = ATH_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x1536),
+ .driver_data = ATH_PCI_D3_L1_WAR },
+
+ /* AR9285 card for Asus */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002B,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2C37),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x002B) }, /* PCI-E */
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x002D) }, /* PCI */
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x002E) }, /* PCI-E */
+
+ /* Killer Wireless (3x3) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2000),
+ .driver_data = ATH_PCI_KILLER },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2001),
+ .driver_data = ATH_PCI_KILLER },
+
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x0030) }, /* PCI-E AR9300 */
+
+ /* PCI-E CUS198 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2086),
+ .driver_data = ATH_PCI_CUS198 | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1237),
+ .driver_data = ATH_PCI_CUS198 | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2126),
+ .driver_data = ATH_PCI_CUS198 | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x126A),
+ .driver_data = ATH_PCI_CUS198 | ATH_PCI_BT_ANT_DIV },
+
+ /* PCI-E CUS230 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2152),
+ .driver_data = ATH_PCI_CUS230 | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE075),
+ .driver_data = ATH_PCI_CUS230 | ATH_PCI_BT_ANT_DIV },
+
+ /* WB225 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3119),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3122),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x185F, /* WNC */
+ 0x3119),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x185F, /* WNC */
+ 0x3027),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x4105),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x4106),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410D),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410E),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410F),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC706),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC680),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC708),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3218),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3219),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+
+ /* AR9485 cards with PLL power-save disabled by default. */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2C97),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2100),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1C56, /* ASKEY */
+ 0x4001),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x11AD, /* LITEON */
+ 0x6627),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x11AD, /* LITEON */
+ 0x6628),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE04E),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE04F),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x144F, /* ASKEY */
+ 0x7197),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x2000),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x2001),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1186),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1F86),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1195),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1F95),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x1C00),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x1C01),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ASUSTEK,
+ 0x850D),
+ .driver_data = ATH_PCI_NO_PLL_PWRSAVE },
+
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x0032) }, /* PCI-E AR9485 */
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x0033) }, /* PCI-E AR9580 */
+
+ /* PCI-E CUS217 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2116),
+ .driver_data = ATH_PCI_CUS217 },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x11AD, /* LITEON */
+ 0x6661),
+ .driver_data = ATH_PCI_CUS217 },
+
+ /* AR9462 with WoW support */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3117),
+ .driver_data = ATH_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3214),
+ .driver_data = ATH_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_ATTANSIC,
+ 0x0091),
+ .driver_data = ATH_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2110),
+ .driver_data = ATH_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_ASUSTEK,
+ 0x850E),
+ .driver_data = ATH_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x11AD, /* LITEON */
+ 0x6631),
+ .driver_data = ATH_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x11AD, /* LITEON */
+ 0x6641),
+ .driver_data = ATH_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_HP,
+ 0x1864),
+ .driver_data = ATH_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x14CD, /* USI */
+ 0x0063),
+ .driver_data = ATH_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x14CD, /* USI */
+ 0x0064),
+ .driver_data = ATH_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x10CF, /* Fujitsu */
+ 0x1783),
+ .driver_data = ATH_PCI_WOW },
+
+ /* Killer Wireless (2x2) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2003),
+ .driver_data = ATH_PCI_KILLER },
+
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x0034) }, /* PCI-E AR9462 */
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
+
+ /* CUS252 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3028),
+ .driver_data = ATH_PCI_CUS252 |
+ ATH_PCI_AR9565_2ANT |
+ ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2176),
+ .driver_data = ATH_PCI_CUS252 |
+ ATH_PCI_AR9565_2ANT |
+ ATH_PCI_BT_ANT_DIV },
+
+ /* WB335 1-ANT */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE068),
+ .driver_data = ATH_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0xA119),
+ .driver_data = ATH_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0632),
+ .driver_data = ATH_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x06B2),
+ .driver_data = ATH_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0842),
+ .driver_data = ATH_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x6671),
+ .driver_data = ATH_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2811),
+ .driver_data = ATH_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2812),
+ .driver_data = ATH_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A1),
+ .driver_data = ATH_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x218A),
+ .driver_data = ATH_PCI_AR9565_1ANT },
+
+ /* WB335 1-ANT / Antenna Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3025),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3026),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302B),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE069),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3028),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0622),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0672),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0662),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x06A2),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0682),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213A),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x18E3),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x217F),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x2005),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_DELL,
+ 0x020C),
+ .driver_data = ATH_PCI_AR9565_1ANT | ATH_PCI_BT_ANT_DIV },
+
+ /* WB335 2-ANT / Antenna-Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411A),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411B),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411C),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411D),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411E),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3027),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302C),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0642),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0652),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0612),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0832),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0692),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2130),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213B),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2182),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x144F, /* ASKEY */
+ 0x7202),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2810),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A2),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3027),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0xA120),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE07F),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE081),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3026),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x4026),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ASUSTEK,
+ 0x85F2),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_DELL,
+ 0x020E),
+ .driver_data = ATH_PCI_AR9565_2ANT | ATH_PCI_BT_ANT_DIV },
+
+ /* PCI-E AR9565 (WB335) */
+ { PCI_VDEVICE(PCI_VENDOR_ID_ATHEROS, 0x0036),
+ .driver_data = ATH_PCI_BT_ANT_DIV },
+
+ { 0 }
+};
+
diff --git a/sys/dev/ath/if_athvar.h b/sys/dev/ath/if_athvar.h
index 67eaecf..e888ca2 100644
--- a/sys/dev/ath/if_athvar.h
+++ b/sys/dev/ath/if_athvar.h
@@ -82,6 +82,25 @@
#define ATH_BEACON_CWMAX_DEFAULT 0 /* default cwmax for ap beacon q */
/*
+ * The following bits can be set during the PCI (and perhaps non-PCI
+ * later) device probe path.
+ *
+ * They control some of the driver and HAL behaviour.
+ */
+
+#define ATH_PCI_CUS198 0x0001
+#define ATH_PCI_CUS230 0x0002
+#define ATH_PCI_CUS217 0x0004
+#define ATH_PCI_CUS252 0x0008
+#define ATH_PCI_WOW 0x0010
+#define ATH_PCI_BT_ANT_DIV 0x0020
+#define ATH_PCI_D3_L1_WAR 0x0040
+#define ATH_PCI_AR9565_1ANT 0x0080
+#define ATH_PCI_AR9565_2ANT 0x0100
+#define ATH_PCI_NO_PLL_PWRSAVE 0x0200
+#define ATH_PCI_KILLER 0x0400
+
+/*
* The key cache is used for h/w cipher state and also for
* tracking station state such as the current tx antenna.
* We also setup a mapping table between key cache slot indices
@@ -884,6 +903,9 @@ struct ath_softc {
HAL_POWER_MODE sc_cur_powerstate;
int sc_powersave_refcnt;
+
+ /* ATH_PCI_* flags */
+ uint32_t sc_pci_devinfo;
};
#define ATH_LOCK_INIT(_sc) \
diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c
index 374a419..838be4f 100644
--- a/sys/dev/bge/if_bge.c
+++ b/sys/dev/bge/if_bge.c
@@ -5828,7 +5828,7 @@ bge_ioctl(if_t ifp, u_long command, caddr_t data)
#ifdef DEVICE_POLLING
if (mask & IFCAP_POLLING) {
if (ifr->ifr_reqcap & IFCAP_POLLING) {
- error = ether_poll_register_drv(bge_poll, ifp);
+ error = ether_poll_register(bge_poll, ifp);
if (error)
return (error);
BGE_LOCK(sc);
diff --git a/sys/dev/ce/if_ce.c b/sys/dev/ce/if_ce.c
index 36c4ba4..d0dd0c7 100644
--- a/sys/dev/ce/if_ce.c
+++ b/sys/dev/ce/if_ce.c
@@ -1133,12 +1133,7 @@ static void ce_receive (ce_chan_t *c, unsigned char *data, int len)
m->m_pkthdr.rcvif = d->ifp;
/* Check if there's a BPF listener on this interface.
* If so, hand off the raw packet to bpf. */
-#if __FreeBSD_version >= 500000
- BPF_TAP (d->ifp, data, len);
-#else
- if (d->ifp->if_bpf)
- bpf_tap (d->ifp, data, len);
-#endif
+ BPF_MTAP(d->ifp, m);
IF_ENQUEUE(&d->rqueue, m);
#endif
}
diff --git a/sys/dev/cp/if_cp.c b/sys/dev/cp/if_cp.c
index ca6aaf4..4e70838 100644
--- a/sys/dev/cp/if_cp.c
+++ b/sys/dev/cp/if_cp.c
@@ -902,7 +902,7 @@ static void cp_receive (cp_chan_t *c, unsigned char *data, int len)
m->m_pkthdr.rcvif = d->ifp;
/* Check if there's a BPF listener on this interface.
* If so, hand off the raw packet to bpf. */
- BPF_TAP (d->ifp, data, len);
+ BPF_MTAP(d->ifp, m);
IF_ENQUEUE (&d->queue, m);
#endif
}
diff --git a/sys/dev/ctau/if_ct.c b/sys/dev/ctau/if_ct.c
index 5cf20dd..397c3ac 100644
--- a/sys/dev/ctau/if_ct.c
+++ b/sys/dev/ctau/if_ct.c
@@ -1120,7 +1120,7 @@ static void ct_receive (ct_chan_t *c, char *data, int len)
m->m_pkthdr.rcvif = d->ifp;
/* Check if there's a BPF listener on this interface.
* If so, hand off the raw packet to bpf. */
- BPF_TAP (d->ifp, data, len);
+ BPF_MTAP(d->ifp, m);
IF_ENQUEUE (&d->queue, m);
#endif
}
diff --git a/sys/dev/cx/if_cx.c b/sys/dev/cx/if_cx.c
index eecab5f..360e1f6 100644
--- a/sys/dev/cx/if_cx.c
+++ b/sys/dev/cx/if_cx.c
@@ -1318,7 +1318,7 @@ static void cx_receive (cx_chan_t *c, char *data, int len)
m->m_pkthdr.rcvif = d->ifp;
/* Check if there's a BPF listener on this interface.
* If so, hand off the raw packet to bpf. */
- BPF_TAP (d->ifp, data, len);
+ BPF_MTAP(d->ifp, m);
IF_ENQUEUE (&d->queue, m);
#endif
}
diff --git a/sys/dev/cxgb/cxgb_adapter.h b/sys/dev/cxgb/cxgb_adapter.h
index 6aaacd3..6f3abe7 100644
--- a/sys/dev/cxgb/cxgb_adapter.h
+++ b/sys/dev/cxgb/cxgb_adapter.h
@@ -97,6 +97,7 @@ struct port_info {
const struct port_type_info *port_type;
struct cphy phy;
struct cmac mac;
+ struct timeval last_refreshed;
struct link_config link_config;
struct ifmedia media;
struct mtx lock;
@@ -575,4 +576,5 @@ void cxgb_tx_watchdog(void *arg);
int cxgb_transmit(struct ifnet *ifp, struct mbuf *m);
void cxgb_qflush(struct ifnet *ifp);
void t3_iterate(void (*)(struct adapter *, void *), void *);
+void cxgb_refresh_stats(struct port_info *);
#endif
diff --git a/sys/dev/cxgb/cxgb_main.c b/sys/dev/cxgb/cxgb_main.c
index 7488553..b4bacc6 100644
--- a/sys/dev/cxgb/cxgb_main.c
+++ b/sys/dev/cxgb/cxgb_main.c
@@ -96,6 +96,7 @@ static int cxgb_media_change(struct ifnet *);
static int cxgb_ifm_type(int);
static void cxgb_build_medialist(struct port_info *);
static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
+static uint64_t cxgb_get_counter(struct ifnet *, ift_counter);
static int setup_sge_qsets(adapter_t *);
static void cxgb_async_intr(void *);
static void cxgb_tick_handler(void *, int);
@@ -1022,6 +1023,7 @@ cxgb_port_attach(device_t dev)
ifp->if_ioctl = cxgb_ioctl;
ifp->if_transmit = cxgb_transmit;
ifp->if_qflush = cxgb_qflush;
+ ifp->if_get_counter = cxgb_get_counter;
ifp->if_capabilities = CXGB_CAP;
#ifdef TCP_OFFLOAD
@@ -2189,6 +2191,71 @@ cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
speed));
}
+static uint64_t
+cxgb_get_counter(struct ifnet *ifp, ift_counter c)
+{
+ struct port_info *pi = ifp->if_softc;
+ struct adapter *sc = pi->adapter;
+ struct cmac *mac = &pi->mac;
+ struct mac_stats *mstats = &mac->stats;
+
+ cxgb_refresh_stats(pi);
+
+ switch (c) {
+ case IFCOUNTER_IPACKETS:
+ return (mstats->rx_frames);
+
+ case IFCOUNTER_IERRORS:
+ return (mstats->rx_jabber + mstats->rx_data_errs +
+ mstats->rx_sequence_errs + mstats->rx_runt +
+ mstats->rx_too_long + mstats->rx_mac_internal_errs +
+ mstats->rx_short + mstats->rx_fcs_errs);
+
+ case IFCOUNTER_OPACKETS:
+ return (mstats->tx_frames);
+
+ case IFCOUNTER_OERRORS:
+ return (mstats->tx_excess_collisions + mstats->tx_underrun +
+ mstats->tx_len_errs + mstats->tx_mac_internal_errs +
+ mstats->tx_excess_deferral + mstats->tx_fcs_errs);
+
+ case IFCOUNTER_COLLISIONS:
+ return (mstats->tx_total_collisions);
+
+ case IFCOUNTER_IBYTES:
+ return (mstats->rx_octets);
+
+ case IFCOUNTER_OBYTES:
+ return (mstats->tx_octets);
+
+ case IFCOUNTER_IMCASTS:
+ return (mstats->rx_mcast_frames);
+
+ case IFCOUNTER_OMCASTS:
+ return (mstats->tx_mcast_frames);
+
+ case IFCOUNTER_IQDROPS:
+ return (mstats->rx_cong_drops);
+
+ case IFCOUNTER_OQDROPS: {
+ int i;
+ uint64_t drops;
+
+ drops = 0;
+ if (sc->flags & FULL_INIT_DONE) {
+ for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
+ drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
+ }
+
+ return (drops);
+
+ }
+
+ default:
+ return (if_get_counter_default(ifp, c));
+ }
+}
+
static void
cxgb_async_intr(void *data)
{
@@ -2289,6 +2356,23 @@ cxgb_tick(void *arg)
callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
}
+void
+cxgb_refresh_stats(struct port_info *pi)
+{
+ struct timeval tv;
+ const struct timeval interval = {0, 250000}; /* 250ms */
+
+ getmicrotime(&tv);
+ timevalsub(&tv, &interval);
+ if (timevalcmp(&tv, &pi->last_refreshed, <))
+ return;
+
+ PORT_LOCK(pi);
+ t3_mac_update_stats(&pi->mac);
+ PORT_UNLOCK(pi);
+ getmicrotime(&pi->last_refreshed);
+}
+
static void
cxgb_tick_handler(void *arg, int count)
{
@@ -2333,48 +2417,12 @@ cxgb_tick_handler(void *arg, int count)
for (i = 0; i < sc->params.nports; i++) {
struct port_info *pi = &sc->port[i];
- struct ifnet *ifp = pi->ifp;
struct cmac *mac = &pi->mac;
- struct mac_stats *mstats = &mac->stats;
- int drops, j;
if (!isset(&sc->open_device_map, pi->port_id))
continue;
- PORT_LOCK(pi);
- t3_mac_update_stats(mac);
- PORT_UNLOCK(pi);
-
- ifp->if_opackets = mstats->tx_frames;
- ifp->if_ipackets = mstats->rx_frames;
- ifp->if_obytes = mstats->tx_octets;
- ifp->if_ibytes = mstats->rx_octets;
- ifp->if_omcasts = mstats->tx_mcast_frames;
- ifp->if_imcasts = mstats->rx_mcast_frames;
- ifp->if_collisions = mstats->tx_total_collisions;
- ifp->if_iqdrops = mstats->rx_cong_drops;
-
- drops = 0;
- for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; j++)
- drops += sc->sge.qs[j].txq[TXQ_ETH].txq_mr->br_drops;
- ifp->if_oqdrops = drops;
-
- ifp->if_oerrors =
- mstats->tx_excess_collisions +
- mstats->tx_underrun +
- mstats->tx_len_errs +
- mstats->tx_mac_internal_errs +
- mstats->tx_excess_deferral +
- mstats->tx_fcs_errs;
- ifp->if_ierrors =
- mstats->rx_jabber +
- mstats->rx_data_errs +
- mstats->rx_sequence_errs +
- mstats->rx_runt +
- mstats->rx_too_long +
- mstats->rx_mac_internal_errs +
- mstats->rx_short +
- mstats->rx_fcs_errs;
+ cxgb_refresh_stats(pi);
if (mac->multiport)
continue;
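The new cxgb_refresh_stats() above rate-limits MAC statistics reads: the hardware is only touched if the last refresh is older than 250 ms, so the tick handler, the if_get_counter method, and the sysctl handler can all call it freely. Here is a small userland sketch of that timestamp check, with gettimeofday()/timersub()/timercmp() standing in for the kernel's getmicrotime()/timevalsub()/timevalcmp(); the stats read itself is a placeholder and the names are illustrative.

#include <sys/time.h>
#include <stdio.h>

static struct timeval last_refreshed;

static void
refresh_stats(void)
{
	struct timeval now, cutoff;
	const struct timeval interval = { 0, 250000 };	/* 250 ms */

	gettimeofday(&now, NULL);
	timersub(&now, &interval, &cutoff);
	if (timercmp(&cutoff, &last_refreshed, <))
		return;			/* refreshed within the last 250 ms */

	printf("reading hardware counters...\n");	/* placeholder work */
	gettimeofday(&last_refreshed, NULL);
}

int
main(void)
{
	refresh_stats();	/* performs the (placeholder) read */
	refresh_stats();	/* skipped: called within 250 ms of the first */
	return (0);
}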
diff --git a/sys/dev/cxgb/cxgb_sge.c b/sys/dev/cxgb/cxgb_sge.c
index fcc0376..88ee7cc 100644
--- a/sys/dev/cxgb/cxgb_sge.c
+++ b/sys/dev/cxgb/cxgb_sge.c
@@ -3410,10 +3410,8 @@ sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
if (!p)
return (EINVAL);
+ cxgb_refresh_stats(p);
parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
- PORT_LOCK(p);
- t3_mac_update_stats(&p->mac);
- PORT_UNLOCK(p);
return (sysctl_handle_64(oidp, parg, 0, req));
}
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index 316852b..8cebdaa 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -268,7 +268,10 @@ struct port_info {
int linkdnrc;
struct link_config link_cfg;
- struct port_stats stats;
+
+ struct timeval last_refreshed;
+ struct port_stats stats;
+ u_int tnl_cong_drops;
eventhandler_tag vlan_c;
@@ -790,6 +793,8 @@ struct adapter {
TAILQ_HEAD(, sge_fl) sfl;
struct callout sfl_callout;
+ struct mtx regwin_lock; /* for indirect reads and memory windows */
+
an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
fw_msg_handler_t fw_msg_handler[5]; /* NUM_FW6_TYPES */
cpl_handler_t cpl_handler[0xef]; /* NUM_CPL_CMDS */
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 99bc344..77c99b2 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -151,6 +151,7 @@ static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
+static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
@@ -386,6 +387,7 @@ static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
+static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
@@ -609,6 +611,8 @@ t4_attach(device_t dev)
TAILQ_INIT(&sc->sfl);
callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
+ mtx_init(&sc->regwin_lock, "register and memory window", 0, MTX_DEF);
+
rc = map_bars_0_and_4(sc);
if (rc != 0)
goto done; /* error message displayed already */
@@ -1024,6 +1028,8 @@ t4_detach(device_t dev)
mtx_destroy(&sc->sfl_lock);
if (mtx_initialized(&sc->ifp_lock))
mtx_destroy(&sc->ifp_lock);
+ if (mtx_initialized(&sc->regwin_lock))
+ mtx_destroy(&sc->regwin_lock);
bzero(sc, sizeof(*sc));
@@ -1073,6 +1079,7 @@ cxgbe_attach(device_t dev)
ifp->if_ioctl = cxgbe_ioctl;
ifp->if_transmit = cxgbe_transmit;
ifp->if_qflush = cxgbe_qflush;
+ ifp->if_get_counter = cxgbe_get_counter;
ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
@@ -1083,6 +1090,10 @@ cxgbe_attach(device_t dev)
ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
+ ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
+ ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
+ ifp->if_hw_tsomaxsegsize = 65536;
+
/* Initialize ifmedia for this port */
ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
cxgbe_media_status);
@@ -1500,6 +1511,67 @@ cxgbe_qflush(struct ifnet *ifp)
if_qflush(ifp);
}
+static uint64_t
+cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
+{
+ struct port_info *pi = ifp->if_softc;
+ struct adapter *sc = pi->adapter;
+ struct port_stats *s = &pi->stats;
+
+ cxgbe_refresh_stats(sc, pi);
+
+ switch (c) {
+ case IFCOUNTER_IPACKETS:
+ return (s->rx_frames - s->rx_pause);
+
+ case IFCOUNTER_IERRORS:
+ return (s->rx_jabber + s->rx_runt + s->rx_too_long +
+ s->rx_fcs_err + s->rx_len_err);
+
+ case IFCOUNTER_OPACKETS:
+ return (s->tx_frames - s->tx_pause);
+
+ case IFCOUNTER_OERRORS:
+ return (s->tx_error_frames);
+
+ case IFCOUNTER_IBYTES:
+ return (s->rx_octets - s->rx_pause * 64);
+
+ case IFCOUNTER_OBYTES:
+ return (s->tx_octets - s->tx_pause * 64);
+
+ case IFCOUNTER_IMCASTS:
+ return (s->rx_mcast_frames - s->rx_pause);
+
+ case IFCOUNTER_OMCASTS:
+ return (s->tx_mcast_frames - s->tx_pause);
+
+ case IFCOUNTER_IQDROPS:
+ return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
+ s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
+ s->rx_trunc3 + pi->tnl_cong_drops);
+
+ case IFCOUNTER_OQDROPS: {
+ uint64_t drops;
+
+ drops = s->tx_drop;
+ if (pi->flags & PORT_INIT_DONE) {
+ int i;
+ struct sge_txq *txq;
+
+ for_each_txq(pi, i, txq)
+ drops += txq->br->br_drops;
+ }
+
+ return (drops);
+
+ }
+
+ default:
+ return (if_get_counter_default(ifp, c));
+ }
+}
+
static int
cxgbe_media_change(struct ifnet *ifp)
{
@@ -4273,14 +4345,39 @@ t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
}
static void
+cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
+{
+ int i;
+ u_int v, tnl_cong_drops;
+ struct timeval tv;
+ const struct timeval interval = {0, 250000}; /* 250ms */
+
+ getmicrotime(&tv);
+ timevalsub(&tv, &interval);
+ if (timevalcmp(&tv, &pi->last_refreshed, <))
+ return;
+
+ tnl_cong_drops = 0;
+ t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
+ for (i = 0; i < NCHAN; i++) {
+ if (pi->rx_chan_map & (1 << i)) {
+ mtx_lock(&sc->regwin_lock);
+ t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
+ 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
+ mtx_unlock(&sc->regwin_lock);
+ tnl_cong_drops += v;
+ }
+ }
+ pi->tnl_cong_drops = tnl_cong_drops;
+ getmicrotime(&pi->last_refreshed);
+}
+
+static void
cxgbe_tick(void *arg)
{
struct port_info *pi = arg;
struct adapter *sc = pi->adapter;
struct ifnet *ifp = pi->ifp;
- struct sge_txq *txq;
- int i, drops;
- struct port_stats *s = &pi->stats;
PORT_LOCK(pi);
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
@@ -4288,39 +4385,7 @@ cxgbe_tick(void *arg)
return; /* without scheduling another callout */
}
- t4_get_port_stats(sc, pi->tx_chan, s);
-
- ifp->if_opackets = s->tx_frames - s->tx_pause;
- ifp->if_ipackets = s->rx_frames - s->rx_pause;
- ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
- ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
- ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
- ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
- ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
- s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
- s->rx_trunc3;
- for (i = 0; i < 4; i++) {
- if (pi->rx_chan_map & (1 << i)) {
- uint32_t v;
-
- /*
- * XXX: indirect reads from the same ADDR/DATA pair can
- * race with each other.
- */
- t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
- 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
- ifp->if_iqdrops += v;
- }
- }
-
- drops = s->tx_drop;
- for_each_txq(pi, i, txq)
- drops += txq->br->br_drops;
- ifp->if_oqdrops = drops;
-
- ifp->if_oerrors = s->tx_error_frames;
- ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
- s->rx_fcs_err + s->rx_len_err;
+ cxgbe_refresh_stats(sc, pi);
callout_schedule(&pi->tick, hz);
PORT_UNLOCK(pi);
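The cxgbe(4) hunks above replace the periodic copy of hardware statistics into struct ifnet fields with the if_get_counter(9) callback: per-counter values are computed on demand in cxgbe_get_counter(), and cxgbe_refresh_stats() rate-limits the hardware reads to one refresh per 250 ms. A minimal sketch of the same pattern for a hypothetical foo(4) driver follows; the softc layout and names are illustrative assumptions, not code from this change.

/*
 * Minimal sketch of the if_get_counter(9) pattern used above, for a
 * hypothetical foo(4) driver; the softc layout and names are
 * illustrative only and are not part of this change.
 */
#include <sys/param.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>

struct foo_softc {
	struct ifnet	*ifp;
	uint64_t	rx_frames;	/* updated by the RX interrupt path */
	uint64_t	tx_frames;	/* updated by the TX completion path */
};

static uint64_t
foo_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct foo_softc *sc = ifp->if_softc;

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (sc->rx_frames);
	case IFCOUNTER_OPACKETS:
		return (sc->tx_frames);
	default:
		/* Let the stack supply counters the driver does not keep. */
		return (if_get_counter_default(ifp, c));
	}
}

/* In the attach routine: ifp->if_get_counter = foo_get_counter; */

The corresponding registration step appears above as ifp->if_get_counter = cxgbe_get_counter in cxgbe_attach().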
diff --git a/sys/dev/dwc/if_dwc.c b/sys/dev/dwc/if_dwc.c
index f07832c..aeb98e9 100644
--- a/sys/dev/dwc/if_dwc.c
+++ b/sys/dev/dwc/if_dwc.c
@@ -799,7 +799,7 @@ dwc_rxfinish_locked(struct dwc_softc *sc)
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = len;
m->m_len = len;
- ifp->if_ipackets++;
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
DWC_UNLOCK(sc);
(*ifp->if_input)(ifp, m);
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index d8fc23e..070bb5b 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -307,7 +307,7 @@ static int em_sysctl_eee(SYSCTL_HANDLER_ARGS);
static __inline void em_rx_discard(struct rx_ring *, int);
#ifdef DEVICE_POLLING
-static poll_handler_drv_t em_poll;
+static poll_handler_t em_poll;
#endif /* POLLING */
/*********************************************************************
@@ -787,7 +787,7 @@ em_detach(device_t dev)
#ifdef DEVICE_POLLING
if (if_getcapenable(ifp) & IFCAP_POLLING)
- ether_poll_deregister_drv(ifp);
+ ether_poll_deregister(ifp);
#endif
if (adapter->led_dev != NULL)
@@ -1208,7 +1208,7 @@ em_ioctl(if_t ifp, u_long command, caddr_t data)
#ifdef DEVICE_POLLING
if (mask & IFCAP_POLLING) {
if (ifr->ifr_reqcap & IFCAP_POLLING) {
- error = ether_poll_register_drv(em_poll, ifp);
+ error = ether_poll_register(em_poll, ifp);
if (error)
return (error);
EM_CORE_LOCK(adapter);
@@ -1216,7 +1216,7 @@ em_ioctl(if_t ifp, u_long command, caddr_t data)
if_setcapenablebit(ifp, IFCAP_POLLING, 0);
EM_CORE_UNLOCK(adapter);
} else {
- error = ether_poll_deregister_drv(ifp);
+ error = ether_poll_deregister(ifp);
/* Enable interrupt even in error case */
EM_CORE_LOCK(adapter);
em_enable_intr(adapter);
diff --git a/sys/dev/e1000/if_lem.c b/sys/dev/e1000/if_lem.c
index 3639318..d29c7f1 100644
--- a/sys/dev/e1000/if_lem.c
+++ b/sys/dev/e1000/if_lem.c
@@ -260,7 +260,7 @@ static void lem_add_rx_process_limit(struct adapter *, const char *,
const char *, int *, int);
#ifdef DEVICE_POLLING
-static poll_handler_drv_t lem_poll;
+static poll_handler_t lem_poll;
#endif /* POLLING */
/*********************************************************************
@@ -789,7 +789,7 @@ lem_detach(device_t dev)
#ifdef DEVICE_POLLING
if (if_getcapenable(ifp) & IFCAP_POLLING)
- ether_poll_deregister_drv(ifp);
+ ether_poll_deregister(ifp);
#endif
if (adapter->led_dev != NULL)
@@ -1119,7 +1119,7 @@ lem_ioctl(if_t ifp, u_long command, caddr_t data)
#ifdef DEVICE_POLLING
if (mask & IFCAP_POLLING) {
if (ifr->ifr_reqcap & IFCAP_POLLING) {
- error = ether_poll_register_drv(lem_poll, ifp);
+ error = ether_poll_register(lem_poll, ifp);
if (error)
return (error);
EM_CORE_LOCK(adapter);
@@ -1127,7 +1127,7 @@ lem_ioctl(if_t ifp, u_long command, caddr_t data)
if_setcapenablebit(ifp, IFCAP_POLLING, 0);
EM_CORE_UNLOCK(adapter);
} else {
- error = ether_poll_deregister_drv(ifp);
+ error = ether_poll_deregister(ifp);
/* Enable interrupt even in error case */
EM_CORE_LOCK(adapter);
lem_enable_intr(adapter);
diff --git a/sys/dev/firewire/firewire.c b/sys/dev/firewire/firewire.c
index ef4758c..c5f5dba 100644
--- a/sys/dev/firewire/firewire.c
+++ b/sys/dev/firewire/firewire.c
@@ -48,7 +48,6 @@ __FBSDID("$FreeBSD$");
#include <sys/kthread.h>
#include <sys/kdb.h>
-
#include <sys/bus.h> /* used by smbus and newbus */
#include <machine/bus.h>
@@ -65,7 +64,7 @@ struct crom_src_buf {
struct crom_chunk hw;
};
-int firewire_debug=0, try_bmr=1, hold_count=0;
+int firewire_debug = 0, try_bmr = 1, hold_count = 0;
SYSCTL_INT(_debug, OID_AUTO, firewire_debug, CTLFLAG_RW, &firewire_debug, 0,
"FireWire driver debug flag");
SYSCTL_NODE(_hw, OID_AUTO, firewire, CTLFLAG_RD, 0, "FireWire Subsystem");
@@ -81,19 +80,19 @@ MALLOC_DEFINE(M_FWXFER, "fw_xfer", "XFER/FireWire");
devclass_t firewire_devclass;
-static void firewire_identify (driver_t *, device_t);
-static int firewire_probe (device_t);
-static int firewire_attach (device_t);
-static int firewire_detach (device_t);
-static int firewire_resume (device_t);
+static void firewire_identify(driver_t *, device_t);
+static int firewire_probe(device_t);
+static int firewire_attach(device_t);
+static int firewire_detach(device_t);
+static int firewire_resume(device_t);
static void firewire_xfer_timeout(void *, int);
static device_t firewire_add_child(device_t, u_int, const char *, int);
-static void fw_try_bmr (void *);
-static void fw_try_bmr_callback (struct fw_xfer *);
-static void fw_asystart (struct fw_xfer *);
-static int fw_get_tlabel (struct firewire_comm *, struct fw_xfer *);
-static void fw_bus_probe (struct firewire_comm *);
-static void fw_attach_dev (struct firewire_comm *);
+static void fw_try_bmr(void *);
+static void fw_try_bmr_callback(struct fw_xfer *);
+static void fw_asystart(struct fw_xfer *);
+static int fw_get_tlabel(struct firewire_comm *, struct fw_xfer *);
+static void fw_bus_probe(void *);
+static void fw_attach_dev(struct firewire_comm *);
static void fw_bus_probe_thread(void *);
#ifdef FW_VMACCESS
static void fw_vmaccess (struct fw_xfer *);
@@ -116,6 +115,7 @@ static device_method_t firewire_methods[] = {
DEVMETHOD_END
};
+
char *linkspeed[] = {
"S100", "S200", "S400", "S800",
"S1600", "S3200", "undef", "undef"
@@ -174,8 +174,10 @@ fw_noderesolve_eui64(struct firewire_comm *fc, struct fw_eui64 *eui)
FW_GUNLOCK(fc);
splx(s);
- if(fwdev == NULL) return NULL;
- if(fwdev->status == FWDEVINVAL) return NULL;
+ if (fwdev == NULL)
+ return NULL;
+ if (fwdev->status == FWDEVINVAL)
+ return NULL;
return fwdev;
}
@@ -192,8 +194,9 @@ fw_asyreq(struct firewire_comm *fc, int sub, struct fw_xfer *xfer)
int tcode;
struct tcode_info *info;
- if(xfer == NULL) return EINVAL;
- if(xfer->hand == NULL){
+ if (xfer == NULL)
+ return EINVAL;
+ if (xfer->hand == NULL) {
printf("hand == NULL\n");
return EINVAL;
}
@@ -231,17 +234,17 @@ fw_asyreq(struct firewire_comm *fc, int sub, struct fw_xfer *xfer)
len = fp->mode.rresb.len;
else
len = 0;
- if (len != xfer->send.pay_len){
+ if (len != xfer->send.pay_len) {
printf("len(%d) != send.pay_len(%d) %s(%x)\n",
len, xfer->send.pay_len, tcode_str[tcode], tcode);
- return EINVAL;
+ return EINVAL;
}
- if(xferq->start == NULL){
+ if (xferq->start == NULL) {
printf("xferq->start == NULL\n");
return EINVAL;
}
- if(!(xferq->queued < xferq->maxq)){
+ if (!(xferq->queued < xferq->maxq)) {
device_printf(fc->bdev, "Discard a packet (queued=%d)\n",
xferq->queued);
return EAGAIN;
@@ -260,6 +263,7 @@ fw_asyreq(struct firewire_comm *fc, int sub, struct fw_xfer *xfer)
fw_asystart(xfer);
return err;
}
+
/*
* Wakeup blocked process.
*/
@@ -283,8 +287,8 @@ fw_xferwait(struct fw_xfer *xfer)
int err = 0;
mtx_lock(lock);
- if ((xfer->flag & FWXF_WAKE) == 0)
- err = msleep((void *)xfer, lock, PWAIT|PCATCH, "fw_xferwait", 0);
+ while ((xfer->flag & FWXF_WAKE) == 0)
+ err = msleep(xfer, lock, PWAIT|PCATCH, "fw_xferwait", 0);
mtx_unlock(lock);
return (err);
@@ -298,13 +302,14 @@ fw_asystart(struct fw_xfer *xfer)
{
struct firewire_comm *fc = xfer->fc;
int s;
+
s = splfw();
/* Protect from interrupt/timeout */
FW_GLOCK(fc);
xfer->flag = FWXF_INQ;
STAILQ_INSERT_TAIL(&xfer->q->q, xfer, link);
#if 0
- xfer->q->queued ++;
+ xfer->q->queued++;
#endif
FW_GUNLOCK(fc);
splx(s);
@@ -346,7 +351,7 @@ firewire_xfer_timeout(void *arg, int pending)
s = splfw();
mtx_lock(&fc->tlabel_lock);
- for (i = 0; i < 0x40; i ++) {
+ for (i = 0; i < 0x40; i++) {
while ((xfer = STAILQ_FIRST(&fc->tlabels[i])) != NULL) {
if ((xfer->flag & FWXF_SENT) == 0)
/* not sent yet */
@@ -355,8 +360,8 @@ firewire_xfer_timeout(void *arg, int pending)
/* the rests are newer than this */
break;
device_printf(fc->bdev,
- "split transaction timeout: "
- "tl=0x%x flag=0x%02x\n", i, xfer->flag);
+ "split transaction timeout: tl=0x%x flag=0x%02x\n",
+ i, xfer->flag);
fw_dump_hdr(&xfer->send.hdr, "send");
xfer->resp = ETIMEDOUT;
xfer->tl = -1;
@@ -369,7 +374,7 @@ firewire_xfer_timeout(void *arg, int pending)
fc->timeout(fc);
STAILQ_FOREACH_SAFE(xfer, &xfer_timeout, tlabel, txfer)
- xfer->hand(xfer);
+ xfer->hand(xfer);
}
#define WATCHDOG_HZ 10
@@ -379,7 +384,7 @@ firewire_watchdog(void *arg)
struct firewire_comm *fc;
static int watchdog_clock = 0;
- fc = (struct firewire_comm *)arg;
+ fc = arg;
/*
* At boot stage, the device interrupt is disabled and
@@ -389,10 +394,10 @@ firewire_watchdog(void *arg)
if (watchdog_clock > WATCHDOG_HZ * 15)
taskqueue_enqueue(fc->taskqueue, &fc->task_timeout);
else
- watchdog_clock ++;
+ watchdog_clock++;
callout_reset(&fc->timeout_callout, hz / WATCHDOG_HZ,
- (void *)firewire_watchdog, (void *)fc);
+ firewire_watchdog, fc);
}
/*
@@ -406,35 +411,36 @@ firewire_attach(device_t dev)
device_t pa = device_get_parent(dev);
struct firewire_comm *fc;
- fc = (struct firewire_comm *)device_get_softc(pa);
+ fc = device_get_softc(pa);
sc->fc = fc;
fc->status = FWBUSNOTREADY;
unit = device_get_unit(dev);
- if( fc->nisodma > FWMAXNDMA) fc->nisodma = FWMAXNDMA;
+ if (fc->nisodma > FWMAXNDMA)
+ fc->nisodma = FWMAXNDMA;
fwdev_makedev(sc);
- fc->crom_src_buf = (struct crom_src_buf *)malloc(
- sizeof(struct crom_src_buf),
- M_FW, M_NOWAIT | M_ZERO);
+ fc->crom_src_buf = malloc(sizeof(struct crom_src_buf),
+ M_FW, M_NOWAIT | M_ZERO);
if (fc->crom_src_buf == NULL) {
- device_printf(fc->dev, "%s: Malloc Failure crom src buff\n", __func__);
+ device_printf(fc->dev,
+ "%s: unable to allocate crom src buffer\n", __func__);
return ENOMEM;
}
- fc->topology_map = (struct fw_topology_map *)malloc(
- sizeof(struct fw_topology_map),
- M_FW, M_NOWAIT | M_ZERO);
+ fc->topology_map = malloc(sizeof(struct fw_topology_map),
+ M_FW, M_NOWAIT | M_ZERO);
if (fc->topology_map == NULL) {
- device_printf(fc->dev, "%s: Malloc Failure topology map\n", __func__);
+ device_printf(fc->dev, "%s: unable to allocate topology map\n",
+ __func__);
free(fc->crom_src_buf, M_FW);
return ENOMEM;
}
- fc->speed_map = (struct fw_speed_map *)malloc(
- sizeof(struct fw_speed_map),
- M_FW, M_NOWAIT | M_ZERO);
+ fc->speed_map = malloc(sizeof(struct fw_speed_map),
+ M_FW, M_NOWAIT | M_ZERO);
if (fc->speed_map == NULL) {
- device_printf(fc->dev, "%s: Malloc Failure speed map\n", __func__);
+ device_printf(fc->dev, "%s: unable to allocate speed map\n",
+ __func__);
free(fc->crom_src_buf, M_FW);
free(fc->topology_map, M_FW);
return ENOMEM;
@@ -445,14 +451,14 @@ firewire_attach(device_t dev)
CALLOUT_INIT(&fc->timeout_callout);
CALLOUT_INIT(&fc->bmr_callout);
CALLOUT_INIT(&fc->busprobe_callout);
- TASK_INIT(&fc->task_timeout, 0, firewire_xfer_timeout, (void *)fc);
+ TASK_INIT(&fc->task_timeout, 0, firewire_xfer_timeout, fc);
callout_reset(&sc->fc->timeout_callout, hz,
- (void *)firewire_watchdog, (void *)sc->fc);
+ firewire_watchdog, sc->fc);
/* create thread */
- kproc_create(fw_bus_probe_thread, (void *)fc, &fc->probe_thread,
- 0, 0, "fw%d_probe", unit);
+ kproc_create(fw_bus_probe_thread, fc, &fc->probe_thread,
+ 0, 0, "fw%d_probe", unit);
/* Locate our children */
bus_generic_probe(dev);
@@ -475,10 +481,10 @@ firewire_attach(device_t dev)
static device_t
firewire_add_child(device_t dev, u_int order, const char *name, int unit)
{
- device_t child;
+ device_t child;
struct firewire_softc *sc;
- sc = (struct firewire_softc *)device_get_softc(dev);
+ sc = device_get_softc(dev);
child = device_add_child(dev, name, unit);
if (child) {
device_set_ivars(child, sc->fc);
@@ -493,16 +499,16 @@ firewire_resume(device_t dev)
{
struct firewire_softc *sc;
- sc = (struct firewire_softc *)device_get_softc(dev);
+ sc = device_get_softc(dev);
sc->fc->status = FWBUSNOTREADY;
-
+
bus_generic_resume(dev);
- return(0);
+ return (0);
}
/*
- * Dettach it.
+ * Detach it.
*/
static int
firewire_detach(device_t dev)
@@ -512,7 +518,7 @@ firewire_detach(device_t dev)
struct fw_device *fwdev, *fwdev_next;
int err;
- sc = (struct firewire_softc *)device_get_softc(dev);
+ sc = device_get_softc(dev);
fc = sc->fc;
mtx_lock(&fc->wait_lock);
fc->status = FWBUSDETACH;
@@ -521,7 +527,7 @@ firewire_detach(device_t dev)
printf("firewire probe thread didn't die\n");
mtx_unlock(&fc->wait_lock);
- if (fc->arq !=0 && fc->arq->maxq > 0)
+ if (fc->arq != 0 && fc->arq->maxq > 0)
fw_drain_txq(fc);
if ((err = fwdev_destroydev(sc)) != 0)
@@ -536,7 +542,7 @@ firewire_detach(device_t dev)
/* XXX xfer_free and untimeout on all xfers */
for (fwdev = STAILQ_FIRST(&fc->devices); fwdev != NULL;
- fwdev = fwdev_next) {
+ fwdev = fwdev_next) {
fwdev_next = STAILQ_NEXT(fwdev, link);
free(fwdev, M_FW);
}
@@ -546,7 +552,7 @@ firewire_detach(device_t dev)
mtx_destroy(&fc->tlabel_lock);
mtx_destroy(&fc->wait_lock);
- return(0);
+ return (0);
}
static void
@@ -557,7 +563,7 @@ fw_xferq_drain(struct fw_xferq *xferq)
while ((xfer = STAILQ_FIRST(&xferq->q)) != NULL) {
STAILQ_REMOVE_HEAD(&xferq->q, link);
#if 0
- xferq->queued --;
+ xferq->queued--;
#endif
xfer->resp = EAGAIN;
xfer->flag = FWXF_SENTERR;
@@ -577,12 +583,12 @@ fw_drain_txq(struct firewire_comm *fc)
FW_GLOCK(fc);
fw_xferq_drain(fc->atq);
fw_xferq_drain(fc->ats);
- for(i = 0; i < fc->nisodma; i++)
+ for (i = 0; i < fc->nisodma; i++)
fw_xferq_drain(fc->it[i]);
FW_GUNLOCK(fc);
mtx_lock(&fc->tlabel_lock);
- for (i = 0; i < 0x40; i ++)
+ for (i = 0; i < 0x40; i++)
while ((xfer = STAILQ_FIRST(&fc->tlabels[i])) != NULL) {
if (firewire_debug)
printf("tl=%d flag=%d\n", i, xfer->flag);
@@ -594,7 +600,7 @@ fw_drain_txq(struct firewire_comm *fc)
mtx_unlock(&fc->tlabel_lock);
STAILQ_FOREACH_SAFE(xfer, &xfer_drain, tlabel, txfer)
- xfer->hand(xfer);
+ xfer->hand(xfer);
}
static void
@@ -603,7 +609,7 @@ fw_reset_csr(struct firewire_comm *fc)
int i;
CSRARC(fc, STATE_CLEAR)
- = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14 ;
+ = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14;
CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR);
CSRARC(fc, NODE_IDS) = 0x3f;
@@ -612,10 +618,10 @@ fw_reset_csr(struct firewire_comm *fc)
fc->max_node = -1;
- for(i = 2; i < 0x100/4 - 2 ; i++){
+ for (i = 2; i < 0x100 / 4 - 2; i++) {
CSRARC(fc, SPED_MAP + i * 4) = 0;
}
- CSRARC(fc, STATE_CLEAR) = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14 ;
+ CSRARC(fc, STATE_CLEAR) = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14;
CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR);
CSRARC(fc, RESET_START) = 0;
CSRARC(fc, SPLIT_TIMEOUT_HI) = 0;
@@ -631,14 +637,14 @@ fw_reset_csr(struct firewire_comm *fc)
CSRARC(fc, CONF_ROM) = 0x04 << 24;
CSRARC(fc, CONF_ROM + 4) = 0x31333934; /* means strings 1394 */
CSRARC(fc, CONF_ROM + 8) = 1 << 31 | 1 << 30 | 1 << 29 |
- 1 << 28 | 0xff << 16 | 0x09 << 8;
+ 1 << 28 | 0xff << 16 | 0x09 << 8;
CSRARC(fc, CONF_ROM + 0xc) = 0;
-/* DV depend CSRs see blue book */
- CSRARC(fc, oPCR) &= ~DV_BROADCAST_ON;
- CSRARC(fc, iPCR) &= ~DV_BROADCAST_ON;
+ /* DV depend CSRs see blue book */
+ CSRARC(fc, oPCR) &= ~DV_BROADCAST_ON;
+ CSRARC(fc, iPCR) &= ~DV_BROADCAST_ON;
- CSRARC(fc, STATE_CLEAR) &= ~(1 << 23 | 1 << 15 | 1 << 14 );
+ CSRARC(fc, STATE_CLEAR) &= ~(1 << 23 | 1 << 15 | 1 << 14);
CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR);
}
@@ -694,13 +700,8 @@ fw_reset_crom(struct firewire_comm *fc)
crom_add_entry(root, CSRKEY_NCAP, 0x0083c0); /* XXX */
/* private company_id */
crom_add_entry(root, CSRKEY_VENDOR, CSRVAL_VENDOR_PRIVATE);
-#ifdef __DragonFly__
- crom_add_simple_text(src, root, &buf->vendor, "DragonFly Project");
- crom_add_entry(root, CSRKEY_HW, __DragonFly_cc_version);
-#else
crom_add_simple_text(src, root, &buf->vendor, "FreeBSD Project");
crom_add_entry(root, CSRKEY_HW, __FreeBSD_version);
-#endif
mtx_lock(&prison0.pr_mtx);
crom_add_simple_text(src, root, &buf->hw, prison0.pr_hostname);
mtx_unlock(&prison0.pr_mtx);
@@ -731,8 +732,8 @@ fw_busreset(struct firewire_comm *fc, uint32_t new_status)
fw_reset_crom(fc);
if (device_get_children(fc->bdev, &devlistp, &devcnt) == 0) {
- for( i = 0 ; i < devcnt ; i++)
- if (device_get_state(devlistp[i]) >= DS_ATTACHED) {
+ for (i = 0; i < devcnt; i++)
+ if (device_get_state(devlistp[i]) >= DS_ATTACHED) {
fdc = device_get_softc(devlistp[i]);
if (fdc->post_busreset != NULL)
fdc->post_busreset(fdc);
@@ -741,31 +742,30 @@ fw_busreset(struct firewire_comm *fc, uint32_t new_status)
}
src = &fc->crom_src_buf->src;
- /*
- * If the old config rom needs to be overwritten,
- * bump the businfo.generation indicator to
- * indicate that we need to be reprobed
- * See 1394a-2000 8.3.2.5.4 for more details.
- * generation starts at 2 and rolls over at 0xF
- * back to 2.
- *
- * A generation of 0 indicates a device
- * that is not 1394a-2000 compliant.
- * A generation of 1 indicates a device that
- * does not change it's Bus Info Block or
- * Configuration ROM.
- */
+ /*
+ * If the old config rom needs to be overwritten,
+ * bump the businfo.generation indicator to
+ * indicate that we need to be reprobed.
+ * See 1394a-2000 8.3.2.5.4 for more details.
+ * generation starts at 2 and rolls over at 0xF
+ * back to 2.
+ *
+ * A generation of 0 indicates a device
+ * that is not 1394a-2000 compliant.
+ * A generation of 1 indicates a device that
+ * does not change its Bus Info Block or
+ * Configuration ROM.
+ */
#define FW_MAX_GENERATION 0xF
newrom = malloc(CROMSIZE, M_FW, M_NOWAIT | M_ZERO);
src = &fc->crom_src_buf->src;
crom_load(src, newrom, CROMSIZE);
if (bcmp(newrom, fc->config_rom, CROMSIZE) != 0) {
- if ( src->businfo.generation++ > FW_MAX_GENERATION )
+ if (src->businfo.generation++ > FW_MAX_GENERATION)
src->businfo.generation = FW_GENERATION_CHANGEABLE;
- bcopy(newrom, (void *)fc->config_rom, CROMSIZE);
+ bcopy(newrom, fc->config_rom, CROMSIZE);
}
free(newrom, M_FW);
-
}
/* Call once after reboot */
@@ -795,7 +795,7 @@ void fw_init(struct firewire_comm *fc)
STAILQ_INIT(&fc->atq->q);
STAILQ_INIT(&fc->ats->q);
- for( i = 0 ; i < fc->nisodma ; i ++ ){
+ for (i = 0; i < fc->nisodma; i++) {
fc->it[i]->queued = 0;
fc->ir[i]->queued = 0;
@@ -817,7 +817,7 @@ void fw_init(struct firewire_comm *fc)
fc->atq->maxq = FWMAXQUEUE;
fc->ats->maxq = FWMAXQUEUE;
- for( i = 0 ; i < fc->nisodma ; i++){
+ for (i = 0; i < fc->nisodma; i++) {
fc->ir[i]->maxq = FWMAXQUEUE;
fc->it[i]->maxq = FWMAXQUEUE;
}
@@ -829,9 +829,9 @@ void fw_init(struct firewire_comm *fc)
STAILQ_INIT(&fc->devices);
-/* Initialize Async handlers */
+ /* Initialize Async handlers */
STAILQ_INIT(&fc->binds);
- for( i = 0 ; i < 0x40 ; i++){
+ for (i = 0; i < 0x40; i++) {
STAILQ_INIT(&fc->tlabels[i]);
}
@@ -839,14 +839,14 @@ void fw_init(struct firewire_comm *fc)
#if 0
CSRARC(fc, oMPR) = 0x3fff0001; /* # output channel = 1 */
CSRARC(fc, oPCR) = 0x8000007a;
- for(i = 4 ; i < 0x7c/4 ; i+=4){
- CSRARC(fc, i + oPCR) = 0x8000007a;
+ for (i = 4; i < 0x7c/4; i += 4) {
+ CSRARC(fc, i + oPCR) = 0x8000007a;
}
-
+
CSRARC(fc, iMPR) = 0x00ff0001; /* # input channel = 1 */
CSRARC(fc, iPCR) = 0x803f0000;
- for(i = 4 ; i < 0x7c/4 ; i+=4){
- CSRARC(fc, i + iPCR) = 0x0;
+ for (i = 4; i < 0x7c/4; i += 4) {
+ CSRARC(fc, i + iPCR) = 0x0;
}
#endif
@@ -854,10 +854,11 @@ void fw_init(struct firewire_comm *fc)
#ifdef FW_VMACCESS
xfer = fw_xfer_alloc();
- if(xfer == NULL) return;
+ if (xfer == NULL)
+ return;
- fwb = (struct fw_bind *)malloc(sizeof (struct fw_bind), M_FW, M_NOWAIT);
- if(fwb == NULL){
+ fwb = malloc(sizeof(struct fw_bind), M_FW, M_NOWAIT);
+ if (fwb == NULL) {
fw_xfer_free(xfer);
return;
}
@@ -873,8 +874,8 @@ void fw_init(struct firewire_comm *fc)
#endif
}
-#define BIND_CMP(addr, fwb) (((addr) < (fwb)->start)?-1:\
- ((fwb)->end < (addr))?1:0)
+#define BIND_CMP(addr, fwb) (((addr) < (fwb)->start) ? -1 : \
+ ((fwb)->end < (addr)) ? 1 : 0)
/*
* To lookup bound process from IEEE1394 address.
@@ -893,7 +894,7 @@ fw_bindlookup(struct firewire_comm *fc, uint16_t dest_hi, uint32_t dest_lo)
break;
}
FW_GUNLOCK(fc);
- return(r);
+ return (r);
}
/*
@@ -978,7 +979,7 @@ fw_xferlist_add(struct fw_xferlist *q, struct malloc_type *type,
for (i = 0; i < n; i++) {
xfer = fw_xfer_alloc_buf(type, slen, rlen);
if (xfer == NULL)
- return (n);
+ return (i);
xfer->fc = fc;
xfer->sc = sc;
xfer->hand = hand;
@@ -995,10 +996,10 @@ fw_xferlist_remove(struct fw_xferlist *q)
struct fw_xfer *xfer, *next;
for (xfer = STAILQ_FIRST(q); xfer != NULL; xfer = next) {
- next = STAILQ_NEXT(xfer, link);
- fw_xfer_free_buf(xfer);
- }
- STAILQ_INIT(q);
+ next = STAILQ_NEXT(xfer, link);
+ fw_xfer_free_buf(xfer);
+ }
+ STAILQ_INIT(q);
}
/*
* dump packet header
@@ -1031,7 +1032,7 @@ fw_tl_free(struct firewire_comm *fc, struct fw_xfer *xfer)
}
/* make sure the label is allocated */
STAILQ_FOREACH(txfer, &fc->tlabels[xfer->tl], tlabel)
- if(txfer == xfer)
+ if (txfer == xfer)
break;
if (txfer == NULL) {
printf("%s: the xfer is not in the queue "
@@ -1064,7 +1065,7 @@ fw_tl2xfer(struct firewire_comm *fc, int node, int tlabel, int tcode)
mtx_lock(&fc->tlabel_lock);
STAILQ_FOREACH(xfer, &fc->tlabels[tlabel], tlabel)
- if(xfer->send.hdr.mode.hdr.dst == node) {
+ if (xfer->send.hdr.mode.hdr.dst == node) {
mtx_unlock(&fc->tlabel_lock);
splx(s);
KASSERT(xfer->tl == tlabel,
@@ -1075,18 +1076,18 @@ fw_tl2xfer(struct firewire_comm *fc, int node, int tlabel, int tcode)
printf("%s: invalid response tcode "
"(0x%x for 0x%x)\n", __FUNCTION__,
tcode, req);
- return(NULL);
+ return (NULL);
}
-
+
if (firewire_debug > 2)
printf("fw_tl2xfer: found tl=%d\n", tlabel);
- return(xfer);
+ return (xfer);
}
mtx_unlock(&fc->tlabel_lock);
if (firewire_debug > 1)
printf("fw_tl2xfer: not found tl=%d\n", tlabel);
splx(s);
- return(NULL);
+ return (NULL);
}
/*
@@ -1113,14 +1114,14 @@ fw_xfer_alloc_buf(struct malloc_type *type, int send_len, int recv_len)
xfer = fw_xfer_alloc(type);
if (xfer == NULL)
- return(NULL);
+ return (NULL);
xfer->send.pay_len = send_len;
xfer->recv.pay_len = recv_len;
if (send_len > 0) {
xfer->send.payload = malloc(send_len, type, M_NOWAIT | M_ZERO);
if (xfer->send.payload == NULL) {
fw_xfer_free(xfer);
- return(NULL);
+ return (NULL);
}
}
if (recv_len > 0) {
@@ -1129,10 +1130,10 @@ fw_xfer_alloc_buf(struct malloc_type *type, int send_len, int recv_len)
if (xfer->send.payload != NULL)
free(xfer->send.payload, type);
fw_xfer_free(xfer);
- return(NULL);
+ return (NULL);
}
}
- return(xfer);
+ return (xfer);
}
/*
@@ -1154,18 +1155,19 @@ fw_xfer_done(struct fw_xfer *xfer)
}
void
-fw_xfer_unload(struct fw_xfer* xfer)
+fw_xfer_unload(struct fw_xfer *xfer)
{
int s;
- if(xfer == NULL ) return;
- if(xfer->flag & FWXF_INQ){
+ if (xfer == NULL)
+ return;
+ if (xfer->flag & FWXF_INQ) {
printf("fw_xfer_free FWXF_INQ\n");
s = splfw();
FW_GLOCK(xfer->fc);
STAILQ_REMOVE(&xfer->q->q, xfer, fw_xfer, link);
#if 0
- xfer->q->queued --;
+ xfer->q->queued--;
#endif
FW_GUNLOCK(xfer->fc);
splx(s);
@@ -1177,7 +1179,7 @@ fw_xfer_unload(struct fw_xfer* xfer)
*/
fw_tl_free(xfer->fc, xfer);
#if 1
- if(xfer->flag & FWXF_START)
+ if (xfer->flag & FWXF_START)
/*
* This could happen if:
* 1. We call fwohci_arcv() before fwohci_txd().
@@ -1189,28 +1191,27 @@ fw_xfer_unload(struct fw_xfer* xfer)
xfer->flag = FWXF_INIT;
xfer->resp = 0;
}
+
/*
- * To free IEEE1394 XFER structure.
+ * To free IEEE1394 XFER structure.
*/
void
-fw_xfer_free_buf( struct fw_xfer* xfer)
+fw_xfer_free_buf(struct fw_xfer *xfer)
{
if (xfer == NULL) {
printf("%s: xfer == NULL\n", __func__);
return;
}
fw_xfer_unload(xfer);
- if(xfer->send.payload != NULL){
+ if (xfer->send.payload != NULL)
free(xfer->send.payload, xfer->malloc);
- }
- if(xfer->recv.payload != NULL){
+ if (xfer->recv.payload != NULL)
free(xfer->recv.payload, xfer->malloc);
- }
free(xfer, xfer->malloc);
}
void
-fw_xfer_free( struct fw_xfer* xfer)
+fw_xfer_free(struct fw_xfer *xfer)
{
if (xfer == NULL) {
printf("%s: xfer == NULL\n", __func__);
@@ -1231,7 +1232,7 @@ fw_asy_callback_free(struct fw_xfer *xfer)
}
/*
- * To configure PHY.
+ * To configure PHY.
*/
static void
fw_phy_config(struct firewire_comm *fc, int root_node, int gap_count)
@@ -1250,9 +1251,9 @@ fw_phy_config(struct firewire_comm *fc, int root_node, int gap_count)
fp = &xfer->send.hdr;
fp->mode.ld[1] = 0;
if (root_node >= 0)
- fp->mode.ld[1] |= (root_node & 0x3f) << 24 | 1 << 23;
+ fp->mode.ld[1] |= (1 << 23) | (root_node & 0x3f) << 24;
if (gap_count >= 0)
- fp->mode.ld[1] |= 1 << 22 | (gap_count & 0x3f) << 16;
+ fp->mode.ld[1] |= (1 << 22) | (gap_count & 0x3f) << 16;
fp->mode.ld[2] = ~fp->mode.ld[1];
/* XXX Dangerous, how to pass PHY packet to device driver */
fp->mode.common.tcode |= FWTCODE_PHY;
@@ -1264,113 +1265,107 @@ fw_phy_config(struct firewire_comm *fc, int root_node, int gap_count)
}
/*
- * Dump self ID.
+ * Dump self ID.
*/
static void
fw_print_sid(uint32_t sid)
{
union fw_self_id *s;
s = (union fw_self_id *) &sid;
- if ( s->p0.sequel ) {
- if ( s->p1.sequence_num == FW_SELF_ID_PAGE0 ) {
+ if (s->p0.sequel) {
+ if (s->p1.sequence_num == FW_SELF_ID_PAGE0) {
printf("node:%d p3:%d p4:%d p5:%d p6:%d p7:%d"
- "p8:%d p9:%d p10:%d\n",
- s->p1.phy_id, s->p1.port3, s->p1.port4,
- s->p1.port5, s->p1.port6, s->p1.port7,
- s->p1.port8, s->p1.port9, s->p1.port10);
- } else if (s->p2.sequence_num == FW_SELF_ID_PAGE1 ){
+ "p8:%d p9:%d p10:%d\n",
+ s->p1.phy_id, s->p1.port3, s->p1.port4,
+ s->p1.port5, s->p1.port6, s->p1.port7,
+ s->p1.port8, s->p1.port9, s->p1.port10);
+ } else if (s->p2.sequence_num == FW_SELF_ID_PAGE1) {
printf("node:%d p11:%d p12:%d p13:%d p14:%d p15:%d\n",
- s->p2.phy_id, s->p2.port11, s->p2.port12,
- s->p2.port13, s->p2.port14, s->p2.port15);
+ s->p2.phy_id, s->p2.port11, s->p2.port12,
+ s->p2.port13, s->p2.port14, s->p2.port15);
} else {
printf("node:%d Unknown Self ID Page number %d\n",
- s->p1.phy_id, s->p1.sequence_num);
+ s->p1.phy_id, s->p1.sequence_num);
}
} else {
printf("node:%d link:%d gap:%d spd:%d con:%d pwr:%d"
- " p0:%d p1:%d p2:%d i:%d m:%d\n",
- s->p0.phy_id, s->p0.link_active, s->p0.gap_count,
- s->p0.phy_speed, s->p0.contender,
- s->p0.power_class, s->p0.port0, s->p0.port1,
- s->p0.port2, s->p0.initiated_reset, s->p0.more_packets);
+ " p0:%d p1:%d p2:%d i:%d m:%d\n",
+ s->p0.phy_id, s->p0.link_active, s->p0.gap_count,
+ s->p0.phy_speed, s->p0.contender,
+ s->p0.power_class, s->p0.port0, s->p0.port1,
+ s->p0.port2, s->p0.initiated_reset, s->p0.more_packets);
}
}
/*
- * To receive self ID.
+ * To receive self ID.
*/
-void fw_sidrcv(struct firewire_comm* fc, uint32_t *sid, u_int len)
+void fw_sidrcv(struct firewire_comm *fc, uint32_t *sid, u_int len)
{
uint32_t *p;
union fw_self_id *self_id;
u_int i, j, node, c_port = 0, i_branch = 0;
- fc->sid_cnt = len /(sizeof(uint32_t) * 2);
+ fc->sid_cnt = len / (sizeof(uint32_t) * 2);
fc->max_node = fc->nodeid & 0x3f;
CSRARC(fc, NODE_IDS) = ((uint32_t)fc->nodeid) << 16;
fc->status = FWBUSCYMELECT;
fc->topology_map->crc_len = 2;
- fc->topology_map->generation ++;
+ fc->topology_map->generation++;
fc->topology_map->self_id_count = 0;
- fc->topology_map->node_count = 0;
- fc->speed_map->generation ++;
- fc->speed_map->crc_len = 1 + (64*64 + 3) / 4;
+ fc->topology_map->node_count = 0;
+ fc->speed_map->generation++;
+ fc->speed_map->crc_len = 1 + (64 * 64 + 3) / 4;
self_id = &fc->topology_map->self_id[0];
- for(i = 0; i < fc->sid_cnt; i ++){
+ for (i = 0; i < fc->sid_cnt; i++) {
if (sid[1] != ~sid[0]) {
- device_printf(fc->bdev, "%s: ERROR invalid self-id packet\n",
- __func__);
+ device_printf(fc->bdev,
+ "%s: ERROR invalid self-id packet\n", __func__);
sid += 2;
continue;
}
*self_id = *((union fw_self_id *)sid);
fc->topology_map->crc_len++;
- if(self_id->p0.sequel == 0){
- fc->topology_map->node_count ++;
+ if (self_id->p0.sequel == 0) {
+ fc->topology_map->node_count++;
c_port = 0;
if (firewire_debug)
fw_print_sid(sid[0]);
node = self_id->p0.phy_id;
- if(fc->max_node < node){
+ if (fc->max_node < node)
fc->max_node = self_id->p0.phy_id;
- }
/* XXX I'm not sure this is the right speed_map */
- fc->speed_map->speed[node][node]
- = self_id->p0.phy_speed;
- for (j = 0; j < node; j ++) {
- fc->speed_map->speed[j][node]
- = fc->speed_map->speed[node][j]
- = min(fc->speed_map->speed[j][j],
- self_id->p0.phy_speed);
+ fc->speed_map->speed[node][node] =
+ self_id->p0.phy_speed;
+ for (j = 0; j < node; j++) {
+ fc->speed_map->speed[j][node] =
+ fc->speed_map->speed[node][j] =
+ min(fc->speed_map->speed[j][j],
+ self_id->p0.phy_speed);
}
if ((fc->irm == -1 || self_id->p0.phy_id > fc->irm) &&
- (self_id->p0.link_active && self_id->p0.contender)) {
+ (self_id->p0.link_active && self_id->p0.contender))
fc->irm = self_id->p0.phy_id;
- }
- if(self_id->p0.port0 >= 0x2){
+ if (self_id->p0.port0 >= 0x2)
c_port++;
- }
- if(self_id->p0.port1 >= 0x2){
+ if (self_id->p0.port1 >= 0x2)
c_port++;
- }
- if(self_id->p0.port2 >= 0x2){
+ if (self_id->p0.port2 >= 0x2)
c_port++;
- }
}
- if(c_port > 2){
+ if (c_port > 2)
i_branch += (c_port - 2);
- }
sid += 2;
self_id++;
- fc->topology_map->self_id_count ++;
+ fc->topology_map->self_id_count++;
}
/* CRC */
fc->topology_map->crc = fw_crc16(
- (uint32_t *)&fc->topology_map->generation,
- fc->topology_map->crc_len * 4);
+ (uint32_t *)&fc->topology_map->generation,
+ fc->topology_map->crc_len * 4);
fc->speed_map->crc = fw_crc16(
- (uint32_t *)&fc->speed_map->generation,
- fc->speed_map->crc_len * 4);
+ (uint32_t *)&fc->speed_map->generation,
+ fc->speed_map->crc_len * 4);
/* byteswap and copy to CSR */
p = (uint32_t *)fc->topology_map;
for (i = 0; i <= fc->topology_map->crc_len; i++)
@@ -1379,14 +1374,13 @@ void fw_sidrcv(struct firewire_comm* fc, uint32_t *sid, u_int len)
CSRARC(fc, SPED_MAP) = htonl(*p++);
CSRARC(fc, SPED_MAP + 4) = htonl(*p++);
/* don't byte-swap uint8_t array */
- bcopy(p, &CSRARC(fc, SPED_MAP + 8), (fc->speed_map->crc_len - 1)*4);
+ bcopy(p, &CSRARC(fc, SPED_MAP + 8), (fc->speed_map->crc_len - 1) * 4);
fc->max_hop = fc->max_node - i_branch;
device_printf(fc->bdev, "%d nodes, maxhop <= %d %s irm(%d) %s\n",
- fc->max_node + 1, fc->max_hop,
- (fc->irm == -1) ? "Not IRM capable" : "cable IRM",
- fc->irm,
- (fc->irm == fc->nodeid) ? " (me) " : "");
+ fc->max_node + 1, fc->max_hop,
+ (fc->irm == -1) ? "Not IRM capable" : "cable IRM",
+ fc->irm, (fc->irm == fc->nodeid) ? " (me) " : "");
if (try_bmr && (fc->irm != -1) && (CSRARC(fc, BUS_MGR_ID) == 0x3f)) {
if (fc->irm == fc->nodeid) {
@@ -1395,26 +1389,27 @@ void fw_sidrcv(struct firewire_comm* fc, uint32_t *sid, u_int len)
fw_bmr(fc);
} else {
fc->status = FWBUSMGRELECT;
- callout_reset(&fc->bmr_callout, hz/8,
- (void *)fw_try_bmr, (void *)fc);
+ callout_reset(&fc->bmr_callout, hz / 8,
+ fw_try_bmr, fc);
}
} else
fc->status = FWBUSMGRDONE;
- callout_reset(&fc->busprobe_callout, hz/4,
- (void *)fw_bus_probe, (void *)fc);
+ callout_reset(&fc->busprobe_callout, hz / 4, fw_bus_probe, fc);
}
/*
- * To probe devices on the IEEE1394 bus.
+ * To probe devices on the IEEE1394 bus.
*/
static void
-fw_bus_probe(struct firewire_comm *fc)
+fw_bus_probe(void *arg)
{
- int s;
+ struct firewire_comm *fc;
struct fw_device *fwdev;
+ int s;
s = splfw();
+ fc = arg;
fc->status = FWBUSEXPLORE;
/* Invalidate all devices, just after bus reset. */
@@ -1438,7 +1433,7 @@ fw_bus_probe(struct firewire_comm *fc)
}
splx(s);
- wakeup((void *)fc);
+ wakeup(fc);
}
static int
@@ -1449,10 +1444,9 @@ fw_explore_read_quads(struct fw_device *fwdev, int offset,
uint32_t tmp;
int i, error;
- for (i = 0; i < length; i ++, offset += sizeof(uint32_t)) {
- xfer = fwmem_read_quad(fwdev, NULL, -1,
- 0xffff, 0xf0000000 | offset, (void *)&tmp,
- fw_xferwake);
+ for (i = 0; i < length; i++, offset += sizeof(uint32_t)) {
+ xfer = fwmem_read_quad(fwdev, NULL, -1, 0xffff,
+ 0xf0000000 | offset, &tmp, fw_xferwake);
if (xfer == NULL)
return (-1);
fw_xferwait(xfer);
@@ -1476,14 +1470,14 @@ fw_explore_csrblock(struct fw_device *fwdev, int offset, int recur)
struct csrdirectory *dir;
struct csrreg *reg;
- dir = (struct csrdirectory *)&fwdev->csrrom[offset/sizeof(uint32_t)];
+ dir = (struct csrdirectory *)&fwdev->csrrom[offset / sizeof(uint32_t)];
err = fw_explore_read_quads(fwdev, CSRROMOFF + offset,
(uint32_t *)dir, 1);
if (err)
return (-1);
offset += sizeof(uint32_t);
- reg = (struct csrreg *)&fwdev->csrrom[offset/sizeof(uint32_t)];
+ reg = (struct csrreg *)&fwdev->csrrom[offset / sizeof(uint32_t)];
err = fw_explore_read_quads(fwdev, CSRROMOFF + offset,
(uint32_t *)reg, dir->crc_len);
if (err)
@@ -1498,7 +1492,7 @@ fw_explore_csrblock(struct fw_device *fwdev, int offset, int recur)
if (recur == 0)
return (0);
- for (i = 0; i < dir->crc_len; i ++, offset += sizeof(uint32_t)) {
+ for (i = 0; i < dir->crc_len; i++, offset += sizeof(uint32_t)) {
if ((reg[i].key & CSRTYPE_MASK) == CSRTYPE_D)
recur = 1;
else if ((reg[i].key & CSRTYPE_MASK) == CSRTYPE_L)
@@ -1509,7 +1503,7 @@ fw_explore_csrblock(struct fw_device *fwdev, int offset, int recur)
off = offset + reg[i].val * sizeof(uint32_t);
if (off > CROMSIZE) {
printf("%s: invalid offset %d\n", __FUNCTION__, off);
- return(-1);
+ return (-1);
}
err = fw_explore_csrblock(fwdev, off, recur);
if (err)
@@ -1542,7 +1536,8 @@ fw_explore_node(struct fw_device *dfwdev)
hdr = (struct csrhdr *)&csr[0];
if (hdr->info_len != 4) {
if (firewire_debug)
- device_printf(fc->bdev, "%s: node%d: wrong bus info len(%d)\n",
+ device_printf(fc->bdev,
+ "%s: node%d: wrong bus info len(%d)\n",
__func__, node, hdr->info_len);
dfwdev->status = FWDEVINVAL;
return (-1);
@@ -1562,14 +1557,14 @@ fw_explore_node(struct fw_device *dfwdev)
if (firewire_debug)
device_printf(fc->bdev, "%s: node(%d) BUS INFO BLOCK:\n"
- "irmc(%d) cmc(%d) isc(%d) bmc(%d) pmc(%d) "
- "cyc_clk_acc(%d) max_rec(%d) max_rom(%d) "
- "generation(%d) link_spd(%d)\n",
- __func__, node,
- binfo->irmc, binfo->cmc, binfo->isc,
- binfo->bmc, binfo->pmc, binfo->cyc_clk_acc,
- binfo->max_rec, binfo->max_rom,
- binfo->generation, binfo->link_spd);
+ "irmc(%d) cmc(%d) isc(%d) bmc(%d) pmc(%d) "
+ "cyc_clk_acc(%d) max_rec(%d) max_rom(%d) "
+ "generation(%d) link_spd(%d)\n",
+ __func__, node,
+ binfo->irmc, binfo->cmc, binfo->isc,
+ binfo->bmc, binfo->pmc, binfo->cyc_clk_acc,
+ binfo->max_rec, binfo->max_rom,
+ binfo->generation, binfo->link_spd);
STAILQ_FOREACH(fwdev, &fc->devices, link)
if (FW_EUI64_EQUAL(fwdev->eui, binfo->eui64))
@@ -1577,7 +1572,7 @@ fw_explore_node(struct fw_device *dfwdev)
if (fwdev == NULL) {
/* new device */
fwdev = malloc(sizeof(struct fw_device), M_FW,
- M_NOWAIT | M_ZERO);
+ M_NOWAIT | M_ZERO);
if (fwdev == NULL) {
device_printf(fc->bdev, "%s: node%d: no memory\n",
__func__, node);
@@ -1591,16 +1586,15 @@ fw_explore_node(struct fw_device *dfwdev)
/*
* Pre-1394a-2000 didn't have link_spd in
- * the Bus Info block, so try and use the
+ * the Bus Info block, so try and use the
* speed map value.
* 1394a-2000 compliant devices only use
* the Bus Info Block link spd value, so
* ignore the speed map altogether. SWB
*/
- if ( binfo->link_spd == FWSPD_S100 /* 0 */) {
+ if (binfo->link_spd == FWSPD_S100 /* 0 */) {
device_printf(fc->bdev, "%s: "
- "Pre 1394a-2000 detected\n",
- __func__);
+ "Pre 1394a-2000 detected\n", __func__);
fwdev->speed = fc->speed_map->speed[fc->nodeid][node];
} else
fwdev->speed = binfo->link_spd;
@@ -1610,20 +1604,19 @@ fw_explore_node(struct fw_device *dfwdev)
*/
while (fwdev->speed > FWSPD_S100 /* 0 */) {
err = fw_explore_read_quads(fwdev, CSRROMOFF,
- &speed_test, 1);
+ &speed_test, 1);
if (err) {
- device_printf(fc->bdev, "%s: fwdev->speed(%s)"
- " decremented due to negotiation\n",
- __func__,
- linkspeed[fwdev->speed]);
+ device_printf(fc->bdev,
+ "%s: fwdev->speed(%s) decremented due to negotiation\n",
+ __func__, linkspeed[fwdev->speed]);
fwdev->speed--;
} else
break;
-
+
}
/*
- * If the fwdev is not found in the
+ * If the fwdev is not found in the
* fc->devices TAILQ, then we will add it.
*/
pfwdev = NULL;
@@ -1644,7 +1637,8 @@ fw_explore_node(struct fw_device *dfwdev)
/* unchanged ? */
if (bcmp(&csr[0], &fwdev->csrrom[0], sizeof(uint32_t) * 5) == 0) {
if (firewire_debug)
- device_printf(fc->dev, "node%d: crom unchanged\n", node);
+ device_printf(fc->dev,
+ "node%d: crom unchanged\n", node);
return (0);
}
}
@@ -1702,53 +1696,54 @@ fw_explore(struct firewire_comm *fc)
dfwdev.maxrec = 8; /* 512 */
dfwdev.status = FWDEVINIT;
- for (node = 0; node <= fc->max_node; node ++) {
+ for (node = 0; node <= fc->max_node; node++) {
/* We don't probe myself and linkdown nodes */
if (node == fc->nodeid) {
if (firewire_debug)
device_printf(fc->bdev, "%s:"
- "found myself node(%d) fc->nodeid(%d) fc->max_node(%d)\n",
- __func__, node, fc->nodeid, fc->max_node);
+ "found myself node(%d) fc->nodeid(%d) fc->max_node(%d)\n",
+ __func__, node, fc->nodeid, fc->max_node);
continue;
} else if (firewire_debug) {
device_printf(fc->bdev, "%s:"
- "node(%d) fc->max_node(%d) found\n",
- __func__, node, fc->max_node);
+ "node(%d) fc->max_node(%d) found\n",
+ __func__, node, fc->max_node);
}
fwsid = fw_find_self_id(fc, node);
if (!fwsid || !fwsid->p0.link_active) {
if (firewire_debug)
- device_printf(fc->bdev, "%s: node%d: link down\n",
- __func__, node);
+ device_printf(fc->bdev,
+ "%s: node%d: link down\n",
+ __func__, node);
continue;
}
nodes[todo++] = node;
}
s = splfw();
- for (trys = 0; todo > 0 && trys < 3; trys ++) {
+ for (trys = 0; todo > 0 && trys < 3; trys++) {
todo2 = 0;
- for (i = 0; i < todo; i ++) {
+ for (i = 0; i < todo; i++) {
dfwdev.dst = nodes[i];
err = fw_explore_node(&dfwdev);
if (err)
nodes[todo2++] = nodes[i];
if (firewire_debug)
- device_printf(fc->bdev, "%s: node %d, err = %d\n",
- __func__, node, err);
+ device_printf(fc->bdev,
+ "%s: node %d, err = %d\n",
+ __func__, node, err);
}
todo = todo2;
}
splx(s);
}
-
static void
fw_bus_probe_thread(void *arg)
{
struct firewire_comm *fc;
- fc = (struct firewire_comm *)arg;
+ fc = arg;
mtx_lock(&fc->wait_lock);
while (fc->status != FWBUSDETACH) {
@@ -1784,11 +1779,11 @@ fw_attach_dev(struct firewire_comm *fc)
if (fwdev->status == FWDEVINIT) {
fwdev->status = FWDEVATTACHED;
} else if (fwdev->status == FWDEVINVAL) {
- fwdev->rcnt ++;
+ fwdev->rcnt++;
if (firewire_debug)
device_printf(fc->bdev, "%s:"
- "fwdev->rcnt(%d), hold_count(%d)\n",
- __func__, fwdev->rcnt, hold_count);
+ "fwdev->rcnt(%d), hold_count(%d)\n",
+ __func__, fwdev->rcnt, hold_count);
if (fwdev->rcnt > hold_count) {
/*
* Remove devices which have not been seen
@@ -1802,9 +1797,9 @@ fw_attach_dev(struct firewire_comm *fc)
}
err = device_get_children(fc->bdev, &devlistp, &devcnt);
- if( err == 0 ) {
- for( i = 0 ; i < devcnt ; i++){
- if (device_get_state(devlistp[i]) >= DS_ATTACHED) {
+ if (err == 0) {
+ for (i = 0; i < devcnt; i++) {
+ if (device_get_state(devlistp[i]) >= DS_ATTACHED) {
fdc = device_get_softc(devlistp[i]);
if (fdc->post_explore != NULL)
fdc->post_explore(fdc);
@@ -1832,8 +1827,8 @@ fw_get_tlabel(struct firewire_comm *fc, struct fw_xfer *xfer)
new_tlabel = (fc->last_tlabel[dst] + 1) & 0x3f;
STAILQ_FOREACH(txfer, &fc->tlabels[new_tlabel], tlabel)
if ((txfer->send.hdr.mode.hdr.dst & 0x3f) == dst)
- break;
- if(txfer == NULL) {
+ break;
+ if (txfer == NULL) {
fc->last_tlabel[dst] = new_tlabel;
STAILQ_INSERT_TAIL(&fc->tlabels[new_tlabel], xfer, tlabel);
mtx_unlock(&fc->tlabel_lock);
@@ -1865,7 +1860,7 @@ fw_rcv_copy(struct fw_rcv_buf *rb)
pkt = (struct fw_pkt *)rb->vec->iov_base;
tinfo = &rb->fc->tcode[pkt->mode.hdr.tcode];
- /* Copy header */
+ /* Copy header */
p = (u_char *)&rb->xfer->recv.hdr;
bcopy(rb->vec->iov_base, p, tinfo->hdr_len);
rb->vec->iov_base = (u_char *)rb->vec->iov_base + tinfo->hdr_len;
@@ -1904,7 +1899,6 @@ fw_rcv_copy(struct fw_rcv_buf *rb)
break;
}
rb->xfer->recv.pay_len -= res;
-
}
/*
@@ -1923,11 +1917,11 @@ fw_rcv(struct fw_rcv_buf *rb)
int i;
qld = (uint32_t *)buf;
printf("spd %d len:%d\n", spd, len);
- for( i = 0 ; i <= len && i < 32; i+= 4){
+ for (i = 0; i <= len && i < 32; i += 4) {
printf("0x%08x ", ntohl(qld[i/4]));
- if((i % 16) == 15) printf("\n");
+ if ((i % 16) == 15) printf("\n");
}
- if((i % 16) != 15) printf("\n");
+ if ((i % 16) != 15) printf("\n");
}
#endif
fp = (struct fw_pkt *)rb->vec[0].iov_base;
@@ -1939,20 +1933,19 @@ fw_rcv(struct fw_rcv_buf *rb)
case FWTCODE_LRES:
rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src,
fp->mode.hdr.tlrt >> 2, fp->mode.hdr.tcode);
- if(rb->xfer == NULL) {
- device_printf(rb->fc->bdev, "%s: "
- "unknown response "
- "%s(%x) src=0x%x tl=0x%x rt=%d data=0x%x\n",
- __func__,
- tcode_str[tcode], tcode,
- fp->mode.hdr.src,
- fp->mode.hdr.tlrt >> 2,
- fp->mode.hdr.tlrt & 3,
- fp->mode.rresq.data);
+ if (rb->xfer == NULL) {
+ device_printf(rb->fc->bdev, "%s: unknown response "
+ "%s(%x) src=0x%x tl=0x%x rt=%d data=0x%x\n",
+ __func__,
+ tcode_str[tcode], tcode,
+ fp->mode.hdr.src,
+ fp->mode.hdr.tlrt >> 2,
+ fp->mode.hdr.tlrt & 3,
+ fp->mode.rresq.data);
#if 0
printf("try ad-hoc work around!!\n");
rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src,
- (fp->mode.hdr.tlrt >> 2)^3);
+ (fp->mode.hdr.tlrt >> 2)^3);
if (rb->xfer == NULL) {
printf("no use...\n");
return;
@@ -1981,7 +1974,8 @@ fw_rcv(struct fw_rcv_buf *rb)
break;
default:
device_printf(rb->fc->bdev, "%s: "
- "unexpected flag 0x%02x\n", __func__, rb->xfer->flag);
+ "unexpected flag 0x%02x\n", __func__,
+ rb->xfer->flag);
}
return;
case FWTCODE_WREQQ:
@@ -1990,29 +1984,26 @@ fw_rcv(struct fw_rcv_buf *rb)
case FWTCODE_RREQB:
case FWTCODE_LREQ:
bind = fw_bindlookup(rb->fc, fp->mode.rreqq.dest_hi,
- fp->mode.rreqq.dest_lo);
- if(bind == NULL){
+ fp->mode.rreqq.dest_lo);
+ if (bind == NULL) {
device_printf(rb->fc->bdev, "%s: "
- "Unknown service addr 0x%04x:0x%08x %s(%x)"
-#if defined(__DragonFly__) || __FreeBSD_version < 500000
- " src=0x%x data=%lx\n",
-#else
- " src=0x%x data=%x\n",
-#endif
- __func__,
- fp->mode.wreqq.dest_hi,
- fp->mode.wreqq.dest_lo,
- tcode_str[tcode], tcode,
- fp->mode.hdr.src,
- ntohl(fp->mode.wreqq.data));
+ "Unknown service addr 0x%04x:0x%08x %s(%x)"
+ " src=0x%x data=%x\n",
+ __func__,
+ fp->mode.wreqq.dest_hi,
+ fp->mode.wreqq.dest_lo,
+ tcode_str[tcode], tcode,
+ fp->mode.hdr.src,
+ ntohl(fp->mode.wreqq.data));
if (rb->fc->status == FWBUSINIT) {
- device_printf(rb->fc->bdev, "%s: cannot respond(bus reset)!\n",
- __func__);
+ device_printf(rb->fc->bdev,
+ "%s: cannot respond(bus reset)!\n",
+ __func__);
return;
}
rb->xfer = fw_xfer_alloc(M_FWXFER);
- if(rb->xfer == NULL){
+ if (rb->xfer == NULL) {
return;
}
rb->xfer->send.spd = rb->spd;
@@ -2043,20 +2034,16 @@ fw_rcv(struct fw_rcv_buf *rb)
rb->xfer->hand = fw_xferwake;
*/
rb->xfer->hand = fw_xfer_free;
- if(fw_asyreq(rb->fc, -1, rb->xfer)){
+ if (fw_asyreq(rb->fc, -1, rb->xfer))
fw_xfer_free(rb->xfer);
- return;
- }
- return;
}
len = 0;
- for (i = 0; i < rb->nvec; i ++)
+ for (i = 0; i < rb->nvec; i++)
len += rb->vec[i].iov_len;
rb->xfer = STAILQ_FIRST(&bind->xferlist);
if (rb->xfer == NULL) {
device_printf(rb->fc->bdev, "%s: "
- "Discard a packet for this bind.\n",
- __func__);
+ "Discard a packet for this bind.\n", __func__);
return;
}
STAILQ_REMOVE_HEAD(&bind->xferlist, link);
@@ -2073,11 +2060,11 @@ fw_rcv(struct fw_rcv_buf *rb)
printf("stream rcv dma %d len %d off %d spd %d\n",
sub, len, off, spd);
#endif
- if(xferq->queued >= xferq->maxq) {
+ if (xferq->queued >= xferq->maxq) {
printf("receive queue is full\n");
return;
}
- /* XXX get xfer from xfer queue, we don't need copy for
+ /* XXX get xfer from xfer queue, we don't need copy for
per packet mode */
rb->xfer = fw_xfer_alloc_buf(M_FWXFER, 0, /* XXX */
vec[0].iov_len);
@@ -2089,11 +2076,7 @@ fw_rcv(struct fw_rcv_buf *rb)
STAILQ_INSERT_TAIL(&xferq->q, rb->xfer, link);
splx(s);
sc = device_get_softc(rb->fc->bdev);
-#if defined(__DragonFly__) || __FreeBSD_version < 500000
- if (&xferq->rsel.si_pid != 0)
-#else
if (SEL_WAITING(&xferq->rsel))
-#endif
selwakeuppri(&xferq->rsel, FWPRI);
if (xferq->flag & FWXFERQ_WAKEUP) {
xferq->flag &= ~FWXFERQ_WAKEUP;
@@ -2108,7 +2091,7 @@ fw_rcv(struct fw_rcv_buf *rb)
#endif
default:
device_printf(rb->fc->bdev,"%s: unknown tcode %d\n",
- __func__, tcode);
+ __func__, tcode);
break;
}
}
@@ -2154,14 +2137,13 @@ static void
fw_try_bmr(void *arg)
{
struct fw_xfer *xfer;
- struct firewire_comm *fc = (struct firewire_comm *)arg;
+ struct firewire_comm *fc = arg;
struct fw_pkt *fp;
int err = 0;
xfer = fw_xfer_alloc_buf(M_FWXFER, 8, 4);
- if(xfer == NULL){
+ if (xfer == NULL)
return;
- }
xfer->send.spd = 0;
fc->status = FWBUSMGRELECT;
@@ -2180,7 +2162,7 @@ fw_try_bmr(void *arg)
xfer->hand = fw_try_bmr_callback;
err = fw_asyreq(fc, -1, xfer);
- if(err){
+ if (err) {
fw_xfer_free_buf(xfer);
return;
}
@@ -2190,33 +2172,37 @@ fw_try_bmr(void *arg)
#ifdef FW_VMACCESS
/*
* Software implementation for physical memory block access.
- * XXX:Too slow, usef for debug purpose only.
+ * XXX: Too slow, useful for debugging purposes only.
*/
static void
-fw_vmaccess(struct fw_xfer *xfer){
+fw_vmaccess(struct fw_xfer *xfer)
+{
struct fw_pkt *rfp, *sfp = NULL;
uint32_t *ld = (uint32_t *)xfer->recv.buf;
printf("vmaccess spd:%2x len:%03x data:%08x %08x %08x %08x\n",
- xfer->spd, xfer->recv.len, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3]));
- printf("vmaccess data:%08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7]));
- if(xfer->resp != 0){
- fw_xfer_free( xfer);
+ xfer->spd, xfer->recv.len, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]),
+ ntohl(ld[3]));
+ printf("vmaccess data:%08x %08x %08x %08x\n", ntohl(ld[4]),
+ ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7]));
+ if (xfer->resp != 0) {
+ fw_xfer_free(xfer);
return;
}
- if(xfer->recv.buf == NULL){
- fw_xfer_free( xfer);
+ if (xfer->recv.buf == NULL) {
+ fw_xfer_free(xfer);
return;
}
rfp = (struct fw_pkt *)xfer->recv.buf;
- switch(rfp->mode.hdr.tcode){
+ switch (rfp->mode.hdr.tcode) {
/* XXX need fix for 64bit arch */
case FWTCODE_WREQB:
xfer->send.buf = malloc(12, M_FW, M_NOWAIT);
xfer->send.len = 12;
sfp = (struct fw_pkt *)xfer->send.buf;
bcopy(rfp->mode.wreqb.payload,
- (caddr_t)ntohl(rfp->mode.wreqb.dest_lo), ntohs(rfp->mode.wreqb.len));
+ (caddr_t)ntohl(rfp->mode.wreqb.dest_lo),
+ ntohs(rfp->mode.wreqb.len));
sfp->mode.wres.tcode = FWTCODE_WRES;
sfp->mode.wres.rtcode = 0;
break;
@@ -2224,15 +2210,18 @@ fw_vmaccess(struct fw_xfer *xfer){
xfer->send.buf = malloc(12, M_FW, M_NOWAIT);
xfer->send.len = 12;
sfp->mode.wres.tcode = FWTCODE_WRES;
- *((uint32_t *)(ntohl(rfp->mode.wreqb.dest_lo))) = rfp->mode.wreqq.data;
+ *((uint32_t *)(ntohl(rfp->mode.wreqb.dest_lo))) =
+ rfp->mode.wreqq.data;
sfp->mode.wres.rtcode = 0;
break;
case FWTCODE_RREQB:
- xfer->send.buf = malloc(16 + rfp->mode.rreqb.len, M_FW, M_NOWAIT);
+ xfer->send.buf = malloc(16 + rfp->mode.rreqb.len,
+ M_FW, M_NOWAIT);
xfer->send.len = 16 + ntohs(rfp->mode.rreqb.len);
sfp = (struct fw_pkt *)xfer->send.buf;
bcopy((caddr_t)ntohl(rfp->mode.rreqb.dest_lo),
- sfp->mode.rresb.payload, (uint16_t)ntohs(rfp->mode.rreqb.len));
+ sfp->mode.rresb.payload,
+ ntohs(rfp->mode.rreqb.len));
sfp->mode.rresb.tcode = FWTCODE_RRESB;
sfp->mode.rresb.len = rfp->mode.rreqb.len;
sfp->mode.rresb.rtcode = 0;
@@ -2242,12 +2231,13 @@ fw_vmaccess(struct fw_xfer *xfer){
xfer->send.buf = malloc(16, M_FW, M_NOWAIT);
xfer->send.len = 16;
sfp = (struct fw_pkt *)xfer->send.buf;
- sfp->mode.rresq.data = *(uint32_t *)(ntohl(rfp->mode.rreqq.dest_lo));
+ sfp->mode.rresq.data =
+ *(uint32_t *)(ntohl(rfp->mode.rreqq.dest_lo));
sfp->mode.wres.tcode = FWTCODE_RRESQ;
sfp->mode.rresb.rtcode = 0;
break;
default:
- fw_xfer_free( xfer);
+ fw_xfer_free(xfer);
return;
}
sfp->mode.hdr.dst = rfp->mode.hdr.src;
@@ -2261,24 +2251,25 @@ fw_vmaccess(struct fw_xfer *xfer){
/**/
return;
}
-#endif
+#endif
/*
* CRC16 check-sum for IEEE1394 register blocks.
*/
uint16_t
-fw_crc16(uint32_t *ptr, uint32_t len){
+fw_crc16(uint32_t *ptr, uint32_t len)
+{
uint32_t i, sum, crc = 0;
int shift;
len = (len + 3) & ~3;
- for(i = 0 ; i < len ; i+= 4){
- for( shift = 28 ; shift >= 0 ; shift -= 4){
+ for (i = 0; i < len; i += 4) {
+ for (shift = 28; shift >= 0; shift -= 4) {
sum = ((crc >> 12) ^ (ptr[i/4] >> shift)) & 0xf;
- crc = (crc << 4) ^ ( sum << 12 ) ^ ( sum << 5) ^ sum;
+ crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
}
crc &= 0xffff;
}
- return((uint16_t) crc);
+ return ((uint16_t) crc);
}
/*
@@ -2303,7 +2294,7 @@ fw_bmr(struct firewire_comm *fc)
cmstr = fc->max_node;
else {
device_printf(fc->bdev,
- "root node is not cycle master capable\n");
+ "root node is not cycle master capable\n");
/* XXX shall we be the cycle master? */
cmstr = fc->nodeid;
/* XXX need bus reset */
@@ -2314,13 +2305,13 @@ fw_bmr(struct firewire_comm *fc)
device_printf(fc->bdev, "bus manager %d %s\n",
CSRARC(fc, BUS_MGR_ID),
(CSRARC(fc, BUS_MGR_ID) != fc->nodeid) ? "(me)" : "");
- if(CSRARC(fc, BUS_MGR_ID) != fc->nodeid) {
+ if (CSRARC(fc, BUS_MGR_ID) != fc->nodeid) {
/* We are not the bus manager */
- return(0);
+ return (0);
}
/* Optimize gapcount */
- if(fc->max_hop <= MAX_GAPHOP )
+ if (fc->max_hop <= MAX_GAPHOP)
fw_phy_config(fc, cmstr, gap_cnt[fc->max_hop]);
/* If we are the cycle master, nothing to do */
if (cmstr == fc->nodeid || cmstr == -1)
@@ -2335,7 +2326,7 @@ fw_bmr(struct firewire_comm *fc)
/* Set cmstr bit on the cycle master */
quad = htonl(1 << 8);
fwmem_write_quad(&fwdev, NULL, 0/*spd*/,
- 0xffff, 0xf0000000 | STATE_SET, &quad, fw_asy_callback_free);
+ 0xffff, 0xf0000000 | STATE_SET, &quad, fw_asy_callback_free);
return 0;
}
@@ -2353,7 +2344,7 @@ fw_open_isodma(struct firewire_comm *fc, int tx)
xferqa = &fc->ir[0];
FW_GLOCK(fc);
- for (i = 0; i < fc->nisodma; i ++) {
+ for (i = 0; i < fc->nisodma; i++) {
xferq = xferqa[i];
if ((xferq->flag & FWXFERQ_OPEN) == 0) {
xferq->flag |= FWXFERQ_OPEN;
@@ -2372,22 +2363,16 @@ static int
fw_modevent(module_t mode, int type, void *data)
{
int err = 0;
-#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
static eventhandler_tag fwdev_ehtag = NULL;
-#endif
switch (type) {
case MOD_LOAD:
-#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
fwdev_ehtag = EVENTHANDLER_REGISTER(dev_clone,
- fwdev_clone, 0, 1000);
-#endif
+ fwdev_clone, 0, 1000);
break;
case MOD_UNLOAD:
-#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
if (fwdev_ehtag != NULL)
EVENTHANDLER_DEREGISTER(dev_clone, fwdev_ehtag);
-#endif
break;
case MOD_SHUTDOWN:
break;
@@ -2398,8 +2383,6 @@ fw_modevent(module_t mode, int type, void *data)
}
-#ifdef __DragonFly__
-DECLARE_DUMMY_MODULE(firewire);
-#endif
-DRIVER_MODULE(firewire,fwohci,firewire_driver,firewire_devclass,fw_modevent,0);
+DRIVER_MODULE(firewire, fwohci, firewire_driver, firewire_devclass,
+ fw_modevent, 0);
MODULE_VERSION(firewire, 1);
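Besides the style(9) cleanup, one behavioral fix in the firewire.c hunks above is fw_xferwait(): the single if-test around msleep() becomes a while loop, so the FWXF_WAKE predicate is re-checked after every wakeup instead of assuming any return from msleep() means the transfer completed. A self-contained sketch of that sleep/wakeup pattern follows; the waiter structure and names are illustrative assumptions, not firewire code.

/*
 * Sketch of the condition-rechecking msleep()/wakeup() pattern that the
 * fw_xferwait() hunk above adopts; the structure and names here are
 * illustrative, not part of the firewire code.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct waiter {
	struct mtx	lock;
	int		done;		/* set by the completion path */
};

static int
waiter_wait(struct waiter *w)
{
	int error = 0;

	mtx_lock(&w->lock);
	/* Re-check the predicate; msleep() may return before "done" is set. */
	while (!w->done)
		error = msleep(w, &w->lock, PWAIT | PCATCH, "waiter", 0);
	mtx_unlock(&w->lock);

	return (error);
}

static void
waiter_wakeup(struct waiter *w)
{
	mtx_lock(&w->lock);
	w->done = 1;
	wakeup(w);		/* wake threads sleeping in waiter_wait() */
	mtx_unlock(&w->lock);
}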
diff --git a/sys/dev/firewire/firewire.h b/sys/dev/firewire/firewire.h
index 0a512e1..442fd6f 100644
--- a/sys/dev/firewire/firewire.h
+++ b/sys/dev/firewire/firewire.h
@@ -30,7 +30,7 @@
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
- *
+ *
* $FreeBSD$
*
*/
@@ -42,8 +42,8 @@
#define DEV_DV 2
struct fw_isochreq {
- unsigned char ch:6,
- tag:2;
+ unsigned char ch:6;
+ unsigned char tag:2;
};
struct fw_isobufreq {
@@ -247,15 +247,15 @@ struct fw_eui64 {
uint32_t hi, lo;
};
#define FW_EUI64_BYTE(eui, x) \
- ((((x)<4)? \
- ((eui)->hi >> (8*(3-(x)))): \
- ((eui)->lo >> (8*(7-(x)))) \
+ ((((x) < 4) ? \
+ ((eui)->hi >> (8 * (3 - (x)))) : \
+ ((eui)->lo >> (8 * (7 - (x)))) \
) & 0xff)
#define FW_EUI64_EQUAL(x, y) \
((x).hi == (y).hi && (x).lo == (y).lo)
struct fw_asyreq {
- struct fw_asyreq_t{
+ struct fw_asyreq_t {
unsigned char sped;
unsigned int type;
#define FWASREQNODE 0
@@ -265,8 +265,8 @@ struct fw_asyreq {
unsigned short len;
union {
struct fw_eui64 eui;
- }dst;
- }req;
+ } dst;
+ } req;
struct fw_pkt pkt;
uint32_t data[512];
};
@@ -406,7 +406,7 @@ struct fw_topology_map {
uint32_t generation;
uint32_t self_id_count:16,
node_count:16;
- union fw_self_id self_id[4*64];
+ union fw_self_id self_id[4 * 64];
};
struct fw_speed_map {
diff --git a/sys/dev/firewire/firewire_phy.h b/sys/dev/firewire/firewire_phy.h
index 42feff2..a420a4a 100644
--- a/sys/dev/firewire/firewire_phy.h
+++ b/sys/dev/firewire/firewire_phy.h
@@ -29,7 +29,7 @@
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
- *
+ *
* $FreeBSD$
*
*/
diff --git a/sys/dev/firewire/firewirereg.h b/sys/dev/firewire/firewirereg.h
index ba58f28..e9a8606 100644
--- a/sys/dev/firewire/firewirereg.h
+++ b/sys/dev/firewire/firewirereg.h
@@ -30,21 +30,13 @@
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
- *
+ *
* $FreeBSD$
*
*/
-#ifdef __DragonFly__
-typedef d_thread_t fw_proc;
-#include <sys/select.h>
-#elif __FreeBSD_version >= 500000
typedef struct thread fw_proc;
#include <sys/selinfo.h>
-#else
-typedef struct proc fw_proc;
-#include <sys/select.h>
-#endif
#include <sys/uio.h>
#include <sys/mutex.h>
@@ -54,7 +46,7 @@ typedef struct proc fw_proc;
STAILQ_HEAD(fw_xferlist, fw_xfer);
-struct fw_device{
+struct fw_device {
uint16_t dst;
struct fw_eui64 eui;
uint8_t speed;
@@ -64,7 +56,7 @@ struct fw_device{
#define CSRROMOFF 0x400
#define CSRROMSIZE 0x400
int rommax; /* offset from 0xffff f000 0000 */
- uint32_t csrrom[CSRROMSIZE/4];
+ uint32_t csrrom[CSRROMSIZE / 4];
int rcnt;
struct firewire_comm *fc;
uint32_t status;
@@ -101,11 +93,11 @@ struct tcode_info {
u_char valid_res;
};
-struct firewire_comm{
+struct firewire_comm {
device_t dev;
device_t bdev;
uint16_t busid:10,
- nodeid:6;
+ nodeid:6;
u_int mode;
u_int nport;
u_int speed;
@@ -137,7 +129,7 @@ struct firewire_comm{
STAILQ_HEAD(, fw_device) devices;
u_int sid_cnt;
#define CSRSIZE 0x4000
- uint32_t csr_arc[CSRSIZE/4];
+ uint32_t csr_arc[CSRSIZE / 4];
#define CROMSIZE 0x400
uint32_t *config_rom;
struct crom_src_buf *crom_src_buf;
@@ -149,7 +141,7 @@ struct firewire_comm{
struct callout bmr_callout;
struct callout timeout_callout;
struct task task_timeout;
- uint32_t (*cyctimer) (struct firewire_comm *);
+ uint32_t (*cyctimer) (struct firewire_comm *);
void (*ibr) (struct firewire_comm *);
uint32_t (*set_bmr) (struct firewire_comm *, uint32_t);
int (*ioctl) (struct cdev *, u_long, caddr_t, int, fw_proc *);
@@ -169,7 +161,7 @@ struct firewire_comm{
struct taskqueue *taskqueue;
struct proc *probe_thread;
};
-#define CSRARC(sc, offset) ((sc)->csr_arc[(offset)/4])
+#define CSRARC(sc, offset) ((sc)->csr_arc[(offset) / 4])
#define FW_GMTX(fc) (&(fc)->mtx)
#define FW_GLOCK(fc) mtx_lock(FW_GMTX(fc))
@@ -190,7 +182,7 @@ struct fw_xferq {
#define FWXFERQ_HANDLER (1 << 16)
#define FWXFERQ_WAKEUP (1 << 17)
- void (*start) (struct firewire_comm*);
+ void (*start) (struct firewire_comm *);
int dmach;
struct fw_xferlist q;
u_int queued;
@@ -209,7 +201,7 @@ struct fw_xferq {
void (*hand) (struct fw_xferq *);
};
-struct fw_bulkxfer{
+struct fw_bulkxfer {
int poffset;
struct mbuf *mbuf;
STAILQ_ENTRY(fw_bulkxfer) link;
@@ -218,7 +210,7 @@ struct fw_bulkxfer{
int resp;
};
-struct fw_bind{
+struct fw_bind {
u_int64_t start;
u_int64_t end;
struct fw_xferlist xferlist;
@@ -227,7 +219,7 @@ struct fw_bind{
void *sc;
};
-struct fw_xfer{
+struct fw_xfer {
caddr_t sc;
struct firewire_comm *fc;
struct fw_xferq *q;
@@ -267,9 +259,9 @@ struct fw_rcv_buf {
void fw_sidrcv (struct firewire_comm *, uint32_t *, u_int);
void fw_rcv (struct fw_rcv_buf *);
-void fw_xfer_unload ( struct fw_xfer*);
-void fw_xfer_free_buf ( struct fw_xfer*);
-void fw_xfer_free ( struct fw_xfer*);
+void fw_xfer_unload (struct fw_xfer *);
+void fw_xfer_free_buf (struct fw_xfer *);
+void fw_xfer_free (struct fw_xfer *);
struct fw_xfer *fw_xfer_alloc (struct malloc_type *);
struct fw_xfer *fw_xfer_alloc_buf (struct malloc_type *, int, int);
void fw_init (struct firewire_comm *);
@@ -280,7 +272,7 @@ int fw_bindremove (struct firewire_comm *, struct fw_bind *);
int fw_xferlist_add (struct fw_xferlist *, struct malloc_type *, int, int, int,
struct firewire_comm *, void *, void (*)(struct fw_xfer *));
void fw_xferlist_remove (struct fw_xferlist *);
-int fw_asyreq (struct firewire_comm *, int, struct fw_xfer*);
+int fw_asyreq (struct firewire_comm *, int, struct fw_xfer *);
void fw_busreset (struct firewire_comm *, uint32_t);
uint16_t fw_crc16 (uint32_t *, uint32_t);
void fw_xfer_timeout (void *);
@@ -301,7 +293,7 @@ extern int firewire_debug;
extern devclass_t firewire_devclass;
extern int firewire_phydma_enable;
-#define FWPRI ((PZERO+8)|PCATCH)
+#define FWPRI ((PZERO + 8) | PCATCH)
#define CALLOUT_INIT(x) callout_init(x, 1 /* mpsafe */)
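For context, FWPRI above is the priority the FireWire stack passes to its sleep calls; OR-ing in PCATCH makes those sleeps interruptible by signals. A minimal sketch of a wait at that priority, assuming an xfer pointer as the wait channel and an illustrative wmesg and timeout (none of these come from this diff):

	/*
	 * Illustrative only: sleep on an xfer at FireWire priority.
	 * PCATCH (folded into FWPRI) lets a signal end the sleep early;
	 * hz is roughly one second.
	 */
	error = tsleep(xfer, FWPRI, "fwxfer", hz);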
diff --git a/sys/dev/firewire/fwcrom.c b/sys/dev/firewire/fwcrom.c
index 4ee0f11..e53d3b6 100644
--- a/sys/dev/firewire/fwcrom.c
+++ b/sys/dev/firewire/fwcrom.c
@@ -59,13 +59,8 @@ __FBSDID("$FreeBSD$");
#endif
#endif
-#ifdef __DragonFly__
-#include "firewire.h"
-#include "iec13213.h"
-#else
#include <dev/firewire/firewire.h>
#include <dev/firewire/iec13213.h>
-#endif
#define MAX_ROM (1024 - sizeof(uint32_t) * 5)
#define CROM_END(cc) ((vm_offset_t)(cc)->stack[0].dir + MAX_ROM - 1)
@@ -116,7 +111,7 @@ crom_next(struct crom_context *cc)
printf("crom_next: too deep\n");
goto again;
}
- cc->depth ++;
+ cc->depth++;
ptr = &cc->stack[cc->depth];
ptr->dir = (struct csrdirectory *) (reg + reg->val);
@@ -125,10 +120,10 @@ crom_next(struct crom_context *cc)
}
again:
ptr = &cc->stack[cc->depth];
- ptr->index ++;
+ ptr->index++;
check:
if (ptr->index < ptr->dir->crc_len &&
- (vm_offset_t)crom_get(cc) <= CROM_END(cc))
+ (vm_offset_t)crom_get(cc) <= CROM_END(cc))
return;
if (ptr->index < ptr->dir->crc_len)
@@ -148,7 +143,7 @@ crom_search_key(struct crom_context *cc, uint8_t key)
{
struct csrreg *reg;
- while(cc->depth >= 0) {
+ while (cc->depth >= 0) {
reg = crom_get(cc);
if (reg->key == key)
return reg;
@@ -166,7 +161,7 @@ crom_has_specver(uint32_t *p, uint32_t spec, uint32_t ver)
cc = &c;
crom_init_context(cc, p);
- while(cc->depth >= 0) {
+ while (cc->depth >= 0) {
reg = crom_get(cc);
if (state == 0) {
if (reg->key == CSRKEY_SPEC && reg->val == spec)
@@ -198,7 +193,7 @@ crom_parse_text(struct crom_context *cc, char *buf, int len)
reg = crom_get(cc);
if (reg->key != CROM_TEXTLEAF ||
- (vm_offset_t)(reg + reg->val) > CROM_END(cc)) {
+ (vm_offset_t)(reg + reg->val) > CROM_END(cc)) {
strncpy(buf, nullstr, len);
return;
}
@@ -215,7 +210,7 @@ crom_parse_text(struct crom_context *cc, char *buf, int len)
qlen = textleaf->crc_len - 2;
if (len < qlen * 4)
qlen = len/4;
- for (i = 0; i < qlen; i ++)
+ for (i = 0; i < qlen; i++)
*bp++ = ntohl(textleaf->text[i]);
/* make sure to terminate the string */
if (len <= qlen * 4)
@@ -238,7 +233,7 @@ crom_crc(uint32_t *ptr, int len)
}
crc &= 0xffff;
}
- return((uint16_t) crc);
+ return ((uint16_t) crc);
}
#if !defined(_KERNEL) && !defined(_BOOT)
@@ -315,17 +310,17 @@ crom_desc(struct crom_context *cc, char *buf, int len)
break;
case CSRTYPE_C:
len -= snprintf(buf, len, "offset=0x%04x(%d)",
- reg->val, reg->val);
+ reg->val, reg->val);
buf += strlen(buf);
break;
case CSRTYPE_L:
/* XXX fall through */
case CSRTYPE_D:
- dir = (struct csrdirectory *) (reg + reg->val);
+ dir = (struct csrdirectory *)(reg + reg->val);
crc = crom_crc((uint32_t *)&dir->entry[0], dir->crc_len);
len -= snprintf(buf, len, "len=%d crc=0x%04x(%s) ",
- dir->crc_len, dir->crc,
- (crc == dir->crc) ? "OK" : "NG");
+ dir->crc_len, dir->crc,
+ (crc == dir->crc) ? "OK" : "NG");
buf += strlen(buf);
}
switch (reg->key) {
@@ -399,11 +394,11 @@ crom_add_quad(struct crom_chunk *chunk, uint32_t entry)
index = chunk->data.crc_len;
if (index >= CROM_MAX_CHUNK_LEN - 1) {
printf("too large chunk %d\n", index);
- return(-1);
+ return (-1);
}
chunk->data.buf[index] = entry;
chunk->data.crc_len++;
- return(index);
+ return (index);
}
int
@@ -414,7 +409,7 @@ crom_add_entry(struct crom_chunk *chunk, int key, int val)
struct csrreg reg;
uint32_t i;
} foo;
-
+
foo.reg.key = key;
foo.reg.val = val;
return (crom_add_quad(chunk, foo.i));
@@ -422,29 +417,29 @@ crom_add_entry(struct crom_chunk *chunk, int key, int val)
int
crom_add_chunk(struct crom_src *src, struct crom_chunk *parent,
- struct crom_chunk *child, int key)
+ struct crom_chunk *child, int key)
{
int index;
if (parent == NULL) {
STAILQ_INSERT_TAIL(&src->chunk_list, child, link);
- return(0);
+ return (0);
}
index = crom_add_entry(parent, key, 0);
if (index < 0) {
- return(-1);
+ return (-1);
}
child->ref_chunk = parent;
child->ref_index = index;
STAILQ_INSERT_TAIL(&src->chunk_list, child, link);
- return(index);
+ return (index);
}
#define MAX_TEXT ((CROM_MAX_CHUNK_LEN + 1) * 4 - sizeof(struct csrtext))
int
crom_add_simple_text(struct crom_src *src, struct crom_chunk *parent,
- struct crom_chunk *chunk, char *buf)
+ struct crom_chunk *chunk, char *buf)
{
struct csrtext *tl;
uint32_t *p;
@@ -453,7 +448,7 @@ crom_add_simple_text(struct crom_src *src, struct crom_chunk *parent,
len = strlen(buf);
if (len > MAX_TEXT) {
- printf("text(%d) trancated to %td.\n", len, MAX_TEXT);
+ printf("text(%d) truncated to %td.\n", len, MAX_TEXT);
len = MAX_TEXT;
}
@@ -465,7 +460,7 @@ crom_add_simple_text(struct crom_src *src, struct crom_chunk *parent,
bzero(&t[0], roundup2(len, sizeof(uint32_t)));
bcopy(buf, &t[0], len);
p = (uint32_t *)&t[0];
- for (i = 0; i < howmany(len, sizeof(uint32_t)); i ++)
+ for (i = 0; i < howmany(len, sizeof(uint32_t)); i++)
tl->text[i] = ntohl(*p++);
return (crom_add_chunk(src, parent, chunk, CROM_TEXTLEAF));
}
@@ -475,11 +470,11 @@ crom_copy(uint32_t *src, uint32_t *dst, int *offset, int len, int maxlen)
{
if (*offset + len > maxlen) {
printf("Config. ROM is too large for the buffer\n");
- return(-1);
+ return (-1);
}
bcopy(src, (char *)(dst + *offset), len * sizeof(uint32_t));
*offset += len;
- return(0);
+ return (0);
}
int
@@ -503,9 +498,9 @@ crom_load(struct crom_src *src, uint32_t *buf, int maxlen)
if (parent != NULL) {
struct csrreg *reg;
reg = (struct csrreg *)
- &parent->data.buf[chunk->ref_index];
+ &parent->data.buf[chunk->ref_index];
reg->val = offset -
- (parent->offset + 1 + chunk->ref_index);
+ (parent->offset + 1 + chunk->ref_index);
}
offset += 1 + chunk->data.crc_len;
}
@@ -514,15 +509,15 @@ crom_load(struct crom_src *src, uint32_t *buf, int maxlen)
len = 1 + src->hdr.info_len;
count = 0;
if (crom_copy((uint32_t *)&src->hdr, buf, &count, len, maxlen) < 0)
- return(-1);
+ return (-1);
STAILQ_FOREACH(chunk, &src->chunk_list, link) {
chunk->data.crc =
- crom_crc(&chunk->data.buf[0], chunk->data.crc_len);
+ crom_crc(&chunk->data.buf[0], chunk->data.crc_len);
len = 1 + chunk->data.crc_len;
if (crom_copy((uint32_t *)&chunk->data, buf,
- &count, len, maxlen) < 0)
- return(-1);
+ &count, len, maxlen) < 0)
+ return (-1);
}
hdr = (struct csrhdr *)buf;
hdr->crc_len = count - 1;
@@ -531,19 +526,20 @@ crom_load(struct crom_src *src, uint32_t *buf, int maxlen)
#if defined(_KERNEL) || defined(_BOOT)
/* byte swap */
ptr = buf;
- for (i = 0; i < count; i ++) {
+ for (i = 0; i < count; i++) {
*ptr = htonl(*ptr);
ptr++;
}
#endif
- return(count);
+ return (count);
}
#endif
#ifdef TEST
int
-main () {
+main()
+{
struct crom_src src;
struct crom_chunk root,unit1,unit2,unit3;
struct crom_chunk text1,text2,text3,text4,text5,text6,text7;
@@ -587,15 +583,9 @@ main () {
/* private company_id */
crom_add_entry(&root, CSRKEY_VENDOR, 0xacde48);
-#ifdef __DragonFly__
- crom_add_simple_text(&src, &root, &text1, "DragonFly");
- crom_add_entry(&root, CSRKEY_HW, __DragonFly_cc_version);
- crom_add_simple_text(&src, &root, &text2, "DragonFly-1");
-#else
crom_add_simple_text(&src, &root, &text1, "FreeBSD");
crom_add_entry(&root, CSRKEY_HW, __FreeBSD_version);
crom_add_simple_text(&src, &root, &text2, "FreeBSD-5");
-#endif
/* SBP unit directory */
crom_add_chunk(&src, &root, &unit1, CROM_UDIR);
@@ -628,11 +618,11 @@ main () {
crom_load(&src, buf, 256);
p = buf;
#define DUMP_FORMAT "%08x %08x %08x %08x %08x %08x %08x %08x\n"
- for (i = 0; i < 256/8; i ++) {
+ for (i = 0; i < 256/8; i++) {
printf(DUMP_FORMAT,
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
p += 8;
}
- return(0);
+ return (0);
}
#endif
diff --git a/sys/dev/firewire/fwdev.c b/sys/dev/firewire/fwdev.c
index 20d89a4..e09ce2c 100644
--- a/sys/dev/firewire/fwdev.c
+++ b/sys/dev/firewire/fwdev.c
@@ -30,7 +30,7 @@
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
- *
+ *
* $FreeBSD$
*
*/
@@ -52,19 +52,11 @@
#include <sys/ioccom.h>
-#ifdef __DragonFly__
-#include "firewire.h"
-#include "firewirereg.h"
-#include "fwdma.h"
-#include "fwmem.h"
-#include "iec68113.h"
-#else
#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/fwdma.h>
#include <dev/firewire/fwmem.h>
#include <dev/firewire/iec68113.h>
-#endif
#define FWNODE_INVAL 0xffff
@@ -78,12 +70,6 @@ static d_mmap_t fw_mmap;
static d_strategy_t fw_strategy;
struct cdevsw firewire_cdevsw = {
-#ifdef __DragonFly__
-#define CDEV_MAJOR 127
- "fw", CDEV_MAJOR, D_MEM, NULL, 0,
- fw_open, fw_close, fw_read, fw_write, fw_ioctl,
- fw_poll, fw_mmap, fw_strategy, nodump, nopsize,
-#elif __FreeBSD_version >= 500104
.d_version = D_VERSION,
.d_open = fw_open,
.d_close = fw_close,
@@ -95,12 +81,6 @@ struct cdevsw firewire_cdevsw = {
.d_strategy = fw_strategy,
.d_name = "fw",
.d_flags = D_MEM
-#else
-#define CDEV_MAJOR 127
- fw_open, fw_close, fw_read, fw_write, fw_ioctl,
- fw_poll, fw_mmap, fw_strategy, "fw", CDEV_MAJOR,
- nodump, nopsize, D_MEM, -1
-#endif
};
struct fw_drv1 {
@@ -119,22 +99,21 @@ fwdev_allocbuf(struct firewire_comm *fc, struct fw_xferq *q,
int i;
if (q->flag & (FWXFERQ_RUNNING | FWXFERQ_EXTBUF))
- return(EBUSY);
+ return (EBUSY);
- q->bulkxfer = (struct fw_bulkxfer *) malloc(
- sizeof(struct fw_bulkxfer) * b->nchunk,
- M_FW, M_WAITOK);
+ q->bulkxfer = malloc(sizeof(struct fw_bulkxfer) * b->nchunk,
+ M_FW, M_WAITOK);
if (q->bulkxfer == NULL)
- return(ENOMEM);
+ return (ENOMEM);
b->psize = roundup2(b->psize, sizeof(uint32_t));
q->buf = fwdma_malloc_multiseg(fc, sizeof(uint32_t),
- b->psize, b->nchunk * b->npacket, BUS_DMA_WAITOK);
+ b->psize, b->nchunk * b->npacket, BUS_DMA_WAITOK);
if (q->buf == NULL) {
free(q->bulkxfer, M_FW);
q->bulkxfer = NULL;
- return(ENOMEM);
+ return (ENOMEM);
}
q->bnchunk = b->nchunk;
q->bnpacket = b->npacket;
@@ -146,7 +125,7 @@ fwdev_allocbuf(struct firewire_comm *fc, struct fw_xferq *q,
STAILQ_INIT(&q->stdma);
q->stproc = NULL;
- for(i = 0 ; i < q->bnchunk; i++){
+ for (i = 0; i < q->bnchunk; i++) {
q->bulkxfer[i].poffset = i * q->bnpacket;
q->bulkxfer[i].mbuf = NULL;
STAILQ_INSERT_TAIL(&q->stfree, &q->bulkxfer[i], link);
@@ -177,7 +156,7 @@ fwdev_freebuf(struct fw_xferq *q)
static int
-fw_open (struct cdev *dev, int flags, int fmt, fw_proc *td)
+fw_open(struct cdev *dev, int flags, int fmt, fw_proc *td)
{
int err = 0;
int unit = DEV2UNIT(dev);
@@ -209,10 +188,10 @@ fw_open (struct cdev *dev, int flags, int fmt, fw_proc *td)
int sub = DEV2SUB(dev);
make_dev(&firewire_cdevsw, dev2unit(dev),
- UID_ROOT, GID_OPERATOR, 0660,
- "fw%d.%d", unit, sub);
+ UID_ROOT, GID_OPERATOR, 0660, "fw%d.%d", unit, sub);
}
- d = (struct fw_drv1 *)dev->si_drv1;
+
+ d = dev->si_drv1;
d->fc = sc->fc;
STAILQ_INIT(&d->binds);
STAILQ_INIT(&d->rq);
@@ -221,7 +200,7 @@ fw_open (struct cdev *dev, int flags, int fmt, fw_proc *td)
}
static int
-fw_close (struct cdev *dev, int flags, int fmt, fw_proc *td)
+fw_close(struct cdev *dev, int flags, int fmt, fw_proc *td)
{
struct firewire_comm *fc;
struct fw_drv1 *d;
@@ -232,12 +211,12 @@ fw_close (struct cdev *dev, int flags, int fmt, fw_proc *td)
if (DEV_FWMEM(dev))
return fwmem_close(dev, flags, fmt, td);
- d = (struct fw_drv1 *)dev->si_drv1;
+ d = dev->si_drv1;
fc = d->fc;
/* remove binding */
for (fwb = STAILQ_FIRST(&d->binds); fwb != NULL;
- fwb = STAILQ_FIRST(&d->binds)) {
+ fwb = STAILQ_FIRST(&d->binds)) {
fw_bindremove(fc, fwb);
STAILQ_REMOVE_HEAD(&d->binds, chlist);
fw_xferlist_remove(&fwb->xferlist);
@@ -256,15 +235,15 @@ fw_close (struct cdev *dev, int flags, int fmt, fw_proc *td)
fwdev_freebuf(ir);
/* drain receiving buffer */
for (xfer = STAILQ_FIRST(&ir->q);
- xfer != NULL; xfer = STAILQ_FIRST(&ir->q)) {
- ir->queued --;
+ xfer != NULL; xfer = STAILQ_FIRST(&ir->q)) {
+ ir->queued--;
STAILQ_REMOVE_HEAD(&ir->q, link);
xfer->resp = 0;
fw_xfer_done(xfer);
}
- ir->flag &= ~(FWXFERQ_OPEN |
- FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
+ ir->flag &= ~(FWXFERQ_OPEN | FWXFERQ_MODEMASK |
+ FWXFERQ_CHTAGMASK);
d->ir = NULL;
}
@@ -280,7 +259,7 @@ fw_close (struct cdev *dev, int flags, int fmt, fw_proc *td)
/* free extbuf */
fwdev_freebuf(it);
it->flag &= ~(FWXFERQ_OPEN |
- FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
+ FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
d->it = NULL;
}
free(dev->si_drv1, M_FW);
@@ -317,10 +296,10 @@ fw_read_async(struct fw_drv1 *d, struct uio *uio, int ioflag)
fc->irx_post(fc, fp->mode.ld);
#endif
tinfo = &xfer->fc->tcode[fp->mode.hdr.tcode];
- err = uiomove((void *)fp, tinfo->hdr_len, uio);
+ err = uiomove(fp, tinfo->hdr_len, uio);
if (err)
goto out;
- err = uiomove((void *)xfer->recv.payload, xfer->recv.pay_len, uio);
+ err = uiomove(xfer->recv.payload, xfer->recv.pay_len, uio);
out:
/* recycle this xfer */
@@ -337,7 +316,7 @@ out:
* read request.
*/
static int
-fw_read (struct cdev *dev, struct uio *uio, int ioflag)
+fw_read(struct cdev *dev, struct uio *uio, int ioflag)
{
struct fw_drv1 *d;
struct fw_xferq *ir;
@@ -348,7 +327,7 @@ fw_read (struct cdev *dev, struct uio *uio, int ioflag)
if (DEV_FWMEM(dev))
return (physio(dev, uio, ioflag));
- d = (struct fw_drv1 *)dev->si_drv1;
+ d = dev->si_drv1;
fc = d->fc;
ir = d->ir;
@@ -383,21 +362,21 @@ readloop:
err = EIO;
FW_GUNLOCK(fc);
return err;
- } else if(ir->stproc != NULL) {
+ } else if (ir->stproc != NULL) {
/* iso bulkxfer */
FW_GUNLOCK(fc);
- fp = (struct fw_pkt *)fwdma_v_addr(ir->buf,
- ir->stproc->poffset + ir->queued);
- if(fc->irx_post != NULL)
+ fp = (struct fw_pkt *)fwdma_v_addr(ir->buf,
+ ir->stproc->poffset + ir->queued);
+ if (fc->irx_post != NULL)
fc->irx_post(fc, fp->mode.ld);
- if(fp->mode.stream.len == 0){
+ if (fp->mode.stream.len == 0) {
err = EIO;
return err;
}
err = uiomove((caddr_t)fp,
fp->mode.stream.len + sizeof(uint32_t), uio);
- ir->queued ++;
- if(ir->queued >= ir->bnpacket){
+ ir->queued++;
+ if (ir->queued >= ir->bnpacket) {
s = splfw();
STAILQ_INSERT_TAIL(&ir->stfree, ir->stproc, link);
splx(s);
@@ -470,7 +449,7 @@ out:
}
static int
-fw_write (struct cdev *dev, struct uio *uio, int ioflag)
+fw_write(struct cdev *dev, struct uio *uio, int ioflag)
{
int err = 0;
int s, slept = 0;
@@ -482,7 +461,7 @@ fw_write (struct cdev *dev, struct uio *uio, int ioflag)
if (DEV_FWMEM(dev))
return (physio(dev, uio, ioflag));
- d = (struct fw_drv1 *)dev->si_drv1;
+ d = dev->si_drv1;
fc = d->fc;
it = d->it;
@@ -523,7 +502,7 @@ isoloop:
err = uiomove((caddr_t)fp, sizeof(struct fw_isohdr), uio);
err = uiomove((caddr_t)fp->mode.stream.payload,
fp->mode.stream.len, uio);
- it->queued ++;
+ it->queued++;
if (it->queued >= it->bnpacket) {
s = splfw();
STAILQ_INSERT_TAIL(&it->stvalid, it->stproc, link);
@@ -550,7 +529,7 @@ fw_hand(struct fw_xfer *xfer)
struct fw_drv1 *d;
fwb = (struct fw_bind *)xfer->sc;
- d = (struct fw_drv1 *)fwb->sc;
+ d = fwb->sc;
FW_GLOCK(xfer->fc);
STAILQ_INSERT_TAIL(&d->rq, xfer, link);
FW_GUNLOCK(xfer->fc);
@@ -561,7 +540,7 @@ fw_hand(struct fw_xfer *xfer)
* ioctl support.
*/
int
-fw_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
+fw_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
{
struct firewire_comm *fc;
struct fw_drv1 *d;
@@ -585,9 +564,9 @@ fw_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
return fwmem_ioctl(dev, cmd, data, flag, td);
if (!data)
- return(EINVAL);
+ return (EINVAL);
- d = (struct fw_drv1 *)dev->si_drv1;
+ d = dev->si_drv1;
fc = d->fc;
ir = d->ir;
it = d->it;
@@ -703,7 +682,7 @@ fw_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
bcopy(fp, (void *)&xfer->send.hdr, tinfo->hdr_len);
if (pay_len > 0)
bcopy((char *)fp + tinfo->hdr_len,
- (void *)xfer->send.payload, pay_len);
+ xfer->send.payload, pay_len);
xfer->send.spd = asyreq->req.sped;
xfer->hand = fw_xferwake;
@@ -725,7 +704,7 @@ fw_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
pay_len = xfer->recv.pay_len;
if (asyreq->req.len >= xfer->recv.pay_len + tinfo->hdr_len) {
asyreq->req.len = xfer->recv.pay_len +
- tinfo->hdr_len;
+ tinfo->hdr_len;
} else {
err = EINVAL;
pay_len = 0;
@@ -745,7 +724,7 @@ out:
case FW_CBINDADDR:
fwb = fw_bindlookup(fc,
bindreq->start.hi, bindreq->start.lo);
- if(fwb == NULL){
+ if (fwb == NULL) {
err = EINVAL;
break;
}
@@ -755,30 +734,30 @@ out:
free(fwb, M_FW);
break;
case FW_SBINDADDR:
- if(bindreq->len <= 0 ){
+ if (bindreq->len <= 0) {
err = EINVAL;
break;
}
- if(bindreq->start.hi > 0xffff ){
+ if (bindreq->start.hi > 0xffff) {
err = EINVAL;
break;
}
- fwb = (struct fw_bind *)malloc(sizeof (struct fw_bind), M_FW, M_WAITOK);
- if(fwb == NULL){
+ fwb = malloc(sizeof(struct fw_bind), M_FW, M_WAITOK);
+ if (fwb == NULL) {
err = ENOMEM;
break;
}
fwb->start = ((u_int64_t)bindreq->start.hi << 32) |
bindreq->start.lo;
fwb->end = fwb->start + bindreq->len;
- fwb->sc = (void *)d;
+ fwb->sc = d;
STAILQ_INIT(&fwb->xferlist);
err = fw_bindadd(fc, fwb);
if (err == 0) {
fw_xferlist_add(&fwb->xferlist, M_FWXFER,
/* XXX */
PAGE_SIZE, PAGE_SIZE, 5,
- fc, (void *)fwb, fw_hand);
+ fc, fwb, fw_hand);
STAILQ_INSERT_TAIL(&d->binds, fwb, chlist);
}
break;
@@ -791,11 +770,11 @@ out:
devinfo->eui.hi = fc->eui.hi;
devinfo->eui.lo = fc->eui.lo;
STAILQ_FOREACH(fwdev, &fc->devices, link) {
- if(len < FW_MAX_DEVLST){
+ if (len < FW_MAX_DEVLST) {
devinfo = &fwdevlst->dev[len++];
devinfo->dst = fwdev->dst;
- devinfo->status =
- (fwdev->status == FWDEVINVAL)?0:1;
+ devinfo->status =
+ (fwdev->status == FWDEVINVAL) ? 0 : 1;
devinfo->eui.hi = fwdev->eui.hi;
devinfo->eui.lo = fwdev->eui.lo;
}
@@ -806,7 +785,7 @@ out:
break;
case FW_GTPMAP:
bcopy(fc->topology_map, data,
- (fc->topology_map->crc_len + 1) * 4);
+ (fc->topology_map->crc_len + 1) * 4);
break;
case FW_GCROM:
STAILQ_FOREACH(fwdev, &fc->devices, link)
@@ -841,7 +820,7 @@ out:
free(ptr, M_FW);
break;
default:
- fc->ioctl (dev, cmd, data, flag, td);
+ fc->ioctl(dev, cmd, data, flag, td);
break;
}
return err;
@@ -867,7 +846,7 @@ fw_poll(struct cdev *dev, int events, fw_proc *td)
}
tmp = POLLOUT | POLLWRNORM;
if (events & tmp) {
- /* XXX should be fixed */
+ /* XXX should be fixed */
revents |= tmp;
}
@@ -877,7 +856,7 @@ fw_poll(struct cdev *dev, int events, fw_proc *td)
static int
fw_mmap (struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
int nproto, vm_memattr_t *memattr)
-{
+{
if (DEV_FWMEM(dev))
return fwmem_mmap(dev, offset, paddr, nproto, memattr);
@@ -912,12 +891,9 @@ fwdev_makedev(struct firewire_softc *sc)
unit = device_get_unit(sc->fc->bdev);
sc->dev = make_dev(&firewire_cdevsw, MAKEMINOR(0, unit, 0),
- UID_ROOT, GID_OPERATOR, 0660,
- "fw%d.%d", unit, 0);
- d = make_dev(&firewire_cdevsw,
- MAKEMINOR(FWMEM_FLAG, unit, 0),
- UID_ROOT, GID_OPERATOR, 0660,
- "fwmem%d.%d", unit, 0);
+ UID_ROOT, GID_OPERATOR, 0660, "fw%d.%d", unit, 0);
+ d = make_dev(&firewire_cdevsw, MAKEMINOR(FWMEM_FLAG, unit, 0),
+ UID_ROOT, GID_OPERATOR, 0660, "fwmem%d.%d", unit, 0);
dev_depends(sc->dev, d);
make_dev_alias(sc->dev, "fw%d", unit);
make_dev_alias(d, "fwmem%d", unit);
diff --git a/sys/dev/firewire/fwdma.c b/sys/dev/firewire/fwdma.c
index d63966d..c8378f0 100644
--- a/sys/dev/firewire/fwdma.c
+++ b/sys/dev/firewire/fwdma.c
@@ -1,7 +1,7 @@
/*-
* Copyright (c) 2003
* Hidetoshi Shimokawa. All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -18,7 +18,7 @@
* 4. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -30,7 +30,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
*/
#ifdef __FreeBSD__
@@ -87,7 +87,7 @@ fwdma_malloc(struct firewire_comm *fc, int alignment, bus_size_t size,
&dma->dma_tag);
if (err) {
printf("fwdma_malloc: failed(1)\n");
- return(NULL);
+ return (NULL);
}
err = bus_dmamem_alloc(dma->dma_tag, &dma->v_addr,
@@ -95,13 +95,13 @@ fwdma_malloc(struct firewire_comm *fc, int alignment, bus_size_t size,
if (err) {
printf("fwdma_malloc: failed(2)\n");
/* XXX destroy tag */
- return(NULL);
+ return (NULL);
}
bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->v_addr,
size, fwdma_map_cb, &dma->bus_addr, /*flags*/0);
- return(dma->v_addr);
+ return (dma->v_addr);
}
void
@@ -121,11 +121,11 @@ fwdma_malloc_size(bus_dma_tag_t dmat, bus_dmamap_t *dmamap,
if (bus_dmamem_alloc(dmat, &v_addr, flag, dmamap)) {
printf("fwdma_malloc_size: failed(1)\n");
- return(NULL);
+ return (NULL);
}
bus_dmamap_load(dmat, *dmamap, v_addr, size,
fwdma_map_cb, bus_addr, /*flags*/0);
- return(v_addr);
+ return (v_addr);
}
void
@@ -134,7 +134,7 @@ fwdma_free_size(bus_dma_tag_t dmat, bus_dmamap_t dmamap,
{
bus_dmamap_unload(dmat, dmamap);
bus_dmamem_free(dmat, vaddr, dmamap);
-}
+}
/*
* Allocate multisegment dma buffers
@@ -162,7 +162,7 @@ fwdma_malloc_multiseg(struct firewire_comm *fc, int alignment,
+ sizeof(struct fwdma_seg)*nseg, M_FW, M_WAITOK);
if (am == NULL) {
printf("fwdma_malloc_multiseg: malloc failed\n");
- return(NULL);
+ return (NULL);
}
am->ssize = ssize;
am->esize = esize;
@@ -183,21 +183,21 @@ fwdma_malloc_multiseg(struct firewire_comm *fc, int alignment,
&am->dma_tag)) {
printf("fwdma_malloc_multiseg: tag_create failed\n");
free(am, M_FW);
- return(NULL);
+ return (NULL);
}
- for (seg = &am->seg[0]; nseg --; seg ++) {
+ for (seg = &am->seg[0]; nseg--; seg++) {
seg->v_addr = fwdma_malloc_size(am->dma_tag, &seg->dma_map,
ssize, &seg->bus_addr, flag);
if (seg->v_addr == NULL) {
printf("fwdma_malloc_multi: malloc_size failed %d\n",
am->nseg);
fwdma_free_multiseg(am);
- return(NULL);
+ return (NULL);
}
am->nseg++;
}
- return(am);
+ return (am);
}
void
@@ -205,7 +205,7 @@ fwdma_free_multiseg(struct fwdma_alloc_multi *am)
{
struct fwdma_seg *seg;
- for (seg = &am->seg[0]; am->nseg --; seg ++) {
+ for (seg = &am->seg[0]; am->nseg--; seg++) {
fwdma_free_size(am->dma_tag, seg->dma_map,
seg->v_addr, am->ssize);
}
diff --git a/sys/dev/firewire/fwdma.h b/sys/dev/firewire/fwdma.h
index 3a8278c..ec67971 100644
--- a/sys/dev/firewire/fwdma.h
+++ b/sys/dev/firewire/fwdma.h
@@ -1,7 +1,7 @@
/*-
* Copyright (C) 2003
* Hidetoshi Shimokawa. All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -18,7 +18,7 @@
* 4. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -30,20 +30,20 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* $FreeBSD$
*/
struct fwdma_alloc {
bus_dma_tag_t dma_tag;
bus_dmamap_t dma_map;
- void * v_addr;
+ void *v_addr;
bus_addr_t bus_addr;
};
struct fwdma_seg {
bus_dmamap_t dma_map;
- void * v_addr;
+ void *v_addr;
bus_addr_t bus_addr;
};
@@ -74,20 +74,20 @@ fwdma_bus_addr(struct fwdma_alloc_multi *am, int index)
}
static __inline void
-fwdma_sync(struct fwdma_alloc *dma, bus_dmasync_op_t op)
+fwdma_sync(struct fwdma_alloc *dma, bus_dmasync_op_t op)
{
bus_dmamap_sync(dma->dma_tag, dma->dma_map, op);
}
static __inline void
fwdma_sync_multiseg(struct fwdma_alloc_multi *am,
- int start, int end, bus_dmasync_op_t op)
+ int start, int end, bus_dmasync_op_t op)
{
struct fwdma_seg *seg, *eseg;
seg = &am->seg[am->esize * start / am->ssize];
eseg = &am->seg[am->esize * end / am->ssize];
- for (; seg <= eseg; seg ++)
+ for (; seg <= eseg; seg++)
bus_dmamap_sync(am->dma_tag, seg->dma_map, op);
}
diff --git a/sys/dev/firewire/fwmem.c b/sys/dev/firewire/fwmem.c
index 4cd24d0..182fc8a 100644
--- a/sys/dev/firewire/fwmem.c
+++ b/sys/dev/firewire/fwmem.c
@@ -1,7 +1,7 @@
/*-
* Copyright (c) 2002-2003
* Hidetoshi Shimokawa. All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -18,7 +18,7 @@
* 4. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -30,7 +30,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
*/
#ifdef __FreeBSD__
@@ -60,7 +60,7 @@ __FBSDID("$FreeBSD$");
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/fwmem.h>
-static int fwmem_speed=2, fwmem_debug=0;
+static int fwmem_speed = 2, fwmem_debug = 0;
static struct fw_eui64 fwmem_eui64;
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwmem, CTLFLAG_RD, 0,
@@ -126,7 +126,7 @@ fwmem_read_quad(
struct fw_xfer *xfer;
struct fw_pkt *fp;
- xfer = fwmem_xfer_req(fwdev, (void *)sc, spd, 0, 4, hand);
+ xfer = fwmem_xfer_req(fwdev, sc, spd, 0, 4, hand);
if (xfer == NULL) {
return NULL;
}
@@ -141,7 +141,7 @@ fwmem_read_quad(
if (fwmem_debug)
printf("fwmem_read_quad: %d %04x:%08x\n", fwdev->dst,
- dst_hi, dst_lo);
+ dst_hi, dst_lo);
if (fw_asyreq(xfer->fc, -1, xfer) == 0)
return xfer;
@@ -177,7 +177,7 @@ fwmem_write_quad(
if (fwmem_debug)
printf("fwmem_write_quad: %d %04x:%08x %08x\n", fwdev->dst,
- dst_hi, dst_lo, *(uint32_t *)data);
+ dst_hi, dst_lo, *(uint32_t *)data);
if (fw_asyreq(xfer->fc, -1, xfer) == 0)
return xfer;
@@ -199,7 +199,7 @@ fwmem_read_block(
{
struct fw_xfer *xfer;
struct fw_pkt *fp;
-
+
xfer = fwmem_xfer_req(fwdev, sc, spd, 0, roundup2(len, 4), hand);
if (xfer == NULL)
return NULL;
@@ -216,7 +216,7 @@ fwmem_read_block(
if (fwmem_debug)
printf("fwmem_read_block: %d %04x:%08x %d\n", fwdev->dst,
- dst_hi, dst_lo, len);
+ dst_hi, dst_lo, len);
if (fw_asyreq(xfer->fc, -1, xfer) == 0)
return xfer;
@@ -262,9 +262,8 @@ fwmem_write_block(
return NULL;
}
-
int
-fwmem_open (struct cdev *dev, int flags, int fmt, fw_proc *td)
+fwmem_open(struct cdev *dev, int flags, int fmt, fw_proc *td)
{
struct fwmem_softc *fms;
struct firewire_softc *sc;
@@ -278,20 +277,20 @@ fwmem_open (struct cdev *dev, int flags, int fmt, fw_proc *td)
if (dev->si_drv1 != NULL) {
if ((flags & FWRITE) != 0) {
FW_GUNLOCK(sc->fc);
- return(EBUSY);
+ return (EBUSY);
}
FW_GUNLOCK(sc->fc);
- fms = (struct fwmem_softc *)dev->si_drv1;
- fms->refcount ++;
+ fms = dev->si_drv1;
+ fms->refcount++;
} else {
dev->si_drv1 = (void *)-1;
FW_GUNLOCK(sc->fc);
dev->si_drv1 = malloc(sizeof(struct fwmem_softc),
- M_FWMEM, M_WAITOK);
+ M_FWMEM, M_WAITOK);
if (dev->si_drv1 == NULL)
- return(ENOMEM);
+ return (ENOMEM);
dev->si_iosize_max = DFLTPHYS;
- fms = (struct fwmem_softc *)dev->si_drv1;
+ fms = dev->si_drv1;
bcopy(&fwmem_eui64, &fms->eui, sizeof(struct fw_eui64));
fms->sc = sc;
fms->refcount = 1;
@@ -307,10 +306,10 @@ fwmem_close (struct cdev *dev, int flags, int fmt, fw_proc *td)
{
struct fwmem_softc *fms;
- fms = (struct fwmem_softc *)dev->si_drv1;
+ fms = dev->si_drv1;
FW_GLOCK(fms->sc->fc);
- fms->refcount --;
+ fms->refcount--;
FW_GUNLOCK(fms->sc->fc);
if (fwmem_debug)
printf("%s: refcount=%d\n", __func__, fms->refcount);
@@ -349,18 +348,18 @@ fwmem_strategy(struct bio *bp)
struct fw_device *fwdev;
struct fw_xfer *xfer;
struct cdev *dev;
- int err=0, s, iolen;
+ int err = 0, s, iolen;
dev = bp->bio_dev;
/* XXX check request length */
s = splfw();
- fms = (struct fwmem_softc *)dev->si_drv1;
+ fms = dev->si_drv1;
fwdev = fw_noderesolve_eui64(fms->sc->fc, &fms->eui);
if (fwdev == NULL) {
if (fwmem_debug)
printf("fwmem: no such device ID:%08x%08x\n",
- fms->eui.hi, fms->eui.lo);
+ fms->eui.hi, fms->eui.lo);
err = EINVAL;
goto error;
}
@@ -369,12 +368,12 @@ fwmem_strategy(struct bio *bp)
if ((bp->bio_cmd & BIO_READ) == BIO_READ) {
if (iolen == 4 && (bp->bio_offset & 3) == 0)
xfer = fwmem_read_quad(fwdev,
- (void *) bp, fwmem_speed,
+ (void *)bp, fwmem_speed,
bp->bio_offset >> 32, bp->bio_offset & 0xffffffff,
bp->bio_data, fwmem_biodone);
else
xfer = fwmem_read_block(fwdev,
- (void *) bp, fwmem_speed,
+ (void *)bp, fwmem_speed,
bp->bio_offset >> 32, bp->bio_offset & 0xffffffff,
iolen, bp->bio_data, fwmem_biodone);
} else {
@@ -408,12 +407,12 @@ error:
}
int
-fwmem_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
+fwmem_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
{
struct fwmem_softc *fms;
int err = 0;
- fms = (struct fwmem_softc *)dev->si_drv1;
+ fms = dev->si_drv1;
switch (cmd) {
case FW_SDEUI64:
bcopy(data, &fms->eui, sizeof(struct fw_eui64));
@@ -424,16 +423,18 @@ fwmem_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
default:
err = EINVAL;
}
- return(err);
+ return (err);
}
+
int
-fwmem_poll (struct cdev *dev, int events, fw_proc *td)
-{
+fwmem_poll(struct cdev *dev, int events, fw_proc *td)
+{
return EINVAL;
}
+
int
-fwmem_mmap (struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
+fwmem_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
int nproto, vm_memattr_t *memattr)
-{
+{
return EINVAL;
}
diff --git a/sys/dev/firewire/fwohci.c b/sys/dev/firewire/fwohci.c
index 06321f0..00a54e2 100644
--- a/sys/dev/firewire/fwohci.c
+++ b/sys/dev/firewire/fwohci.c
@@ -30,18 +30,11 @@
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
- *
+ *
* $FreeBSD$
*
*/
-#define ATRQ_CH 0
-#define ATRS_CH 1
-#define ARRQ_CH 2
-#define ARRS_CH 3
-#define ITX_CH 4
-#define IRX_CH 0x24
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
@@ -73,25 +66,25 @@ SYSCTL_INT(_hw_firewire, OID_AUTO, nocyclemaster, CTLFLAG_RWTUN,
SYSCTL_INT(_hw_firewire, OID_AUTO, phydma_enable, CTLFLAG_RWTUN,
&firewire_phydma_enable, 0, "Allow physical request DMA from firewire");
-static char dbcode[16][0x10]={"OUTM", "OUTL","INPM","INPL",
- "STOR","LOAD","NOP ","STOP",};
-
-static char dbkey[8][0x10]={"ST0", "ST1","ST2","ST3",
- "UNDEF","REG","SYS","DEV"};
-static char dbcond[4][0x10]={"NEV","C=1", "C=0", "ALL"};
-char fwohcicode[32][0x20]={
- "No stat","Undef","long","miss Ack err",
- "FIFO underrun","FIFO overrun","desc err", "data read err",
- "data write err","bus reset","timeout","tcode err",
- "Undef","Undef","unknown event","flushed",
- "Undef","ack complete","ack pend","Undef",
- "ack busy_X","ack busy_A","ack busy_B","Undef",
- "Undef","Undef","Undef","ack tardy",
- "Undef","ack data_err","ack type_err",""};
+static char dbcode[16][0x10] = {"OUTM", "OUTL", "INPM", "INPL",
+ "STOR", "LOAD", "NOP ", "STOP",};
+
+static char dbkey[8][0x10] = {"ST0", "ST1", "ST2", "ST3",
+ "UNDEF", "REG", "SYS", "DEV"};
+static char dbcond[4][0x10] = {"NEV", "C=1", "C=0", "ALL"};
+char fwohcicode[32][0x20] = {
+ "No stat", "Undef", "long", "miss Ack err",
+ "FIFO underrun", "FIFO overrun", "desc err", "data read err",
+ "data write err", "bus reset", "timeout", "tcode err",
+ "Undef", "Undef", "unknown event", "flushed",
+ "Undef" ,"ack complete", "ack pend", "Undef",
+ "ack busy_X", "ack busy_A", "ack busy_B", "Undef",
+ "Undef", "Undef", "Undef", "ack tardy",
+ "Undef", "ack data_err", "ack type_err", ""};
#define MAX_SPEED 3
extern char *linkspeed[];
-uint32_t tagbit[4] = { 1 << 28, 1 << 29, 1 << 30, 1 << 31};
+uint32_t tagbit[4] = {1 << 28, 1 << 29, 1 << 30, 1 << 31};
static struct tcode_info tinfo[] = {
/* hdr_len block flag valid_response */
@@ -113,6 +106,13 @@ static struct tcode_info tinfo[] = {
/* f XXX */ { 0, 0, 0xff}
};
+#define ATRQ_CH 0
+#define ATRS_CH 1
+#define ARRQ_CH 2
+#define ARRS_CH 3
+#define ITX_CH 4
+#define IRX_CH 0x24
+
#define OHCI_WRITE_SIGMASK 0xffff0000
#define OHCI_READ_SIGMASK 0xffff0000
@@ -127,8 +127,8 @@ static void fwohci_txd (struct fwohci_softc *, struct fwohci_dbch *);
static void fwohci_start_atq (struct firewire_comm *);
static void fwohci_start_ats (struct firewire_comm *);
static void fwohci_start (struct fwohci_softc *, struct fwohci_dbch *);
-static uint32_t fwphy_wrdata ( struct fwohci_softc *, uint32_t, uint32_t);
-static uint32_t fwphy_rddata ( struct fwohci_softc *, uint32_t);
+static uint32_t fwphy_wrdata (struct fwohci_softc *, uint32_t, uint32_t);
+static uint32_t fwphy_rddata (struct fwohci_softc *, uint32_t);
static int fwohci_rx_enable (struct fwohci_softc *, struct fwohci_dbch *);
static int fwohci_tx_enable (struct fwohci_softc *, struct fwohci_dbch *);
static int fwohci_irx_enable (struct firewire_comm *, int);
@@ -143,9 +143,9 @@ static void fwohci_set_intr (struct firewire_comm *, int);
static int fwohci_add_rx_buf (struct fwohci_dbch *, struct fwohcidb_tr *, int, struct fwdma_alloc *);
static int fwohci_add_tx_buf (struct fwohci_dbch *, struct fwohcidb_tr *, int);
-static void dump_db (struct fwohci_softc *, uint32_t);
-static void print_db (struct fwohcidb_tr *, struct fwohcidb *, uint32_t , uint32_t);
-static void dump_dma (struct fwohci_softc *, uint32_t);
+static void dump_db (struct fwohci_softc *, uint32_t);
+static void print_db (struct fwohcidb_tr *, struct fwohcidb *, uint32_t, uint32_t);
+static void dump_dma (struct fwohci_softc *, uint32_t);
static uint32_t fwohci_cyctimer (struct firewire_comm *);
static void fwohci_rbuf_update (struct fwohci_softc *, int);
static void fwohci_tbuf_update (struct fwohci_softc *, int);
@@ -257,18 +257,19 @@ d_ioctl_t fwohci_ioctl;
*/
/* XXX need lock for phy access */
static uint32_t
-fwphy_wrdata( struct fwohci_softc *sc, uint32_t addr, uint32_t data)
+fwphy_wrdata(struct fwohci_softc *sc, uint32_t addr, uint32_t data)
{
uint32_t fun;
addr &= 0xf;
data &= 0xff;
- fun = (PHYDEV_WRCMD | (addr << PHYDEV_REGADDR) | (data << PHYDEV_WRDATA));
+ fun = (PHYDEV_WRCMD | (addr << PHYDEV_REGADDR) |
+ (data << PHYDEV_WRDATA));
OWRITE(sc, OHCI_PHYACCESS, fun);
DELAY(100);
- return(fwphy_rddata( sc, addr));
+ return (fwphy_rddata(sc, addr));
}
static uint32_t
@@ -289,17 +290,16 @@ fwohci_set_bus_manager(struct firewire_comm *fc, u_int node)
for (i = 0; !(OREAD(sc, OHCI_CSR_CONT) & (1<<31)) && (i < 1000); i++)
DELAY(10);
bm = OREAD(sc, OHCI_CSR_DATA);
- if((bm & 0x3f) == 0x3f)
+ if ((bm & 0x3f) == 0x3f)
bm = node;
if (firewire_debug)
device_printf(sc->fc.dev, "%s: %d->%d (loop=%d)\n",
__func__, bm, node, i);
-
- return(bm);
+ return (bm);
}
static uint32_t
-fwphy_rddata(struct fwohci_softc *sc, u_int addr)
+fwphy_rddata(struct fwohci_softc *sc, u_int addr)
{
uint32_t fun, stat;
u_int i, retry = 0;
@@ -310,13 +310,13 @@ again:
OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_REG_FAIL);
fun = PHYDEV_RDCMD | (addr << PHYDEV_REGADDR);
OWRITE(sc, OHCI_PHYACCESS, fun);
- for ( i = 0 ; i < MAX_RETRY ; i ++ ){
+ for (i = 0; i < MAX_RETRY; i++) {
fun = OREAD(sc, OHCI_PHYACCESS);
if ((fun & PHYDEV_RDCMD) == 0 && (fun & PHYDEV_RDDONE) != 0)
break;
DELAY(100);
}
- if(i >= MAX_RETRY) {
+ if (i >= MAX_RETRY) {
if (firewire_debug)
device_printf(sc->fc.dev, "%s: failed(1).\n", __func__);
if (++retry < MAX_RETRY) {
@@ -336,12 +336,13 @@ again:
}
}
if (firewire_debug > 1 || retry >= MAX_RETRY)
- device_printf(sc->fc.dev,
+ device_printf(sc->fc.dev,
"%s:: 0x%x loop=%d, retry=%d\n",
__func__, addr, i, retry);
#undef MAX_RETRY
- return((fun >> PHYDEV_RDDATA )& 0xff);
+ return ((fun >> PHYDEV_RDDATA) & 0xff);
}
+
/* Device specific ioctl. */
int
fwohci_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
@@ -350,41 +351,41 @@ fwohci_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
struct fwohci_softc *fc;
int unit = DEV2UNIT(dev);
int err = 0;
- struct fw_reg_req_t *reg = (struct fw_reg_req_t *) data;
+ struct fw_reg_req_t *reg = (struct fw_reg_req_t *) data;
uint32_t *dmach = (uint32_t *) data;
sc = devclass_get_softc(firewire_devclass, unit);
- if(sc == NULL){
- return(EINVAL);
- }
+ if (sc == NULL)
+ return (EINVAL);
+
fc = (struct fwohci_softc *)sc->fc;
if (!data)
- return(EINVAL);
+ return (EINVAL);
switch (cmd) {
case FWOHCI_WRREG:
#define OHCI_MAX_REG 0x800
- if(reg->addr <= OHCI_MAX_REG){
+ if (reg->addr <= OHCI_MAX_REG) {
OWRITE(fc, reg->addr, reg->data);
reg->data = OREAD(fc, reg->addr);
- }else{
+ } else {
err = EINVAL;
}
break;
case FWOHCI_RDREG:
- if(reg->addr <= OHCI_MAX_REG){
+ if (reg->addr <= OHCI_MAX_REG) {
reg->data = OREAD(fc, reg->addr);
- }else{
+ } else {
err = EINVAL;
}
break;
/* Read DMA descriptors for debug */
case DUMPDMA:
- if(*dmach <= OHCI_MAX_DMA_CH ){
+ if (*dmach <= OHCI_MAX_DMA_CH) {
dump_dma(fc, *dmach);
dump_db(fc, *dmach);
- }else{
+ } else {
err = EINVAL;
}
break;
@@ -414,19 +415,20 @@ fwohci_probe_phy(struct fwohci_softc *sc, device_t dev)
{
uint32_t reg, reg2;
int e1394a = 1;
-/*
- * probe PHY parameters
- * 0. to prove PHY version, whether compliance of 1394a.
- * 1. to probe maximum speed supported by the PHY and
- * number of port supported by core-logic.
- * It is not actually available port on your PC .
- */
+
+ /*
+ * probe PHY parameters
+ * 0. to probe the PHY version and whether it complies with 1394a.
+ * 1. to probe the maximum speed supported by the PHY and the
+ *    number of ports supported by the core logic.
+ *    This is not necessarily the number of ports usable on your PC.
+ */
OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_LPS);
DELAY(500);
reg = fwphy_rddata(sc, FW_PHY_SPD_REG);
- if((reg >> 5) != 7 ){
+ if ((reg >> 5) != 7) {
sc->fc.mode &= ~FWPHYASYST;
sc->fc.nport = reg & FW_PHY_NP;
sc->fc.speed = reg & FW_PHY_SPD >> 6;
@@ -438,7 +440,7 @@ fwohci_probe_phy(struct fwohci_softc *sc, device_t dev)
device_printf(dev,
"Phy 1394 only %s, %d ports.\n",
linkspeed[sc->fc.speed], sc->fc.nport);
- }else{
+ } else {
reg2 = fwphy_rddata(sc, FW_PHY_ESPD_REG);
sc->fc.mode |= FWPHYASYST;
sc->fc.nport = reg & FW_PHY_NP;
@@ -475,7 +477,7 @@ fwohci_probe_phy(struct fwohci_softc *sc, device_t dev)
}
reg = fwphy_rddata(sc, FW_PHY_SPD_REG);
- if((reg >> 5) == 7 ){
+ if ((reg >> 5) == 7) {
reg = fwphy_rddata(sc, 4);
reg |= 1 << 6;
fwphy_wrdata(sc, 4, reg);
@@ -492,19 +494,19 @@ fwohci_reset(struct fwohci_softc *sc, device_t dev)
uint32_t reg, reg2;
struct fwohcidb_tr *db_tr;
- /* Disable interrupts */
+ /* Disable interrupts */
OWRITE(sc, FWOHCI_INTMASKCLR, ~0);
/* Now stopping all DMA channels */
- OWRITE(sc, OHCI_ARQCTLCLR, OHCI_CNTL_DMA_RUN);
- OWRITE(sc, OHCI_ARSCTLCLR, OHCI_CNTL_DMA_RUN);
- OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN);
- OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ARQCTLCLR, OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ARSCTLCLR, OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN);
- OWRITE(sc, OHCI_IR_MASKCLR, ~0);
- for( i = 0 ; i < sc->fc.nisodma ; i ++ ){
- OWRITE(sc, OHCI_IRCTLCLR(i), OHCI_CNTL_DMA_RUN);
- OWRITE(sc, OHCI_ITCTLCLR(i), OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_IR_MASKCLR, ~0);
+ for (i = 0; i < sc->fc.nisodma; i++) {
+ OWRITE(sc, OHCI_IRCTLCLR(i), OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ITCTLCLR(i), OHCI_CNTL_DMA_RUN);
}
/* FLUSH FIFO and reset Transmitter/Reciever */
@@ -512,7 +514,7 @@ fwohci_reset(struct fwohci_softc *sc, device_t dev)
if (firewire_debug)
device_printf(dev, "resetting OHCI...");
i = 0;
- while(OREAD(sc, OHCI_HCCCTL) & OHCI_HCC_RESET) {
+ while (OREAD(sc, OHCI_HCCCTL) & OHCI_HCC_RESET) {
if (i++ > 100) break;
DELAY(1000);
}
@@ -523,7 +525,7 @@ fwohci_reset(struct fwohci_softc *sc, device_t dev)
fwohci_probe_phy(sc, dev);
/* Probe link */
- reg = OREAD(sc, OHCI_BUS_OPT);
+ reg = OREAD(sc, OHCI_BUS_OPT);
reg2 = reg | OHCI_BUSFNC;
max_rec = (reg & 0x0000f000) >> 12;
speed = (reg & 0x00000007);
@@ -538,7 +540,7 @@ fwohci_reset(struct fwohci_softc *sc, device_t dev)
}
if (firewire_debug)
device_printf(dev, "BUS_OPT 0x%x -> 0x%x\n", reg, reg2);
- OWRITE(sc, OHCI_BUS_OPT, reg2);
+ OWRITE(sc, OHCI_BUS_OPT, reg2);
/* Initialize registers */
OWRITE(sc, OHCI_CROMHDR, sc->fc.config_rom[0]);
@@ -564,33 +566,31 @@ fwohci_reset(struct fwohci_softc *sc, device_t dev)
/* AT Retries */
OWRITE(sc, FWOHCI_RETRY,
/* CycleLimit PhyRespRetries ATRespRetries ATReqRetries */
- (0xffff << 16 ) | (0x0f << 8) | (0x0f << 4) | 0x0f) ;
+ (0xffff << 16) | (0x0f << 8) | (0x0f << 4) | 0x0f);
sc->atrq.top = STAILQ_FIRST(&sc->atrq.db_trq);
sc->atrs.top = STAILQ_FIRST(&sc->atrs.db_trq);
sc->atrq.bottom = sc->atrq.top;
sc->atrs.bottom = sc->atrs.top;
- for( i = 0, db_tr = sc->atrq.top; i < sc->atrq.ndb ;
- i ++, db_tr = STAILQ_NEXT(db_tr, link)){
+ for (i = 0, db_tr = sc->atrq.top; i < sc->atrq.ndb;
+ i++, db_tr = STAILQ_NEXT(db_tr, link)) {
db_tr->xfer = NULL;
}
- for( i = 0, db_tr = sc->atrs.top; i < sc->atrs.ndb ;
- i ++, db_tr = STAILQ_NEXT(db_tr, link)){
+ for (i = 0, db_tr = sc->atrs.top; i < sc->atrs.ndb;
+ i++, db_tr = STAILQ_NEXT(db_tr, link)) {
db_tr->xfer = NULL;
}
-
/* Enable interrupts */
sc->intmask = (OHCI_INT_ERR | OHCI_INT_PHY_SID
- | OHCI_INT_DMA_ATRQ | OHCI_INT_DMA_ATRS
+ | OHCI_INT_DMA_ATRQ | OHCI_INT_DMA_ATRS
| OHCI_INT_DMA_PRRQ | OHCI_INT_DMA_PRRS
| OHCI_INT_PHY_BUS_R | OHCI_INT_PW_ERR);
sc->intmask |= OHCI_INT_DMA_IR | OHCI_INT_DMA_IT;
sc->intmask |= OHCI_INT_CYC_LOST | OHCI_INT_PHY_INT;
OWRITE(sc, FWOHCI_INTMASK, sc->intmask);
fwohci_set_intr(&sc->fc, 1);
-
}
int
@@ -604,7 +604,7 @@ fwohci_init(struct fwohci_softc *sc, device_t dev)
reg = OREAD(sc, OHCI_VERSION);
mver = (reg >> 16) & 0xff;
device_printf(dev, "OHCI version %x.%x (ROM=%d)\n",
- mver, reg & 0xff, (reg>>24) & 1);
+ mver, reg & 0xff, (reg >> 24) & 1);
if (mver < 1 || mver > 9) {
device_printf(dev, "invalid OHCI version\n");
return (ENXIO);
@@ -659,7 +659,7 @@ fwohci_init(struct fwohci_softc *sc, device_t dev)
sc->atrq.ndb = NDB;
sc->atrs.ndb = NDB / 2;
- for( i = 0 ; i < sc->fc.nisodma ; i ++ ){
+ for (i = 0; i < sc->fc.nisodma; i++) {
sc->fc.it[i] = &sc->it[i].xferq;
sc->fc.ir[i] = &sc->ir[i].xferq;
sc->it[i].xferq.dmach = i;
@@ -673,7 +673,7 @@ fwohci_init(struct fwohci_softc *sc, device_t dev)
sc->fc.config_rom = fwdma_malloc(&sc->fc, CROMSIZE, CROMSIZE,
&sc->crom_dma, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
- if(sc->fc.config_rom == NULL){
+ if (sc->fc.config_rom == NULL) {
device_printf(dev, "config_rom alloc failed.");
return ENOMEM;
}
@@ -690,7 +690,6 @@ fwohci_init(struct fwohci_softc *sc, device_t dev)
sc->fc.config_rom[0] |= fw_crc16(&sc->fc.config_rom[1], 5*4);
#endif
-
/* SID recieve buffer must align 2^11 */
#define OHCI_SIDSIZE (1 << 11)
sc->sid_buf = fwdma_malloc(&sc->fc, OHCI_SIDSIZE, OHCI_SIDSIZE,
@@ -701,7 +700,7 @@ fwohci_init(struct fwohci_softc *sc, device_t dev)
}
fwdma_malloc(&sc->fc, sizeof(uint32_t), sizeof(uint32_t),
- &sc->dummy_dma, BUS_DMA_WAITOK);
+ &sc->dummy_dma, BUS_DMA_WAITOK);
if (sc->dummy_dma.v_addr == NULL) {
device_printf(dev, "dummy_dma alloc failed.");
@@ -726,7 +725,7 @@ fwohci_init(struct fwohci_softc *sc, device_t dev)
sc->fc.eui.hi = OREAD(sc, FWOHCIGUID_H);
sc->fc.eui.lo = OREAD(sc, FWOHCIGUID_L);
- for( i = 0 ; i < 8 ; i ++)
+ for (i = 0; i < 8; i++)
ui[i] = FW_EUI64_BYTE(&sc->fc.eui,i);
device_printf(dev, "EUI64 %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
ui[0], ui[1], ui[2], ui[3], ui[4], ui[5], ui[6], ui[7]);
@@ -756,7 +755,7 @@ fwohci_init(struct fwohci_softc *sc, device_t dev)
sc->fc.taskqueue = taskqueue_create_fast("fw_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->fc.taskqueue);
taskqueue_start_threads(&sc->fc.taskqueue, 1, PI_NET, "fw%d_taskq",
- device_get_unit(dev));
+ device_get_unit(dev));
TASK_INIT(&sc->fwohci_task_busreset, 2, fwohci_task_busreset, sc);
TASK_INIT(&sc->fwohci_task_sid, 1, fwohci_task_sid, sc);
TASK_INIT(&sc->fwohci_task_dma, 0, fwohci_task_dma, sc);
@@ -779,7 +778,7 @@ uint32_t
fwohci_cyctimer(struct firewire_comm *fc)
{
struct fwohci_softc *sc = (struct fwohci_softc *)fc;
- return(OREAD(sc, OHCI_CYCLETIMER));
+ return (OREAD(sc, OHCI_CYCLETIMER));
}
int
@@ -798,7 +797,7 @@ fwohci_detach(struct fwohci_softc *sc, device_t dev)
fwohci_db_free(&sc->atrq);
fwohci_db_free(&sc->atrs);
- for( i = 0 ; i < sc->fc.nisodma ; i ++ ){
+ for (i = 0; i < sc->fc.nisodma; i++) {
fwohci_db_free(&sc->it[i]);
fwohci_db_free(&sc->ir[i]);
}
@@ -819,7 +818,7 @@ fwohci_detach(struct fwohci_softc *sc, device_t dev)
int _cnt = _dbtr->dbcnt; \
db = &_dbtr->db[ (_cnt > 2) ? (_cnt -1) : 0]; \
} while (0)
-
+
static void
fwohci_execute_db(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
@@ -847,7 +846,7 @@ fwohci_execute_db(void *arg, bus_dma_segment_t *segs, int nseg, int error)
static void
fwohci_execute_db2(void *arg, bus_dma_segment_t *segs, int nseg,
- bus_size_t size, int error)
+ bus_size_t size, int error)
{
fwohci_execute_db(arg, segs, nseg, error);
}
@@ -870,11 +869,11 @@ fwohci_start(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
FW_GLOCK_ASSERT(&sc->fc);
- if(&sc->atrq == dbch){
+ if (&sc->atrq == dbch) {
off = OHCI_ATQOFF;
- }else if(&sc->atrs == dbch){
+ } else if (&sc->atrs == dbch) {
off = OHCI_ATSOFF;
- }else{
+ } else {
return;
}
@@ -885,11 +884,11 @@ fwohci_start(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
db_tr = dbch->top;
txloop:
xfer = STAILQ_FIRST(&dbch->xferq.q);
- if(xfer == NULL){
+ if (xfer == NULL) {
goto kick;
}
#if 0
- if(dbch->xferq.queued == 0 ){
+ if (dbch->xferq.queued == 0) {
device_printf(sc->fc.dev, "TX queue empty\n");
}
#endif
@@ -906,11 +905,11 @@ txloop:
ld = &ohcifp->mode.ld[0];
ld[0] = ld[1] = ld[2] = ld[3] = 0;
- for( i = 0 ; i < pl_off ; i+= 4)
+ for (i = 0; i < pl_off; i += 4)
ld[i/4] = fp->mode.ld[i/4];
ohcifp->mode.common.spd = xfer->send.spd & 0x7;
- if (tcode == FWTCODE_STREAM ){
+ if (tcode == FWTCODE_STREAM) {
hdr_len = 8;
ohcifp->mode.stream.len = fp->mode.stream.len;
} else if (tcode == FWTCODE_PHY) {
@@ -930,14 +929,14 @@ txloop:
FWOHCI_DMA_WRITE(db->db.desc.addr, 0);
FWOHCI_DMA_WRITE(db->db.desc.res, 0);
/* Specify bound timer of asy. responce */
- if(&sc->atrs == dbch){
+ if (&sc->atrs == dbch) {
FWOHCI_DMA_WRITE(db->db.desc.res,
(OREAD(sc, OHCI_CYCLETIMER) >> 12) + (1 << 13));
}
#if BYTE_ORDER == BIG_ENDIAN
if (tcode == FWTCODE_WREQQ || tcode == FWTCODE_RRESQ)
hdr_len = 12;
- for (i = 0; i < hdr_len/4; i ++)
+ for (i = 0; i < hdr_len/4; i++)
FWOHCI_DMA_WRITE(ld[i], ld[i]);
#endif
@@ -968,7 +967,7 @@ again:
m_copydata(xfer->mbuf, 0,
xfer->mbuf->m_pkthdr.len,
mtod(m0, caddr_t));
- m0->m_len = m0->m_pkthdr.len =
+ m0->m_len = m0->m_pkthdr.len =
xfer->mbuf->m_pkthdr.len;
m_freem(xfer->mbuf);
xfer->mbuf = m0;
@@ -999,16 +998,16 @@ again:
FWOHCI_DMA_WRITE(db->db.desc.depend,
STAILQ_NEXT(db_tr, link)->bus_addr);
- if(fsegment == -1 )
+ if (fsegment == -1)
fsegment = db_tr->dbcnt;
if (dbch->pdb_tr != NULL) {
LAST_DB(dbch->pdb_tr, db);
FWOHCI_DMA_SET(db->db.desc.depend, db_tr->dbcnt);
}
- dbch->xferq.queued ++;
+ dbch->xferq.queued++;
dbch->pdb_tr = db_tr;
db_tr = STAILQ_NEXT(db_tr, link);
- if(db_tr != dbch->bottom){
+ if (db_tr != dbch->bottom) {
goto txloop;
} else {
device_printf(sc->fc.dev, "fwohci_start: lack of db_trq\n");
@@ -1019,7 +1018,7 @@ kick:
fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREREAD);
fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE);
- if(dbch->xferq.flag & FWXFERQ_RUNNING) {
+ if (dbch->xferq.flag & FWXFERQ_RUNNING) {
OWRITE(sc, OHCI_DMACTL(off), OHCI_CNTL_DMA_WAKE);
} else {
if (firewire_debug)
@@ -1040,7 +1039,7 @@ fwohci_start_atq(struct firewire_comm *fc)
{
struct fwohci_softc *sc = (struct fwohci_softc *)fc;
FW_GLOCK(&sc->fc);
- fwohci_start( sc, &(sc->atrq));
+ fwohci_start(sc, &(sc->atrq));
FW_GUNLOCK(&sc->fc);
return;
}
@@ -1050,7 +1049,7 @@ fwohci_start_ats(struct firewire_comm *fc)
{
struct fwohci_softc *sc = (struct fwohci_softc *)fc;
FW_GLOCK(&sc->fc);
- fwohci_start( sc, &(sc->atrs));
+ fwohci_start(sc, &(sc->atrs));
FW_GUNLOCK(&sc->fc);
return;
}
@@ -1067,13 +1066,13 @@ fwohci_txd(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
int packets;
struct firewire_comm *fc = (struct firewire_comm *)sc;
- if(&sc->atrq == dbch){
+ if (&sc->atrq == dbch) {
off = OHCI_ATQOFF;
ch = ATRQ_CH;
- }else if(&sc->atrs == dbch){
+ } else if (&sc->atrs == dbch) {
off = OHCI_ATSOFF;
ch = ATRS_CH;
- }else{
+ } else {
return;
}
s = splfw();
@@ -1081,11 +1080,11 @@ fwohci_txd(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
packets = 0;
fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_POSTREAD);
fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_POSTWRITE);
- while(dbch->xferq.queued > 0){
+ while (dbch->xferq.queued > 0) {
LAST_DB(tr, db);
status = FWOHCI_DMA_READ(db->db.desc.res) >> OHCI_STATUS_SHIFT;
- if(!(status & OHCI_CNTL_DMA_ACTIVE)){
- if (fc->status != FWBUSINIT)
+ if (!(status & OHCI_CNTL_DMA_ACTIVE)) {
+ if (fc->status != FWBUSINIT)
/* maybe out of order?? */
goto out;
}
@@ -1096,7 +1095,7 @@ fwohci_txd(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
if (firewire_debug > 1)
dump_db(sc, ch);
#endif
- if(status & OHCI_CNTL_DMA_DEAD) {
+ if (status & OHCI_CNTL_DMA_DEAD) {
/* Stop DMA */
OWRITE(sc, OHCI_DMACTLCLR(off), OHCI_CNTL_DMA_RUN);
device_printf(sc->fc.dev, "force reset AT FIFO\n");
@@ -1105,7 +1104,7 @@ fwohci_txd(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
OWRITE(sc, OHCI_DMACTLCLR(off), OHCI_CNTL_DMA_RUN);
}
stat = status & FWOHCIEV_MASK;
- switch(stat){
+ switch (stat) {
case FWOHCIEV_ACKPEND:
case FWOHCIEV_ACKCOMPL:
err = 0;
@@ -1159,17 +1158,17 @@ fwohci_txd(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
}
/*
* The watchdog timer takes care of split
- * transcation timeout for ACKPEND case.
+ * transaction timeout for ACKPEND case.
*/
} else {
printf("this shouldn't happen\n");
}
FW_GLOCK(fc);
- dbch->xferq.queued --;
+ dbch->xferq.queued--;
FW_GUNLOCK(fc);
tr->xfer = NULL;
- packets ++;
+ packets++;
tr = STAILQ_NEXT(tr, link);
dbch->bottom = tr;
if (dbch->bottom == dbch->top) {
@@ -1199,10 +1198,10 @@ fwohci_db_free(struct fwohci_dbch *dbch)
if ((dbch->flags & FWOHCI_DBCH_INIT) == 0)
return;
- for(db_tr = STAILQ_FIRST(&dbch->db_trq), idb = 0; idb < dbch->ndb;
- db_tr = STAILQ_NEXT(db_tr, link), idb++){
+ for (db_tr = STAILQ_FIRST(&dbch->db_trq), idb = 0; idb < dbch->ndb;
+ db_tr = STAILQ_NEXT(db_tr, link), idb++) {
if ((dbch->xferq.flag & FWXFERQ_EXTBUF) == 0 &&
- db_tr->buf != NULL) {
+ db_tr->buf != NULL) {
fwdma_free_size(dbch->dmat, db_tr->dma_map,
db_tr->buf, dbch->xferq.psize);
db_tr->buf = NULL;
@@ -1237,10 +1236,8 @@ fwohci_db_init(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
/*nsegments*/ dbch->ndesc > 3 ? dbch->ndesc - 2 : 1,
/*maxsegsz*/ MAX_REQCOUNT,
/*flags*/ 0,
-#if defined(__FreeBSD__) && __FreeBSD_version >= 501102
/*lockfunc*/busdma_lock_mutex,
/*lockarg*/FW_GMTX(&sc->fc),
-#endif
&dbch->dmat))
return;
@@ -1250,13 +1247,13 @@ fwohci_db_init(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
db_tr = (struct fwohcidb_tr *)
malloc(sizeof(struct fwohcidb_tr) * dbch->ndb,
M_FW, M_WAITOK | M_ZERO);
- if(db_tr == NULL){
+ if (db_tr == NULL) {
printf("fwohci_db_init: malloc(1) failed\n");
return;
}
#define DB_SIZE(x) (sizeof(struct fwohcidb) * (x)->ndesc)
- dbch->am = fwdma_malloc_multiseg(&sc->fc, DB_SIZE(dbch),
+ dbch->am = fwdma_malloc_multiseg(&sc->fc, sizeof(struct fwohcidb),
DB_SIZE(dbch), dbch->ndb, BUS_DMA_WAITOK);
if (dbch->am == NULL) {
printf("fwohci_db_init: fwdma_malloc_multiseg failed\n");
@@ -1264,7 +1261,7 @@ fwohci_db_init(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
return;
}
/* Attach DB to DMA ch. */
- for(idb = 0 ; idb < dbch->ndb ; idb++){
+ for (idb = 0; idb < dbch->ndb; idb++) {
db_tr->dbcnt = 0;
db_tr->db = (struct fwohcidb *)fwdma_v_addr(dbch->am, idb);
db_tr->bus_addr = fwdma_bus_addr(dbch->am, idb);
@@ -1303,7 +1300,7 @@ fwohci_itx_disable(struct firewire_comm *fc, int dmach)
{
struct fwohci_softc *sc = (struct fwohci_softc *)fc;
- OWRITE(sc, OHCI_ITCTLCLR(dmach),
+ OWRITE(sc, OHCI_ITCTLCLR(dmach),
OHCI_CNTL_DMA_RUN | OHCI_CNTL_CYCMATCH_S);
OWRITE(sc, OHCI_IT_MASKCLR, 1 << dmach);
OWRITE(sc, OHCI_IT_STATCLR, 1 << dmach);
@@ -1347,31 +1344,31 @@ fwohci_tx_enable(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
struct fwohcidb_tr *db_tr;
struct fwohcidb *db;
- if(!(dbch->xferq.flag & FWXFERQ_EXTBUF)){
+ if (!(dbch->xferq.flag & FWXFERQ_EXTBUF)) {
err = EINVAL;
return err;
}
z = dbch->ndesc;
- for(dmach = 0 ; dmach < sc->fc.nisodma ; dmach++){
- if( &sc->it[dmach] == dbch){
+ for (dmach = 0; dmach < sc->fc.nisodma; dmach++) {
+ if (&sc->it[dmach] == dbch) {
off = OHCI_ITOFF(dmach);
break;
}
}
- if(off == 0){
+ if (off == 0) {
err = EINVAL;
return err;
}
- if(dbch->xferq.flag & FWXFERQ_RUNNING)
+ if (dbch->xferq.flag & FWXFERQ_RUNNING)
return err;
dbch->xferq.flag |= FWXFERQ_RUNNING;
- for( i = 0, dbch->bottom = dbch->top; i < (dbch->ndb - 1); i++){
+ for (i = 0, dbch->bottom = dbch->top; i < (dbch->ndb - 1); i++) {
dbch->bottom = STAILQ_NEXT(dbch->bottom, link);
}
db_tr = dbch->top;
- for (idb = 0; idb < dbch->ndb; idb ++) {
+ for (idb = 0; idb < dbch->ndb; idb++) {
fwohci_add_tx_buf(dbch, db_tr, idb);
- if(STAILQ_NEXT(db_tr, link) == NULL){
+ if (STAILQ_NEXT(db_tr, link) == NULL) {
break;
}
db = db_tr->db;
@@ -1379,8 +1376,8 @@ fwohci_tx_enable(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
FWOHCI_DMA_WRITE(db[0].db.desc.depend,
STAILQ_NEXT(db_tr, link)->bus_addr | z);
db[ldesc].db.desc.depend = db[0].db.desc.depend;
- if(dbch->xferq.flag & FWXFERQ_EXTBUF){
- if(((idb + 1 ) % dbch->xferq.bnpacket) == 0){
+ if (dbch->xferq.flag & FWXFERQ_EXTBUF) {
+ if (((idb + 1) % dbch->xferq.bnpacket) == 0) {
FWOHCI_DMA_SET(
db[ldesc].db.desc.cmd,
OHCI_INTERRUPT_ALWAYS);
@@ -1407,38 +1404,38 @@ fwohci_rx_enable(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
struct fwohcidb *db;
z = dbch->ndesc;
- if(&sc->arrq == dbch){
+ if (&sc->arrq == dbch) {
off = OHCI_ARQOFF;
- }else if(&sc->arrs == dbch){
+ } else if (&sc->arrs == dbch) {
off = OHCI_ARSOFF;
- }else{
- for(dmach = 0 ; dmach < sc->fc.nisodma ; dmach++){
- if( &sc->ir[dmach] == dbch){
+ } else {
+ for (dmach = 0; dmach < sc->fc.nisodma; dmach++) {
+ if (&sc->ir[dmach] == dbch) {
off = OHCI_IROFF(dmach);
break;
}
}
}
- if(off == 0){
+ if (off == 0) {
err = EINVAL;
return err;
}
- if(dbch->xferq.flag & FWXFERQ_STREAM){
- if(dbch->xferq.flag & FWXFERQ_RUNNING)
+ if (dbch->xferq.flag & FWXFERQ_STREAM) {
+ if (dbch->xferq.flag & FWXFERQ_RUNNING)
return err;
- }else{
- if(dbch->xferq.flag & FWXFERQ_RUNNING){
+ } else {
+ if (dbch->xferq.flag & FWXFERQ_RUNNING) {
err = EBUSY;
return err;
}
}
dbch->xferq.flag |= FWXFERQ_RUNNING;
dbch->top = STAILQ_FIRST(&dbch->db_trq);
- for( i = 0, dbch->bottom = dbch->top; i < (dbch->ndb - 1); i++){
+ for (i = 0, dbch->bottom = dbch->top; i < (dbch->ndb - 1); i++) {
dbch->bottom = STAILQ_NEXT(dbch->bottom, link);
}
db_tr = dbch->top;
- for (idb = 0; idb < dbch->ndb; idb ++) {
+ for (idb = 0; idb < dbch->ndb; idb++) {
fwohci_add_rx_buf(dbch, db_tr, idb, &sc->dummy_dma);
if (STAILQ_NEXT(db_tr, link) == NULL)
break;
@@ -1446,8 +1443,8 @@ fwohci_rx_enable(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
ldesc = db_tr->dbcnt - 1;
FWOHCI_DMA_WRITE(db[ldesc].db.desc.depend,
STAILQ_NEXT(db_tr, link)->bus_addr | z);
- if(dbch->xferq.flag & FWXFERQ_EXTBUF){
- if(((idb + 1 ) % dbch->xferq.bnpacket) == 0){
+ if (dbch->xferq.flag & FWXFERQ_EXTBUF) {
+ if (((idb + 1) % dbch->xferq.bnpacket) == 0) {
FWOHCI_DMA_SET(
db[ldesc].db.desc.cmd,
OHCI_INTERRUPT_ALWAYS);
@@ -1463,9 +1460,9 @@ fwohci_rx_enable(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
dbch->buf_offset = 0;
fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREREAD);
fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE);
- if(dbch->xferq.flag & FWXFERQ_STREAM){
+ if (dbch->xferq.flag & FWXFERQ_STREAM) {
return err;
- }else{
+ } else {
OWRITE(sc, OHCI_DMACMD(off), dbch->top->bus_addr | z);
}
OWRITE(sc, OHCI_DMACTL(off), OHCI_CNTL_DMA_RUN);
@@ -1487,12 +1484,12 @@ fwohci_next_cycle(struct firewire_comm *fc, int cycle_now)
#endif
cycle = cycle + CYCLE_DELAY;
if (cycle >= 8000) {
- sec ++;
+ sec++;
cycle -= 8000;
}
cycle = roundup2(cycle, CYCLE_MOD);
if (cycle >= 8000) {
- sec ++;
+ sec++;
if (cycle == 8000)
cycle = 0;
else
@@ -1500,7 +1497,7 @@ fwohci_next_cycle(struct firewire_comm *fc, int cycle_now)
}
cycle_match = ((sec << 13) | cycle) & 0x7ffff;
- return(cycle_match);
+ return (cycle_match);
}
static int
@@ -1529,7 +1526,7 @@ fwohci_itxbuf_enable(struct firewire_comm *fc, int dmach)
err = fwohci_tx_enable(sc, dbch);
}
- if(err)
+ if (err)
return err;
ldesc = dbch->ndesc - 1;
@@ -1549,7 +1546,7 @@ fwohci_itxbuf_enable(struct firewire_comm *fc, int dmach)
OHCI_BRANCH_ALWAYS);
#endif
#if 0 /* if bulkxfer->npacket changes */
- db[ldesc].db.desc.depend = db[0].db.desc.depend =
+ db[ldesc].db.desc.depend = db[0].db.desc.depend =
((struct fwohcidb_tr *)
(chunk->start))->bus_addr | dbch->ndesc;
#else
@@ -1652,7 +1649,7 @@ fwohci_irx_enable(struct firewire_comm *fc, int dmach)
return ENOMEM;
err = fwohci_rx_enable(sc, dbch);
}
- if(err)
+ if (err)
return err;
first = STAILQ_FIRST(&ir->stfree);
@@ -1731,28 +1728,28 @@ fwohci_stop(struct fwohci_softc *sc, device_t dev)
fwohci_set_intr(&sc->fc, 0);
/* Now stopping all DMA channel */
- OWRITE(sc, OHCI_ARQCTLCLR, OHCI_CNTL_DMA_RUN);
- OWRITE(sc, OHCI_ARSCTLCLR, OHCI_CNTL_DMA_RUN);
- OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN);
- OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ARQCTLCLR, OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ARSCTLCLR, OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN);
- for( i = 0 ; i < sc->fc.nisodma ; i ++ ){
- OWRITE(sc, OHCI_IRCTLCLR(i), OHCI_CNTL_DMA_RUN);
- OWRITE(sc, OHCI_ITCTLCLR(i), OHCI_CNTL_DMA_RUN);
+ for (i = 0; i < sc->fc.nisodma; i++) {
+ OWRITE(sc, OHCI_IRCTLCLR(i), OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ITCTLCLR(i), OHCI_CNTL_DMA_RUN);
}
-#if 0 /* Let dcons(4) be accessed */
+#if 0 /* Let dcons(4) be accessed */
/* Stop interrupt */
OWRITE(sc, FWOHCI_INTMASKCLR,
OHCI_INT_EN | OHCI_INT_ERR | OHCI_INT_PHY_SID
| OHCI_INT_PHY_INT
- | OHCI_INT_DMA_ATRQ | OHCI_INT_DMA_ATRS
+ | OHCI_INT_DMA_ATRQ | OHCI_INT_DMA_ATRS
| OHCI_INT_DMA_PRRQ | OHCI_INT_DMA_PRRS
- | OHCI_INT_DMA_ARRQ | OHCI_INT_DMA_ARRS
+ | OHCI_INT_DMA_ARRQ | OHCI_INT_DMA_ARRS
| OHCI_INT_PHY_BUS_R);
/* FLUSH FIFO and reset Transmitter/Reciever */
- OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_RESET);
+ OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_RESET);
#endif
/* XXX Link down? Bus reset? */
@@ -1768,14 +1765,14 @@ fwohci_resume(struct fwohci_softc *sc, device_t dev)
fwohci_reset(sc, dev);
/* XXX resume isochronous receive automatically. (how about TX?) */
- for(i = 0; i < sc->fc.nisodma; i ++) {
+ for (i = 0; i < sc->fc.nisodma; i++) {
ir = &sc->ir[i].xferq;
- if((ir->flag & FWXFERQ_RUNNING) != 0) {
+ if ((ir->flag & FWXFERQ_RUNNING) != 0) {
device_printf(sc->fc.dev,
"resume iso receive ch: %d\n", i);
ir->flag &= ~FWXFERQ_RUNNING;
/* requeue stdma to stfree */
- while((chunk = STAILQ_FIRST(&ir->stdma)) != NULL) {
+ while ((chunk = STAILQ_FIRST(&ir->stdma)) != NULL) {
STAILQ_REMOVE_HEAD(&ir->stdma, link);
STAILQ_INSERT_TAIL(&ir->stfree, chunk, link);
}
@@ -1792,7 +1789,7 @@ fwohci_resume(struct fwohci_softc *sc, device_t dev)
static void
fwohci_dump_intr(struct fwohci_softc *sc, uint32_t stat)
{
- if(stat & OREAD(sc, FWOHCI_INTMASK))
+ if (stat & OREAD(sc, FWOHCI_INTMASK))
device_printf(fc->dev, "INTERRUPT < %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s> 0x%08x, 0x%08x\n",
stat & OHCI_INT_EN ? "DMA_EN ":"",
stat & OHCI_INT_PHY_REG ? "PHY_REG ":"",
@@ -1815,10 +1812,11 @@ fwohci_dump_intr(struct fwohci_softc *sc, uint32_t stat)
stat & OHCI_INT_DMA_ARRQ ? "DMA_ARRQ " :"",
stat & OHCI_INT_DMA_ATRS ? "DMA_ATRS " :"",
stat & OHCI_INT_DMA_ATRQ ? "DMA_ATRQ " :"",
- stat, OREAD(sc, FWOHCI_INTMASK)
+ stat, OREAD(sc, FWOHCI_INTMASK)
);
}
#endif
+
static void
fwohci_intr_core(struct fwohci_softc *sc, uint32_t stat, int count)
{
@@ -1829,15 +1827,15 @@ fwohci_intr_core(struct fwohci_softc *sc, uint32_t stat, int count)
if ((stat & OHCI_INT_PHY_BUS_R) && (fc->status != FWBUSRESET)) {
fc->status = FWBUSRESET;
/* Disable bus reset interrupt until sid recv. */
- OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_PHY_BUS_R);
-
+ OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_PHY_BUS_R);
+
device_printf(fc->dev, "%s: BUS reset\n", __func__);
- OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_CYC_LOST);
+ OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_CYC_LOST);
OWRITE(sc, OHCI_LNKCTLCLR, OHCI_CNTL_CYCSRC);
- OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN);
sc->atrq.xferq.flag &= ~FWXFERQ_RUNNING;
- OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN);
+ OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN);
sc->atrs.xferq.flag &= ~FWXFERQ_RUNNING;
if (!kdb_active)
@@ -1858,10 +1856,10 @@ fwohci_intr_core(struct fwohci_softc *sc, uint32_t stat, int count)
OWRITE(sc, OHCI_PREQUPPER, 0x10000);
}
/* Set ATRetries register */
- OWRITE(sc, OHCI_ATRETRY, 1<<(13+16) | 0xfff);
+ OWRITE(sc, OHCI_ATRETRY, 1<<(13 + 16) | 0xfff);
/*
- * Checking whether the node is root or not. If root, turn on
+ * Checking whether the node is root or not. If root, turn on
* cycle master.
*/
node_id = OREAD(sc, FWOHCI_NODEID);
@@ -1878,7 +1876,7 @@ fwohci_intr_core(struct fwohci_softc *sc, uint32_t stat, int count)
/* cycle timer */
sc->cycle_lost = 0;
- OWRITE(sc, FWOHCI_INTMASK, OHCI_INT_CYC_LOST);
+ OWRITE(sc, FWOHCI_INTMASK, OHCI_INT_CYC_LOST);
if ((node_id & OHCI_NODE_ROOT) && !nocyclemaster) {
printf("CYCLEMASTER mode\n");
OWRITE(sc, OHCI_LNKCTL,
@@ -1908,10 +1906,10 @@ fwohci_intr_dma(struct fwohci_softc *sc, uint32_t stat, int count)
if (stat & OHCI_INT_DMA_IR) {
irstat = atomic_readandclear_int(&sc->irstat);
- for(i = 0; i < fc->nisodma ; i++){
+ for (i = 0; i < fc->nisodma; i++) {
struct fwohci_dbch *dbch;
- if((irstat & (1 << i)) != 0){
+ if ((irstat & (1 << i)) != 0) {
dbch = &sc->ir[i];
if ((dbch->xferq.flag & FWXFERQ_OPEN) == 0) {
device_printf(sc->fc.dev,
@@ -1924,8 +1922,8 @@ fwohci_intr_dma(struct fwohci_softc *sc, uint32_t stat, int count)
}
if (stat & OHCI_INT_DMA_IT) {
itstat = atomic_readandclear_int(&sc->itstat);
- for(i = 0; i < fc->nisodma ; i++){
- if((itstat & (1 << i)) != 0){
+ for (i = 0; i < fc->nisodma; i++) {
+ if ((itstat & (1 << i)) != 0) {
fwohci_tbuf_update(sc, i);
}
}
@@ -1946,13 +1944,13 @@ fwohci_intr_dma(struct fwohci_softc *sc, uint32_t stat, int count)
}
if (stat & OHCI_INT_CYC_LOST) {
if (sc->cycle_lost >= 0)
- sc->cycle_lost ++;
+ sc->cycle_lost++;
if (sc->cycle_lost > 10) {
sc->cycle_lost = -1;
#if 0
OWRITE(sc, OHCI_LNKCTLCLR, OHCI_CNTL_CYCTIMER);
#endif
- OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_CYC_LOST);
+ OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_CYC_LOST);
device_printf(fc->dev, "too many cycles lost, "
"no cycle master present?\n");
}
@@ -1972,8 +1970,6 @@ fwohci_intr_dma(struct fwohci_softc *sc, uint32_t stat, int count)
if (stat & OHCI_INT_PHY_INT) {
device_printf(fc->dev, "phy int\n");
}
-
- return;
}
static void
@@ -2018,8 +2014,8 @@ fwohci_task_sid(void *arg, int pending)
device_printf(fc->dev, "malloc failed\n");
return;
}
- for (i = 0; i < plen / 4; i ++)
- buf[i] = FWOHCI_DMA_READ(sc->sid_buf[i+1]);
+ for (i = 0; i < plen / 4; i++)
+ buf[i] = FWOHCI_DMA_READ(sc->sid_buf[i + 1]);
/* pending all pre-bus_reset packets */
fwohci_txd(sc, &sc->atrq);
@@ -2138,7 +2134,7 @@ fwohci_tbuf_update(struct fwohci_softc *sc, int dmach)
dump_db(sc, ITX_CH + dmach);
while ((chunk = STAILQ_FIRST(&it->stdma)) != NULL) {
db = ((struct fwohcidb_tr *)(chunk->end))->db;
- stat = FWOHCI_DMA_READ(db[ldesc].db.desc.res)
+ stat = FWOHCI_DMA_READ(db[ldesc].db.desc.res)
>> OHCI_STATUS_SHIFT;
db = ((struct fwohcidb_tr *)(chunk->start))->db;
/* timestamp */
@@ -2147,7 +2143,7 @@ fwohci_tbuf_update(struct fwohci_softc *sc, int dmach)
if (stat == 0)
break;
STAILQ_REMOVE_HEAD(&it->stdma, link);
- switch (stat & FWOHCIEV_MASK){
+ switch (stat & FWOHCIEV_MASK) {
case FWOHCIEV_ACKCOMPL:
#if 0
device_printf(fc->dev, "0x%08x\n", count);
@@ -2226,7 +2222,7 @@ fwohci_rbuf_update(struct fwohci_softc *sc, int dmach)
if (w == 0)
return;
- if (ir->flag & FWXFERQ_HANDLER)
+ if (ir->flag & FWXFERQ_HANDLER)
ir->hand(ir);
else
wakeup(ir);
@@ -2237,17 +2233,17 @@ dump_dma(struct fwohci_softc *sc, uint32_t ch)
{
uint32_t off, cntl, stat, cmd, match;
- if(ch == 0){
+ if (ch == 0) {
off = OHCI_ATQOFF;
- }else if(ch == 1){
+ } else if (ch == 1) {
off = OHCI_ATSOFF;
- }else if(ch == 2){
+ } else if (ch == 2) {
off = OHCI_ARQOFF;
- }else if(ch == 3){
+ } else if (ch == 3) {
off = OHCI_ARSOFF;
- }else if(ch < IRX_CH){
+ } else if (ch < IRX_CH) {
off = OHCI_ITCTL(ch - ITX_CH);
- }else{
+ } else {
off = OHCI_IRCTL(ch - IRX_CH);
}
cntl = stat = OREAD(sc, off);
@@ -2256,10 +2252,10 @@ dump_dma(struct fwohci_softc *sc, uint32_t ch)
device_printf(sc->fc.dev, "ch %1x cntl:0x%08x cmd:0x%08x match:0x%08x\n",
ch,
- cntl,
- cmd,
+ cntl,
+ cmd,
match);
- stat &= 0xffff ;
+ stat &= 0xffff;
if (stat) {
device_printf(sc->fc.dev, "dma %d ch:%s%s%s%s%s%s %s(%x)\n",
ch,
@@ -2272,7 +2268,7 @@ dump_dma(struct fwohci_softc *sc, uint32_t ch)
fwohcicode[stat & 0x1f],
stat & 0x1f
);
- }else{
+ } else {
device_printf(sc->fc.dev, "dma %d ch: Nostat\n", ch);
}
}
@@ -2285,60 +2281,61 @@ dump_db(struct fwohci_softc *sc, uint32_t ch)
struct fwohcidb *curr = NULL, *prev, *next = NULL;
int idb, jdb;
uint32_t cmd, off;
- if(ch == 0){
+
+ if (ch == 0) {
off = OHCI_ATQOFF;
dbch = &sc->atrq;
- }else if(ch == 1){
+ } else if (ch == 1) {
off = OHCI_ATSOFF;
dbch = &sc->atrs;
- }else if(ch == 2){
+ } else if (ch == 2) {
off = OHCI_ARQOFF;
dbch = &sc->arrq;
- }else if(ch == 3){
+ } else if (ch == 3) {
off = OHCI_ARSOFF;
dbch = &sc->arrs;
- }else if(ch < IRX_CH){
+ } else if (ch < IRX_CH) {
off = OHCI_ITCTL(ch - ITX_CH);
dbch = &sc->it[ch - ITX_CH];
- }else {
+ } else {
off = OHCI_IRCTL(ch - IRX_CH);
dbch = &sc->ir[ch - IRX_CH];
}
cmd = OREAD(sc, off + 0xc);
- if( dbch->ndb == 0 ){
+ if (dbch->ndb == 0) {
device_printf(sc->fc.dev, "No DB is attached ch=%d\n", ch);
return;
}
pp = dbch->top;
prev = pp->db;
- for(idb = 0 ; idb < dbch->ndb ; idb ++ ){
+ for (idb = 0; idb < dbch->ndb; idb++) {
cp = STAILQ_NEXT(pp, link);
- if(cp == NULL){
+ if (cp == NULL) {
curr = NULL;
goto outdb;
}
np = STAILQ_NEXT(cp, link);
- for(jdb = 0 ; jdb < dbch->ndesc ; jdb ++ ){
+ for (jdb = 0; jdb < dbch->ndesc; jdb++) {
if ((cmd & 0xfffffff0) == cp->bus_addr) {
curr = cp->db;
- if(np != NULL){
+ if (np != NULL) {
next = np->db;
- }else{
+ } else {
next = NULL;
}
goto outdb;
}
}
pp = STAILQ_NEXT(pp, link);
- if(pp == NULL){
+ if (pp == NULL) {
curr = NULL;
goto outdb;
}
prev = pp->db;
}
outdb:
- if( curr != NULL){
+ if (curr != NULL) {
#if 0
printf("Prev DB %d\n", ch);
print_db(pp, prev, ch, dbch->ndesc);
@@ -2349,7 +2346,7 @@ outdb:
printf("Next DB %d\n", ch);
print_db(np, next, ch, dbch->ndesc);
#endif
- }else{
+ } else {
printf("dbdump err ch = %d cmd = 0x%08x\n", ch, cmd);
}
return;
@@ -2363,7 +2360,7 @@ print_db(struct fwohcidb_tr *db_tr, struct fwohcidb *db,
int i, key;
uint32_t cmd, res;
- if(db == NULL){
+ if (db == NULL) {
printf("No Descriptor is found\n");
return;
}
@@ -2380,18 +2377,13 @@ print_db(struct fwohcidb_tr *db_tr, struct fwohcidb *db,
"Depend",
"Stat",
"Cnt");
- for( i = 0 ; i <= max ; i ++){
+ for (i = 0; i <= max; i++) {
cmd = FWOHCI_DMA_READ(db[i].db.desc.cmd);
res = FWOHCI_DMA_READ(db[i].db.desc.res);
key = cmd & OHCI_KEY_MASK;
stat = res >> OHCI_STATUS_SHIFT;
-#if defined(__DragonFly__) || __FreeBSD_version < 500000
- printf("%08x %s %s %s %s %5d %08x %08x %04x:%04x",
- db_tr->bus_addr,
-#else
printf("%08jx %s %s %s %s %5d %08x %08x %04x:%04x",
(uintmax_t)db_tr->bus_addr,
-#endif
dbcode[(cmd >> 28) & 0xf],
dbkey[(cmd >> 24) & 0x7],
dbcond[(cmd >> 20) & 0x3],
@@ -2401,7 +2393,7 @@ print_db(struct fwohcidb_tr *db_tr, struct fwohcidb *db,
FWOHCI_DMA_READ(db[i].db.desc.depend),
stat,
res & OHCI_COUNT_MASK);
- if(stat & 0xff00){
+ if (stat & 0xff00) {
printf(" %s%s%s%s%s%s %s(%x)\n",
stat & OHCI_CNTL_DMA_RUN ? "RUN," : "",
stat & OHCI_CNTL_DMA_WAKE ? "WAKE," : "",
@@ -2412,32 +2404,32 @@ print_db(struct fwohcidb_tr *db_tr, struct fwohcidb *db,
fwohcicode[stat & 0x1f],
stat & 0x1f
);
- }else{
+ } else {
printf(" Nostat\n");
}
- if(key == OHCI_KEY_ST2 ){
- printf("0x%08x 0x%08x 0x%08x 0x%08x\n",
- FWOHCI_DMA_READ(db[i+1].db.immed[0]),
- FWOHCI_DMA_READ(db[i+1].db.immed[1]),
- FWOHCI_DMA_READ(db[i+1].db.immed[2]),
- FWOHCI_DMA_READ(db[i+1].db.immed[3]));
+ if (key == OHCI_KEY_ST2) {
+ printf("0x%08x 0x%08x 0x%08x 0x%08x\n",
+ FWOHCI_DMA_READ(db[i + 1].db.immed[0]),
+ FWOHCI_DMA_READ(db[i + 1].db.immed[1]),
+ FWOHCI_DMA_READ(db[i + 1].db.immed[2]),
+ FWOHCI_DMA_READ(db[i + 1].db.immed[3]));
}
- if(key == OHCI_KEY_DEVICE){
+ if (key == OHCI_KEY_DEVICE) {
return;
}
- if((cmd & OHCI_BRANCH_MASK)
- == OHCI_BRANCH_ALWAYS){
+ if ((cmd & OHCI_BRANCH_MASK)
+ == OHCI_BRANCH_ALWAYS) {
return;
}
- if((cmd & OHCI_CMD_MASK)
- == OHCI_OUTPUT_LAST){
+ if ((cmd & OHCI_CMD_MASK)
+ == OHCI_OUTPUT_LAST) {
return;
}
- if((cmd & OHCI_CMD_MASK)
- == OHCI_INPUT_LAST){
+ if ((cmd & OHCI_CMD_MASK)
+ == OHCI_INPUT_LAST) {
return;
}
- if(key == OHCI_KEY_ST2 ){
+ if (key == OHCI_KEY_ST2) {
i++;
}
}
@@ -2498,7 +2490,7 @@ fwohci_txbufdb(struct fwohci_softc *sc, int dmach, struct fw_bulkxfer *bulkxfer)
/*
device_printf(sc->fc.dev, "DB %08x %08x %08x\n", bulkxfer, db_tr->bus_addr, fdb_tr->bus_addr);
*/
- for (idb = 0; idb < dbch->xferq.bnpacket; idb ++) {
+ for (idb = 0; idb < dbch->xferq.bnpacket; idb++) {
db = db_tr->db;
fp = (struct fw_pkt *)db_tr->buf;
ohcifp = (struct fwohci_txpkthdr *) db[1].db.immed;
@@ -2508,8 +2500,8 @@ device_printf(sc->fc.dev, "DB %08x %08x %08x\n", bulkxfer, db_tr->bus_addr, fdb_
ohcifp->mode.stream.chtag = chtag;
ohcifp->mode.stream.tcode = 0xa;
#if BYTE_ORDER == BIG_ENDIAN
- FWOHCI_DMA_WRITE(db[1].db.immed[0], db[1].db.immed[0]);
- FWOHCI_DMA_WRITE(db[1].db.immed[1], db[1].db.immed[1]);
+ FWOHCI_DMA_WRITE(db[1].db.immed[0], db[1].db.immed[0]);
+ FWOHCI_DMA_WRITE(db[1].db.immed[1], db[1].db.immed[1]);
#endif
FWOHCI_DMA_CLEAR(db[2].db.desc.cmd, OHCI_COUNT_MASK);
@@ -2554,7 +2546,7 @@ fwohci_add_tx_buf(struct fwohci_dbch *dbch, struct fwohcidb_tr *db_tr,
int err = 0;
it = &dbch->xferq;
- if(it->buf == 0){
+ if (it->buf == 0) {
err = EINVAL;
return err;
}
@@ -2594,7 +2586,7 @@ fwohci_add_rx_buf(struct fwohci_dbch *dbch, struct fwohcidb_tr *db_tr,
&db_tr->dma_map, ir->psize, &dbuf[0],
BUS_DMA_NOWAIT);
if (db_tr->buf == NULL)
- return(ENOMEM);
+ return (ENOMEM);
}
db_tr->dbcnt = 1;
dsiz[0] = ir->psize;
@@ -2609,11 +2601,11 @@ fwohci_add_rx_buf(struct fwohci_dbch *dbch, struct fwohcidb_tr *db_tr,
dsiz[db_tr->dbcnt] = ir->psize;
if (ir->buf != NULL) {
db_tr->buf = fwdma_v_addr(ir->buf, poffset);
- dbuf[db_tr->dbcnt] = fwdma_bus_addr( ir->buf, poffset);
+ dbuf[db_tr->dbcnt] = fwdma_bus_addr(ir->buf, poffset);
}
db_tr->dbcnt++;
}
- for(i = 0 ; i < db_tr->dbcnt ; i++){
+ for (i = 0; i < db_tr->dbcnt; i++) {
FWOHCI_DMA_WRITE(db[i].db.desc.addr, dbuf[i]);
FWOHCI_DMA_WRITE(db[i].db.desc.cmd, OHCI_INPUT_MORE | dsiz[i]);
if (ir->flag & FWXFERQ_STREAM) {
@@ -2663,19 +2655,19 @@ fwohci_arcv_swap(struct fw_pkt *fp, int len)
break;
default:
printf("Unknown tcode %d\n", fp0->mode.common.tcode);
- return(0);
+ return (0);
}
hlen = tinfo[fp0->mode.common.tcode].hdr_len;
if (hlen > len) {
if (firewire_debug)
printf("splitted header\n");
- return(-hlen);
+ return (-hlen);
}
#if BYTE_ORDER == BIG_ENDIAN
- for(i = 0; i < slen/4; i ++)
+ for (i = 0; i < slen/4; i++)
fp->mode.ld[i] = FWOHCI_DMA_READ(fp->mode.ld[i]);
#endif
- return(hlen);
+ return (hlen);
}
static int
@@ -2737,11 +2729,11 @@ fwohci_arcv(struct fwohci_softc *sc, struct fwohci_dbch *dbch, int count)
caddr_t buf;
int resCount;
- if(&sc->arrq == dbch){
+ if (&sc->arrq == dbch) {
off = OHCI_ARQOFF;
- }else if(&sc->arrs == dbch){
+ } else if (&sc->arrs == dbch) {
off = OHCI_ARSOFF;
- }else{
+ } else {
return;
}
@@ -2769,10 +2761,10 @@ fwohci_arcv(struct fwohci_softc *sc, struct fwohci_dbch *dbch, int count)
if (len > 0)
bus_dmamap_sync(dbch->dmat, db_tr->dma_map,
BUS_DMASYNC_POSTREAD);
- while (len > 0 ) {
+ while (len > 0) {
if (count >= 0 && count-- == 0)
goto out;
- if(dbch->pdb_tr != NULL){
+ if (dbch->pdb_tr != NULL) {
/* we have a fragment in previous buffer */
int rlen;
@@ -2824,7 +2816,7 @@ fwohci_arcv(struct fwohci_softc *sc, struct fwohci_dbch *dbch, int count)
dbch->pdb_tr = db_tr;
dbch->buf_offset = - dbch->buf_offset;
/* sanity check */
- if (resCount != 0) {
+ if (resCount != 0) {
printf("resCount=%d hlen=%d\n",
resCount, hlen);
goto err;
@@ -2849,7 +2841,7 @@ fwohci_arcv(struct fwohci_softc *sc, struct fwohci_dbch *dbch, int count)
if (firewire_debug)
printf("splitted payload\n");
/* sanity check */
- if (resCount != 0) {
+ if (resCount != 0) {
printf("resCount=%d plen=%d"
" len=%d\n",
resCount, plen, len);
@@ -2859,7 +2851,7 @@ fwohci_arcv(struct fwohci_softc *sc, struct fwohci_dbch *dbch, int count)
}
vec[nvec].iov_base = ld;
vec[nvec].iov_len = plen;
- nvec ++;
+ nvec++;
ld += plen;
}
dbch->buf_offset = ld - (uint8_t *)db_tr->buf;
@@ -2886,7 +2878,7 @@ fwohci_arcv(struct fwohci_softc *sc, struct fwohci_dbch *dbch, int count)
if ((vec[nvec-1].iov_len -=
sizeof(struct fwohci_trailer)) == 0)
- nvec--;
+ nvec--;
rb.fc = &sc->fc;
rb.vec = vec;
rb.nvec = nvec;
@@ -2913,7 +2905,7 @@ fwohci_arcv(struct fwohci_softc *sc, struct fwohci_dbch *dbch, int count)
#endif
break;
}
- pcnt ++;
+ pcnt++;
if (dbch->pdb_tr != NULL) {
fwohci_arcv_free_buf(sc, dbch, dbch->pdb_tr,
off, 1);
diff --git a/sys/dev/firewire/fwohci_pci.c b/sys/dev/firewire/fwohci_pci.c
index 77cb586..7523f2c 100644
--- a/sys/dev/firewire/fwohci_pci.c
+++ b/sys/dev/firewire/fwohci_pci.c
@@ -67,7 +67,7 @@ static int fwohci_pci_detach(device_t self);
* The probe routine.
*/
static int
-fwohci_pci_probe( device_t dev )
+fwohci_pci_probe(device_t dev)
{
uint32_t id;
@@ -211,7 +211,7 @@ fwohci_pci_init(device_t self)
cmd = pci_read_config(self, PCIR_COMMAND, 2);
cmd |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
#if 1 /* for broken hardware */
- cmd &= ~PCIM_CMD_MWRICEN;
+ cmd &= ~PCIM_CMD_MWRICEN;
#endif
pci_write_config(self, PCIR_COMMAND, cmd, 2);
@@ -311,14 +311,15 @@ fwohci_pci_attach(device_t self)
/*lockarg*/FW_GMTX(&sc->fc),
&sc->fc.dmat);
if (err != 0) {
- printf("fwohci_pci_attach: Could not allocate DMA tag "
- "- error %d\n", err);
- return (ENOMEM);
+ device_printf(self, "fwohci_pci_attach: Could not allocate DMA "
+ "tag - error %d\n", err);
+ fwohci_pci_detach(self);
+ return (ENOMEM);
}
err = fwohci_init(sc, self);
- if (err) {
+ if (err != 0) {
device_printf(self, "fwohci_init failed with err=%d\n", err);
fwohci_pci_detach(self);
return EIO;
@@ -337,13 +338,13 @@ fwohci_pci_detach(device_t self)
fwohci_softc_t *sc = device_get_softc(self);
int s;
-
s = splfw();
if (sc->bsr)
fwohci_stop(sc, self);
bus_generic_detach(self);
+
if (sc->fc.bdev) {
device_delete_child(self, sc->fc.bdev);
sc->fc.bdev = NULL;
@@ -368,7 +369,7 @@ fwohci_pci_detach(device_t self)
}
if (sc->bsr) {
- bus_release_resource(self, SYS_RES_MEMORY,PCI_CBMEM,sc->bsr);
+ bus_release_resource(self, SYS_RES_MEMORY, PCI_CBMEM, sc->bsr);
sc->bsr = NULL;
sc->bst = 0;
sc->bsh = 0;
@@ -428,7 +429,7 @@ fwohci_pci_add_child(device_t dev, u_int order, const char *name, int unit)
return (child);
sc->fc.bdev = child;
- device_set_ivars(child, (void *)&sc->fc);
+ device_set_ivars(child, &sc->fc);
err = device_probe_and_attach(child);
if (err) {
@@ -447,7 +448,7 @@ fwohci_pci_add_child(device_t dev, u_int order, const char *name, int unit)
int s;
DELAY(250); /* 2 cycles */
s = splfw();
- fwohci_poll((void *)sc, 0, -1);
+ fwohci_poll(&sc->fc, 0, -1);
splx(s);
}
diff --git a/sys/dev/firewire/fwohcireg.h b/sys/dev/firewire/fwohcireg.h
index d8deca8..95fe26e 100644
--- a/sys/dev/firewire/fwohcireg.h
+++ b/sys/dev/firewire/fwohcireg.h
@@ -184,7 +184,7 @@ struct fwohcidb {
#define FWOHCIEV_MASK 0x1f
-struct ohci_dma{
+struct ohci_dma {
fwohcireg_t cntl;
#define OHCI_CNTL_CYCMATCH_S (0x1 << 31)
@@ -211,7 +211,7 @@ struct ohci_dma{
fwohcireg_t dummy3;
};
-struct ohci_itdma{
+struct ohci_itdma {
fwohcireg_t cntl;
fwohcireg_t cntl_clr;
fwohcireg_t dummy0;
@@ -237,7 +237,7 @@ struct ohci_registers {
fwohcireg_t config_rom; /* config ROM map 0x34 */
fwohcireg_t post_wr_lo; /* post write addr lo 0x38 */
fwohcireg_t post_wr_hi; /* post write addr hi 0x3c */
- fwohcireg_t vender; /* vender ID 0x40 */
+ fwohcireg_t vendor; /* vendor ID 0x40 */
fwohcireg_t dummy1[3]; /* dummy 0x44-0x4c */
fwohcireg_t hcc_cntl_set; /* HCC control set 0x50 */
fwohcireg_t hcc_cntl_clr; /* HCC control clr 0x54 */
@@ -308,7 +308,7 @@ struct ohci_registers {
fwohcireg_t pys_upper; /* Physical Upper bound 0x120 */
fwohcireg_t dummy7[23]; /* dummy 0x124-0x17c */
-
+
/* 0x180, 0x184, 0x188, 0x18c */
/* 0x190, 0x194, 0x198, 0x19c */
/* 0x1a0, 0x1a4, 0x1a8, 0x1ac */
@@ -328,7 +328,7 @@ struct ohci_registers {
struct ohci_dma dma_irch[0x20];
};
-struct fwohcidb_tr{
+struct fwohcidb_tr {
STAILQ_ENTRY(fwohcidb_tr) link;
struct fw_xfer *xfer;
struct fwohcidb *db;
@@ -341,8 +341,8 @@ struct fwohcidb_tr{
/*
* OHCI info structure.
*/
-struct fwohci_txpkthdr{
- union{
+struct fwohci_txpkthdr {
+ union {
uint32_t ld[4];
struct {
#if BYTE_ORDER == BIG_ENDIAN
@@ -376,7 +376,7 @@ struct fwohci_txpkthdr{
:8;
#endif
BIT16x2(dst, );
- }asycomm;
+ } asycomm;
struct {
#if BYTE_ORDER == BIG_ENDIAN
uint32_t :13,
@@ -392,16 +392,17 @@ struct fwohci_txpkthdr{
:13;
#endif
BIT16x2(len, );
- }stream;
- }mode;
+ } stream;
+ } mode;
};
-struct fwohci_trailer{
+
+struct fwohci_trailer {
#if BYTE_ORDER == BIG_ENDIAN
uint32_t stat:16,
- time:16;
+ time:16;
#else
uint32_t time:16,
- stat:16;
+ stat:16;
#endif
};
@@ -412,7 +413,7 @@ struct fwohci_trailer{
#define OHCI_CNTL_SID (0x1 << 9)
/*
- * defined in OHCI 1.1
+ * defined in OHCI 1.1
* chapter 6.1
*/
#define OHCI_INT_DMA_ATRQ (0x1 << 0)
diff --git a/sys/dev/firewire/fwohcivar.h b/sys/dev/firewire/fwohcivar.h
index 3c9b242..985bc6a 100644
--- a/sys/dev/firewire/fwohcivar.h
+++ b/sys/dev/firewire/fwohcivar.h
@@ -44,7 +44,7 @@ typedef struct fwohci_softc {
void *ih;
struct resource *bsr;
struct resource *irq_res;
- struct fwohci_dbch{
+ struct fwohci_dbch {
u_int ndb;
u_int ndesc;
STAILQ_HEAD(, fwohcidb_tr) db_trq;
diff --git a/sys/dev/firewire/iec68113.h b/sys/dev/firewire/iec68113.h
index 11f3042..393d931 100644
--- a/sys/dev/firewire/iec68113.h
+++ b/sys/dev/firewire/iec68113.h
@@ -97,7 +97,7 @@ struct ciphdr {
} fdf;
};
-struct dvdbc{
+struct dvdbc {
#if BYTE_ORDER == BIG_ENDIAN
uint8_t sct:3, /* Section type */
:1, /* Reserved */
diff --git a/sys/dev/firewire/if_fwe.c b/sys/dev/firewire/if_fwe.c
index f43fccb..5d15ead 100644
--- a/sys/dev/firewire/if_fwe.c
+++ b/sys/dev/firewire/if_fwe.c
@@ -1,7 +1,7 @@
/*-
* Copyright (c) 2002-2003
* Hidetoshi Shimokawa. All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -18,7 +18,7 @@
* 4. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -30,7 +30,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* $FreeBSD$
*/
@@ -123,8 +123,8 @@ fwe_probe(device_t dev)
device_t pa;
pa = device_get_parent(dev);
- if(device_get_unit(dev) != device_get_unit(pa)){
- return(ENXIO);
+ if (device_get_unit(dev) != device_get_unit(pa)) {
+ return (ENXIO);
}
device_set_desc(dev, "Ethernet over FireWire");
@@ -176,7 +176,7 @@ fwe_attach(device_t dev)
"%02x:%02x:%02x:%02x:%02x:%02x\n", unit,
eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
- /* fill the rest and attach interface */
+ /* fill the rest and attach interface */
ifp = fwe->eth_softc.ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "can not if_alloc()\n");
@@ -220,12 +220,12 @@ fwe_stop(struct fwe_softc *fwe)
if (xferq->flag & FWXFERQ_RUNNING)
fc->irx_disable(fc, fwe->dma_ch);
- xferq->flag &=
+ xferq->flag &=
~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
xferq->hand = NULL;
- for (i = 0; i < xferq->bnchunk; i ++)
+ for (i = 0; i < xferq->bnchunk; i++)
m_freem(xferq->bulkxfer[i].mbuf);
free(xferq->bulkxfer, M_FWE);
@@ -315,7 +315,7 @@ fwe_init(void *arg)
STAILQ_INIT(&xferq->stfree);
STAILQ_INIT(&xferq->stdma);
xferq->stproc = NULL;
- for (i = 0; i < xferq->bnchunk; i ++) {
+ for (i = 0; i < xferq->bnchunk; i++) {
m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
xferq->bulkxfer[i].mbuf = m;
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
@@ -393,7 +393,7 @@ fwe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
!(ifp->if_capenable & IFCAP_POLLING)) {
error = ether_poll_register(fwe_poll, ifp);
if (error)
- return(error);
+ return (error);
/* Disable interrupts */
fc->set_intr(fc, 0);
ifp->if_capenable |= IFCAP_POLLING;
@@ -435,7 +435,6 @@ fwe_output_callback(struct fw_xfer *xfer)
FWEDEBUG(ifp, "resp = %d\n", xfer->resp);
if (xfer->resp != 0)
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
-
m_freem(xfer->mbuf);
fw_xfer_unload(xfer);
@@ -604,7 +603,7 @@ fwe_as_input(struct fw_xferq *xferq)
c[16], c[17], c[18], c[19],
c[20], c[21], c[22], c[23],
c[20], c[21], c[22], c[23]
- );
+ );
#endif
(*ifp->if_input)(ifp, m);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
diff --git a/sys/dev/firewire/if_fwip.c b/sys/dev/firewire/if_fwip.c
index b88bf77..0617a9c 100644
--- a/sys/dev/firewire/if_fwip.c
+++ b/sys/dev/firewire/if_fwip.c
@@ -60,16 +60,10 @@
#include <net/firewire.h>
#include <net/if_arp.h>
#include <net/if_types.h>
-#ifdef __DragonFly__
-#include <bus/firewire/firewire.h>
-#include <bus/firewire/firewirereg.h>
-#include "if_fwipvar.h"
-#else
#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/iec13213.h>
#include <dev/firewire/if_fwipvar.h>
-#endif
/*
* We really need a mechanism for allocating regions in the FIFO
@@ -139,8 +133,8 @@ fwip_probe(device_t dev)
device_t pa;
pa = device_get_parent(dev);
- if(device_get_unit(dev) != device_get_unit(pa)){
- return(ENXIO);
+ if (device_get_unit(dev) != device_get_unit(pa)) {
+ return (ENXIO);
}
device_set_desc(dev, "IP over FireWire");
@@ -228,7 +222,7 @@ fwip_stop(struct fwip_softc *fwip)
FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
xferq->hand = NULL;
- for (i = 0; i < xferq->bnchunk; i ++)
+ for (i = 0; i < xferq->bnchunk; i++)
m_freem(xferq->bulkxfer[i].mbuf);
free(xferq->bulkxfer, M_FWIP);
@@ -322,7 +316,7 @@ fwip_init(void *arg)
STAILQ_INIT(&xferq->stfree);
STAILQ_INIT(&xferq->stdma);
xferq->stproc = NULL;
- for (i = 0; i < xferq->bnchunk; i ++) {
+ for (i = 0; i < xferq->bnchunk; i++) {
m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
xferq->bulkxfer[i].mbuf = m;
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
@@ -335,7 +329,7 @@ fwip_init(void *arg)
/* pre-allocate xfer */
STAILQ_INIT(&fwip->fwb.xferlist);
- for (i = 0; i < rx_queue_len; i ++) {
+ for (i = 0; i < rx_queue_len; i++) {
xfer = fw_xfer_alloc(M_FWIP);
if (xfer == NULL)
break;
@@ -411,13 +405,12 @@ fwip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
!(ifp->if_capenable & IFCAP_POLLING)) {
error = ether_poll_register(fwip_poll, ifp);
if (error)
- return(error);
+ return (error);
/* Disable interrupts */
fc->set_intr(fc, 0);
ifp->if_capenable |= IFCAP_POLLING |
IFCAP_POLLING_NOCOUNT;
return (error);
-
}
if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
ifp->if_capenable & IFCAP_POLLING) {
@@ -485,7 +478,6 @@ fwip_output_callback(struct fw_xfer *xfer)
FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
if (xfer->resp != 0)
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
-
m_freem(xfer->mbuf);
fw_xfer_unload(xfer);
@@ -937,9 +929,6 @@ static driver_t fwip_driver = {
};
-#ifdef __DragonFly__
-DECLARE_DUMMY_MODULE(fwip);
-#endif
DRIVER_MODULE(fwip, firewire, fwip_driver, fwip_devclass, 0, 0);
MODULE_VERSION(fwip, 1);
MODULE_DEPEND(fwip, firewire, 1, 1, 1);
diff --git a/sys/dev/firewire/sbp.c b/sys/dev/firewire/sbp.c
index d66933a..69e83c4 100644
--- a/sys/dev/firewire/sbp.c
+++ b/sys/dev/firewire/sbp.c
@@ -30,7 +30,7 @@
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
- *
+ *
* $FreeBSD$
*
*/
@@ -75,16 +75,16 @@
#define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb))
#define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS)
-/*
+/*
* STATUS FIFO addressing
* bit
- * -----------------------
+ * -----------------------
* 0- 1( 2): 0 (alignment)
* 2- 7( 6): target
* 8-15( 8): lun
* 16-31( 8): reserved
- * 32-47(16): SBP_BIND_HI
- * 48-64(16): bus_id, node_id
+ * 32-47(16): SBP_BIND_HI
+ * 48-64(16): bus_id, node_id
*/
#define SBP_BIND_HI 0x1
#define SBP_DEV2ADDR(t, l) \
@@ -154,7 +154,7 @@ struct sbp_ocb {
#define OCB_ACT_CMD 1
#define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo))
-struct sbp_dev{
+struct sbp_dev {
#define SBP_DEV_RESET 0 /* accept login */
#define SBP_DEV_LOGIN 1 /* to login */
#if 0
@@ -232,7 +232,7 @@ static void sbp_cmd_callback (struct fw_xfer *);
#endif
static void sbp_orb_pointer (struct sbp_dev *, struct sbp_ocb *);
static void sbp_doorbell(struct sbp_dev *);
-static void sbp_execute_ocb (void *, bus_dma_segment_t *, int, int);
+static void sbp_execute_ocb (void *, bus_dma_segment_t *, int, int);
static void sbp_free_ocb (struct sbp_dev *, struct sbp_ocb *);
static void sbp_abort_ocb (struct sbp_ocb *, int);
static void sbp_abort_all_ocbs (struct sbp_dev *, int);
@@ -388,7 +388,7 @@ sbp_new_target(struct sbp_softc *sbp, struct fw_device *fwdev)
/* XXX wired-down configuration should be gotten from
tunable or device hint */
- for (i = 0; wired[i].bus >= 0; i ++) {
+ for (i = 0; wired[i].bus >= 0; i++) {
if (wired[i].bus == bus) {
w[wired[i].target] = 1;
if (wired[i].eui.hi == fwdev->eui.hi &&
@@ -397,16 +397,16 @@ sbp_new_target(struct sbp_softc *sbp, struct fw_device *fwdev)
}
}
if (target >= 0) {
- if(target < SBP_NUM_TARGETS &&
+ if (target < SBP_NUM_TARGETS &&
sbp->targets[target].fwdev == NULL)
- return(target);
+ return (target);
device_printf(sbp->fd.dev,
- "target %d is not free for %08x:%08x\n",
+ "target %d is not free for %08x:%08x\n",
target, fwdev->eui.hi, fwdev->eui.lo);
target = -1;
}
/* non-wired target */
- for (i = 0; i < SBP_NUM_TARGETS; i ++)
+ for (i = 0; i < SBP_NUM_TARGETS; i++)
if (sbp->targets[i].fwdev == NULL && w[i] == 0) {
target = i;
break;
@@ -445,12 +445,12 @@ END_DEBUG
device_printf(target->sbp->fd.dev, "%d no LUN found\n",
target->target_id);
- maxlun ++;
+ maxlun++;
if (maxlun >= SBP_NUM_LUNS)
maxlun = SBP_NUM_LUNS;
/* Invalidiate stale devices */
- for (lun = 0; lun < target->num_lun; lun ++) {
+ for (lun = 0; lun < target->num_lun; lun++) {
sdev = target->luns[lun];
if (sdev == NULL)
continue;
@@ -468,7 +468,7 @@ END_DEBUG
newluns = (struct sbp_dev **) realloc(target->luns,
sizeof(struct sbp_dev *) * maxlun,
M_SBP, M_NOWAIT | M_ZERO);
-
+
if (newluns == NULL) {
printf("%s: realloc failed\n", __func__);
newluns = target->luns;
@@ -527,7 +527,7 @@ END_DEBUG
if (new == 0)
goto next;
- fwdma_malloc(sbp->fd.fc,
+ fwdma_malloc(sbp->fd.fc,
/* alignment */ sizeof(uint32_t),
SBP_DMA_SIZE, &sdev->dma, BUS_DMA_NOWAIT |
BUS_DMA_COHERENT);
@@ -542,7 +542,7 @@ END_DEBUG
sdev->ocb = (struct sbp_ocb *)
((char *)sdev->dma.v_addr + SBP_LOGIN_SIZE);
bzero((char *)sdev->ocb,
- sizeof (struct sbp_ocb) * SBP_QUEUE_LEN);
+ sizeof(struct sbp_ocb) * SBP_QUEUE_LEN);
STAILQ_INIT(&sdev->free_ocbs);
for (i = 0; i < SBP_QUEUE_LEN; i++) {
@@ -564,7 +564,7 @@ next:
crom_next(&cc);
}
- for (lun = 0; lun < target->num_lun; lun ++) {
+ for (lun = 0; lun < target->num_lun; lun++) {
sdev = target->luns[lun];
if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) {
sbp_cam_detach_sdev(sdev);
@@ -713,7 +713,7 @@ END_DEBUG
if (alive && (sdev->status != SBP_DEV_DEAD)) {
if (sdev->path != NULL) {
xpt_freeze_devq(sdev->path, 1);
- sdev->freeze ++;
+ sdev->freeze++;
}
sbp_probe_lun(sdev);
sbp_show_sdev_info(sdev);
@@ -743,7 +743,7 @@ SBP_DEBUG(0)
END_DEBUG
if (sdev->path) {
xpt_freeze_devq(sdev->path, 1);
- sdev->freeze ++;
+ sdev->freeze++;
}
sdev->status = SBP_DEV_RETRY;
sbp_cam_detach_sdev(sdev);
@@ -797,7 +797,7 @@ END_DEBUG
return;
if (sbp_cold > 0)
- sbp_cold --;
+ sbp_cold--;
SBP_LOCK(sbp);
#if 0
@@ -809,7 +809,7 @@ END_DEBUG
#endif
/* Garbage Collection */
- for(i = 0 ; i < SBP_NUM_TARGETS ; i ++){
+ for (i = 0; i < SBP_NUM_TARGETS; i++) {
target = &sbp->targets[i];
STAILQ_FOREACH(fwdev, &sbp->fd.fc->devices, link)
if (target->fwdev == NULL || target->fwdev == fwdev)
@@ -829,14 +829,14 @@ SBP_DEBUG(0)
fwdev->status);
END_DEBUG
alive = SBP_FWDEV_ALIVE(fwdev);
- for(i = 0 ; i < SBP_NUM_TARGETS ; i ++){
+ for (i = 0; i < SBP_NUM_TARGETS; i++) {
target = &sbp->targets[i];
- if(target->fwdev == fwdev ) {
+ if (target->fwdev == fwdev) {
/* known target */
break;
}
}
- if(i == SBP_NUM_TARGETS){
+ if (i == SBP_NUM_TARGETS) {
if (alive) {
/* new target */
target = sbp_alloc_target(sbp, fwdev);
@@ -857,7 +857,8 @@ END_DEBUG
#if NEED_RESPONSE
static void
-sbp_loginres_callback(struct fw_xfer *xfer){
+sbp_loginres_callback(struct fw_xfer *xfer)
+{
struct sbp_dev *sdev;
sdev = (struct sbp_dev *)xfer->sc;
SBP_DEBUG(1)
@@ -950,8 +951,8 @@ sbp_next_dev(struct sbp_target *target, int lun)
for (i = lun, sdevp = &target->luns[lun]; i < target->num_lun;
i++, sdevp++)
if (*sdevp != NULL && (*sdevp)->status == SBP_DEV_PROBE)
- return(*sdevp);
- return(NULL);
+ return (*sdevp);
+ return (NULL);
}
#define SCAN_PRI 1
@@ -1147,7 +1148,7 @@ END_DEBUG
fp = &xfer->send.hdr;
fp->mode.wreqq.dest_hi = 0xffff;
fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT;
- fp->mode.wreqq.data = htonl((1 << (13+12)) | 0xf);
+ fp->mode.wreqq.data = htonl((1 << (13 + 12)) | 0xf);
fw_asyreq(xfer->fc, -1, xfer);
}
@@ -1213,8 +1214,8 @@ END_DEBUG
fp = &xfer->send.hdr;
fp->mode.wreqb.len = 8;
fp->mode.wreqb.extcode = 0;
- xfer->send.payload[0] =
- htonl(((sdev->target->sbp->fd.fc->nodeid | FWLOCALBUS )<< 16));
+ xfer->send.payload[0] =
+ htonl(((sdev->target->sbp->fd.fc->nodeid | FWLOCALBUS) << 16));
xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr);
if (fw_asyreq(xfer->fc, -1, xfer) != 0) {
@@ -1288,14 +1289,14 @@ sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset)
if (xfer == NULL) {
if (target->n_xfer > 5 /* XXX */) {
printf("sbp: no more xfer for this target\n");
- return(NULL);
+ return (NULL);
}
xfer = fw_xfer_alloc_buf(M_SBP, 8, 0);
- if(xfer == NULL){
+ if (xfer == NULL) {
printf("sbp: fw_xfer_alloc_buf failed\n");
return NULL;
}
- target->n_xfer ++;
+ target->n_xfer++;
if (debug)
printf("sbp: alloc %d xfer\n", target->n_xfer);
new = 1;
@@ -1362,7 +1363,7 @@ SBP_DEBUG(0)
device_printf(sdev->target->sbp->fd.dev,
"%s:%s %s\n",
__func__,sdev->bustgtlun,
- orb_fun_name[(func>>16)&0xf]);
+ orb_fun_name[(func >> 16) & 0xf]);
END_DEBUG
switch (func) {
case ORB_FUN_LGI:
@@ -1399,7 +1400,7 @@ start:
callout_reset(&target->mgm_ocb_timeout, 5*hz,
sbp_mgm_timeout, (caddr_t)ocb);
xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0);
- if(xfer == NULL){
+ if (xfer == NULL) {
return;
}
xfer->hand = sbp_mgm_callback;
@@ -1470,25 +1471,25 @@ END_DEBUG
case SCSI_STATUS_CHECK_COND:
case SCSI_STATUS_BUSY:
case SCSI_STATUS_CMD_TERMINATED:
- if(sbp_cmd_status->sfmt == SBP_SFMT_CURR){
+ if (sbp_cmd_status->sfmt == SBP_SFMT_CURR) {
sense->error_code = SSD_CURRENT_ERROR;
- }else{
+ } else {
sense->error_code = SSD_DEFERRED_ERROR;
}
- if(sbp_cmd_status->valid)
+ if (sbp_cmd_status->valid)
sense->error_code |= SSD_ERRCODE_VALID;
sense->flags = sbp_cmd_status->s_key;
- if(sbp_cmd_status->mark)
+ if (sbp_cmd_status->mark)
sense->flags |= SSD_FILEMARK;
- if(sbp_cmd_status->eom)
+ if (sbp_cmd_status->eom)
sense->flags |= SSD_EOM;
- if(sbp_cmd_status->ill_len)
+ if (sbp_cmd_status->ill_len)
sense->flags |= SSD_ILI;
bcopy(&sbp_cmd_status->info, &sense->info[0], 4);
if (sbp_status->len <= 1)
- /* XXX not scsi status. shouldn't be happened */
+ /* XXX not scsi status. shouldn't be happened */
sense->extra_len = 0;
else if (sbp_status->len <= 4)
/* add_sense_code(_qual), info, cmd_spec_info */
@@ -1513,10 +1514,10 @@ END_DEBUG
{
uint8_t j, *tmp;
tmp = sense;
- for( j = 0 ; j < 32 ; j+=8){
- printf("sense %02x%02x %02x%02x %02x%02x %02x%02x\n",
- tmp[j], tmp[j+1], tmp[j+2], tmp[j+3],
- tmp[j+4], tmp[j+5], tmp[j+6], tmp[j+7]);
+ for (j = 0; j < 32; j += 8) {
+ printf("sense %02x%02x %02x%02x %02x%02x %02x%02x\n",
+ tmp[j], tmp[j + 1], tmp[j + 2], tmp[j + 3],
+ tmp[j + 4], tmp[j + 5], tmp[j + 6], tmp[j + 7]);
}
}
@@ -1550,7 +1551,7 @@ END_DEBUG
switch (SID_TYPE(inq)) {
case T_DIRECT:
#if 0
- /*
+ /*
* XXX Convert Direct Access device to RBC.
* I've never seen FireWire DA devices which support READ_6.
*/
@@ -1566,7 +1567,7 @@ END_DEBUG
#if 1
bcopy(sdev->vendor, inq->vendor, sizeof(inq->vendor));
bcopy(sdev->product, inq->product, sizeof(inq->product));
- bcopy(sdev->revision+2, inq->revision, sizeof(inq->revision));
+ bcopy(sdev->revision + 2, inq->revision, sizeof(inq->revision));
#endif
break;
}
@@ -1606,16 +1607,16 @@ printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), n
*/
sbp = (struct sbp_softc *)xfer->sc;
SBP_LOCK_ASSERT(sbp);
- if (xfer->resp != 0){
+ if (xfer->resp != 0) {
printf("sbp_recv: xfer->resp = %d\n", xfer->resp);
goto done0;
}
- if (xfer->recv.payload == NULL){
+ if (xfer->recv.payload == NULL) {
printf("sbp_recv: xfer->recv.payload == NULL\n");
goto done0;
}
rfp = &xfer->recv.hdr;
- if(rfp->mode.wreqb.tcode != FWTCODE_WREQB){
+ if (rfp->mode.wreqb.tcode != FWTCODE_WREQB) {
printf("sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode);
goto done0;
}
@@ -1677,7 +1678,7 @@ END_DEBUG
&& sbp_status->dead == 0);
status_valid = (status_valid0 && sbp_status->status == 0);
- if (!status_valid0 || debug > 2){
+ if (!status_valid0 || debug > 2) {
int status;
SBP_DEBUG(0)
device_printf(sdev->target->sbp->fd.dev,
@@ -1691,7 +1692,7 @@ END_DEBUG
device_printf(sdev->target->sbp->fd.dev,
"%s\n", sdev->bustgtlun);
status = sbp_status->status;
- switch(sbp_status->resp) {
+ switch (sbp_status->resp) {
case 0:
if (status > MAX_ORB_STATUS0)
printf("%s\n", orb_status0[MAX_ORB_STATUS0]);
@@ -1700,7 +1701,7 @@ END_DEBUG
break;
case 1:
printf("Obj: %s, Error: %s\n",
- orb_status1_object[(status>>6) & 3],
+ orb_status1_object[(status >> 6) & 3],
orb_status1_serial_bus_error[status & 0xf]);
break;
case 2:
@@ -1718,7 +1719,7 @@ END_DEBUG
if (sbp_status->dead) {
if (sdev->path) {
xpt_freeze_devq(sdev->path, 1);
- sdev->freeze ++;
+ sdev->freeze++;
}
reset_agent = 1;
}
@@ -1726,17 +1727,17 @@ END_DEBUG
if (ocb == NULL)
goto done;
- switch(ntohl(ocb->orb[4]) & ORB_FMT_MSK){
+ switch (ntohl(ocb->orb[4]) & ORB_FMT_MSK) {
case ORB_FMT_NOP:
break;
case ORB_FMT_VED:
break;
case ORB_FMT_STD:
- switch(ocb->flags) {
+ switch (ocb->flags) {
case OCB_ACT_MGM:
orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK;
reset_agent = 0;
- switch(orb_fun) {
+ switch (orb_fun) {
case ORB_FUN_LGI:
fwdma_sync(&sdev->dma, BUS_DMASYNC_POSTREAD);
login_res = sdev->login;
@@ -1807,16 +1808,16 @@ END_DEBUG
break;
case OCB_ACT_CMD:
sdev->timeout = 0;
- if(ocb->ccb != NULL){
+ if (ocb->ccb != NULL) {
union ccb *ccb;
ccb = ocb->ccb;
- if(sbp_status->len > 1){
+ if (sbp_status->len > 1) {
sbp_scsi_status(sbp_status, ocb);
- }else{
- if(sbp_status->resp != ORB_RES_CMPL){
+ } else {
+ if (sbp_status->resp != ORB_RES_CMPL) {
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
- }else{
+ } else {
ccb->ccb_h.status = CAM_REQ_CMP;
}
}
@@ -1843,7 +1844,7 @@ done0:
* the buffer. In that case, the controller return ack_complete and
* no respose is necessary.
*
- * XXX fwohci.c and firewire.c should inform event_code such as
+ * XXX fwohci.c and firewire.c should inform event_code such as
* ack_complete or ack_pending to upper driver.
*/
#if NEED_RESPONSE
@@ -1900,7 +1901,7 @@ SBP_DEBUG(0)
END_DEBUG
if (cold)
- sbp_cold ++;
+ sbp_cold++;
sbp = device_get_softc(dev);
sbp->fd.dev = dev;
sbp->fd.fc = fc = device_get_ivars(dev);
@@ -1932,7 +1933,7 @@ END_DEBUG
if (devq == NULL)
return (ENXIO);
- for( i = 0 ; i < SBP_NUM_TARGETS ; i++){
+ for (i = 0; i < SBP_NUM_TARGETS; i++) {
sbp->targets[i].fwdev = NULL;
sbp->targets[i].luns = NULL;
sbp->targets[i].sbp = sbp;
@@ -2001,7 +2002,7 @@ SBP_DEBUG(0)
printf("sbp_logout_all\n");
END_DEBUG
SBP_LOCK_ASSERT(sbp);
- for (i = 0 ; i < SBP_NUM_TARGETS ; i ++) {
+ for (i = 0; i < SBP_NUM_TARGETS; i++) {
target = &sbp->targets[i];
if (target->luns == NULL)
continue;
@@ -2090,7 +2091,7 @@ SBP_DEBUG(0)
END_DEBUG
SBP_LOCK(sbp);
- for (i = 0; i < SBP_NUM_TARGETS; i ++)
+ for (i = 0; i < SBP_NUM_TARGETS; i++)
sbp_cam_detach_target(&sbp->targets[i]);
xpt_async(AC_LOST_DEVICE, sbp->path, NULL);
@@ -2105,7 +2106,7 @@ END_DEBUG
pause("sbpdtc", hz/2);
SBP_LOCK(sbp);
- for (i = 0 ; i < SBP_NUM_TARGETS ; i ++)
+ for (i = 0; i < SBP_NUM_TARGETS; i++)
sbp_free_target(&sbp->targets[i]);
SBP_UNLOCK(sbp);
@@ -2172,12 +2173,12 @@ sbp_target_reset(struct sbp_dev *sdev, int method)
if (tsdev->status == SBP_DEV_RESET)
continue;
xpt_freeze_devq(tsdev->path, 1);
- tsdev->freeze ++;
+ tsdev->freeze++;
sbp_abort_all_ocbs(tsdev, CAM_CMD_TIMEOUT);
if (method == 2)
tsdev->status = SBP_DEV_LOGIN;
}
- switch(method) {
+ switch (method) {
case 1:
printf("target reset\n");
sbp_mgm_orb(sdev, ORB_FUN_RST, NULL);
@@ -2187,7 +2188,7 @@ sbp_target_reset(struct sbp_dev *sdev, int method)
sbp_reset_start(sdev);
break;
}
-
+
}
static void
@@ -2225,12 +2226,12 @@ sbp_timeout(void *arg)
__func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
SBP_LOCK_ASSERT(sdev->target->sbp);
- sdev->timeout ++;
- switch(sdev->timeout) {
+ sdev->timeout++;
+ switch (sdev->timeout) {
case 1:
printf("agent reset\n");
xpt_freeze_devq(sdev->path, 1);
- sdev->freeze ++;
+ sdev->freeze++;
sbp_abort_all_ocbs(sdev, CAM_CMD_TIMEOUT);
sbp_agent_reset(sdev);
break;
@@ -2309,7 +2310,7 @@ END_DEBUG
* sometimes aimed at the SIM (sc is invalid and target is
* CAM_TARGET_WILDCARD)
*/
- if (sbp == NULL &&
+ if (sbp == NULL &&
ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
SBP_DEBUG(0)
printf("%s:%d:%jx func_code 0x%04x: "
@@ -2361,7 +2362,7 @@ SBP_DEBUG(2)
csio->cdb_len, csio->dxfer_len,
csio->sense_len);
END_DEBUG
- if(sdev == NULL){
+ if (sdev == NULL) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE;
xpt_done(ccb);
return;
@@ -2383,7 +2384,7 @@ END_DEBUG
ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
if (sdev->freeze == 0) {
xpt_freeze_devq(sdev->path, 1);
- sdev->freeze ++;
+ sdev->freeze++;
}
xpt_done(ccb);
return;
@@ -2395,12 +2396,12 @@ END_DEBUG
ccb->ccb_h.ccb_sdev_ptr = sdev;
ocb->orb[0] = htonl(1U << 31);
ocb->orb[1] = 0;
- ocb->orb[2] = htonl(((sbp->fd.fc->nodeid | FWLOCALBUS )<< 16) );
+ ocb->orb[2] = htonl(((sbp->fd.fc->nodeid | FWLOCALBUS) << 16));
ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET);
speed = min(target->fwdev->speed, max_speed);
ocb->orb[4] = htonl(ORB_NOTIFY | ORB_CMD_SPD(speed)
| ORB_CMD_MAXP(speed + 7));
- if((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN){
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
ocb->orb[4] |= htonl(ORB_CMD_IN);
}
@@ -2467,7 +2468,7 @@ END_DEBUG
case XPT_PATH_INQ: /* Path routing inquiry */
{
struct ccb_pathinq *cpi = &ccb->cpi;
-
+
SBP_DEBUG(1)
printf("%s:%d:%jx XPT_PATH_INQ:.\n",
device_get_nameunit(sbp->fd.dev),
@@ -2536,7 +2537,7 @@ END_DEBUG
}
static void
-sbp_execute_ocb(void *arg, bus_dma_segment_t *segments, int seg, int error)
+sbp_execute_ocb(void *arg, bus_dma_segment_t *segments, int seg, int error)
{
int i;
struct sbp_ocb *ocb;
@@ -2563,7 +2564,7 @@ END_DEBUG
panic("ds_len > SBP_SEG_MAX, fix busdma code");
ocb->orb[3] = htonl(s->ds_addr);
ocb->orb[4] |= htonl(s->ds_len);
- } else if(seg > 1) {
+ } else if (seg > 1) {
/* page table */
for (i = 0; i < seg; i++) {
s = &segments[i];
@@ -2572,7 +2573,7 @@ SBP_DEBUG(0)
if (s->ds_len < 16)
printf("sbp_execute_ocb: warning, "
"segment length(%zd) is less than 16."
- "(seg=%d/%d)\n", (size_t)s->ds_len, i+1, seg);
+ "(seg=%d/%d)\n", (size_t)s->ds_len, i + 1, seg);
END_DEBUG
if (s->ds_len > SBP_SEG_MAX)
panic("ds_len > SBP_SEG_MAX, fix busdma code");
@@ -2581,7 +2582,7 @@ END_DEBUG
}
ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg);
}
-
+
if (seg > 0)
bus_dmamap_sync(ocb->sdev->target->sbp->dmat, ocb->dmamap,
(ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
@@ -2593,19 +2594,19 @@ END_DEBUG
if (ocb->sdev->last_ocb != NULL)
sbp_doorbell(ocb->sdev);
else
- sbp_orb_pointer(ocb->sdev, ocb);
+ sbp_orb_pointer(ocb->sdev, ocb);
}
} else {
if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) {
ocb->sdev->flags &= ~ORB_LINK_DEAD;
- sbp_orb_pointer(ocb->sdev, ocb);
+ sbp_orb_pointer(ocb->sdev, ocb);
}
}
}
static void
sbp_poll(struct cam_sim *sim)
-{
+{
struct sbp_softc *sbp;
struct firewire_comm *fc;
@@ -2648,7 +2649,7 @@ END_DEBUG
if (!use_doorbell) {
if (sbp_status->src == SRC_NO_NEXT) {
if (next != NULL)
- sbp_orb_pointer(sdev, next);
+ sbp_orb_pointer(sdev, next);
else if (order > 0) {
/*
* Unordered execution
@@ -2661,7 +2662,7 @@ END_DEBUG
} else {
/*
* XXX this is not correct for unordered
- * execution.
+ * execution.
*/
if (sdev->last_ocb != NULL) {
sbp_free_ocb(sdev, sdev->last_ocb);
@@ -2673,7 +2674,7 @@ END_DEBUG
}
break;
} else
- order ++;
+ order++;
}
SBP_DEBUG(0)
if (ocb && order > 0) {
diff --git a/sys/dev/firewire/sbp.h b/sys/dev/firewire/sbp.h
index 84d522a..79f2e28 100644
--- a/sys/dev/firewire/sbp.h
+++ b/sys/dev/firewire/sbp.h
@@ -76,7 +76,7 @@ struct ind_ptr {
#define SBP_RECV_LEN 32
-struct sbp_login_res{
+struct sbp_login_res {
uint16_t len;
uint16_t id;
uint16_t res0;
@@ -86,7 +86,7 @@ struct sbp_login_res{
uint16_t recon_hold;
};
-struct sbp_status{
+struct sbp_status {
#if BYTE_ORDER == BIG_ENDIAN
uint8_t src:2,
resp:2,
@@ -155,7 +155,7 @@ struct sbp_status{
/* F: Address error */
-struct sbp_cmd_status{
+struct sbp_cmd_status {
#define SBP_SFMT_CURR 0
#define SBP_SFMT_DEFER 1
#if BYTE_ORDER == BIG_ENDIAN
diff --git a/sys/dev/firewire/sbp_targ.c b/sys/dev/firewire/sbp_targ.c
index f8f3063..0d78e9f 100644
--- a/sys/dev/firewire/sbp_targ.c
+++ b/sys/dev/firewire/sbp_targ.c
@@ -1,7 +1,7 @@
/*-
* Copyright (C) 2003
* Hidetoshi Shimokawa. All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -18,7 +18,7 @@
* 4. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -30,7 +30,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
+ *
* $FreeBSD$
*/
@@ -104,16 +104,16 @@ struct sbp_targ_login {
struct sbp_targ_lstate *lstate;
struct fw_device *fwdev;
struct sbp_login_res loginres;
- uint16_t fifo_hi;
+ uint16_t fifo_hi;
uint16_t last_hi;
- uint32_t fifo_lo;
+ uint32_t fifo_lo;
uint32_t last_lo;
STAILQ_HEAD(, orb_info) orbs;
STAILQ_ENTRY(sbp_targ_login) link;
uint16_t hold_sec;
uint16_t id;
- uint8_t flags;
- uint8_t spd;
+ uint8_t flags;
+ uint8_t spd;
struct callout hold_callout;
};
@@ -124,7 +124,7 @@ struct sbp_targ_lstate {
struct ccb_hdr_slist accept_tios;
struct ccb_hdr_slist immed_notifies;
struct crom_chunk model;
- uint32_t flags;
+ uint32_t flags;
STAILQ_HEAD(, sbp_targ_login) logins;
};
@@ -205,7 +205,7 @@ struct orb_info {
struct sbp_targ_login *login;
union ccb *ccb;
struct ccb_accept_tio *atio;
- uint8_t state;
+ uint8_t state;
#define ORBI_STATUS_NONE 0
#define ORBI_STATUS_FETCH 1
#define ORBI_STATUS_ATIO 2
@@ -213,7 +213,7 @@ struct orb_info {
#define ORBI_STATUS_STATUS 4
#define ORBI_STATUS_POINTER 5
#define ORBI_STATUS_ABORTED 7
- uint8_t refcount;
+ uint8_t refcount;
uint16_t orb_hi;
uint32_t orb_lo;
uint32_t data_hi;
@@ -250,8 +250,8 @@ sbp_targ_probe(device_t dev)
device_t pa;
pa = device_get_parent(dev);
- if(device_get_unit(dev) != device_get_unit(pa)){
- return(ENXIO);
+ if (device_get_unit(dev) != device_get_unit(pa)) {
+ return (ENXIO);
}
device_set_desc(dev, "SBP-2/SCSI over FireWire target mode");
@@ -336,7 +336,7 @@ sbp_targ_post_busreset(void *arg)
crom_add_entry(unit, CROM_MGM, SBP_TARG_MGM >> 2);
crom_add_entry(unit, CSRKEY_UNIT_CH, (10<<8) | 8);
- for (i = 0; i < MAX_LUN; i ++) {
+ for (i = 0; i < MAX_LUN; i++) {
lstate = sc->lstate[i];
if (lstate == NULL)
continue;
@@ -347,7 +347,7 @@ sbp_targ_post_busreset(void *arg)
}
/* Process for reconnection hold time */
- for (i = 0; i < MAX_LOGINS; i ++) {
+ for (i = 0; i < MAX_LOGINS; i++) {
login = sc->logins[i];
if (login == NULL)
continue;
@@ -355,7 +355,7 @@ sbp_targ_post_busreset(void *arg)
if (login->flags & F_LOGIN) {
login->flags |= F_HOLD;
callout_reset(&login->hold_callout,
- hz * login->hold_sec,
+ hz * login->hold_sec,
sbp_targ_hold_expire, (void *)login);
}
}
@@ -392,7 +392,7 @@ sbp_targ_find_devs(struct sbp_targ_softc *sc, union ccb *ccb,
lun = ccb->ccb_h.target_lun;
if (lun >= MAX_LUN)
return (CAM_LUN_INVALID);
-
+
*lstate = sc->lstate[lun];
if (notfound_failure != 0 && *lstate == NULL) {
@@ -840,7 +840,7 @@ sbp_targ_cam_done(struct fw_xfer *xfer)
sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
}
- orbi->refcount --;
+ orbi->refcount--;
ccb = orbi->ccb;
if (orbi->refcount == 0) {
@@ -916,7 +916,7 @@ sbp_targ_abort_ccb(struct sbp_targ_softc *sc, union ccb *ccb)
found = 1;
SLIST_REMOVE_HEAD(list, sim_links.sle);
} else {
- while(curelm != NULL) {
+ while (curelm != NULL) {
struct ccb_hdr *nextelm;
nextelm = SLIST_NEXT(curelm, sim_links.sle);
@@ -982,7 +982,7 @@ sbp_targ_xfer_buf(struct orb_info *orbi, u_int offset,
if (xfer == NULL) {
printf("%s: xfer == NULL", __func__);
/* XXX what should we do?? */
- orbi->refcount --;
+ orbi->refcount--;
}
off += len;
}
@@ -1354,7 +1354,7 @@ sbp_targ_action1(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_UA_ABORT;
break;
default:
- printf("%s: aborting unknown function %d\n",
+ printf("%s: aborting unknown function %d\n",
__func__, accb->ccb_h.func_code);
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
@@ -1464,7 +1464,7 @@ sbp_targ_cmd_handler(struct fw_xfer *xfer)
orb = orbi->orb;
/* swap payload except SCSI command */
- for (i = 0; i < 5; i ++)
+ for (i = 0; i < 5; i++)
orb[i] = ntohl(orb[i]);
orb4 = (struct corb4 *)&orb[4];
@@ -1545,12 +1545,12 @@ sbp_targ_get_login(struct sbp_targ_softc *sc, struct fw_device *fwdev, int lun)
int i;
lstate = sc->lstate[lun];
-
+
STAILQ_FOREACH(login, &lstate->logins, link)
if (login->fwdev == fwdev)
return (login);
- for (i = 0; i < MAX_LOGINS; i ++)
+ for (i = 0; i < MAX_LOGINS; i++)
if (sc->logins[i] == NULL)
goto found;
@@ -1607,7 +1607,7 @@ sbp_targ_mgm_handler(struct fw_xfer *xfer)
orb = orbi->orb;
/* swap payload */
- for (i = 0; i < 8; i ++) {
+ for (i = 0; i < 8; i++) {
orb[i] = ntohl(orb[i]);
}
orb4 = (struct morb4 *)&orb[4];
@@ -1628,10 +1628,10 @@ sbp_targ_mgm_handler(struct fw_xfer *xfer)
lstate = orbi->sc->lstate[lun];
if (lun >= MAX_LUN || lstate == NULL ||
- (exclusive &&
+ (exclusive &&
STAILQ_FIRST(&lstate->logins) != NULL &&
STAILQ_FIRST(&lstate->logins)->fwdev != orbi->fwdev)
- ) {
+ ) {
/* error */
orbi->status.dead = 1;
orbi->status.status = STATUS_ACCESS_DENY;
@@ -1819,16 +1819,16 @@ sbp_targ_cmd(struct fw_xfer *xfer, struct fw_device *fwdev, int login_id,
int rtcode = 0;
if (login_id < 0 || login_id >= MAX_LOGINS)
- return(RESP_ADDRESS_ERROR);
+ return (RESP_ADDRESS_ERROR);
sc = (struct sbp_targ_softc *)xfer->sc;
login = sc->logins[login_id];
if (login == NULL)
- return(RESP_ADDRESS_ERROR);
+ return (RESP_ADDRESS_ERROR);
if (login->fwdev != fwdev) {
/* XXX */
- return(RESP_ADDRESS_ERROR);
+ return (RESP_ADDRESS_ERROR);
}
switch (reg) {
@@ -1895,17 +1895,17 @@ sbp_targ_mgm(struct fw_xfer *xfer, struct fw_device *fwdev)
sc = (struct sbp_targ_softc *)xfer->sc;
fp = &xfer->recv.hdr;
- if (fp->mode.wreqb.tcode != FWTCODE_WREQB){
+ if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
printf("%s: tcode = %d\n", __func__, fp->mode.wreqb.tcode);
- return(RESP_TYPE_ERROR);
+ return (RESP_TYPE_ERROR);
}
sbp_targ_fetch_orb(sc, fwdev,
ntohl(xfer->recv.payload[0]),
ntohl(xfer->recv.payload[1]),
NULL, FETCH_MGM);
-
- return(0);
+
+ return (0);
}
static void
@@ -2023,9 +2023,9 @@ sbp_targ_detach(device_t dev)
xpt_free_path(sc->path);
xpt_bus_deregister(cam_sim_path(sc->sim));
SBP_UNLOCK(sc);
- cam_sim_free(sc->sim, /*free_devq*/TRUE);
+ cam_sim_free(sc->sim, /*free_devq*/TRUE);
- for (i = 0; i < MAX_LUN; i ++) {
+ for (i = 0; i < MAX_LUN; i++) {
lstate = sc->lstate[i];
if (lstate != NULL) {
xpt_free_path(lstate->path);
@@ -2036,7 +2036,7 @@ sbp_targ_detach(device_t dev)
xpt_free_path(sc->black_hole->path);
free(sc->black_hole, M_SBP_TARG);
}
-
+
fw_bindremove(sc->fd.fc, &sc->fwb);
fw_xferlist_remove(&sc->fwb.xferlist);
diff --git a/sys/dev/fxp/if_fxp.c b/sys/dev/fxp/if_fxp.c
index 48bf8e9..947f057 100644
--- a/sys/dev/fxp/if_fxp.c
+++ b/sys/dev/fxp/if_fxp.c
@@ -1008,7 +1008,7 @@ fxp_detach(device_t dev)
#ifdef DEVICE_POLLING
if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
- ether_poll_deregister_drv(sc->ifp);
+ ether_poll_deregister(sc->ifp);
#endif
FXP_LOCK(sc);
@@ -1670,7 +1670,7 @@ fxp_encap(struct fxp_softc *sc, struct mbuf **m_head)
}
#ifdef DEVICE_POLLING
-static poll_handler_drv_t fxp_poll;
+static poll_handler_t fxp_poll;
static int
fxp_poll(if_t ifp, enum poll_cmd cmd, int count)
@@ -2890,7 +2890,7 @@ fxp_ioctl(if_t ifp, u_long command, caddr_t data)
#ifdef DEVICE_POLLING
if (mask & IFCAP_POLLING) {
if (ifr->ifr_reqcap & IFCAP_POLLING) {
- error = ether_poll_register_drv(fxp_poll, ifp);
+ error = ether_poll_register(fxp_poll, ifp);
if (error)
return(error);
FXP_LOCK(sc);
@@ -2899,7 +2899,7 @@ fxp_ioctl(if_t ifp, u_long command, caddr_t data)
if_setcapenablebit(ifp, IFCAP_POLLING, 0);
FXP_UNLOCK(sc);
} else {
- error = ether_poll_deregister_drv(ifp);
+ error = ether_poll_deregister(ifp);
/* Enable interrupts in any case */
FXP_LOCK(sc);
CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
diff --git a/sys/dev/ie/if_ie.c b/sys/dev/ie/if_ie.c
index eceac6c..72e2559 100644
--- a/sys/dev/ie/if_ie.c
+++ b/sys/dev/ie/if_ie.c
@@ -949,6 +949,8 @@ iestart_locked(struct ifnet *ifp)
if (!m)
break;
+ BPF_MTAP(ifp, m);
+
buffer = sc->xmit_cbuffs[sc->xmit_count];
len = 0;
@@ -961,13 +963,6 @@ iestart_locked(struct ifnet *ifp)
m_freem(m0);
len = max(len, ETHER_MIN_LEN);
- /*
- * See if bpf is listening on this interface, let it see the
- * packet before we commit it to the wire.
- */
- BPF_TAP(sc->ifp,
- (void *)sc->xmit_cbuffs[sc->xmit_count], len);
-
sc->xmit_buffs[sc->xmit_count]->ie_xmit_flags =
IE_XMIT_LAST|len;
sc->xmit_buffs[sc->xmit_count]->ie_xmit_next = 0xffff;
diff --git a/sys/dev/iscsi_initiator/isc_cam.c b/sys/dev/iscsi_initiator/isc_cam.c
index 8f8bd64..6089694 100644
--- a/sys/dev/iscsi_initiator/isc_cam.c
+++ b/sys/dev/iscsi_initiator/isc_cam.c
@@ -125,7 +125,7 @@ scan_callback(struct cam_periph *periph, union ccb *ccb)
debug_called(8);
- free(ccb, M_TEMP);
+ xpt_free_ccb(ccb);
if(sp->flags & ISC_SCANWAIT) {
sp->flags &= ~ISC_SCANWAIT;
@@ -141,30 +141,15 @@ ic_scan(isc_session_t *sp)
debug_called(8);
sdebug(2, "scanning sid=%d", sp->sid);
- if((ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
- xdebug("scan failed (can't allocate CCB)");
- return ENOMEM; // XXX
- }
-
sp->flags &= ~ISC_CAMDEVS;
sp->flags |= ISC_SCANWAIT;
- CAM_LOCK(sp);
- if(xpt_create_path(&sp->cam_path, NULL, cam_sim_path(sp->cam_sim),
- 0, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
- xdebug("can't create cam path");
- CAM_UNLOCK(sp);
- free(ccb, M_TEMP);
- return ENODEV; // XXX
- }
- xpt_setup_ccb(&ccb->ccb_h, sp->cam_path, 5/*priority (low)*/);
- ccb->ccb_h.func_code = XPT_SCAN_BUS;
+ ccb = xpt_alloc_ccb();
+ ccb->ccb_h.path = sp->cam_path;
ccb->ccb_h.cbfcnp = scan_callback;
- ccb->crcn.flags = CAM_FLAG_NONE;
ccb->ccb_h.spriv_ptr0 = sp;
- xpt_action(ccb);
- CAM_UNLOCK(sp);
+ xpt_rescan(ccb);
while(sp->flags & ISC_SCANWAIT)
tsleep(sp, PRIBIO, "ffp", 5*hz); // the timeout time should
@@ -374,6 +359,16 @@ ic_init(isc_session_t *sp)
return ENXIO;
}
sp->cam_sim = sim;
+ if(xpt_create_path(&sp->cam_path, NULL, cam_sim_path(sp->cam_sim),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_bus_deregister(cam_sim_path(sp->cam_sim));
+ cam_sim_free(sim, /*free_devq*/TRUE);
+ CAM_UNLOCK(sp);
+#if __FreeBSD_version >= 700000
+ mtx_destroy(&sp->cam_mtx);
+#endif
+ return ENXIO;
+ }
CAM_UNLOCK(sp);
sdebug(1, "cam subsystem initialized");
diff --git a/sys/dev/ixgb/if_ixgb.c b/sys/dev/ixgb/if_ixgb.c
index 9b4555d..6f25c0a 100644
--- a/sys/dev/ixgb/if_ixgb.c
+++ b/sys/dev/ixgb/if_ixgb.c
@@ -97,6 +97,7 @@ static void ixgb_intr(void *);
static void ixgb_start(struct ifnet *);
static void ixgb_start_locked(struct ifnet *);
static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
+static uint64_t ixgb_get_counter(struct ifnet *, ift_counter);
static void ixgb_watchdog(struct adapter *);
static void ixgb_init(void *);
static void ixgb_init_locked(struct adapter *);
@@ -643,7 +644,7 @@ ixgb_watchdog(struct adapter *adapter)
ixgb_init_locked(adapter);
- ifp->if_oerrors++;
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return;
}
@@ -1355,6 +1356,7 @@ ixgb_setup_interface(device_t dev, struct adapter * adapter)
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ixgb_ioctl;
ifp->if_start = ixgb_start;
+ ifp->if_get_counter = ixgb_get_counter;
ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
#if __FreeBSD_version < 500000
@@ -2326,7 +2328,6 @@ ixgb_write_pci_cfg(struct ixgb_hw * hw,
static void
ixgb_update_stats_counters(struct adapter * adapter)
{
- struct ifnet *ifp;
adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
@@ -2389,29 +2390,37 @@ ixgb_update_stats_counters(struct adapter * adapter)
adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
+}
- ifp = adapter->ifp;
-
- /* Fill out the OS statistics structure */
- ifp->if_ipackets = adapter->stats.gprcl;
- ifp->if_opackets = adapter->stats.gptcl;
- ifp->if_ibytes = adapter->stats.gorcl;
- ifp->if_obytes = adapter->stats.gotcl;
- ifp->if_imcasts = adapter->stats.mprcl;
- ifp->if_collisions = 0;
-
- /* Rx Errors */
- ifp->if_ierrors =
- adapter->dropped_pkts +
- adapter->stats.crcerrs +
- adapter->stats.rnbc +
- adapter->stats.mpc +
- adapter->stats.rlec;
-
+static uint64_t
+ixgb_get_counter(struct ifnet *ifp, ift_counter cnt)
+{
+ struct adapter *adapter;
+ adapter = if_getsoftc(ifp);
+
+ switch (cnt) {
+ case IFCOUNTER_IPACKETS:
+ return (adapter->stats.gprcl);
+ case IFCOUNTER_OPACKETS:
+ return (adapter->stats.gptcl);
+ case IFCOUNTER_IBYTES:
+ return (adapter->stats.gorcl);
+ case IFCOUNTER_OBYTES:
+ return (adapter->stats.gotcl);
+ case IFCOUNTER_IMCASTS:
+ return (adapter->stats.mprcl);
+ case IFCOUNTER_COLLISIONS:
+ return (0);
+ case IFCOUNTER_IERRORS:
+ return (adapter->dropped_pkts + adapter->stats.crcerrs +
+ adapter->stats.rnbc + adapter->stats.mpc +
+ adapter->stats.rlec);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
}
-
/**********************************************************************
*
* This routine is called only when ixgb_display_debug_stats is enabled.
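
With interface statistics moved out of struct ifnet, a driver exports the counters it keeps itself through an if_get_counter method and defers everything else to if_get_counter_default(), as the ixgb conversion above does. A minimal sketch of the pattern with hypothetical foo(4) statistics fields:

static uint64_t
foo_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct foo_softc *sc = if_getsoftc(ifp);	/* hypothetical softc */

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->stats.rx_pkts);
	case IFCOUNTER_OPACKETS:
		return (sc->stats.tx_pkts);
	case IFCOUNTER_IERRORS:
		return (sc->stats.rx_crc_errs + sc->stats.rx_len_errs);
	default:
		/* Anything the hardware does not keep is counted by the stack. */
		return (if_get_counter_default(ifp, cnt));
	}
}

	/* In the attach path: */
	ifp->if_get_counter = foo_get_counter;
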
diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/ixgbe.c
index 279dac2..d7371a8 100644
--- a/sys/dev/ixgbe/ixgbe.c
+++ b/sys/dev/ixgbe/ixgbe.c
@@ -120,6 +120,7 @@ static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void ixgbe_init(void *);
static void ixgbe_init_locked(struct adapter *);
static void ixgbe_stop(void *);
+static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int ixgbe_media_change(struct ifnet *);
static void ixgbe_identify_hardware(struct adapter *);
@@ -2721,6 +2722,7 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_softc = adapter;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ixgbe_ioctl;
+ ifp->if_get_counter = ixgbe_get_counter;
#ifndef IXGBE_LEGACY_TX
ifp->if_transmit = ixgbe_mq_start;
ifp->if_qflush = ixgbe_qflush;
@@ -5364,10 +5366,8 @@ ixgbe_reinit_fdir(void *context, int pending)
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
- struct ifnet *ifp = adapter->ifp;
struct ixgbe_hw *hw = &adapter->hw;
u32 missed_rx = 0, bprc, lxon, lxoff, total;
- u64 total_missed_rx = 0;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
@@ -5386,8 +5386,6 @@ ixgbe_update_stats_counters(struct adapter *adapter)
missed_rx += mp;
/* global total per queue */
adapter->stats.mpc[i] += mp;
- /* Running comprehensive total for stats display */
- total_missed_rx += adapter->stats.mpc[i];
if (hw->mac.type == ixgbe_mac_82598EB) {
adapter->stats.rnbc[i] +=
IXGBE_READ_REG(hw, IXGBE_RNBC(i));
@@ -5497,19 +5495,41 @@ ixgbe_update_stats_counters(struct adapter *adapter)
adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
}
+}
- /* Fill out the OS statistics structure */
- ifp->if_ipackets = adapter->stats.gprc;
- ifp->if_opackets = adapter->stats.gptc;
- ifp->if_ibytes = adapter->stats.gorc;
- ifp->if_obytes = adapter->stats.gotc;
- ifp->if_imcasts = adapter->stats.mprc;
- ifp->if_omcasts = adapter->stats.mptc;
- ifp->if_collisions = 0;
-
- /* Rx Errors */
- ifp->if_iqdrops = total_missed_rx;
- ifp->if_ierrors = adapter->stats.crcerrs + adapter->stats.rlec;
+static uint64_t
+ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
+{
+ struct adapter *adapter;
+ uint64_t rv;
+
+ adapter = if_getsoftc(ifp);
+
+ switch (cnt) {
+ case IFCOUNTER_IPACKETS:
+ return (adapter->stats.gprc);
+ case IFCOUNTER_OPACKETS:
+ return (adapter->stats.gptc);
+ case IFCOUNTER_IBYTES:
+ return (adapter->stats.gorc);
+ case IFCOUNTER_OBYTES:
+ return (adapter->stats.gotc);
+ case IFCOUNTER_IMCASTS:
+ return (adapter->stats.mprc);
+ case IFCOUNTER_OMCASTS:
+ return (adapter->stats.mptc);
+ case IFCOUNTER_COLLISIONS:
+ return (0);
+ case IFCOUNTER_IQDROPS:
+ rv = 0;
+ for (int i = 0; i < 8; i++)
+ rv += adapter->stats.mpc[i];
+ return (rv);
+ case IFCOUNTER_IERRORS:
+ return (adapter->stats.crcerrs + adapter->stats.rlec);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
}
/** ixgbe_sysctl_tdh_handler - Handler function
diff --git a/sys/dev/ixgbe/ixv.c b/sys/dev/ixgbe/ixv.c
index ed137f51..296138e 100644
--- a/sys/dev/ixgbe/ixv.c
+++ b/sys/dev/ixgbe/ixv.c
@@ -634,9 +634,9 @@ ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
}
drbr_advance(ifp, txr->br);
enqueued++;
- ifp->if_obytes += next->m_pkthdr.len;
+ if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
if (next->m_flags & M_MCAST)
- ifp->if_omcasts++;
+ if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
/* Send a copy of the frame to the BPF listener */
ETHER_BPF_MTAP(ifp, next);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
@@ -2651,7 +2651,7 @@ ixv_txeof(struct tx_ring *txr)
tx_desc =
(struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
}
- ++ifp->if_opackets;
+ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
/* See if there is more work now */
last = tx_buffer->eop_index;
if (last != -1) {
@@ -3341,7 +3341,7 @@ ixv_rxeof(struct ix_queue *que, int count)
/* Make sure all parts of a bad packet are discarded */
if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
(rxr->discard)) {
- ifp->if_ierrors++;
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
rxr->rx_discarded++;
if (!eop)
rxr->discard = TRUE;
@@ -3455,7 +3455,7 @@ ixv_rxeof(struct ix_queue *que, int count)
/* Sending this frame? */
if (eop) {
sendmp->m_pkthdr.rcvif = ifp;
- ifp->if_ipackets++;
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
rxr->rx_packets++;
/* capture data for AIM */
rxr->bytes += sendmp->m_pkthdr.len;
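
Direct increments of ifnet fields such as ifp->if_opackets++ become if_inc_counter() calls, so the stack can back the counters with per-CPU counter(9) storage rather than plain fields in struct ifnet. The per-frame pattern in a transmit-completion or receive path is simply:

	/* One completed transmit ('m' is the transmitted mbuf chain). */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	if (m->m_flags & M_MCAST)
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);

	/* A frame dropped because of a receive error. */
	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
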
diff --git a/sys/dev/ixl/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h
index 97908ba..895bf83 100755
--- a/sys/dev/ixl/i40e_osdep.h
+++ b/sys/dev/ixl/i40e_osdep.h
@@ -152,6 +152,7 @@ struct i40e_osdep
bus_space_tag_t mem_bus_space_tag;
bus_space_handle_t mem_bus_space_handle;
bus_size_t mem_bus_space_size;
+ uint32_t flush_reg;
struct device *dev;
};
@@ -208,6 +209,13 @@ wr32_osdep(struct i40e_osdep *osdep, uint32_t reg, uint32_t value)
osdep->mem_bus_space_handle, reg, value);
}
+static __inline void
+ixl_flush_osdep(struct i40e_osdep *osdep)
+{
+
+ rd32_osdep(osdep, osdep->flush_reg);
+}
+
#define rd32(a, reg) rd32_osdep((a)->back, (reg))
#define wr32(a, reg, value) wr32_osdep((a)->back, (reg), (value))
@@ -221,9 +229,6 @@ wr32_osdep(struct i40e_osdep *osdep, uint32_t reg, uint32_t value)
((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
reg, value))
-#define ixl_flush(a) (\
- bus_space_read_4( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \
- ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
- I40E_GLGEN_STAT))
+#define ixl_flush(a) ixl_flush_osdep((a)->back)
#endif /* _I40E_OSDEP_H_ */
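
ixl_flush() now reads back a per-function register recorded in the osdep instead of hard-coding I40E_GLGEN_STAT, so the PF (I40E_GLGEN_STAT) and VF (I40E_VFGEN_RSTAT) drivers can share one flush helper. The read-back is the usual idiom for forcing posted PCI writes to reach the device; a hedged usage sketch, with the register taken from the PF side and 'val' standing in for whatever value the caller wants written:

	/* Kick an interrupt-control register, then make sure the write
	 * has actually landed on the device before continuing.
	 */
	wr32(hw, I40E_PFINT_DYN_CTL0, val);
	ixl_flush(hw);		/* bus_space_read_4() of osdep->flush_reg */
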
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index abae7a5..8d1100c 100755
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -2177,6 +2177,7 @@ ixl_allocate_pci_resources(struct ixl_pf *pf)
pf->osdep.mem_bus_space_handle =
rman_get_bushandle(pf->pci_mem);
pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
+ pf->osdep.flush_reg = I40E_GLGEN_STAT;
pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
pf->hw.back = &pf->osdep;
@@ -2275,6 +2276,10 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ixl_ioctl;
+#if __FreeBSD_version >= 1100000
+ if_setgetcounterfn(ifp, ixl_get_counter);
+#endif
+
ifp->if_transmit = ixl_mq_start;
ifp->if_qflush = ixl_qflush;
@@ -3700,7 +3705,6 @@ ixl_update_stats_counters(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
- struct ifnet *ifp = vsi->ifp;
struct i40e_hw_port_stats *nsd = &pf->stats;
struct i40e_hw_port_stats *osd = &pf->stats_offsets;
@@ -3893,7 +3897,7 @@ ixl_update_stats_counters(struct ixl_pf *pf)
/* OS statistics */
// ERJ - these are per-port, update all vsis?
- ifp->if_ierrors = nsd->crc_errors + nsd->illegal_bytes;
+ IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
}
/*
@@ -4027,13 +4031,16 @@ void ixl_update_eth_stats(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = &pf->hw;
- struct ifnet *ifp = vsi->ifp;
struct i40e_eth_stats *es;
struct i40e_eth_stats *oes;
+ int i;
+ uint64_t tx_discards;
+ struct i40e_hw_port_stats *nsd;
u16 stat_idx = vsi->info.stat_counter_idx;
es = &vsi->eth_stats;
oes = &vsi->eth_stats_offsets;
+ nsd = &pf->stats;
/* Gather up the stats that the hw collects */
ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
@@ -4078,22 +4085,27 @@ void ixl_update_eth_stats(struct ixl_vsi *vsi)
&oes->tx_broadcast, &es->tx_broadcast);
vsi->stat_offsets_loaded = true;
+ tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
+ for (i = 0; i < vsi->num_queues; i++)
+ tx_discards += vsi->queues[i].txr.br->br_drops;
+
/* Update ifnet stats */
- ifp->if_ipackets = es->rx_unicast +
+ IXL_SET_IPACKETS(vsi, es->rx_unicast +
es->rx_multicast +
- es->rx_broadcast;
- ifp->if_opackets = es->tx_unicast +
+ es->rx_broadcast);
+ IXL_SET_OPACKETS(vsi, es->tx_unicast +
es->tx_multicast +
- es->tx_broadcast;
- ifp->if_ibytes = es->rx_bytes;
- ifp->if_obytes = es->tx_bytes;
- ifp->if_imcasts = es->rx_multicast;
- ifp->if_omcasts = es->tx_multicast;
-
- ifp->if_oerrors = es->tx_errors;
- ifp->if_iqdrops = es->rx_discards;
- ifp->if_noproto = es->rx_unknown_protocol;
- ifp->if_collisions = 0;
+ es->tx_broadcast);
+ IXL_SET_IBYTES(vsi, es->rx_bytes);
+ IXL_SET_OBYTES(vsi, es->tx_bytes);
+ IXL_SET_IMCASTS(vsi, es->rx_multicast);
+ IXL_SET_OMCASTS(vsi, es->tx_multicast);
+
+ IXL_SET_OERRORS(vsi, es->tx_errors);
+ IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
+ IXL_SET_OQDROPS(vsi, tx_discards);
+ IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
+ IXL_SET_COLLISIONS(vsi, 0);
}
/**
diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c
index a29d669..bd3c202 100644
--- a/sys/dev/ixl/if_ixlv.c
+++ b/sys/dev/ixl/if_ixlv.c
@@ -1137,6 +1137,7 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
sc->osdep.mem_bus_space_handle =
rman_get_bushandle(sc->pci_mem);
sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
+ sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
sc->hw.back = &sc->osdep;
@@ -1355,6 +1356,10 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ixlv_ioctl;
+#if __FreeBSD_version >= 1100000
+ if_setgetcounterfn(ifp, ixl_get_counter);
+#endif
+
ifp->if_transmit = ixl_mq_start;
ifp->if_qflush = ixl_qflush;
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index 69be008..c240b02 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -264,6 +264,35 @@
#define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
+#if __FreeBSD_version >= 1100000
+#define IXL_SET_IPACKETS(vsi, count) (vsi)->ipackets = (count)
+#define IXL_SET_IERRORS(vsi, count) (vsi)->ierrors = (count)
+#define IXL_SET_OPACKETS(vsi, count) (vsi)->opackets = (count)
+#define IXL_SET_OERRORS(vsi, count) (vsi)->oerrors = (count)
+#define IXL_SET_COLLISIONS(vsi, count) /* Do nothing; collisions is always 0. */
+#define IXL_SET_IBYTES(vsi, count) (vsi)->ibytes = (count)
+#define IXL_SET_OBYTES(vsi, count) (vsi)->obytes = (count)
+#define IXL_SET_IMCASTS(vsi, count) (vsi)->imcasts = (count)
+#define IXL_SET_OMCASTS(vsi, count) (vsi)->omcasts = (count)
+#define IXL_SET_IQDROPS(vsi, count) (vsi)->iqdrops = (count)
+#define IXL_SET_OQDROPS(vsi, count) (vsi)->oqdrops = (count)
+#define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
+#else
+#define IXL_SET_IPACKETS(vsi, count) (vsi)->ifp->if_ipackets = (count)
+#define IXL_SET_IERRORS(vsi, count) (vsi)->ifp->if_ierrors = (count)
+#define IXL_SET_OPACKETS(vsi, count) (vsi)->ifp->if_opackets = (count)
+#define IXL_SET_OERRORS(vsi, count) (vsi)->ifp->if_oerrors = (count)
+#define IXL_SET_COLLISIONS(vsi, count) (vsi)->ifp->if_collisions = (count)
+#define IXL_SET_IBYTES(vsi, count) (vsi)->ifp->if_ibytes = (count)
+#define IXL_SET_OBYTES(vsi, count) (vsi)->ifp->if_obytes = (count)
+#define IXL_SET_IMCASTS(vsi, count) (vsi)->ifp->if_imcasts = (count)
+#define IXL_SET_OMCASTS(vsi, count) (vsi)->ifp->if_omcasts = (count)
+#define IXL_SET_IQDROPS(vsi, count) (vsi)->ifp->if_iqdrops = (count)
+#define IXL_SET_OQDROPS(vsi, odrops) (vsi)->ifp->if_snd.ifq_drops = (odrops)
+#define IXL_SET_NOPROTO(vsi, count) (vsi)->ifp->if_noproto = (count)
+#endif
+
+
/*
*****************************************************************************
* vendor_info_array
@@ -447,6 +476,17 @@ struct ixl_vsi {
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
bool stat_offsets_loaded;
+ u64 ipackets;
+ u64 ierrors;
+ u64 opackets;
+ u64 oerrors;
+ u64 ibytes;
+ u64 obytes;
+ u64 imcasts;
+ u64 omcasts;
+ u64 iqdrops;
+ u64 oqdrops;
+ u64 noproto;
/* Driver statistics */
u64 hw_filters_del;
@@ -554,5 +594,8 @@ void ixl_free_que_rx(struct ixl_queue *);
#ifdef IXL_FDIR
void ixl_atr(struct ixl_queue *, struct tcphdr *, int);
#endif
+#if __FreeBSD_version >= 1100000
+uint64_t ixl_get_counter(if_t ifp, ift_counter cnt);
+#endif
#endif /* _IXL_H_ */
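
The IXL_SET_* macros keep the statistics-update code identical on both sides of the __FreeBSD_version 1100000 boundary: on new kernels they fill the u64 shadow counters added to struct ixl_vsi (which ixl_get_counter() later reports), on old kernels they still write the legacy ifnet fields. Callers therefore only ever go through the macros, for example:

	IXL_SET_IPACKETS(vsi, es->rx_unicast + es->rx_multicast +
	    es->rx_broadcast);
	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
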
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
index 306d4ec..b804c76 100755
--- a/sys/dev/ixl/ixl_txrx.c
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -783,8 +783,6 @@ ixl_get_tx_head(struct ixl_queue *que)
bool
ixl_txeof(struct ixl_queue *que)
{
- struct ixl_vsi *vsi = que->vsi;
- struct ifnet *ifp = vsi->ifp;
struct tx_ring *txr = &que->txr;
u32 first, last, head, done, processed;
struct ixl_tx_buf *buf;
@@ -857,7 +855,6 @@ ixl_txeof(struct ixl_queue *que)
tx_desc = &txr->base[first];
}
++txr->packets;
- ++ifp->if_opackets;
/* See if there is more work now */
last = buf->eop_index;
if (last != -1) {
@@ -1420,7 +1417,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
** error results.
*/
if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- ifp->if_ierrors++;
rxr->discarded++;
ixl_rx_discard(rxr, i);
goto next_desc;
@@ -1529,7 +1525,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
if (eop) {
sendmp->m_pkthdr.rcvif = ifp;
/* gather stats */
- ifp->if_ipackets++;
rxr->rx_packets++;
rxr->rx_bytes += sendmp->m_pkthdr.len;
/* capture data for dynamic ITR adjustment */
@@ -1625,3 +1620,43 @@ ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
}
return;
}
+
+#if __FreeBSD_version >= 1100000
+uint64_t
+ixl_get_counter(if_t ifp, ift_counter cnt)
+{
+ struct ixl_vsi *vsi;
+
+ vsi = if_getsoftc(ifp);
+
+ switch (cnt) {
+ case IFCOUNTER_IPACKETS:
+ return (vsi->ipackets);
+ case IFCOUNTER_IERRORS:
+ return (vsi->ierrors);
+ case IFCOUNTER_OPACKETS:
+ return (vsi->opackets);
+ case IFCOUNTER_OERRORS:
+ return (vsi->oerrors);
+ case IFCOUNTER_COLLISIONS:
+ /* Collisions cannot occur in full-duplex 10G/40G Ethernet */
+ return (0);
+ case IFCOUNTER_IBYTES:
+ return (vsi->ibytes);
+ case IFCOUNTER_OBYTES:
+ return (vsi->obytes);
+ case IFCOUNTER_IMCASTS:
+ return (vsi->imcasts);
+ case IFCOUNTER_OMCASTS:
+ return (vsi->omcasts);
+ case IFCOUNTER_IQDROPS:
+ return (vsi->iqdrops);
+ case IFCOUNTER_OQDROPS:
+ return (vsi->oqdrops);
+ case IFCOUNTER_NOPROTO:
+ return (vsi->noproto);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
+#endif
diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c
index 1f912b5..ef69a82 100644
--- a/sys/dev/ixl/ixlvc.c
+++ b/sys/dev/ixl/ixlvc.c
@@ -837,22 +837,33 @@ ixlv_request_stats(struct ixlv_sc *sc)
void
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
{
- struct ifnet *ifp = sc->vsi.ifp;
+ struct ixl_vsi *vsi;
+ uint64_t tx_discards;
+ int i;
- ifp->if_ipackets = es->rx_unicast +
+ vsi = &sc->vsi;
+
+ tx_discards = es->tx_discards;
+ for (i = 0; i < sc->vsi.num_queues; i++)
+ tx_discards += sc->vsi.queues[i].txr.br->br_drops;
+
+ /* Update ifnet stats */
+ IXL_SET_IPACKETS(vsi, es->rx_unicast +
es->rx_multicast +
- es->rx_broadcast;
- ifp->if_opackets = es->tx_unicast +
+ es->rx_broadcast);
+ IXL_SET_OPACKETS(vsi, es->tx_unicast +
es->tx_multicast +
- es->tx_broadcast;
- ifp->if_ibytes = es->rx_bytes;
- ifp->if_obytes = es->tx_bytes;
- ifp->if_imcasts = es->rx_multicast;
- ifp->if_omcasts = es->tx_multicast;
-
- ifp->if_oerrors = es->tx_errors;
- ifp->if_iqdrops = es->rx_discards;
- ifp->if_noproto = es->rx_unknown_protocol;
+ es->tx_broadcast);
+ IXL_SET_IBYTES(vsi, es->rx_bytes);
+ IXL_SET_OBYTES(vsi, es->tx_bytes);
+ IXL_SET_IMCASTS(vsi, es->rx_multicast);
+ IXL_SET_OMCASTS(vsi, es->tx_multicast);
+
+ IXL_SET_OERRORS(vsi, es->tx_errors);
+ IXL_SET_IQDROPS(vsi, es->rx_discards);
+ IXL_SET_OQDROPS(vsi, tx_discards);
+ IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
+ IXL_SET_COLLISIONS(vsi, 0);
sc->vsi.eth_stats = *es;
}
diff --git a/sys/dev/nfe/if_nfe.c b/sys/dev/nfe/if_nfe.c
index fda21c0..1675f03 100644
--- a/sys/dev/nfe/if_nfe.c
+++ b/sys/dev/nfe/if_nfe.c
@@ -1630,7 +1630,7 @@ nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
}
#ifdef DEVICE_POLLING
-static poll_handler_drv_t nfe_poll;
+static poll_handler_t nfe_poll;
static int
@@ -1782,7 +1782,7 @@ nfe_ioctl(if_t ifp, u_long cmd, caddr_t data)
#ifdef DEVICE_POLLING
if ((mask & IFCAP_POLLING) != 0) {
if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
- error = ether_poll_register_drv(nfe_poll, ifp);
+ error = ether_poll_register(nfe_poll, ifp);
if (error)
break;
NFE_LOCK(sc);
diff --git a/sys/dev/sfxge/common/efsys.h b/sys/dev/sfxge/common/efsys.h
index 8fd1267..433c40b 100644
--- a/sys/dev/sfxge/common/efsys.h
+++ b/sys/dev/sfxge/common/efsys.h
@@ -53,44 +53,44 @@ extern "C" {
#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 0
#if _BYTE_ORDER == _BIG_ENDIAN
-#define EFSYS_IS_BIG_ENDIAN 1
-#define EFSYS_IS_LITTLE_ENDIAN 0
+#define EFSYS_IS_BIG_ENDIAN 1
+#define EFSYS_IS_LITTLE_ENDIAN 0
#elif _BYTE_ORDER == _LITTLE_ENDIAN
-#define EFSYS_IS_BIG_ENDIAN 0
-#define EFSYS_IS_LITTLE_ENDIAN 1
+#define EFSYS_IS_BIG_ENDIAN 0
+#define EFSYS_IS_LITTLE_ENDIAN 1
#endif
#include "efx_types.h"
/* Common code requires this */
#if __FreeBSD_version < 800068
-#define memmove(d, s, l) bcopy(s, d, l)
+#define memmove(d, s, l) bcopy(s, d, l)
#endif
-
+
/* FreeBSD equivalents of Solaris things */
#ifndef _NOTE
-#define _NOTE(s)
+#define _NOTE(s)
#endif
#ifndef B_FALSE
-#define B_FALSE FALSE
+#define B_FALSE FALSE
#endif
#ifndef B_TRUE
-#define B_TRUE TRUE
+#define B_TRUE TRUE
#endif
#ifndef IS_P2ALIGNED
-#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
+#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif
#ifndef P2ROUNDUP
-#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
+#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
#endif
#ifndef ISP2
-#define ISP2(x) (((x) & ((x) - 1)) == 0)
+#define ISP2(x) (((x) & ((x) - 1)) == 0)
#endif
-#define ENOTACTIVE EINVAL
+#define ENOTACTIVE EINVAL
/* Memory type to use on FreeBSD */
MALLOC_DECLARE(M_SFXGE);
@@ -242,7 +242,7 @@ sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
#define EFSYS_OPT_PHY_PROPS 0
#define EFSYS_OPT_PHY_BIST 1
#define EFSYS_OPT_PHY_LED_CONTROL 1
-#define EFSYS_OPT_PHY_FLAGS 0
+#define EFSYS_OPT_PHY_FLAGS 0
#define EFSYS_OPT_VPD 1
#define EFSYS_OPT_NVRAM 1
@@ -256,8 +256,8 @@ sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
#define EFSYS_OPT_WOL 1
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 1
-#define EFSYS_OPT_FILTER 0
-#define EFSYS_OPT_RX_SCATTER 0
+#define EFSYS_OPT_FILTER 0
+#define EFSYS_OPT_RX_SCATTER 0
#define EFSYS_OPT_RX_HDR_SPLIT 0
#define EFSYS_OPT_EV_PREFETCH 0
@@ -272,7 +272,7 @@ typedef struct __efsys_identifier_s efsys_identifier_t;
#ifndef DTRACE_PROBE
-#define EFSYS_PROBE(_name)
+#define EFSYS_PROBE(_name)
#define EFSYS_PROBE1(_name, _type1, _arg1)
@@ -815,16 +815,16 @@ extern void sfxge_err(efsys_identifier_t *, unsigned int,
panic(#_exp); \
} while (0)
-#define EFSYS_ASSERT3(_x, _op, _y, _t) do { \
+#define EFSYS_ASSERT3(_x, _op, _y, _t) do { \
const _t __x = (_t)(_x); \
const _t __y = (_t)(_y); \
if (!(__x _op __y)) \
- panic("assertion failed at %s:%u", __FILE__, __LINE__); \
+ panic("assertion failed at %s:%u", __FILE__, __LINE__); \
} while(0)
-#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
-#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
-#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
+#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
+#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
+#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
#ifdef __cplusplus
}
diff --git a/sys/dev/sfxge/sfxge.c b/sys/dev/sfxge/sfxge.c
index 8f2c7bb..c0850ec 100644
--- a/sys/dev/sfxge/sfxge.c
+++ b/sys/dev/sfxge/sfxge.c
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <sys/taskqueue.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
+#include <sys/syslog.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
@@ -57,16 +58,35 @@ __FBSDID("$FreeBSD$");
#include "sfxge.h"
#include "sfxge_rx.h"
-#define SFXGE_CAP (IFCAP_VLAN_MTU | \
+#define SFXGE_CAP (IFCAP_VLAN_MTU | \
IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | \
IFCAP_JUMBO_MTU | IFCAP_LRO | \
IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE)
-#define SFXGE_CAP_ENABLE SFXGE_CAP
-#define SFXGE_CAP_FIXED (IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | \
+#define SFXGE_CAP_ENABLE SFXGE_CAP
+#define SFXGE_CAP_FIXED (IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | \
IFCAP_JUMBO_MTU | IFCAP_LINKSTATE)
MALLOC_DEFINE(M_SFXGE, "sfxge", "Solarflare 10GigE driver");
+
+SYSCTL_NODE(_hw, OID_AUTO, sfxge, CTLFLAG_RD, 0,
+ "SFXGE driver parameters");
+
+#define SFXGE_PARAM_RX_RING SFXGE_PARAM(rx_ring)
+static int sfxge_rx_ring_entries = SFXGE_NDESCS;
+TUNABLE_INT(SFXGE_PARAM_RX_RING, &sfxge_rx_ring_entries);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, rx_ring, CTLFLAG_RDTUN,
+ &sfxge_rx_ring_entries, 0,
+ "Maximum number of descriptors in a receive ring");
+
+#define SFXGE_PARAM_TX_RING SFXGE_PARAM(tx_ring)
+static int sfxge_tx_ring_entries = SFXGE_NDESCS;
+TUNABLE_INT(SFXGE_PARAM_TX_RING, &sfxge_tx_ring_entries);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN,
+ &sfxge_tx_ring_entries, 0,
+ "Maximum number of descriptors in a transmit ring");
+
+
static void
sfxge_reset(void *arg, int npending);
@@ -78,7 +98,7 @@ sfxge_start(struct sfxge_softc *sc)
sx_assert(&sc->softc_lock, LA_XLOCKED);
if (sc->init_state == SFXGE_STARTED)
- return 0;
+ return (0);
if (sc->init_state != SFXGE_REGISTERED) {
rc = EINVAL;
@@ -223,7 +243,7 @@ sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
ifp->if_mtu = ifr->ifr_mtu;
error = sfxge_start(sc);
sx_xunlock(&sc->softc_lock);
- if (error) {
+ if (error != 0) {
ifp->if_flags &= ~IFF_UP;
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
if_down(ifp);
@@ -287,7 +307,7 @@ sfxge_ifnet_fini(struct ifnet *ifp)
if_free(ifp);
}
-static int
+static int
sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
@@ -314,8 +334,8 @@ sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc)
ifp->if_qflush = sfxge_if_qflush;
#else
ifp->if_start = sfxge_if_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, SFXGE_NDESCS - 1);
- ifp->if_snd.ifq_drv_maxlen = SFXGE_NDESCS - 1;
+ IFQ_SET_MAXLEN(&ifp->if_snd, sc->txq_entries - 1);
+ ifp->if_snd.ifq_drv_maxlen = sc->txq_entries - 1;
IFQ_SET_READY(&ifp->if_snd);
mtx_init(&sc->tx_lock, "txq", NULL, MTX_DEF);
@@ -324,11 +344,11 @@ sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc)
if ((rc = sfxge_port_ifmedia_init(sc)) != 0)
goto fail;
- return 0;
+ return (0);
fail:
ether_ifdetach(sc->ifnet);
- return rc;
+ return (rc);
}
void
@@ -347,7 +367,7 @@ sfxge_bar_init(struct sfxge_softc *sc)
{
efsys_bar_t *esbp = &sc->bar;
- esbp->esb_rid = PCIR_BAR(EFX_MEM_BAR);
+ esbp->esb_rid = PCIR_BAR(EFX_MEM_BAR);
if ((esbp->esb_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
&esbp->esb_rid, RF_ACTIVE)) == NULL) {
device_printf(sc->dev, "Cannot allocate BAR region %d\n",
@@ -386,7 +406,7 @@ sfxge_create(struct sfxge_softc *sc)
device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "stats", CTLFLAG_RD, NULL, "Statistics");
- if (!sc->stats_node) {
+ if (sc->stats_node == NULL) {
error = ENOMEM;
goto fail;
}
@@ -414,6 +434,26 @@ sfxge_create(struct sfxge_softc *sc)
goto fail3;
sc->enp = enp;
+ if (!ISP2(sfxge_rx_ring_entries) ||
+ !(sfxge_rx_ring_entries & EFX_RXQ_NDESCS_MASK)) {
+ log(LOG_ERR, "%s=%d must be power of 2 from %u to %u",
+ SFXGE_PARAM_RX_RING, sfxge_rx_ring_entries,
+ EFX_RXQ_MINNDESCS, EFX_RXQ_MAXNDESCS);
+ error = EINVAL;
+ goto fail_rx_ring_entries;
+ }
+ sc->rxq_entries = sfxge_rx_ring_entries;
+
+ if (!ISP2(sfxge_tx_ring_entries) ||
+ !(sfxge_tx_ring_entries & EFX_TXQ_NDESCS_MASK)) {
+ log(LOG_ERR, "%s=%d must be power of 2 from %u to %u",
+ SFXGE_PARAM_TX_RING, sfxge_tx_ring_entries,
+ EFX_TXQ_MINNDESCS, EFX_TXQ_MAXNDESCS);
+ error = EINVAL;
+ goto fail_tx_ring_entries;
+ }
+ sc->txq_entries = sfxge_tx_ring_entries;
+
/* Initialize MCDI to talk to the microcontroller. */
if ((error = sfxge_mcdi_init(sc)) != 0)
goto fail4;
@@ -486,6 +526,8 @@ fail5:
sfxge_mcdi_fini(sc);
fail4:
+fail_tx_ring_entries:
+fail_rx_ring_entries:
sc->enp = NULL;
efx_nic_destroy(enp);
mtx_destroy(&sc->enp_lock);
@@ -554,14 +596,14 @@ sfxge_vpd_handler(SYSCTL_HANDLER_ARGS)
struct sfxge_softc *sc = arg1;
efx_vpd_value_t value;
int rc;
-
+
value.evv_tag = arg2 >> 16;
value.evv_keyword = arg2 & 0xffff;
if ((rc = efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value))
!= 0)
- return rc;
+ return (rc);
- return SYSCTL_OUT(req, value.evv_value, value.evv_length);
+ return (SYSCTL_OUT(req, value.evv_value, value.evv_length));
}
static void
@@ -623,12 +665,12 @@ sfxge_vpd_init(struct sfxge_softc *sc)
for (keyword[1] = 'A'; keyword[1] <= 'Z'; keyword[1]++)
sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
- return 0;
-
+ return (0);
+
fail2:
free(sc->vpd_data, M_SFXGE);
fail:
- return rc;
+ return (rc);
}
static void
@@ -745,12 +787,12 @@ sfxge_probe(device_t dev)
pci_device_id = pci_get_device(dev);
rc = efx_family(pci_vendor_id, pci_device_id, &family);
- if (rc)
- return ENXIO;
+ if (rc != 0)
+ return (ENXIO);
KASSERT(family == EFX_FAMILY_SIENA, ("impossible controller family"));
device_set_desc(dev, "Solarflare SFC9000 family");
- return 0;
+ return (0);
}
static device_method_t sfxge_methods[] = {
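
The new hw.sfxge.rx_ring and hw.sfxge.tx_ring knobs follow the standard loader-tunable pattern of that era: a static variable with a default, TUNABLE_INT() so loader.conf can override it, and a read-only SYSCTL_INT() under the driver's own node so the active value is visible at run time. The same skeleton for a hypothetical foo(4) knob:

SYSCTL_NODE(_hw, OID_AUTO, foo, CTLFLAG_RD, 0, "foo driver parameters");

static int foo_ring_entries = 1024;		/* built-in default */
TUNABLE_INT("hw.foo.ring", &foo_ring_entries);	/* picked up from loader.conf */
SYSCTL_INT(_hw_foo, OID_AUTO, ring, CTLFLAG_RDTUN,
    &foo_ring_entries, 0, "Number of descriptors per ring");
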
diff --git a/sys/dev/sfxge/sfxge.h b/sys/dev/sfxge/sfxge.h
index d73d150..69b062a 100644
--- a/sys/dev/sfxge/sfxge.h
+++ b/sys/dev/sfxge/sfxge.h
@@ -30,7 +30,7 @@
*/
#ifndef _SFXGE_H
-#define _SFXGE_H
+#define _SFXGE_H
#include <sys/param.h>
#include <sys/kernel.h>
@@ -53,43 +53,45 @@
/* This should be right on most machines the driver will be used on, and
* we needn't care too much about wasting a few KB per interface.
*/
-#define CACHE_LINE_SIZE 128
+#define CACHE_LINE_SIZE 128
#endif
#ifndef IFCAP_LINKSTATE
-#define IFCAP_LINKSTATE 0
+#define IFCAP_LINKSTATE 0
#endif
#ifndef IFCAP_VLAN_HWTSO
-#define IFCAP_VLAN_HWTSO 0
+#define IFCAP_VLAN_HWTSO 0
#endif
#ifndef IFM_10G_T
-#define IFM_10G_T IFM_UNKNOWN
+#define IFM_10G_T IFM_UNKNOWN
#endif
#ifndef IFM_10G_KX4
-#define IFM_10G_KX4 IFM_10G_CX4
+#define IFM_10G_KX4 IFM_10G_CX4
#endif
#if __FreeBSD_version >= 800054
/* Networking core is multiqueue aware. We can manage our own TX
* queues and use m_pkthdr.flowid.
*/
-#define SFXGE_HAVE_MQ
+#define SFXGE_HAVE_MQ
#endif
#if (__FreeBSD_version >= 800501 && __FreeBSD_version < 900000) || \
__FreeBSD_version >= 900003
-#define SFXGE_HAVE_DESCRIBE_INTR
+#define SFXGE_HAVE_DESCRIBE_INTR
#endif
#ifdef IFM_ETH_RXPAUSE
-#define SFXGE_HAVE_PAUSE_MEDIAOPTS
+#define SFXGE_HAVE_PAUSE_MEDIAOPTS
#endif
#ifndef CTLTYPE_U64
-#define CTLTYPE_U64 CTLTYPE_QUAD
+#define CTLTYPE_U64 CTLTYPE_QUAD
#endif
#include "sfxge_rx.h"
#include "sfxge_tx.h"
-#define SFXGE_IP_ALIGN 2
+#define ROUNDUP_POW_OF_TWO(_n) (1ULL << flsl((_n) - 1))
-#define SFXGE_ETHERTYPE_LOOPBACK 0x9000 /* Xerox loopback */
+#define SFXGE_IP_ALIGN 2
+
+#define SFXGE_ETHERTYPE_LOOPBACK 0x9000 /* Xerox loopback */
enum sfxge_evq_state {
SFXGE_EVQ_UNINITIALIZED = 0,
@@ -106,6 +108,7 @@ struct sfxge_evq {
enum sfxge_evq_state init_state;
unsigned int index;
+ unsigned int entries;
efsys_mem_t mem;
unsigned int buf_base_id;
@@ -121,7 +124,6 @@ struct sfxge_evq {
struct sfxge_txq **txqs;
};
-#define SFXGE_NEVS 4096
#define SFXGE_NDESCS 1024
#define SFXGE_MODERATION 30
@@ -133,9 +135,9 @@ enum sfxge_intr_state {
};
struct sfxge_intr_hdl {
- int eih_rid;
- void *eih_tag;
- struct resource *eih_res;
+ int eih_rid;
+ void *eih_tag;
+ struct resource *eih_res;
};
struct sfxge_intr {
@@ -197,9 +199,10 @@ struct sfxge_softc {
device_t dev;
struct sx softc_lock;
enum sfxge_softc_state init_state;
- struct ifnet *ifnet;
+ struct ifnet *ifnet;
unsigned int if_flags;
struct sysctl_oid *stats_node;
+ struct sysctl_oid *txqs_node;
struct task task_reset;
@@ -209,7 +212,10 @@ struct sfxge_softc {
efx_nic_t *enp;
struct mtx enp_lock;
- bus_dma_tag_t parent_dma_tag;
+ unsigned int rxq_entries;
+ unsigned int txq_entries;
+
+ bus_dma_tag_t parent_dma_tag;
efsys_bar_t bar;
struct sfxge_intr intr;
@@ -243,8 +249,12 @@ struct sfxge_softc {
#endif
};
-#define SFXGE_LINK_UP(sc) ((sc)->port.link_mode != EFX_LINK_DOWN)
-#define SFXGE_RUNNING(sc) ((sc)->ifnet->if_drv_flags & IFF_DRV_RUNNING)
+#define SFXGE_LINK_UP(sc) ((sc)->port.link_mode != EFX_LINK_DOWN)
+#define SFXGE_RUNNING(sc) ((sc)->ifnet->if_drv_flags & IFF_DRV_RUNNING)
+
+#define SFXGE_PARAM(_name) "hw.sfxge." #_name
+
+SYSCTL_DECL(_hw_sfxge);
/*
* From sfxge.c.
@@ -299,6 +309,6 @@ extern void sfxge_mac_link_update(struct sfxge_softc *sc,
extern int sfxge_mac_filter_set(struct sfxge_softc *sc);
extern int sfxge_port_ifmedia_init(struct sfxge_softc *sc);
-#define SFXGE_MAX_MTU (9 * 1024)
+#define SFXGE_MAX_MTU (9 * 1024)
#endif /* _SFXGE_H */
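
ROUNDUP_POW_OF_TWO() uses flsl() (find last set bit, 1-based) to round a queue size up to the next power of two, which lets the ring code mask indices instead of taking a modulo. A quick check of the definition, assuming _n >= 1, plus how the event-queue sizing below uses it:

/* flsl(n - 1) is the 1-based position of the highest set bit of n - 1,
 * so 1ULL << flsl(n - 1) is the smallest power of two >= n:
 *
 *   ROUNDUP_POW_OF_TWO(1024) == 1024   (flsl(1023) == 10)
 *   ROUNDUP_POW_OF_TWO(1025) == 2048   (flsl(1024) == 11)
 *   ROUNDUP_POW_OF_TWO(1)    == 1      (flsl(0)    == 0)
 */
	evq->entries = ROUNDUP_POW_OF_TWO(sc->rxq_entries + sc->txq_entries + 128);
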
diff --git a/sys/dev/sfxge/sfxge_dma.c b/sys/dev/sfxge/sfxge_dma.c
index 48d02d5..e2bf171 100644
--- a/sys/dev/sfxge/sfxge_dma.c
+++ b/sys/dev/sfxge/sfxge_dma.c
@@ -50,7 +50,7 @@ sfxge_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
addr = arg;
- if (error) {
+ if (error != 0) {
*addr = 0;
return;
}
@@ -82,7 +82,7 @@ retry:
return (0);
}
#if defined(__i386__) || defined(__amd64__)
- while (m && seg_count < maxsegs) {
+ while (m != NULL && seg_count < maxsegs) {
/*
* firmware doesn't like empty segments
*/
@@ -164,11 +164,14 @@ sfxge_dma_alloc(struct sfxge_softc *sc, bus_size_t len, efsys_mem_t *esmp)
/*
* The callback gets error information about the mapping
- * and will have set our vaddr to NULL if something went
+ * and will have set esm_addr to 0 if something went
* wrong.
*/
- if (vaddr == NULL)
+ if (esmp->esm_addr == 0) {
+ bus_dmamem_free(esmp->esm_tag, esmp->esm_base, esmp->esm_map);
+ bus_dma_tag_destroy(esmp->esm_tag);
return (ENOMEM);
+ }
esmp->esm_base = vaddr;
@@ -197,7 +200,7 @@ sfxge_dma_init(struct sfxge_softc *sc)
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lock, lockarg */
- &sc->parent_dma_tag)) {
+ &sc->parent_dma_tag) != 0) {
device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
return (ENOMEM);
}
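
sfxge_dma_alloc() detects mapping failure by checking the physical address that the busdma callback wrote, since bus_dmamap_load() for a coherent allocation reports errors only through the callback; the hunk above also adds the missing unwind of the allocation and tag on that path. A generic sketch of the idiom, with hypothetical foo names:

static void
foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *addrp = arg;

	/* On error the segment array is not valid; report 0 to the caller. */
	*addrp = (error != 0) ? 0 : segs[0].ds_addr;
}

	/* Caller: */
	bus_addr_t paddr = 0;

	if (bus_dmamap_load(tag, map, vaddr, len, foo_dma_cb, &paddr,
	    BUS_DMA_NOWAIT) != 0 || paddr == 0) {
		/* unwind: bus_dmamem_free(), bus_dma_tag_destroy(), ... */
		return (ENOMEM);
	}
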
diff --git a/sys/dev/sfxge/sfxge_ev.c b/sys/dev/sfxge/sfxge_ev.c
index 77f0a74..7c0aa7f 100644
--- a/sys/dev/sfxge/sfxge_ev.c
+++ b/sys/dev/sfxge/sfxge_ev.c
@@ -102,7 +102,7 @@ sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
if (rxq->init_state != SFXGE_RXQ_STARTED)
goto done;
- expected = rxq->pending++ & (SFXGE_NDESCS - 1);
+ expected = rxq->pending++ & rxq->ptr_mask;
if (id != expected) {
evq->exception = B_TRUE;
@@ -226,7 +226,7 @@ sfxge_get_txq_by_label(struct sfxge_evq *evq, enum sfxge_txq_type label)
KASSERT((evq->index == 0 && label < SFXGE_TXQ_NTYPES) ||
(label == SFXGE_TXQ_IP_TCP_UDP_CKSUM), ("unexpected txq label"));
index = (evq->index == 0) ? label : (evq->index - 1 + SFXGE_TXQ_NTYPES);
- return evq->sc->txq[index];
+ return (evq->sc->txq[index]);
}
static boolean_t
@@ -247,10 +247,10 @@ sfxge_ev_tx(void *arg, uint32_t label, uint32_t id)
if (txq->init_state != SFXGE_TXQ_STARTED)
goto done;
- stop = (id + 1) & (SFXGE_NDESCS - 1);
- id = txq->pending & (SFXGE_NDESCS - 1);
+ stop = (id + 1) & txq->ptr_mask;
+ id = txq->pending & txq->ptr_mask;
- delta = (stop >= id) ? (stop - id) : (SFXGE_NDESCS - id + stop);
+ delta = (stop >= id) ? (stop - id) : (txq->entries - id + stop);
txq->pending += delta;
evq->tx_done++;
@@ -443,7 +443,7 @@ sfxge_ev_stat_handler(SYSCTL_HANDLER_ARGS)
sfxge_ev_stat_update(sc);
- return SYSCTL_OUT(req, &sc->ev_stats[id], sizeof(sc->ev_stats[id]));
+ return (SYSCTL_OUT(req, &sc->ev_stats[id], sizeof(sc->ev_stats[id])));
}
static void
@@ -493,7 +493,7 @@ sfxge_int_mod_handler(SYSCTL_HANDLER_ARGS)
sx_xlock(&sc->softc_lock);
- if (req->newptr) {
+ if (req->newptr != NULL) {
if ((error = SYSCTL_IN(req, &moderation, sizeof(moderation)))
!= 0)
goto out;
@@ -520,14 +520,14 @@ sfxge_int_mod_handler(SYSCTL_HANDLER_ARGS)
out:
sx_xunlock(&sc->softc_lock);
- return error;
+ return (error);
}
static boolean_t
sfxge_ev_initialized(void *arg)
{
struct sfxge_evq *evq;
-
+
evq = (struct sfxge_evq *)arg;
KASSERT(evq->init_state == SFXGE_EVQ_STARTING,
@@ -635,7 +635,7 @@ sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
efx_ev_qdestroy(evq->common);
efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
- EFX_EVQ_NBUFS(SFXGE_NEVS));
+ EFX_EVQ_NBUFS(evq->entries));
mtx_unlock(&evq->lock);
}
@@ -654,15 +654,15 @@ sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index)
("evq->init_state != SFXGE_EVQ_INITIALIZED"));
/* Clear all events. */
- (void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(SFXGE_NEVS));
+ (void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
/* Program the buffer table. */
if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp,
- EFX_EVQ_NBUFS(SFXGE_NEVS))) != 0)
- return rc;
+ EFX_EVQ_NBUFS(evq->entries))) != 0)
+ return (rc);
/* Create the common code event queue. */
- if ((rc = efx_ev_qcreate(sc->enp, index, esmp, SFXGE_NEVS,
+ if ((rc = efx_ev_qcreate(sc->enp, index, esmp, evq->entries,
evq->buf_base_id, &evq->common)) != 0)
goto fail;
@@ -705,7 +705,7 @@ fail2:
efx_ev_qdestroy(evq->common);
fail:
efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
- EFX_EVQ_NBUFS(SFXGE_NEVS));
+ EFX_EVQ_NBUFS(evq->entries));
return (rc);
}
@@ -746,7 +746,7 @@ sfxge_ev_start(struct sfxge_softc *sc)
/* Initialize the event module */
if ((rc = efx_ev_init(sc->enp)) != 0)
- return rc;
+ return (rc);
/* Start the event queues */
for (index = 0; index < intr->n_alloc; index++) {
@@ -802,15 +802,31 @@ sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
sc->evq[index] = evq;
esmp = &evq->mem;
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions.
+ * There are three tx queues in the first event queue and one in
+ * each of the others.

+ */
+ if (index == 0)
+ evq->entries =
+ ROUNDUP_POW_OF_TWO(sc->rxq_entries +
+ 3 * sc->txq_entries +
+ 128);
+ else
+ evq->entries =
+ ROUNDUP_POW_OF_TWO(sc->rxq_entries +
+ sc->txq_entries +
+ 128);
+
/* Initialise TX completion list */
evq->txqs = &evq->txq;
/* Allocate DMA space. */
- if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(SFXGE_NEVS), esmp)) != 0)
+ if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(evq->entries), esmp)) != 0)
return (rc);
/* Allocate buffer table entries. */
- sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(SFXGE_NEVS),
+ sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(evq->entries),
&evq->buf_base_id);
mtx_init(&evq->lock, "evq", NULL, MTX_DEF);
diff --git a/sys/dev/sfxge/sfxge_intr.c b/sys/dev/sfxge/sfxge_intr.c
index 17f5fcd..2f40603 100644
--- a/sys/dev/sfxge/sfxge_intr.c
+++ b/sys/dev/sfxge/sfxge_intr.c
@@ -70,19 +70,19 @@ sfxge_intr_line_filter(void *arg)
("intr->type != EFX_INTR_LINE"));
if (intr->state != SFXGE_INTR_STARTED)
- return FILTER_STRAY;
+ return (FILTER_STRAY);
(void)efx_intr_status_line(enp, &fatal, &qmask);
if (fatal) {
(void) efx_intr_disable(enp);
(void) efx_intr_fatal(enp);
- return FILTER_HANDLED;
+ return (FILTER_HANDLED);
}
if (qmask != 0) {
intr->zero_count = 0;
- return FILTER_SCHEDULE_THREAD;
+ return (FILTER_SCHEDULE_THREAD);
}
/* SF bug 15783: If the function is not asserting its IRQ and
@@ -97,13 +97,13 @@ sfxge_intr_line_filter(void *arg)
if (intr->zero_count++ == 0) {
if (evq->init_state == SFXGE_EVQ_STARTED) {
if (efx_ev_qpending(evq->common, evq->read_ptr))
- return FILTER_SCHEDULE_THREAD;
+ return (FILTER_SCHEDULE_THREAD);
efx_ev_qprime(evq->common, evq->read_ptr);
- return FILTER_HANDLED;
+ return (FILTER_HANDLED);
}
}
- return FILTER_STRAY;
+ return (FILTER_STRAY);
}
static void
@@ -175,7 +175,7 @@ sfxge_intr_bus_enable(struct sfxge_softc *sc)
default:
KASSERT(0, ("Invalid interrupt type"));
- return EINVAL;
+ return (EINVAL);
}
/* Try to add the handlers */
@@ -254,7 +254,7 @@ sfxge_intr_alloc(struct sfxge_softc *sc, int count)
table[i].eih_res = res;
}
- if (error) {
+ if (error != 0) {
count = i - 1;
for (i = 0; i < count; i++)
bus_release_resource(dev, SYS_RES_IRQ,
@@ -349,7 +349,7 @@ sfxge_intr_setup_msi(struct sfxge_softc *sc)
if (count == 0)
return (EINVAL);
- if ((error = pci_alloc_msi(dev, &count)) != 0)
+ if ((error = pci_alloc_msi(dev, &count)) != 0)
return (ENOMEM);
/* Allocate interrupt handler. */
@@ -424,7 +424,7 @@ void
sfxge_intr_stop(struct sfxge_softc *sc)
{
struct sfxge_intr *intr;
-
+
intr = &sc->intr;
KASSERT(intr->state == SFXGE_INTR_STARTED,
diff --git a/sys/dev/sfxge/sfxge_port.c b/sys/dev/sfxge/sfxge_port.c
index abd2924..6e28d7c 100644
--- a/sys/dev/sfxge/sfxge_port.c
+++ b/sys/dev/sfxge/sfxge_port.c
@@ -74,7 +74,7 @@ sfxge_mac_stat_update(struct sfxge_softc *sc)
/* Try to update the cached counters */
if ((rc = efx_mac_stats_update(sc->enp, esmp,
- port->mac_stats.decode_buf, NULL)) != EAGAIN)
+ port->mac_stats.decode_buf, NULL)) != EAGAIN)
goto out;
DELAY(100);
@@ -83,7 +83,7 @@ sfxge_mac_stat_update(struct sfxge_softc *sc)
rc = ETIMEDOUT;
out:
mtx_unlock(&port->lock);
- return rc;
+ return (rc);
}
static int
@@ -94,11 +94,11 @@ sfxge_mac_stat_handler(SYSCTL_HANDLER_ARGS)
int rc;
if ((rc = sfxge_mac_stat_update(sc)) != 0)
- return rc;
+ return (rc);
- return SYSCTL_OUT(req,
+ return (SYSCTL_OUT(req,
(uint64_t *)sc->port.mac_stats.decode_buf + id,
- sizeof(uint64_t));
+ sizeof(uint64_t)));
}
static void
@@ -130,9 +130,9 @@ sfxge_port_wanted_fc(struct sfxge_softc *sc)
struct ifmedia_entry *ifm = sc->media.ifm_cur;
if (ifm->ifm_media == (IFM_ETHER | IFM_AUTO))
- return EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
- return ((ifm->ifm_media & IFM_ETH_RXPAUSE) ? EFX_FCNTL_RESPOND : 0) |
- ((ifm->ifm_media & IFM_ETH_TXPAUSE) ? EFX_FCNTL_GENERATE : 0);
+ return (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE);
+ return (((ifm->ifm_media & IFM_ETH_RXPAUSE) ? EFX_FCNTL_RESPOND : 0) |
+ ((ifm->ifm_media & IFM_ETH_TXPAUSE) ? EFX_FCNTL_GENERATE : 0));
}
static unsigned int
@@ -150,13 +150,13 @@ sfxge_port_link_fc_ifm(struct sfxge_softc *sc)
static unsigned int
sfxge_port_wanted_fc(struct sfxge_softc *sc)
{
- return sc->port.wanted_fc;
+ return (sc->port.wanted_fc);
}
static unsigned int
sfxge_port_link_fc_ifm(struct sfxge_softc *sc)
{
- return 0;
+ return (0);
}
static int
@@ -172,7 +172,7 @@ sfxge_port_wanted_fc_handler(SYSCTL_HANDLER_ARGS)
mtx_lock(&port->lock);
- if (req->newptr) {
+ if (req->newptr != NULL) {
if ((error = SYSCTL_IN(req, &fcntl, sizeof(fcntl))) != 0)
goto out;
@@ -220,14 +220,14 @@ sfxge_port_link_fc_handler(SYSCTL_HANDLER_ARGS)
#endif /* SFXGE_HAVE_PAUSE_MEDIAOPTS */
-static const u_long sfxge_link_baudrate[EFX_LINK_NMODES] = {
+static const uint64_t sfxge_link_baudrate[EFX_LINK_NMODES] = {
[EFX_LINK_10HDX] = IF_Mbps(10),
[EFX_LINK_10FDX] = IF_Mbps(10),
[EFX_LINK_100HDX] = IF_Mbps(100),
[EFX_LINK_100FDX] = IF_Mbps(100),
[EFX_LINK_1000HDX] = IF_Gbps(1),
[EFX_LINK_1000FDX] = IF_Gbps(1),
- [EFX_LINK_10000FDX] = MIN(IF_Gbps(10ULL), ULONG_MAX),
+ [EFX_LINK_10000FDX] = IF_Gbps(10),
};
void
@@ -235,7 +235,7 @@ sfxge_mac_link_update(struct sfxge_softc *sc, efx_link_mode_t mode)
{
struct sfxge_port *port;
int link_state;
-
+
port = &sc->port;
if (port->link_mode == mode)
@@ -289,7 +289,7 @@ sfxge_mac_filter_set_locked(struct sfxge_softc *sc)
/* Set promisc-unicast and broadcast filter bits */
if ((rc = efx_mac_filter_set(enp, !!(ifp->if_flags & IFF_PROMISC),
B_TRUE)) != 0)
- return rc;
+ return (rc);
/* Set multicast hash filter */
if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
@@ -311,7 +311,7 @@ sfxge_mac_filter_set_locked(struct sfxge_softc *sc)
}
if_maddr_runlock(ifp);
}
- return efx_mac_hash_set(enp, bucket);
+ return (efx_mac_hash_set(enp, bucket));
}
int
@@ -336,7 +336,7 @@ sfxge_mac_filter_set(struct sfxge_softc *sc)
else
rc = 0;
mtx_unlock(&port->lock);
- return rc;
+ return (rc);
}
void
@@ -413,7 +413,7 @@ sfxge_port_start(struct sfxge_softc *sc)
/* Update MAC stats by DMA every second */
if ((rc = efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf,
- 1000, B_FALSE)) != 0)
+ 1000, B_FALSE)) != 0)
goto fail2;
if ((rc = efx_mac_drain(enp, B_FALSE)) != 0)
@@ -435,7 +435,7 @@ fail4:
(void)efx_mac_drain(enp, B_TRUE);
fail3:
(void)efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf,
- 0, B_FALSE);
+ 0, B_FALSE);
fail2:
efx_port_fini(sc->enp);
fail:
@@ -488,7 +488,7 @@ sfxge_phy_stat_update(struct sfxge_softc *sc)
rc = ETIMEDOUT;
out:
mtx_unlock(&port->lock);
- return rc;
+ return (rc);
}
static int
@@ -499,11 +499,11 @@ sfxge_phy_stat_handler(SYSCTL_HANDLER_ARGS)
int rc;
if ((rc = sfxge_phy_stat_update(sc)) != 0)
- return rc;
+ return (rc);
- return SYSCTL_OUT(req,
+ return (SYSCTL_OUT(req,
(uint32_t *)sc->port.phy_stats.decode_buf + id,
- sizeof(uint32_t));
+ sizeof(uint32_t)));
}
static void
@@ -619,7 +619,7 @@ fail:
free(port->phy_stats.decode_buf, M_SFXGE);
(void)mtx_destroy(&port->lock);
port->sc = NULL;
- return rc;
+ return (rc);
}
static int sfxge_link_mode[EFX_PHY_MEDIA_NTYPES][EFX_LINK_NMODES] = {
@@ -697,9 +697,9 @@ sfxge_media_change(struct ifnet *ifp)
rc = efx_phy_adv_cap_set(sc->enp, ifm->ifm_data);
out:
- sx_xunlock(&sc->softc_lock);
+ sx_xunlock(&sc->softc_lock);
- return rc;
+ return (rc);
}
int sfxge_port_ifmedia_init(struct sfxge_softc *sc)
@@ -788,7 +788,7 @@ int sfxge_port_ifmedia_init(struct sfxge_softc *sc)
best_mode_ifm = mode_ifm;
}
- if (best_mode_ifm)
+ if (best_mode_ifm != 0)
ifmedia_set(&sc->media, best_mode_ifm);
/* Now discard port state until interface is started. */
@@ -796,5 +796,5 @@ int sfxge_port_ifmedia_init(struct sfxge_softc *sc)
out2:
efx_nic_fini(sc->enp);
out:
- return rc;
+ return (rc);
}
diff --git a/sys/dev/sfxge/sfxge_rx.c b/sys/dev/sfxge/sfxge_rx.c
index 7209c38..66083d8 100644
--- a/sys/dev/sfxge/sfxge_rx.c
+++ b/sys/dev/sfxge/sfxge_rx.c
@@ -54,8 +54,7 @@ __FBSDID("$FreeBSD$");
#include "sfxge.h"
#include "sfxge_rx.h"
-#define RX_REFILL_THRESHOLD (EFX_RXQ_LIMIT(SFXGE_NDESCS) * 9 / 10)
-#define RX_REFILL_THRESHOLD_2 (RX_REFILL_THRESHOLD / 2)
+#define RX_REFILL_THRESHOLD(_entries) (EFX_RXQ_LIMIT(_entries) * 9 / 10)
/* Size of the LRO hash table. Must be a power of 2. A larger table
* means we can accelerate a larger number of streams.
@@ -87,10 +86,10 @@ static int lro_slow_start_packets = 2000;
static int lro_loss_packets = 20;
/* Flags for sfxge_lro_conn::l2_id; must not collide with EVL_VLID_MASK */
-#define SFXGE_LRO_L2_ID_VLAN 0x4000
-#define SFXGE_LRO_L2_ID_IPV6 0x8000
-#define SFXGE_LRO_CONN_IS_VLAN_ENCAP(c) ((c)->l2_id & SFXGE_LRO_L2_ID_VLAN)
-#define SFXGE_LRO_CONN_IS_TCPIPV4(c) (!((c)->l2_id & SFXGE_LRO_L2_ID_IPV6))
+#define SFXGE_LRO_L2_ID_VLAN 0x4000
+#define SFXGE_LRO_L2_ID_IPV6 0x8000
+#define SFXGE_LRO_CONN_IS_VLAN_ENCAP(c) ((c)->l2_id & SFXGE_LRO_L2_ID_VLAN)
+#define SFXGE_LRO_CONN_IS_TCPIPV4(c) (!((c)->l2_id & SFXGE_LRO_L2_ID_IPV6))
/* Compare IPv6 addresses, avoiding conditional branches */
static __inline unsigned long ipv6_addr_cmp(const struct in6_addr *left,
@@ -179,12 +178,12 @@ static inline struct mbuf *sfxge_rx_alloc_mbuf(struct sfxge_softc *sc)
m = (struct mbuf *)uma_zalloc_arg(zone_mbuf, &args, M_NOWAIT);
/* Allocate (and attach) packet buffer */
- if (m && !uma_zalloc_arg(sc->rx_buffer_zone, m, M_NOWAIT)) {
+ if (m != NULL && !uma_zalloc_arg(sc->rx_buffer_zone, m, M_NOWAIT)) {
uma_zfree(zone_mbuf, m);
m = NULL;
}
- return m;
+ return (m);
}
#define SFXGE_REFILL_BATCH 64
@@ -214,11 +213,11 @@ sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
return;
rxfill = rxq->added - rxq->completed;
- KASSERT(rxfill <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
- ("rxfill > EFX_RXQ_LIMIT(SFXGE_NDESCS)"));
- ntodo = min(EFX_RXQ_LIMIT(SFXGE_NDESCS) - rxfill, target);
- KASSERT(ntodo <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
- ("ntodo > EFX_RQX_LIMIT(SFXGE_NDESCS)"));
+ KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries),
+ ("rxfill > EFX_RXQ_LIMIT(rxq->entries)"));
+ ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target);
+ KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries),
+ ("ntodo > EFX_RQX_LIMIT(rxq->entries)"));
if (ntodo == 0)
return;
@@ -231,7 +230,7 @@ sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
bus_dma_segment_t seg;
struct mbuf *m;
- id = (rxq->added + batch) & (SFXGE_NDESCS - 1);
+ id = (rxq->added + batch) & rxq->ptr_mask;
rx_desc = &rxq->queue[id];
KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));
@@ -274,7 +273,7 @@ sfxge_rx_qrefill(struct sfxge_rxq *rxq)
return;
/* Make sure the queue is full */
- sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_TRUE);
+ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_TRUE);
}
static void __sfxge_rx_deliver(struct sfxge_softc *sc, struct mbuf *m)
@@ -370,7 +369,7 @@ static void sfxge_lro_drop(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
KASSERT(!c->mbuf, ("found orphaned mbuf"));
- if (c->next_buf.mbuf) {
+ if (c->next_buf.mbuf != NULL) {
sfxge_rx_deliver(rxq->sc, &c->next_buf);
LIST_REMOVE(c, active_link);
}
@@ -510,7 +509,7 @@ sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
if (__predict_false(th_seq != c->next_seq)) {
/* Out-of-order, so start counting again. */
- if (c->mbuf)
+ if (c->mbuf != NULL)
sfxge_lro_deliver(&rxq->lro, c);
c->n_in_order_pkts -= lro_loss_packets;
c->next_seq = th_seq + data_length;
@@ -522,10 +521,10 @@ sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
now = ticks;
if (now - c->last_pkt_ticks > lro_idle_ticks) {
++rxq->lro.n_drop_idle;
- if (c->mbuf)
+ if (c->mbuf != NULL)
sfxge_lro_deliver(&rxq->lro, c);
sfxge_lro_drop(rxq, c);
- return 0;
+ return (0);
}
c->last_pkt_ticks = ticks;
@@ -537,12 +536,12 @@ sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
}
if (__predict_false(dont_merge)) {
- if (c->mbuf)
+ if (c->mbuf != NULL)
sfxge_lro_deliver(&rxq->lro, c);
if (th->th_flags & (TH_FIN | TH_RST)) {
++rxq->lro.n_drop_closed;
sfxge_lro_drop(rxq, c);
- return 0;
+ return (0);
}
goto deliver_buf_out;
}
@@ -563,11 +562,11 @@ sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
}
rx_buf->mbuf = NULL;
- return 1;
+ return (1);
deliver_buf_out:
sfxge_rx_deliver(rxq->sc, rx_buf);
- return 1;
+ return (1);
}
static void sfxge_lro_new_conn(struct sfxge_lro_state *st, uint32_t conn_hash,
@@ -621,7 +620,7 @@ sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf)
struct sfxge_lro_conn *c;
uint16_t l2_id;
uint16_t l3_proto;
- void *nh;
+ void *nh;
struct tcphdr *th;
uint32_t conn_hash;
unsigned bucket;
@@ -671,7 +670,7 @@ sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf)
continue;
if ((c->source - th->th_sport) | (c->dest - th->th_dport))
continue;
- if (c->mbuf) {
+ if (c->mbuf != NULL) {
if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
struct ip *c_iph, *iph = nh;
c_iph = c->nh;
@@ -691,7 +690,7 @@ sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf)
TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link);
TAILQ_INSERT_HEAD(&rxq->lro.conns[bucket], c, link);
- if (c->next_buf.mbuf) {
+ if (c->next_buf.mbuf != NULL) {
if (!sfxge_lro_try_merge(rxq, c))
goto deliver_now;
} else {
@@ -720,10 +719,10 @@ static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq)
while (!LIST_EMPTY(&st->active_conns)) {
c = LIST_FIRST(&st->active_conns);
- if (!c->delivered && c->mbuf)
+ if (!c->delivered && c->mbuf != NULL)
sfxge_lro_deliver(st, c);
if (sfxge_lro_try_merge(rxq, c)) {
- if (c->mbuf)
+ if (c->mbuf != NULL)
sfxge_lro_deliver(st, c);
LIST_REMOVE(c, active_link);
}
@@ -757,7 +756,7 @@ sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop)
unsigned int id;
struct sfxge_rx_sw_desc *rx_desc;
- id = completed++ & (SFXGE_NDESCS - 1);
+ id = completed++ & rxq->ptr_mask;
rx_desc = &rxq->queue[id];
m = rx_desc->mbuf;
@@ -821,8 +820,8 @@ discard:
sfxge_lro_end_of_burst(rxq);
/* Top up the queue if necessary */
- if (level < RX_REFILL_THRESHOLD)
- sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_FALSE);
+ if (level < rxq->refill_threshold)
+ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_FALSE);
}
static void
@@ -836,7 +835,7 @@ sfxge_rx_qstop(struct sfxge_softc *sc, unsigned int index)
evq = sc->evq[index];
mtx_lock(&evq->lock);
-
+
KASSERT(rxq->init_state == SFXGE_RXQ_STARTED,
("rxq not started"));
@@ -881,10 +880,10 @@ again:
rxq->loopback = 0;
/* Destroy the common code receive queue. */
- efx_rx_qdestroy(rxq->common);
+ efx_rx_qdestroy(rxq->common);
efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
- EFX_RXQ_NBUFS(SFXGE_NDESCS));
+ EFX_RXQ_NBUFS(sc->rxq_entries));
mtx_unlock(&evq->lock);
}
@@ -908,12 +907,12 @@ sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index)
/* Program the buffer table. */
if ((rc = efx_sram_buf_tbl_set(sc->enp, rxq->buf_base_id, esmp,
- EFX_RXQ_NBUFS(SFXGE_NDESCS))) != 0)
- return rc;
+ EFX_RXQ_NBUFS(sc->rxq_entries))) != 0)
+ return (rc);
/* Create the common code receive queue. */
if ((rc = efx_rx_qcreate(sc->enp, index, index, EFX_RXQ_TYPE_DEFAULT,
- esmp, SFXGE_NDESCS, rxq->buf_base_id, evq->common,
+ esmp, sc->rxq_entries, rxq->buf_base_id, evq->common,
&rxq->common)) != 0)
goto fail;
@@ -925,7 +924,7 @@ sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index)
rxq->init_state = SFXGE_RXQ_STARTED;
/* Try to fill the queue from the pool. */
- sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_FALSE);
+ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(sc->rxq_entries), B_FALSE);
mtx_unlock(&evq->lock);
@@ -933,8 +932,8 @@ sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index)
fail:
efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
- EFX_RXQ_NBUFS(SFXGE_NDESCS));
- return rc;
+ EFX_RXQ_NBUFS(sc->rxq_entries));
+ return (rc);
}
void
@@ -1105,6 +1104,9 @@ sfxge_rx_qinit(struct sfxge_softc *sc, unsigned int index)
rxq = malloc(sizeof(struct sfxge_rxq), M_SFXGE, M_ZERO | M_WAITOK);
rxq->sc = sc;
rxq->index = index;
+ rxq->entries = sc->rxq_entries;
+ rxq->ptr_mask = rxq->entries - 1;
+ rxq->refill_threshold = RX_REFILL_THRESHOLD(rxq->entries);
sc->rxq[index] = rxq;
esmp = &rxq->mem;
@@ -1112,16 +1114,16 @@ sfxge_rx_qinit(struct sfxge_softc *sc, unsigned int index)
evq = sc->evq[index];
/* Allocate and zero DMA space. */
- if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
+ if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(sc->rxq_entries), esmp)) != 0)
return (rc);
- (void)memset(esmp->esm_base, 0, EFX_RXQ_SIZE(SFXGE_NDESCS));
+ (void)memset(esmp->esm_base, 0, EFX_RXQ_SIZE(sc->rxq_entries));
/* Allocate buffer table entries. */
- sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(SFXGE_NDESCS),
+ sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(sc->rxq_entries),
&rxq->buf_base_id);
/* Allocate the context array and the flow table. */
- rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * SFXGE_NDESCS,
+ rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * sc->rxq_entries,
M_SFXGE, M_WAITOK | M_ZERO);
sfxge_lro_init(rxq);
@@ -1136,7 +1138,7 @@ static const struct {
const char *name;
size_t offset;
} sfxge_rx_stats[] = {
-#define SFXGE_RX_STAT(name, member) \
+#define SFXGE_RX_STAT(name, member) \
{ #name, offsetof(struct sfxge_rxq, member) }
SFXGE_RX_STAT(lro_merges, lro.n_merges),
SFXGE_RX_STAT(lro_bursts, lro.n_bursts),
@@ -1161,7 +1163,7 @@ sfxge_rx_stat_handler(SYSCTL_HANDLER_ARGS)
sum += *(unsigned int *)((caddr_t)sc->rxq[index] +
sfxge_rx_stats[id].offset);
- return SYSCTL_OUT(req, &sum, sizeof(sum));
+ return (SYSCTL_OUT(req, &sum, sizeof(sum)));
}
static void
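
Because the ring sizes are validated to be powers of two, every index computation in the receive and transmit paths reduces from a modulo to a bitwise AND with ptr_mask (entries - 1), and the fill level is just the difference of free-running counters. The pattern, using the queue fields and helpers introduced above:

	unsigned int id, level;

	/* added and completed are free-running; they only ever increase. */
	id = rxq->added & rxq->ptr_mask;	/* slot for the next descriptor */
	level = rxq->added - rxq->completed;	/* descriptors currently outstanding */
	if (level < rxq->refill_threshold)
		sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_FALSE);
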
diff --git a/sys/dev/sfxge/sfxge_rx.h b/sys/dev/sfxge/sfxge_rx.h
index 5a80fdb..4b3e73a 100644
--- a/sys/dev/sfxge/sfxge_rx.h
+++ b/sys/dev/sfxge/sfxge_rx.h
@@ -30,25 +30,25 @@
*/
#ifndef _SFXGE_RX_H
-#define _SFXGE_RX_H
+#define _SFXGE_RX_H
-#define SFXGE_MAGIC_RESERVED 0x8000
+#define SFXGE_MAGIC_RESERVED 0x8000
-#define SFXGE_MAGIC_DMAQ_LABEL_WIDTH 6
-#define SFXGE_MAGIC_DMAQ_LABEL_MASK \
- ((1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH) - 1)
+#define SFXGE_MAGIC_DMAQ_LABEL_WIDTH 6
+#define SFXGE_MAGIC_DMAQ_LABEL_MASK \
+ ((1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH) - 1)
-#define SFXGE_MAGIC_RX_QFLUSH_DONE \
- (SFXGE_MAGIC_RESERVED | (1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+#define SFXGE_MAGIC_RX_QFLUSH_DONE \
+ (SFXGE_MAGIC_RESERVED | (1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
-#define SFXGE_MAGIC_RX_QFLUSH_FAILED \
- (SFXGE_MAGIC_RESERVED | (2 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+#define SFXGE_MAGIC_RX_QFLUSH_FAILED \
+ (SFXGE_MAGIC_RESERVED | (2 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
-#define SFXGE_MAGIC_RX_QREFILL \
- (SFXGE_MAGIC_RESERVED | (3 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+#define SFXGE_MAGIC_RX_QREFILL \
+ (SFXGE_MAGIC_RESERVED | (3 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
-#define SFXGE_MAGIC_TX_QFLUSH_DONE \
- (SFXGE_MAGIC_RESERVED | (4 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+#define SFXGE_MAGIC_TX_QFLUSH_DONE \
+ (SFXGE_MAGIC_RESERVED | (4 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
#define SFXGE_RX_SCALE_MAX EFX_MAXRSS
@@ -159,6 +159,8 @@ struct sfxge_rxq {
efsys_mem_t mem;
unsigned int buf_base_id;
enum sfxge_rxq_state init_state;
+ unsigned int entries;
+ unsigned int ptr_mask;
struct sfxge_rx_sw_desc *queue __aligned(CACHE_LINE_SIZE);
unsigned int added;
@@ -166,6 +168,7 @@ struct sfxge_rxq {
unsigned int completed;
unsigned int loopback;
struct sfxge_lro_state lro;
+ unsigned int refill_threshold;
struct callout refill_callout;
unsigned int refill_delay;
diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c
index 20f6f87..a12c747 100644
--- a/sys/dev/sfxge/sfxge_tx.c
+++ b/sys/dev/sfxge/sfxge_tx.c
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
+#include <sys/syslog.h>
#include <net/bpf.h>
#include <net/ethernet.h>
@@ -74,8 +75,27 @@ __FBSDID("$FreeBSD$");
* the output at a packet boundary. Allow for a reasonable
* minimum MSS of 512.
*/
-#define SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
-#define SFXGE_TXQ_BLOCK_LEVEL (SFXGE_NDESCS - SFXGE_TSO_MAX_DESC)
+#define SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
+#define SFXGE_TXQ_BLOCK_LEVEL(_entries) ((_entries) - SFXGE_TSO_MAX_DESC)
+
+#ifdef SFXGE_HAVE_MQ
+
+#define SFXGE_PARAM_TX_DPL_GET_MAX SFXGE_PARAM(tx_dpl_get_max)
+static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
+TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
+ &sfxge_tx_dpl_get_max, 0,
+ "Maximum number of packets in deferred packet get-list");
+
+#define SFXGE_PARAM_TX_DPL_PUT_MAX SFXGE_PARAM(tx_dpl_put_max)
+static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
+TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
+ &sfxge_tx_dpl_put_max, 0,
+ "Maximum number of packets in deferred packet put-list");
+
+#endif
+
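Both limits are CTLFLAG_RDTUN, so they are loader tunables rather than run-time writable sysctls. Assuming SFXGE_PARAM() expands to the hw.sfxge prefix used by the SYSCTL_INT declarations above, a sketch of raising the get-list limit from its new default of 1024 is to add hw.sfxge.tx_dpl_get_max="2048" to /boot/loader.conf and confirm it after boot with sysctl hw.sfxge.tx_dpl_get_max.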
/* Forward declarations. */
static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
@@ -101,7 +121,7 @@ sfxge_tx_qcomplete(struct sfxge_txq *txq)
struct sfxge_tx_mapping *stmp;
unsigned int id;
- id = completed++ & (SFXGE_NDESCS - 1);
+ id = completed++ & txq->ptr_mask;
stmp = &txq->stmp[id];
if (stmp->flags & TX_BUF_UNMAP) {
@@ -125,7 +145,7 @@ sfxge_tx_qcomplete(struct sfxge_txq *txq)
unsigned int level;
level = txq->added - txq->completed;
- if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
+ if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
sfxge_tx_qunblock(txq);
}
}
@@ -176,7 +196,7 @@ sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
*stdp->std_getp = get_next;
stdp->std_getp = get_tailp;
- stdp->std_count += count;
+ stdp->std_get_count += count;
}
#endif /* SFXGE_HAVE_MQ */
@@ -218,19 +238,19 @@ sfxge_tx_qlist_post(struct sfxge_txq *txq)
("efx_tx_qpost() refragmented descriptors"));
level = txq->added - txq->reaped;
- KASSERT(level <= SFXGE_NDESCS, ("overfilled TX queue"));
+ KASSERT(level <= txq->entries, ("overfilled TX queue"));
/* Clear the fragment list. */
txq->n_pend_desc = 0;
/* Have we reached the block level? */
- if (level < SFXGE_TXQ_BLOCK_LEVEL)
+ if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
return;
/* Reap, and check again */
sfxge_tx_qreap(txq);
level = txq->added - txq->reaped;
- if (level < SFXGE_TXQ_BLOCK_LEVEL)
+ if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
return;
txq->blocked = 1;
@@ -242,7 +262,7 @@ sfxge_tx_qlist_post(struct sfxge_txq *txq)
mb();
sfxge_tx_qreap(txq);
level = txq->added - txq->reaped;
- if (level < SFXGE_TXQ_BLOCK_LEVEL) {
+ if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
mb();
txq->blocked = 0;
}
@@ -271,7 +291,7 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
}
/* Load the packet for DMA. */
- id = txq->added & (SFXGE_NDESCS - 1);
+ id = txq->added & txq->ptr_mask;
stmp = &txq->stmp[id];
rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
mbuf, dma_seg, &n_dma_seg, 0);
@@ -318,7 +338,7 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
stmp->flags = 0;
if (__predict_false(stmp ==
- &txq->stmp[SFXGE_NDESCS - 1]))
+ &txq->stmp[txq->ptr_mask]))
stmp = &txq->stmp[0];
else
stmp++;
@@ -343,7 +363,7 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
/* Post the fragment list. */
sfxge_tx_qlist_post(txq);
- return 0;
+ return (0);
reject_mapped:
bus_dmamap_unload(txq->packet_dma_tag, *used_map);
@@ -352,7 +372,7 @@ reject:
m_freem(mbuf);
++txq->drops;
- return rc;
+ return (rc);
}
#ifdef SFXGE_HAVE_MQ
@@ -380,7 +400,7 @@ sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
prefetch_read_many(txq->common);
mbuf = stdp->std_get;
- count = stdp->std_count;
+ count = stdp->std_get_count;
while (count != 0) {
KASSERT(mbuf != NULL, ("mbuf == NULL"));
@@ -412,22 +432,22 @@ sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
if (count == 0) {
KASSERT(mbuf == NULL, ("mbuf != NULL"));
stdp->std_get = NULL;
- stdp->std_count = 0;
+ stdp->std_get_count = 0;
stdp->std_getp = &stdp->std_get;
} else {
stdp->std_get = mbuf;
- stdp->std_count = count;
+ stdp->std_get_count = count;
}
if (txq->added != pushed)
efx_tx_qpush(txq->common, txq->added);
- KASSERT(txq->blocked || stdp->std_count == 0,
+ KASSERT(txq->blocked || stdp->std_get_count == 0,
("queue unblocked but count is non-zero"));
}
-#define SFXGE_TX_QDPL_PENDING(_txq) \
- ((_txq)->dpl.std_put != 0)
+#define SFXGE_TX_QDPL_PENDING(_txq) \
+ ((_txq)->dpl.std_put != 0)
/*
* Service the deferred packet list.
@@ -476,12 +496,12 @@ sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
sfxge_tx_qdpl_swizzle(txq);
- if (stdp->std_count >= SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT)
+ if (stdp->std_get_count >= stdp->std_get_max)
return (ENOBUFS);
*(stdp->std_getp) = mbuf;
stdp->std_getp = &mbuf->m_nextpkt;
- stdp->std_count++;
+ stdp->std_get_count++;
} else {
volatile uintptr_t *putp;
uintptr_t old;
@@ -493,12 +513,12 @@ sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
do {
old = *putp;
- if (old) {
+ if (old != 0) {
struct mbuf *mp = (struct mbuf *)old;
old_len = mp->m_pkthdr.csum_data;
} else
old_len = 0;
- if (old_len >= SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT)
+ if (old_len >= stdp->std_put_max)
return (ENOBUFS);
mbuf->m_pkthdr.csum_data = old_len + 1;
mbuf->m_nextpkt = (void *)old;
@@ -559,7 +579,6 @@ fail:
m_freem(m);
atomic_add_long(&txq->early_drops, 1);
return (rc);
-
}
static void
@@ -576,8 +595,8 @@ sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
m_freem(mbuf);
}
stdp->std_get = NULL;
- stdp->std_count = 0;
- stdp->std_getp = &stdp->std_get;
+ stdp->std_get_count = 0;
+ stdp->std_getp = &stdp->std_get;
mtx_unlock(&txq->lock);
}
@@ -599,7 +618,7 @@ sfxge_if_qflush(struct ifnet *ifp)
*/
int
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
-{
+{
struct sfxge_softc *sc;
struct sfxge_txq *txq;
int rc;
@@ -652,7 +671,7 @@ static void sfxge_if_start_locked(struct ifnet *ifp)
}
while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);
if (mbuf == NULL)
break;
@@ -757,47 +776,49 @@ static inline const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
/* Size of preallocated TSO header buffers. Larger blocks must be
* allocated from the heap.
*/
-#define TSOH_STD_SIZE 128
+#define TSOH_STD_SIZE 128
/* At most half the descriptors in the queue at any time will refer to
* a TSO header buffer, since they must always be followed by a
* payload descriptor referring to an mbuf.
*/
-#define TSOH_COUNT (SFXGE_NDESCS / 2u)
-#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
-#define TSOH_PAGE_COUNT ((TSOH_COUNT + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
+#define TSOH_COUNT(_txq_entries) ((_txq_entries) / 2u)
+#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
+#define TSOH_PAGE_COUNT(_txq_entries) \
+ ((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
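As a worked example, assuming the usual 4 KB PAGE_SIZE: TSOH_PER_PAGE is 4096 / 128 = 32 headers per page, a 1024-entry ring allows TSOH_COUNT = 512 outstanding headers, and TSOH_PAGE_COUNT rounds up to (512 + 31) / 32 = 16 pages preallocated by tso_init().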
static int tso_init(struct sfxge_txq *txq)
{
struct sfxge_softc *sc = txq->sc;
+ unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
int i, rc;
/* Allocate TSO header buffers */
- txq->tsoh_buffer = malloc(TSOH_PAGE_COUNT * sizeof(txq->tsoh_buffer[0]),
+ txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
M_SFXGE, M_WAITOK);
- for (i = 0; i < TSOH_PAGE_COUNT; i++) {
+ for (i = 0; i < tsoh_page_count; i++) {
rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
- if (rc)
+ if (rc != 0)
goto fail;
}
- return 0;
+ return (0);
fail:
while (i-- > 0)
sfxge_dma_free(&txq->tsoh_buffer[i]);
free(txq->tsoh_buffer, M_SFXGE);
txq->tsoh_buffer = NULL;
- return rc;
+ return (rc);
}
static void tso_fini(struct sfxge_txq *txq)
{
int i;
- if (txq->tsoh_buffer) {
- for (i = 0; i < TSOH_PAGE_COUNT; i++)
+ if (txq->tsoh_buffer != NULL) {
+ for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
sfxge_dma_free(&txq->tsoh_buffer[i]);
free(txq->tsoh_buffer, M_SFXGE);
}
@@ -925,7 +946,7 @@ static int tso_start_new_packet(struct sfxge_txq *txq,
/* We cannot use bus_dmamem_alloc() as that may sleep */
header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
if (__predict_false(!header))
- return ENOMEM;
+ return (ENOMEM);
rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
header, tso->header_len,
tso_map_long_header, &dma_addr,
@@ -938,7 +959,7 @@ static int tso_start_new_packet(struct sfxge_txq *txq,
rc = EINVAL;
}
free(header, M_SFXGE);
- return rc;
+ return (rc);
}
map = stmp->map;
@@ -987,7 +1008,7 @@ static int tso_start_new_packet(struct sfxge_txq *txq,
desc->eb_size = tso->header_len;
desc->eb_eop = 0;
- return 0;
+ return (0);
}
static int
@@ -1011,12 +1032,12 @@ sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
tso.dma_addr = dma_seg->ds_addr + tso.header_len;
}
- id = txq->added & (SFXGE_NDESCS - 1);
+ id = txq->added & txq->ptr_mask;
if (__predict_false(tso_start_new_packet(txq, &tso, id)))
- return -1;
+ return (-1);
while (1) {
- id = (id + 1) & (SFXGE_NDESCS - 1);
+ id = (id + 1) & txq->ptr_mask;
tso_fill_packet_with_fragment(txq, &tso);
/* Move onto the next fragment? */
@@ -1039,7 +1060,7 @@ sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
if (txq->n_pend_desc >
SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG))
break;
- next_id = (id + 1) & (SFXGE_NDESCS - 1);
+ next_id = (id + 1) & txq->ptr_mask;
if (__predict_false(tso_start_new_packet(txq, &tso,
next_id)))
break;
@@ -1048,7 +1069,7 @@ sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
}
txq->tso_bursts++;
- return id;
+ return (id);
}
static void
@@ -1071,7 +1092,7 @@ sfxge_tx_qunblock(struct sfxge_txq *txq)
unsigned int level;
level = txq->added - txq->completed;
- if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
+ if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
txq->blocked = 0;
}
@@ -1147,7 +1168,7 @@ sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
txq->common = NULL;
efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
- EFX_TXQ_NBUFS(SFXGE_NDESCS));
+ EFX_TXQ_NBUFS(sc->txq_entries));
mtx_unlock(&evq->lock);
mtx_unlock(SFXGE_TXQ_LOCK(txq));
@@ -1173,8 +1194,8 @@ sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
/* Program the buffer table. */
if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
- EFX_TXQ_NBUFS(SFXGE_NDESCS))) != 0)
- return rc;
+ EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
+ return (rc);
/* Determine the kind of queue we are creating. */
switch (txq->type) {
@@ -1195,12 +1216,12 @@ sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
/* Create the common code transmit queue. */
if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
- SFXGE_NDESCS, txq->buf_base_id, flags, evq->common,
+ sc->txq_entries, txq->buf_base_id, flags, evq->common,
&txq->common)) != 0)
goto fail;
mtx_lock(SFXGE_TXQ_LOCK(txq));
-
+
/* Enable the transmit queue. */
efx_tx_qenable(txq->common);
@@ -1212,8 +1233,8 @@ sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
fail:
efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
- EFX_TXQ_NBUFS(SFXGE_NDESCS));
- return rc;
+ EFX_TXQ_NBUFS(sc->txq_entries));
+ return (rc);
}
void
@@ -1229,7 +1250,7 @@ sfxge_tx_stop(struct sfxge_softc *sc)
sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);
encp = efx_nic_cfg_get(sc->enp);
- sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);
+ sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);
/* Tear down the transmit module */
efx_tx_fini(sc->enp);
@@ -1266,7 +1287,7 @@ fail3:
sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);
fail2:
- sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);
+ sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);
fail:
efx_tx_fini(sc->enp);
@@ -1281,7 +1302,7 @@ static void
sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
{
struct sfxge_txq *txq;
- unsigned int nmaps = SFXGE_NDESCS;
+ unsigned int nmaps;
txq = sc->txq[index];
@@ -1293,7 +1314,8 @@ sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
/* Free the context arrays. */
free(txq->pend_desc, M_SFXGE);
- while (nmaps--)
+ nmaps = sc->txq_entries;
+ while (nmaps-- != 0)
bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
free(txq->stmp, M_SFXGE);
@@ -1313,6 +1335,8 @@ static int
sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
enum sfxge_txq_type type, unsigned int evq_index)
{
+ char name[16];
+ struct sysctl_oid *txq_node;
struct sfxge_txq *txq;
struct sfxge_evq *evq;
#ifdef SFXGE_HAVE_MQ
@@ -1324,6 +1348,8 @@ sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
txq->sc = sc;
+ txq->entries = sc->txq_entries;
+ txq->ptr_mask = txq->entries - 1;
sc->txq[txq_index] = txq;
esmp = &txq->mem;
@@ -1331,12 +1357,12 @@ sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
evq = sc->evq[evq_index];
/* Allocate and zero DMA space for the descriptor ring. */
- if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
+ if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
return (rc);
- (void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(SFXGE_NDESCS));
+ (void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(sc->txq_entries));
/* Allocate buffer table entries. */
- sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(SFXGE_NDESCS),
+ sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
&txq->buf_base_id);
/* Create a DMA tag for packet mappings. */
@@ -1350,29 +1376,59 @@ sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
}
/* Allocate pending descriptor array for batching writes. */
- txq->pend_desc = malloc(sizeof(efx_buffer_t) * SFXGE_NDESCS,
+ txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
M_SFXGE, M_ZERO | M_WAITOK);
/* Allocate and initialise mbuf DMA mapping array. */
- txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * SFXGE_NDESCS,
+ txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
M_SFXGE, M_ZERO | M_WAITOK);
- for (nmaps = 0; nmaps < SFXGE_NDESCS; nmaps++) {
+ for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
rc = bus_dmamap_create(txq->packet_dma_tag, 0,
&txq->stmp[nmaps].map);
if (rc != 0)
goto fail2;
}
+ snprintf(name, sizeof(name), "%u", txq_index);
+ txq_node = SYSCTL_ADD_NODE(
+ device_get_sysctl_ctx(sc->dev),
+ SYSCTL_CHILDREN(sc->txqs_node),
+ OID_AUTO, name, CTLFLAG_RD, NULL, "");
+ if (txq_node == NULL) {
+ rc = ENOMEM;
+ goto fail_txq_node;
+ }
+
if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
(rc = tso_init(txq)) != 0)
goto fail3;
#ifdef SFXGE_HAVE_MQ
+ if (sfxge_tx_dpl_get_max <= 0) {
+ log(LOG_ERR, "%s=%d must be greater than 0",
+ SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
+ rc = EINVAL;
+ goto fail_tx_dpl_get_max;
+ }
+ if (sfxge_tx_dpl_put_max < 0) {
+ log(LOG_ERR, "%s=%d must be greater or equal to 0",
+ SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
+ rc = EINVAL;
+ goto fail_tx_dpl_put_max;
+ }
+
/* Initialize the deferred packet list. */
stdp = &txq->dpl;
+ stdp->std_put_max = sfxge_tx_dpl_put_max;
+ stdp->std_get_max = sfxge_tx_dpl_get_max;
stdp->std_getp = &stdp->std_get;
mtx_init(&txq->lock, "txq", NULL, MTX_DEF);
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
+ SYSCTL_CHILDREN(txq_node), OID_AUTO,
+ "dpl_get_count", CTLFLAG_RD | CTLFLAG_STATS,
+ &stdp->std_get_count, 0, "");
#endif
txq->type = type;
@@ -1382,10 +1438,13 @@ sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
return (0);
+fail_tx_dpl_put_max:
+fail_tx_dpl_get_max:
fail3:
+fail_txq_node:
free(txq->pend_desc, M_SFXGE);
fail2:
- while (nmaps--)
+ while (nmaps-- != 0)
bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
free(txq->stmp, M_SFXGE);
bus_dma_tag_destroy(txq->packet_dma_tag);
@@ -1400,7 +1459,7 @@ static const struct {
const char *name;
size_t offset;
} sfxge_tx_stats[] = {
-#define SFXGE_TX_STAT(name, member) \
+#define SFXGE_TX_STAT(name, member) \
{ #name, offsetof(struct sfxge_txq, member) }
SFXGE_TX_STAT(tso_bursts, tso_bursts),
SFXGE_TX_STAT(tso_packets, tso_packets),
@@ -1426,7 +1485,7 @@ sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
sum += *(unsigned long *)((caddr_t)sc->txq[index] +
sfxge_tx_stats[id].offset);
- return SYSCTL_OUT(req, &sum, sizeof(sum));
+ return (SYSCTL_OUT(req, &sum, sizeof(sum)));
}
static void
@@ -1460,7 +1519,7 @@ sfxge_tx_fini(struct sfxge_softc *sc)
sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);
sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
- sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
+ sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
}
@@ -1476,6 +1535,15 @@ sfxge_tx_init(struct sfxge_softc *sc)
KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
("intr->state != SFXGE_INTR_INITIALIZED"));
+ sc->txqs_node = SYSCTL_ADD_NODE(
+ device_get_sysctl_ctx(sc->dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
+ OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");
+ if (sc->txqs_node == NULL) {
+ rc = ENOMEM;
+ goto fail_txq_node;
+ }
+
/* Initialize the transmit queues */
if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
SFXGE_TXQ_NON_CKSUM, 0)) != 0)
@@ -1505,5 +1573,6 @@ fail2:
sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
fail:
+fail_txq_node:
return (rc);
}
diff --git a/sys/dev/sfxge/sfxge_tx.h b/sys/dev/sfxge/sfxge_tx.h
index 33ce8b9..67dea0c 100644
--- a/sys/dev/sfxge/sfxge_tx.h
+++ b/sys/dev/sfxge/sfxge_tx.h
@@ -30,7 +30,7 @@
*/
#ifndef _SFXGE_TX_H
-#define _SFXGE_TX_H
+#define _SFXGE_TX_H
#include <netinet/in.h>
#include <netinet/ip.h>
@@ -47,7 +47,7 @@
* could overlap all mbufs in the chain and also require an extra
* segment for a TSO header.
*/
-#define SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1)
+#define SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1)
/*
* Buffer mapping flags.
@@ -75,17 +75,21 @@ struct sfxge_tx_mapping {
enum sfxge_tx_buf_flags flags;
};
-#define SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT 64
+#define SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT 1024
#define SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT 64
/*
* Deferred packet list.
*/
struct sfxge_tx_dpl {
- uintptr_t std_put; /* Head of put list. */
- struct mbuf *std_get; /* Head of get list. */
- struct mbuf **std_getp; /* Tail of get list. */
- unsigned int std_count; /* Count of packets. */
+ unsigned int std_get_max; /* Maximum number of packets
+ * in get list */
+ unsigned int std_put_max; /* Maximum number of packets
+ * in put list */
+ uintptr_t std_put; /* Head of put list. */
+ struct mbuf *std_get; /* Head of get list. */
+ struct mbuf **std_getp; /* Tail of get list. */
+ unsigned int std_get_count; /* Packets in get list. */
};
@@ -106,16 +110,16 @@ enum sfxge_txq_type {
SFXGE_TXQ_NTYPES
};
-#define SFXGE_TXQ_UNBLOCK_LEVEL (EFX_TXQ_LIMIT(SFXGE_NDESCS) / 4)
+#define SFXGE_TXQ_UNBLOCK_LEVEL(_entries) (EFX_TXQ_LIMIT(_entries) / 4)
#define SFXGE_TX_BATCH 64
#ifdef SFXGE_HAVE_MQ
-#define SFXGE_TXQ_LOCK(txq) (&(txq)->lock)
-#define SFXGE_TX_SCALE(sc) ((sc)->intr.n_alloc)
+#define SFXGE_TXQ_LOCK(txq) (&(txq)->lock)
+#define SFXGE_TX_SCALE(sc) ((sc)->intr.n_alloc)
#else
-#define SFXGE_TXQ_LOCK(txq) (&(txq)->sc->tx_lock)
-#define SFXGE_TX_SCALE(sc) 1
+#define SFXGE_TXQ_LOCK(txq) (&(txq)->sc->tx_lock)
+#define SFXGE_TX_SCALE(sc) 1
#endif
struct sfxge_txq {
@@ -128,6 +132,8 @@ struct sfxge_txq {
unsigned int evq_index;
efsys_mem_t mem;
unsigned int buf_base_id;
+ unsigned int entries;
+ unsigned int ptr_mask;
struct sfxge_tx_mapping *stmp; /* Packets in flight. */
bus_dma_tag_t packet_dma_tag;
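The new entries/ptr_mask pair follows the usual power-of-two ring idiom: ptr_mask is set to entries - 1 in sfxge_tx_qinit(), so with, say, sc->txq_entries = 1024 the mask is 0x3ff and expressions such as txq->added & txq->ptr_mask reduce an ever-increasing index modulo the ring size without a division. This only holds if txq_entries is a power of two, which the driver presumably enforces where the value is chosen.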
diff --git a/sys/dev/sound/usb/uaudio.c b/sys/dev/sound/usb/uaudio.c
index b24a639..3d48abc 100644
--- a/sys/dev/sound/usb/uaudio.c
+++ b/sys/dev/sound/usb/uaudio.c
@@ -232,7 +232,7 @@ struct uaudio_chan {
#define UAUDIO_SYNC_LESS 2
};
-#define UMIDI_CABLES_MAX 16 /* units */
+#define UMIDI_EMB_JACK_MAX 16 /* units */
#define UMIDI_TX_FRAMES 256 /* units */
#define UMIDI_TX_BUFFER (UMIDI_TX_FRAMES * 4) /* bytes */
@@ -263,7 +263,7 @@ struct umidi_sub_chan {
struct umidi_chan {
- struct umidi_sub_chan sub[UMIDI_CABLES_MAX];
+ struct umidi_sub_chan sub[UMIDI_EMB_JACK_MAX];
struct mtx mtx;
struct usb_xfer *xfer[UMIDI_N_TRANSFER];
@@ -275,7 +275,7 @@ struct umidi_chan {
uint8_t write_open_refcount;
uint8_t curr_cable;
- uint8_t max_cable;
+ uint8_t max_emb_jack;
uint8_t valid;
uint8_t single_command;
};
@@ -1481,6 +1481,7 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev,
union uaudio_asid asid = { NULL };
union uaudio_asf1d asf1d = { NULL };
union uaudio_sed sed = { NULL };
+ struct usb_midi_streaming_endpoint_descriptor *msid = NULL;
usb_endpoint_descriptor_audio_t *ed1 = NULL;
const struct usb_audio_control_descriptor *acdp = NULL;
struct usb_config_descriptor *cd = usbd_get_config_descriptor(udev);
@@ -1498,6 +1499,7 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev,
uint8_t bChannels;
uint8_t bBitResolution;
uint8_t audio_if = 0;
+ uint8_t midi_if = 0;
uint8_t uma_if_class;
while ((desc = usb_desc_foreach(cd, desc))) {
@@ -1533,7 +1535,8 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev,
((id->bInterfaceClass == UICLASS_VENDOR) &&
(sc->sc_uq_au_vendor_class != 0)));
- if ((uma_if_class != 0) && (id->bInterfaceSubClass == UISUBCLASS_AUDIOSTREAM)) {
+ if ((uma_if_class != 0) &&
+ (id->bInterfaceSubClass == UISUBCLASS_AUDIOSTREAM)) {
audio_if = 1;
} else {
audio_if = 0;
@@ -1545,13 +1548,16 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev,
/*
* XXX could allow multiple MIDI interfaces
*/
+ midi_if = 1;
if ((sc->sc_midi_chan.valid == 0) &&
- usbd_get_iface(udev, curidx)) {
+ (usbd_get_iface(udev, curidx) != NULL)) {
sc->sc_midi_chan.iface_index = curidx;
sc->sc_midi_chan.iface_alt_index = alt_index;
sc->sc_midi_chan.valid = 1;
}
+ } else {
+ midi_if = 0;
}
asid.v1 = NULL;
asf1d.v1 = NULL;
@@ -1560,14 +1566,25 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev,
}
if (audio_if == 0) {
- if ((acdp == NULL) &&
- (desc->bDescriptorType == UDESC_CS_INTERFACE) &&
- (desc->bDescriptorSubtype == UDESCSUB_AC_HEADER) &&
- (desc->bLength >= sizeof(*acdp))) {
- acdp = (void *)desc;
- audio_rev = UGETW(acdp->bcdADC);
+ if (midi_if == 0) {
+ if ((acdp == NULL) &&
+ (desc->bDescriptorType == UDESC_CS_INTERFACE) &&
+ (desc->bDescriptorSubtype == UDESCSUB_AC_HEADER) &&
+ (desc->bLength >= sizeof(*acdp))) {
+ acdp = (void *)desc;
+ audio_rev = UGETW(acdp->bcdADC);
+ }
+ } else {
+ msid = (void *)desc;
+
+ /* get the maximum number of embedded jacks in use, if any */
+ if (msid->bLength >= sizeof(*msid) &&
+ msid->bDescriptorType == UDESC_CS_ENDPOINT &&
+ msid->bDescriptorSubtype == MS_GENERAL &&
+ msid->bNumEmbMIDIJack > sc->sc_midi_chan.max_emb_jack) {
+ sc->sc_midi_chan.max_emb_jack = msid->bNumEmbMIDIJack;
+ }
}
-
/*
* Don't collect any USB audio descriptors if
* this is not an USB audio stream interface.
@@ -5219,8 +5236,7 @@ umidi_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
*/
sub = &chan->sub[cn];
- if ((cmd_len != 0) &&
- (cn < chan->max_cable) &&
+ if ((cmd_len != 0) && (cn < chan->max_emb_jack) &&
(sub->read_open != 0)) {
/* Send data to the application */
@@ -5456,7 +5472,7 @@ tr_setup:
}
chan->curr_cable++;
- if (chan->curr_cable >= chan->max_cable)
+ if (chan->curr_cable >= chan->max_emb_jack)
chan->curr_cable = 0;
if (chan->curr_cable == start_cable) {
@@ -5493,7 +5509,7 @@ umidi_sub_by_fifo(struct usb_fifo *fifo)
struct umidi_sub_chan *sub;
uint32_t n;
- for (n = 0; n < UMIDI_CABLES_MAX; n++) {
+ for (n = 0; n < UMIDI_EMB_JACK_MAX; n++) {
sub = &chan->sub[n];
if ((sub->fifo.fp[USB_FIFO_RX] == fifo) ||
(sub->fifo.fp[USB_FIFO_TX] == fifo)) {
@@ -5676,12 +5692,12 @@ umidi_probe(device_t dev)
if (chan->single_command != 0)
device_printf(dev, "Single command MIDI quirk enabled\n");
- if ((chan->max_cable > UMIDI_CABLES_MAX) ||
- (chan->max_cable == 0)) {
- chan->max_cable = UMIDI_CABLES_MAX;
+ if ((chan->max_emb_jack == 0) ||
+ (chan->max_emb_jack > UMIDI_EMB_JACK_MAX)) {
+ chan->max_emb_jack = UMIDI_EMB_JACK_MAX;
}
- for (n = 0; n < chan->max_cable; n++) {
+ for (n = 0; n < chan->max_emb_jack; n++) {
sub = &chan->sub[n];
@@ -5719,9 +5735,8 @@ umidi_detach(device_t dev)
struct umidi_chan *chan = &sc->sc_midi_chan;
uint32_t n;
- for (n = 0; n < UMIDI_CABLES_MAX; n++) {
+ for (n = 0; n < UMIDI_EMB_JACK_MAX; n++)
usb_fifo_detach(&chan->sub[n].fifo);
- }
mtx_lock(&chan->mtx);
diff --git a/sys/dev/sound/usb/uaudioreg.h b/sys/dev/sound/usb/uaudioreg.h
index c2c1ad4..637b5b1 100644
--- a/sys/dev/sound/usb/uaudioreg.h
+++ b/sys/dev/sound/usb/uaudioreg.h
@@ -119,6 +119,13 @@ struct usb_audio_streaming_endpoint_descriptor {
uWord wLockDelay;
} __packed;
+struct usb_midi_streaming_endpoint_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDescriptorSubtype;
+ uByte bNumEmbMIDIJack;
+} __packed;
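The new structure covers only the fixed prefix of the class-specific MS bulk data endpoint descriptor. If memory of the USB MIDI 1.0 class definition serves, the full descriptor is bLength (4 + n), bDescriptorType CS_ENDPOINT (0x25), bDescriptorSubtype MS_GENERAL (0x01), bNumEmbMIDIJack (n), followed by n baAssocJackID bytes; a device with one embedded jack would therefore report something like 05 25 01 01 01, and only the fourth byte matters to the max_emb_jack logic in uaudio.c.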
+
struct usb_audio_streaming_type1_descriptor {
uByte bLength;
uByte bDescriptorType;
@@ -378,6 +385,7 @@ struct usb_audio_extension_unit_1 {
#define MASTER_CHAN 0
+#define MS_GENERAL 1
#define AS_GENERAL 1
#define FORMAT_TYPE 2
#define FORMAT_SPECIFIC 3
diff --git a/sys/dev/uart/uart.h b/sys/dev/uart/uart.h
index 0bcedde..8ab5022 100644
--- a/sys/dev/uart/uart.h
+++ b/sys/dev/uart/uart.h
@@ -65,6 +65,7 @@ struct uart_bas {
struct uart_class;
extern struct uart_class uart_imx_class __attribute__((weak));
+extern struct uart_class uart_msm_class __attribute__((weak));
extern struct uart_class uart_ns8250_class __attribute__((weak));
extern struct uart_class uart_quicc_class __attribute__((weak));
extern struct uart_class uart_s3c2410_class __attribute__((weak));
diff --git a/sys/dev/uart/uart_bus_fdt.c b/sys/dev/uart/uart_bus_fdt.c
index 92e155b..f52fec1 100644
--- a/sys/dev/uart/uart_bus_fdt.c
+++ b/sys/dev/uart/uart_bus_fdt.c
@@ -84,6 +84,7 @@ static struct ofw_compat_data compat_data[] = {
{"fsl,imx21-uart", (uintptr_t)&uart_imx_class},
{"fsl,mvf600-uart", (uintptr_t)&uart_vybrid_class},
{"lpc,uart", (uintptr_t)&uart_lpc_class},
+ {"qcom,uart-dm", (uintptr_t)&uart_msm_class},
{"ti,ns16550", (uintptr_t)&uart_ti8250_class},
{"ns16550", (uintptr_t)&uart_ns8250_class},
{NULL, (uintptr_t)NULL},
diff --git a/sys/dev/uart/uart_dev_imx.c b/sys/dev/uart/uart_dev_imx.c
index 43338fe..2def18c 100644
--- a/sys/dev/uart/uart_dev_imx.c
+++ b/sys/dev/uart/uart_dev_imx.c
@@ -90,6 +90,45 @@ imx_uart_probe(struct uart_bas *bas)
return (0);
}
+static u_int
+imx_uart_getbaud(struct uart_bas *bas)
+{
+ uint32_t rate, ubir, ubmr;
+ u_int baud, blo, bhi, i;
+ static const u_int predivs[] = {6, 5, 4, 3, 2, 1, 7, 1};
+ static const u_int std_rates[] = {
+ 9600, 14400, 19200, 38400, 57600, 115200, 230400, 460800, 921600
+ };
+
+ /*
+ * Get the baud rate the hardware is programmed for, then search the
+ * table of standard baud rates for a number that's within 3% of the
+ * actual rate the hardware is programmed for. It's more comforting to
+ * see that your console is running at 115200 than 114942. Note that
+ * here we cannot make a simplifying assumption that the predivider and
+ * numerator are 1 (like we do when setting the baud rate), because we
+ * don't know what u-boot might have set up.
+ */
+ i = (GETREG(bas, REG(UFCR)) & IMXUART_UFCR_RFDIV_MASK) >>
+ IMXUART_UFCR_RFDIV_SHIFT;
+ rate = imx_ccm_uart_hz() / predivs[i];
+ ubir = GETREG(bas, REG(UBIR)) + 1;
+ ubmr = GETREG(bas, REG(UBMR)) + 1;
+ baud = ((rate / 16 ) * ubir) / ubmr;
+
+ blo = (baud * 100) / 103;
+ bhi = (baud * 100) / 97;
+ for (i = 0; i < nitems(std_rates); i++) {
+ rate = std_rates[i];
+ if (rate >= blo && rate <= bhi) {
+ baud = rate;
+ break;
+ }
+ }
+
+ return (baud);
+}
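To make the 3% window concrete: if the divisor registers work out to a measured rate of 114942, then blo = 11494200 / 103 = 111594 and bhi = 11494200 / 97 = 118496; the standard 115200 entry falls inside that window, so the ioctl reports 115200 rather than the raw 114942.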
+
static void
imx_uart_init(struct uart_bas *bas, int baudrate, int databits,
int stopbits, int parity)
@@ -348,8 +387,7 @@ imx_uart_bus_ioctl(struct uart_softc *sc, int request, intptr_t data)
/* TODO */
break;
case UART_IOCTL_BAUD:
- /* TODO */
- *(int*)data = 115200;
+ *(u_int*)data = imx_uart_getbaud(bas);
break;
default:
error = EINVAL;
diff --git a/sys/dev/uart/uart_dev_msm.c b/sys/dev/uart/uart_dev_msm.c
new file mode 100644
index 0000000..12dc8a7
--- /dev/null
+++ b/sys/dev/uart/uart_dev_msm.c
@@ -0,0 +1,568 @@
+/*-
+ * Copyright (c) 2014 Ganbold Tsagaankhuu <ganbold@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Qualcomm MSM7K/8K uart driver */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/kdb.h>
+#include <machine/bus.h>
+#include <machine/fdt.h>
+
+#include <dev/uart/uart.h>
+#include <dev/uart/uart_cpu.h>
+#include <dev/uart/uart_bus.h>
+#include <dev/uart/uart_dev_msm.h>
+
+#include "uart_if.h"
+
+#define DEF_CLK 7372800
+
+#define GETREG(bas, reg) \
+ bus_space_read_4((bas)->bst, (bas)->bsh, (reg))
+#define SETREG(bas, reg, value) \
+ bus_space_write_4((bas)->bst, (bas)->bsh, (reg), (value))
+
+static int msm_uart_param(struct uart_bas *, int, int, int, int);
+
+/*
+ * Low-level UART interface.
+ */
+static int msm_probe(struct uart_bas *bas);
+static void msm_init(struct uart_bas *bas, int, int, int, int);
+static void msm_term(struct uart_bas *bas);
+static void msm_putc(struct uart_bas *bas, int);
+static int msm_rxready(struct uart_bas *bas);
+static int msm_getc(struct uart_bas *bas, struct mtx *mtx);
+
+extern SLIST_HEAD(uart_devinfo_list, uart_devinfo) uart_sysdevs;
+
+static int
+msm_uart_param(struct uart_bas *bas, int baudrate, int databits,
+ int stopbits, int parity)
+{
+ int ulcon;
+
+ ulcon = 0;
+
+ switch (databits) {
+ case 5:
+ ulcon |= (UART_DM_5_BPS << 4);
+ break;
+ case 6:
+ ulcon |= (UART_DM_6_BPS << 4);
+ break;
+ case 7:
+ ulcon |= (UART_DM_7_BPS << 4);
+ break;
+ case 8:
+ ulcon |= (UART_DM_8_BPS << 4);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ switch (parity) {
+ case UART_PARITY_NONE:
+ ulcon |= UART_DM_NO_PARITY;
+ break;
+ case UART_PARITY_ODD:
+ ulcon |= UART_DM_ODD_PARITY;
+ break;
+ case UART_PARITY_EVEN:
+ ulcon |= UART_DM_EVEN_PARITY;
+ break;
+ case UART_PARITY_SPACE:
+ ulcon |= UART_DM_SPACE_PARITY;
+ break;
+ case UART_PARITY_MARK:
+ default:
+ return (EINVAL);
+ }
+
+ switch (stopbits) {
+ case 1:
+ ulcon |= (UART_DM_SBL_1 << 2);
+ break;
+ case 2:
+ ulcon |= (UART_DM_SBL_2 << 2);
+ break;
+ default:
+ return (EINVAL);
+ }
+ uart_setreg(bas, UART_DM_MR2, ulcon);
+
+ /* Set 115200 for both TX and RX. */
+ uart_setreg(bas, UART_DM_CSR, UART_DM_CSR_115200);
+ uart_barrier(bas);
+
+ return (0);
+}
+
+struct uart_ops uart_msm_ops = {
+ .probe = msm_probe,
+ .init = msm_init,
+ .term = msm_term,
+ .putc = msm_putc,
+ .rxready = msm_rxready,
+ .getc = msm_getc,
+};
+
+static int
+msm_probe(struct uart_bas *bas)
+{
+
+ return (0);
+}
+
+static void
+msm_init(struct uart_bas *bas, int baudrate, int databits, int stopbits,
+ int parity)
+{
+
+ if (bas->rclk == 0)
+ bas->rclk = DEF_CLK;
+
+ KASSERT(bas->rclk != 0, ("msm_init: Invalid rclk"));
+
+ /* Set default parameters */
+ msm_uart_param(bas, baudrate, databits, stopbits, parity);
+
+ /*
+ * Configure UART mode registers MR1 and MR2.
+ * Hardware flow control isn't supported.
+ */
+ uart_setreg(bas, UART_DM_MR1, 0x0);
+
+ /* Reset interrupt mask register. */
+ uart_setreg(bas, UART_DM_IMR, 0);
+
+ /*
+ * Configure the TX and RX FIFO watermark registers.
+ * TX watermark value is set to 0 - interrupt is generated when
+ * FIFO level is less than or equal to 0.
+ */
+ uart_setreg(bas, UART_DM_TFWR, UART_DM_TFW_VALUE);
+
+ /* Set RX watermark value */
+ uart_setreg(bas, UART_DM_RFWR, UART_DM_RFW_VALUE);
+
+ /*
+ * Configure Interrupt Programming Register.
+ * Set initial Stale timeout value.
+ */
+ uart_setreg(bas, UART_DM_IPR, UART_DM_STALE_TIMEOUT_LSB);
+
+ /* Disable IRDA mode */
+ uart_setreg(bas, UART_DM_IRDA, 0x0);
+
+ /*
+ * Configure and enable sim interface if required.
+ * Configure hunt character value in HCR register.
+ * Keep it in reset state.
+ */
+ uart_setreg(bas, UART_DM_HCR, 0x0);
+
+ /* Issue soft reset command */
+ SETREG(bas, UART_DM_CR, UART_DM_RESET_TX);
+ SETREG(bas, UART_DM_CR, UART_DM_RESET_RX);
+ SETREG(bas, UART_DM_CR, UART_DM_RESET_ERROR_STATUS);
+ SETREG(bas, UART_DM_CR, UART_DM_RESET_BREAK_INT);
+ SETREG(bas, UART_DM_CR, UART_DM_RESET_STALE_INT);
+
+ /* Enable/Disable Rx/Tx DM interfaces */
+ /* Disable Data Mover for now. */
+ uart_setreg(bas, UART_DM_DMEN, 0x0);
+
+ /* Enable transmitter and receiver */
+ uart_setreg(bas, UART_DM_CR, UART_DM_CR_RX_ENABLE);
+ uart_setreg(bas, UART_DM_CR, UART_DM_CR_TX_ENABLE);
+
+ uart_barrier(bas);
+}
+
+static void
+msm_term(struct uart_bas *bas)
+{
+
+ /* XXX */
+}
+
+static void
+msm_putc(struct uart_bas *bas, int c)
+{
+ int limit;
+
+ /*
+ * Write the number of characters to be transmitted to the
+ * NO_CHARS_FOR_TX register. Before writing, the TX FIFO must be
+ * empty, as indicated by the TX_READY bit in the ISR.
+ */
+
+ /*
+ * Check if transmit FIFO is empty.
+ * If not, wait for the TX_READY interrupt.
+ */
+ limit = 1000;
+ if (!(uart_getreg(bas, UART_DM_SR) & UART_DM_SR_TXEMT)) {
+ while ((uart_getreg(bas, UART_DM_ISR) & UART_DM_TX_READY) == 0
+ && --limit)
+ DELAY(4);
+ }
+ /* FIFO is ready, write number of characters to be written */
+ uart_setreg(bas, UART_DM_NO_CHARS_FOR_TX, 1);
+
+ /* Wait till TX FIFO has space */
+ while ((uart_getreg(bas, UART_DM_SR) & UART_DM_SR_TXRDY) == 0)
+ DELAY(4);
+
+ /* TX FIFO has space. Write char */
+ SETREG(bas, UART_DM_TF(0), (c & 0xff));
+}
+
+static int
+msm_rxready(struct uart_bas *bas)
+{
+
+ /* Wait for a character to come ready */
+ return ((uart_getreg(bas, UART_DM_SR) & UART_DM_SR_RXRDY) ==
+ UART_DM_SR_RXRDY);
+}
+
+static int
+msm_getc(struct uart_bas *bas, struct mtx *mtx)
+{
+ int c;
+
+ uart_lock(mtx);
+
+ /* Wait for a character to come ready */
+ while ((uart_getreg(bas, UART_DM_SR) & UART_DM_SR_RXRDY) !=
+ UART_DM_SR_RXRDY)
+ DELAY(4);
+
+ /* Check for Overrun error. If so reset Error Status */
+ if (uart_getreg(bas, UART_DM_SR) & UART_DM_SR_UART_OVERRUN)
+ uart_setreg(bas, UART_DM_CR, UART_DM_RESET_ERROR_STATUS);
+
+ /* Read char */
+ c = uart_getreg(bas, UART_DM_RF(0));
+
+ uart_unlock(mtx);
+
+ return (c);
+}
+
+/*
+ * High-level UART interface.
+ */
+struct msm_uart_softc {
+ struct uart_softc base;
+ uint32_t ier;
+};
+
+static int msm_bus_probe(struct uart_softc *sc);
+static int msm_bus_attach(struct uart_softc *sc);
+static int msm_bus_flush(struct uart_softc *, int);
+static int msm_bus_getsig(struct uart_softc *);
+static int msm_bus_ioctl(struct uart_softc *, int, intptr_t);
+static int msm_bus_ipend(struct uart_softc *);
+static int msm_bus_param(struct uart_softc *, int, int, int, int);
+static int msm_bus_receive(struct uart_softc *);
+static int msm_bus_setsig(struct uart_softc *, int);
+static int msm_bus_transmit(struct uart_softc *);
+static void msm_bus_grab(struct uart_softc *);
+static void msm_bus_ungrab(struct uart_softc *);
+
+static kobj_method_t msm_methods[] = {
+ KOBJMETHOD(uart_probe, msm_bus_probe),
+ KOBJMETHOD(uart_attach, msm_bus_attach),
+ KOBJMETHOD(uart_flush, msm_bus_flush),
+ KOBJMETHOD(uart_getsig, msm_bus_getsig),
+ KOBJMETHOD(uart_ioctl, msm_bus_ioctl),
+ KOBJMETHOD(uart_ipend, msm_bus_ipend),
+ KOBJMETHOD(uart_param, msm_bus_param),
+ KOBJMETHOD(uart_receive, msm_bus_receive),
+ KOBJMETHOD(uart_setsig, msm_bus_setsig),
+ KOBJMETHOD(uart_transmit, msm_bus_transmit),
+ KOBJMETHOD(uart_grab, msm_bus_grab),
+ KOBJMETHOD(uart_ungrab, msm_bus_ungrab),
+ {0, 0 }
+};
+
+static int
+msm_bus_probe(struct uart_softc *sc)
+{
+
+ sc->sc_txfifosz = 64;
+ sc->sc_rxfifosz = 64;
+
+ device_set_desc(sc->sc_dev, "Qualcomm HSUART");
+
+ return (0);
+}
+
+static int
+msm_bus_attach(struct uart_softc *sc)
+{
+ struct msm_uart_softc *u = (struct msm_uart_softc *)sc;
+ struct uart_bas *bas = &sc->sc_bas;
+
+ sc->sc_hwiflow = 0;
+ sc->sc_hwoflow = 0;
+
+ /* Set TX_READY, TXLEV, RXLEV, RXSTALE */
+ u->ier = UART_DM_IMR_ENABLED;
+
+ /* Configure Interrupt Mask register IMR */
+ uart_setreg(bas, UART_DM_IMR, u->ier);
+
+ return (0);
+}
+
+/*
+ * Write the current transmit buffer to the TX FIFO.
+ */
+static int
+msm_bus_transmit(struct uart_softc *sc)
+{
+ struct msm_uart_softc *u = (struct msm_uart_softc *)sc;
+ struct uart_bas *bas = &sc->sc_bas;
+ int i;
+
+ uart_lock(sc->sc_hwmtx);
+
+ /* Write some data */
+ for (i = 0; i < sc->sc_txdatasz; i++) {
+ /* Write TX data */
+ msm_putc(bas, sc->sc_txbuf[i]);
+ uart_barrier(bas);
+ }
+
+ /* TX FIFO is empty now, enable TX_READY interrupt */
+ u->ier |= UART_DM_TX_READY;
+ SETREG(bas, UART_DM_IMR, u->ier);
+ uart_barrier(bas);
+
+ /*
+ * Inform the upper layer that it is transmitting data to hardware;
+ * this will be cleared when the TXIDLE interrupt occurs.
+ */
+ sc->sc_txbusy = 1;
+ uart_unlock(sc->sc_hwmtx);
+
+ return (0);
+}
+
+static int
+msm_bus_setsig(struct uart_softc *sc, int sig)
+{
+
+ return (0);
+}
+
+static int
+msm_bus_receive(struct uart_softc *sc)
+{
+ struct msm_uart_softc *u = (struct msm_uart_softc *)sc;
+ struct uart_bas *bas;
+ int c;
+
+ bas = &sc->sc_bas;
+ uart_lock(sc->sc_hwmtx);
+
+ /* Initialize Receive Path and interrupt */
+ SETREG(bas, UART_DM_CR, UART_DM_RESET_STALE_INT);
+ SETREG(bas, UART_DM_CR, UART_DM_STALE_EVENT_ENABLE);
+ u->ier |= UART_DM_RXLEV;
+ SETREG(bas, UART_DM_IMR, u->ier);
+
+ /* Loop until the input buffer is full or no more data is available */
+ while (uart_getreg(bas, UART_DM_SR) & UART_DM_SR_RXRDY) {
+ if (uart_rx_full(sc)) {
+ /* No space left in input buffer */
+ sc->sc_rxbuf[sc->sc_rxput] = UART_STAT_OVERRUN;
+ break;
+ }
+
+ /* Read RX FIFO */
+ c = uart_getreg(bas, UART_DM_RF(0));
+ uart_barrier(bas);
+
+ uart_rx_put(sc, c);
+ }
+
+ uart_unlock(sc->sc_hwmtx);
+
+ return (0);
+}
+
+static int
+msm_bus_param(struct uart_softc *sc, int baudrate, int databits,
+ int stopbits, int parity)
+{
+ int error;
+
+ if (sc->sc_bas.rclk == 0)
+ sc->sc_bas.rclk = DEF_CLK;
+
+ KASSERT(sc->sc_bas.rclk != 0, ("msm_bus_param: Invalid rclk"));
+
+ uart_lock(sc->sc_hwmtx);
+ error = msm_uart_param(&sc->sc_bas, baudrate, databits, stopbits,
+ parity);
+ uart_unlock(sc->sc_hwmtx);
+
+ return (error);
+}
+
+static int
+msm_bus_ipend(struct uart_softc *sc)
+{
+ struct msm_uart_softc *u = (struct msm_uart_softc *)sc;
+ struct uart_bas *bas = &sc->sc_bas;
+ uint32_t isr;
+ int ipend;
+
+ uart_lock(sc->sc_hwmtx);
+
+ /* Get ISR status */
+ isr = GETREG(bas, UART_DM_MISR);
+
+ ipend = 0;
+
+ /* Uart RX starting, notify upper layer */
+ if (isr & UART_DM_RXLEV) {
+ u->ier &= ~UART_DM_RXLEV;
+ SETREG(bas, UART_DM_IMR, u->ier);
+ uart_barrier(bas);
+ ipend |= SER_INT_RXREADY;
+ }
+
+ /* Stale RX interrupt */
+ if (isr & UART_DM_RXSTALE) {
+ /* Disable and reset it */
+ SETREG(bas, UART_DM_CR, UART_DM_STALE_EVENT_DISABLE);
+ SETREG(bas, UART_DM_CR, UART_DM_RESET_STALE_INT);
+ uart_barrier(bas);
+ ipend |= SER_INT_RXREADY;
+ }
+
+ /* TX READY interrupt */
+ if (isr & UART_DM_TX_READY) {
+ /* Clear TX Ready */
+ SETREG(bas, UART_DM_CR, UART_DM_CLEAR_TX_READY);
+
+ /* Disable TX_READY */
+ u->ier &= ~UART_DM_TX_READY;
+ SETREG(bas, UART_DM_IMR, u->ier);
+ uart_barrier(bas);
+
+ if (sc->sc_txbusy != 0)
+ ipend |= SER_INT_TXIDLE;
+ }
+
+ if (isr & UART_DM_TXLEV) {
+ /* TX FIFO is empty */
+ u->ier &= ~UART_DM_TXLEV;
+ SETREG(bas, UART_DM_IMR, u->ier);
+ uart_barrier(bas);
+
+ if (sc->sc_txbusy != 0)
+ ipend |= SER_INT_TXIDLE;
+ }
+
+ uart_unlock(sc->sc_hwmtx);
+ return (ipend);
+}
+
+static int
+msm_bus_flush(struct uart_softc *sc, int what)
+{
+
+ return (0);
+}
+
+static int
+msm_bus_getsig(struct uart_softc *sc)
+{
+
+ return (0);
+}
+
+static int
+msm_bus_ioctl(struct uart_softc *sc, int request, intptr_t data)
+{
+
+ return (EINVAL);
+}
+
+static void
+msm_bus_grab(struct uart_softc *sc)
+{
+ struct uart_bas *bas = &sc->sc_bas;
+
+ /*
+ * XXX: Turn off all interrupts to enter polling mode. Leave the
+ * saved mask alone. We'll restore whatever it was in ungrab.
+ */
+ uart_lock(sc->sc_hwmtx);
+ SETREG(bas, UART_DM_CR, UART_DM_RESET_STALE_INT);
+ SETREG(bas, UART_DM_IMR, 0);
+ uart_barrier(bas);
+ uart_unlock(sc->sc_hwmtx);
+}
+
+static void
+msm_bus_ungrab(struct uart_softc *sc)
+{
+ struct msm_uart_softc *u = (struct msm_uart_softc *)sc;
+ struct uart_bas *bas = &sc->sc_bas;
+
+ /*
+ * Restore previous interrupt mask
+ */
+ uart_lock(sc->sc_hwmtx);
+ SETREG(bas, UART_DM_IMR, u->ier);
+ uart_barrier(bas);
+ uart_unlock(sc->sc_hwmtx);
+}
+
+struct uart_class uart_msm_class = {
+ "msm",
+ msm_methods,
+ sizeof(struct msm_uart_softc),
+ .uc_ops = &uart_msm_ops,
+ .uc_range = 8,
+ .uc_rclk = DEF_CLK,
+};
diff --git a/sys/dev/uart/uart_dev_msm.h b/sys/dev/uart/uart_dev_msm.h
new file mode 100644
index 0000000..89a7f19
--- /dev/null
+++ b/sys/dev/uart/uart_dev_msm.h
@@ -0,0 +1,229 @@
+/*-
+ * Copyright (c) 2014 Ganbold Tsagaankhuu <ganbold@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _UART_DM_H_
+#define _UART_DM_H_
+
+#define UART_DM_EXTR_BITS(value, start_pos, end_pos) \
+ ((value << (32 - end_pos)) >> (32 - (end_pos - start_pos)))
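UART_DM_EXTR_BITS(value, start_pos, end_pos) isolates bits start_pos through end_pos - 1 (end exclusive): the left shift discards everything at or above end_pos and the right shift drops everything below start_pos. For example, with an unsigned 32-bit value of 0x180, UART_DM_TXFS_BUF_STATE below expands to UART_DM_EXTR_BITS(0x180, 7, 9), i.e. (0x180 << 23) >> 30 = 0x3.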
+
+/* UART Parity Mode */
+enum UART_DM_PARITY_MODE {
+ UART_DM_NO_PARITY,
+ UART_DM_ODD_PARITY,
+ UART_DM_EVEN_PARITY,
+ UART_DM_SPACE_PARITY
+};
+
+/* UART Stop Bit Length */
+enum UART_DM_STOP_BIT_LEN {
+ UART_DM_SBL_9_16,
+ UART_DM_SBL_1,
+ UART_DM_SBL_1_9_16,
+ UART_DM_SBL_2
+};
+
+/* UART Bits per Char */
+enum UART_DM_BITS_PER_CHAR {
+ UART_DM_5_BPS,
+ UART_DM_6_BPS,
+ UART_DM_7_BPS,
+ UART_DM_8_BPS
+};
+
+/* 8-N-1 Configuration */
+#define UART_DM_8_N_1_MODE (UART_DM_NO_PARITY | \
+ (UART_DM_SBL_1 << 2) | \
+ (UART_DM_8_BPS << 4))
+
+/* UART_DM Registers */
+
+/* UART Operational Mode Registers (HSUART) */
+#define UART_DM_MR1 0x00
+#define UART_DM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00
+#define UART_DM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f
+#define UART_DM_MR1_CTS_CTL_BMSK 0x40
+#define UART_DM_MR1_RX_RDY_CTL_BMSK 0x80
+
+#define UART_DM_MR2 0x04
+#define UART_DM_MR2_ERROR_MODE_BMSK 0x40
+#define UART_DM_MR2_BITS_PER_CHAR_BMSK 0x30
+#define UART_DM_MR2_STOP_BIT_LEN_BMSK 0x0c
+#define UART_DM_MR2_PARITY_MODE_BMSK 0x03
+#define UART_DM_RXBRK_ZERO_CHAR_OFF (1 << 8)
+#define UART_DM_LOOPBACK (1 << 7)
+
+/* UART Clock Selection Register, write only */
+#define UART_DM_CSR 0x08
+#define UART_DM_CSR_115200 0xff
+#define UART_DM_CSR_57600 0xee
+#define UART_DM_CSR_38400 0xdd
+#define UART_DM_CSR_28800 0xcc
+#define UART_DM_CSR_19200 0xbb
+#define UART_DM_CSR_14400 0xaa
+#define UART_DM_CSR_9600 0x99
+#define UART_DM_CSR_7200 0x88
+#define UART_DM_CSR_4800 0x77
+#define UART_DM_CSR_3600 0x66
+#define UART_DM_CSR_2400 0x55
+#define UART_DM_CSR_1200 0x44
+#define UART_DM_CSR_600 0x33
+#define UART_DM_CSR_300 0x22
+#define UART_DM_CSR_150 0x11
+#define UART_DM_CSR_75 0x00
+
+/* UART DM TX FIFO Registers - 4, write only */
+#define UART_DM_TF(x) (0x70 + (4 * (x)))
+
+/* UART Command Register, write only */
+#define UART_DM_CR 0x10
+#define UART_DM_CR_RX_ENABLE (1 << 0)
+#define UART_DM_CR_RX_DISABLE (1 << 1)
+#define UART_DM_CR_TX_ENABLE (1 << 2)
+#define UART_DM_CR_TX_DISABLE (1 << 3)
+
+/* UART_DM_CR channel command bit value (register field is bits 8:4) */
+#define UART_DM_RESET_RX 0x10
+#define UART_DM_RESET_TX 0x20
+#define UART_DM_RESET_ERROR_STATUS 0x30
+#define UART_DM_RESET_BREAK_INT 0x40
+#define UART_DM_START_BREAK 0x50
+#define UART_DM_STOP_BREAK 0x60
+#define UART_DM_RESET_CTS 0x70
+#define UART_DM_RESET_STALE_INT 0x80
+#define UART_DM_RFR_LOW 0xD0
+#define UART_DM_RFR_HIGH 0xE0
+#define UART_DM_CR_PROTECTION_EN 0x100
+#define UART_DM_STALE_EVENT_ENABLE 0x500
+#define UART_DM_STALE_EVENT_DISABLE 0x600
+#define UART_DM_FORCE_STALE_EVENT 0x400
+#define UART_DM_CLEAR_TX_READY 0x300
+#define UART_DM_RESET_TX_ERROR 0x800
+#define UART_DM_RESET_TX_DONE 0x810
+
+/* UART Interrupt Mask Register */
+#define UART_DM_IMR 0x14
+/* these can be used for both ISR and IMR registers */
+#define UART_DM_TXLEV (1 << 0)
+#define UART_DM_RXHUNT (1 << 1)
+#define UART_DM_RXBRK_CHNG (1 << 2)
+#define UART_DM_RXSTALE (1 << 3)
+#define UART_DM_RXLEV (1 << 4)
+#define UART_DM_DELTA_CTS (1 << 5)
+#define UART_DM_CURRENT_CTS (1 << 6)
+#define UART_DM_TX_READY (1 << 7)
+#define UART_DM_TX_ERROR (1 << 8)
+#define UART_DM_TX_DONE (1 << 9)
+#define UART_DM_RXBREAK_START (1 << 10)
+#define UART_DM_RXBREAK_END (1 << 11)
+#define UART_DM_PAR_FRAME_ERR_IRQ (1 << 12)
+
+#define UART_DM_IMR_ENABLED (UART_DM_TX_READY | \
+ UART_DM_TXLEV | \
+ UART_DM_RXLEV | \
+ UART_DM_RXSTALE)
+
+/* UART Interrupt Programming Register */
+#define UART_DM_IPR 0x18
+#define UART_DM_STALE_TIMEOUT_LSB 0x0f
+#define UART_DM_STALE_TIMEOUT_MSB 0x00
+#define UART_DM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80
+#define UART_DM_IPR_STALE_LSB_BMSK 0x1f
+
+/* UART Transmit/Receive FIFO Watermark Register */
+#define UART_DM_TFWR 0x1c
+/* Interrupt is generated when FIFO level is less than or equal to this value */
+#define UART_DM_TFW_VALUE 0
+
+#define UART_DM_RFWR 0x20
+/* Interrupt is generated when the number of words in the RX FIFO exceeds this value */
+#define UART_DM_RFW_VALUE 0
+
+/* UART Hunt Character Register */
+#define UART_DM_HCR 0x24
+
+/* Used for RX transfer initialization */
+#define UART_DM_DMRX 0x34
+/* Default DMRX value - any value bigger than FIFO size would be fine */
+#define UART_DM_DMRX_DEF_VALUE 0x220
+
+/* Register to enable IRDA function */
+#define UART_DM_IRDA 0x38
+
+/* UART Data Mover Enable Register */
+#define UART_DM_DMEN 0x3c
+
+/* Number of characters for Transmission */
+#define UART_DM_NO_CHARS_FOR_TX 0x40
+
+/* UART RX FIFO Base Address */
+#define UART_DM_BADR 0x44
+
+#define UART_DM_SIM_CFG_ADDR 0x80
+
+/* Read only registers */
+/* UART Status Register */
+#define UART_DM_SR 0x08
+/* register field mask mapping */
+#define UART_DM_SR_RXRDY (1 << 0)
+#define UART_DM_SR_RXFULL (1 << 1)
+#define UART_DM_SR_TXRDY (1 << 2)
+#define UART_DM_SR_TXEMT (1 << 3)
+#define UART_DM_SR_UART_OVERRUN (1 << 4)
+#define UART_DM_SR_PAR_FRAME_ERR (1 << 5)
+#define UART_DM_RX_BREAK (1 << 6)
+#define UART_DM_HUNT_CHAR (1 << 7)
+#define UART_DM_RX_BRK_START_LAST (1 << 8)
+
+/* UART Receive FIFO Registers - 4 in numbers */
+#define UART_DM_RF(x) (0x70 + (4 * (x)))
+
+/* UART Masked Interrupt Status Register */
+#define UART_DM_MISR 0x10
+
+/* UART Interrupt Status Register */
+#define UART_DM_ISR 0x14
+
+/* Number of characters received since the end of last RX transfer */
+#define UART_DM_RX_TOTAL_SNAP 0x38
+
+/* UART TX FIFO Status Register */
+#define UART_DM_TXFS 0x4c
+#define UART_DM_TXFS_STATE_LSB(x) UART_DM_EXTR_BITS(x,0,6)
+#define UART_DM_TXFS_STATE_MSB(x) UART_DM_EXTR_BITS(x,14,31)
+#define UART_DM_TXFS_BUF_STATE(x) UART_DM_EXTR_BITS(x,7,9)
+#define UART_DM_TXFS_ASYNC_STATE(x) UART_DM_EXTR_BITS(x,10,13)
+
+/* UART RX FIFO Status Register */
+#define UART_DM_RXFS 0x50
+#define UART_DM_RXFS_STATE_LSB(x) UART_DM_EXTR_BITS(x,0,6)
+#define UART_DM_RXFS_STATE_MSB(x) UART_DM_EXTR_BITS(x,14,31)
+#define UART_DM_RXFS_BUF_STATE(x) UART_DM_EXTR_BITS(x,7,9)
+#define UART_DM_RXFS_ASYNC_STATE(x) UART_DM_EXTR_BITS(x,10,13)
+
+#endif /* _UART_DM_H_ */
diff --git a/sys/dev/usb/controller/xhci.c b/sys/dev/usb/controller/xhci.c
index f06964e..8676c0f 100644
--- a/sys/dev/usb/controller/xhci.c
+++ b/sys/dev/usb/controller/xhci.c
@@ -614,6 +614,10 @@ xhci_init(struct xhci_softc *sc, device_t self)
sc->sc_bus.devices = sc->sc_devices;
sc->sc_bus.devices_max = XHCI_MAX_DEVICES;
+ /* set default cycle state in case of early interrupts */
+ sc->sc_event_ccs = 1;
+ sc->sc_command_ccs = 1;
+
/* setup command queue mutex and condition variable */
cv_init(&sc->sc_cmd_cv, "CMDQ");
sx_init(&sc->sc_cmd_sx, "CMDQ lock");
@@ -2267,14 +2271,17 @@ xhci_configure_mask(struct usb_device *udev, uint32_t mask, uint8_t drop)
/* adjust */
x--;
- /* figure out maximum */
- if (x > sc->sc_hw.devs[index].context_num) {
+ /* figure out the maximum number of contexts */
+ if (x > sc->sc_hw.devs[index].context_num)
sc->sc_hw.devs[index].context_num = x;
- temp = xhci_ctx_get_le32(sc, &pinp->ctx_slot.dwSctx0);
- temp &= ~XHCI_SCTX_0_CTX_NUM_SET(31);
- temp |= XHCI_SCTX_0_CTX_NUM_SET(x + 1);
- xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx0, temp);
- }
+ else
+ x = sc->sc_hw.devs[index].context_num;
+
+ /* update number of contexts */
+ temp = xhci_ctx_get_le32(sc, &pinp->ctx_slot.dwSctx0);
+ temp &= ~XHCI_SCTX_0_CTX_NUM_SET(31);
+ temp |= XHCI_SCTX_0_CTX_NUM_SET(x + 1);
+ xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx0, temp);
}
return (0);
}
diff --git a/sys/dev/usb/controller/xhci.h b/sys/dev/usb/controller/xhci.h
index 408b429..7352e9c 100644
--- a/sys/dev/usb/controller/xhci.h
+++ b/sys/dev/usb/controller/xhci.h
@@ -493,7 +493,8 @@ struct xhci_softc {
uint8_t sc_noscratch;
/* root HUB device configuration */
uint8_t sc_conf;
- uint8_t sc_hub_idata[2];
+ /* root HUB port event bitmap, max 256 ports */
+ uint8_t sc_hub_idata[32];
/* size of context */
uint8_t sc_ctx_is_64_byte;
diff --git a/sys/dev/usb/usb_dev.c b/sys/dev/usb/usb_dev.c
index 3960673..2dad2bd 100644
--- a/sys/dev/usb/usb_dev.c
+++ b/sys/dev/usb/usb_dev.c
@@ -298,6 +298,10 @@ error:
}
mtx_unlock(&usb_ref_lock);
DPRINTFN(2, "fail\n");
+
+ /* clear all refs */
+ memset(crd, 0, sizeof(*crd));
+
return (USB_ERR_INVAL);
}
@@ -1093,8 +1097,8 @@ usb_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int fflag, struct thread*
goto done;
if (usb_usb_ref_device(cpd, &refs)) {
- err = ENXIO;
- goto done;
+ /* we lost the reference */
+ return (ENXIO);
}
err = (f->methods->f_ioctl_post) (f, cmd, addr, fflags);
@@ -1117,9 +1121,8 @@ usb_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int fflag, struct thread*
while (usb_ref_device(cpd, &refs, 1 /* need uref */)) {
if (usb_ref_device(cpd, &refs, 0)) {
- /* device no longer exits */
- err = ENXIO;
- goto done;
+ /* device no longer exists */
+ return (ENXIO);
}
usb_unref_device(cpd, &refs);
usb_pause_mtx(NULL, hz / 128);
@@ -1411,9 +1414,9 @@ usb_read(struct cdev *dev, struct uio *uio, int ioflag)
return (err);
err = usb_ref_device(cpd, &refs, 0 /* no uref */ );
- if (err) {
+ if (err)
return (ENXIO);
- }
+
fflags = cpd->fflags;
f = refs.rxfifo;
@@ -1537,9 +1540,9 @@ usb_write(struct cdev *dev, struct uio *uio, int ioflag)
return (err);
err = usb_ref_device(cpd, &refs, 0 /* no uref */ );
- if (err) {
+ if (err)
return (ENXIO);
- }
+
fflags = cpd->fflags;
f = refs.txfifo;
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index 330abbb..17a8806 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -3717,6 +3717,7 @@ product REALTEK RTL8191CU 0x8177 RTL8191CU
product REALTEK RTL8192CU 0x8178 RTL8192CU
product REALTEK RTL8192CE 0x817c RTL8192CE
product REALTEK RTL8188RU_1 0x817d RTL8188RU
+product REALTEK RTL8188RU_3 0x817f RTL8188RU
product REALTEK RTL8712 0x8712 RTL8712
product REALTEK RTL8713 0x8712 RTL8713
product REALTEK RTL8188RU_2 0x317f RTL8188RU
diff --git a/sys/dev/usb/wlan/if_urtwn.c b/sys/dev/usb/wlan/if_urtwn.c
index 676ad39..1f728d4 100644
--- a/sys/dev/usb/wlan/if_urtwn.c
+++ b/sys/dev/usb/wlan/if_urtwn.c
@@ -141,6 +141,7 @@ static const STRUCT_USB_HOST_ID urtwn_devs[] = {
URTWN_DEV(REALTEK, RTL8188CUS),
URTWN_DEV(REALTEK, RTL8188RU_1),
URTWN_DEV(REALTEK, RTL8188RU_2),
+ URTWN_DEV(REALTEK, RTL8188RU_3),
URTWN_DEV(REALTEK, RTL8191CU),
URTWN_DEV(REALTEK, RTL8192CE),
URTWN_DEV(REALTEK, RTL8192CU),
diff --git a/sys/dev/vt/vt.h b/sys/dev/vt/vt.h
index f384a60..578f1a8 100644
--- a/sys/dev/vt/vt.h
+++ b/sys/dev/vt/vt.h
@@ -260,6 +260,7 @@ struct vt_window {
unsigned int vw_number; /* (c) Window number. */
int vw_kbdmode; /* (?) Keyboard mode. */
int vw_prev_kbdmode;/* (?) Previous mode. */
+ int vw_kbdstate; /* (?) Keyboard state. */
int vw_grabbed; /* (?) Grab count. */
char *vw_kbdsq; /* Escape sequence queue*/
unsigned int vw_flags; /* (d) Per-window flags. */
diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c
index 4e8d19b..2dd7e3f 100644
--- a/sys/dev/vt/vt_core.c
+++ b/sys/dev/vt/vt_core.c
@@ -298,6 +298,97 @@ vt_switch_timer(void *arg)
}
static int
+vt_save_kbd_mode(struct vt_window *vw, keyboard_t *kbd)
+{
+ int mode, ret;
+
+ mode = 0;
+ ret = kbdd_ioctl(kbd, KDGKBMODE, (caddr_t)&mode);
+ if (ret == ENOIOCTL)
+ ret = ENODEV;
+ if (ret != 0)
+ return (ret);
+
+ vw->vw_kbdmode = mode;
+
+ return (0);
+}
+
+static int
+vt_update_kbd_mode(struct vt_window *vw, keyboard_t *kbd)
+{
+ int ret;
+
+ ret = kbdd_ioctl(kbd, KDSKBMODE, (caddr_t)&vw->vw_kbdmode);
+ if (ret == ENOIOCTL)
+ ret = ENODEV;
+
+ return (ret);
+}
+
+static int
+vt_save_kbd_state(struct vt_window *vw, keyboard_t *kbd)
+{
+ int state, ret;
+
+ state = 0;
+ ret = kbdd_ioctl(kbd, KDGKBSTATE, (caddr_t)&state);
+ if (ret == ENOIOCTL)
+ ret = ENODEV;
+ if (ret != 0)
+ return (ret);
+
+ vw->vw_kbdstate &= ~LOCK_MASK;
+ vw->vw_kbdstate |= state & LOCK_MASK;
+
+ return (0);
+}
+
+static int
+vt_update_kbd_state(struct vt_window *vw, keyboard_t *kbd)
+{
+ int state, ret;
+
+ state = vw->vw_kbdstate & LOCK_MASK;
+ ret = kbdd_ioctl(kbd, KDSKBSTATE, (caddr_t)&state);
+ if (ret == ENOIOCTL)
+ ret = ENODEV;
+
+ return (ret);
+}
+
+static int
+vt_save_kbd_leds(struct vt_window *vw, keyboard_t *kbd)
+{
+ int leds, ret;
+
+ leds = 0;
+ ret = kbdd_ioctl(kbd, KDGETLED, (caddr_t)&leds);
+ if (ret == ENOIOCTL)
+ ret = ENODEV;
+ if (ret != 0)
+ return (ret);
+
+ vw->vw_kbdstate &= ~LED_MASK;
+ vw->vw_kbdstate |= leds & LED_MASK;
+
+ return (0);
+}
+
+static int
+vt_update_kbd_leds(struct vt_window *vw, keyboard_t *kbd)
+{
+ int leds, ret;
+
+ leds = vw->vw_kbdstate & LED_MASK;
+ ret = kbdd_ioctl(kbd, KDSETLED, (caddr_t)&leds);
+ if (ret == ENOIOCTL)
+ ret = ENODEV;
+
+ return (ret);
+}
+
+static int
vt_window_preswitch(struct vt_window *vw, struct vt_window *curvw)
{
@@ -409,7 +500,11 @@ vt_window_switch(struct vt_window *vw)
mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL) {
- kbdd_ioctl(kbd, KDSKBMODE, (void *)&vw->vw_kbdmode);
+ if (curvw->vw_kbdmode == K_XLATE)
+ vt_save_kbd_state(curvw, kbd);
+
+ vt_update_kbd_mode(vw, kbd);
+ vt_update_kbd_state(vw, kbd);
}
mtx_unlock(&Giant);
DPRINTF(10, "%s(ttyv%d) done\n", __func__, vw->vw_number);
@@ -602,7 +697,6 @@ static int
vt_processkey(keyboard_t *kbd, struct vt_device *vd, int c)
{
struct vt_window *vw = vd->vd_curwindow;
- int state = 0;
#if VT_ALT_TO_ESC_HACK
if (c & RELKEY) {
@@ -665,10 +759,9 @@ vt_processkey(keyboard_t *kbd, struct vt_device *vd, int c)
vt_proc_window_switch(vw);
return (0);
case SLK: {
-
- kbdd_ioctl(kbd, KDGKBSTATE, (caddr_t)&state);
+ vt_save_kbd_state(vw, kbd);
VT_LOCK(vd);
- if (state & SLKED) {
+ if (vw->vw_kbdstate & SLKED) {
/* Turn scrolling on. */
vw->vw_flags |= VWF_SCROLL;
VTBUF_SLCK_ENABLE(&vw->vw_buf);
@@ -1201,13 +1294,11 @@ vtterm_cngetc(struct terminal *tm)
struct vt_window *vw = tm->tm_softc;
struct vt_device *vd = vw->vw_device;
keyboard_t *kbd;
- int state;
u_int c;
if (vw->vw_kbdsq && *vw->vw_kbdsq)
return (*vw->vw_kbdsq++);
- state = 0;
/* Make sure the splash screen is not there. */
if (vd->vd_flags & VDF_SPLASH) {
/* Remove splash */
@@ -1223,8 +1314,8 @@ vtterm_cngetc(struct terminal *tm)
return (-1);
/* Force keyboard input mode to K_XLATE */
- c = K_XLATE;
- kbdd_ioctl(kbd, KDSKBMODE, (void *)&c);
+ vw->vw_kbdmode = K_XLATE;
+ vt_update_kbd_mode(vw, kbd);
/* Switch the keyboard to polling to make it work here. */
kbdd_poll(kbd, TRUE);
@@ -1243,8 +1334,8 @@ vtterm_cngetc(struct terminal *tm)
if (c & SPCLKEY) {
switch (c) {
case SPCLKEY | SLK:
- kbdd_ioctl(kbd, KDGKBSTATE, (caddr_t)&state);
- if (state & SLKED) {
+ vt_save_kbd_state(vw, kbd);
+ if (vw->vw_kbdstate & SLKED) {
/* Turn scrolling on. */
vw->vw_flags |= VWF_SCROLL;
VTBUF_SLCK_ENABLE(&vw->vw_buf);
@@ -1311,7 +1402,7 @@ vtterm_cngrab(struct terminal *tm)
/* We shall always use the keyboard in the XLATE mode here. */
vw->vw_prev_kbdmode = vw->vw_kbdmode;
vw->vw_kbdmode = K_XLATE;
- (void)kbdd_ioctl(kbd, KDSKBMODE, (caddr_t)&vw->vw_kbdmode);
+ vt_update_kbd_mode(vw, kbd);
kbdd_poll(kbd, TRUE);
}
@@ -1336,7 +1427,7 @@ vtterm_cnungrab(struct terminal *tm)
kbdd_poll(kbd, FALSE);
vw->vw_kbdmode = vw->vw_prev_kbdmode;
- (void)kbdd_ioctl(kbd, KDSKBMODE, (caddr_t)&vw->vw_kbdmode);
+ vt_update_kbd_mode(vw, kbd);
kbdd_disable(kbd);
}
@@ -1890,12 +1981,8 @@ skip_thunk:
case SETFKEY:
case KDGKBINFO:
case KDGKBTYPE:
- case KDSKBSTATE: /* set keyboard state (locks) */
- case KDGKBSTATE: /* get keyboard state (locks) */
case KDGETREPEAT: /* get keyboard repeat & delay rates */
case KDSETREPEAT: /* set keyboard repeat & delay rates (new) */
- case KDSETLED: /* set keyboard LED status */
- case KDGETLED: /* get keyboard LED status */
case KBADDKBD: /* add/remove keyboard to/from mux */
case KBRELKBD: {
error = 0;
@@ -1915,18 +2002,101 @@ skip_thunk:
}
return (error);
}
+ case KDGKBSTATE: { /* get keyboard state (locks) */
+ error = 0;
+
+ if (vw == vd->vd_curwindow) {
+ mtx_lock(&Giant);
+ kbd = kbd_get_keyboard(vd->vd_keyboard);
+ if (kbd != NULL)
+ error = vt_save_kbd_state(vw, kbd);
+ mtx_unlock(&Giant);
+
+ if (error != 0)
+ return (error);
+ }
+
+ *(int *)data = vw->vw_kbdstate & LOCK_MASK;
+
+ return (error);
+ }
+ case KDSKBSTATE: { /* set keyboard state (locks) */
+ int state;
+
+ state = *(int *)data;
+ if (state & ~LOCK_MASK)
+ return (EINVAL);
+
+ vw->vw_kbdstate &= ~LOCK_MASK;
+ vw->vw_kbdstate |= state;
+
+ error = 0;
+ if (vw == vd->vd_curwindow) {
+ mtx_lock(&Giant);
+ kbd = kbd_get_keyboard(vd->vd_keyboard);
+ if (kbd != NULL)
+ error = vt_update_kbd_state(vw, kbd);
+ mtx_unlock(&Giant);
+ }
+
+ return (error);
+ }
+ case KDGETLED: { /* get keyboard LED status */
+ error = 0;
+
+ if (vw == vd->vd_curwindow) {
+ mtx_lock(&Giant);
+ kbd = kbd_get_keyboard(vd->vd_keyboard);
+ if (kbd != NULL)
+ error = vt_save_kbd_leds(vw, kbd);
+ mtx_unlock(&Giant);
+
+ if (error != 0)
+ return (error);
+ }
+
+ *(int *)data = vw->vw_kbdstate & LED_MASK;
+
+ return (error);
+ }
+ case KDSETLED: { /* set keyboard LED status */
+ int leds;
+
+ leds = *(int *)data;
+ if (leds & ~LED_MASK)
+ return (EINVAL);
+
+ vw->vw_kbdstate &= ~LED_MASK;
+ vw->vw_kbdstate |= leds;
+
+ error = 0;
+ if (vw == vd->vd_curwindow) {
+ mtx_lock(&Giant);
+ kbd = kbd_get_keyboard(vd->vd_keyboard);
+ if (kbd != NULL)
+ error = vt_update_kbd_leds(vw, kbd);
+ mtx_unlock(&Giant);
+ }
+
+ return (error);
+ }
case KDGKBMODE: {
- int mode = -1;
+ error = 0;
- mtx_lock(&Giant);
- kbd = kbd_get_keyboard(vd->vd_keyboard);
- if (kbd != NULL) {
- kbdd_ioctl(kbd, KDGKBMODE, (void *)&mode);
+ if (vw == vd->vd_curwindow) {
+ mtx_lock(&Giant);
+ kbd = kbd_get_keyboard(vd->vd_keyboard);
+ if (kbd != NULL)
+ error = vt_save_kbd_mode(vw, kbd);
+ mtx_unlock(&Giant);
+
+ if (error != 0)
+ return (error);
}
- mtx_unlock(&Giant);
- DPRINTF(20, "mode %d, vw_kbdmode %d\n", mode, vw->vw_kbdmode);
- *(int *)data = mode;
- return (0);
+
+ *(int *)data = vw->vw_kbdmode;
+
+ return (error);
}
case KDSKBMODE: {
int mode;
@@ -1937,19 +2107,17 @@ skip_thunk:
case K_RAW:
case K_CODE:
vw->vw_kbdmode = mode;
- if (vw == vd->vd_curwindow) {
- keyboard_t *kbd;
- error = 0;
+ error = 0;
+ if (vw == vd->vd_curwindow) {
mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
- if (kbd != NULL) {
- error = kbdd_ioctl(kbd, KDSKBMODE,
- (void *)&mode);
- }
+ if (kbd != NULL)
+ error = vt_update_kbd_mode(vw, kbd);
mtx_unlock(&Giant);
}
- return (0);
+
+ return (error);
default:
return (EINVAL);
}
@@ -1977,8 +2145,17 @@ skip_thunk:
return (0);
case CONS_GETINFO: {
vid_info_t *vi = (vid_info_t *)data;
+ if (vi->size != sizeof(struct vid_info))
+ return (EINVAL);
+
+ if (vw == vd->vd_curwindow) {
+ kbd = kbd_get_keyboard(vd->vd_keyboard);
+ if (kbd != NULL)
+ vt_save_kbd_state(vw, kbd);
+ }
vi->m_num = vd->vd_curwindow->vw_number + 1;
+ vi->mk_keylock = vw->vw_kbdstate & LOCK_MASK;
/* XXX: other fields! */
return (0);
}
@@ -2093,13 +2270,14 @@ skip_thunk:
(void *)vd, vt_kbdevent, vd);
if (i >= 0) {
if (vd->vd_keyboard != -1) {
+ vt_save_kbd_state(vd->vd_curwindow, kbd);
kbd_release(kbd, (void *)vd);
}
kbd = kbd_get_keyboard(i);
vd->vd_keyboard = i;
- (void)kbdd_ioctl(kbd, KDSKBMODE,
- (caddr_t)&vd->vd_curwindow->vw_kbdmode);
+ vt_update_kbd_mode(vd->vd_curwindow, kbd);
+ vt_update_kbd_state(vd->vd_curwindow, kbd);
} else {
error = EPERM; /* XXX */
}
@@ -2115,6 +2293,7 @@ skip_thunk:
mtx_unlock(&Giant);
return (EINVAL);
}
+ vt_save_kbd_state(vd->vd_curwindow, kbd);
error = kbd_release(kbd, (void *)vd);
if (error == 0) {
vd->vd_keyboard = -1;
diff --git a/sys/dev/xen/balloon/balloon.c b/sys/dev/xen/balloon/balloon.c
index 6503a00..e113e2c 100644
--- a/sys/dev/xen/balloon/balloon.c
+++ b/sys/dev/xen/balloon/balloon.c
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
+#include <sys/module.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -348,25 +349,50 @@ watch_target(struct xs_watch *watch,
set_new_target(new_target >> KB_TO_PAGE_SHIFT);
}
-static void
-balloon_init_watcher(void *arg)
+/*------------------ Private Device Attachment Functions --------------------*/
+/**
+ * \brief Identify instances of this device type in the system.
+ *
+ * \param driver The driver performing this identify action.
+ * \param parent The NewBus parent device for any devices this method adds.
+ */
+static void
+xenballoon_identify(driver_t *driver __unused, device_t parent)
{
- int err;
-
- if (!is_running_on_xen())
- return;
+ /*
+ * A single device instance for our driver is always present
+ * in a system operating under Xen.
+ */
+ BUS_ADD_CHILD(parent, 0, driver->name, 0);
+}
- err = xs_register_watch(&target_watch);
- if (err)
- printf("Failed to set balloon watcher\n");
+/**
+ * \brief Probe for the existence of the Xen Balloon device.
+ *
+ * \param dev NewBus device_t for this Xen control instance.
+ *
+ * \return Always returns 0 indicating success.
+ */
+static int
+xenballoon_probe(device_t dev)
+{
+ device_set_desc(dev, "Xen Balloon Device");
+ return (0);
}
-SYSINIT(balloon_init_watcher, SI_SUB_PSEUDO, SI_ORDER_ANY,
- balloon_init_watcher, NULL);
-static void
-balloon_init(void *arg)
+/**
+ * \brief Attach the Xen Balloon device.
+ *
+ * \param dev NewBus device_t for this Xen control instance.
+ *
+ * \return On success, 0. Otherwise an errno value indicating the
+ * type of failure.
+ */
+static int
+xenballoon_attach(device_t dev)
{
+ int err;
#ifndef XENHVM
vm_page_t page;
unsigned long pfn;
@@ -374,15 +400,13 @@ balloon_init(void *arg)
#define max_pfn HYPERVISOR_shared_info->arch.max_pfn
#endif
- if (!is_running_on_xen())
- return;
-
mtx_init(&balloon_mutex, "balloon_mutex", NULL, MTX_DEF);
#ifndef XENHVM
bs.current_pages = min(xen_start_info->nr_pages, max_pfn);
#else
- bs.current_pages = realmem;
+ bs.current_pages = xen_pv_domain() ?
+ HYPERVISOR_start_info->nr_pages : realmem;
#endif
bs.target_pages = bs.current_pages;
bs.balloon_low = 0;
@@ -403,17 +427,27 @@ balloon_init(void *arg)
#endif
target_watch.callback = watch_target;
-
- return;
-}
-SYSINIT(balloon_init, SI_SUB_PSEUDO, SI_ORDER_ANY, balloon_init, NULL);
-void balloon_update_driver_allowance(long delta);
+ err = xs_register_watch(&target_watch);
+ if (err)
+ device_printf(dev,
+ "xenballon: failed to set balloon watcher\n");
-void
-balloon_update_driver_allowance(long delta)
-{
- mtx_lock(&balloon_mutex);
- bs.driver_pages += delta;
- mtx_unlock(&balloon_mutex);
+ return (err);
}
+
+/*-------------------- Private Device Attachment Data -----------------------*/
+static device_method_t xenballoon_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, xenballoon_identify),
+ DEVMETHOD(device_probe, xenballoon_probe),
+ DEVMETHOD(device_attach, xenballoon_attach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(xenballoon, xenballoon_driver, xenballoon_methods, 0);
+devclass_t xenballoon_devclass;
+
+DRIVER_MODULE(xenballoon, xenstore, xenballoon_driver, xenballoon_devclass,
+ NULL, NULL);
diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c
index 654f307..1273961 100644
--- a/sys/dev/xen/blkback/blkback.c
+++ b/sys/dev/xen/blkback/blkback.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2011 Spectra Logic Corporation
+ * Copyright (c) 2009-2012 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -784,6 +784,12 @@ struct xbb_softc {
/** Number of requests we have completed*/
uint64_t reqs_completed;
+ /** Number of requests we have queued but not yet pushed */
+ uint64_t reqs_queued_for_completion;
+
+ /** Number of requests we have completed with an error status */
+ uint64_t reqs_completed_with_error;
+
/** How many forced dispatches (i.e. without coalescing) have happend */
uint64_t forced_dispatch;
@@ -1143,7 +1149,7 @@ xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
int wakeup)
{
- mtx_lock(&xbb->lock);
+ mtx_assert(&xbb->lock, MA_OWNED);
if (wakeup) {
wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
@@ -1167,8 +1173,6 @@ xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
xbb_shutdown(xbb);
}
- mtx_unlock(&xbb->lock);
-
if (wakeup != 0)
taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
}
@@ -1261,16 +1265,16 @@ bailout_error:
if (nreq != NULL)
xbb_release_req(xbb, nreq);
- mtx_unlock(&xbb->lock);
-
if (nreqlist != NULL)
xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);
+ mtx_unlock(&xbb->lock);
+
return (1);
}
/**
- * Create and transmit a response to a blkif request.
+ * Create and queue a response to a blkif request.
*
* \param xbb Per-instance xbb configuration structure.
* \param req The request structure to which to respond.
@@ -1278,20 +1282,28 @@ bailout_error:
* in sys/xen/interface/io/blkif.h.
*/
static void
-xbb_send_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
+xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
{
blkif_response_t *resp;
- int more_to_do;
- int notify;
- more_to_do = 0;
+ /*
+ * The mutex is required here, and should be held across this call
+ * until after the subsequent call to xbb_push_responses(). This
+ * is to guarantee that another context won't queue responses and
+ * push them while we're active.
+ *
+ * That could lead to the other end being notified of responses
+ * before the resources have been freed on this end. The other end
+ * would then be able to queue additional I/O, and we may run out
+ * of resources because we haven't freed them all yet.
+ */
+ mtx_assert(&xbb->lock, MA_OWNED);
/*
* Place on the response ring for the relevant domain.
* For now, only the spacing between entries is different
* in the different ABIs, not the response entry layout.
*/
- mtx_lock(&xbb->lock);
switch (xbb->abi) {
case BLKIF_PROTOCOL_NATIVE:
resp = RING_GET_RESPONSE(&xbb->rings.native,
@@ -1315,8 +1327,38 @@ xbb_send_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
resp->operation = req->operation;
resp->status = status;
+ if (status != BLKIF_RSP_OKAY)
+ xbb->reqs_completed_with_error++;
+
xbb->rings.common.rsp_prod_pvt += BLKIF_SEGS_TO_BLOCKS(req->nr_pages);
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, notify);
+
+ xbb->reqs_queued_for_completion++;
+
+}
+
+/**
+ * Send queued responses to blkif requests.
+ *
+ * \param xbb Per-instance xbb configuration structure.
+ * \param run_taskqueue Flag that is set to 1 if the taskqueue
+ * should be run, 0 if it does not need to be run.
+ * \param notify Flag that is set to 1 if the other end should be
+ * notified via irq, 0 if the other end should not be
+ * notified.
+ */
+static void
+xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
+{
+ int more_to_do;
+
+ /*
+ * The mutex is required here.
+ */
+ mtx_assert(&xbb->lock, MA_OWNED);
+
+ more_to_do = 0;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);
if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {
@@ -1331,15 +1373,10 @@ xbb_send_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
more_to_do = 1;
}
- xbb->reqs_completed++;
+ xbb->reqs_completed += xbb->reqs_queued_for_completion;
+ xbb->reqs_queued_for_completion = 0;
- mtx_unlock(&xbb->lock);
-
- if (more_to_do)
- taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
-
- if (notify)
- xen_intr_signal(xbb->xen_intr_handle);
+ *run_taskqueue = more_to_do;
}
/**
@@ -1353,23 +1390,29 @@ xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
{
struct xbb_xen_req *nreq;
off_t sectors_sent;
+ int notify, run_taskqueue;
sectors_sent = 0;
if (reqlist->flags & XBB_REQLIST_MAPPED)
xbb_unmap_reqlist(reqlist);
+ mtx_lock(&xbb->lock);
+
/*
- * All I/O is done, send the response. A lock should not be
- * necessary here because the request list is complete, and
- * therefore this is the only context accessing this request
- * right now. The functions we call do their own locking if
- * necessary.
+ * All I/O is done, send the response. A lock is not necessary
+ * to protect the request list, because all requests have
+ * completed. Therefore this is the only context accessing this
+ * reqlist right now. However, in order to make sure that no one
+ * else queues responses onto the queue or pushes them to the other
+ * side while we're active, we need to hold the lock across the
+ * calls to xbb_queue_response() and xbb_push_responses().
*/
STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
off_t cur_sectors_sent;
- xbb_send_response(xbb, nreq, reqlist->status);
+ /* Put this response on the ring, but don't push yet */
+ xbb_queue_response(xbb, nreq, reqlist->status);
/* We don't report bytes sent if there is an error. */
if (reqlist->status == BLKIF_RSP_OKAY)
@@ -1404,6 +1447,16 @@ xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
/*then*/&reqlist->ds_t0);
xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
+
+ xbb_push_responses(xbb, &run_taskqueue, &notify);
+
+ mtx_unlock(&xbb->lock);
+
+ if (run_taskqueue)
+ taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
+
+ if (notify)
+ xen_intr_signal(xbb->xen_intr_handle);
}
/**
@@ -3589,6 +3642,16 @@ xbb_setup_sysctl(struct xbb_softc *xbb)
"how many I/O requests have been completed");
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "reqs_queued_for_completion", CTLFLAG_RW,
+ &xbb->reqs_queued_for_completion,
+ "how many I/O requests queued but not yet pushed");
+
+ SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "reqs_completed_with_error", CTLFLAG_RW,
+ &xbb->reqs_completed_with_error,
+ "how many I/O requests completed with error status");
+
+ SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
"forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch,
"how many I/O dispatches were forced");
diff --git a/sys/dev/xen/xenstore/xenstore.c b/sys/dev/xen/xenstore/xenstore.c
new file mode 100644
index 0000000..4cf985a
--- /dev/null
+++ b/sys/dev/xen/xenstore/xenstore.c
@@ -0,0 +1,1703 @@
+/******************************************************************************
+ * xenstore.c
+ *
+ * Low-level kernel interface to the XenStore.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2009,2010 Spectra Logic Corporation
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/sx.h>
+#include <sys/syslog.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/kthread.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+#include <sys/unistd.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+
+#include <machine/stdarg.h>
+
+#include <xen/xen-os.h>
+#include <xen/hypervisor.h>
+#include <xen/xen_intr.h>
+
+#include <xen/interface/hvm/params.h>
+#include <xen/hvm.h>
+
+#include <xen/xenstore/xenstorevar.h>
+#include <xen/xenstore/xenstore_internal.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+/**
+ * \file xenstore.c
+ * \brief XenStore interface
+ *
+ * The XenStore interface is a simple storage system that is a means of
+ * communicating state and configuration data between the Xen Domain 0
+ * and the various guest domains. All configuration data other than
+ * a small amount of essential information required during the early
+ * boot process of launching a Xen aware guest, is managed using the
+ * XenStore.
+ *
+ * The XenStore is ASCII string based, and has a structure and semantics
+ * similar to a filesystem. There are files and directories, and the
+ * directories are able to contain files or other directories. The depth
+ * of the hierarchy is only limited by the XenStore's maximum path length.
+ *
+ * The communication channel between the XenStore service and other
+ * domains is via two, guest specific, ring buffers in a shared memory
+ * area. One ring buffer is used for communicating in each direction.
+ * The grant table references for this shared memory are given to the
+ * guest either via the xen_start_info structure for a fully para-
+ * virtualized guest, or via HVM hypercalls for a hardware virtualized
+ * guest.
+ *
+ * The XenStore communication relies on an event channel and thus
+ * interrupts. For this reason, the attachment of the XenStore
+ * relies on an interrupt driven configuration hook to hold off
+ * boot processing until communication with the XenStore service
+ * can be established.
+ *
+ * Several Xen services depend on the XenStore, most notably the
+ * XenBus used to discover and manage Xen devices. These services
+ * are implemented as NewBus child attachments to a bus exported
+ * by this XenStore driver.
+ */
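+
+/*
+ * Usage sketch (illustrative only): a consumer reads a node through the
+ * public API declared in xenstorevar.h.  The path below is hypothetical
+ * and replies are returned in malloc'd storage owned by the caller.
+ */
+#if 0
+static void
+example_read_node(void)
+{
+ void *val;
+ int error;
+
+ /* Read "example/frontend/state" outside of any transaction. */
+ error = xs_read(XST_NIL, "example/frontend", "state", NULL, &val);
+ if (error != 0)
+ return;
+ printf("state: %s\n", (const char *)val);
+ free(val, M_XENSTORE); /* caller frees the reply */
+}
+#endif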
+
+static struct xs_watch *find_watch(const char *token);
+
+MALLOC_DEFINE(M_XENSTORE, "xenstore", "XenStore data and results");
+
+/**
+ * Pointer to shared memory communication structures allowing us
+ * to communicate with the XenStore service.
+ *
+ * When operating in full PV mode, this pointer is set early in kernel
+ * startup from within xen_machdep.c. In HVM mode, we use hypercalls
+ * to get the guest frame number for the shared page and then map it
+ * into kva. See xs_init() for details.
+ */
+struct xenstore_domain_interface *xen_store;
+
+/*-------------------------- Private Data Structures ------------------------*/
+
+/**
+ * Structure capturing messages received from the XenStore service.
+ */
+struct xs_stored_msg {
+ TAILQ_ENTRY(xs_stored_msg) list;
+
+ struct xsd_sockmsg hdr;
+
+ union {
+ /* Queued replies. */
+ struct {
+ char *body;
+ } reply;
+
+ /* Queued watch events. */
+ struct {
+ struct xs_watch *handle;
+ const char **vec;
+ u_int vec_size;
+ } watch;
+ } u;
+};
+TAILQ_HEAD(xs_stored_msg_list, xs_stored_msg);
+
+/**
+ * Container for all XenStore related state.
+ */
+struct xs_softc {
+ /** Newbus device for the XenStore. */
+ device_t xs_dev;
+
+ /**
+ * Lock serializing access to ring producer/consumer
+ * indexes. Use of this lock guarantees that wakeups
+ * of blocking readers/writers are not missed due to
+ * races with the XenStore service.
+ */
+ struct mtx ring_lock;
+
+ /*
+ * Mutex used to ensure exclusive access to the outgoing
+ * communication ring. We use a lock type that can be
+ * held while sleeping so that xs_write() can block waiting
+ * for space in the ring to free up, without allowing another
+ * writer to come in and corrupt a partial message write.
+ */
+ struct sx request_mutex;
+
+ /**
+ * A list of replies to our requests.
+ *
+ * The reply list is filled by xs_rcv_thread(). It
+ * is consumed by the context that issued the request
+ * to which a reply is made. The requester blocks in
+ * xs_read_reply().
+ *
+ * \note Only one requesting context can be active at a time.
+ * This is guaranteed by the request_mutex and ensures
+ * that the requester sees replies matching the order
+ * of its requests.
+ */
+ struct xs_stored_msg_list reply_list;
+
+ /** Lock protecting the reply list. */
+ struct mtx reply_lock;
+
+ /**
+ * List of registered watches.
+ */
+ struct xs_watch_list registered_watches;
+
+ /** Lock protecting the registered watches list. */
+ struct mtx registered_watches_lock;
+
+ /**
+ * List of pending watch callback events.
+ */
+ struct xs_stored_msg_list watch_events;
+
+ /** Lock protecting the watch callback list. */
+ struct mtx watch_events_lock;
+
+ /**
+ * Sleepable lock used to prevent VM suspension while a
+ * xenstore transaction is outstanding.
+ *
+ * Each active transaction holds a shared lock on the
+ * suspend mutex. Our suspend method blocks waiting
+ * to acquire an exclusive lock. This guarantees that
+ * suspend processing will only proceed once all active
+ * transactions have been retired.
+ */
+ struct sx suspend_mutex;
+
+ /**
+ * The processid of the xenwatch thread.
+ */
+ pid_t xenwatch_pid;
+
+ /**
+ * Sleepable mutex used to gate the execution of XenStore
+ * watch event callbacks.
+ *
+ * xenwatch_thread holds an exclusive lock on this mutex
+ * while delivering event callbacks, and xenstore_unregister_watch()
+ * uses an exclusive lock of this mutex to guarantee that no
+ * callbacks of the just unregistered watch are pending
+ * before returning to its caller.
+ */
+ struct sx xenwatch_mutex;
+
+ /**
+ * The HVM guest pseudo-physical frame number. This is Xen's mapping
+ * of the true machine frame number into our "physical address space".
+ */
+ unsigned long gpfn;
+
+ /**
+ * The event channel for communicating with the
+ * XenStore service.
+ */
+ int evtchn;
+
+ /** Handle for XenStore interrupts. */
+ xen_intr_handle_t xen_intr_handle;
+
+ /**
+ * Interrupt driven config hook allowing us to defer
+ * attaching children until interrupts (and thus communication
+ * with the XenStore service) are available.
+ */
+ struct intr_config_hook xs_attachcb;
+
+ /**
+ * Xenstore is a user-space process that usually runs in Dom0,
+ * so if this domain is booting as Dom0, xenstore won't be accessible,
+ * and we have to defer the initialization of xenstore-related
+ * devices until later (when xenstore is started).
+ */
+ bool initialized;
+
+ /**
+ * Task to run when xenstore is initialized (Dom0 only); it will
+ * take care of attaching xenstore-related devices.
+ */
+ struct task xs_late_init;
+};
+
+/*-------------------------------- Global Data ------------------------------*/
+static struct xs_softc xs;
+
+/*------------------------- Private Utility Functions -----------------------*/
+
+/**
+ * Count and optionally record pointers to a number of NUL terminated
+ * strings in a buffer.
+ *
+ * \param strings A pointer to a contiguous buffer of NUL terminated strings.
+ * \param dest An array to store pointers to each string found in strings.
+ * \param len The length of the buffer pointed to by strings.
+ *
+ * \return A count of the number of strings found.
+ */
+static u_int
+extract_strings(const char *strings, const char **dest, u_int len)
+{
+ u_int num;
+ const char *p;
+
+ for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) {
+ if (dest != NULL)
+ *dest++ = p;
+ num++;
+ }
+
+ return (num);
+}
+
+/**
+ * Convert a contiguous buffer containing a series of NUL terminated
+ * strings into an array of pointers to strings.
+ *
+ * The returned pointer references the array of string pointers which
+ * is followed by the storage for the string data. It is the client's
+ * responsibility to free this storage.
+ *
+ * The storage addressed by strings is free'd prior to split returning.
+ *
+ * \param strings A pointer to a contiguous buffer of NUL terminated strings.
+ * \param len The length of the buffer pointed to by strings.
+ * \param num The number of strings found and returned in the strings
+ * array.
+ *
+ * \return An array of pointers to the strings found in the input buffer.
+ */
+static const char **
+split(char *strings, u_int len, u_int *num)
+{
+ const char **ret;
+
+ /* Protect against unterminated buffers. */
+ if (len > 0)
+ strings[len - 1] = '\0';
+
+ /* Count the strings. */
+ *num = extract_strings(strings, /*dest*/NULL, len);
+
+ /* Transfer to one big alloc for easy freeing by the caller. */
+ ret = malloc(*num * sizeof(char *) + len, M_XENSTORE, M_WAITOK);
+ memcpy(&ret[*num], strings, len);
+ free(strings, M_XENSTORE);
+
+ /* Extract pointers to newly allocated array. */
+ strings = (char *)&ret[*num];
+ (void)extract_strings(strings, /*dest*/ret, len);
+
+ return (ret);
+}
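+
+/*
+ * Usage sketch (illustrative only): the caller of split() gets back a
+ * single allocation holding both the pointer array and the string data,
+ * so one free() releases everything.  The parameters are hypothetical.
+ */
+#if 0
+static void
+example_split_usage(char *strings, u_int len)
+{
+ const char **entries;
+ u_int num, i;
+
+ entries = split(strings, len, &num); /* consumes "strings" */
+ for (i = 0; i < num; i++)
+ printf("entry %u: %s\n", i, entries[i]);
+ free(entries, M_XENSTORE); /* one free releases array and data */
+}
+#endif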
+
+/*------------------------- Public Utility Functions -------------------------*/
+/*------- API comments for these methods can be found in xenstorevar.h -------*/
+struct sbuf *
+xs_join(const char *dir, const char *name)
+{
+ struct sbuf *sb;
+
+ sb = sbuf_new_auto();
+ sbuf_cat(sb, dir);
+ if (name[0] != '\0') {
+ sbuf_putc(sb, '/');
+ sbuf_cat(sb, name);
+ }
+ sbuf_finish(sb);
+
+ return (sb);
+}
+
+/*-------------------- Low Level Communication Management --------------------*/
+/**
+ * Interrupt handler for the XenStore event channel.
+ *
+ * XenStore reads and writes block on "xen_store" for buffer
+ * space. Wakeup any blocking operations when the XenStore
+ * service has modified the queues.
+ */
+static void
+xs_intr(void *arg __unused)
+{
+
+ /* If xenstore has not been initialized, initialize it now */
+ if (!xs.initialized) {
+ xs.initialized = true;
+ /*
+ * Since this task is probing and attaching devices we
+ * have to hold the Giant lock.
+ */
+ taskqueue_enqueue(taskqueue_swi_giant, &xs.xs_late_init);
+ }
+
+ /*
+ * Hold ring lock across wakeup so that clients
+ * cannot miss a wakeup.
+ */
+ mtx_lock(&xs.ring_lock);
+ wakeup(xen_store);
+ mtx_unlock(&xs.ring_lock);
+}
+
+/**
+ * Verify that the indexes for a ring are valid.
+ *
+ * The difference between the producer and consumer cannot
+ * exceed the size of the ring.
+ *
+ * \param cons The consumer index for the ring to test.
+ * \param prod The producer index for the ring to test.
+ *
+ * \retval 1 If indexes are in range.
+ * \retval 0 If the indexes are out of range.
+ */
+static int
+xs_check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
+{
+
+ return ((prod - cons) <= XENSTORE_RING_SIZE);
+}
+
+/**
+ * Return a pointer to, and the length of, the contiguous
+ * free region available for output in a ring buffer.
+ *
+ * \param cons The consumer index for the ring.
+ * \param prod The producer index for the ring.
+ * \param buf The base address of the ring's storage.
+ * \param len The amount of contiguous storage available.
+ *
+ * \return A pointer to the start location of the free region.
+ */
+static void *
+xs_get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod,
+ char *buf, uint32_t *len)
+{
+
+ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
+ if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
+ *len = XENSTORE_RING_SIZE - (prod - cons);
+ return (buf + MASK_XENSTORE_IDX(prod));
+}
+
+/**
+ * Return a pointer to, and the length of, the contiguous
+ * data available to read from a ring buffer.
+ *
+ * \param cons The consumer index for the ring.
+ * \param prod The producer index for the ring.
+ * \param buf The base address of the ring's storage.
+ * \param len The amount of contiguous data available to read.
+ *
+ * \return A pointer to the start location of the available data.
+ */
+static const void *
+xs_get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod,
+ const char *buf, uint32_t *len)
+{
+
+ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+ if ((prod - cons) < *len)
+ *len = prod - cons;
+ return (buf + MASK_XENSTORE_IDX(cons));
+}
+
+/**
+ * Transmit data to the XenStore service.
+ *
+ * \param tdata A pointer to the contiguous data to send.
+ * \param len The amount of data to send.
+ *
+ * \return On success 0, otherwise an errno value indicating the
+ * cause of failure.
+ *
+ * \invariant Called from thread context.
+ * \invariant The buffer pointed to by tdata is at least len bytes
+ * in length.
+ * \invariant xs.request_mutex exclusively locked.
+ */
+static int
+xs_write_store(const void *tdata, unsigned len)
+{
+ XENSTORE_RING_IDX cons, prod;
+ const char *data = (const char *)tdata;
+ int error;
+
+ sx_assert(&xs.request_mutex, SX_XLOCKED);
+ while (len != 0) {
+ void *dst;
+ u_int avail;
+
+ /* Hold lock so we can't miss wakeups should we block. */
+ mtx_lock(&xs.ring_lock);
+ cons = xen_store->req_cons;
+ prod = xen_store->req_prod;
+ if ((prod - cons) == XENSTORE_RING_SIZE) {
+ /*
+ * Output ring is full. Wait for a ring event.
+ *
+ * Note that the events from both queues
+ * are combined, so being woken does not
+ * guarantee that data exist in the read
+ * ring.
+ *
+ * To simplify error recovery and the retry,
+ * we specify PDROP so our lock is *not* held
+ * when msleep returns.
+ */
+ error = msleep(xen_store, &xs.ring_lock, PCATCH|PDROP,
+ "xbwrite", /*timeout*/0);
+ if (error && error != EWOULDBLOCK)
+ return (error);
+
+ /* Try again. */
+ continue;
+ }
+ mtx_unlock(&xs.ring_lock);
+
+ /* Verify queue sanity. */
+ if (!xs_check_indexes(cons, prod)) {
+ xen_store->req_cons = xen_store->req_prod = 0;
+ return (EIO);
+ }
+
+ dst = xs_get_output_chunk(cons, prod, xen_store->req, &avail);
+ if (avail > len)
+ avail = len;
+
+ memcpy(dst, data, avail);
+ data += avail;
+ len -= avail;
+
+ /*
+ * The store to the producer index, which indicates
+ * to the other side that new data has arrived, must
+ * be visible only after our copy of the data into the
+ * ring has completed.
+ */
+ wmb();
+ xen_store->req_prod += avail;
+
+ /*
+ * xen_intr_signal() implies mb(). The other side will see
+ * the change to req_prod at the time of the interrupt.
+ */
+ xen_intr_signal(xs.xen_intr_handle);
+ }
+
+ return (0);
+}
+
+/**
+ * Receive data from the XenStore service.
+ *
+ * \param tdata A pointer to the contiguous buffer to receive the data.
+ * \param len The amount of data to receive.
+ *
+ * \return On success 0, otherwise an errno value indicating the
+ * cause of failure.
+ *
+ * \invariant Called from thread context.
+ * \invariant The buffer pointed to by tdata is at least len bytes
+ * in length.
+ *
+ * \note xs_read_store does not perform any internal locking to guarantee
+ * serial access to the incoming ring buffer. However, there
+ * is only one context processing reads: xs_rcv_thread().
+ */
+static int
+xs_read_store(void *tdata, unsigned len)
+{
+ XENSTORE_RING_IDX cons, prod;
+ char *data = (char *)tdata;
+ int error;
+
+ while (len != 0) {
+ u_int avail;
+ const char *src;
+
+ /* Hold lock so we can't miss wakeups should we block. */
+ mtx_lock(&xs.ring_lock);
+ cons = xen_store->rsp_cons;
+ prod = xen_store->rsp_prod;
+ if (cons == prod) {
+ /*
+ * Nothing to read. Wait for a ring event.
+ *
+ * Note that the events from both queues
+ * are combined, so being woken does not
+ * guarantee that data exist in the read
+ * ring.
+ *
+ * To simplify error recovery and the retry,
+ * we specify PDROP so our lock is *not* held
+ * when msleep returns.
+ */
+ error = msleep(xen_store, &xs.ring_lock, PCATCH|PDROP,
+ "xbread", /*timeout*/0);
+ if (error && error != EWOULDBLOCK)
+ return (error);
+ continue;
+ }
+ mtx_unlock(&xs.ring_lock);
+
+ /* Verify queue sanity. */
+ if (!xs_check_indexes(cons, prod)) {
+ xen_store->rsp_cons = xen_store->rsp_prod = 0;
+ return (EIO);
+ }
+
+ src = xs_get_input_chunk(cons, prod, xen_store->rsp, &avail);
+ if (avail > len)
+ avail = len;
+
+ /*
+ * Ensure the data we read is related to the indexes
+ * we read above.
+ */
+ rmb();
+
+ memcpy(data, src, avail);
+ data += avail;
+ len -= avail;
+
+ /*
+ * Ensure that the producer of this ring does not see
+ * the ring space as free until after we have copied it
+ * out.
+ */
+ mb();
+ xen_store->rsp_cons += avail;
+
+ /*
+ * xen_intr_signal() implies mb(). The producer will see
+ * the updated consumer index when the event is delivered.
+ */
+ xen_intr_signal(xs.xen_intr_handle);
+ }
+
+ return (0);
+}
+
+/*----------------------- Received Message Processing ------------------------*/
+/**
+ * Block reading the next message from the XenStore service and
+ * process the result.
+ *
+ * \param type The returned type of the XenStore message received.
+ *
+ * \return 0 on success. Otherwise an errno value indicating the
+ * type of failure encountered.
+ */
+static int
+xs_process_msg(enum xsd_sockmsg_type *type)
+{
+ struct xs_stored_msg *msg;
+ char *body;
+ int error;
+
+ msg = malloc(sizeof(*msg), M_XENSTORE, M_WAITOK);
+ error = xs_read_store(&msg->hdr, sizeof(msg->hdr));
+ if (error) {
+ free(msg, M_XENSTORE);
+ return (error);
+ }
+
+ body = malloc(msg->hdr.len + 1, M_XENSTORE, M_WAITOK);
+ error = xs_read_store(body, msg->hdr.len);
+ if (error) {
+ free(body, M_XENSTORE);
+ free(msg, M_XENSTORE);
+ return (error);
+ }
+ body[msg->hdr.len] = '\0';
+
+ *type = msg->hdr.type;
+ if (msg->hdr.type == XS_WATCH_EVENT) {
+ msg->u.watch.vec = split(body, msg->hdr.len,
+ &msg->u.watch.vec_size);
+
+ mtx_lock(&xs.registered_watches_lock);
+ msg->u.watch.handle = find_watch(
+ msg->u.watch.vec[XS_WATCH_TOKEN]);
+ if (msg->u.watch.handle != NULL) {
+ mtx_lock(&xs.watch_events_lock);
+ TAILQ_INSERT_TAIL(&xs.watch_events, msg, list);
+ wakeup(&xs.watch_events);
+ mtx_unlock(&xs.watch_events_lock);
+ } else {
+ free(msg->u.watch.vec, M_XENSTORE);
+ free(msg, M_XENSTORE);
+ }
+ mtx_unlock(&xs.registered_watches_lock);
+ } else {
+ msg->u.reply.body = body;
+ mtx_lock(&xs.reply_lock);
+ TAILQ_INSERT_TAIL(&xs.reply_list, msg, list);
+ wakeup(&xs.reply_list);
+ mtx_unlock(&xs.reply_lock);
+ }
+
+ return (0);
+}
+
+/**
+ * Thread body of the XenStore receive thread.
+ *
+ * This thread blocks waiting for data from the XenStore service
+ * and processes any received messages.
+ */
+static void
+xs_rcv_thread(void *arg __unused)
+{
+ int error;
+ enum xsd_sockmsg_type type;
+
+ for (;;) {
+ error = xs_process_msg(&type);
+ if (error)
+ printf("XENSTORE error %d while reading message\n",
+ error);
+ }
+}
+
+/*---------------- XenStore Message Request/Reply Processing -----------------*/
+/**
+ * Filter invoked before transmitting any message to the XenStore service.
+ *
+ * The role of the filter may expand, but it currently serves to manage
+ * the interactions of messages with transaction state.
+ *
+ * \param request_msg_type The message type for the request.
+ */
+static inline void
+xs_request_filter(uint32_t request_msg_type)
+{
+ if (request_msg_type == XS_TRANSACTION_START)
+ sx_slock(&xs.suspend_mutex);
+}
+
+/**
+ * Filter invoked after transmitting any message to the XenStore service.
+ *
+ * The role of the filter may expand, but it currently serves to manage
+ * the interactions of messages with transaction state.
+ *
+ * \param request_msg_type The message type for the original request.
+ * \param reply_msg_type The message type for any received reply.
+ * \param request_reply_error The error status from the attempt to send
+ * the request or retrieve the reply.
+ */
+static inline void
+xs_reply_filter(uint32_t request_msg_type,
+ uint32_t reply_msg_type, int request_reply_error)
+{
+ /*
+ * The count of transactions drops if we attempted
+ * to end a transaction (even if that attempt fails
+ * in error), we receive a transaction end acknowledgement,
+ * or if our attempt to begin a transaction fails.
+ */
+ if (request_msg_type == XS_TRANSACTION_END
+ || (request_reply_error == 0 && reply_msg_type == XS_TRANSACTION_END)
+ || (request_msg_type == XS_TRANSACTION_START
+ && (request_reply_error != 0 || reply_msg_type == XS_ERROR)))
+ sx_sunlock(&xs.suspend_mutex);
+
+}
+
+#define xsd_error_count (sizeof(xsd_errors) / sizeof(xsd_errors[0]))
+
+/**
+ * Convert a XenStore error string into an errno number.
+ *
+ * \param errorstring The error string to convert.
+ *
+ * \return The errno best matching the input string.
+ *
+ * \note Unknown error strings are converted to EINVAL.
+ */
+static int
+xs_get_error(const char *errorstring)
+{
+ u_int i;
+
+ for (i = 0; i < xsd_error_count; i++) {
+ if (!strcmp(errorstring, xsd_errors[i].errstring))
+ return (xsd_errors[i].errnum);
+ }
+ log(LOG_WARNING, "XENSTORE xen store gave: unknown error %s",
+ errorstring);
+ return (EINVAL);
+}
+
+/**
+ * Block waiting for a reply to a message request.
+ *
+ * \param type The returned type of the reply.
+ * \param len The returned body length of the reply.
+ * \param result The returned body of the reply.
+ *
+ * \return 0 on success. Otherwise an errno indicating the
+ * cause of failure.
+ */
+static int
+xs_read_reply(enum xsd_sockmsg_type *type, u_int *len, void **result)
+{
+ struct xs_stored_msg *msg;
+ char *body;
+ int error;
+
+ mtx_lock(&xs.reply_lock);
+ while (TAILQ_EMPTY(&xs.reply_list)) {
+ error = mtx_sleep(&xs.reply_list, &xs.reply_lock,
+ PCATCH, "xswait", hz/10);
+ if (error && error != EWOULDBLOCK) {
+ mtx_unlock(&xs.reply_lock);
+ return (error);
+ }
+ }
+ msg = TAILQ_FIRST(&xs.reply_list);
+ TAILQ_REMOVE(&xs.reply_list, msg, list);
+ mtx_unlock(&xs.reply_lock);
+
+ *type = msg->hdr.type;
+ if (len)
+ *len = msg->hdr.len;
+ body = msg->u.reply.body;
+
+ free(msg, M_XENSTORE);
+ *result = body;
+ return (0);
+}
+
+/**
+ * Pass-thru interface for XenStore access by userland processes
+ * via the XenStore device.
+ *
+ * Reply type and length data are returned by overwriting these
+ * fields in the passed in request message.
+ *
+ * \param msg A properly formatted message to transmit to
+ * the XenStore service.
+ * \param result The returned body of the reply.
+ *
+ * \return 0 on success. Otherwise an errno indicating the cause
+ * of failure.
+ *
+ * \note The returned result is provided in malloced storage and thus
+ * must be free'd by the caller with 'free(result, M_XENSTORE)'.
+ */
+int
+xs_dev_request_and_reply(struct xsd_sockmsg *msg, void **result)
+{
+ uint32_t request_type;
+ int error;
+
+ request_type = msg->type;
+ xs_request_filter(request_type);
+
+ sx_xlock(&xs.request_mutex);
+ if ((error = xs_write_store(msg, sizeof(*msg) + msg->len)) == 0)
+ error = xs_read_reply(&msg->type, &msg->len, result);
+ sx_xunlock(&xs.request_mutex);
+
+ xs_reply_filter(request_type, msg->type, error);
+
+ return (error);
+}
+
+/**
+ * Send a message with an optionally multi-part body to the XenStore service.
+ *
+ * \param t The transaction to use for this request.
+ * \param request_type The type of message to send.
+ * \param iovec Pointers to the body sections of the request.
+ * \param num_vecs The number of body sections in the request.
+ * \param len The returned length of the reply.
+ * \param result The returned body of the reply.
+ *
+ * \return 0 on success. Otherwise an errno indicating
+ * the cause of failure.
+ *
+ * \note The returned result is provided in malloced storage and thus
+ * must be free'd by the caller with 'free(*result, M_XENSTORE)'.
+ */
+static int
+xs_talkv(struct xs_transaction t, enum xsd_sockmsg_type request_type,
+ const struct iovec *iovec, u_int num_vecs, u_int *len, void **result)
+{
+ struct xsd_sockmsg msg;
+ void *ret = NULL;
+ u_int i;
+ int error;
+
+ msg.tx_id = t.id;
+ msg.req_id = 0;
+ msg.type = request_type;
+ msg.len = 0;
+ for (i = 0; i < num_vecs; i++)
+ msg.len += iovec[i].iov_len;
+
+ xs_request_filter(request_type);
+
+ sx_xlock(&xs.request_mutex);
+ error = xs_write_store(&msg, sizeof(msg));
+ if (error) {
+ printf("xs_talkv failed %d\n", error);
+ goto error_lock_held;
+ }
+
+ for (i = 0; i < num_vecs; i++) {
+ error = xs_write_store(iovec[i].iov_base, iovec[i].iov_len);
+ if (error) {
+ printf("xs_talkv failed %d\n", error);
+ goto error_lock_held;
+ }
+ }
+
+ error = xs_read_reply(&msg.type, len, &ret);
+
+error_lock_held:
+ sx_xunlock(&xs.request_mutex);
+ xs_reply_filter(request_type, msg.type, error);
+ if (error)
+ return (error);
+
+ if (msg.type == XS_ERROR) {
+ error = xs_get_error(ret);
+ free(ret, M_XENSTORE);
+ return (error);
+ }
+
+ /* Reply is either error or an echo of our request message type. */
+ KASSERT(msg.type == request_type, ("bad xenstore message type"));
+
+ if (result)
+ *result = ret;
+ else
+ free(ret, M_XENSTORE);
+
+ return (0);
+}
+
+/**
+ * Wrapper for xs_talkv allowing easy transmission of a message with
+ * a single, contiguous, message body.
+ *
+ * \param t The transaction to use for this request.
+ * \param request_type The type of message to send.
+ * \param body The body of the request.
+ * \param len The returned length of the reply.
+ * \param result The returned body of the reply.
+ *
+ * \return 0 on success. Otherwise an errno indicating
+ * the cause of failure.
+ *
+ * \note The returned result is provided in malloced storage and thus
+ * must be free'd by the caller with 'free(*result, M_XENSTORE)'.
+ */
+static int
+xs_single(struct xs_transaction t, enum xsd_sockmsg_type request_type,
+ const char *body, u_int *len, void **result)
+{
+ struct iovec iovec;
+
+ iovec.iov_base = (void *)(uintptr_t)body;
+ iovec.iov_len = strlen(body) + 1;
+
+ return (xs_talkv(t, request_type, &iovec, 1, len, result));
+}
+
+/*------------------------- XenStore Watch Support ---------------------------*/
+/**
+ * Transmit a watch request to the XenStore service.
+ *
+ * \param path The path in the XenStore to watch.
+ * \param token A unique identifier for this watch.
+ *
+ * \return 0 on success. Otherwise an errno indicating the
+ * cause of failure.
+ */
+static int
+xs_watch(const char *path, const char *token)
+{
+ struct iovec iov[2];
+
+ iov[0].iov_base = (void *)(uintptr_t) path;
+ iov[0].iov_len = strlen(path) + 1;
+ iov[1].iov_base = (void *)(uintptr_t) token;
+ iov[1].iov_len = strlen(token) + 1;
+
+ return (xs_talkv(XST_NIL, XS_WATCH, iov, 2, NULL, NULL));
+}
+
+/**
+ * Transmit an unwatch request to the XenStore service.
+ *
+ * \param path The path in the XenStore to stop watching.
+ * \param token A unique identifier for this watch.
+ *
+ * \return 0 on success. Otherwise an errno indicating the
+ * cause of failure.
+ */
+static int
+xs_unwatch(const char *path, const char *token)
+{
+ struct iovec iov[2];
+
+ iov[0].iov_base = (void *)(uintptr_t) path;
+ iov[0].iov_len = strlen(path) + 1;
+ iov[1].iov_base = (void *)(uintptr_t) token;
+ iov[1].iov_len = strlen(token) + 1;
+
+ return (xs_talkv(XST_NIL, XS_UNWATCH, iov, 2, NULL, NULL));
+}
+
+/**
+ * Convert from watch token (unique identifier) to the associated
+ * internal tracking structure for this watch.
+ *
+ * \param token The unique identifier for the watch to find.
+ *
+ * \return A pointer to the found watch structure or NULL.
+ */
+static struct xs_watch *
+find_watch(const char *token)
+{
+ struct xs_watch *i, *cmp;
+
+ cmp = (void *)strtoul(token, NULL, 16);
+
+ LIST_FOREACH(i, &xs.registered_watches, list)
+ if (i == cmp)
+ return (i);
+
+ return (NULL);
+}
+
+/**
+ * Thread body of the XenStore watch event dispatch thread.
+ */
+static void
+xenwatch_thread(void *unused)
+{
+ struct xs_stored_msg *msg;
+
+ for (;;) {
+
+ mtx_lock(&xs.watch_events_lock);
+ while (TAILQ_EMPTY(&xs.watch_events))
+ mtx_sleep(&xs.watch_events,
+ &xs.watch_events_lock,
+ PWAIT | PCATCH, "waitev", hz/10);
+
+ mtx_unlock(&xs.watch_events_lock);
+ sx_xlock(&xs.xenwatch_mutex);
+
+ mtx_lock(&xs.watch_events_lock);
+ msg = TAILQ_FIRST(&xs.watch_events);
+ if (msg)
+ TAILQ_REMOVE(&xs.watch_events, msg, list);
+ mtx_unlock(&xs.watch_events_lock);
+
+ if (msg != NULL) {
+ /*
+ * XXX There are messages coming in with a NULL
+ * XXX callback. This deserves further investigation;
+ * XXX the workaround here simply prevents the kernel
+ * XXX from panic'ing on startup.
+ */
+ if (msg->u.watch.handle->callback != NULL)
+ msg->u.watch.handle->callback(
+ msg->u.watch.handle,
+ (const char **)msg->u.watch.vec,
+ msg->u.watch.vec_size);
+ free(msg->u.watch.vec, M_XENSTORE);
+ free(msg, M_XENSTORE);
+ }
+
+ sx_xunlock(&xs.xenwatch_mutex);
+ }
+}
+
+/*----------- XenStore Configuration, Initialization, and Control ------------*/
+/**
+ * Setup communication channels with the XenStore service.
+ *
+ * \return On success, 0. Otherwise an errno value indicating the
+ * type of failure.
+ */
+static int
+xs_init_comms(void)
+{
+ int error;
+
+ if (xen_store->rsp_prod != xen_store->rsp_cons) {
+ log(LOG_WARNING, "XENSTORE response ring is not quiescent "
+ "(%08x:%08x): fixing up\n",
+ xen_store->rsp_cons, xen_store->rsp_prod);
+ xen_store->rsp_cons = xen_store->rsp_prod;
+ }
+
+ xen_intr_unbind(&xs.xen_intr_handle);
+
+ error = xen_intr_bind_local_port(xs.xs_dev, xs.evtchn,
+ /*filter*/NULL, xs_intr, /*arg*/NULL, INTR_TYPE_NET|INTR_MPSAFE,
+ &xs.xen_intr_handle);
+ if (error) {
+ log(LOG_WARNING, "XENSTORE request irq failed %i\n", error);
+ return (error);
+ }
+
+ return (0);
+}
+
+/*------------------ Private Device Attachment Functions --------------------*/
+static void
+xs_identify(driver_t *driver, device_t parent)
+{
+
+ BUS_ADD_CHILD(parent, 0, "xenstore", 0);
+}
+
+/**
+ * Probe for the existence of the XenStore.
+ *
+ * \param dev NewBus device_t for this XenStore instance.
+ */
+static int
+xs_probe(device_t dev)
+{
+ /*
+ * We are either operating within a PV kernel or being probed
+ * as the child of the successfully attached xenpci device.
+ * Thus we are in a Xen environment and there will be a XenStore.
+ * Unconditionally return success.
+ */
+ device_set_desc(dev, "XenStore");
+ return (0);
+}
+
+static void
+xs_attach_deferred(void *arg)
+{
+
+ bus_generic_probe(xs.xs_dev);
+ bus_generic_attach(xs.xs_dev);
+
+ config_intrhook_disestablish(&xs.xs_attachcb);
+}
+
+static void
+xs_attach_late(void *arg, int pending)
+{
+
+ KASSERT((pending == 1), ("xs late attach queued several times"));
+ bus_generic_probe(xs.xs_dev);
+ bus_generic_attach(xs.xs_dev);
+}
+
+/**
+ * Attach to the XenStore.
+ *
+ * This routine also prepares for the probe/attach of drivers that rely
+ * on the XenStore.
+ */
+static int
+xs_attach(device_t dev)
+{
+ int error;
+
+ /* Allow us to get device_t from softc and vice-versa. */
+ xs.xs_dev = dev;
+ device_set_softc(dev, &xs);
+
+ /* Initialize the interface to xenstore. */
+ struct proc *p;
+
+ xs.initialized = false;
+ if (xen_hvm_domain()) {
+ xs.evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
+ xs.gpfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);
+ xen_store = pmap_mapdev(xs.gpfn * PAGE_SIZE, PAGE_SIZE);
+ xs.initialized = true;
+ } else if (xen_pv_domain()) {
+ if (HYPERVISOR_start_info->store_evtchn == 0) {
+ struct evtchn_alloc_unbound alloc_unbound;
+
+ /* Allocate a local event channel for xenstore */
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = DOMID_SELF;
+ error = HYPERVISOR_event_channel_op(
+ EVTCHNOP_alloc_unbound, &alloc_unbound);
+ if (error != 0)
+ panic(
+ "unable to alloc event channel for Dom0: %d",
+ error);
+
+ HYPERVISOR_start_info->store_evtchn =
+ alloc_unbound.port;
+ xs.evtchn = alloc_unbound.port;
+
+ /* Allocate memory for the xs shared ring */
+ xen_store = malloc(PAGE_SIZE, M_XENSTORE,
+ M_WAITOK | M_ZERO);
+ } else {
+ xs.evtchn = HYPERVISOR_start_info->store_evtchn;
+ xs.initialized = true;
+ }
+ } else {
+ panic("Unknown domain type, cannot initialize xenstore.");
+ }
+
+ TAILQ_INIT(&xs.reply_list);
+ TAILQ_INIT(&xs.watch_events);
+
+ mtx_init(&xs.ring_lock, "ring lock", NULL, MTX_DEF);
+ mtx_init(&xs.reply_lock, "reply lock", NULL, MTX_DEF);
+ sx_init(&xs.xenwatch_mutex, "xenwatch");
+ sx_init(&xs.request_mutex, "xenstore request");
+ sx_init(&xs.suspend_mutex, "xenstore suspend");
+ mtx_init(&xs.registered_watches_lock, "watches", NULL, MTX_DEF);
+ mtx_init(&xs.watch_events_lock, "watch events", NULL, MTX_DEF);
+
+ /* Initialize the shared memory rings to talk to xenstored */
+ error = xs_init_comms();
+ if (error)
+ return (error);
+
+ error = kproc_create(xenwatch_thread, NULL, &p, RFHIGHPID,
+ 0, "xenwatch");
+ if (error)
+ return (error);
+ xs.xenwatch_pid = p->p_pid;
+
+ error = kproc_create(xs_rcv_thread, NULL, NULL,
+ RFHIGHPID, 0, "xenstore_rcv");
+
+ xs.xs_attachcb.ich_func = xs_attach_deferred;
+ xs.xs_attachcb.ich_arg = NULL;
+ if (xs.initialized) {
+ config_intrhook_establish(&xs.xs_attachcb);
+ } else {
+ TASK_INIT(&xs.xs_late_init, 0, xs_attach_late, NULL);
+ }
+
+ return (error);
+}
+
+/**
+ * Prepare for suspension of this VM by halting XenStore access after
+ * all transactions and individual requests have completed.
+ */
+static int
+xs_suspend(device_t dev)
+{
+ int error;
+
+ /* Suspend child Xen devices. */
+ error = bus_generic_suspend(dev);
+ if (error != 0)
+ return (error);
+
+ sx_xlock(&xs.suspend_mutex);
+ sx_xlock(&xs.request_mutex);
+
+ return (0);
+}
+
+/**
+ * Resume XenStore operations after this VM is resumed.
+ */
+static int
+xs_resume(device_t dev __unused)
+{
+ struct xs_watch *watch;
+ char token[sizeof(watch) * 2 + 1];
+
+ xs_init_comms();
+
+ sx_xunlock(&xs.request_mutex);
+
+ /*
+ * No need for registered_watches_lock: the suspend_mutex
+ * is sufficient.
+ */
+ LIST_FOREACH(watch, &xs.registered_watches, list) {
+ sprintf(token, "%lX", (long)watch);
+ xs_watch(watch->node, token);
+ }
+
+ sx_xunlock(&xs.suspend_mutex);
+
+ /* Resume child Xen devices. */
+ bus_generic_resume(dev);
+
+ return (0);
+}
+
+/*-------------------- Private Device Attachment Data -----------------------*/
+static device_method_t xenstore_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, xs_identify),
+ DEVMETHOD(device_probe, xs_probe),
+ DEVMETHOD(device_attach, xs_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ DEVMETHOD(device_suspend, xs_suspend),
+ DEVMETHOD(device_resume, xs_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_add_child, bus_generic_add_child),
+ DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
+ DEVMETHOD(bus_release_resource, bus_generic_release_resource),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(xenstore, xenstore_driver, xenstore_methods, 0);
+static devclass_t xenstore_devclass;
+
+DRIVER_MODULE(xenstore, xenpv, xenstore_driver, xenstore_devclass, 0, 0);
+
+/*------------------------------- Sysctl Data --------------------------------*/
+/* XXX Shouldn't the node be somewhere else? */
+SYSCTL_NODE(_dev, OID_AUTO, xen, CTLFLAG_RD, NULL, "Xen");
+SYSCTL_INT(_dev_xen, OID_AUTO, xsd_port, CTLFLAG_RD, &xs.evtchn, 0, "");
+SYSCTL_ULONG(_dev_xen, OID_AUTO, xsd_kva, CTLFLAG_RD, (u_long *) &xen_store, 0, "");
+
+/*-------------------------------- Public API --------------------------------*/
+/*------- API comments for these methods can be found in xenstorevar.h -------*/
+int
+xs_directory(struct xs_transaction t, const char *dir, const char *node,
+ u_int *num, const char ***result)
+{
+ struct sbuf *path;
+ char *strings;
+ u_int len = 0;
+ int error;
+
+ path = xs_join(dir, node);
+ error = xs_single(t, XS_DIRECTORY, sbuf_data(path), &len,
+ (void **)&strings);
+ sbuf_delete(path);
+ if (error)
+ return (error);
+
+ *result = split(strings, len, num);
+
+ return (0);
+}
+
+int
+xs_exists(struct xs_transaction t, const char *dir, const char *node)
+{
+ const char **d;
+ int error, dir_n;
+
+ error = xs_directory(t, dir, node, &dir_n, &d);
+ if (error)
+ return (0);
+ free(d, M_XENSTORE);
+ return (1);
+}
+
+int
+xs_read(struct xs_transaction t, const char *dir, const char *node,
+ u_int *len, void **result)
+{
+ struct sbuf *path;
+ void *ret;
+ int error;
+
+ path = xs_join(dir, node);
+ error = xs_single(t, XS_READ, sbuf_data(path), len, &ret);
+ sbuf_delete(path);
+ if (error)
+ return (error);
+ *result = ret;
+ return (0);
+}
+
+int
+xs_write(struct xs_transaction t, const char *dir, const char *node,
+ const char *string)
+{
+ struct sbuf *path;
+ struct iovec iovec[2];
+ int error;
+
+ path = xs_join(dir, node);
+
+ iovec[0].iov_base = (void *)(uintptr_t) sbuf_data(path);
+ iovec[0].iov_len = sbuf_len(path) + 1;
+ iovec[1].iov_base = (void *)(uintptr_t) string;
+ iovec[1].iov_len = strlen(string);
+
+ error = xs_talkv(t, XS_WRITE, iovec, 2, NULL, NULL);
+ sbuf_delete(path);
+
+ return (error);
+}
+
+int
+xs_mkdir(struct xs_transaction t, const char *dir, const char *node)
+{
+ struct sbuf *path;
+ int ret;
+
+ path = xs_join(dir, node);
+ ret = xs_single(t, XS_MKDIR, sbuf_data(path), NULL, NULL);
+ sbuf_delete(path);
+
+ return (ret);
+}
+
+int
+xs_rm(struct xs_transaction t, const char *dir, const char *node)
+{
+ struct sbuf *path;
+ int ret;
+
+ path = xs_join(dir, node);
+ ret = xs_single(t, XS_RM, sbuf_data(path), NULL, NULL);
+ sbuf_delete(path);
+
+ return (ret);
+}
+
+int
+xs_rm_tree(struct xs_transaction xbt, const char *base, const char *node)
+{
+ struct xs_transaction local_xbt;
+ struct sbuf *root_path_sbuf;
+ struct sbuf *cur_path_sbuf;
+ char *root_path;
+ char *cur_path;
+ const char **dir;
+ int error;
+ int empty;
+
+retry:
+ root_path_sbuf = xs_join(base, node);
+ cur_path_sbuf = xs_join(base, node);
+ root_path = sbuf_data(root_path_sbuf);
+ cur_path = sbuf_data(cur_path_sbuf);
+ dir = NULL;
+ local_xbt.id = 0;
+
+ if (xbt.id == 0) {
+ error = xs_transaction_start(&local_xbt);
+ if (error != 0)
+ goto out;
+ xbt = local_xbt;
+ }
+
+ empty = 0;
+ while (1) {
+ u_int count;
+ u_int i;
+
+ error = xs_directory(xbt, cur_path, "", &count, &dir);
+ if (error)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ error = xs_rm(xbt, cur_path, dir[i]);
+ if (error == ENOTEMPTY) {
+ struct sbuf *push_dir;
+
+ /*
+ * Descend to clear out this subdirectory.
+ * We'll return to cur_path once push_dir
+ * is empty.
+ */
+ push_dir = xs_join(cur_path, dir[i]);
+ sbuf_delete(cur_path_sbuf);
+ cur_path_sbuf = push_dir;
+ cur_path = sbuf_data(cur_path_sbuf);
+ break;
+ } else if (error != 0) {
+ goto out;
+ }
+ }
+
+ free(dir, M_XENSTORE);
+ dir = NULL;
+
+ if (i == count) {
+ char *last_slash;
+
+ /* Directory is empty. It is now safe to remove. */
+ error = xs_rm(xbt, cur_path, "");
+ if (error != 0)
+ goto out;
+
+ if (!strcmp(cur_path, root_path))
+ break;
+
+ /* Return to processing the parent directory. */
+ last_slash = strrchr(cur_path, '/');
+ KASSERT(last_slash != NULL,
+ ("xs_rm_tree: mangled path %s", cur_path));
+ *last_slash = '\0';
+ }
+ }
+
+out:
+ sbuf_delete(cur_path_sbuf);
+ sbuf_delete(root_path_sbuf);
+ if (dir != NULL)
+ free(dir, M_XENSTORE);
+
+ if (local_xbt.id != 0) {
+ int terror;
+
+ terror = xs_transaction_end(local_xbt, /*abort*/error != 0);
+ xbt.id = 0;
+ if (terror == EAGAIN && error == 0)
+ goto retry;
+ }
+ return (error);
+}
+
+int
+xs_transaction_start(struct xs_transaction *t)
+{
+ char *id_str;
+ int error;
+
+ error = xs_single(XST_NIL, XS_TRANSACTION_START, "", NULL,
+ (void **)&id_str);
+ if (error == 0) {
+ t->id = strtoul(id_str, NULL, 0);
+ free(id_str, M_XENSTORE);
+ }
+ return (error);
+}
+
+int
+xs_transaction_end(struct xs_transaction t, int abort)
+{
+ char abortstr[2];
+
+ if (abort)
+ strcpy(abortstr, "F");
+ else
+ strcpy(abortstr, "T");
+
+ return (xs_single(t, XS_TRANSACTION_END, abortstr, NULL, NULL));
+}
+
+int
+xs_scanf(struct xs_transaction t, const char *dir, const char *node,
+ int *scancountp, const char *fmt, ...)
+{
+ va_list ap;
+ int error, ns;
+ char *val;
+
+ error = xs_read(t, dir, node, NULL, (void **) &val);
+ if (error)
+ return (error);
+
+ va_start(ap, fmt);
+ ns = vsscanf(val, fmt, ap);
+ va_end(ap);
+ free(val, M_XENSTORE);
+ /* Distinctive errno. */
+ if (ns == 0)
+ return (ERANGE);
+ if (scancountp)
+ *scancountp = ns;
+ return (0);
+}
+
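+/*
+ * Format a value with fmt/ap and write it to <dir>/<node>.
+ */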
+int
+xs_vprintf(struct xs_transaction t,
+ const char *dir, const char *node, const char *fmt, va_list ap)
+{
+ struct sbuf *sb;
+ int error;
+
+ sb = sbuf_new_auto();
+ sbuf_vprintf(sb, fmt, ap);
+ sbuf_finish(sb);
+ error = xs_write(t, dir, node, sbuf_data(sb));
+ sbuf_delete(sb);
+
+ return (error);
+}
+
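+/*
+ * Variadic wrapper around xs_vprintf().
+ */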
+int
+xs_printf(struct xs_transaction t, const char *dir, const char *node,
+ const char *fmt, ...)
+{
+ va_list ap;
+ int error;
+
+ va_start(ap, fmt);
+ error = xs_vprintf(t, dir, node, fmt, ap);
+ va_end(ap);
+
+ return (error);
+}
+
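+/*
+ * Read multiple values under dir in a single call.  The variable
+ * arguments are (name, scanf format, result pointer) triples terminated
+ * by a NULL name.  A NULL format returns the raw, M_XENSTORE-allocated
+ * string through the result pointer; the caller must free it.
+ */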
+int
+xs_gather(struct xs_transaction t, const char *dir, ...)
+{
+ va_list ap;
+ const char *name;
+ int error;
+
+ va_start(ap, dir);
+ error = 0;
+ while (error == 0 && (name = va_arg(ap, char *)) != NULL) {
+ const char *fmt = va_arg(ap, char *);
+ void *result = va_arg(ap, void *);
+ char *p;
+
+ error = xs_read(t, dir, name, NULL, (void **) &p);
+ if (error)
+ break;
+
+ if (fmt) {
+ if (sscanf(p, fmt, result) == 0)
+ error = EINVAL;
+ free(p, M_XENSTORE);
+ } else
+ *(char **)result = p;
+ }
+ va_end(ap);
+
+ return (error);
+}
+
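+/*
+ * Register watch with the XenStore service so that modifications of
+ * watch->node are reported to its callback.
+ */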
+int
+xs_register_watch(struct xs_watch *watch)
+{
+ /* The watch pointer, rendered in ASCII, is the token. */
+ char token[sizeof(watch) * 2 + 1];
+ int error;
+
+ sprintf(token, "%lX", (long)watch);
+
+ sx_slock(&xs.suspend_mutex);
+
+ mtx_lock(&xs.registered_watches_lock);
+ KASSERT(find_watch(token) == NULL, ("watch already registered"));
+ LIST_INSERT_HEAD(&xs.registered_watches, watch, list);
+ mtx_unlock(&xs.registered_watches_lock);
+
+ error = xs_watch(watch->node, token);
+
+ /* Ignore errors due to multiple registration. */
+ if (error == EEXIST)
+ error = 0;
+
+ if (error != 0) {
+ mtx_lock(&xs.registered_watches_lock);
+ LIST_REMOVE(watch, list);
+ mtx_unlock(&xs.registered_watches_lock);
+ }
+
+ sx_sunlock(&xs.suspend_mutex);
+
+ return (error);
+}
+
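+/*
+ * Remove a previously registered watch, discard any of its pending
+ * events, and wait for a callback that may currently be running on the
+ * xenwatch thread to complete.
+ */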
+void
+xs_unregister_watch(struct xs_watch *watch)
+{
+ struct xs_stored_msg *msg, *tmp;
+ char token[sizeof(watch) * 2 + 1];
+ int error;
+
+ sprintf(token, "%lX", (long)watch);
+
+ sx_slock(&xs.suspend_mutex);
+
+ mtx_lock(&xs.registered_watches_lock);
+ if (find_watch(token) == NULL) {
+ mtx_unlock(&xs.registered_watches_lock);
+ sx_sunlock(&xs.suspend_mutex);
+ return;
+ }
+ LIST_REMOVE(watch, list);
+ mtx_unlock(&xs.registered_watches_lock);
+
+ error = xs_unwatch(watch->node, token);
+ if (error)
+ log(LOG_WARNING, "XENSTORE Failed to release watch %s: %i\n",
+ watch->node, error);
+
+ sx_sunlock(&xs.suspend_mutex);
+
+ /* Cancel pending watch events. */
+ mtx_lock(&xs.watch_events_lock);
+ TAILQ_FOREACH_SAFE(msg, &xs.watch_events, list, tmp) {
+ if (msg->u.watch.handle != watch)
+ continue;
+ TAILQ_REMOVE(&xs.watch_events, msg, list);
+ free(msg->u.watch.vec, M_XENSTORE);
+ free(msg, M_XENSTORE);
+ }
+ mtx_unlock(&xs.watch_events_lock);
+
+ /* Flush any currently-executing callback, unless we are it. :-) */
+ if (curproc->p_pid != xs.xenwatch_pid) {
+ sx_xlock(&xs.xenwatch_mutex);
+ sx_xunlock(&xs.xenwatch_mutex);
+ }
+}
diff --git a/sys/dev/xen/xenstore/xenstore_dev.c b/sys/dev/xen/xenstore/xenstore_dev.c
new file mode 100644
index 0000000..54b5e82
--- /dev/null
+++ b/sys/dev/xen/xenstore/xenstore_dev.c
@@ -0,0 +1,287 @@
+/*
+ * xenstore_dev.c
+ *
+ * Driver giving user-space access to the kernel's connection to the
+ * XenStore service.
+ *
+ * Copyright (c) 2005, Christian Limpach
+ * Copyright (c) 2005, Rusty Russell, IBM Corporation
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/cdefs.h>
+#include <sys/errno.h>
+#include <sys/uio.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+#include <sys/module.h>
+
+#include <xen/xen-os.h>
+
+#include <xen/hypervisor.h>
+#include <xen/xenstore/xenstorevar.h>
+#include <xen/xenstore/xenstore_internal.h>
+
+struct xs_dev_transaction {
+ LIST_ENTRY(xs_dev_transaction) list;
+ struct xs_transaction handle;
+};
+
+struct xs_dev_data {
+ /* In-progress transactions. */
+ LIST_HEAD(xdd_list_head, xs_dev_transaction) transactions;
+
+ /* Partial request. */
+ unsigned int len;
+ union {
+ struct xsd_sockmsg msg;
+ char buffer[PAGE_SIZE];
+ } u;
+
+ /* Response queue. */
+#define MASK_READ_IDX(idx) ((idx)&(PAGE_SIZE-1))
+ char read_buffer[PAGE_SIZE];
+ unsigned int read_cons, read_prod;
+};
+
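+/*
+ * Copy queued reply data to user space, polling until at least one
+ * byte is available.
+ */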
+static int
+xs_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ int error;
+ struct xs_dev_data *u = dev->si_drv1;
+
+ while (u->read_prod == u->read_cons) {
+ error = tsleep(u, PCATCH, "xsdread", hz/10);
+ if (error && error != EWOULDBLOCK)
+ return (error);
+ }
+
+ while (uio->uio_resid > 0) {
+ if (u->read_cons == u->read_prod)
+ break;
+ error = uiomove(&u->read_buffer[MASK_READ_IDX(u->read_cons)],
+ 1, uio);
+ if (error)
+ return (error);
+ u->read_cons++;
+ }
+ return (0);
+}
+
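+/*
+ * Append reply data to the per-device read buffer and wake up any
+ * thread sleeping in xs_dev_read().
+ */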
+static void
+xs_queue_reply(struct xs_dev_data *u, char *data, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++, u->read_prod++)
+ u->read_buffer[MASK_READ_IDX(u->read_prod)] = data[i];
+
+ KASSERT((u->read_prod - u->read_cons) <= sizeof(u->read_buffer),
+ ("xenstore reply too big"));
+
+ wakeup(u);
+}
+
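+/*
+ * Accumulate a request from user space.  Once a complete XenStore
+ * message has arrived, forward it to the XenStore service, record
+ * transaction starts and ends so that transactions left open at close
+ * time can be aborted, and queue the reply for reading.
+ */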
+static int
+xs_dev_write(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ int error;
+ struct xs_dev_data *u = dev->si_drv1;
+ struct xs_dev_transaction *trans;
+ void *reply;
+ int len = uio->uio_resid;
+
+ if ((len + u->len) > sizeof(u->u.buffer))
+ return (EINVAL);
+
+ error = uiomove(u->u.buffer + u->len, len, uio);
+ if (error)
+ return (error);
+
+ u->len += len;
+ if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
+ return (0);
+
+ switch (u->u.msg.type) {
+ case XS_TRANSACTION_START:
+ case XS_TRANSACTION_END:
+ case XS_DIRECTORY:
+ case XS_READ:
+ case XS_GET_PERMS:
+ case XS_RELEASE:
+ case XS_GET_DOMAIN_PATH:
+ case XS_WRITE:
+ case XS_MKDIR:
+ case XS_RM:
+ case XS_SET_PERMS:
+ error = xs_dev_request_and_reply(&u->u.msg, &reply);
+ if (!error) {
+ if (u->u.msg.type == XS_TRANSACTION_START) {
+ trans = malloc(sizeof(*trans), M_XENSTORE,
+ M_WAITOK);
+ trans->handle.id = strtoul(reply, NULL, 0);
+ LIST_INSERT_HEAD(&u->transactions, trans, list);
+ } else if (u->u.msg.type == XS_TRANSACTION_END) {
+ LIST_FOREACH(trans, &u->transactions, list)
+ if (trans->handle.id == u->u.msg.tx_id)
+ break;
+#if 0 /* XXX does this mean the list is empty? */
+ BUG_ON(&trans->list == &u->transactions);
+#endif
+ LIST_REMOVE(trans, list);
+ free(trans, M_XENSTORE);
+ }
+ xs_queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg));
+ xs_queue_reply(u, (char *)reply, u->u.msg.len);
+ free(reply, M_XENSTORE);
+ }
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ if (error == 0)
+ u->len = 0;
+
+ return (error);
+}
+
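+/*
+ * Allocate the request buffer, reply buffer, and transaction list used
+ * by this device and attach them to si_drv1.
+ */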
+static int
+xs_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+ struct xs_dev_data *u;
+
+#if 0 /* XXX figure out if equiv needed */
+ nonseekable_open(inode, filp);
+#endif
+ u = malloc(sizeof(*u), M_XENSTORE, M_WAITOK|M_ZERO);
+ LIST_INIT(&u->transactions);
+ dev->si_drv1 = u;
+
+ return (0);
+}
+
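+/*
+ * Abort any transactions still open on this device and release the
+ * state allocated by xs_dev_open().
+ */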
+static int
+xs_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
+{
+ struct xs_dev_data *u = dev->si_drv1;
+ struct xs_dev_transaction *trans, *tmp;
+
+ LIST_FOREACH_SAFE(trans, &u->transactions, list, tmp) {
+ xs_transaction_end(trans->handle, 1);
+ LIST_REMOVE(trans, list);
+ free(trans, M_XENSTORE);
+ }
+
+ free(u, M_XENSTORE);
+ return (0);
+}
+
+static struct cdevsw xs_dev_cdevsw = {
+ .d_version = D_VERSION,
+ .d_read = xs_dev_read,
+ .d_write = xs_dev_write,
+ .d_open = xs_dev_open,
+ .d_close = xs_dev_close,
+ .d_name = "xs_dev",
+};
+
+/*------------------ Private Device Attachment Functions --------------------*/
+/**
+ * \brief Identify instances of this device type in the system.
+ *
+ * \param driver The driver performing this identify action.
+ * \param parent The NewBus parent device for any devices this method adds.
+ */
+static void
+xs_dev_identify(driver_t *driver __unused, device_t parent)
+{
+ /*
+ * A single device instance for our driver is always present
+ * in a system operating under Xen.
+ */
+ BUS_ADD_CHILD(parent, 0, driver->name, 0);
+}
+
+/**
+ * \brief Probe for the existence of the Xenstore device
+ *
+ * \param dev NewBus device_t for this instance.
+ *
+ * \return Always returns 0 indicating success.
+ */
+static int
+xs_dev_probe(device_t dev)
+{
+
+ device_set_desc(dev, "Xenstore user-space device");
+ return (0);
+}
+
+/**
+ * \brief Attach the Xenstore device.
+ *
+ * \param dev NewBus device_t for this instance.
+ *
+ * \return On success, 0. Otherwise an errno value indicating the
+ * type of failure.
+ */
+static int
+xs_dev_attach(device_t dev)
+{
+ struct cdev *xs_cdev;
+
+ xs_cdev = make_dev(&xs_dev_cdevsw, 0, UID_ROOT, GID_WHEEL, 0400,
+ "xen/xenstore");
+ if (xs_cdev == NULL)
+ return (EINVAL);
+
+ return (0);
+}
+
+/*-------------------- Private Device Attachment Data -----------------------*/
+static device_method_t xs_dev_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, xs_dev_identify),
+ DEVMETHOD(device_probe, xs_dev_probe),
+ DEVMETHOD(device_attach, xs_dev_attach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(xs_dev, xs_dev_driver, xs_dev_methods, 0);
+devclass_t xs_dev_devclass;
+
+DRIVER_MODULE(xs_dev, xenstore, xs_dev_driver, xs_dev_devclass,
+ NULL, NULL);
diff --git a/sys/dev/xen/xenstore/xenstored_dev.c b/sys/dev/xen/xenstore/xenstored_dev.c
new file mode 100644
index 0000000..ae24085
--- /dev/null
+++ b/sys/dev/xen/xenstore/xenstored_dev.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2014 Roger Pau Monné <roger.pau@citrix.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/cdefs.h>
+#include <sys/errno.h>
+#include <sys/uio.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+#include <sys/module.h>
+
+#include <xen/xen-os.h>
+
+#include <xen/hypervisor.h>
+#include <xen/xenstore/xenstorevar.h>
+#include <xen/xenstore/xenstore_internal.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#define XSD_READ_SIZE 20
+
+static int xsd_dev_read(struct cdev *dev, struct uio *uio, int ioflag);
+static int xsd_dev_mmap(struct cdev *dev, vm_ooffset_t offset,
+ vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr);
+
+
+static struct cdevsw xsd_dev_cdevsw = {
+ .d_version = D_VERSION,
+ .d_read = xsd_dev_read,
+ .d_mmap = xsd_dev_mmap,
+ .d_name = "xsd_dev",
+};
+
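+/*
+ * Return the XenStore event channel number as an ASCII string, for use
+ * by the user-space XenStore daemon.
+ */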
+static int
+xsd_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ char evtchn[XSD_READ_SIZE];
+ int error, len;
+
+ len = snprintf(evtchn, sizeof(evtchn), "%u",
+ HYPERVISOR_start_info->store_evtchn);
+ if (len < 0 || len > uio->uio_resid)
+ return (EINVAL);
+
+ error = uiomove(evtchn, len, uio);
+ if (error)
+ return (error);
+
+ return (0);
+}
+
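+/*
+ * Map the shared XenStore ring page into the user-space XenStore
+ * daemon; only offset 0 is valid.
+ */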
+static int
+xsd_dev_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
+ int nprot, vm_memattr_t *memattr)
+{
+
+ if (offset != 0)
+ return (EINVAL);
+
+ *paddr = pmap_kextract((vm_offset_t)xen_store);
+
+ return (0);
+}
+
+/*------------------ Private Device Attachment Functions --------------------*/
+/**
+ * \brief Identify instances of this device type in the system.
+ *
+ * \param driver The driver performing this identify action.
+ * \param parent The NewBus parent device for any devices this method adds.
+ */
+static void
+xsd_dev_identify(driver_t *driver __unused, device_t parent)
+{
+
+ if (!xen_pv_domain())
+ return;
+ if (HYPERVISOR_start_info->store_mfn != 0)
+ return;
+
+ /*
+ * Only attach if xenstore is not available, because we are the
+ * domain that's supposed to run it.
+ */
+ BUS_ADD_CHILD(parent, 0, driver->name, 0);
+}
+
+/**
+ * \brief Probe for the existence of the Xenstored device
+ *
+ * \param dev NewBus device_t for this instance.
+ *
+ * \return Always returns BUS_PROBE_NOWILDCARD, so the device is only
+ * claimed when explicitly added by xsd_dev_identify().
+ */
+static int
+xsd_dev_probe(device_t dev)
+{
+
+ device_set_desc(dev, "Xenstored user-space device");
+ return (BUS_PROBE_NOWILDCARD);
+}
+
+/**
+ * \brief Attach the Xenstored device.
+ *
+ * \param dev NewBus device_t for this instance.
+ *
+ * \return On success, 0. Otherwise an errno value indicating the
+ * type of failure.
+ */
+static int
+xsd_dev_attach(device_t dev)
+{
+ struct cdev *xsd_cdev;
+
+ xsd_cdev = make_dev(&xsd_dev_cdevsw, 0, UID_ROOT, GID_WHEEL, 0400,
+ "xen/xenstored");
+ if (xsd_cdev == NULL)
+ return (EINVAL);
+
+ return (0);
+}
+
+/*-------------------- Private Device Attachment Data -----------------------*/
+static device_method_t xsd_dev_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, xsd_dev_identify),
+ DEVMETHOD(device_probe, xsd_dev_probe),
+ DEVMETHOD(device_attach, xsd_dev_attach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(xsd_dev, xsd_dev_driver, xsd_dev_methods, 0);
+devclass_t xsd_dev_devclass;
+
+DRIVER_MODULE(xsd_dev, xenpv, xsd_dev_driver, xsd_dev_devclass,
+ NULL, NULL);