Diffstat (limited to 'sys/dev/hme/if_hme.c')
-rw-r--r--  sys/dev/hme/if_hme.c | 169
1 file changed, 158 insertions(+), 11 deletions(-)
diff --git a/sys/dev/hme/if_hme.c b/sys/dev/hme/if_hme.c
index 7144799..b74cb92 100644
--- a/sys/dev/hme/if_hme.c
+++ b/sys/dev/hme/if_hme.c
@@ -54,9 +54,13 @@ __FBSDID("$FreeBSD$");
* maximum packet size (this is not verified). Buffers starting on odd
* boundaries must be mapped so that the burst can start on a natural boundary.
*
- * Checksumming is not yet supported.
+ * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
+ * offloading.  In practice the same technique works for UDP datagrams as
+ * well, but the hardware does not compensate for a computed UDP checksum
+ * of 0x0000 (which must be transmitted as 0xffff), so UDP checksum offload
+ * is disabled by default as a safeguard.  It can be reactivated by setting
+ * the special link option link0 with ifconfig(8).
*/
-
+#define HME_CSUM_FEATURES (CSUM_TCP)
#define HMEDEBUG
#define KTR_HME KTR_CT2 /* XXX */
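The UDP caveat above stems from the checksum rules of RFC 768: a computed UDP
checksum of 0x0000 must be transmitted as 0xffff, because 0x0000 in the header
means "no checksum was computed".  Per the comment above, the HME hardware does
not perform that final substitution, which is why CSUM_UDP is left out of
HME_CSUM_FEATURES by default.  A minimal sketch of the step the hardware skips
(illustrative code, not part of the driver):

/* Fold a 32-bit one's-complement sum and apply the UDP zero-checksum rule. */
static unsigned short
udp_finish_cksum(unsigned int sum)
{
	sum = (sum >> 16) + (sum & 0xffff);	/* fold carries into 16 bits */
	sum += (sum >> 16);
	sum = ~sum & 0xffff;			/* one's complement */
	if (sum == 0)
		sum = 0xffff;		/* 0x0000 is reserved for "no checksum" */
	return ((unsigned short)sum);
}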
@@ -80,6 +84,12 @@ __FBSDID("$FreeBSD$");
#include <net/if_media.h>
#include <net/if_vlan_var.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
@@ -106,10 +116,12 @@ static int hme_mediachange(struct ifnet *);
static void hme_mediastatus(struct ifnet *, struct ifmediareq *);
static int hme_load_txmbuf(struct hme_softc *, struct mbuf *);
-static void hme_read(struct hme_softc *, int, int);
+static void hme_read(struct hme_softc *, int, int, u_int32_t);
static void hme_eint(struct hme_softc *, u_int);
static void hme_rint(struct hme_softc *);
static void hme_tint(struct hme_softc *);
+static void hme_txcksum(struct mbuf *, u_int32_t *);
+static void hme_rxcksum(struct mbuf *, u_int32_t);
static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
static void hme_rxdma_callback(void *, bus_dma_segment_t *, int,
@@ -266,6 +278,7 @@ hme_config(struct hme_softc *sc)
goto fail_txdesc;
}
+ sc->sc_csum_features = HME_CSUM_FEATURES;
/* Initialize ifnet structure. */
ifp->if_softc = sc;
if_initname(ifp, device_get_name(sc->sc_dev),
@@ -318,11 +331,12 @@ hme_config(struct hme_softc *sc)
ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);
/*
- * Tell the upper layer(s) we support long frames.
+ * Tell the upper layer(s) we support long frames/checksum offloads.
*/
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
- ifp->if_capabilities |= IFCAP_VLAN_MTU;
- ifp->if_capenable |= IFCAP_VLAN_MTU;
+ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
+ ifp->if_hwassist |= sc->sc_csum_features;
+ ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
callout_init(&sc->sc_tick_ch, 0);
return (0);
@@ -658,7 +672,7 @@ hme_init(void *xsc)
struct hme_softc *sc = (struct hme_softc *)xsc;
struct ifnet *ifp = &sc->sc_arpcom.ac_if;
u_int8_t *ea;
- u_int32_t v;
+ u_int32_t n, v;
/*
* Initialization sequence. The numbered steps below correspond
@@ -742,6 +756,15 @@ hme_init(void *xsc)
v = HME_SEB_CFG_BURST64;
break;
}
+ /*
+ * Blindly enabling 64-bit transfers may hang PCI cards (Cheerio?),
+ * and it also breaks TX checksum offload.  It is not clear whether
+ * this is a hardware bug or an artifact of the driver's DMA scheme.
+ *
+ * if (sc->sc_pci == 0)
+ * v |= HME_SEB_CFG_64BIT;
+ */
HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
/* step 9. ETX Configuration: use mostly default values */
@@ -777,6 +800,10 @@ hme_init(void *xsc)
/* Enable DMA, fix RX first byte offset. */
v &= ~HME_ERX_CFG_FBO_MASK;
v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
+ /* RX TCP/UDP checksum offset */
+ n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
+ n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
+ v |= n;
CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
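The CSUMSTART value programmed here is the byte offset of the TCP/UDP header
for an option-less IP packet divided by two, i.e. a count of 16-bit words from
the start of the frame.  A trivial standalone check of that arithmetic
(constants spelled out; a sketch, not driver code):

#include <assert.h>

int
main(void)
{
	/* ETHER_HDR_LEN + sizeof(struct ip), expressed in half-words */
	unsigned n = (14 + 20) / 2;

	assert(n == 17);	/* checksumming starts 34 bytes into the frame */
	return (0);
}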
@@ -895,6 +922,43 @@ hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
("hme_txdma_callback: missed end of packet!"));
}
+/* TX TCP/UDP checksum */
+static void
+hme_txcksum(struct mbuf *m, u_int32_t *cflags)
+{
+ struct ip *ip;
+ u_int32_t offset, offset2;
+ caddr_t p;
+
+ for(; m && m->m_len == 0; m = m->m_next)
+ ;
+ if (m == NULL || m->m_len < ETHER_HDR_LEN) {
+ printf("hme_txcksum: m_len < ETHER_HDR_LEN\n");
+ return; /* checksum will be corrupted */
+ }
+ if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
+ if (m->m_len != ETHER_HDR_LEN) {
+ printf("hme_txcksum: m_len != ETHER_HDR_LEN\n");
+ return; /* checksum will be corrupted */
+ }
+ /* XXX */
+ for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
+ ;
+ if (m == NULL)
+ return; /* checksum will be corrupted */
+ ip = mtod(m, struct ip *);
+ } else {
+ p = mtod(m, caddr_t);
+ p += ETHER_HDR_LEN;
+ ip = (struct ip *)p;
+ }
+ offset2 = m->m_pkthdr.csum_data;
+ offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
+ *cflags = offset << HME_XD_TXCKSUM_SSHIFT;
+ *cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT);
+ *cflags |= HME_XD_TXCKSUM;
+}
+
/*
* Routine to dma map an mbuf chain, set up the descriptor rings accordingly and
* start the transmission.
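To make the descriptor fields used by hme_txcksum() concrete: for CSUM_TCP the
FreeBSD stack records the offset of the checksum field within the transport
header in m_pkthdr.csum_data (offsetof(struct tcphdr, th_sum)), and
hme_txcksum() adds the link and IP header lengths to produce the frame-relative
start and stuff offsets shifted into the descriptor by HME_XD_TXCKSUM_SSHIFT
and HME_XD_TXCKSUM_OSHIFT.  A standalone sketch for a TCP segment with no IP
options (the struct below is a stand-in mirroring struct tcphdr, not driver
code):

#include <stddef.h>
#include <stdio.h>

struct tcphdr_sketch {			/* field layout mirrors struct tcphdr */
	unsigned short th_sport, th_dport;
	unsigned int   th_seq, th_ack;
	unsigned char  th_offx2, th_flags;
	unsigned short th_win, th_sum, th_urp;
};

int
main(void)
{
	size_t ether_hdr_len = 14;	/* ETHER_HDR_LEN */
	size_t ip_hl = 20;		/* IP header without options */
	size_t csum_data = offsetof(struct tcphdr_sketch, th_sum);	/* 16 */
	size_t start = ether_hdr_len + ip_hl;	/* 34: first byte summed */
	size_t stuff = start + csum_data;	/* 50: where the result is stored */

	printf("start=%zu stuff=%zu\n", start, stuff);
	return (0);
}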
@@ -907,11 +971,13 @@ hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
struct hme_txdma_arg cba;
struct hme_txdesc *td;
int error, si, ri;
- u_int32_t flags;
+ u_int32_t flags, cflags = 0;
si = sc->sc_rb.rb_tdhead;
if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
return (-1);
+ if ((m0->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
+ hme_txcksum(m0, &cflags);
td->htx_m = m0;
cba.hta_sc = sc;
cba.hta_htx = td;
@@ -935,7 +1001,7 @@ hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
do {
ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
- HME_XD_OWN;
+ HME_XD_OWN | cflags;
CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
ri, si, flags);
HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
@@ -953,7 +1019,7 @@ fail:
* Pass a packet to the higher levels.
*/
static void
-hme_read(struct hme_softc *sc, int ix, int len)
+hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
{
struct ifnet *ifp = &sc->sc_arpcom.ac_if;
struct mbuf *m;
@@ -988,6 +1054,9 @@ hme_read(struct hme_softc *sc, int ix, int len)
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
m_adj(m, HME_RXOFFS);
+ /* RX TCP/UDP checksum */
+ if (ifp->if_capenable & IFCAP_RXCSUM)
+ hme_rxcksum(m, flags);
/* Pass the packet up. */
(*ifp->if_input)(ifp, m);
}
@@ -1110,6 +1179,71 @@ hme_tint(struct hme_softc *sc)
}
/*
+ * RX TCP/UDP checksum
+ */
+static void
+hme_rxcksum(struct mbuf *m, u_int32_t flags)
+{
+ struct ether_header *eh;
+ struct ip *ip;
+ struct udphdr *uh;
+ int32_t hlen, len, pktlen;
+ u_int16_t cksum, *opts;
+ u_int32_t temp32;
+
+ pktlen = m->m_pkthdr.len;
+ if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
+ return;
+ eh = mtod(m, struct ether_header *);
+ if (eh->ether_type != htons(ETHERTYPE_IP))
+ return;
+ ip = (struct ip *)(eh + 1);
+ if (ip->ip_v != IPVERSION)
+ return;
+
+ hlen = ip->ip_hl << 2;
+ pktlen -= sizeof(struct ether_header);
+ if (hlen < sizeof(struct ip))
+ return;
+ if (ntohs(ip->ip_len) < hlen)
+ return;
+ if (ntohs(ip->ip_len) != pktlen)
+ return;
+ if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
+ return; /* can't handle fragmented packet */
+
+ switch (ip->ip_p) {
+ case IPPROTO_TCP:
+ if (pktlen < (hlen + sizeof(struct tcphdr)))
+ return;
+ break;
+ case IPPROTO_UDP:
+ if (pktlen < (hlen + sizeof(struct udphdr)))
+ return;
+ uh = (struct udphdr *)((caddr_t)ip + hlen);
+ if (uh->uh_sum == 0)
+ return; /* no checksum */
+ break;
+ default:
+ return;
+ }
+
+ cksum = ~(flags & HME_XD_RXCKSUM);
+ /* checksum fixup for IP options */
+ len = hlen - sizeof(struct ip);
+ if (len > 0) {
+ opts = (u_int16_t *)(ip + 1);
+ for (; len > 0; len -= sizeof(u_int16_t), opts++) {
+ temp32 = cksum - *opts;
+ temp32 = (temp32 >> 16) + (temp32 & 65535);
+ cksum = temp32 & 65535;
+ }
+ }
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
+ m->m_pkthdr.csum_data = cksum;
+}
+
+/*
* Receive interrupt.
*/
static void
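Because the receive checksum start programmed into ERX_CFG assumes an
option-less IP header, any IP option bytes end up included in the hardware's
sum, and the loop in hme_rxcksum() above backs them out with one's-complement
subtraction.  A self-contained sketch of that subtraction step (illustrative
name, not driver code):

/* Remove one 16-bit word from a one's-complement sum, folding the borrow. */
static unsigned short
cksum_sub_word(unsigned short cksum, unsigned short word)
{
	unsigned int t = (unsigned int)cksum - word;

	t = (t >> 16) + (t & 0xffff);	/* end-around borrow */
	return ((unsigned short)t);
}

Calling this once per IP option word reproduces the temp32 arithmetic in the
fixup loop above.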
@@ -1139,7 +1273,7 @@ hme_rint(struct hme_softc *sc)
hme_discard_rxbuf(sc, ri);
} else {
len = HME_XD_DECODE_RSIZE(flags);
- hme_read(sc, ri, len);
+ hme_read(sc, ri, len, flags);
}
}
if (progress) {
@@ -1375,6 +1509,12 @@ hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
*/
hme_init(sc);
}
+ if ((ifp->if_flags & IFF_LINK0) != 0)
+ sc->sc_csum_features |= CSUM_UDP;
+ else
+ sc->sc_csum_features &= ~CSUM_UDP;
+ if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
+ ifp->if_hwassist = sc->sc_csum_features;
#ifdef HMEDEBUG
sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
@@ -1389,6 +1529,13 @@ hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
case SIOCSIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
break;
+ case SIOCSIFCAP:
+ ifp->if_capenable = ifr->ifr_reqcap;
+ if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
+ ifp->if_hwassist = sc->sc_csum_features;
+ else
+ ifp->if_hwassist = 0;
+ break;
default:
error = ether_ioctl(ifp, cmd, data);
break;