author		dwmalone <dwmalone@FreeBSD.org>	2000-08-19 08:32:59 +0000
committer	dwmalone <dwmalone@FreeBSD.org>	2000-08-19 08:32:59 +0000
commit		df0e25bf6c3619217f1f2c8b5a35a6e706f2a0b4 (patch)
tree		47f526cc36bae230ba5426a392413b1b46c0d678 /sys/pci
parent		2f92e39a0fe52366609e44e5a1978feb243c8755 (diff)
Replace the mbuf external reference counting code with something
that should be better.

The old code counted references to mbuf clusters by using the offset
of the cluster from the start of the memory allocated for mbufs and
clusters as an index into an array of chars, which did the reference
counting. If the external storage was not a cluster, reference
counting had to be done by the code using that external storage.

NetBSD's system of linked lists of mbufs was considered, but Alfred
felt it would have locking issues when the kernel was made more SMP
friendly.

The system implemented uses a pool of unions to track external
storage. The union contains an int for counting the references and a
pointer for forming a free list. The reference counts are incremented
and decremented atomically and so should be SMP friendly. This system
can track reference counts for any sort of external storage.

Access to the reference counting code is now through macros defined
in mbuf.h, so it should be easier to make changes to the system in
the future.

The possibility of storing the reference count in one of the
referencing mbufs was considered, but was rejected because it would
often leave extra mbufs allocated. Storing the reference count in the
cluster was also considered, but because the external storage may not
be a cluster this isn't an option. The size of the pool of reference
counters is available in the stats provided by "netstat -m".

PR:		19866
Submitted by:	Bosko Milekic <bmilekic@dsuper.net>
Reviewed by:	alfred (glanced at by others on -net)
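The sketch below illustrates the idea described in the message: a pool
of unions, each holding either a reference count (while its external
buffer is attached to mbufs) or a free-list pointer (while unused).
It is not the actual sys/mbuf.h code; the names m_ext_refcnt, ref_pool
and REF_POOL_SIZE are invented for the example, and the kernel would
adjust the counts with its atomic primitives rather than the plain
++/-- used here.

/*
 * Illustrative userland sketch of a union-based reference counter pool.
 */
#include <stdio.h>

#define REF_POOL_SIZE	4		/* tiny pool, just for the demo */

union m_ext_refcnt {
	unsigned int		refcnt;	/* in use: references to the storage */
	union m_ext_refcnt	*next;	/* free: link in the counter free list */
};

static union m_ext_refcnt ref_pool[REF_POOL_SIZE];
static union m_ext_refcnt *ref_free;

/* Thread every pool entry onto the free list. */
static void
ref_init(void)
{
	int i;

	for (i = 0; i < REF_POOL_SIZE - 1; i++)
		ref_pool[i].next = &ref_pool[i + 1];
	ref_pool[REF_POOL_SIZE - 1].next = NULL;
	ref_free = &ref_pool[0];
}

/* Take a counter from the free list for newly attached external storage. */
static union m_ext_refcnt *
ref_alloc(void)
{
	union m_ext_refcnt *r = ref_free;

	if (r != NULL) {
		ref_free = r->next;
		r->refcnt = 1;
	}
	return (r);
}

/* Drop one reference; at zero the counter is recycled (and the real code
 * would call the buffer's ext_free routine at this point). */
static void
ref_release(union m_ext_refcnt *r)
{
	if (--r->refcnt == 0) {
		r->next = ref_free;
		ref_free = r;
	}
}

int
main(void)
{
	union m_ext_refcnt *r;

	ref_init();
	r = ref_alloc();	/* buffer attached to a first mbuf */
	r->refcnt++;		/* a second mbuf shares the same storage */
	ref_release(r);		/* first mbuf freed: storage stays alive */
	ref_release(r);		/* last reference: counter back on free list */
	printf("free list head back at slot %ld\n", (long)(ref_free - ref_pool));
	return (0);
}

Because any external storage gets one of these counters, the per-driver
jref/inuse bookkeeping removed in the diff below is no longer needed.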
Diffstat (limited to 'sys/pci')
-rw-r--r--	sys/pci/if_sk.c		83
-rw-r--r--	sys/pci/if_skreg.h	1
-rw-r--r--	sys/pci/if_ti.c		83
-rw-r--r--	sys/pci/if_tireg.h	1
-rw-r--r--	sys/pci/if_wb.c		16
5 files changed, 32 insertions, 152 deletions
diff --git a/sys/pci/if_sk.c b/sys/pci/if_sk.c
index b74f3f1..d45ac26 100644
--- a/sys/pci/if_sk.c
+++ b/sys/pci/if_sk.c
@@ -148,8 +148,7 @@ static int sk_newbuf __P((struct sk_if_softc *,
struct sk_chain *, struct mbuf *));
static int sk_alloc_jumbo_mem __P((struct sk_if_softc *));
static void *sk_jalloc __P((struct sk_if_softc *));
-static void sk_jfree __P((caddr_t, u_int));
-static void sk_jref __P((caddr_t, u_int));
+static void sk_jfree __P((caddr_t, void *));
static int sk_init_rx_ring __P((struct sk_if_softc *));
static void sk_init_tx_ring __P((struct sk_if_softc *));
static u_int32_t sk_win_read_4 __P((struct sk_softc *, int));
@@ -690,12 +689,9 @@ static int sk_newbuf(sc_if, c, m)
}
/* Attach the buffer to the mbuf */
- m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
- m_new->m_flags |= M_EXT;
- m_new->m_ext.ext_size = m_new->m_pkthdr.len =
- m_new->m_len = SK_MCLBYTES;
- m_new->m_ext.ext_free = sk_jfree;
- m_new->m_ext.ext_ref = sk_jref;
+ MEXTADD(m_new, buf, SK_MCLBYTES, sk_jfree, NULL);
+ m_new->m_data = (void *)buf;
+ m_new->m_pkthdr.len = m_new->m_len = SK_MCLBYTES;
} else {
/*
* We're re-using a previously allocated mbuf;
@@ -765,7 +761,6 @@ static int sk_alloc_jumbo_mem(sc_if)
aptr[0] = (u_int64_t *)sc_if;
ptr += sizeof(u_int64_t);
sc_if->sk_cdata.sk_jslots[i].sk_buf = ptr;
- sc_if->sk_cdata.sk_jslots[i].sk_inuse = 0;
ptr += SK_MCLBYTES;
entry = malloc(sizeof(struct sk_jpool_entry),
M_DEVBUF, M_NOWAIT);
@@ -803,55 +798,15 @@ static void *sk_jalloc(sc_if)
SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
- sc_if->sk_cdata.sk_jslots[entry->slot].sk_inuse = 1;
return(sc_if->sk_cdata.sk_jslots[entry->slot].sk_buf);
}
/*
- * Adjust usage count on a jumbo buffer. In general this doesn't
- * get used much because our jumbo buffers don't get passed around
- * a lot, but it's implemented for correctness.
- */
-static void sk_jref(buf, size)
- caddr_t buf;
- u_int size;
-{
- struct sk_if_softc *sc_if;
- u_int64_t **aptr;
- register int i;
-
- /* Extract the softc struct pointer. */
- aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
- sc_if = (struct sk_if_softc *)(aptr[0]);
-
- if (sc_if == NULL)
- panic("sk_jref: can't find softc pointer!");
-
- if (size != SK_MCLBYTES)
- panic("sk_jref: adjusting refcount of buf of wrong size!");
-
- /* calculate the slot this buffer belongs to */
-
- i = ((vm_offset_t)aptr
- - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
-
- if ((i < 0) || (i >= SK_JSLOTS))
- panic("sk_jref: asked to reference buffer "
- "that we don't manage!");
- else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
- panic("sk_jref: buffer already free!");
- else
- sc_if->sk_cdata.sk_jslots[i].sk_inuse++;
-
- return;
-}
-
-/*
* Release a jumbo buffer.
*/
-static void sk_jfree(buf, size)
+static void sk_jfree(buf, args)
caddr_t buf;
- u_int size;
+ void *args;
{
struct sk_if_softc *sc_if;
u_int64_t **aptr;
@@ -865,31 +820,19 @@ static void sk_jfree(buf, size)
if (sc_if == NULL)
panic("sk_jfree: can't find softc pointer!");
- if (size != SK_MCLBYTES)
- panic("sk_jfree: freeing buffer of wrong size!");
-
/* calculate the slot this buffer belongs to */
-
i = ((vm_offset_t)aptr
- (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
if ((i < 0) || (i >= SK_JSLOTS))
panic("sk_jfree: asked to free buffer that we don't manage!");
- else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
- panic("sk_jfree: buffer already free!");
- else {
- sc_if->sk_cdata.sk_jslots[i].sk_inuse--;
- if(sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0) {
- entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
- if (entry == NULL)
- panic("sk_jfree: buffer not in use!");
- entry->slot = i;
- SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead,
- jpool_entries);
- SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
- entry, jpool_entries);
- }
- }
+
+ entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
+ if (entry == NULL)
+ panic("sk_jfree: buffer not in use!");
+ entry->slot = i;
+ SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
return;
}
diff --git a/sys/pci/if_skreg.h b/sys/pci/if_skreg.h
index af17c1c..24cce27 100644
--- a/sys/pci/if_skreg.h
+++ b/sys/pci/if_skreg.h
@@ -1124,7 +1124,6 @@ struct sk_tx_desc {
struct sk_jslot {
caddr_t sk_buf;
- int sk_inuse;
};
struct sk_jpool_entry {
diff --git a/sys/pci/if_ti.c b/sys/pci/if_ti.c
index 7e1b6f7..56847f4 100644
--- a/sys/pci/if_ti.c
+++ b/sys/pci/if_ti.c
@@ -190,8 +190,7 @@ static void ti_cmd_ext __P((struct ti_softc *, struct ti_cmd_desc *,
static void ti_handle_events __P((struct ti_softc *));
static int ti_alloc_jumbo_mem __P((struct ti_softc *));
static void *ti_jalloc __P((struct ti_softc *));
-static void ti_jfree __P((caddr_t, u_int));
-static void ti_jref __P((caddr_t, u_int));
+static void ti_jfree __P((caddr_t, void *));
static int ti_newbuf_std __P((struct ti_softc *, int, struct mbuf *));
static int ti_newbuf_mini __P((struct ti_softc *, int, struct mbuf *));
static int ti_newbuf_jumbo __P((struct ti_softc *, int, struct mbuf *));
@@ -629,7 +628,6 @@ static int ti_alloc_jumbo_mem(sc)
aptr[0] = (u_int64_t *)sc;
ptr += sizeof(u_int64_t);
sc->ti_cdata.ti_jslots[i].ti_buf = ptr;
- sc->ti_cdata.ti_jslots[i].ti_inuse = 0;
ptr += (TI_JLEN - sizeof(u_int64_t));
entry = malloc(sizeof(struct ti_jpool_entry),
M_DEVBUF, M_NOWAIT);
@@ -665,55 +663,15 @@ static void *ti_jalloc(sc)
SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
- sc->ti_cdata.ti_jslots[entry->slot].ti_inuse = 1;
return(sc->ti_cdata.ti_jslots[entry->slot].ti_buf);
}
/*
- * Adjust usage count on a jumbo buffer. In general this doesn't
- * get used much because our jumbo buffers don't get passed around
- * too much, but it's implemented for correctness.
- */
-static void ti_jref(buf, size)
- caddr_t buf;
- u_int size;
-{
- struct ti_softc *sc;
- u_int64_t **aptr;
- register int i;
-
- /* Extract the softc struct pointer. */
- aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
- sc = (struct ti_softc *)(aptr[0]);
-
- if (sc == NULL)
- panic("ti_jref: can't find softc pointer!");
-
- if (size != TI_JUMBO_FRAMELEN)
- panic("ti_jref: adjusting refcount of buf of wrong size!");
-
- /* calculate the slot this buffer belongs to */
-
- i = ((vm_offset_t)aptr
- - (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
-
- if ((i < 0) || (i >= TI_JSLOTS))
- panic("ti_jref: asked to reference buffer "
- "that we don't manage!");
- else if (sc->ti_cdata.ti_jslots[i].ti_inuse == 0)
- panic("ti_jref: buffer already free!");
- else
- sc->ti_cdata.ti_jslots[i].ti_inuse++;
-
- return;
-}
-
-/*
* Release a jumbo buffer.
*/
-static void ti_jfree(buf, size)
+static void ti_jfree(buf, args)
caddr_t buf;
- u_int size;
+ void *args;
{
struct ti_softc *sc;
u_int64_t **aptr;
@@ -727,31 +685,19 @@ static void ti_jfree(buf, size)
if (sc == NULL)
panic("ti_jfree: can't find softc pointer!");
- if (size != TI_JUMBO_FRAMELEN)
- panic("ti_jfree: freeing buffer of wrong size!");
-
/* calculate the slot this buffer belongs to */
-
i = ((vm_offset_t)aptr
- (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
if ((i < 0) || (i >= TI_JSLOTS))
panic("ti_jfree: asked to free buffer that we don't manage!");
- else if (sc->ti_cdata.ti_jslots[i].ti_inuse == 0)
- panic("ti_jfree: buffer already free!");
- else {
- sc->ti_cdata.ti_jslots[i].ti_inuse--;
- if(sc->ti_cdata.ti_jslots[i].ti_inuse == 0) {
- entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
- if (entry == NULL)
- panic("ti_jfree: buffer not in use!");
- entry->slot = i;
- SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead,
- jpool_entries);
- SLIST_INSERT_HEAD(&sc->ti_jfree_listhead,
- entry, jpool_entries);
- }
- }
+
+ entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
+ if (entry == NULL)
+ panic("ti_jfree: buffer not in use!");
+ entry->slot = i;
+ SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
return;
}
@@ -877,12 +823,9 @@ static int ti_newbuf_jumbo(sc, i, m)
}
/* Attach the buffer to the mbuf. */
- m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
- m_new->m_flags |= M_EXT;
- m_new->m_len = m_new->m_pkthdr.len =
- m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
- m_new->m_ext.ext_free = ti_jfree;
- m_new->m_ext.ext_ref = ti_jref;
+ m_new->m_data = (void *) buf;
+ m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
+ MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, ti_jfree, NULL);
} else {
m_new = m;
m_new->m_data = m_new->m_ext.ext_buf;
diff --git a/sys/pci/if_tireg.h b/sys/pci/if_tireg.h
index 4712272..6346c67 100644
--- a/sys/pci/if_tireg.h
+++ b/sys/pci/if_tireg.h
@@ -1046,7 +1046,6 @@ struct ti_event_desc {
struct ti_jslot {
caddr_t ti_buf;
- int ti_inuse;
};
/*
diff --git a/sys/pci/if_wb.c b/sys/pci/if_wb.c
index 397ca20..b33c542 100644
--- a/sys/pci/if_wb.c
+++ b/sys/pci/if_wb.c
@@ -147,7 +147,7 @@ static int wb_probe __P((device_t));
static int wb_attach __P((device_t));
static int wb_detach __P((device_t));
-static void wb_bfree __P((caddr_t, u_int));
+static void wb_bfree __P((caddr_t, void *args));
static int wb_newbuf __P((struct wb_softc *,
struct wb_chain_onefrag *,
struct mbuf *));
@@ -1078,9 +1078,9 @@ static int wb_list_rx_init(sc)
return(0);
}
-static void wb_bfree(buf, size)
+static void wb_bfree(buf, args)
caddr_t buf;
- u_int size;
+ void *args;
{
return;
}
@@ -1102,13 +1102,9 @@ static int wb_newbuf(sc, c, m)
"list -- packet dropped!\n", sc->wb_unit);
return(ENOBUFS);
}
-
- m_new->m_data = m_new->m_ext.ext_buf = c->wb_buf;
- m_new->m_flags |= M_EXT;
- m_new->m_ext.ext_size = m_new->m_pkthdr.len =
- m_new->m_len = WB_BUFBYTES;
- m_new->m_ext.ext_free = wb_bfree;
- m_new->m_ext.ext_ref = wb_bfree;
+ m_new->m_data = c->wb_buf;
+ m_new->m_pkthdr.len = m_new->m_len = WB_BUFBYTES;
+ MEXTADD(m_new, c->wb_buf, WB_BUFBYTES, wb_bfree, NULL);
} else {
m_new = m;
m_new->m_len = m_new->m_pkthdr.len = WB_BUFBYTES;