summaryrefslogtreecommitdiffstats
path: root/sys/dev/netmap/netmap_kern.h
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev/netmap/netmap_kern.h')
-rw-r--r--sys/dev/netmap/netmap_kern.h46
1 files changed, 34 insertions, 12 deletions
diff --git a/sys/dev/netmap/netmap_kern.h b/sys/dev/netmap/netmap_kern.h
index f271b5e..a1ac925 100644
--- a/sys/dev/netmap/netmap_kern.h
+++ b/sys/dev/netmap/netmap_kern.h
@@ -34,6 +34,8 @@
#ifndef _NET_NETMAP_KERN_H_
#define _NET_NETMAP_KERN_H_
+#define NETMAP_MEM2 // use the new memory allocator
+
#if defined(__FreeBSD__)
#define NM_LOCK_T struct mtx
#define NM_SELINFO_T struct selinfo
@@ -104,20 +106,25 @@ struct netmap_adapter {
int refcount; /* number of user-space descriptors using this
interface, which is equal to the number of
struct netmap_if objs in the mapped region. */
+ /*
+ * The selwakeup in the interrupt thread can use per-ring
+ * and/or global wait queues. We track how many clients
+ * of each type we have so we can optimize the drivers,
+ * and especially avoid huge contention on the locks.
+ */
+ int na_single; /* threads attached to a single hw queue */
+ int na_multi; /* threads attached to multiple hw queues */
int separate_locks; /* set if the interface supports different
locks for rx, tx and core. */
- u_int num_rx_queues; /* number of tx/rx queue pairs: this is
- a duplicate field needed to simplify the
- signature of ``netmap_detach``. */
- u_int num_tx_queues; // if nonzero, overrides num_queues XXX
+ u_int num_rx_rings; /* number of tx/rx ring pairs */
+ u_int num_tx_rings; // if nonzero, overrides num_rx_rings
u_int num_tx_desc; /* number of descriptors in each queue */
u_int num_rx_desc;
- u_int buff_size; // XXX deprecate, use NETMAP_BUF_SIZE
+ //u_int buff_size; // XXX deprecate, use NETMAP_BUF_SIZE
- //u_int flags; // XXX unused
/* tx_rings and rx_rings are private but allocated
* as a contiguous chunk of memory. Each array has
* N+1 entries, for the adapter queues and for the host queue.
@@ -129,9 +136,7 @@ struct netmap_adapter {
/* copy of if_qflush and if_transmit pointers, to intercept
* packets from the network stack when netmap is active.
- * XXX probably if_qflush is not necessary.
*/
- //void (*if_qflush)(struct ifnet *); // XXX unused
int (*if_transmit)(struct ifnet *, struct mbuf *);
/* references to the ifnet and device routines, used by
@@ -294,6 +299,21 @@ netmap_idx_k2n(struct netmap_kring *kr, int idx)
}
+#ifdef NETMAP_MEM2
+/* Entries of the look-up table. */
+struct lut_entry {
+ void *vaddr; /* virtual address. */
+ vm_paddr_t paddr; /* physical address. */
+};
+
+struct netmap_obj_pool;
+extern struct lut_entry *netmap_buffer_lut;
+#define NMB_VA(i) (netmap_buffer_lut[i].vaddr)
+#define NMB_PA(i) (netmap_buffer_lut[i].paddr)
+#else /* NETMAP_MEM1 */
+#define NMB_VA(i) (netmap_buffer_base + (i * NETMAP_BUF_SIZE) )
+#endif /* NETMAP_MEM2 */
+
/*
* NMB returns the virtual address of a buffer (buffer 0 on bad index)
* PNMB also fills the physical address
@@ -302,17 +322,19 @@ static inline void *
NMB(struct netmap_slot *slot)
{
uint32_t i = slot->buf_idx;
- return (i >= netmap_total_buffers) ? netmap_buffer_base :
- netmap_buffer_base + (i * NETMAP_BUF_SIZE);
+ return (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);
}
static inline void *
PNMB(struct netmap_slot *slot, uint64_t *pp)
{
uint32_t i = slot->buf_idx;
- void *ret = (i >= netmap_total_buffers) ? netmap_buffer_base :
- netmap_buffer_base + (i * NETMAP_BUF_SIZE);
+ void *ret = (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);
+#ifdef NETMAP_MEM2
+ *pp = (i >= netmap_total_buffers) ? NMB_PA(0) : NMB_PA(i);
+#else
*pp = vtophys(ret);
+#endif
return ret;
}
OpenPOWER on IntegriCloud