path: root/sys/dev/netmap/netmap_mem2.c
author     luigi <luigi@FreeBSD.org>    2014-02-18 05:01:04 +0000
committer  luigi <luigi@FreeBSD.org>    2014-02-18 05:01:04 +0000
commit     5bacc3bb87b954978543b0d82a4d5705e33f5c06 (patch)
tree       a79f129924ca9cf087c1e108d2d184a16ac1e42b /sys/dev/netmap/netmap_mem2.c
parent     dd5bb071cd203986ef23e5ceecdcef3cea848542 (diff)
MFH: sync the netmap code with the one in HEAD
(enhanced VALE switch, netmap pipes, emulated netmap mode). See details in the log for svn 261909.
Diffstat (limited to 'sys/dev/netmap/netmap_mem2.c')
-rw-r--r--  sys/dev/netmap/netmap_mem2.c  1261
1 files changed, 832 insertions, 429 deletions
diff --git a/sys/dev/netmap/netmap_mem2.c b/sys/dev/netmap/netmap_mem2.c
index dcf4b06..5491845 100644
--- a/sys/dev/netmap/netmap_mem2.c
+++ b/sys/dev/netmap/netmap_mem2.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2013 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
+ * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -8,7 +8,7 @@
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@@ -23,108 +23,49 @@
* SUCH DAMAGE.
*/
-/*
- * $FreeBSD$
- *
- * (New) memory allocator for netmap
- */
-
-/*
- * This allocator creates three memory pools:
- * nm_if_pool for the struct netmap_if
- * nm_ring_pool for the struct netmap_ring
- * nm_buf_pool for the packet buffers.
- *
- * that contain netmap objects. Each pool is made of a number of clusters,
- * multiple of a page size, each containing an integer number of objects.
- * The clusters are contiguous in user space but not in the kernel.
- * Only nm_buf_pool needs to be dma-able,
- * but for convenience use the same type of allocator for all.
- *
- * Once mapped, the three pools are exported to userspace
- * as a contiguous block, starting from nm_if_pool. Each
- * cluster (and pool) is an integral number of pages.
- * [ . . . ][ . . . . . .][ . . . . . . . . . .]
- * nm_if nm_ring nm_buf
- *
- * The userspace areas contain offsets of the objects in userspace.
- * When (at init time) we write these offsets, we find out the index
- * of the object, and from there locate the offset from the beginning
- * of the region.
- *
- * The individual allocators manage a pool of memory for objects of
- * the same size.
- * The pool is split into smaller clusters, whose size is a
- * multiple of the page size. The cluster size is chosen
- * to minimize the waste for a given max cluster size
- * (we do it by brute force, as we have relatively few objects
- * per cluster).
- *
- * Objects are aligned to the cache line (64 bytes) rounding up object
- * sizes when needed. A bitmap contains the state of each object.
- * Allocation scans the bitmap; this is done only on attach, so we are not
- * too worried about performance
- *
- * For each allocator we can define (through sysctl) the size and
- * number of each object. Memory is allocated at the first use of a
- * netmap file descriptor, and can be freed when all such descriptors
- * have been released (including unmapping the memory).
- * If memory is scarce, the system tries to get as much as possible
- * and the sysctl values reflect the actual allocation.
- * Together with desired values, the sysctl export also absolute
- * min and maximum values that cannot be overridden.
- *
- * struct netmap_if:
- * variable size, max 16 bytes per ring pair plus some fixed amount.
- * 1024 bytes should be large enough in practice.
- *
- * In the worst case we have one netmap_if per ring in the system.
- *
- * struct netmap_ring
- * variable size, 8 byte per slot plus some fixed amount.
- * Rings can be large (e.g. 4k slots, or >32Kbytes).
- * We default to 36 KB (9 pages), and a few hundred rings.
- *
- * struct netmap_buffer
- * The more the better, both because fast interfaces tend to have
- * many slots, and because we may want to use buffers to store
- * packets in userspace avoiding copies.
- * Must contain a full frame (eg 1518, or more for vlans, jumbo
- * frames etc.) plus be nicely aligned, plus some NICs restrict
- * the size to multiple of 1K or so. Default to 2K
- */
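/*
 * Illustration of the layout described above (not part of this patch):
 * the three pools are exported back to back, so the offset of packet
 * buffer 'idx' in the shared region is just the two preceding pool
 * sizes plus the buffer's position in its own pool.  A hypothetical,
 * self-contained user-space model:
 */
#include <stddef.h>
#include <stdint.h>

struct pool_layout {
	size_t if_total;	/* bytes taken by the netmap_if pool   */
	size_t ring_total;	/* bytes taken by the netmap_ring pool */
	size_t buf_size;	/* size of one packet buffer           */
};

static size_t
buf_offset(const struct pool_layout *l, uint32_t idx)
{
	return l->if_total + l->ring_total + (size_t)idx * l->buf_size;
}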
+#ifdef linux
+#include "bsd_glue.h"
+#endif /* linux */
-#define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */
+#ifdef __APPLE__
+#include "osx_glue.h"
+#endif /* __APPLE__ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h> /* prerequisite */
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <vm/vm.h> /* vtophys */
+#include <vm/pmap.h> /* vtophys */
+#include <sys/socket.h> /* sockaddrs */
+#include <sys/selinfo.h>
+#include <sys/sysctl.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/vnet.h>
+#include <machine/bus.h> /* bus_dmamap_* */
+
+#endif /* __FreeBSD__ */
+
+#include <net/netmap.h>
+#include <dev/netmap/netmap_kern.h>
+#include "netmap_mem2.h"
#ifdef linux
-// XXX a mtx would suffice here 20130415 lr
-// #define NMA_LOCK_T safe_spinlock_t
-#define NMA_LOCK_T struct semaphore
-#define NMA_LOCK_INIT() sema_init(&nm_mem.nm_mtx, 1)
-#define NMA_LOCK_DESTROY()
-#define NMA_LOCK() down(&nm_mem.nm_mtx)
-#define NMA_UNLOCK() up(&nm_mem.nm_mtx)
+#define NMA_LOCK_INIT(n) sema_init(&(n)->nm_mtx, 1)
+#define NMA_LOCK_DESTROY(n)
+#define NMA_LOCK(n) down(&(n)->nm_mtx)
+#define NMA_UNLOCK(n) up(&(n)->nm_mtx)
#else /* !linux */
-#define NMA_LOCK_T struct mtx
-#define NMA_LOCK_INIT() mtx_init(&nm_mem.nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
-#define NMA_LOCK_DESTROY() mtx_destroy(&nm_mem.nm_mtx)
-#define NMA_LOCK() mtx_lock(&nm_mem.nm_mtx)
-#define NMA_UNLOCK() mtx_unlock(&nm_mem.nm_mtx)
+#define NMA_LOCK_INIT(n) mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
+#define NMA_LOCK_DESTROY(n) mtx_destroy(&(n)->nm_mtx)
+#define NMA_LOCK(n) mtx_lock(&(n)->nm_mtx)
+#define NMA_UNLOCK(n) mtx_unlock(&(n)->nm_mtx)
#endif /* linux */
-enum {
- NETMAP_IF_POOL = 0,
- NETMAP_RING_POOL,
- NETMAP_BUF_POOL,
- NETMAP_POOLS_NR
-};
-
-
-struct netmap_obj_params {
- u_int size;
- u_int num;
-};
-
struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
[NETMAP_IF_POOL] = {
@@ -141,48 +82,31 @@ struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
},
};
-
-struct netmap_obj_pool {
- char name[16]; /* name of the allocator */
- u_int objtotal; /* actual total number of objects. */
- u_int objfree; /* number of free objects. */
- u_int clustentries; /* actual objects per cluster */
-
- /* limits */
- u_int objminsize; /* minimum object size */
- u_int objmaxsize; /* maximum object size */
- u_int nummin; /* minimum number of objects */
- u_int nummax; /* maximum number of objects */
-
- /* the total memory space is _numclusters*_clustsize */
- u_int _numclusters; /* how many clusters */
- u_int _clustsize; /* cluster size */
- u_int _objsize; /* actual object size */
-
- u_int _memtotal; /* _numclusters*_clustsize */
- struct lut_entry *lut; /* virt,phys addresses, objtotal entries */
- uint32_t *bitmap; /* one bit per buffer, 1 means free */
- uint32_t bitmap_slots; /* number of uint32 entries in bitmap */
+struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
+ [NETMAP_IF_POOL] = {
+ .size = 1024,
+ .num = 1,
+ },
+ [NETMAP_RING_POOL] = {
+ .size = 5*PAGE_SIZE,
+ .num = 4,
+ },
+ [NETMAP_BUF_POOL] = {
+ .size = 2048,
+ .num = 4098,
+ },
};
-struct netmap_mem_d {
- NMA_LOCK_T nm_mtx; /* protect the allocator */
- u_int nm_totalsize; /* shorthand */
-
- int finalized; /* !=0 iff preallocation done */
- int lasterr; /* last error for curr config */
- int refcount; /* existing priv structures */
- /* the three allocators */
- struct netmap_obj_pool pools[NETMAP_POOLS_NR];
-};
-
/*
* nm_mem is the memory allocator used for all physical interfaces
* running in netmap mode.
* Virtual (VALE) ports will have each its own allocator.
*/
-static struct netmap_mem_d nm_mem = { /* Our memory allocator. */
+static int netmap_mem_global_config(struct netmap_mem_d *nmd);
+static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
+static void netmap_mem_global_deref(struct netmap_mem_d *nmd);
+struct netmap_mem_d nm_mem = { /* Our memory allocator. */
.pools = {
[NETMAP_IF_POOL] = {
.name = "netmap_if",
@@ -206,62 +130,193 @@ static struct netmap_mem_d nm_mem = { /* Our memory allocator. */
.nummax = 1000000, /* one million! */
},
},
+ .config = netmap_mem_global_config,
+ .finalize = netmap_mem_global_finalize,
+ .deref = netmap_mem_global_deref,
+
+ .nm_id = 1,
+
+ .prev = &nm_mem,
+ .next = &nm_mem,
};
+
+struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
+
// XXX logically belongs to nm_mem
struct lut_entry *netmap_buffer_lut; /* exported */
+/* blueprint for the private memory allocators */
+static int netmap_mem_private_config(struct netmap_mem_d *nmd);
+static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
+static void netmap_mem_private_deref(struct netmap_mem_d *nmd);
+const struct netmap_mem_d nm_blueprint = {
+ .pools = {
+ [NETMAP_IF_POOL] = {
+ .name = "%s_if",
+ .objminsize = sizeof(struct netmap_if),
+ .objmaxsize = 4096,
+ .nummin = 1,
+ .nummax = 100,
+ },
+ [NETMAP_RING_POOL] = {
+ .name = "%s_ring",
+ .objminsize = sizeof(struct netmap_ring),
+ .objmaxsize = 32*PAGE_SIZE,
+ .nummin = 2,
+ .nummax = 1024,
+ },
+ [NETMAP_BUF_POOL] = {
+ .name = "%s_buf",
+ .objminsize = 64,
+ .objmaxsize = 65536,
+ .nummin = 4,
+ .nummax = 1000000, /* one million! */
+ },
+ },
+ .config = netmap_mem_private_config,
+ .finalize = netmap_mem_private_finalize,
+ .deref = netmap_mem_private_deref,
+
+ .flags = NETMAP_MEM_PRIVATE,
+};
+
/* memory allocator related sysctls */
#define STRINGIFY(x) #x
+
#define DECLARE_SYSCTLS(id, name) \
SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
- SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
- CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
- SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
- CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
- SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
- CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")
-
+ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
+ CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
+ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
+ CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
+ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
+ CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
+ SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
+ CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
+ "Default size of private netmap " STRINGIFY(name) "s"); \
+ SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
+ CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
+ "Default number of private netmap " STRINGIFY(name) "s")
+
+SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
+static int
+nm_mem_assign_id(struct netmap_mem_d *nmd)
+{
+ nm_memid_t id;
+ struct netmap_mem_d *scan = netmap_last_mem_d;
+ int error = ENOMEM;
+
+ NMA_LOCK(&nm_mem);
+
+ do {
+ /* we rely on unsigned wrap around */
+ id = scan->nm_id + 1;
+ if (id == 0) /* reserve 0 as error value */
+ id = 1;
+ scan = scan->next;
+ if (id != scan->nm_id) {
+ nmd->nm_id = id;
+ nmd->prev = scan->prev;
+ nmd->next = scan;
+ scan->prev->next = nmd;
+ scan->prev = nmd;
+ netmap_last_mem_d = nmd;
+ error = 0;
+ break;
+ }
+ } while (scan != netmap_last_mem_d);
+
+ NMA_UNLOCK(&nm_mem);
+ return error;
+}
+
+static void
+nm_mem_release_id(struct netmap_mem_d *nmd)
+{
+ NMA_LOCK(&nm_mem);
+
+ nmd->prev->next = nmd->next;
+ nmd->next->prev = nmd->prev;
+
+ if (netmap_last_mem_d == nmd)
+ netmap_last_mem_d = nmd->prev;
+
+ nmd->prev = nmd->next = NULL;
+
+ NMA_UNLOCK(&nm_mem);
+}
+
+
/*
- * Convert a userspace offset to a physical address.
- * XXX only called in the FreeBSD's netmap_mmap()
- * because in linux we map everything at once.
- *
* First, find the allocator that contains the requested offset,
* then locate the cluster through a lookup table.
*/
-static inline vm_paddr_t
-netmap_ofstophys(vm_offset_t offset)
+vm_paddr_t
+netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
int i;
- vm_offset_t o = offset;
- struct netmap_obj_pool *p = nm_mem.pools;
+ vm_ooffset_t o = offset;
+ vm_paddr_t pa;
+ struct netmap_obj_pool *p;
- for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i]._memtotal, i++) {
- if (offset >= p[i]._memtotal)
+ NMA_LOCK(nmd);
+ p = nmd->pools;
+
+ for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
+ if (offset >= p[i].memtotal)
continue;
// now lookup the cluster's address
- return p[i].lut[offset / p[i]._objsize].paddr +
+ pa = p[i].lut[offset / p[i]._objsize].paddr +
offset % p[i]._objsize;
+ NMA_UNLOCK(nmd);
+ return pa;
}
/* this is only in case of errors */
D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
- p[NETMAP_IF_POOL]._memtotal,
- p[NETMAP_IF_POOL]._memtotal
- + p[NETMAP_RING_POOL]._memtotal,
- p[NETMAP_IF_POOL]._memtotal
- + p[NETMAP_RING_POOL]._memtotal
- + p[NETMAP_BUF_POOL]._memtotal);
+ p[NETMAP_IF_POOL].memtotal,
+ p[NETMAP_IF_POOL].memtotal
+ + p[NETMAP_RING_POOL].memtotal,
+ p[NETMAP_IF_POOL].memtotal
+ + p[NETMAP_RING_POOL].memtotal
+ + p[NETMAP_BUF_POOL].memtotal);
+ NMA_UNLOCK(nmd);
return 0; // XXX bad address
}
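/*
 * Simplified model of the translation above (illustrative only, with
 * hypothetical types): walk the pools in order, subtracting each pool's
 * total until the offset falls inside one pool, then index that pool's
 * per-object lookup table.
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_pool {
	size_t   memtotal;	/* bytes covered by this pool      */
	size_t   objsize;	/* rounded size of one object      */
	uint64_t *paddr;	/* physical address of each object */
};

static uint64_t
sketch_ofstophys(const struct sketch_pool *p, int npools, size_t offset)
{
	int i;

	for (i = 0; i < npools; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		return p[i].paddr[offset / p[i].objsize] + offset % p[i].objsize;
	}
	return 0;	/* bad offset */
}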
+int
+netmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags,
+ nm_memid_t *id)
+{
+ int error = 0;
+ NMA_LOCK(nmd);
+ error = nmd->config(nmd);
+ if (error)
+ goto out;
+ if (nmd->flags & NETMAP_MEM_FINALIZED) {
+ *size = nmd->nm_totalsize;
+ } else {
+ int i;
+ *size = 0;
+ for (i = 0; i < NETMAP_POOLS_NR; i++) {
+ struct netmap_obj_pool *p = nmd->pools + i;
+ *size += (p->_numclusters * p->_clustsize);
+ }
+ }
+ *memflags = nmd->flags;
+ *id = nmd->nm_id;
+out:
+ NMA_UNLOCK(nmd);
+ return error;
+}
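/*
 * Hypothetical helper (illustrative only, not part of the patch) showing
 * the intended use of netmap_mem_get_info(): query the size that will be
 * mmapped and whether the region comes from a private allocator.
 */
static int
sketch_query_mem(struct netmap_adapter *na, u_int *memsize, int *is_private)
{
	u_int memflags;
	nm_memid_t memid;
	int err;

	err = netmap_mem_get_info(na->nm_mem, memsize, &memflags, &memid);
	if (err == 0)
		*is_private = (memflags & NETMAP_MEM_PRIVATE) != 0;
	return err;
}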
+
/*
* we store objects by kernel address, need to find the offset
* within the pool to export the value to userspace.
@@ -271,7 +326,7 @@ netmap_ofstophys(vm_offset_t offset)
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
- int i, k = p->clustentries, n = p->objtotal;
+ int i, k = p->_clustentries, n = p->objtotal;
ssize_t ofs = 0;
for (i = 0; i < n; i += k, ofs += p->_clustsize) {
@@ -292,25 +347,35 @@ netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
}
/* Helper functions which convert virtual addresses to offsets */
-#define netmap_if_offset(v) \
- netmap_obj_offset(&nm_mem.pools[NETMAP_IF_POOL], (v))
+#define netmap_if_offset(n, v) \
+ netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))
-#define netmap_ring_offset(v) \
- (nm_mem.pools[NETMAP_IF_POOL]._memtotal + \
- netmap_obj_offset(&nm_mem.pools[NETMAP_RING_POOL], (v)))
+#define netmap_ring_offset(n, v) \
+ ((n)->pools[NETMAP_IF_POOL].memtotal + \
+ netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))
-#define netmap_buf_offset(v) \
- (nm_mem.pools[NETMAP_IF_POOL]._memtotal + \
- nm_mem.pools[NETMAP_RING_POOL]._memtotal + \
- netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)))
+#define netmap_buf_offset(n, v) \
+ ((n)->pools[NETMAP_IF_POOL].memtotal + \
+ (n)->pools[NETMAP_RING_POOL].memtotal + \
+ netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))
+ssize_t
+netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
+{
+ ssize_t v;
+ NMA_LOCK(nmd);
+ v = netmap_if_offset(nmd, addr);
+ NMA_UNLOCK(nmd);
+ return v;
+}
+
/*
* report the index, and use start position as a hint,
* otherwise buffer allocation becomes terribly expensive.
*/
static void *
-netmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t *index)
+netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
uint32_t i = 0; /* index in the bitmap */
uint32_t mask, j; /* slot counter */
@@ -323,7 +388,7 @@ netmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t
}
if (p->objfree == 0) {
- D("%s allocator: run out of memory", p->name);
+ D("no more %s objects", p->name);
return NULL;
}
if (start)
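/*
 * Minimal self-contained model of the hinted bitmap scan used above
 * (one bit per object, 1 == free; the real code works on 32-bit words
 * at a time).  Illustrative only:
 */
#include <stdint.h>

/* return the first free index at or after *start, or 'total' if none;
 * the bit is cleared and *start is updated as the next hint */
static uint32_t
sketch_scan(uint32_t *bitmap, uint32_t total, uint32_t *start)
{
	uint32_t i;

	for (i = *start; i < total; i++) {
		if (bitmap[i / 32] & (1u << (i % 32))) {
			bitmap[i / 32] &= ~(1u << (i % 32));
			*start = i;
			return i;
		}
	}
	return total;
}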
@@ -356,28 +421,41 @@ netmap_obj_malloc(struct netmap_obj_pool *p, int len, uint32_t *start, uint32_t
/*
- * free by index, not by address. This is slow, but is only used
- * for a small number of objects (rings, nifp)
+ * free by index, not by address.
+ * XXX should we also cleanup the content ?
*/
-static void
+static int
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
+ uint32_t *ptr, mask;
+
if (j >= p->objtotal) {
D("invalid index %u, max %u", j, p->objtotal);
- return;
+ return 1;
+ }
+ ptr = &p->bitmap[j / 32];
+ mask = (1 << (j % 32));
+ if (*ptr & mask) {
+ D("ouch, double free on buffer %d", j);
+ return 1;
+ } else {
+ *ptr |= mask;
+ p->objfree++;
+ return 0;
}
- p->bitmap[j / 32] |= (1 << (j % 32));
- p->objfree++;
- return;
}
+/*
+ * free by address. This is slow but is only used for a few
+ * objects (rings, nifp)
+ */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
- int i, j, n = p->_memtotal / p->_clustsize;
+ u_int i, j, n = p->numclusters;
- for (i = 0, j = 0; i < n; i++, j += p->clustentries) {
- void *base = p->lut[i * p->clustentries].vaddr;
+ for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
+ void *base = p->lut[i * p->_clustentries].vaddr;
ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;
/* The given address is out of the scope of the current cluster. */
@@ -385,7 +463,7 @@ netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
continue;
j = j + relofs / p->_objsize;
- KASSERT(j != 0, ("Cannot free object 0"));
+ /* KASSERT(j != 0, ("Cannot free object 0")); */
netmap_obj_free(p, j);
return;
}
@@ -393,43 +471,91 @@ netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
vaddr, p->name);
}
-#define netmap_if_malloc(len) netmap_obj_malloc(&nm_mem.pools[NETMAP_IF_POOL], len, NULL, NULL)
-#define netmap_if_free(v) netmap_obj_free_va(&nm_mem.pools[NETMAP_IF_POOL], (v))
-#define netmap_ring_malloc(len) netmap_obj_malloc(&nm_mem.pools[NETMAP_RING_POOL], len, NULL, NULL)
-#define netmap_ring_free(v) netmap_obj_free_va(&nm_mem.pools[NETMAP_RING_POOL], (v))
-#define netmap_buf_malloc(_pos, _index) \
- netmap_obj_malloc(&nm_mem.pools[NETMAP_BUF_POOL], NETMAP_BUF_SIZE, _pos, _index)
+#define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
+#define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
+#define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
+#define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
+#define netmap_buf_malloc(n, _pos, _index) \
+ netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], NETMAP_BDG_BUF_SIZE(n), _pos, _index)
+#if 0 // XXX unused
/* Return the index associated to the given packet buffer */
-#define netmap_buf_index(v) \
- (netmap_obj_offset(&nm_mem.pools[NETMAP_BUF_POOL], (v)) / nm_mem.pools[NETMAP_BUF_POOL]._objsize)
+#define netmap_buf_index(n, v) \
+ (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
+#endif
+
+/*
+ * allocate extra buffers in a linked list.
+ * returns the actual number.
+ */
+uint32_t
+netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
+{
+ struct netmap_mem_d *nmd = na->nm_mem;
+ uint32_t i, pos = 0; /* opaque, scan position in the bitmap */
+
+ NMA_LOCK(nmd);
+
+ *head = 0; /* default, 'null' index ie empty list */
+ for (i = 0 ; i < n; i++) {
+ uint32_t cur = *head; /* save current head */
+ uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
+ if (p == NULL) {
+ D("no more buffers after %d of %d", i, n);
+ *head = cur; /* restore */
+ break;
+ }
+ RD(5, "allocate buffer %d -> %d", *head, cur);
+ *p = cur; /* link to previous head */
+ }
+
+ NMA_UNLOCK(nmd);
+
+ return i;
+}
+
+static void
+netmap_extra_free(struct netmap_adapter *na, uint32_t head)
+{
+ struct lut_entry *lut = na->na_lut;
+ struct netmap_mem_d *nmd = na->nm_mem;
+ struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
+ uint32_t i, cur, *buf;
+
+ D("freeing the extra list");
+ for (i = 0; head >=2 && head < p->objtotal; i++) {
+ cur = head;
+ buf = lut[head].vaddr;
+ head = *buf;
+ *buf = 0;
+ if (netmap_obj_free(p, cur))
+ break;
+ }
+ if (head != 0)
+ D("breaking with head %d", head);
+ D("freed %d buffers", i);
+}
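/*
 * The extra buffers above are chained by storing the index of the next
 * buffer in the first 32 bits of each buffer; indices 0 and 1 are
 * reserved and a head of 0 means "empty list".  Minimal sketch of
 * walking such a chain through the lookup table (illustrative only,
 * with a simplified lut type):
 */
#include <stdint.h>

struct sketch_lut { void *vaddr; };	/* stand-in for struct lut_entry */

static uint32_t
sketch_count_extra(const struct sketch_lut *lut, uint32_t objtotal, uint32_t head)
{
	uint32_t n = 0;

	while (head >= 2 && head < objtotal) {
		head = *(uint32_t *)lut[head].vaddr; /* next index lives in the buffer */
		n++;
	}
	return n;
}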
/* Return nonzero on error */
static int
-netmap_new_bufs(struct netmap_if *nifp,
- struct netmap_slot *slot, u_int n)
+netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
- struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];
- int i = 0; /* slot counter */
+ struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
+ u_int i = 0; /* slot counter */
uint32_t pos = 0; /* slot in p->bitmap */
uint32_t index = 0; /* buffer index */
- (void)nifp; /* UNUSED */
for (i = 0; i < n; i++) {
- void *vaddr = netmap_buf_malloc(&pos, &index);
+ void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
if (vaddr == NULL) {
- D("unable to locate empty packet buffer");
+ D("no more buffers after %d of %d", i, n);
goto cleanup;
}
slot[i].buf_idx = index;
slot[i].len = p->_objsize;
- /* XXX setting flags=NS_BUF_CHANGED forces a pointer reload
- * in the NIC ring. This is a hack that hides missing
- * initializations in the drivers, and should go away.
- */
- // slot[i].flags = NS_BUF_CHANGED;
+ slot[i].flags = 0;
}
ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
@@ -444,11 +570,24 @@ cleanup:
return (ENOMEM);
}
+static void
+netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
+{
+ struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
+ u_int i;
+
+ for (i = 0; i < n; i++) {
+ slot[i].buf_idx = index;
+ slot[i].len = p->_objsize;
+ slot[i].flags = 0;
+ }
+}
+
static void
-netmap_free_buf(struct netmap_if *nifp, uint32_t i)
+netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
- struct netmap_obj_pool *p = &nm_mem.pools[NETMAP_BUF_POOL];
+ struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
if (i < 2 || i >= p->objtotal) {
D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
@@ -457,19 +596,34 @@ netmap_free_buf(struct netmap_if *nifp, uint32_t i)
netmap_obj_free(p, i);
}
+
+static void
+netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
+{
+ u_int i;
+
+ for (i = 0; i < n; i++) {
+ if (slot[i].buf_idx > 2)
+ netmap_free_buf(nmd, slot[i].buf_idx);
+ }
+}
+
static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{
+
if (p == NULL)
return;
if (p->bitmap)
free(p->bitmap, M_NETMAP);
p->bitmap = NULL;
if (p->lut) {
- int i;
- for (i = 0; i < p->objtotal; i += p->clustentries) {
+ u_int i;
+ size_t sz = p->_clustsize;
+
+ for (i = 0; i < p->objtotal; i += p->_clustentries) {
if (p->lut[i].vaddr)
- contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
+ contigfree(p->lut[i].vaddr, sz, M_NETMAP);
}
bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
@@ -479,6 +633,10 @@ netmap_reset_obj_allocator(struct netmap_obj_pool *p)
#endif
}
p->lut = NULL;
+ p->objtotal = 0;
+ p->memtotal = 0;
+ p->numclusters = 0;
+ p->objfree = 0;
}
/*
@@ -496,8 +654,7 @@ netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
* We receive a request for objtotal objects, of size objsize each.
* Internally we may round up both numbers, as we allocate objects
* in small clusters multiple of the page size.
- * In the allocator we don't need to store the objsize,
- * but we do need to keep track of objtotal' and clustentries,
+ * We need to keep track of objtotal and clustentries,
* as they are needed when freeing memory.
*
* XXX note -- userspace needs the buffers to be contiguous,
@@ -509,16 +666,21 @@ netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
- int i, n;
+ int i;
u_int clustsize; /* the cluster size, multiple of page size */
u_int clustentries; /* how many objects per entry */
+ /* we store the current request, so we can
+ * detect configuration changes later */
+ p->r_objtotal = objtotal;
+ p->r_objsize = objsize;
+
#define MAX_CLUSTSIZE (1<<17)
-#define LINE_ROUND 64
+#define LINE_ROUND NM_CACHE_ALIGN // 64
if (objsize >= MAX_CLUSTSIZE) {
/* we could do it but there is no point */
D("unsupported allocation for %d bytes", objsize);
- goto error;
+ return EINVAL;
}
/* make sure objsize is a multiple of LINE_ROUND */
i = (objsize & (LINE_ROUND - 1));
@@ -529,12 +691,12 @@ netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int obj
if (objsize < p->objminsize || objsize > p->objmaxsize) {
D("requested objsize %d out of range [%d, %d]",
objsize, p->objminsize, p->objmaxsize);
- goto error;
+ return EINVAL;
}
if (objtotal < p->nummin || objtotal > p->nummax) {
D("requested objtotal %d out of range [%d, %d]",
objtotal, p->nummin, p->nummax);
- goto error;
+ return EINVAL;
}
/*
* Compute number of objects using a brute-force approach:
@@ -568,22 +730,15 @@ netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int obj
* The number of clusters is n = ceil(objtotal/clustentries)
* objtotal' = n * clustentries
*/
- p->clustentries = clustentries;
+ p->_clustentries = clustentries;
p->_clustsize = clustsize;
- n = (objtotal + clustentries - 1) / clustentries;
- p->_numclusters = n;
- p->objtotal = n * clustentries;
- p->objfree = p->objtotal - 2; /* obj 0 and 1 are reserved */
- p->_memtotal = p->_numclusters * p->_clustsize;
- p->_objsize = objsize;
-
- return 0;
+ p->_numclusters = (objtotal + clustentries - 1) / clustentries;
-error:
+ /* actual values (may be larger than requested) */
p->_objsize = objsize;
- p->objtotal = objtotal;
+ p->_objtotal = p->_numclusters * clustentries;
- return EINVAL;
+ return 0;
}
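/*
 * Worked example of the rounding above (all numbers hypothetical): with
 * objsize = 2048 (already a multiple of LINE_ROUND) and a one-page
 * (4096-byte) cluster, clustentries = 2.  A request for objtotal = 1001
 * objects then yields
 *
 *	_numclusters = (1001 + 2 - 1) / 2 = 501
 *	_objtotal    = 501 * 2           = 1002    (rounded up)
 *	memtotal     = 501 * 4096        = 2052096 bytes (~2 MB)
 *
 * i.e. objtotal is rounded up to a whole number of clusters.
 */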
@@ -591,7 +746,12 @@ error:
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
- int i, n;
+ int i; /* must be signed */
+ size_t n;
+
+ /* optimistically assume we have enough memory */
+ p->numclusters = p->_numclusters;
+ p->objtotal = p->_objtotal;
n = sizeof(struct lut_entry) * p->objtotal;
#ifdef linux
@@ -600,7 +760,7 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
#endif
if (p->lut == NULL) {
- D("Unable to create lookup table (%d bytes) for '%s'", n, p->name);
+ D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
goto clean;
}
@@ -608,7 +768,7 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
n = (p->objtotal + 31) / 32;
p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
if (p->bitmap == NULL) {
- D("Unable to create bitmap (%d entries) for allocator '%s'", n,
+ D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
p->name);
goto clean;
}
@@ -617,31 +777,34 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
/*
* Allocate clusters, init pointers and bitmap
*/
- for (i = 0; i < p->objtotal;) {
- int lim = i + p->clustentries;
+
+ n = p->_clustsize;
+ for (i = 0; i < (int)p->objtotal;) {
+ int lim = i + p->_clustentries;
char *clust;
- clust = contigmalloc(p->_clustsize, M_NETMAP, M_NOWAIT | M_ZERO,
- 0, -1UL, PAGE_SIZE, 0);
+ clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
+ (size_t)0, -1UL, PAGE_SIZE, 0);
if (clust == NULL) {
/*
* If we get here, there is a severe memory shortage,
* so halve the allocated memory to reclaim some.
- * XXX check boundaries
*/
D("Unable to create cluster at %d for '%s' allocator",
i, p->name);
+ if (i < 2) /* nothing to halve */
+ goto out;
lim = i / 2;
for (i--; i >= lim; i--) {
p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) );
- if (i % p->clustentries == 0 && p->lut[i].vaddr)
+ if (i % p->_clustentries == 0 && p->lut[i].vaddr)
contigfree(p->lut[i].vaddr,
- p->_clustsize, M_NETMAP);
+ n, M_NETMAP);
}
+ out:
p->objtotal = i;
- p->objfree = p->objtotal - 2;
- p->_numclusters = i / p->clustentries;
- p->_memtotal = p->_numclusters * p->_clustsize;
+ /* we may have stopped in the middle of a cluster */
+ p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
break;
}
for (; i < lim; i++, clust += p->_objsize) {
@@ -650,11 +813,14 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
p->lut[i].paddr = vtophys(clust);
}
}
- p->bitmap[0] = ~3; /* objs 0 and 1 is always busy */
+ p->objfree = p->objtotal;
+ p->memtotal = p->numclusters * p->_clustsize;
+ if (p->objfree == 0)
+ goto clean;
if (netmap_verbose)
D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
- p->_numclusters, p->_clustsize >> 10,
- p->_memtotal >> 10, p->name);
+ p->numclusters, p->_clustsize >> 10,
+ p->memtotal >> 10, p->name);
return 0;
@@ -665,310 +831,547 @@ clean:
/* call with lock held */
static int
-netmap_memory_config_changed(void)
+netmap_memory_config_changed(struct netmap_mem_d *nmd)
{
int i;
for (i = 0; i < NETMAP_POOLS_NR; i++) {
- if (nm_mem.pools[i]._objsize != netmap_params[i].size ||
- nm_mem.pools[i].objtotal != netmap_params[i].num)
+ if (nmd->pools[i].r_objsize != netmap_params[i].size ||
+ nmd->pools[i].r_objtotal != netmap_params[i].num)
return 1;
}
return 0;
}
+static void
+netmap_mem_reset_all(struct netmap_mem_d *nmd)
+{
+ int i;
+
+ if (netmap_verbose)
+ D("resetting %p", nmd);
+ for (i = 0; i < NETMAP_POOLS_NR; i++) {
+ netmap_reset_obj_allocator(&nmd->pools[i]);
+ }
+ nmd->flags &= ~NETMAP_MEM_FINALIZED;
+}
+
+static int
+netmap_mem_finalize_all(struct netmap_mem_d *nmd)
+{
+ int i;
+ if (nmd->flags & NETMAP_MEM_FINALIZED)
+ return 0;
+ nmd->lasterr = 0;
+ nmd->nm_totalsize = 0;
+ for (i = 0; i < NETMAP_POOLS_NR; i++) {
+ nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
+ if (nmd->lasterr)
+ goto error;
+ nmd->nm_totalsize += nmd->pools[i].memtotal;
+ }
+ /* buffers 0 and 1 are reserved */
+ nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
+ nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
+ nmd->flags |= NETMAP_MEM_FINALIZED;
+
+ if (netmap_verbose)
+ D("interfaces %d KB, rings %d KB, buffers %d MB",
+ nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
+ nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
+ nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
+
+ if (netmap_verbose)
+ D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);
+
+
+ return 0;
+error:
+ netmap_mem_reset_all(nmd);
+ return nmd->lasterr;
+}
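/*
 * Note on the "bitmap[0] = ~3" above: the bitmap uses 1 == free, so
 * clearing the two low bits of word 0 marks buffer indices 0 and 1 as
 * permanently busy; they serve as reserved/"null" buffers.  Equivalent
 * explicit form (illustrative only):
 *
 *	bitmap[0] = ~((1u << 0) | (1u << 1));
 */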
+
+
+
+void
+netmap_mem_private_delete(struct netmap_mem_d *nmd)
+{
+ if (nmd == NULL)
+ return;
+ if (netmap_verbose)
+ D("deleting %p", nmd);
+ if (nmd->refcount > 0)
+ D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
+ nm_mem_release_id(nmd);
+ if (netmap_verbose)
+ D("done deleting %p", nmd);
+ NMA_LOCK_DESTROY(nmd);
+ free(nmd, M_DEVBUF);
+}
+
+static int
+netmap_mem_private_config(struct netmap_mem_d *nmd)
+{
+ /* nothing to do, we are configured on creation
+ * and configuration never changes thereafter
+ */
+ return 0;
+}
+
+static int
+netmap_mem_private_finalize(struct netmap_mem_d *nmd)
+{
+ int err;
+ NMA_LOCK(nmd);
+ nmd->refcount++;
+ err = netmap_mem_finalize_all(nmd);
+ NMA_UNLOCK(nmd);
+ return err;
+
+}
+
+static void
+netmap_mem_private_deref(struct netmap_mem_d *nmd)
+{
+ NMA_LOCK(nmd);
+ if (--nmd->refcount <= 0)
+ netmap_mem_reset_all(nmd);
+ NMA_UNLOCK(nmd);
+}
+
+
+/*
+ * allocator for private memory
+ */
+struct netmap_mem_d *
+netmap_mem_private_new(const char *name, u_int txr, u_int txd,
+ u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr)
+{
+ struct netmap_mem_d *d = NULL;
+ struct netmap_obj_params p[NETMAP_POOLS_NR];
+ int i, err;
+ u_int v, maxd;
+
+ d = malloc(sizeof(struct netmap_mem_d),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (d == NULL) {
+ err = ENOMEM;
+ goto error;
+ }
+
+ *d = nm_blueprint;
+
+ err = nm_mem_assign_id(d);
+ if (err)
+ goto error;
+
+ /* account for the fake host rings */
+ txr++;
+ rxr++;
+
+ /* copy the min values */
+ for (i = 0; i < NETMAP_POOLS_NR; i++) {
+ p[i] = netmap_min_priv_params[i];
+ }
+
+ /* possibly increase them to fit user request */
+ v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
+ if (p[NETMAP_IF_POOL].size < v)
+ p[NETMAP_IF_POOL].size = v;
+ v = 2 + 4 * npipes;
+ if (p[NETMAP_IF_POOL].num < v)
+ p[NETMAP_IF_POOL].num = v;
+ maxd = (txd > rxd) ? txd : rxd;
+ v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
+ if (p[NETMAP_RING_POOL].size < v)
+ p[NETMAP_RING_POOL].size = v;
+ /* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
+ * and two rx rings (again, 1 normal and 1 fake host)
+ */
+ v = txr + rxr + 8 * npipes;
+ if (p[NETMAP_RING_POOL].num < v)
+ p[NETMAP_RING_POOL].num = v;
+ /* for each pipe we only need the buffers for the 4 "real" rings.
+ * On the other end, the pipe ring dimension may be different from
+ * the parent port ring dimension. As a compromise, we allocate twice the
+ * space actually needed if the pipe rings were the same size as the parent rings
+ */
+ v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
+ /* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
+ if (p[NETMAP_BUF_POOL].num < v)
+ p[NETMAP_BUF_POOL].num = v;
+
+ if (netmap_verbose)
+ D("req if %d*%d ring %d*%d buf %d*%d",
+ p[NETMAP_IF_POOL].num,
+ p[NETMAP_IF_POOL].size,
+ p[NETMAP_RING_POOL].num,
+ p[NETMAP_RING_POOL].size,
+ p[NETMAP_BUF_POOL].num,
+ p[NETMAP_BUF_POOL].size);
+
+ for (i = 0; i < NETMAP_POOLS_NR; i++) {
+ snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
+ nm_blueprint.pools[i].name,
+ name);
+ err = netmap_config_obj_allocator(&d->pools[i],
+ p[i].num, p[i].size);
+ if (err)
+ goto error;
+ }
+
+ d->flags &= ~NETMAP_MEM_FINALIZED;
+
+ NMA_LOCK_INIT(d);
+
+ return d;
+error:
+ netmap_mem_private_delete(d);
+ if (perr)
+ *perr = err;
+ return NULL;
+}
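/*
 * Hypothetical usage sketch (not part of the patch): create a private
 * allocator for a port with one tx/rx ring pair of 1024 slots each,
 * no extra buffers and no pipes.  Port name and parameters are made up.
 */
static struct netmap_mem_d *
sketch_new_port_mem(int *error)
{
	return netmap_mem_private_new("vale0:0", 1 /*txr*/, 1024 /*txd*/,
	    1 /*rxr*/, 1024 /*rxd*/, 0 /*extra_bufs*/, 0 /*npipes*/, error);
}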
+
/* call with lock held */
static int
-netmap_memory_config(void)
+netmap_mem_global_config(struct netmap_mem_d *nmd)
{
int i;
- if (!netmap_memory_config_changed())
+ if (nmd->refcount)
+ /* already in use, we cannot change the configuration */
+ goto out;
+
+ if (!netmap_memory_config_changed(nmd))
goto out;
D("reconfiguring");
- if (nm_mem.finalized) {
+ if (nmd->flags & NETMAP_MEM_FINALIZED) {
/* reset previous allocation */
for (i = 0; i < NETMAP_POOLS_NR; i++) {
- netmap_reset_obj_allocator(&nm_mem.pools[i]);
+ netmap_reset_obj_allocator(&nmd->pools[i]);
}
- nm_mem.finalized = 0;
- }
+ nmd->flags &= ~NETMAP_MEM_FINALIZED;
+ }
for (i = 0; i < NETMAP_POOLS_NR; i++) {
- nm_mem.lasterr = netmap_config_obj_allocator(&nm_mem.pools[i],
+ nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
netmap_params[i].num, netmap_params[i].size);
- if (nm_mem.lasterr)
+ if (nmd->lasterr)
goto out;
}
- D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
- nm_mem.pools[NETMAP_IF_POOL]._memtotal >> 10,
- nm_mem.pools[NETMAP_RING_POOL]._memtotal >> 10,
- nm_mem.pools[NETMAP_BUF_POOL]._memtotal >> 20);
-
out:
- return nm_mem.lasterr;
+ return nmd->lasterr;
}
-/* call with lock held */
static int
-netmap_memory_finalize(void)
+netmap_mem_global_finalize(struct netmap_mem_d *nmd)
{
- int i;
- u_int totalsize = 0;
+ int err;
+
+ NMA_LOCK(nmd);
- nm_mem.refcount++;
- if (nm_mem.refcount > 1) {
- ND("busy (refcount %d)", nm_mem.refcount);
- goto out;
- }
/* update configuration if changed */
- if (netmap_memory_config())
+ if (netmap_mem_global_config(nmd))
goto out;
- if (nm_mem.finalized) {
+ nmd->refcount++;
+
+ if (nmd->flags & NETMAP_MEM_FINALIZED) {
/* may happen if config is not changed */
ND("nothing to do");
goto out;
}
- for (i = 0; i < NETMAP_POOLS_NR; i++) {
- nm_mem.lasterr = netmap_finalize_obj_allocator(&nm_mem.pools[i]);
- if (nm_mem.lasterr)
- goto cleanup;
- totalsize += nm_mem.pools[i]._memtotal;
- }
- nm_mem.nm_totalsize = totalsize;
+ if (netmap_mem_finalize_all(nmd))
+ goto out;
/* backward compatibility */
- netmap_buf_size = nm_mem.pools[NETMAP_BUF_POOL]._objsize;
- netmap_total_buffers = nm_mem.pools[NETMAP_BUF_POOL].objtotal;
+ netmap_buf_size = nmd->pools[NETMAP_BUF_POOL]._objsize;
+ netmap_total_buffers = nmd->pools[NETMAP_BUF_POOL].objtotal;
- netmap_buffer_lut = nm_mem.pools[NETMAP_BUF_POOL].lut;
- netmap_buffer_base = nm_mem.pools[NETMAP_BUF_POOL].lut[0].vaddr;
+ netmap_buffer_lut = nmd->pools[NETMAP_BUF_POOL].lut;
+ netmap_buffer_base = nmd->pools[NETMAP_BUF_POOL].lut[0].vaddr;
- nm_mem.finalized = 1;
- nm_mem.lasterr = 0;
-
- /* make sysctl values match actual values in the pools */
- for (i = 0; i < NETMAP_POOLS_NR; i++) {
- netmap_params[i].size = nm_mem.pools[i]._objsize;
- netmap_params[i].num = nm_mem.pools[i].objtotal;
- }
+ nmd->lasterr = 0;
out:
- if (nm_mem.lasterr)
- nm_mem.refcount--;
+ if (nmd->lasterr)
+ nmd->refcount--;
+ err = nmd->lasterr;
- return nm_mem.lasterr;
+ NMA_UNLOCK(nmd);
-cleanup:
- for (i = 0; i < NETMAP_POOLS_NR; i++) {
- netmap_reset_obj_allocator(&nm_mem.pools[i]);
- }
- nm_mem.refcount--;
+ return err;
- return nm_mem.lasterr;
}
-static int
-netmap_memory_init(void)
+int
+netmap_mem_init(void)
{
- NMA_LOCK_INIT();
+ NMA_LOCK_INIT(&nm_mem);
return (0);
}
-static void
-netmap_memory_fini(void)
+void
+netmap_mem_fini(void)
{
int i;
for (i = 0; i < NETMAP_POOLS_NR; i++) {
netmap_destroy_obj_allocator(&nm_mem.pools[i]);
}
- NMA_LOCK_DESTROY();
+ NMA_LOCK_DESTROY(&nm_mem);
}
static void
netmap_free_rings(struct netmap_adapter *na)
{
- int i;
+ struct netmap_kring *kring;
+ struct netmap_ring *ring;
if (!na->tx_rings)
return;
- for (i = 0; i < na->num_tx_rings + 1; i++) {
- netmap_ring_free(na->tx_rings[i].ring);
- na->tx_rings[i].ring = NULL;
+ for (kring = na->tx_rings; kring != na->rx_rings; kring++) {
+ ring = kring->ring;
+ if (ring == NULL)
+ continue;
+ netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
+ netmap_ring_free(na->nm_mem, ring);
+ kring->ring = NULL;
}
- for (i = 0; i < na->num_rx_rings + 1; i++) {
- netmap_ring_free(na->rx_rings[i].ring);
- na->rx_rings[i].ring = NULL;
+ for (/* cont'd from above */; kring != na->tailroom; kring++) {
+ ring = kring->ring;
+ if (ring == NULL)
+ continue;
+ netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
+ netmap_ring_free(na->nm_mem, ring);
+ kring->ring = NULL;
}
- free(na->tx_rings, M_DEVBUF);
- na->tx_rings = na->rx_rings = NULL;
}
-
-
-/* call with NMA_LOCK held */
-/*
- * Allocate the per-fd structure netmap_if.
- * If this is the first instance, also allocate the krings, rings etc.
+/* call with NMA_LOCK held *
+ *
+ * Allocate netmap rings and buffers for this card
+ * The rings are contiguous, but have variable size.
+ * The kring array must follow the layout described
+ * in netmap_krings_create().
*/
-static void *
-netmap_if_new(const char *ifname, struct netmap_adapter *na)
+int
+netmap_mem_rings_create(struct netmap_adapter *na)
{
- struct netmap_if *nifp;
struct netmap_ring *ring;
- ssize_t base; /* handy for relative offsets between rings and nifp */
- u_int i, len, ndesc, ntx, nrx;
+ u_int len, ndesc;
struct netmap_kring *kring;
+ u_int i;
- if (netmap_update_config(na)) {
- /* configuration mismatch, report and fail */
- return NULL;
- }
- ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
- nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
- /*
- * the descriptor is followed inline by an array of offsets
- * to the tx and rx rings in the shared memory region.
- */
- len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
- nifp = netmap_if_malloc(len);
- if (nifp == NULL) {
- return NULL;
- }
-
- /* initialize base fields -- override const */
- *(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
- *(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
- strncpy(nifp->ni_name, ifname, IFNAMSIZ);
-
- (na->refcount)++; /* XXX atomic ? we are under lock */
- if (na->refcount > 1) { /* already setup, we are done */
- goto final;
- }
+ NMA_LOCK(na->nm_mem);
- len = (ntx + nrx) * sizeof(struct netmap_kring);
- na->tx_rings = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (na->tx_rings == NULL) {
- D("Cannot allocate krings for %s", ifname);
- goto cleanup;
- }
- na->rx_rings = na->tx_rings + ntx;
-
- /*
- * First instance, allocate netmap rings and buffers for this card
- * The rings are contiguous, but have variable size.
- */
- for (i = 0; i < ntx; i++) { /* Transmit rings */
- kring = &na->tx_rings[i];
- ndesc = na->num_tx_desc;
- bzero(kring, sizeof(*kring));
+ /* transmit rings */
+ for (i =0, kring = na->tx_rings; kring != na->rx_rings; kring++, i++) {
+ if (kring->ring) {
+ ND("%s %ld already created", kring->name, kring - na->tx_rings);
+ continue; /* already created by somebody else */
+ }
+ ndesc = kring->nkr_num_slots;
len = sizeof(struct netmap_ring) +
ndesc * sizeof(struct netmap_slot);
- ring = netmap_ring_malloc(len);
+ ring = netmap_ring_malloc(na->nm_mem, len);
if (ring == NULL) {
- D("Cannot allocate tx_ring[%d] for %s", i, ifname);
+ D("Cannot allocate tx_ring");
goto cleanup;
}
- ND("txring[%d] at %p ofs %d", i, ring);
- kring->na = na;
+ ND("txring at %p", ring);
kring->ring = ring;
- *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
- *(ssize_t *)(uintptr_t)&ring->buf_ofs =
- (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
- nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
- netmap_ring_offset(ring);
-
- /*
- * IMPORTANT:
- * Always keep one slot empty, so we can detect new
- * transmissions comparing cur and nr_hwcur (they are
- * the same only if there are no new transmissions).
- */
- ring->avail = kring->nr_hwavail = ndesc - 1;
- ring->cur = kring->nr_hwcur = 0;
- *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
- ND("initializing slots for txring[%d]", i);
- if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
- D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname);
- goto cleanup;
+ *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
+ *(int64_t *)(uintptr_t)&ring->buf_ofs =
+ (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
+ na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
+ netmap_ring_offset(na->nm_mem, ring);
+
+ /* copy values from kring */
+ ring->head = kring->rhead;
+ ring->cur = kring->rcur;
+ ring->tail = kring->rtail;
+ *(uint16_t *)(uintptr_t)&ring->nr_buf_size =
+ NETMAP_BDG_BUF_SIZE(na->nm_mem);
+ ND("%s h %d c %d t %d", kring->name,
+ ring->head, ring->cur, ring->tail);
+ ND("initializing slots for txring");
+ if (i != na->num_tx_rings || (na->na_flags & NAF_HOST_RINGS)) {
+ /* this is a real ring */
+ if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
+ D("Cannot allocate buffers for tx_ring");
+ goto cleanup;
+ }
+ } else {
+ /* this is a fake tx ring, set all indices to 0 */
+ netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
}
}
- for (i = 0; i < nrx; i++) { /* Receive rings */
- kring = &na->rx_rings[i];
- ndesc = na->num_rx_desc;
- bzero(kring, sizeof(*kring));
+ /* receive rings */
+ for ( i = 0 /* kring cont'd from above */ ; kring != na->tailroom; kring++, i++) {
+ if (kring->ring) {
+ ND("%s %ld already created", kring->name, kring - na->rx_rings);
+ continue; /* already created by somebody else */
+ }
+ ndesc = kring->nkr_num_slots;
len = sizeof(struct netmap_ring) +
ndesc * sizeof(struct netmap_slot);
- ring = netmap_ring_malloc(len);
+ ring = netmap_ring_malloc(na->nm_mem, len);
if (ring == NULL) {
- D("Cannot allocate rx_ring[%d] for %s", i, ifname);
+ D("Cannot allocate rx_ring");
goto cleanup;
}
- ND("rxring[%d] at %p ofs %d", i, ring);
-
- kring->na = na;
+ ND("rxring at %p", ring);
kring->ring = ring;
- *(int *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc;
- *(ssize_t *)(uintptr_t)&ring->buf_ofs =
- (nm_mem.pools[NETMAP_IF_POOL]._memtotal +
- nm_mem.pools[NETMAP_RING_POOL]._memtotal) -
- netmap_ring_offset(ring);
-
- ring->cur = kring->nr_hwcur = 0;
- ring->avail = kring->nr_hwavail = 0; /* empty */
- *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
- ND("initializing slots for rxring[%d]", i);
- if (netmap_new_bufs(nifp, ring->slot, ndesc)) {
- D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname);
- goto cleanup;
+ *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
+ *(int64_t *)(uintptr_t)&ring->buf_ofs =
+ (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
+ na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
+ netmap_ring_offset(na->nm_mem, ring);
+
+ /* copy values from kring */
+ ring->head = kring->rhead;
+ ring->cur = kring->rcur;
+ ring->tail = kring->rtail;
+ *(int *)(uintptr_t)&ring->nr_buf_size =
+ NETMAP_BDG_BUF_SIZE(na->nm_mem);
+ ND("%s h %d c %d t %d", kring->name,
+ ring->head, ring->cur, ring->tail);
+ ND("initializing slots for rxring %p", ring);
+ if (i != na->num_rx_rings || (na->na_flags & NAF_HOST_RINGS)) {
+ /* this is a real ring */
+ if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
+ D("Cannot allocate buffers for rx_ring");
+ goto cleanup;
+ }
+ } else {
+ /* this is a fake rx ring, set all indices to 1 */
+ netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 1);
}
}
-#ifdef linux
- // XXX initialize the selrecord structs.
- for (i = 0; i < ntx; i++)
- init_waitqueue_head(&na->tx_rings[i].si);
- for (i = 0; i < nrx; i++)
- init_waitqueue_head(&na->rx_rings[i].si);
- init_waitqueue_head(&na->tx_si);
- init_waitqueue_head(&na->rx_si);
-#endif
-final:
+
+ NMA_UNLOCK(na->nm_mem);
+
+ return 0;
+
+cleanup:
+ netmap_free_rings(na);
+
+ NMA_UNLOCK(na->nm_mem);
+
+ return ENOMEM;
+}
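/*
 * Kring layout assumed by the two loops above (and by netmap_free_rings):
 * the krings form one contiguous array, with na->rx_rings pointing just
 * past the last tx kring and na->tailroom just past the last rx kring;
 * the last entry of each group is the (possibly fake) host ring:
 *
 *	tx_rings[0] .. tx_rings[num_tx_rings]	hardware tx + host tx
 *	rx_rings[0] .. rx_rings[num_rx_rings]	hardware rx + host rx
 *	tailroom				end of the kring array
 */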
+
+void
+netmap_mem_rings_delete(struct netmap_adapter *na)
+{
+ /* last instance, release bufs and rings */
+ NMA_LOCK(na->nm_mem);
+
+ netmap_free_rings(na);
+
+ NMA_UNLOCK(na->nm_mem);
+}
+
+
+/* call with NMA_LOCK held */
+/*
+ * Allocate the per-fd structure netmap_if.
+ *
+ * We assume that the configuration stored in na
+ * (number of tx/rx rings and descs) does not change while
+ * the interface is in netmap mode.
+ */
+struct netmap_if *
+netmap_mem_if_new(const char *ifname, struct netmap_adapter *na)
+{
+ struct netmap_if *nifp;
+ ssize_t base; /* handy for relative offsets between rings and nifp */
+ u_int i, len, ntx, nrx;
+
+ /* account for the (eventually fake) host rings */
+ ntx = na->num_tx_rings + 1;
+ nrx = na->num_rx_rings + 1;
+ /*
+ * the descriptor is followed inline by an array of offsets
+ * to the tx and rx rings in the shared memory region.
+ */
+
+ NMA_LOCK(na->nm_mem);
+
+ len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
+ nifp = netmap_if_malloc(na->nm_mem, len);
+ if (nifp == NULL) {
+ NMA_UNLOCK(na->nm_mem);
+ return NULL;
+ }
+
+ /* initialize base fields -- override const */
+ *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
+ *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
+ strncpy(nifp->ni_name, ifname, (size_t)IFNAMSIZ);
+
/*
* fill the slots for the rx and tx rings. They contain the offset
* between the ring and nifp, so the information is usable in
* userspace to reach the ring from the nifp.
*/
- base = netmap_if_offset(nifp);
+ base = netmap_if_offset(na->nm_mem, nifp);
for (i = 0; i < ntx; i++) {
*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
- netmap_ring_offset(na->tx_rings[i].ring) - base;
+ netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
}
for (i = 0; i < nrx; i++) {
*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
- netmap_ring_offset(na->rx_rings[i].ring) - base;
+ netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
}
+
+ NMA_UNLOCK(na->nm_mem);
+
return (nifp);
-cleanup:
- netmap_free_rings(na);
- netmap_if_free(nifp);
- (na->refcount)--;
- return NULL;
}
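/*
 * How userspace consumes ring_ofs[] (illustrative sketch with a stand-in
 * type; the real helpers live in the userspace netmap headers): each
 * entry is the offset of a ring relative to the nifp itself, so after
 * mmap() a ring is reached with plain pointer arithmetic.
 */
#include <stdint.h>

struct sketch_if { int64_t ring_ofs[1]; };	/* stand-in for struct netmap_if */

static void *
sketch_ring(struct sketch_if *nifp, unsigned int i)
{
	return (char *)nifp + nifp->ring_ofs[i];	/* ring i (tx first, then rx) */
}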
-/* call with NMA_LOCK held */
+void
+netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
+{
+ if (nifp == NULL)
+ /* nothing to do */
+ return;
+ NMA_LOCK(na->nm_mem);
+ if (nifp->ni_bufs_head)
+ netmap_extra_free(na, nifp->ni_bufs_head);
+ netmap_if_free(na->nm_mem, nifp);
+
+ NMA_UNLOCK(na->nm_mem);
+}
+
static void
-netmap_memory_deref(void)
+netmap_mem_global_deref(struct netmap_mem_d *nmd)
{
- nm_mem.refcount--;
+ NMA_LOCK(nmd);
+
+ nmd->refcount--;
if (netmap_verbose)
- D("refcount = %d", nm_mem.refcount);
+ D("refcount = %d", nmd->refcount);
+
+ NMA_UNLOCK(nmd);
+}
+
+int
+netmap_mem_finalize(struct netmap_mem_d *nmd)
+{
+ return nmd->finalize(nmd);
+}
+
+void
+netmap_mem_deref(struct netmap_mem_d *nmd)
+{
+ return nmd->deref(nmd);
}