author	jmg <jmg@FreeBSD.org>	2005-03-17 19:34:57 +0000
committer	jmg <jmg@FreeBSD.org>	2005-03-17 19:34:57 +0000
commit	19da85af4a083ce6f05e67a3b9740ca3149f21c9 (patch)
tree	82aef6788a453aea9f3b1916ad69603245947fb3 /sys
parent	755ffaa47d613fc8fe32776a907f4d8e7a8c9c4d (diff)
Add the m_copyup() function.  It can be used to make our IP stack less
alignment-restrictive, and to improve performance on some Ethernet cards
that currently copy the entire packet over by a couple of bytes just to get
it aligned properly.

Wordsmithing by:	dwhite
Obtained from:	NetBSD (code only)
I'll clean it up later:	rwatson
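A minimal usage sketch (hypothetical driver code, not part of this commit;
the function name and the 2-byte offset are illustrative): on a
strict-alignment machine a receive path can copy just the link-layer and
protocol headers into a fresh mbuf, offset by 2 bytes so the IPv4 header
that follows the 14-byte Ethernet header lands on a 32-bit boundary,
instead of shifting the whole packet.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <net/ethernet.h>

static struct mbuf *
example_rx_align(struct mbuf *m)
{
	/*
	 * Only the headers need to be contiguous and aligned; 20 bytes is
	 * the minimum IPv4 header.  On failure m_copyup() has already
	 * freed the chain, so just return NULL.
	 */
	m = m_copyup(m, ETHER_HDR_LEN + 20, 2);
	if (m == NULL)
		return (NULL);
	return (m);
}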
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/uipc_mbuf.c	48
-rw-r--r--	sys/sys/mbuf.h	1
2 files changed, 49 insertions, 0 deletions
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 8a236a6..f5ba9ab 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -767,6 +767,54 @@ bad:
}

/*
+ * Like m_pullup(), except a new mbuf is always allocated, and we allow
+ * the amount of empty space before the data in the new mbuf to be specified
+ * (in the event that the caller expects to prepend later).
+ */
+int MSFail;
+
+struct mbuf *
+m_copyup(struct mbuf *n, int len, int dstoff)
+{
+ struct mbuf *m;
+ int count, space;
+
+ if (len > (MHLEN - dstoff))
+ goto bad;
+ MGET(m, M_DONTWAIT, n->m_type);
+ if (m == NULL)
+ goto bad;
+ m->m_len = 0;
+ if (n->m_flags & M_PKTHDR)
+ M_MOVE_PKTHDR(m, n);
+ m->m_data += dstoff;
+ space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
+ do {
+ count = min(min(max(len, max_protohdr), space), n->m_len);
+ memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
+ (unsigned)count);
+ len -= count;
+ m->m_len += count;
+ n->m_len -= count;
+ space -= count;
+ if (n->m_len)
+ n->m_data += count;
+ else
+ n = m_free(n);
+ } while (len > 0 && n);
+ if (len > 0) {
+ (void) m_free(m);
+ goto bad;
+ }
+ m->m_next = n;
+ return (m);
+ bad:
+ m_freem(n);
+ MSFail++;
+ return (NULL);
+}
+
+/*
* Partition an mbuf chain in two pieces, returning the tail --
* all but the first len0 bytes. In case of failure, it returns NULL and
* attempts to restore the chain to its original state.
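The dstoff argument is what lets a caller reserve leading space and prepend
a header later without another allocation.  A sketch under that assumption
(the function name and parameters are hypothetical; len must not exceed
MHLEN - hdrlen or m_copyup() fails):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

static struct mbuf *
example_copyup_then_prepend(struct mbuf *m, int len, int hdrlen)
{
	/* Copy at least 'len' bytes up front, leaving 'hdrlen' bytes free. */
	m = m_copyup(m, len, hdrlen);
	if (m == NULL)
		return (NULL);		/* m_copyup() freed the chain */
	/* The reserved leading space lets the prepend reuse this mbuf. */
	M_PREPEND(m, hdrlen, M_DONTWAIT);
	if (m == NULL)
		return (NULL);		/* M_PREPEND() freed the chain */
	return (m);
}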
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index 0703916..a3d2a61 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -564,6 +564,7 @@ void m_copydata(const struct mbuf *, int, int, caddr_t);
struct mbuf *m_copym(struct mbuf *, int, int, int);
struct mbuf *m_copypacket(struct mbuf *, int);
void m_copy_pkthdr(struct mbuf *, struct mbuf *);
+struct mbuf *m_copyup(struct mbuf *n, int len, int dstoff);
struct mbuf *m_defrag(struct mbuf *, int);
struct mbuf *m_devget(char *, int, int, struct ifnet *,
void (*)(char *, caddr_t, u_int));
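For contrast with the comment in the new function (a sketch, not part of
this change): m_pullup() also makes the first len bytes contiguous, but it
may reuse the existing first mbuf and gives the caller no control over
where the data starts, which is exactly what m_copyup() adds.

static struct mbuf *
example_pullup(struct mbuf *m, int len)
{
	m = m_pullup(m, len);	/* may reuse the first mbuf; offset not chosen by caller */
	if (m == NULL)
		return (NULL);	/* m_pullup() freed the chain on failure */
	return (m);
}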