author		melifaro <melifaro@FreeBSD.org>	2015-12-05 09:50:37 +0000
committer	melifaro <melifaro@FreeBSD.org>	2015-12-05 09:50:37 +0000
commit		67195f5ef1511d34c1b20ecf640143aaf345e932 (patch)
tree		f94284003fe1e92113f3cdaf4ec9d539c4182d40 /sys/net
parent		680de71cbf79fbb6681e4e27d6a5efc164939971 (diff)
Remove LLE read lock from IPv4 fast path.
The LLE structure is mostly unchanged during its lifecycle. To be more specific, there are two things relevant for the fast-path lookup code:

1) Link-level address change. Since r286722, these updates are performed under the AFDATA WLOCK.
2) Some sort of feedback indicating that this particular entry is used, so we re-send an ARP request to perform reachability verification instead of expiring the entry. The only signal needed from the fast path is a simple binary yes/no.

The latter is solved by the following changes:

1) Introduce a special r_skip_req field which is read locklessly by the fast path, but updated under the (new) req_mutex mutex. If this field is non-zero, the fast path will acquire the lock and set it back to 0.
2) Introduce a simple state machine: incomplete -> reachable <-> verify -> deleted. Before this change we implicitly had an incomplete -> reachable -> deleted state machine, with V_arpt_keep between "reachable" and "deleted". Verification was performed at runtime 5 seconds before the V_arpt_keep expiry. This is changed to: switch to the "verify" state 5 seconds before V_arpt_keep, set r_skip_req to a non-zero value, and check it every second. If the value is zero, send an ARP verification probe.

These changes do not introduce any significant control-plane overhead: typically the lle callout timer fires one extra time per V_arpt_keep (1200 s) for used lles, and up to arp_maxtries (5) times for dead lles.

As a result, all packets towards a "reachable" lle are handled by the fast path without acquiring the lle read lock.

The additional "req_mutex" is needed because the callout, arpresolve_slow() or an eventhandler might hold the LLE lock for a significant amount of time, which might not be feasible for fast-path locking (e.g. having an rmlock as either the AFDATA or the lltable lock).

Differential Revision:	https://reviews.freebsd.org/D3688
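As an illustration of the feedback scheme described above, here is a minimal, self-contained user-space model. All names in it (lle_model, fast_path_use, callout_check) are hypothetical, and a pthread mutex stands in for the new req_mutex; this is a sketch of the handshake, not the kernel code. The fast path reads r_skip_req without any lock and only takes the mutex to clear it when feedback has been requested, while the timer side arms the field and later checks whether it was cleared.

/*
 * User-space model of the r_skip_req feedback handshake (hypothetical
 * names; pthread mutex stands in for the kernel req_mutex).
 */
#include <pthread.h>
#include <stdio.h>

struct lle_model {
	volatile int	r_skip_req;	/* read locklessly by the fast path */
	pthread_mutex_t	req_mtx;	/* guards updates to r_skip_req */
};

/* Fast path: takes the mutex only when feedback has been requested. */
static void
fast_path_use(struct lle_model *lle)
{

	if (lle->r_skip_req != 0) {
		pthread_mutex_lock(&lle->req_mtx);
		lle->r_skip_req = 0;	/* "this entry is in use" */
		pthread_mutex_unlock(&lle->req_mtx);
	}
}

/* Timer side: re-arm the request and report whether the entry was used. */
static int
callout_check(struct lle_model *lle)
{
	int used;

	pthread_mutex_lock(&lle->req_mtx);
	used = (lle->r_skip_req == 0);	/* fast path cleared it */
	lle->r_skip_req = 1;		/* request feedback again */
	pthread_mutex_unlock(&lle->req_mtx);
	return (used);
}

int
main(void)
{
	struct lle_model lle = { .r_skip_req = 0 };

	pthread_mutex_init(&lle.req_mtx, NULL);
	(void)callout_check(&lle);	/* arm the first request */
	fast_path_use(&lle);		/* traffic hits the entry */
	/* 1 -> entry was used, so a verification probe would be sent. */
	printf("used: %d\n", callout_check(&lle));
	pthread_mutex_destroy(&lle.req_mtx);
	return (0);
}

For a "reachable" entry outside the verify window r_skip_req stays zero, so the common case in the model (as in the commit) is a plain lockless read with no mutex acquisition at all.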
Diffstat (limited to 'sys/net')
-rw-r--r--	sys/net/if_llatbl.c	2
-rw-r--r--	sys/net/if_llatbl.h	15
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/sys/net/if_llatbl.c b/sys/net/if_llatbl.c
index 0a89225..c7b1042 100644
--- a/sys/net/if_llatbl.c
+++ b/sys/net/if_llatbl.c
@@ -284,6 +284,7 @@ lltable_set_entry_addr(struct ifnet *ifp, struct llentry *lle,
 
 	bcopy(lladdr, &lle->ll_addr, ifp->if_addrlen);
 	lle->la_flags |= LLE_VALID;
+	lle->r_flags |= RLLE_VALID;
 }
 
 /*
@@ -640,6 +641,7 @@ lla_rt_output(struct rt_msghdr *rtm, struct rt_addrinfo *info)
 		if ((rtm->rtm_flags & RTF_ANNOUNCE))
 			lle->la_flags |= LLE_PUB;
 		lle->la_flags |= LLE_VALID;
+		lle->r_flags |= RLLE_VALID;
 		lle->la_expire = rtm->rtm_rmx.rmx_expire;
 
 		laflags = lle->la_flags;
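The two hunks above mirror LLE_VALID into the new r_flags word (as RLLE_VALID) whenever a link-level address is installed, since r_flags is what the lockless fast path inspects. A hedged, self-contained sketch of that check follows; the struct and function names are illustrative stand-ins, not the kernel's struct llentry or arpresolve().

/*
 * Stand-in for the fast-path-visible part of an entry; illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RLLE_VALID	0x0001		/* mirrors the flag added above */

struct lle_fast {
	uint8_t		ll_addr[6];	/* cached link-level address */
	uint16_t	r_flags;	/* runtime flags, read locklessly */
};

/* Copy the cached MAC if the entry is usable; no lock is taken. */
static int
fast_copy_lladdr(const struct lle_fast *lle, uint8_t dst[6])
{

	if ((lle->r_flags & RLLE_VALID) == 0)
		return (0);
	memcpy(dst, lle->ll_addr, sizeof(lle->ll_addr));
	return (1);
}

int
main(void)
{
	struct lle_fast lle = {
		.ll_addr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		.r_flags = RLLE_VALID,
	};
	uint8_t mac[6];

	printf("usable: %d\n", fast_copy_lladdr(&lle, mac));
	return (0);
}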
diff --git a/sys/net/if_llatbl.h b/sys/net/if_llatbl.h
index 2009a7a..044959e 100644
--- a/sys/net/if_llatbl.h
+++ b/sys/net/if_llatbl.h
@@ -63,7 +63,8 @@ struct llentry {
 		uint16_t	mac16[3];
 		uint8_t		mac8[20];	/* IB needs 20 bytes. */
 	} ll_addr;
-	uint32_t	spare0;
+	uint16_t	r_flags;	/* LLE runtime flags */
+	uint16_t	r_skip_req;	/* feedback from fast path */
 	uint64_t	spare1;
 
 	struct lltable		*lle_tbl;
@@ -83,6 +84,7 @@ struct llentry {
 	LIST_ENTRY(llentry)	lle_chain;	/* chain of deleted items */
 	struct callout		lle_timer;
 	struct rwlock		lle_lock;
+	struct mtx		req_mtx;
 };
 
 #define	LLE_WLOCK(lle)		rw_wlock(&(lle)->lle_lock)
@@ -95,6 +97,12 @@ struct llentry {
 #define	LLE_LOCK_DESTROY(lle)	rw_destroy(&(lle)->lle_lock)
 #define	LLE_WLOCK_ASSERT(lle)	rw_assert(&(lle)->lle_lock, RA_WLOCKED)
 
+#define	LLE_REQ_INIT(lle)	mtx_init(&(lle)->req_mtx, "lle req", \
+	NULL, MTX_DEF)
+#define	LLE_REQ_DESTROY(lle)	mtx_destroy(&(lle)->req_mtx)
+#define	LLE_REQ_LOCK(lle)	mtx_lock(&(lle)->req_mtx)
+#define	LLE_REQ_UNLOCK(lle)	mtx_unlock(&(lle)->req_mtx)
+
 #define	LLE_IS_VALID(lle)	(((lle) != NULL) && ((lle) != (void *)-1))
 
 #define	LLE_ADDREF(lle) do {	\
@@ -187,6 +195,11 @@ MALLOC_DECLARE(M_LLTABLE);
 #define	LLE_LINKED	0x0040	/* linked to lookup structure */
 /* LLE request flags */
 #define	LLE_EXCLUSIVE	0x2000	/* return lle xlocked */
+#define	LLE_UNLOCKED	0x4000	/* return lle unlocked */
+
+/* LLE flags used by fastpath code */
+#define	RLLE_VALID	0x0001	/* entry is valid */
+#define	RLLE_IFADDR	LLE_IFADDR	/* entry is ifaddr */
 
 #define LLATBL_HASH(key, mask) \
 	(((((((key >> 8) ^ key) >> 8) ^ key) >> 8) ^ key) & mask)
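To show how the LLE_REQ_* macros and r_skip_req field added in if_llatbl.h above are meant to be paired on the timer side, here is a hedged kernel-style sketch. The function name is hypothetical, the logic is simplified from the commit message, and it is not the actual sys/netinet/if_ether.c callout; it assumes the struct llentry and macro declarations from the header change above.

/*
 * Hypothetical helper for the "verify" state: re-arm the feedback request
 * under req_mtx and report whether the fast path cleared it since the last
 * check.  Sketch only; kernel context assumed.
 */
static int
lle_verify_used(struct llentry *lle)
{
	int used;

	LLE_REQ_LOCK(lle);
	used = (lle->r_skip_req == 0);	/* cleared by the fast path */
	lle->r_skip_req = 1;		/* ask for feedback again */
	LLE_REQ_UNLOCK(lle);

	return (used);	/* used -> send an ARP probe, else let the entry expire */
}

Keeping this exchange under the small dedicated req_mtx, rather than the LLE rwlock, is exactly the point of the commit: the callout, arpresolve_slow() or an eventhandler may hold the LLE lock for a long time, which the fast path cannot afford to wait on.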