summary | refs | log | tree | commit | diff | stats
path: root/sys/net/if_llatbl.h
diff options
context:
space:
mode:
author: rwatson <rwatson@FreeBSD.org> 2009-08-25 09:52:38 +0000
committer: rwatson <rwatson@FreeBSD.org> 2009-08-25 09:52:38 +0000
commit: 544dfa0789561f2dbb98a7740453fce7aeea4f5b (patch)
tree: 4a34d8a5aab4678ff905e0a94bd345c9b1c8d805 /sys/net/if_llatbl.h
parent: 6aac59f9fa910996e553304fbd819aedc2abb25d (diff)
download: FreeBSD-src-544dfa0789561f2dbb98a7740453fce7aeea4f5b.zip
download: FreeBSD-src-544dfa0789561f2dbb98a7740453fce7aeea4f5b.tar.gz
Use locks specific to the lltable code, rather than borrow the ifnet
list/index locks, to protect link layer address tables. This avoids lock order issues during interface teardown, but maintains the bug that sysctl copy routines may be called while a non-sleepable lock is held. Reviewed by: bz, kmacy MFC after: 3 days
Diffstat (limited to 'sys/net/if_llatbl.h')
-rw-r--r--  sys/net/if_llatbl.h | 7
1 file changed, 7 insertions, 0 deletions
diff --git a/sys/net/if_llatbl.h b/sys/net/if_llatbl.h
index 4d721ef..f54c78a 100644
--- a/sys/net/if_llatbl.h
+++ b/sys/net/if_llatbl.h
@@ -41,6 +41,13 @@ struct rt_addrinfo;
struct llentry;
LIST_HEAD(llentries, llentry);
+extern struct rwlock lltable_rwlock;
+#define LLTABLE_RLOCK() rw_rlock(&lltable_rwlock)
+#define LLTABLE_RUNLOCK() rw_runlock(&lltable_rwlock)
+#define LLTABLE_WLOCK() rw_wlock(&lltable_rwlock)
+#define LLTABLE_WUNLOCK() rw_wunlock(&lltable_rwlock)
+#define LLTABLE_LOCK_ASSERT() rw_assert(&lltable_rwlock, RA_LOCKED)
+
/*
* Code referencing llentry must at least hold
* a shared lock
OpenPOWER on IntegriCloud