-rw-r--r--  sys/kern/uipc_socket.c   4
-rw-r--r--  sys/net/if.c             2
-rw-r--r--  sys/net/if_llatbl.c      2
-rw-r--r--  sys/net/if_llatbl.h      2
-rw-r--r--  sys/net/if_var.h         6
-rw-r--r--  sys/netinet/in_pcb.h     2
-rw-r--r--  sys/netinet/in_var.h     2
-rw-r--r--  sys/netinet/ip_id.c      2
-rw-r--r--  sys/netinet/ip_input.c   4
-rw-r--r--  sys/netinet/tcp_subr.c   2
-rw-r--r--  sys/sys/socketvar.h      2
11 files changed, 15 insertions, 15 deletions
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index 7986c4b..c9b48d3 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -240,14 +240,14 @@ SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
* accept_mtx locks down per-socket fields relating to accept queues. See
* socketvar.h for an annotation of the protected fields of struct socket.
*/
-struct mtx_padalign accept_mtx;
+struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
/*
* so_global_mtx protects so_gencnt, numopensockets, and the per-socket
* so_gencnt field.
*/
-static struct mtx_padalign so_global_mtx;
+static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);
/*
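For context, the mtx(9) pattern touched above — a statically declared global mutex registered at boot with MTX_SYSINIT and taken around short critical sections — looks roughly like the sketch below. The module, variable names, and the counter being protected are hypothetical; only the KPI calls (MTX_SYSINIT, mtx_lock, mtx_unlock) mirror the code in this diff.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Plain (unpadded) mutex, as in the + lines of this diff. */
static struct mtx example_mtx;
MTX_SYSINIT(example_mtx, &example_mtx, "example", MTX_DEF);

/* Hypothetical state protected by example_mtx. */
static int example_counter;

static void
example_bump(void)
{

	mtx_lock(&example_mtx);
	example_counter++;
	mtx_unlock(&example_mtx);
}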
diff --git a/sys/net/if.c b/sys/net/if.c
index 39dc941..dc5681e 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -206,7 +206,7 @@ VNET_DEFINE(struct ifindex_entry *, ifindex_table);
 * also to stabilize it over long-running ioctls, without introducing priority
* inversions and deadlocks.
*/
-struct rwlock_padalign ifnet_rwlock;
+struct rwlock ifnet_rwlock;
struct sx ifnet_sxlock;
/*
diff --git a/sys/net/if_llatbl.c b/sys/net/if_llatbl.c
index f19f6cf..84ea6c6 100644
--- a/sys/net/if_llatbl.c
+++ b/sys/net/if_llatbl.c
@@ -67,7 +67,7 @@ static VNET_DEFINE(SLIST_HEAD(, lltable), lltables);
static void vnet_lltable_init(void);
-struct rwlock_padalign lltable_rwlock;
+struct rwlock lltable_rwlock;
RW_SYSINIT(lltable_rwlock, &lltable_rwlock, "lltable_rwlock");
/*
diff --git a/sys/net/if_llatbl.h b/sys/net/if_llatbl.h
index e09145c..693ccd5 100644
--- a/sys/net/if_llatbl.h
+++ b/sys/net/if_llatbl.h
@@ -43,7 +43,7 @@ struct rt_addrinfo;
struct llentry;
LIST_HEAD(llentries, llentry);
-extern struct rwlock_padalign lltable_rwlock;
+extern struct rwlock lltable_rwlock;
#define LLTABLE_RLOCK() rw_rlock(&lltable_rwlock)
#define LLTABLE_RUNLOCK() rw_runlock(&lltable_rwlock)
#define LLTABLE_WLOCK() rw_wlock(&lltable_rwlock)
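The LLTABLE_* macros above follow the usual rwlock(9) idiom: readers take rw_rlock(), writers take rw_wlock(), and the lock itself is set up at boot with RW_SYSINIT. A minimal sketch of that idiom, with a hypothetical list and lock name, might look like this.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/queue.h>

/* Hypothetical list protected by a plain (unpadded) rwlock. */
struct entry {
	SLIST_ENTRY(entry) link;
	int value;
};
static SLIST_HEAD(, entry) example_head =
    SLIST_HEAD_INITIALIZER(example_head);
static struct rwlock example_rwlock;
RW_SYSINIT(example_rwlock, &example_rwlock, "example_rwlock");

static int
example_lookup(int value)
{
	struct entry *e;
	int found = 0;

	rw_rlock(&example_rwlock);	/* shared: lookups may run concurrently */
	SLIST_FOREACH(e, &example_head, link) {
		if (e->value == value) {
			found = 1;
			break;
		}
	}
	rw_runlock(&example_rwlock);
	return (found);
}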
diff --git a/sys/net/if_var.h b/sys/net/if_var.h
index 41ac056..ce8f06a 100644
--- a/sys/net/if_var.h
+++ b/sys/net/if_var.h
@@ -191,9 +191,9 @@ struct ifnet {
void *if_unused[2];
void *if_afdata[AF_MAX];
int if_afdata_initialized;
+ struct rwlock if_afdata_lock;
struct task if_linktask; /* task for link change events */
- struct rwlock_padalign if_afdata_lock;
- struct rwlock_padalign if_addr_lock; /* lock to protect address lists */
+ struct rwlock if_addr_lock; /* lock to protect address lists */
LIST_ENTRY(ifnet) if_clones; /* interfaces of a cloner */
TAILQ_HEAD(, ifg_list) if_groups; /* linked list of groups per if */
@@ -832,7 +832,7 @@ struct ifmultiaddr {
#ifdef _KERNEL
-extern struct rwlock_padalign ifnet_rwlock;
+extern struct rwlock ifnet_rwlock;
extern struct sx ifnet_sxlock;
#define IFNET_LOCK_INIT() do { \
diff --git a/sys/netinet/in_pcb.h b/sys/netinet/in_pcb.h
index 5274a90..2df90b0 100644
--- a/sys/netinet/in_pcb.h
+++ b/sys/netinet/in_pcb.h
@@ -330,7 +330,7 @@ struct inpcbinfo {
/*
* Global lock protecting non-pcbgroup hash lookup tables.
*/
- struct rwlock_padalign ipi_hash_lock;
+ struct rwlock ipi_hash_lock;
/*
* Global hash of inpcbs, hashed by local and foreign addresses and
diff --git a/sys/netinet/in_var.h b/sys/netinet/in_var.h
index bf27652..8657dbb 100644
--- a/sys/netinet/in_var.h
+++ b/sys/netinet/in_var.h
@@ -116,7 +116,7 @@ VNET_DECLARE(u_long, in_ifaddrhmask); /* mask for hash table */
#define INADDR_HASH(x) \
(&V_in_ifaddrhashtbl[INADDR_HASHVAL(x) & V_in_ifaddrhmask])
-extern struct rwlock_padalign in_ifaddr_lock;
+extern struct rwlock in_ifaddr_lock;
#define IN_IFADDR_LOCK_ASSERT() rw_assert(&in_ifaddr_lock, RA_LOCKED)
#define IN_IFADDR_RLOCK() rw_rlock(&in_ifaddr_lock)
diff --git a/sys/netinet/ip_id.c b/sys/netinet/ip_id.c
index 7b9710f..ce8c6b4 100644
--- a/sys/netinet/ip_id.c
+++ b/sys/netinet/ip_id.c
@@ -97,7 +97,7 @@ static int array_ptr = 0;
static int array_size = 8192;
static int random_id_collisions = 0;
static int random_id_total = 0;
-static struct mtx_padalign ip_id_mtx;
+static struct mtx ip_id_mtx;
static void ip_initid(void);
static int sysctl_ip_id_change(SYSCTL_HANDLER_ARGS);
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index 17a4e1d..f38c6fa 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -85,7 +85,7 @@ __FBSDID("$FreeBSD$");
CTASSERT(sizeof(struct ip) == 20);
#endif
-struct rwlock_padalign in_ifaddr_lock;
+struct rwlock in_ifaddr_lock;
RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");
VNET_DEFINE(int, rsvp_on);
@@ -155,7 +155,7 @@ VNET_DEFINE(u_long, in_ifaddrhmask); /* mask for hash table */
static VNET_DEFINE(uma_zone_t, ipq_zone);
static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
-static struct mtx_padalign ipqlock;
+static struct mtx ipqlock;
#define V_ipq_zone VNET(ipq_zone)
#define V_ipq VNET(ipq)
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index 1236d7d..e87d58e 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -255,7 +255,7 @@ static VNET_DEFINE(uma_zone_t, tcpcb_zone);
#define V_tcpcb_zone VNET(tcpcb_zone)
MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
-static struct mtx_padalign isn_mtx;
+static struct mtx isn_mtx;
#define ISN_LOCK_INIT() mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
#define ISN_LOCK() mtx_lock(&isn_mtx)
diff --git a/sys/sys/socketvar.h b/sys/sys/socketvar.h
index f49c31f..77f31df 100644
--- a/sys/sys/socketvar.h
+++ b/sys/sys/socketvar.h
@@ -133,7 +133,7 @@ struct socket {
* avoid defining a lock order between listen and accept sockets
* until such time as it proves to be a good idea.
*/
-extern struct mtx_padalign accept_mtx;
+extern struct mtx accept_mtx;
#define ACCEPT_LOCK_ASSERT() mtx_assert(&accept_mtx, MA_OWNED)
#define ACCEPT_UNLOCK_ASSERT() mtx_assert(&accept_mtx, MA_NOTOWNED)
#define ACCEPT_LOCK() mtx_lock(&accept_mtx)
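The ACCEPT_LOCK()/ACCEPT_LOCK_ASSERT() wrappers above show the common FreeBSD idiom of hiding a global mutex behind subsystem-specific macros so internal helpers can cheaply assert lock ownership. A hedged sketch of the same idiom around a hypothetical lock and queue:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

extern struct mtx example_queue_mtx;	/* hypothetical global lock */

#define	EXAMPLE_LOCK()		mtx_lock(&example_queue_mtx)
#define	EXAMPLE_UNLOCK()	mtx_unlock(&example_queue_mtx)
#define	EXAMPLE_LOCK_ASSERT()	mtx_assert(&example_queue_mtx, MA_OWNED)

/* A helper that requires its caller to hold the lock. */
static void
example_dequeue_locked(void)
{

	EXAMPLE_LOCK_ASSERT();
	/* ... manipulate the queue protected by example_queue_mtx ... */
}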