Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/sys_machdep.c                 4
-rw-r--r--  sys/amd64/amd64/vm_machdep.c                  7
-rw-r--r--  sys/contrib/ipfilter/netinet/ip_ftp_pxy.c     4
-rw-r--r--  sys/dev/bxe/bxe.c                           145
-rw-r--r--  sys/i386/i386/vm_machdep.c                    7
-rw-r--r--  sys/kern/kern_timeout.c                      10
-rw-r--r--  sys/kern/subr_sleepqueue.c                    3
-rw-r--r--  sys/sys/callout.h                             8
8 files changed, 142 insertions, 46 deletions
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
index 98d47b8..ff3bd3e 100644
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -591,8 +591,8 @@ amd64_set_ldt(td, uap, descs)
struct i386_ldt_args *uap;
struct user_segment_descriptor *descs;
{
- int error = 0, i;
- int largest_ld;
+ int error = 0;
+ unsigned int largest_ld, i;
struct mdproc *mdp = &td->td_proc->p_md;
struct proc_ldt *pldt;
struct user_segment_descriptor *dp;
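
Note: widening `largest_ld` and `i` to `unsigned int` hardens the LDT bounds
checks. A signed sum such as `uap->start + uap->num` can wrap negative and
slip past an upper-bound-only comparison, while an unsigned sum stays
non-negative and is caught. A standalone sketch of the hazard (hypothetical
bound, not the kernel code itself):

    #include <stdio.h>
    #include <limits.h>

    int main(void) {
        const unsigned int max_ld = 8192;   /* hypothetical table bound */
        int start = INT_MAX, num = 2;

        /* Unsigned arithmetic wraps with defined semantics and can never
         * be negative, so "largest_ld > max_ld" still rejects the input. */
        unsigned int largest_ld = (unsigned int)start + (unsigned int)num;
        printf("largest_ld = %u, rejected = %d\n",
            largest_ld, largest_ld > max_ld);

        /* The same sum in a signed int is undefined behavior and, on
         * common implementations, wraps to a negative value that an
         * upper-bound-only check would accept. */
        return 0;
    }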
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index bcc68c0..aaa3741 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -100,8 +100,8 @@ get_pcb_user_save_td(struct thread *td)
vm_offset_t p;
p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
- cpu_max_ext_state_size;
- KASSERT((p % 64) == 0, ("Unaligned pcb_user_save area"));
+ roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
+ KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
return ((struct savefpu *)p);
}
@@ -120,7 +120,8 @@ get_pcb_td(struct thread *td)
vm_offset_t p;
p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
- cpu_max_ext_state_size - sizeof(struct pcb);
+ roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
+ sizeof(struct pcb);
return ((struct pcb *)p);
}
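
Note: the old code subtracted the raw `cpu_max_ext_state_size` and then
asserted 64-byte alignment, which only holds when that size happens to be a
multiple of 64. Rounding up to `XSAVE_AREA_ALIGN` first makes the alignment
a property of the computation itself. A minimal sketch of the `roundup2`
idiom (macro as in sys/param.h; the sizes are hypothetical):

    #include <assert.h>
    #include <stdint.h>

    /* Power-of-two round-up, as defined in FreeBSD's sys/param.h. */
    #define roundup2(x, y)   (((x) + ((y) - 1)) & ~((y) - 1))
    #define XSAVE_AREA_ALIGN 64

    int main(void) {
        uintptr_t ext_size  = 856;       /* not a multiple of 64 */
        uintptr_t stack_top = 0x100000;  /* page-aligned kstack top */

        uintptr_t p = stack_top - roundup2(ext_size, XSAVE_AREA_ALIGN);
        assert(p % XSAVE_AREA_ALIGN == 0);  /* holds for any ext_size */
        return 0;
    }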
diff --git a/sys/contrib/ipfilter/netinet/ip_ftp_pxy.c b/sys/contrib/ipfilter/netinet/ip_ftp_pxy.c
index ff83976..00692b8 100644
--- a/sys/contrib/ipfilter/netinet/ip_ftp_pxy.c
+++ b/sys/contrib/ipfilter/netinet/ip_ftp_pxy.c
@@ -1951,10 +1951,6 @@ ipf_p_ftp_epsv(softf, fin, ip, nat, ftp, dlen)
ap += *s++ - '0';
}
- if (!s) {
- return 0;
- }
-
if (*s == '|')
s++;
if (*s == ')')
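
Note: the deleted null test was dead code. `s` is dereferenced by the
digit-parsing loop immediately above (`ap += *s++ - '0';`), and incrementing
a valid pointer never yields NULL, so `!s` could not be true at that point.
The pattern, reduced to a standalone illustration (not the proxy code):

    #include <stdio.h>

    static unsigned int
    parse_digits(const char *s)
    {
        unsigned int ap = 0;
        while (*s >= '0' && *s <= '9')
            ap = ap * 10 + (unsigned int)(*s++ - '0');
        /* An "if (s == NULL)" check here would be unreachable: s was
         * already dereferenced above and cannot have become NULL. */
        return (ap);
    }

    int main(void) { printf("%u\n", parse_digits("51234|")); return 0; }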
diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c
index ecfb2c3..585f190 100644
--- a/sys/dev/bxe/bxe.c
+++ b/sys/dev/bxe/bxe.c
@@ -680,6 +680,8 @@ static void bxe_handle_fp_tq(void *context, int pending);
static int bxe_add_cdev(struct bxe_softc *sc);
static void bxe_del_cdev(struct bxe_softc *sc);
static int bxe_grc_dump(struct bxe_softc *sc);
+static int bxe_alloc_buf_rings(struct bxe_softc *sc);
+static void bxe_free_buf_rings(struct bxe_softc *sc);
/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
uint32_t
@@ -4204,9 +4206,20 @@ bxe_nic_unload(struct bxe_softc *sc,
{
uint8_t global = FALSE;
uint32_t val;
+ int i;
BXE_CORE_LOCK_ASSERT(sc);
+ sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
+
+ for (i = 0; i < sc->num_queues; i++) {
+ struct bxe_fastpath *fp;
+
+ fp = &sc->fp[i];
+ BXE_FP_TX_LOCK(fp);
+ BXE_FP_TX_UNLOCK(fp);
+ }
+
BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
/* mark driver as unloaded in shmem2 */
@@ -6245,8 +6258,6 @@ bxe_free_fp_buffers(struct bxe_softc *sc)
m_freem(m);
BXE_FP_TX_UNLOCK(fp);
}
- buf_ring_free(fp->tx_br, M_DEVBUF);
- fp->tx_br = NULL;
}
#endif
@@ -6276,14 +6287,6 @@ bxe_free_fp_buffers(struct bxe_softc *sc)
}
/* XXX verify all mbufs were reclaimed */
-
- if (mtx_initialized(&fp->tx_mtx)) {
- mtx_destroy(&fp->tx_mtx);
- }
-
- if (mtx_initialized(&fp->rx_mtx)) {
- mtx_destroy(&fp->rx_mtx);
- }
}
}
@@ -6505,15 +6508,6 @@ bxe_alloc_fp_buffers(struct bxe_softc *sc)
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
-#if __FreeBSD_version >= 800000
- fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
- M_DONTWAIT, &fp->tx_mtx);
- if (fp->tx_br == NULL) {
- BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i);
- goto bxe_alloc_fp_buffers_error;
- }
-#endif
-
ring_prod = cqe_ring_prod = 0;
fp->rx_bd_cons = 0;
fp->rx_cq_cons = 0;
@@ -9621,14 +9615,6 @@ bxe_init_eth_fp(struct bxe_softc *sc,
fp->sc = sc;
fp->index = idx;
- snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
- "bxe%d_fp%d_tx_lock", sc->unit, idx);
- mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
-
- snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
- "bxe%d_fp%d_rx_lock", sc->unit, idx);
- mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
-
fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
@@ -15820,6 +15806,89 @@ bxe_add_sysctls(struct bxe_softc *sc)
}
}
+static int
+bxe_alloc_buf_rings(struct bxe_softc *sc)
+{
+#if __FreeBSD_version >= 800000
+
+ int i;
+ struct bxe_fastpath *fp;
+
+ for (i = 0; i < sc->num_queues; i++) {
+
+ fp = &sc->fp[i];
+
+ fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
+ M_NOWAIT, &fp->tx_mtx);
+ if (fp->tx_br == NULL)
+ return (-1);
+ }
+#endif
+ return (0);
+}
+
+static void
+bxe_free_buf_rings(struct bxe_softc *sc)
+{
+#if __FreeBSD_version >= 800000
+
+ int i;
+ struct bxe_fastpath *fp;
+
+ for (i = 0; i < sc->num_queues; i++) {
+
+ fp = &sc->fp[i];
+
+ if (fp->tx_br) {
+ buf_ring_free(fp->tx_br, M_DEVBUF);
+ fp->tx_br = NULL;
+ }
+ }
+
+#endif
+}
+
+static void
+bxe_init_fp_mutexs(struct bxe_softc *sc)
+{
+ int i;
+ struct bxe_fastpath *fp;
+
+ for (i = 0; i < sc->num_queues; i++) {
+
+ fp = &sc->fp[i];
+
+ snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
+ "bxe%d_fp%d_tx_lock", sc->unit, i);
+ mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
+
+ snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
+ "bxe%d_fp%d_rx_lock", sc->unit, i);
+ mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
+ }
+}
+
+static void
+bxe_destroy_fp_mutexs(struct bxe_softc *sc)
+{
+ int i;
+ struct bxe_fastpath *fp;
+
+ for (i = 0; i < sc->num_queues; i++) {
+
+ fp = &sc->fp[i];
+
+ if (mtx_initialized(&fp->tx_mtx)) {
+ mtx_destroy(&fp->tx_mtx);
+ }
+
+ if (mtx_initialized(&fp->rx_mtx)) {
+ mtx_destroy(&fp->rx_mtx);
+ }
+ }
+}
+
+
/*
* Device attach function.
*
@@ -15931,8 +16000,25 @@ bxe_attach(device_t dev)
return (ENXIO);
}
+ bxe_init_fp_mutexs(sc);
+
+ if (bxe_alloc_buf_rings(sc) != 0) {
+ bxe_free_buf_rings(sc);
+ bxe_interrupt_free(sc);
+ bxe_del_cdev(sc);
+ if (sc->ifnet != NULL) {
+ ether_ifdetach(sc->ifnet);
+ }
+ ifmedia_removeall(&sc->ifmedia);
+ bxe_release_mutexes(sc);
+ bxe_deallocate_bars(sc);
+ pci_disable_busmaster(dev);
+ return (ENXIO);
+ }
+
/* allocate ilt */
if (bxe_alloc_ilt_mem(sc) != 0) {
+ bxe_free_buf_rings(sc);
bxe_interrupt_free(sc);
bxe_del_cdev(sc);
if (sc->ifnet != NULL) {
@@ -15948,6 +16034,7 @@ bxe_attach(device_t dev)
/* allocate the host hardware/software hsi structures */
if (bxe_alloc_hsi_mem(sc) != 0) {
bxe_free_ilt_mem(sc);
+ bxe_free_buf_rings(sc);
bxe_interrupt_free(sc);
bxe_del_cdev(sc);
if (sc->ifnet != NULL) {
@@ -16055,12 +16142,16 @@ bxe_detach(device_t dev)
/* free ilt */
bxe_free_ilt_mem(sc);
+ bxe_free_buf_rings(sc);
+
/* release the interrupts */
bxe_interrupt_free(sc);
/* Release the mutexes*/
+ bxe_destroy_fp_mutexs(sc);
bxe_release_mutexes(sc);
+
/* Release the PCIe BAR mapped memory */
bxe_deallocate_bars(sc);
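
Note: taken together, the bxe changes move the per-queue mutexes and
buf_rings from load/unload time to attach/detach time, and bxe_nic_unload()
now clears IFF_DRV_RUNNING and then takes and drops each TX lock. That empty
critical section is a fence: any transmitter that observed the flag set is
inside the lock, so once unload has cycled it, all such transmitters have
drained. A userland sketch of the idiom (pthreads and an atomic flag
standing in for fp->tx_mtx and IFF_DRV_RUNNING):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool running = true;
    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    tx_path(void)
    {
        pthread_mutex_lock(&tx_lock);
        if (atomic_load(&running)) {
            /* ... enqueue onto the TX ring ... */
        }
        pthread_mutex_unlock(&tx_lock);
    }

    static void
    unload(void)
    {
        atomic_store(&running, false);
        /* Fence: after this lock/unlock pair, no tx_path that saw
         * running == true can still be in its critical section. */
        pthread_mutex_lock(&tx_lock);
        pthread_mutex_unlock(&tx_lock);
        /* TX resources can now be torn down safely. */
    }

    int main(void) { tx_path(); unload(); return 0; }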
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 7b73a2d..a633f72 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -159,8 +159,8 @@ get_pcb_user_save_td(struct thread *td)
{
vm_offset_t p;
p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
- cpu_max_ext_state_size;
- KASSERT((p % 64) == 0, ("Unaligned pcb_user_save area"));
+ roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
+ KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
return ((union savefpu *)p);
}
@@ -179,7 +179,8 @@ get_pcb_td(struct thread *td)
vm_offset_t p;
p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
- cpu_max_ext_state_size - sizeof(struct pcb);
+ roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
+ sizeof(struct pcb);
return ((struct pcb *)p);
}
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index c3a2226..a3402f8 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -1114,9 +1114,9 @@ callout_schedule(struct callout *c, int to_ticks)
}
int
-_callout_stop_safe(c, safe)
+_callout_stop_safe(c, flags)
struct callout *c;
- int safe;
+ int flags;
{
struct callout_cpu *cc, *old_cc;
struct lock_class *class;
@@ -1127,7 +1127,7 @@ _callout_stop_safe(c, safe)
* Some old subsystems don't hold Giant while running a callout_stop(),
* so just discard this check for the moment.
*/
- if (!safe && c->c_lock != NULL) {
+ if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
if (c->c_lock == &Giant.lock_object)
use_lock = mtx_owned(&Giant);
else {
@@ -1207,7 +1207,7 @@ again:
return (0);
}
- if (safe) {
+ if ((flags & CS_DRAIN) != 0) {
/*
* The current callout is running (or just
* about to run) and blocking is allowed, so
@@ -1319,7 +1319,7 @@ again:
CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
c, c->c_func, c->c_arg);
CC_UNLOCK(cc);
- return (0);
+ return ((flags & CS_MIGRBLOCK) != 0);
}
CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
c, c->c_func, c->c_arg);
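
Note: the boolean `safe` argument becomes a flags word so two orthogonal
behaviors can share one entry point: CS_DRAIN is the old `safe != 0` (sleep
until a running callout finishes), while CS_MIGRBLOCK only changes what a
migration-postponed stop returns, so sleepq_check_timeout() can tell "the
callout was executing" apart from a plain failed stop. A simplified view of
the mapping, inferred from this diff:

    /* Simplified; see sys/sys/callout.h for the real definitions. */
    #define CS_DRAIN     0x0001  /* wait for a running callout to finish */
    #define CS_MIGRBLOCK 0x0002  /* postponed stop reports "was running" */

    int _callout_stop_safe(struct callout *, int flags);

    #define callout_stop(c)  _callout_stop_safe((c), 0)
    #define callout_drain(c) _callout_stop_safe((c), CS_DRAIN)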
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index d4ae25f..c490460 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -572,7 +572,8 @@ sleepq_check_timeout(void)
* another CPU, so synchronize with it to avoid having it
* accidentally wake up a subsequent sleep.
*/
- else if (callout_stop(&td->td_slpcallout) == 0) {
+ else if (_callout_stop_safe(&td->td_slpcallout, CS_MIGRBLOCK)
+ == 0) {
td->td_flags |= TDF_TIMEOUT;
TD_SET_SLEEPING(td);
mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
diff --git a/sys/sys/callout.h b/sys/sys/callout.h
index 6e18ae7..d3f2bca 100644
--- a/sys/sys/callout.h
+++ b/sys/sys/callout.h
@@ -62,6 +62,12 @@ struct callout_handle {
struct callout *callout;
};
+/* Flags for callout_stop_safe() */
+#define CS_DRAIN 0x0001 /* callout_drain(), wait allowed */
+#define CS_MIGRBLOCK 0x0002 /* Block migration, return value
+ indicates that the callout was
+ executing */
+
#ifdef _KERNEL
/*
* Note the flags field is actually *two* fields. The c_flags
@@ -81,7 +87,7 @@ struct callout_handle {
*/
#define callout_active(c) ((c)->c_flags & CALLOUT_ACTIVE)
#define callout_deactivate(c) ((c)->c_flags &= ~CALLOUT_ACTIVE)
-#define callout_drain(c) _callout_stop_safe(c, 1)
+#define callout_drain(c) _callout_stop_safe(c, CS_DRAIN)
void callout_init(struct callout *, int);
void _callout_init_lock(struct callout *, struct lock_object *, int);
#define callout_init_mtx(c, mtx, flags) \
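
Note: the practical rule for callers is unchanged by this diff:
callout_stop() returns immediately and can leave the handler running on
another CPU, while callout_drain() (now spelled CS_DRAIN internally) sleeps
until the handler has finished, so it must not be invoked with the callout's
own lock held. A hedged sketch of the usual teardown idiom (hypothetical
softc, kernel context):

    /* Hypothetical driver teardown: callout_drain() guarantees the
     * handler is no longer running before its state is destroyed. */
    struct foo_softc {
        struct mtx     foo_mtx;
        struct callout foo_tick;
    };

    static void
    foo_detach(struct foo_softc *sc)
    {
        callout_drain(&sc->foo_tick);  /* may sleep; foo_mtx not held */
        mtx_destroy(&sc->foo_mtx);
        free(sc, M_DEVBUF);
    }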