author     wpaul <wpaul@FreeBSD.org>    2004-04-14 07:48:03 +0000
committer  wpaul <wpaul@FreeBSD.org>    2004-04-14 07:48:03 +0000
commit     9765d24df650a8593a1d8dbd170e1f17b4bcb60f
tree       2b4bcda838ff86f31726acf476842f624cb8bd4f
parent     6ad9bc9a7779734ead6ac09fa9f5ef748368d800
Continue my efforts to imitate Windows as closely as possible by attempting to duplicate Windows spinlocks. Windows spinlocks differ from FreeBSD spinlocks in the way they block preemption. FreeBSD spinlocks use critical_enter(), which masks off _all_ interrupts. This prevents any other threads from being scheduled, but it also prevents ISRs from running. In Windows, preemption is achieved by raising the processor IRQL to DISPATCH_LEVEL, which prevents other threads from preempting you, but does _not_ prevent device ISRs from running. (This is essentially what Solaris calls dispatcher locks.) The Windows spinlock itself (kspin_lock) is just an integer value which is atomically set when you acquire the lock and atomically cleared when you release it.

FreeBSD doesn't have IRQ levels, so we have to cheat a little by using thread priorities: normal thread priority is PASSIVE_LEVEL, lowest interrupt thread priority is DISPATCH_LEVEL, highest interrupt thread priority is DEVICE_LEVEL (PI_REALTIME), and critical_enter() is HIGH_LEVEL. In practice, only PASSIVE_LEVEL and DISPATCH_LEVEL matter to us. The immediate benefit of all this is that I no longer have to rely on a mutex pool.

Now, I'm sure many people will be seized by the urge to criticize me for doing an end run around our own spinlock implementation, but it makes more sense to do it this way. Well, it does to me anyway.

Overview of the changes:

- Properly implement hal_lock(), hal_unlock(), hal_irql(), hal_raise_irql() and hal_lower_irql() so that they more closely resemble their Windows counterparts. The IRQL is determined by thread priority.

- Make ntoskrnl_lock_dpc() and ntoskrnl_unlock_dpc() do what they do in Windows, which is to atomically set/clear the lock value. These routines are designed to be called from DISPATCH_LEVEL, and are actually half of the work involved in acquiring/releasing spinlocks.

- Add FASTCALL1(), FASTCALL2() and FASTCALL3() macros/wrappers that allow us to call a _fastcall function in spite of the fact that our version of gcc doesn't support __attribute__((__fastcall__)) yet. The macros take 1, 2 or 3 arguments, respectively. We need to call hal_lock(), hal_unlock() etc. ourselves, but can't really invoke the functions directly. I could have just made the underlying functions native routines and put _fastcall wrappers around them for the benefit of Windows binaries, but that would create needless bloat.

- Remove ndis_mtxpool and all references to it. We don't need it anymore.

- Re-implement the NdisSpinLock routines so that they use hal_lock() and friends like they do in Windows.

- Use the new spinlock methods for handling lookaside lists and linked list updates in place of the mutex locks that were there before.

- Remove mutex locking from ndis_isr() and ndis_intrhand(), since they're already called with ndis_intrmtx held in if_ndis.c.

- Put the ndis_destroy_lock() code under an explicit #ifdef notdef/#endif. It turns out there are some drivers which stupidly free the memory in which their spinlocks reside before calling ndis_destroy_lock() on them (a touch-after-free bug). The ADMtek wireless driver is guilty of this faux pas. (Why this doesn't clobber Windows, I have no idea.)

- Make NdisDprAcquireSpinLock() and NdisDprReleaseSpinLock() into real functions instead of aliasing them to NdisAcquireSpinLock() and NdisReleaseSpinLock(). The Dpr routines use KeAcquireSpinLockAtDpcLevel() and KeReleaseSpinLockFromDpcLevel(), which acquire and release the lock without twiddling the IRQL.

- In ndis_linksts_done(), do _not_ call ndis_getstate_80211(). Some drivers may call the status/status-done callbacks as the result of setting an OID: ndis_getstate_80211() gets OIDs, which means we might cause the driver to recursively access some of its internal structures unexpectedly. The ndis_ticktask() routine will call ndis_getstate_80211() for us eventually anyway.

- Fix the channel setting code a little in ndis_setstate_80211(), and initialize the channel to IEEE80211_CHAN_ANYC. (The Microsoft spec says you're not supposed to twiddle the channel in BSS mode; I may need to enforce this later.) This fixes the problems I was having with the ADMtek adm8211 driver: we were setting the channel to a non-standard default, which would cause it to fail to associate in BSS mode.

- Use hal_raise_irql() to raise our IRQL to DISPATCH_LEVEL when calling certain miniport routines, per the Microsoft documentation.

I think that's everything. Hopefully, other than fixing the ADMtek driver, there should be no apparent change in behavior.
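To make the model above concrete, here is a minimal sketch (illustration only, not part of the patch) of what acquiring and releasing one of these locks boils down to with the primitives introduced in this commit; example_lock and example_acquire_release() are hypothetical names used only for the sketch.

    static kspin_lock example_lock;         /* hypothetical lock, for illustration */

    static void
    example_acquire_release(void)
    {
            uint8_t         irql;

            /* KeAcquireSpinLock()/NdisAcquireSpinLock(): raise IRQL to
               DISPATCH_LEVEL, then atomically set the lock word. */
            irql = FASTCALL2(hal_lock, &example_lock, DISPATCH_LEVEL);

            /* ... touch data shared with DPCs and ISR-scheduled work here ... */

            /* KeReleaseSpinLock()/NdisReleaseSpinLock(): atomically clear the
               lock word, then restore the old IRQL. */
            FASTCALL2(hal_unlock, &example_lock, irql);
    }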
 sys/compat/ndis/hal_var.h       |   8
 sys/compat/ndis/kern_ndis.c     | 113
 sys/compat/ndis/ndis_var.h      |   5
 sys/compat/ndis/ntoskrnl_var.h  |  39
 sys/compat/ndis/pe_var.h        |  45
 sys/compat/ndis/subr_hal.c      | 137
 sys/compat/ndis/subr_ndis.c     |  89
 sys/compat/ndis/subr_ntoskrnl.c | 141
 sys/dev/if_ndis/if_ndis.c       |  86
 sys/dev/if_ndis/if_ndisvar.h    |  11
 10 files changed, 468 insertions(+), 206 deletions(-)
diff --git a/sys/compat/ndis/hal_var.h b/sys/compat/ndis/hal_var.h
index a8be257..64a209f 100644
--- a/sys/compat/ndis/hal_var.h
+++ b/sys/compat/ndis/hal_var.h
@@ -45,4 +45,12 @@
extern image_patch_table hal_functbl[];
+__BEGIN_DECLS
+__stdcall extern uint8_t hal_lock(/*kspin_lock * */void);
+__stdcall extern void hal_unlock(/*kspin_lock *, uint8_t*/void);
+__stdcall extern uint8_t hal_raise_irql(/*uint8_t*/ void);
+__stdcall extern void hal_lower_irql(/*uint8_t*/ void);
+__stdcall extern uint8_t hal_irql(void);
+__END_DECLS
+
#endif /* _HAL_VAR_H_ */
diff --git a/sys/compat/ndis/kern_ndis.c b/sys/compat/ndis/kern_ndis.c
index cd928dc..733e0d3 100644
--- a/sys/compat/ndis/kern_ndis.c
+++ b/sys/compat/ndis/kern_ndis.c
@@ -106,9 +106,8 @@ static int ndis_enlarge_thrqueue(int);
static int ndis_shrink_thrqueue(int);
static void ndis_runq(void *);
-extern struct mtx_pool *ndis_mtxpool;
static uma_zone_t ndis_packet_zone, ndis_buffer_zone;
-struct mtx *ndis_thr_mtx;
+struct mtx ndis_thr_mtx;
static STAILQ_HEAD(ndisqhead, ndis_req) ndis_ttodo;
struct ndisqhead ndis_itodo;
struct ndisqhead ndis_free;
@@ -207,23 +206,25 @@ ndis_runq(arg)
p = arg;
while (1) {
- kthread_suspend(p->np_p, 0);
+
+ /* Sleep, but preserve our original priority. */
+ ndis_thsuspend(p->np_p, 0);
/* Look for any jobs on the work queue. */
- mtx_pool_lock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_lock(&ndis_thr_mtx);
p->np_state = NDIS_PSTATE_RUNNING;
while(STAILQ_FIRST(p->np_q) != NULL) {
r = STAILQ_FIRST(p->np_q);
STAILQ_REMOVE_HEAD(p->np_q, link);
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
/* Do the work. */
if (r->nr_func != NULL)
(*r->nr_func)(r->nr_arg);
- mtx_pool_lock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_lock(&ndis_thr_mtx);
STAILQ_INSERT_HEAD(&ndis_free, r, link);
/* Check for a shutdown request */
@@ -232,7 +233,7 @@ ndis_runq(arg)
die = r;
}
p->np_state = NDIS_PSTATE_SLEEPING;
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
/* Bail if we were told to shut down. */
@@ -242,6 +243,7 @@ ndis_runq(arg)
wakeup(die);
kthread_exit(0);
+ return; /* notreached */
}
static int
@@ -250,7 +252,9 @@ ndis_create_kthreads()
struct ndis_req *r;
int i, error = 0;
- ndis_thr_mtx = mtx_pool_alloc(ndis_mtxpool);
+ mtx_init(&ndis_thr_mtx, "NDIS thread lock",
+ MTX_NDIS_LOCK, MTX_DEF);
+
STAILQ_INIT(&ndis_ttodo);
STAILQ_INIT(&ndis_itodo);
STAILQ_INIT(&ndis_free);
@@ -308,6 +312,8 @@ ndis_destroy_kthreads()
free(r, M_DEVBUF);
}
+ mtx_destroy(&ndis_thr_mtx);
+
return;
}
@@ -329,16 +335,16 @@ ndis_stop_thread(t)
/* Create and post a special 'exit' job. */
- mtx_pool_lock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_lock(&ndis_thr_mtx);
r = STAILQ_FIRST(&ndis_free);
STAILQ_REMOVE_HEAD(&ndis_free, link);
r->nr_func = NULL;
r->nr_arg = NULL;
r->nr_exit = TRUE;
STAILQ_INSERT_TAIL(q, r, link);
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
- kthread_resume(p);
+ ndis_thresume(p);
/* wait for thread exit */
@@ -346,12 +352,12 @@ ndis_stop_thread(t)
/* Now empty the job list. */
- mtx_pool_lock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_lock(&ndis_thr_mtx);
while ((r = STAILQ_FIRST(q)) != NULL) {
STAILQ_REMOVE_HEAD(q, link);
STAILQ_INSERT_HEAD(&ndis_free, r, link);
}
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
return;
}
@@ -367,10 +373,10 @@ ndis_enlarge_thrqueue(cnt)
r = malloc(sizeof(struct ndis_req), M_DEVBUF, M_WAITOK);
if (r == NULL)
return(ENOMEM);
- mtx_pool_lock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_lock(&ndis_thr_mtx);
STAILQ_INSERT_HEAD(&ndis_free, r, link);
ndis_jobs++;
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
}
return(0);
@@ -384,15 +390,15 @@ ndis_shrink_thrqueue(cnt)
int i;
for (i = 0; i < cnt; i++) {
- mtx_pool_lock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_lock(&ndis_thr_mtx);
r = STAILQ_FIRST(&ndis_free);
if (r == NULL) {
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
return(ENOMEM);
}
STAILQ_REMOVE_HEAD(&ndis_free, link);
ndis_jobs--;
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
free(r, M_DEVBUF);
}
@@ -417,17 +423,17 @@ ndis_unsched(func, arg, t)
p = ndis_iproc.np_p;
}
- mtx_pool_lock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_lock(&ndis_thr_mtx);
STAILQ_FOREACH(r, q, link) {
if (r->nr_func == func && r->nr_arg == arg) {
STAILQ_REMOVE(q, r, ndis_req, link);
STAILQ_INSERT_HEAD(&ndis_free, r, link);
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
return(0);
}
}
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
return(ENOENT);
}
@@ -451,20 +457,20 @@ ndis_sched(func, arg, t)
p = ndis_iproc.np_p;
}
- mtx_pool_lock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_lock(&ndis_thr_mtx);
/*
* Check to see if an instance of this job is already
* pending. If so, don't bother queuing it again.
*/
STAILQ_FOREACH(r, q, link) {
if (r->nr_func == func && r->nr_arg == arg) {
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
return(0);
}
}
r = STAILQ_FIRST(&ndis_free);
if (r == NULL) {
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
return(EAGAIN);
}
STAILQ_REMOVE_HEAD(&ndis_free, link);
@@ -476,7 +482,7 @@ ndis_sched(func, arg, t)
s = ndis_tproc.np_state;
else
s = ndis_iproc.np_state;
- mtx_pool_unlock(ndis_mtxpool, ndis_thr_mtx);
+ mtx_unlock(&ndis_thr_mtx);
/*
* Post the job, but only if the thread is actually blocked
@@ -486,11 +492,32 @@ ndis_sched(func, arg, t)
* it up until KeWaitForObject() gets woken up on its own.
*/
if (s == NDIS_PSTATE_SLEEPING)
- kthread_resume(p);
+ ndis_thresume(p);
return(0);
}
+int
+ndis_thsuspend(p, timo)
+ struct proc *p;
+ int timo;
+{
+ int error;
+
+ PROC_LOCK(p);
+ error = msleep(&p->p_siglist, &p->p_mtx,
+ curthread->td_priority|PDROP, "ndissp", timo);
+ return(error);
+}
+
+void
+ndis_thresume(p)
+ struct proc *p;
+{
+ wakeup(&p->p_siglist);
+ return;
+}
+
__stdcall static void
ndis_sendrsrcavail_func(adapter)
ndis_handle adapter;
@@ -706,6 +733,7 @@ ndis_return_packet(buf, arg)
ndis_handle adapter;
ndis_packet *p;
__stdcall ndis_return_handler returnfunc;
+ uint8_t irql;
if (arg == NULL)
return;
@@ -722,8 +750,11 @@ ndis_return_packet(buf, arg)
sc = p->np_softc;
returnfunc = sc->ndis_chars.nmc_return_packet_func;
adapter = sc->ndis_block.nmb_miniportadapterctx;
- if (adapter != NULL)
+ if (adapter != NULL) {
+ irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
returnfunc(adapter, p);
+ FASTCALL1(hal_lower_irql, irql);
+ }
return;
}
@@ -1035,6 +1066,7 @@ ndis_set_info(arg, oid, buf, buflen)
__stdcall ndis_setinfo_handler setfunc;
uint32_t byteswritten = 0, bytesneeded = 0;
int error;
+ uint8_t irql;
sc = arg;
NDIS_LOCK(sc);
@@ -1045,13 +1077,16 @@ ndis_set_info(arg, oid, buf, buflen)
if (adapter == NULL || setfunc == NULL)
return(ENXIO);
+ irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
rval = setfunc(adapter, oid, buf, *buflen,
&byteswritten, &bytesneeded);
+ FASTCALL1(hal_lower_irql, irql);
if (rval == NDIS_STATUS_PENDING) {
PROC_LOCK(curthread->td_proc);
error = msleep(&sc->ndis_block.nmb_wkupdpctimer,
- &curthread->td_proc->p_mtx, PPAUSE|PDROP,
+ &curthread->td_proc->p_mtx,
+ curthread->td_priority|PDROP,
"ndisset", 5 * hz);
rval = sc->ndis_block.nmb_setstat;
}
@@ -1091,6 +1126,7 @@ ndis_send_packets(arg, packets, cnt)
__stdcall ndis_senddone_func senddonefunc;
int i;
ndis_packet *p;
+ uint8_t irql;
sc = arg;
adapter = sc->ndis_block.nmb_miniportadapterctx;
@@ -1098,7 +1134,9 @@ ndis_send_packets(arg, packets, cnt)
return(ENXIO);
sendfunc = sc->ndis_chars.nmc_sendmulti_func;
senddonefunc = sc->ndis_block.nmb_senddone_func;
+ irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
sendfunc(adapter, packets, cnt);
+ FASTCALL1(hal_lower_irql, irql);
for (i = 0; i < cnt; i++) {
p = packets[i];
@@ -1126,6 +1164,7 @@ ndis_send_packet(arg, packet)
ndis_status status;
__stdcall ndis_sendsingle_handler sendfunc;
__stdcall ndis_senddone_func senddonefunc;
+ uint8_t irql;
sc = arg;
adapter = sc->ndis_block.nmb_miniportadapterctx;
@@ -1134,7 +1173,9 @@ ndis_send_packet(arg, packet)
sendfunc = sc->ndis_chars.nmc_sendsingle_func;
senddonefunc = sc->ndis_block.nmb_senddone_func;
+ irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
status = sendfunc(adapter, packet, packet->np_private.npp_flags);
+ FASTCALL1(hal_lower_irql, irql);
if (status == NDIS_STATUS_PENDING)
return(0);
@@ -1210,6 +1251,7 @@ ndis_reset_nic(arg)
uint8_t addressing_reset;
struct ifnet *ifp;
int rval;
+ uint8_t irql;
sc = arg;
ifp = &sc->arpcom.ac_if;
@@ -1220,11 +1262,14 @@ ndis_reset_nic(arg)
if (adapter == NULL || resetfunc == NULL)
return(EIO);
+ irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
rval = resetfunc(&addressing_reset, adapter);
+ FASTCALL1(hal_lower_irql, irql);
+
if (rval == NDIS_STATUS_PENDING) {
PROC_LOCK(curthread->td_proc);
msleep(sc, &curthread->td_proc->p_mtx,
- PPAUSE|PDROP, "ndisrst", 0);
+ curthread->td_priority|PDROP, "ndisrst", 0);
}
return(0);
@@ -1346,10 +1391,8 @@ ndis_enable_intr(arg)
__stdcall ndis_enable_interrupts_handler intrenbfunc;
sc = arg;
- NDIS_LOCK(sc);
adapter = sc->ndis_block.nmb_miniportadapterctx;
intrenbfunc = sc->ndis_chars.nmc_enable_interrupts_func;
- NDIS_UNLOCK(sc);
if (adapter == NULL || intrenbfunc == NULL)
return;
intrenbfunc(adapter);
@@ -1392,10 +1435,8 @@ ndis_isr(arg, ourintr, callhandler)
return(EINVAL);
sc = arg;
- NDIS_LOCK(sc);
adapter = sc->ndis_block.nmb_miniportadapterctx;
isrfunc = sc->ndis_chars.nmc_isr_func;
- NDIS_UNLOCK(sc);
if (adapter == NULL || isrfunc == NULL)
return(ENXIO);
@@ -1443,6 +1484,7 @@ ndis_get_info(arg, oid, buf, buflen)
__stdcall ndis_queryinfo_handler queryfunc;
uint32_t byteswritten = 0, bytesneeded = 0;
int error;
+ uint8_t irql;
sc = arg;
NDIS_LOCK(sc);
@@ -1453,15 +1495,18 @@ ndis_get_info(arg, oid, buf, buflen)
if (adapter == NULL || queryfunc == NULL)
return(ENXIO);
+ irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
rval = queryfunc(adapter, oid, buf, *buflen,
&byteswritten, &bytesneeded);
+ FASTCALL1(hal_lower_irql, irql);
/* Wait for requests that block. */
if (rval == NDIS_STATUS_PENDING) {
PROC_LOCK(curthread->td_proc);
error = msleep(&sc->ndis_block.nmb_wkupdpctimer,
- &curthread->td_proc->p_mtx, PPAUSE|PDROP,
+ &curthread->td_proc->p_mtx,
+ curthread->td_priority|PDROP,
"ndisget", 5 * hz);
rval = sc->ndis_block.nmb_getstat;
}
diff --git a/sys/compat/ndis/ndis_var.h b/sys/compat/ndis/ndis_var.h
index cd4d82b..f2c3566 100644
--- a/sys/compat/ndis/ndis_var.h
+++ b/sys/compat/ndis/ndis_var.h
@@ -1152,6 +1152,9 @@ typedef struct ndis_packet ndis_packet;
/* mbuf ext type for NDIS */
#define EXT_NDIS 0x999
+/* mtx type for NDIS */
+#define MTX_NDIS_LOCK "NDIS lock"
+
struct ndis_filterdbs {
union {
void *nf_ethdb;
@@ -1513,6 +1516,8 @@ extern int ndis_add_sysctl(void *, char *, char *, char *, int);
extern int ndis_flush_sysctls(void *);
extern int ndis_sched(void (*)(void *), void *, int);
extern int ndis_unsched(void (*)(void *), void *, int);
+extern int ndis_thsuspend(struct proc *, int);
+extern void ndis_thresume(struct proc *);
__END_DECLS
#endif /* _NDIS_VAR_H_ */
diff --git a/sys/compat/ndis/ntoskrnl_var.h b/sys/compat/ndis/ntoskrnl_var.h
index a6d2d80..1e7b5a1 100644
--- a/sys/compat/ndis/ntoskrnl_var.h
+++ b/sys/compat/ndis/ntoskrnl_var.h
@@ -189,6 +189,7 @@ typedef struct nt_dispatch_header nt_dispatch_header;
#define LOW_LEVEL 0
#define APC_LEVEL 1
#define DISPATCH_LEVEL 2
+#define DEVICE_LEVEL (DISPATCH_LEVEL + 1)
#define PROFILE_LEVEL 27
#define CLOCK1_LEVEL 28
#define CLOCK2_LEVEL 28
@@ -199,6 +200,18 @@ typedef struct nt_dispatch_header nt_dispatch_header;
#define SYNC_LEVEL_UP DISPATCH_LEVEL
#define SYNC_LEVEL_MP (IPI_LEVEL - 1)
+#define AT_PASSIVE_LEVEL(td) \
+ (((td)->td_proc->p_flag & P_KTHREAD) == FALSE)
+
+#define AT_DISPATCH_LEVEL(td) \
+ ((td)->td_priority == PI_SOFT)
+
+#define AT_DIRQL_LEVEL(td) \
+ ((td)->td_priority < PRI_MIN_KERN)
+
+#define AT_HIGH_LEVEL(td) \
+ ((td)->td_critnest != 0)
+
struct nt_objref {
nt_dispatch_header no_dh;
void *no_obj;
@@ -474,7 +487,6 @@ typedef uint32_t (*driver_dispatch)(device_object *, irp *);
#define NDIS_KSTACK_PAGES 8
extern image_patch_table ntoskrnl_functbl[];
-extern struct mtx *ntoskrnl_dispatchlock;
__BEGIN_DECLS
extern int ntoskrnl_libinit(void);
@@ -489,13 +501,26 @@ __stdcall extern uint8_t ntoskrnl_set_timer_ex(ktimer *, int64_t,
uint32_t, kdpc *);
__stdcall extern uint8_t ntoskrnl_cancel_timer(ktimer *);
__stdcall extern uint8_t ntoskrnl_read_timer(ktimer *);
-__stdcall uint32_t ntoskrnl_waitforobj(nt_dispatch_header *, uint32_t,
+__stdcall extern uint32_t ntoskrnl_waitforobj(nt_dispatch_header *, uint32_t,
uint32_t, uint8_t, int64_t *);
-__stdcall void ntoskrnl_init_event(nt_kevent *, uint32_t, uint8_t);
-__stdcall void ntoskrnl_clear_event(nt_kevent *);
-__stdcall uint32_t ntoskrnl_read_event(nt_kevent *);
-__stdcall uint32_t ntoskrnl_set_event(nt_kevent *, uint32_t, uint8_t);
-__stdcall uint32_t ntoskrnl_reset_event(nt_kevent *);
+__stdcall extern void ntoskrnl_init_event(nt_kevent *, uint32_t, uint8_t);
+__stdcall extern void ntoskrnl_clear_event(nt_kevent *);
+__stdcall extern uint32_t ntoskrnl_read_event(nt_kevent *);
+__stdcall extern uint32_t ntoskrnl_set_event(nt_kevent *, uint32_t, uint8_t);
+__stdcall extern uint32_t ntoskrnl_reset_event(nt_kevent *);
+__stdcall extern void ntoskrnl_lock_dpc(/*kspin_lock * */ void);
+__stdcall extern void ntoskrnl_unlock_dpc(/*kspin_lock * */ void);
+
+/*
+ * On the Windows x86 arch, KeAcquireSpinLock() and KeReleaseSpinLock()
+ * routines live in the HAL. We try to imitate this behavior.
+ */
+#ifdef __i386__
+#define ntoskrnl_acquire_spinlock(a, b) \
+ *(b) = FASTCALL2(hal_lock, a, 0)
+#define ntoskrnl_release_spinlock(a, b) \
+ FASTCALL2(hal_unlock, a, b)
+#endif /* __i386__ */
__END_DECLS
#endif /* _NTOSKRNL_VAR_H_ */
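(Illustrative usage sketch, not part of the patch: on i386 the wrapper macros above expand to fastcall thunks into the HAL; lk and irql below are hypothetical locals.)

    kspin_lock      lk = 0;         /* KeInitializeSpinLock() just clears the word */
    uint8_t         irql;

    ntoskrnl_acquire_spinlock(&lk, &irql);  /* raise IRQL, then set the lock word */
    /* ... critical section at DISPATCH_LEVEL ... */
    ntoskrnl_release_spinlock(&lk, irql);   /* clear the lock word, then restore the IRQL */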
diff --git a/sys/compat/ndis/pe_var.h b/sys/compat/ndis/pe_var.h
index d267a8c..e156f0f 100644
--- a/sys/compat/ndis/pe_var.h
+++ b/sys/compat/ndis/pe_var.h
@@ -422,6 +422,51 @@ typedef struct image_patch_table image_patch_table;
#define __stdcall __attribute__((__stdcall__))
#endif
+
+/*
+ * This mess allows us to call a _fastcall style routine with our
+ * version of gcc, which lacks __attribute__((__fastcall__)). Only
+ * has meaning on x86; everywhere else, it's a no-op.
+ */
+
+#ifdef __i386__
+typedef __stdcall int (*fcall)(void);
+typedef __stdcall int (*fcall2)(int);
+static __inline uint32_t
+fastcall1(fcall f, uint32_t a)
+{
+ __asm__ __volatile__ ("movl %0,%%ecx" : : "r" (a));
+ return(f());
+}
+
+static __inline uint32_t
+fastcall2(fcall f, uint32_t a, uint32_t b)
+{
+ __asm__ __volatile__ ("movl %0,%%ecx" : : "r" (a));
+ __asm__ __volatile__ ("movl %0,%%edx" : : "r" (b));
+ return(f());
+}
+
+static __inline uint32_t
+fastcall3(fcall2 f, uint32_t a, uint32_t b, uint32_t c)
+{
+ __asm__ __volatile__ ("movl %0,%%ecx" : : "r" (a));
+ __asm__ __volatile__ ("movl %0,%%edx" : : "r" (b));
+ return(f(c));
+}
+
+#define FASTCALL1(f, a) \
+ fastcall1((fcall)(f), (uint32_t)(a))
+#define FASTCALL2(f, a, b) \
+ fastcall2((fcall)(f), (uint32_t)(a), (uint32_t)(b))
+#define FASTCALL3(f, a, b, c) \
+ fastcall3((fcall2)(f), (uint32_t)(a), (uint32_t)(b), (uint32_t)(c))
+#else
+#define FASTCALL1(f, a) (f)((a))
+#define FASTCALL2(f, a, b) (f)((a), (b))
+#define FASTCALL3(f, a, b, c) (f)((a), (b), (c))
+#endif /* __i386__ */
+
__BEGIN_DECLS
extern int pe_get_dos_header(vm_offset_t, image_dos_header *);
extern int pe_is_nt_image(vm_offset_t);
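(Illustration only, not part of the patch: wfunc below is a hypothetical _fastcall routine with three arguments. It expects the first two arguments in %ecx/%edx and the third on the stack, so the wrapper loads %ecx/%edx by hand and lets the __stdcall call sequence push the remaining argument.)

    __stdcall extern uint32_t wfunc(/* a in %ecx, b in %edx */ uint32_t c);

    static __inline uint32_t
    call_wfunc(uint32_t a, uint32_t b, uint32_t c)
    {
            return(FASTCALL3(wfunc, a, b, c));
    }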
diff --git a/sys/compat/ndis/subr_hal.c b/sys/compat/ndis/subr_hal.c
index 972508f..beae7c1 100644
--- a/sys/compat/ndis/subr_hal.c
+++ b/sys/compat/ndis/subr_hal.c
@@ -41,6 +41,8 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
#include <sys/systm.h>
#include <machine/clock.h>
@@ -76,12 +78,7 @@ __stdcall static void hal_readport_buf_ushort(uint16_t *,
uint16_t *, uint32_t);
__stdcall static void hal_readport_buf_uchar(uint8_t *,
uint8_t *, uint32_t);
-__stdcall static uint8_t hal_lock(/*kspin_lock * */void);
-__stdcall static void hal_unlock(/*kspin_lock *, uint8_t*/void);
-__stdcall static uint8_t hal_irql(void);
__stdcall static uint64_t hal_perfcount(uint64_t *);
-__stdcall static uint8_t hal_raise_irql(/*uint8_t*/ void);
-__stdcall static void hal_lower_irql(/*uint8_t*/ void);
__stdcall static void dummy (void);
extern struct mtx_pool *ndis_mtxpool;
@@ -208,33 +205,94 @@ hal_readport_buf_uchar(port, val, cnt)
return;
}
-__stdcall static uint8_t
+/*
+ * The spinlock implementation in Windows differs from that of FreeBSD.
+ * The basic operation of spinlocks involves two steps: 1) spin in a
+ * tight loop while trying to acquire a lock, 2) after obtaining the
+ * lock, disable preemption. (Note that on uniprocessor systems, you're
+ * allowed to skip the first step and just lock out pre-emption, since
+ * it's not possible for you to be in contention with another running
+ * thread.) Later, you release the lock then re-enable preemption.
+ * The difference between Windows and FreeBSD lies in how preemption
+ * is disabled. In FreeBSD, it's done using critical_enter(), which on
+ * the x86 arch translates to a cli instruction. This masks off all
+ * interrupts, and effectively stops the scheduler from ever running
+ * so _nothing_ can execute except the current thread. In Windows,
+ * preemption is disabled by raising the processor IRQL to DISPATCH_LEVEL.
+ * This stops other threads from running, but does _not_ block device
+ * interrupts. This means ISRs can still run, and they can make other
+ * threads runable, but those other threads won't be able to execute
+ * until the current thread lowers the IRQL to something less than
+ * DISPATCH_LEVEL.
+ *
+ * In FreeBSD, ISRs run in interrupt threads, so to duplicate the
+ * Windows notion of IRQLs, we use the following rules:
+ *
+ * PASSIVE_LEVEL == normal kernel thread priority
+ * DISPATCH_LEVEL == lowest interrupt thread priority (PI_SOFT)
+ * DEVICE_LEVEL == highest interrupt thread priority (PI_REALTIME)
+ * HIGH_LEVEL == interrupts disabled (critical_enter())
+ *
+ * Be aware that, at least on the x86 arch, the Windows spinlock
+ * functions are divided up in peculiar ways. The actual spinlock
+ * functions are KfAcquireSpinLock() and KfReleaseSpinLock(), and
+ * they live in HAL.dll. Meanwhile, KeInitializeSpinLock(),
+ * KefAcquireSpinLockAtDpcLevel() and KefReleaseSpinLockFromDpcLevel()
+ * live in ntoskrnl.exe. Most Windows source code will call
+ * KeAcquireSpinLock() and KeReleaseSpinLock(), but these are just
+ * macros that call KfAcquireSpinLock() and KfReleaseSpinLock().
+ * KefAcquireSpinLockAtDpcLevel() and KefReleaseSpinLockFromDpcLevel()
+ * perform the lock acquisition/release functions without doing the
+ * IRQL manipulation, and are used when one is already running at
+ * DISPATCH_LEVEL. Make sense? Good.
+ *
+ * According to the Microsoft documentation, any thread that calls
+ * KeAcquireSpinLock() must be running at IRQL <= DISPATCH_LEVEL. If
+ * we detect someone trying to acquire a spinlock from DEVICE_LEVEL
+ * or HIGH_LEVEL, we panic.
+ */
+
+__stdcall uint8_t
hal_lock(/*lock*/void)
{
kspin_lock *lock;
+ uint8_t oldirql;
__asm__ __volatile__ ("" : "=c" (lock));
- mtx_pool_lock(ndis_mtxpool, (struct mtx *)*lock);
- return(0);
+ /* I am so going to hell for this. */
+ if (hal_irql() > DISPATCH_LEVEL)
+ panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
+
+ oldirql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
+ FASTCALL1(ntoskrnl_lock_dpc, lock);
+ return(oldirql);
}
-__stdcall static void
+__stdcall void
hal_unlock(/*lock, newirql*/void)
{
kspin_lock *lock;
- uint8_t newiqrl;
+ uint8_t newirql;
+
+ __asm__ __volatile__ ("" : "=c" (lock), "=d" (newirql));
- __asm__ __volatile__ ("" : "=c" (lock), "=d" (newiqrl));
+ FASTCALL1(ntoskrnl_unlock_dpc, lock);
+ FASTCALL1(hal_lower_irql, newirql);
- mtx_pool_unlock(ndis_mtxpool, (struct mtx *)*lock);
return;
}
-__stdcall static uint8_t
+__stdcall uint8_t
hal_irql(void)
{
- return(DISPATCH_LEVEL);
+ if (AT_DISPATCH_LEVEL(curthread))
+ return(DISPATCH_LEVEL);
+ if (AT_DIRQL_LEVEL(curthread))
+ return(DEVICE_LEVEL);
+ if (AT_HIGH_LEVEL(curthread))
+ return(HIGH_LEVEL);
+ return(PASSIVE_LEVEL);
}
__stdcall static uint64_t
@@ -247,22 +305,63 @@ hal_perfcount(freq)
return((uint64_t)ticks);
}
-__stdcall static uint8_t
+__stdcall uint8_t
hal_raise_irql(/*irql*/ void)
{
uint8_t irql;
+ uint8_t oldirql;
__asm__ __volatile__ ("" : "=c" (irql));
- return(0);
+ switch(irql) {
+ case HIGH_LEVEL:
+ oldirql = hal_irql();
+ critical_enter();
+ break;
+ case DEVICE_LEVEL:
+ mtx_lock_spin(&sched_lock);
+ oldirql = curthread->td_priority;
+ sched_prio(curthread, PI_REALTIME);
+ mtx_unlock_spin(&sched_lock);
+ break;
+ case DISPATCH_LEVEL:
+ mtx_lock_spin(&sched_lock);
+ oldirql = curthread->td_priority;
+ sched_prio(curthread, PI_SOFT);
+ mtx_unlock_spin(&sched_lock);
+ break;
+ default:
+ panic("can't raise IRQL to unknown level %d", irql);
+ break;
+ }
+
+ return(oldirql);
}
-__stdcall static void
-hal_lower_irql(/*irql*/ void)
+__stdcall void
+hal_lower_irql(/*oldirql*/ void)
{
+ uint8_t oldirql;
uint8_t irql;
- __asm__ __volatile__ ("" : "=c" (irql));
+ __asm__ __volatile__ ("" : "=c" (oldirql));
+
+ irql = hal_irql();
+
+ switch (irql) {
+ case HIGH_LEVEL:
+ critical_exit();
+ break;
+ case DEVICE_LEVEL:
+ case DISPATCH_LEVEL:
+ mtx_lock_spin(&sched_lock);
+ sched_prio(curthread, oldirql);
+ mtx_unlock_spin(&sched_lock);
+ break;
+ default:
+ panic("can't lower IRQL to unknown level %d", irql);
+ break;
+ }
return;
}
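(Sketch, illustration only: the bracketing pattern kern_ndis.c now uses around miniport calls that are documented to run at DISPATCH_LEVEL; the caller's previous IRQL/priority is saved and restored.)

    uint8_t irql;

    irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
    /* ... call the miniport routine that expects DISPATCH_LEVEL here ... */
    FASTCALL1(hal_lower_irql, irql);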
diff --git a/sys/compat/ndis/subr_ndis.c b/sys/compat/ndis/subr_ndis.c
index 38bc6b8..24320b0 100644
--- a/sys/compat/ndis/subr_ndis.c
+++ b/sys/compat/ndis/subr_ndis.c
@@ -97,15 +97,14 @@ __FBSDID("$FreeBSD$");
#include <compat/ndis/pe_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
+#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>
#include <compat/ndis/cfg_var.h>
#include <dev/if_ndis/if_ndisvar.h>
#define FUNC void(*)(void)
-static struct mtx *ndis_interlock;
static char ndis_filepath[MAXPATHLEN];
-struct mtx_pool *ndis_mtxpool;
extern struct nd_head ndis_devhead;
SYSCTL_STRING(_hw, OID_AUTO, ndis_filepath, CTLFLAG_RW, ndis_filepath,
@@ -139,6 +138,8 @@ __stdcall static void ndis_create_lock(ndis_spin_lock *);
__stdcall static void ndis_destroy_lock(ndis_spin_lock *);
__stdcall static void ndis_lock(ndis_spin_lock *);
__stdcall static void ndis_unlock(ndis_spin_lock *);
+__stdcall static void ndis_lock_dpr(ndis_spin_lock *);
+__stdcall static void ndis_unlock_dpr(ndis_spin_lock *);
__stdcall static uint32_t ndis_read_pci(ndis_handle, uint32_t,
uint32_t, void *, uint32_t);
__stdcall static uint32_t ndis_write_pci(ndis_handle, uint32_t,
@@ -296,16 +297,12 @@ int
ndis_libinit()
{
strcpy(ndis_filepath, "/compat/ndis");
- ndis_mtxpool = mtx_pool_create("ndis mutex pool",
- 1024, MTX_DEF | MTX_RECURSE | MTX_DUPOK);;
- ndis_interlock = mtx_pool_alloc(ndis_mtxpool);
return(0);
}
int
ndis_libfini()
{
- mtx_pool_destroy(&ndis_mtxpool);
return(0);
}
@@ -712,35 +709,82 @@ ndis_close_cfg(cfg)
return;
}
+/*
+ * Initialize a Windows spinlock.
+ */
__stdcall static void
ndis_create_lock(lock)
ndis_spin_lock *lock;
{
- lock->nsl_spinlock = (ndis_kspin_lock)mtx_pool_alloc(ndis_mtxpool);
+ lock->nsl_spinlock = 0;
+ lock->nsl_kirql = 0;
+
return;
}
+/*
+ * Destroy a Windows spinlock. This is a no-op for now. There are two reasons
+ * for this. One is that it's sort of superfluous: we don't have to do anything
+ * special to deallocate the spinlock. The other is that there are some buggy
+ * drivers which call NdisFreeSpinLock() _after_ calling NdisFreeMemory() on
+ * the block of memory in which the spinlock resides. (Yes, ADMtek, I'm
+ * talking to you.)
+ */
__stdcall static void
ndis_destroy_lock(lock)
ndis_spin_lock *lock;
{
- /* We use a mutex pool, so this is a no-op. */
+#ifdef notdef
+ lock->nsl_spinlock = 0;
+ lock->nsl_kirql = 0;
+#endif
return;
}
+/*
+ * Acquire a spinlock from IRQL <= DISPATCH_LEVEL.
+ */
+
__stdcall static void
ndis_lock(lock)
ndis_spin_lock *lock;
{
- mtx_pool_lock(ndis_mtxpool, (struct mtx *)lock->nsl_spinlock);
+ lock->nsl_kirql = FASTCALL2(hal_lock,
+ &lock->nsl_spinlock, DISPATCH_LEVEL);
return;
}
+/*
+ * Release a spinlock from IRQL == DISPATCH_LEVEL.
+ */
+
__stdcall static void
ndis_unlock(lock)
ndis_spin_lock *lock;
{
- mtx_pool_unlock(ndis_mtxpool, (struct mtx *)lock->nsl_spinlock);
+ FASTCALL2(hal_unlock, &lock->nsl_spinlock, lock->nsl_kirql);
+ return;
+}
+
+/*
+ * Acquire a spinlock when already running at IRQL == DISPATCH_LEVEL.
+ */
+__stdcall static void
+ndis_lock_dpr(lock)
+ ndis_spin_lock *lock;
+{
+ FASTCALL1(ntoskrnl_lock_dpc, &lock->nsl_spinlock);
+ return;
+}
+
+/*
+ * Release a spinlock without leaving IRQL == DISPATCH_LEVEL.
+ */
+__stdcall static void
+ndis_unlock_dpr(lock)
+ ndis_spin_lock *lock;
+{
+ FASTCALL1(ntoskrnl_unlock_dpc, &lock->nsl_spinlock);
return;
}
@@ -2196,13 +2240,14 @@ ndis_insert_head(head, entry, lock)
{
list_entry *flink;
- mtx_pool_lock(ndis_mtxpool, (struct mtx *)lock->nsl_spinlock);
+ lock->nsl_kirql = FASTCALL2(hal_lock,
+ &lock->nsl_spinlock, DISPATCH_LEVEL);
flink = head->nle_flink;
entry->nle_flink = flink;
entry->nle_blink = head;
flink->nle_blink = entry;
head->nle_flink = entry;
- mtx_pool_unlock(ndis_mtxpool, (struct mtx *)lock->nsl_spinlock);
+ FASTCALL2(hal_unlock, &lock->nsl_spinlock, lock->nsl_kirql);
return(flink);
}
@@ -2215,12 +2260,13 @@ ndis_remove_head(head, lock)
list_entry *flink;
list_entry *entry;
- mtx_pool_lock(ndis_mtxpool, (struct mtx *)lock->nsl_spinlock);
+ lock->nsl_kirql = FASTCALL2(hal_lock,
+ &lock->nsl_spinlock, DISPATCH_LEVEL);
entry = head->nle_flink;
flink = entry->nle_flink;
head->nle_flink = flink;
flink->nle_blink = head;
- mtx_pool_unlock(ndis_mtxpool, (struct mtx *)lock->nsl_spinlock);
+ FASTCALL2(hal_unlock, &lock->nsl_spinlock, lock->nsl_kirql);
return(entry);
}
@@ -2233,13 +2279,14 @@ ndis_insert_tail(head, entry, lock)
{
list_entry *blink;
- mtx_pool_lock(ndis_mtxpool, (struct mtx *)lock->nsl_spinlock);
+ lock->nsl_kirql = FASTCALL2(hal_lock,
+ &lock->nsl_spinlock, DISPATCH_LEVEL);
blink = head->nle_blink;
entry->nle_flink = head;
entry->nle_blink = blink;
blink->nle_flink = entry;
head->nle_blink = entry;
- mtx_pool_unlock(ndis_mtxpool, (struct mtx *)lock->nsl_spinlock);
+ FASTCALL2(hal_unlock, &lock->nsl_spinlock, lock->nsl_kirql);
return(blink);
}
@@ -2259,9 +2306,9 @@ ndis_sync_with_intr(intr, syncfunc, syncctx)
sc = (struct ndis_softc *)intr->ni_block->nmb_ifp;
sync = syncfunc;
- mtx_pool_lock(ndis_mtxpool, sc->ndis_intrmtx);
+ mtx_lock(&sc->ndis_intrmtx);
rval = sync(syncctx);
- mtx_pool_unlock(ndis_mtxpool, sc->ndis_intrmtx);
+ mtx_unlock(&sc->ndis_intrmtx);
return(rval);
}
@@ -2853,10 +2900,10 @@ image_patch_table ndis_functbl[] = {
{ "NdisCloseConfiguration", (FUNC)ndis_close_cfg },
{ "NdisReadConfiguration", (FUNC)ndis_read_cfg },
{ "NdisOpenConfiguration", (FUNC)ndis_open_cfg },
- { "NdisReleaseSpinLock", (FUNC)ndis_unlock },
- { "NdisDprAcquireSpinLock", (FUNC)ndis_lock },
- { "NdisDprReleaseSpinLock", (FUNC)ndis_unlock },
{ "NdisAcquireSpinLock", (FUNC)ndis_lock },
+ { "NdisReleaseSpinLock", (FUNC)ndis_unlock },
+ { "NdisDprAcquireSpinLock", (FUNC)ndis_lock_dpr },
+ { "NdisDprReleaseSpinLock", (FUNC)ndis_unlock_dpr },
{ "NdisAllocateSpinLock", (FUNC)ndis_create_lock },
{ "NdisFreeSpinLock", (FUNC)ndis_destroy_lock },
{ "NdisFreeMemory", (FUNC)ndis_free },
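(Illustrative sketch, not part of the patch: the two NDIS lock flavors side by side, using the internal names above. The plain routines raise and lower the IRQL around the lock word; the Dpr routines assume the caller is already at DISPATCH_LEVEL and only touch the lock word. lk is a hypothetical lock.)

    ndis_spin_lock  lk;

    ndis_create_lock(&lk);          /* NdisAllocateSpinLock(): clear the lock word and saved IRQL */

    ndis_lock(&lk);                 /* NdisAcquireSpinLock(): raise IRQL + set the lock word */
    ndis_unlock(&lk);               /* NdisReleaseSpinLock(): clear the lock word + lower IRQL */

    ndis_lock_dpr(&lk);             /* NdisDprAcquireSpinLock(): set the lock word only */
    ndis_unlock_dpr(&lk);           /* NdisDprReleaseSpinLock(): clear the lock word only */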
diff --git a/sys/compat/ndis/subr_ntoskrnl.c b/sys/compat/ndis/subr_ntoskrnl.c
index 8c9646c..7162d59 100644
--- a/sys/compat/ndis/subr_ntoskrnl.c
+++ b/sys/compat/ndis/subr_ntoskrnl.c
@@ -121,8 +121,6 @@ __stdcall static slist_entry *ntoskrnl_push_slist_ex(/*slist_header *,
slist_entry *,*/ kspin_lock *);
__stdcall static slist_entry *ntoskrnl_pop_slist_ex(/*slist_header *,
kspin_lock * */void);
-__stdcall static void ntoskrnl_lock_dpc(/*kspin_lock * */ void);
-__stdcall static void ntoskrnl_unlock_dpc(/*kspin_lock * */ void);
__stdcall static uint32_t
ntoskrnl_interlock_inc(/*volatile uint32_t * */ void);
__stdcall static uint32_t
@@ -167,17 +165,17 @@ static uint32_t ntoskrnl_dbgprint(char *, ...);
__stdcall static void ntoskrnl_debugger(void);
__stdcall static void dummy(void);
-static struct mtx *ntoskrnl_interlock;
-struct mtx *ntoskrnl_dispatchlock;
-extern struct mtx_pool *ndis_mtxpool;
+static struct mtx ntoskrnl_dispatchlock;
+static kspin_lock ntoskrnl_global;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
int
ntoskrnl_libinit()
{
- ntoskrnl_interlock = mtx_pool_alloc(ndis_mtxpool);
- ntoskrnl_dispatchlock = mtx_pool_alloc(ndis_mtxpool);
+ mtx_init(&ntoskrnl_dispatchlock,
+ "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF);
+ ntoskrnl_init_lock(&ntoskrnl_global);
TAILQ_INIT(&ntoskrnl_reflist);
return(0);
}
@@ -185,6 +183,7 @@ ntoskrnl_libinit()
int
ntoskrnl_libfini()
{
+ mtx_destroy(&ntoskrnl_dispatchlock);
return(0);
}
@@ -324,16 +323,13 @@ ntoskrnl_wakeup(arg)
obj = arg;
- mtx_pool_lock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_lock(&ntoskrnl_dispatchlock);
obj->dh_sigstate = TRUE;
e = obj->dh_waitlisthead.nle_flink;
while (e != &obj->dh_waitlisthead) {
w = (wait_block *)e;
td = w->wb_kthread;
- if (td->td_proc->p_flag & P_KTHREAD)
- kthread_resume(td->td_proc);
- else
- wakeup(td);
+ ndis_thresume(td->td_proc);
/*
* For synchronization objects, only wake up
* the first waiter.
@@ -342,7 +338,7 @@ ntoskrnl_wakeup(arg)
break;
e = e->nle_flink;
}
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return;
}
@@ -436,7 +432,7 @@ ntoskrnl_waitforobj(obj, reason, mode, alertable, duetime)
if (obj == NULL)
return(STATUS_INVALID_PARAMETER);
- mtx_pool_lock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_lock(&ntoskrnl_dispatchlock);
/*
* See if the object is a mutex. If so, and we already own
@@ -455,13 +451,13 @@ ntoskrnl_waitforobj(obj, reason, mode, alertable, duetime)
obj->dh_sigstate = FALSE;
km->km_acquirecnt++;
km->km_ownerthread = curthread->td_proc;
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return (STATUS_SUCCESS);
}
} else if (obj->dh_sigstate == TRUE) {
if (obj->dh_type == EVENT_TYPE_SYNC)
obj->dh_sigstate = FALSE;
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return (STATUS_SUCCESS);
}
@@ -496,22 +492,18 @@ ntoskrnl_waitforobj(obj, reason, mode, alertable, duetime)
}
}
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
- if (td->td_proc->p_flag & P_KTHREAD)
- error = kthread_suspend(td->td_proc,
- duetime == NULL ? 0 : tvtohz(&tv));
- else
- error = tsleep(td, PPAUSE|PDROP, "ndisws",
- duetime == NULL ? 0 : tvtohz(&tv));
+ error = ndis_thsuspend(td->td_proc,
+ duetime == NULL ? 0 : tvtohz(&tv));
- mtx_pool_lock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_lock(&ntoskrnl_dispatchlock);
/* We timed out. Leave the object alone and return status. */
if (error == EWOULDBLOCK) {
REMOVE_LIST_ENTRY((&w.wb_waitlist));
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return(STATUS_TIMEOUT);
}
@@ -534,7 +526,7 @@ ntoskrnl_waitforobj(obj, reason, mode, alertable, duetime)
obj->dh_sigstate = FALSE;
REMOVE_LIST_ENTRY((&w.wb_waitlist));
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return(STATUS_SUCCESS);
}
@@ -565,7 +557,7 @@ ntoskrnl_waitforobjs(cnt, obj, wtype, reason, mode,
if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
return(STATUS_INVALID_PARAMETER);
- mtx_pool_lock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_lock(&ntoskrnl_dispatchlock);
if (wb_array == NULL)
w = &_wb_array[0];
@@ -583,8 +575,7 @@ ntoskrnl_waitforobjs(cnt, obj, wtype, reason, mode,
km->km_acquirecnt++;
km->km_ownerthread = curthread->td_proc;
if (wtype == WAITTYPE_ANY) {
- mtx_pool_unlock(ndis_mtxpool,
- ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return (STATUS_WAIT_0 + i);
}
}
@@ -592,8 +583,7 @@ ntoskrnl_waitforobjs(cnt, obj, wtype, reason, mode,
if (obj[i]->dh_type == EVENT_TYPE_SYNC)
obj[i]->dh_sigstate = FALSE;
if (wtype == WAITTYPE_ANY) {
- mtx_pool_unlock(ndis_mtxpool,
- ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return (STATUS_WAIT_0 + i);
}
}
@@ -633,16 +623,12 @@ ntoskrnl_waitforobjs(cnt, obj, wtype, reason, mode,
while (wcnt) {
nanotime(&t1);
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
- if (td->td_proc->p_flag & P_KTHREAD)
- error = kthread_suspend(td->td_proc,
- duetime == NULL ? 0 : tvtohz(&tv));
- else
- error = tsleep(td, PPAUSE|PDROP, "ndisws",
- duetime == NULL ? 0 : tvtohz(&tv));
+ error = ndis_thsuspend(td->td_proc,
+ duetime == NULL ? 0 : tvtohz(&tv));
- mtx_pool_lock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_lock(&ntoskrnl_dispatchlock);
nanotime(&t2);
for (i = 0; i < cnt; i++) {
@@ -678,16 +664,16 @@ ntoskrnl_waitforobjs(cnt, obj, wtype, reason, mode,
}
if (error == EWOULDBLOCK) {
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return(STATUS_TIMEOUT);
}
if (wtype == WAITTYPE_ANY && wcnt) {
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return(STATUS_WAIT_0 + widx);
}
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return(STATUS_SUCCESS);
}
@@ -880,9 +866,12 @@ ntoskrnl_init_lookaside(lookaside, allocfunc, freefunc,
uint32_t tag;
uint16_t depth;
{
- struct mtx *mtx;
+ bzero((char *)lookaside, sizeof(paged_lookaside_list));
- lookaside->nll_l.gl_size = size;
+ if (size < sizeof(slist_entry))
+ lookaside->nll_l.gl_size = sizeof(slist_entry);
+ else
+ lookaside->nll_l.gl_size = size;
lookaside->nll_l.gl_tag = tag;
if (allocfunc == NULL)
lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
@@ -894,8 +883,7 @@ ntoskrnl_init_lookaside(lookaside, allocfunc, freefunc,
else
lookaside->nll_l.gl_freefunc = freefunc;
- mtx = mtx_pool_alloc(ndis_mtxpool);
- lookaside->nll_obsoletelock = (kspin_lock)mtx;
+ ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
@@ -928,8 +916,6 @@ ntoskrnl_init_nplookaside(lookaside, allocfunc, freefunc,
uint32_t tag;
uint16_t depth;
{
- struct mtx *mtx;
-
bzero((char *)lookaside, sizeof(npaged_lookaside_list));
if (size < sizeof(slist_entry))
@@ -947,8 +933,7 @@ ntoskrnl_init_nplookaside(lookaside, allocfunc, freefunc,
else
lookaside->nll_l.gl_freefunc = freefunc;
- mtx = mtx_pool_alloc(ndis_mtxpool);
- lookaside->nll_obsoletelock = (kspin_lock)mtx;
+ ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
@@ -987,9 +972,8 @@ ntoskrnl_push_slist(/*head, entry*/ void)
__asm__ __volatile__ ("" : "=c" (head), "=d" (entry));
- mtx_pool_lock(ndis_mtxpool, ntoskrnl_interlock);
- oldhead = ntoskrnl_pushsl(head, entry);
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_interlock);
+ oldhead = (slist_entry *)FASTCALL3(ntoskrnl_push_slist_ex,
+ head, entry, &ntoskrnl_global);
return(oldhead);
}
@@ -1002,9 +986,8 @@ ntoskrnl_pop_slist(/*head*/ void)
__asm__ __volatile__ ("" : "=c" (head));
- mtx_pool_lock(ndis_mtxpool, ntoskrnl_interlock);
- first = ntoskrnl_popsl(head);
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_interlock);
+ first = (slist_entry *)FASTCALL2(ntoskrnl_pop_slist_ex,
+ head, &ntoskrnl_global);
return(first);
}
@@ -1016,12 +999,13 @@ ntoskrnl_push_slist_ex(/*head, entry,*/ lock)
slist_header *head;
slist_entry *entry;
slist_entry *oldhead;
+ uint8_t irql;
__asm__ __volatile__ ("" : "=c" (head), "=d" (entry));
- mtx_pool_lock(ndis_mtxpool, (struct mtx *)*lock);
+ irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
oldhead = ntoskrnl_pushsl(head, entry);
- mtx_pool_unlock(ndis_mtxpool, (struct mtx *)*lock);
+ FASTCALL2(hal_unlock, lock, irql);
return(oldhead);
}
@@ -1032,36 +1016,38 @@ ntoskrnl_pop_slist_ex(/*head, lock*/ void)
slist_header *head;
kspin_lock *lock;
slist_entry *first;
+ uint8_t irql;
__asm__ __volatile__ ("" : "=c" (head), "=d" (lock));
- mtx_pool_lock(ndis_mtxpool, (struct mtx *)*lock);
+ irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
first = ntoskrnl_popsl(head);
- mtx_pool_unlock(ndis_mtxpool, (struct mtx *)*lock);
+ FASTCALL2(hal_unlock, lock, irql);
return(first);
}
-__stdcall static void
+__stdcall void
ntoskrnl_lock_dpc(/*lock*/ void)
{
kspin_lock *lock;
__asm__ __volatile__ ("" : "=c" (lock));
- mtx_pool_lock(ndis_mtxpool, (struct mtx *)*lock);
+ while (atomic_cmpset_int((volatile u_int *)lock, 0, 1) == 0)
+ /* do nothing */;
return;
}
-__stdcall static void
+__stdcall void
ntoskrnl_unlock_dpc(/*lock*/ void)
{
kspin_lock *lock;
__asm__ __volatile__ ("" : "=c" (lock));
- mtx_pool_unlock(ndis_mtxpool, (struct mtx *)*lock);
+ atomic_cmpset_int((volatile u_int *)lock, 1, 0);
return;
}
@@ -1093,12 +1079,13 @@ ntoskrnl_interlock_addstat(/*addend, inc*/)
{
uint64_t *addend;
uint32_t inc;
+ uint8_t irql;
__asm__ __volatile__ ("" : "=c" (addend), "=d" (inc));
- mtx_pool_lock(ndis_mtxpool, ntoskrnl_interlock);
+ irql = FASTCALL2(hal_lock, &ntoskrnl_global, DISPATCH_LEVEL);
*addend += inc;
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_interlock);
+ FASTCALL2(hal_unlock, &ntoskrnl_global, irql);
return;
};
@@ -1196,7 +1183,7 @@ __stdcall static void
ntoskrnl_init_lock(lock)
kspin_lock *lock;
{
- *lock = (kspin_lock)mtx_pool_alloc(ndis_mtxpool);
+ *lock = 0;
return;
}
@@ -1423,18 +1410,18 @@ ntoskrnl_release_mutex(kmutex, kwait)
kmutant *kmutex;
uint8_t kwait;
{
- mtx_pool_lock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_lock(&ntoskrnl_dispatchlock);
if (kmutex->km_ownerthread != curthread->td_proc) {
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return(STATUS_MUTANT_NOT_OWNED);
}
kmutex->km_acquirecnt--;
if (kmutex->km_acquirecnt == 0) {
kmutex->km_ownerthread = NULL;
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
ntoskrnl_wakeup(&kmutex->km_header);
} else
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return(kmutex->km_acquirecnt);
}
@@ -1465,10 +1452,10 @@ ntoskrnl_reset_event(kevent)
{
uint32_t prevstate;
- mtx_pool_lock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_lock(&ntoskrnl_dispatchlock);
prevstate = kevent->k_header.dh_sigstate;
kevent->k_header.dh_sigstate = FALSE;
- mtx_pool_unlock(ndis_mtxpool, ntoskrnl_dispatchlock);
+ mtx_unlock(&ntoskrnl_dispatchlock);
return(prevstate);
}
@@ -1628,7 +1615,7 @@ ntoskrnl_thread_exit(status)
ntoskrnl_kth--;
- kthread_exit(0);
+ kthread_exit(0);
return(0); /* notreached */
}
@@ -1721,7 +1708,8 @@ ntoskrnl_init_timer_ex(timer, type)
/*
* This is a wrapper for Windows deferred procedure calls that
* have been placed on an NDIS thread work queue. We need it
- * since the DPC could be a _stdcall function.
+ * since the DPC could be a _stdcall function. Also, as far as
+ * I can tell, deferred procedure calls must run at DISPATCH_LEVEL.
*/
static void
ntoskrnl_run_dpc(arg)
@@ -1729,10 +1717,13 @@ ntoskrnl_run_dpc(arg)
{
__stdcall kdpc_func dpcfunc;
kdpc *dpc;
+ uint8_t irql;
dpc = arg;
dpcfunc = (kdpc_func)dpc->k_deferedfunc;
+ irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
dpcfunc(dpc, dpc->k_deferredctx, dpc->k_sysarg1, dpc->k_sysarg2);
+ FASTCALL1(hal_lower_irql, irql);
return;
}
diff --git a/sys/dev/if_ndis/if_ndis.c b/sys/dev/if_ndis/if_ndis.c
index 901b313..f7467d9 100644
--- a/sys/dev/if_ndis/if_ndis.c
+++ b/sys/dev/if_ndis/if_ndis.c
@@ -70,6 +70,7 @@ __FBSDID("$FreeBSD$");
#include <compat/ndis/pe_var.h>
#include <compat/ndis/resource_var.h>
+#include <compat/ndis/hal_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/ndis_var.h>
#include <compat/ndis/cfg_var.h>
@@ -371,8 +372,10 @@ ndis_attach(dev)
sc = device_get_softc(dev);
- sc->ndis_mtx = mtx_pool_alloc(ndis_mtxpool);
- sc->ndis_intrmtx = mtx_pool_alloc(ndis_mtxpool);
+ mtx_init(&sc->ndis_mtx, "ndis softc lock",
+ MTX_NETWORK_LOCK, MTX_DEF);
+ mtx_init(&sc->ndis_intrmtx,
+ "ndis irq lock", MTX_NETWORK_LOCK, MTX_DEF);
/*
* Hook interrupt early, since calling the driver's
@@ -492,7 +495,6 @@ ndis_attach(dev)
/* Do media setup */
if (sc->ndis_80211) {
struct ieee80211com *ic = (void *)ifp;
- ndis_80211_config config;
ndis_80211_rates_ex rates;
struct ndis_80211_nettype_list *ntl;
uint32_t arg;
@@ -650,27 +652,11 @@ nonettypes:
r = ndis_get_info(sc, OID_802_11_POWER_MODE, &arg, &i);
if (r == 0)
ic->ic_caps |= IEEE80211_C_PMGT;
- i = sizeof(config);
- bzero((char *)&config, sizeof(config));
- config.nc_length = i;
- config.nc_fhconfig.ncf_length = sizeof(ndis_80211_config_fh);
- r = ndis_get_info(sc, OID_802_11_CONFIGURATION, &config, &i);
- if (r == 0) {
- int chan;
- chan = ieee80211_mhz2ieee(config.nc_dsconfig / 1000, 0);
- if (chan < 0 || chan >= IEEE80211_CHAN_MAX) {
- ic->ic_ibss_chan = &ic->ic_channels[1];
- } else
- ic->ic_ibss_chan = &ic->ic_channels[chan];
- } else {
- device_printf(sc->ndis_dev, "couldn't retrieve "
- "channel info: %d\n", r);
- ic->ic_ibss_chan = &ic->ic_channels[1];
- }
bcopy(eaddr, &ic->ic_myaddr, sizeof(eaddr));
ieee80211_ifattach(ifp);
ieee80211_media_init(ifp, ieee80211_media_change,
ndis_media_status);
+ ic->ic_ibss_chan = IEEE80211_CHAN_ANYC;
ic->ic_bss->ni_chan = ic->ic_ibss_chan;
} else {
ifmedia_init(&sc->ifmedia, IFM_IMASK, ndis_ifmedia_upd,
@@ -757,6 +743,9 @@ ndis_detach(dev)
sysctl_ctx_free(&sc->ndis_ctx);
+ mtx_destroy(&sc->ndis_mtx);
+ mtx_destroy(&sc->ndis_intrmtx);
+
return(0);
}
@@ -1073,8 +1062,6 @@ ndis_linksts_done(adapter)
ndis_sched(ndis_starttask, ifp, NDIS_TASKQUEUE);
break;
case NDIS_STATUS_MEDIA_DISCONNECT:
- if (sc->ndis_80211)
- ndis_getstate_80211(sc);
if (sc->ndis_link)
ndis_sched(ndis_ticktask, sc, NDIS_TASKQUEUE);
break;
@@ -1091,14 +1078,17 @@ ndis_intrtask(arg)
{
struct ndis_softc *sc;
struct ifnet *ifp;
+ uint8_t irql;
sc = arg;
ifp = &sc->arpcom.ac_if;
+ irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
ndis_intrhand(sc);
- mtx_pool_lock(ndis_mtxpool, sc->ndis_intrmtx);
+ FASTCALL1(hal_lower_irql, irql);
+ mtx_lock(&sc->ndis_intrmtx);
ndis_enable_intr(sc);
- mtx_pool_unlock(ndis_mtxpool, sc->ndis_intrmtx);
+ mtx_unlock(&sc->ndis_intrmtx);
return;
}
@@ -1118,14 +1108,14 @@ ndis_intr(arg)
if (sc->ndis_block.nmb_miniportadapterctx == NULL)
return;
- mtx_pool_lock(ndis_mtxpool, sc->ndis_intrmtx);
+ mtx_lock(&sc->ndis_intrmtx);
if (sc->ndis_block.nmb_interrupt->ni_isrreq == TRUE)
ndis_isr(sc, &is_our_intr, &call_isr);
else {
ndis_disable_intr(sc);
call_isr = 1;
}
- mtx_pool_unlock(ndis_mtxpool, sc->ndis_intrmtx);
+ mtx_unlock(&sc->ndis_intrmtx);
if ((is_our_intr || call_isr))
ndis_sched(ndis_intrtask, ifp, NDIS_SWI);
@@ -1617,32 +1607,22 @@ ndis_setstate_80211(sc)
device_printf (sc->ndis_dev, "set auth failed: %d\n", rval);
#endif
- /* Set SSID. */
-
- len = sizeof(ssid);
- bzero((char *)&ssid, len);
- ssid.ns_ssidlen = ic->ic_des_esslen;
- if (ssid.ns_ssidlen == 0) {
- ssid.ns_ssidlen = 1;
- } else
- bcopy(ic->ic_des_essid, ssid.ns_ssid, ssid.ns_ssidlen);
- rval = ndis_set_info(sc, OID_802_11_SSID, &ssid, &len);
-
- if (rval)
- device_printf (sc->ndis_dev, "set ssid failed: %d\n", rval);
-
len = sizeof(config);
bzero((char *)&config, len);
config.nc_length = len;
config.nc_fhconfig.ncf_length = sizeof(ndis_80211_config_fh);
- rval = ndis_get_info(sc, OID_802_11_CONFIGURATION, &config, &len);
- if (rval == 0) {
- int chan;
+ rval = ndis_get_info(sc, OID_802_11_CONFIGURATION, &config, &len);
- chan = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
+ if (rval == 0 && ic->ic_ibss_chan != IEEE80211_CHAN_ANYC) {
+ int chan, chanflag;
+
+ chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
+ chanflag = config.nc_dsconfig > 2500000 ? IEEE80211_CHAN_2GHZ :
+ IEEE80211_CHAN_5GHZ;
if (chan != ieee80211_mhz2ieee(config.nc_dsconfig / 1000, 0)) {
config.nc_dsconfig =
- ic->ic_bss->ni_chan->ic_freq * 1000;
+ ic->ic_ibss_chan->ic_freq * 1000;
+ ic->ic_bss->ni_chan = ic->ic_ibss_chan;
len = sizeof(config);
config.nc_length = len;
config.nc_fhconfig.ncf_length =
@@ -1654,10 +1634,24 @@ ndis_setstate_80211(sc)
"DS config to %ukHz: %d\n",
config.nc_dsconfig, rval);
}
- } else
+ } else if (rval)
device_printf(sc->ndis_dev, "couldn't retrieve "
"channel info: %d\n", rval);
+ /* Set SSID -- always do this last. */
+
+ len = sizeof(ssid);
+ bzero((char *)&ssid, len);
+ ssid.ns_ssidlen = ic->ic_des_esslen;
+ if (ssid.ns_ssidlen == 0) {
+ ssid.ns_ssidlen = 1;
+ } else
+ bcopy(ic->ic_des_essid, ssid.ns_ssid, ssid.ns_ssidlen);
+ rval = ndis_set_info(sc, OID_802_11_SSID, &ssid, &len);
+
+ if (rval)
+ device_printf (sc->ndis_dev, "set ssid failed: %d\n", rval);
+
return;
}
diff --git a/sys/dev/if_ndis/if_ndisvar.h b/sys/dev/if_ndis/if_ndisvar.h
index c2bc6b6..2b2f47f 100644
--- a/sys/dev/if_ndis/if_ndisvar.h
+++ b/sys/dev/if_ndis/if_ndisvar.h
@@ -32,6 +32,9 @@
* $FreeBSD$
*/
+#define NDIS_DEFAULT_NODENAME "FreeBSD NDIS node"
+#define NDIS_NODENAME_LEN 32
+
struct ndis_pci_type {
uint16_t ndis_vid;
uint16_t ndis_did;
@@ -87,8 +90,8 @@ struct ndis_softc {
struct resource *ndis_res_am; /* attribute mem (pccard) */
struct resource *ndis_res_cm; /* common mem (pccard) */
int ndis_rescnt;
- struct mtx *ndis_mtx;
- struct mtx *ndis_intrmtx;
+ struct mtx ndis_mtx;
+ struct mtx ndis_intrmtx;
device_t ndis_dev;
int ndis_unit;
ndis_miniport_block ndis_block;
@@ -124,6 +127,6 @@ struct ndis_softc {
int ndis_mmapcnt;
};
-#define NDIS_LOCK(_sc) mtx_pool_lock(ndis_mtxpool, (_sc)->ndis_mtx)
-#define NDIS_UNLOCK(_sc) mtx_pool_unlock(ndis_mtxpool, (_sc)->ndis_mtx)
+#define NDIS_LOCK(_sc) mtx_lock(&(_sc)->ndis_mtx)
+#define NDIS_UNLOCK(_sc) mtx_unlock(&(_sc)->ndis_mtx)