author     wpaul <wpaul@FreeBSD.org>    2005-05-05 03:56:09 +0000
committer  wpaul <wpaul@FreeBSD.org>    2005-05-05 03:56:09 +0000
commit     e9bace5ba16735b4fd2dbe727166b3720fbf5101 (patch)
tree       30ee12e28c23fde2935b1a325aa64886d934d365 /sys/compat
parent     e5184a989dd8bb5df40b429933d7a7222b058dfd (diff)
This commit makes a bunch of changes, some big, some not so big.
- Remove the old task threads from kern_ndis.c and reimplement them in
  subr_ntoskrnl.c, in order to more properly emulate the Windows DPC
  API. Each CPU gets its own DPC queue/thread, and each queue can have
  low, medium and high importance DPCs. New APIs implemented:
  KeSetTargetProcessorDpc(), KeSetImportanceDpc() and
  KeFlushQueuedDpcs(). (This is the biggest change.) A usage sketch of
  these calls follows this list.
- Fix a bug in NdisMInitializeTimer(): the k_dpc pointer in the
  nmt_ktimer embedded in the ndis_miniport_timer struct must be set to
  point to the DPC, also embedded in the struct. Failing to do this
  breaks dequeueing of DPCs submitted via timers, and in turn breaks
  cancelling timers.
- Fix a bug in KeCancelTimer(): if the timer is inserted in the timer
  queue (i.e. the timeout callback is still pending), we have to both
  untimeout() the timer _and_ call KeRemoveQueueDpc() to nuke the DPC
  that might be pending. Failing to do this breaks cancellation of
  periodic timers, which always appear to be inserted in the timer
  queue.
- Make use of the nmt_nexttimer field in ndis_miniport_timer: keep a
  queue of pending timers and cancel them all in ndis_halt_nic(), prior
  to calling MiniportHalt(). Also call KeFlushQueuedDpcs() to make sure
  any DPCs queued by the timers have expired. (A sketch of the
  timer/DPC pairing these fixes rely on follows the diffstat below.)
- Modify NdisMAllocateSharedMemory() and NdisMFreeSharedMemory() to
  keep track of both the virtual and physical addresses of the shared
  memory buffers that get handed out. The AirGo MIMO driver appears to
  have a bug in it: for one of the segments it allocates, it returns
  the wrong virtual address. This would confuse NdisMFreeSharedMemory()
  and cause a crash. Why it doesn't crash Windows too I have no idea
  (from reading the documentation for NdisMFreeSharedMemory(), it
  appears to be a violation of the API).
- Implement strstr(), strchr() and MmIsAddressValid().
- Implement IoAllocateWorkItem(), IoFreeWorkItem(), IoQueueWorkItem()
  and ExQueueWorkItem(). (This is the second biggest change.)
- Make NdisScheduleWorkItem() call ExQueueWorkItem(). (Note that the
  ExQueueWorkItem() API is deprecated by Microsoft, but NDIS still uses
  it, since NdisScheduleWorkItem() is incompatible with the
  IoXXXWorkItem() API.) A sketch contrasting the two work item
  interfaces appears after the diff.
- Change if_ndis.c to use the NdisScheduleWorkItem() interface for
  scheduling tasks.

With all these changes and fixes, the AirGo MIMO driver for the Belkin
F5D8010 Pre-N card now works. Special thanks to Paul Robinson
(paul dawt robinson at pwermedia dawt net) for the loan of a card for
testing.
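To make the DPC changes easier to follow, here is a minimal, illustrative sketch (not code from this commit) of how the entry points added in subr_ntoskrnl.c are meant to be driven. It follows the pattern kern_ndis.c uses for ndis_return(): the callback is registered in an image_patch_table so that a windrv-wrapped (Windows calling convention) pointer can be handed to KeInitializeDpc(). All example_* names are invented; the prototypes are the ones declared in ntoskrnl_var.h in the diff below.

/*
 * Illustrative sketch only -- not part of this commit.
 */
#include <compat/ndis/pe_var.h>		/* plus the usual sys/ headers */
#include <compat/ndis/ntoskrnl_var.h>

static void example_dpc_func(kdpc *, void *, void *, void *);

static image_patch_table example_functbl[] = {
	IMPORT_SFUNC(example_dpc_func, 4),
	{ NULL, NULL, NULL }
};

static void
example_dpc_func(dpc, ctx, sysarg1, sysarg2)
	kdpc			*dpc;
	void			*ctx;
	void			*sysarg1;
	void			*sysarg2;
{
	/* Runs on this CPU's "Windows DPC" kthread at (emulated)
	   DISPATCH_LEVEL. */
	return;
}

static kdpc example_dpc;

static void
example_dpc_usage(softc)
	void			*softc;
{
	/*
	 * Assumes example_functbl[] was windrv_wrap()ed at module load,
	 * the way ndis_modevent() wraps kernndis_functbl[].
	 */
	KeInitializeDpc(&example_dpc, example_functbl[0].ipt_wrap, softc);

	/*
	 * The defaults are medium importance on the current CPU; both
	 * can be overridden before queueing.
	 */
	KeSetImportanceDpc(&example_dpc, KDPC_IMPORTANCE_HIGH);
	KeSetTargetProcessorDpc(&example_dpc, 0);

	/* Returns FALSE if this DPC is already sitting in a queue. */
	KeInsertQueueDpc(&example_dpc, NULL, NULL);

	/*
	 * Teardown: dequeue if still pending, then drain every CPU's
	 * DPC queue before freeing anything the DPC might touch.
	 */
	KeRemoveQueueDpc(&example_dpc);
	KeFlushQueuedDpcs();

	return;
}

Note that KeInitializeDpc() now defaults the DPC to medium importance on the current CPU, so the two Set calls above are only needed to override that.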
Diffstat (limited to 'sys/compat')
-rw-r--r--  sys/compat/ndis/kern_ndis.c      447
-rw-r--r--  sys/compat/ndis/ndis_var.h        48
-rw-r--r--  sys/compat/ndis/ntoskrnl_var.h    72
-rw-r--r--  sys/compat/ndis/subr_hal.c         2
-rw-r--r--  sys/compat/ndis/subr_ndis.c      192
-rw-r--r--  sys/compat/ndis/subr_ntoskrnl.c  580
-rw-r--r--  sys/compat/ndis/subr_usbd.c        2
7 files changed, 896 insertions, 447 deletions
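Before the kern_ndis.c hunks, a short sketch (again illustrative only, not taken from the diff) of the ktimer/kdpc pairing that the timer fixes described in the message rely on. example_dpc stands for a kdpc initialized with a windrv-wrapped callback, as in the sketch after the commit message; KeSetTimer() and KeCancelTimer() are declared in the existing ntoskrnl_var.h.

static ktimer example_timer;
static kdpc example_dpc;		/* initialized elsewhere */

static void
example_timer_usage()
{
	int64_t			duetime;

	KeInitializeTimer(&example_timer);

	/*
	 * Mirror of the NdisMInitializeTimer() fix: link the timer to
	 * its DPC so that KeCancelTimer() can find and dequeue a DPC
	 * that is already pending.
	 */
	example_timer.k_dpc = &example_dpc;

	/* Negative due times are relative, in 100ns units: 1 second. */
	duetime = -10000000;
	KeSetTimer(&example_timer, duetime, &example_dpc);

	/*
	 * With the KeCancelTimer() fix, this both untimeout()s the
	 * pending callout and KeRemoveQueueDpc()s any DPC the timer
	 * already queued, so periodic timers really stop.
	 */
	KeCancelTimer(&example_timer);

	return;
}

The NDIS_REAP_TIMERS block added to ndis_halt_nic() below walks nmb_timerlist doing exactly this cancel-then-KeFlushQueuedDpcs() sequence for any timers the driver left behind.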
diff --git a/sys/compat/ndis/kern_ndis.c b/sys/compat/ndis/kern_ndis.c
index 56832ea..0dfcb53 100644
--- a/sys/compat/ndis/kern_ndis.c
+++ b/sys/compat/ndis/kern_ndis.c
@@ -84,6 +84,7 @@ static void ndis_resetdone_func(ndis_handle, ndis_status, uint8_t);
static void ndis_sendrsrcavail_func(ndis_handle);
static void ndis_intrhand(kdpc *, device_object *,
irp *, struct ndis_softc *);
+static void ndis_return(kdpc *, void *, void *, void *);
static image_patch_table kernndis_functbl[] = {
IMPORT_SFUNC(ndis_status_func, 4),
@@ -93,42 +94,14 @@ static image_patch_table kernndis_functbl[] = {
IMPORT_SFUNC(ndis_resetdone_func, 3),
IMPORT_SFUNC(ndis_sendrsrcavail_func, 1),
IMPORT_SFUNC(ndis_intrhand, 4),
+ IMPORT_SFUNC(ndis_return, 1),
{ NULL, NULL, NULL }
};
struct nd_head ndis_devhead;
-struct ndis_req {
- void (*nr_func)(void *);
- void *nr_arg;
- int nr_exit;
- STAILQ_ENTRY(ndis_req) link;
-};
-
-struct ndisproc {
- struct ndisqhead *np_q;
- struct proc *np_p;
- int np_state;
-};
-
-static void ndis_return(void *);
-static int ndis_create_kthreads(void);
-static void ndis_destroy_kthreads(void);
-static void ndis_stop_thread(int);
-static int ndis_enlarge_thrqueue(int);
-static int ndis_shrink_thrqueue(int);
-static void ndis_runq(void *);
-
-static struct mtx ndis_thr_mtx;
static struct mtx ndis_req_mtx;
-static STAILQ_HEAD(ndisqhead, ndis_req) ndis_ttodo;
-static struct ndisqhead ndis_itodo;
-static struct ndisqhead ndis_free;
-static int ndis_jobs = 32;
-
-static struct ndisproc ndis_tproc;
-static struct ndisproc ndis_iproc;
/*
* This allows us to export our symbols to other modules.
@@ -160,14 +133,13 @@ ndis_modevent(module_t mod, int cmd, void *arg)
patch++;
}
- ndis_create_kthreads();
-
TAILQ_INIT(&ndis_devhead);
+ mtx_init(&ndis_req_mtx, "NDIS request lock",
+ MTX_NDIS_LOCK, MTX_DEF);
+
break;
case MOD_SHUTDOWN:
- /* stop kthreads */
- ndis_destroy_kthreads();
if (TAILQ_FIRST(&ndis_devhead) == NULL) {
/* Shut down subsystems */
hal_libfini();
@@ -181,12 +153,10 @@ ndis_modevent(module_t mod, int cmd, void *arg)
windrv_unwrap(patch->ipt_wrap);
patch++;
}
+ mtx_destroy(&ndis_req_mtx);
}
break;
case MOD_UNLOAD:
- /* stop kthreads */
- ndis_destroy_kthreads();
-
/* Shut down subsystems */
hal_libfini();
ndis_libfini();
@@ -200,6 +170,8 @@ ndis_modevent(module_t mod, int cmd, void *arg)
patch++;
}
+ mtx_destroy(&ndis_req_mtx);
+
break;
default:
error = EINVAL;
@@ -211,326 +183,6 @@ ndis_modevent(module_t mod, int cmd, void *arg)
DEV_MODULE(ndisapi, ndis_modevent, NULL);
MODULE_VERSION(ndisapi, 1);
-/*
- * We create two kthreads for the NDIS subsystem. One of them is a task
- * queue for performing various odd jobs. The other is an swi thread
- * reserved exclusively for running interrupt handlers. The reason we
- * have our own task queue is that there are some cases where we may
- * need to sleep for a significant amount of time, and if we were to
- * use one of the taskqueue threads, we might delay the processing
- * of other pending tasks which might need to run right away. We have
- * a separate swi thread because we don't want our interrupt handling
- * to be delayed either.
- *
- * By default there are 32 jobs available to start, and another 8
- * are added to the free list each time a new device is created.
- */
-
-static void
-ndis_runq(arg)
- void *arg;
-{
- struct ndis_req *r = NULL, *die = NULL;
- struct ndisproc *p;
-
- p = arg;
-
- while (1) {
-
- /* Sleep, but preserve our original priority. */
- ndis_thsuspend(p->np_p, NULL, 0);
-
- /* Look for any jobs on the work queue. */
-
- mtx_lock_spin(&ndis_thr_mtx);
- p->np_state = NDIS_PSTATE_RUNNING;
- while(STAILQ_FIRST(p->np_q) != NULL) {
- r = STAILQ_FIRST(p->np_q);
- STAILQ_REMOVE_HEAD(p->np_q, link);
- mtx_unlock_spin(&ndis_thr_mtx);
-
- /* Do the work. */
-
- if (r->nr_func != NULL)
- (*r->nr_func)(r->nr_arg);
-
- mtx_lock_spin(&ndis_thr_mtx);
- STAILQ_INSERT_HEAD(&ndis_free, r, link);
-
- /* Check for a shutdown request */
-
- if (r->nr_exit == TRUE)
- die = r;
- }
- p->np_state = NDIS_PSTATE_SLEEPING;
- mtx_unlock_spin(&ndis_thr_mtx);
-
- /* Bail if we were told to shut down. */
-
- if (die != NULL)
- break;
- }
-
- wakeup(die);
-#if __FreeBSD_version < 502113
- mtx_lock(&Giant);
-#endif
- kthread_exit(0);
- return; /* notreached */
-}
-
-static int
-ndis_create_kthreads()
-{
- struct ndis_req *r;
- int i, error = 0;
-
- mtx_init(&ndis_thr_mtx, "NDIS thread lock", NULL, MTX_SPIN);
- mtx_init(&ndis_req_mtx, "NDIS request lock", MTX_NDIS_LOCK, MTX_DEF);
-
- STAILQ_INIT(&ndis_ttodo);
- STAILQ_INIT(&ndis_itodo);
- STAILQ_INIT(&ndis_free);
-
- for (i = 0; i < ndis_jobs; i++) {
- r = malloc(sizeof(struct ndis_req), M_DEVBUF, M_WAITOK);
- if (r == NULL) {
- error = ENOMEM;
- break;
- }
- STAILQ_INSERT_HEAD(&ndis_free, r, link);
- }
-
- if (error == 0) {
- ndis_tproc.np_q = &ndis_ttodo;
- ndis_tproc.np_state = NDIS_PSTATE_SLEEPING;
- error = kthread_create(ndis_runq, &ndis_tproc,
- &ndis_tproc.np_p, RFHIGHPID,
- NDIS_KSTACK_PAGES, "ndis taskqueue");
- }
-
- if (error == 0) {
- ndis_iproc.np_q = &ndis_itodo;
- ndis_iproc.np_state = NDIS_PSTATE_SLEEPING;
- error = kthread_create(ndis_runq, &ndis_iproc,
- &ndis_iproc.np_p, RFHIGHPID,
- NDIS_KSTACK_PAGES, "ndis swi");
- }
-
- if (error) {
- while ((r = STAILQ_FIRST(&ndis_free)) != NULL) {
- STAILQ_REMOVE_HEAD(&ndis_free, link);
- free(r, M_DEVBUF);
- }
- return(error);
- }
-
- return(0);
-}
-
-static void
-ndis_destroy_kthreads()
-{
- struct ndis_req *r;
-
- /* Stop the threads. */
-
- ndis_stop_thread(NDIS_TASKQUEUE);
- ndis_stop_thread(NDIS_SWI);
-
- /* Destroy request structures. */
-
- while ((r = STAILQ_FIRST(&ndis_free)) != NULL) {
- STAILQ_REMOVE_HEAD(&ndis_free, link);
- free(r, M_DEVBUF);
- }
-
- mtx_destroy(&ndis_req_mtx);
- mtx_destroy(&ndis_thr_mtx);
-
- return;
-}
-
-static void
-ndis_stop_thread(t)
- int t;
-{
- struct ndis_req *r;
- struct ndisqhead *q;
- struct proc *p;
-
- if (t == NDIS_TASKQUEUE) {
- q = &ndis_ttodo;
- p = ndis_tproc.np_p;
- } else {
- q = &ndis_itodo;
- p = ndis_iproc.np_p;
- }
-
- /* Create and post a special 'exit' job. */
-
- mtx_lock_spin(&ndis_thr_mtx);
- r = STAILQ_FIRST(&ndis_free);
- STAILQ_REMOVE_HEAD(&ndis_free, link);
- r->nr_func = NULL;
- r->nr_arg = NULL;
- r->nr_exit = TRUE;
- STAILQ_INSERT_TAIL(q, r, link);
- mtx_unlock_spin(&ndis_thr_mtx);
-
- ndis_thresume(p);
-
- /* wait for thread exit */
-
- tsleep(r, curthread->td_priority|PCATCH, "ndisthexit", hz * 60);
-
- /* Now empty the job list. */
-
- mtx_lock_spin(&ndis_thr_mtx);
- while ((r = STAILQ_FIRST(q)) != NULL) {
- STAILQ_REMOVE_HEAD(q, link);
- STAILQ_INSERT_HEAD(&ndis_free, r, link);
- }
- mtx_unlock_spin(&ndis_thr_mtx);
-
- return;
-}
-
-static int
-ndis_enlarge_thrqueue(cnt)
- int cnt;
-{
- struct ndis_req *r;
- int i;
-
- for (i = 0; i < cnt; i++) {
- r = malloc(sizeof(struct ndis_req), M_DEVBUF, M_WAITOK);
- if (r == NULL)
- return(ENOMEM);
- mtx_lock_spin(&ndis_thr_mtx);
- STAILQ_INSERT_HEAD(&ndis_free, r, link);
- ndis_jobs++;
- mtx_unlock_spin(&ndis_thr_mtx);
- }
-
- return(0);
-}
-
-static int
-ndis_shrink_thrqueue(cnt)
- int cnt;
-{
- struct ndis_req *r;
- int i;
-
- for (i = 0; i < cnt; i++) {
- mtx_lock_spin(&ndis_thr_mtx);
- r = STAILQ_FIRST(&ndis_free);
- if (r == NULL) {
- mtx_unlock_spin(&ndis_thr_mtx);
- return(ENOMEM);
- }
- STAILQ_REMOVE_HEAD(&ndis_free, link);
- ndis_jobs--;
- mtx_unlock_spin(&ndis_thr_mtx);
- free(r, M_DEVBUF);
- }
-
- return(0);
-}
-
-int
-ndis_unsched(func, arg, t)
- void (*func)(void *);
- void *arg;
- int t;
-{
- struct ndis_req *r;
- struct ndisqhead *q;
- struct proc *p;
-
- if (t == NDIS_TASKQUEUE) {
- q = &ndis_ttodo;
- p = ndis_tproc.np_p;
- } else {
- q = &ndis_itodo;
- p = ndis_iproc.np_p;
- }
-
- mtx_lock_spin(&ndis_thr_mtx);
- STAILQ_FOREACH(r, q, link) {
- if (r->nr_func == func && r->nr_arg == arg) {
- STAILQ_REMOVE(q, r, ndis_req, link);
- STAILQ_INSERT_HEAD(&ndis_free, r, link);
- mtx_unlock_spin(&ndis_thr_mtx);
- return(0);
- }
- }
-
- mtx_unlock_spin(&ndis_thr_mtx);
-
- return(ENOENT);
-}
-
-int
-ndis_sched(func, arg, t)
- void (*func)(void *);
- void *arg;
- int t;
-{
- struct ndis_req *r;
- struct ndisqhead *q;
- struct proc *p;
- int s;
-
- if (t == NDIS_TASKQUEUE) {
- q = &ndis_ttodo;
- p = ndis_tproc.np_p;
- } else {
- q = &ndis_itodo;
- p = ndis_iproc.np_p;
- }
-
- mtx_lock_spin(&ndis_thr_mtx);
- /*
- * Check to see if an instance of this job is already
- * pending. If so, don't bother queuing it again.
- */
- STAILQ_FOREACH(r, q, link) {
- if (r->nr_func == func && r->nr_arg == arg) {
- mtx_unlock_spin(&ndis_thr_mtx);
- return(0);
- }
- }
- r = STAILQ_FIRST(&ndis_free);
- if (r == NULL) {
- mtx_unlock_spin(&ndis_thr_mtx);
- return(EAGAIN);
- }
- STAILQ_REMOVE_HEAD(&ndis_free, link);
- r->nr_func = func;
- r->nr_arg = arg;
- r->nr_exit = FALSE;
- STAILQ_INSERT_TAIL(q, r, link);
- if (t == NDIS_TASKQUEUE)
- s = ndis_tproc.np_state;
- else
- s = ndis_iproc.np_state;
- mtx_unlock_spin(&ndis_thr_mtx);
-
- /*
- * Post the job, but only if the thread is actually blocked
- * on its own suspend call. If a driver queues up a job with
- * NdisScheduleWorkItem() which happens to do a KeWaitForObject(),
- * it may suspend there, and in that case we don't want to wake
- * it up until KeWaitForObject() gets woken up on its own.
- */
- if (s == NDIS_PSTATE_SLEEPING)
- ndis_thresume(p);
-
- return(0);
-}
-
int
ndis_thsuspend(p, m, timo)
struct proc *p;
@@ -680,6 +332,7 @@ ndis_create_sysctls(arg)
while(1) {
if (vals->nc_cfgkey == NULL)
break;
+
if (vals->nc_idx != sc->ndis_devidx) {
vals++;
continue;
@@ -814,8 +467,11 @@ ndis_flush_sysctls(arg)
}
static void
-ndis_return(arg)
+ndis_return(dpc, arg, sysarg1, sysarg2)
+ kdpc *dpc;
void *arg;
+ void *sysarg1;
+ void *sysarg2;
{
struct ndis_softc *sc;
ndis_return_handler returnfunc;
@@ -858,7 +514,8 @@ ndis_return_packet(buf, arg)
if (p->np_refcnt)
return;
- ndis_sched(ndis_return, p, NDIS_TASKQUEUE);
+ KeInitializeDpc(&p->np_dpc, kernndis_functbl[7].ipt_wrap, p);
+ KeInsertQueueDpc(&p->np_dpc, NULL, NULL);
return;
}
@@ -948,6 +605,7 @@ ndis_convert_res(arg)
* in order to fix this, we have to create our own
* temporary list with the entries in reverse order.
*/
+
SLIST_FOREACH(brle, brl, link) {
n = malloc(sizeof(struct resource_list_entry),
M_TEMP, M_NOWAIT);
@@ -1180,6 +838,8 @@ ndis_set_info(arg, oid, buf, buflen)
sc = arg;
+ mtx_lock(&ndis_req_mtx);
+
KeAcquireSpinLock(&sc->ndis_block->nmb_lock, &irql);
if (sc->ndis_block->nmb_pendingreq != NULL)
@@ -1193,6 +853,7 @@ ndis_set_info(arg, oid, buf, buflen)
if (adapter == NULL || setfunc == NULL) {
sc->ndis_block->nmb_pendingreq = NULL;
KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql);
+ mtx_unlock(&ndis_req_mtx);
return(ENXIO);
}
@@ -1204,13 +865,13 @@ ndis_set_info(arg, oid, buf, buflen)
KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql);
if (rval == NDIS_STATUS_PENDING) {
- mtx_lock(&ndis_req_mtx);
error = msleep(&sc->ndis_block->nmb_setstat,
&ndis_req_mtx,
curthread->td_priority|PDROP,
"ndisset", 5 * hz);
rval = sc->ndis_block->nmb_setstat;
- }
+ } else
+ mtx_unlock(&ndis_req_mtx);
if (byteswritten)
@@ -1396,6 +1057,8 @@ ndis_reset_nic(arg)
if (adapter == NULL || resetfunc == NULL)
return(EIO);
+ mtx_lock(&ndis_req_mtx);
+
if (NDIS_SERIALIZED(sc->ndis_block))
KeAcquireSpinLock(&sc->ndis_block->nmb_lock, &irql);
@@ -1405,14 +1068,16 @@ ndis_reset_nic(arg)
KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql);
if (rval == NDIS_STATUS_PENDING) {
- mtx_lock(&ndis_req_mtx);
msleep(sc, &ndis_req_mtx,
curthread->td_priority|PDROP, "ndisrst", 0);
- }
+ } else
+ mtx_unlock(&ndis_req_mtx);
return(0);
}
+#define NDIS_REAP_TIMERS
+
int
ndis_halt_nic(arg)
void *arg;
@@ -1421,6 +1086,10 @@ ndis_halt_nic(arg)
ndis_handle adapter;
ndis_halt_handler haltfunc;
struct ifnet *ifp;
+#ifdef NDIS_REAP_TIMERS
+ ndis_miniport_timer *t, *n;
+ uint8_t irql;
+#endif
sc = arg;
ifp = &sc->arpcom.ac_if;
@@ -1432,6 +1101,28 @@ ndis_halt_nic(arg)
return(EIO);
}
+#ifdef NDIS_REAP_TIMERS
+ /*
+ * Drivers are sometimes very lax about cancelling all
+ * their timers. Cancel them all ourselves, just to be
+ * safe. We must do this before invoking MiniportHalt(),
+ * since if we wait until after, the memory in which
+ * the timers reside will no longer be valid.
+ */
+
+ KeAcquireSpinLock(&sc->ndis_block->nmb_lock, &irql);
+ t = sc->ndis_block->nmb_timerlist;
+ while (t != NULL) {
+ KeCancelTimer(&t->nmt_ktimer);
+ n = t;
+ t = t->nmt_nexttimer;
+ n->nmt_nexttimer = NULL;
+ }
+ sc->ndis_block->nmb_timerlist = NULL;
+ KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql);
+ KeFlushQueuedDpcs();
+#endif
+
/*
* The adapter context is only valid after the init
* handler has been called, and is invalid once the
@@ -1441,7 +1132,9 @@ ndis_halt_nic(arg)
haltfunc = sc->ndis_chars->nmc_halt_func;
NDIS_UNLOCK(sc);
+ mtx_lock(&ndis_req_mtx);
MSCALL1(haltfunc, adapter);
+ mtx_unlock(&ndis_req_mtx);
NDIS_LOCK(sc);
sc->ndis_block->nmb_miniportadapterctx = NULL;
@@ -1471,7 +1164,6 @@ ndis_shutdown_nic(arg)
else
MSCALL1(shutdownfunc, sc->ndis_chars->nmc_rsvd0);
- ndis_shrink_thrqueue(8);
TAILQ_REMOVE(&ndis_devhead, sc->ndis_block, link);
return(0);
@@ -1497,11 +1189,15 @@ ndis_init_nic(arg)
initfunc = sc->ndis_chars->nmc_init_func;
NDIS_UNLOCK(sc);
+ sc->ndis_block->nmb_timerlist = NULL;
+
for (i = 0; i < NdisMediumMax; i++)
mediumarray[i] = i;
+ mtx_lock(&ndis_req_mtx);
status = MSCALL6(initfunc, &openstatus, &chosenmedium,
mediumarray, NdisMediumMax, block, block);
+ mtx_unlock(&ndis_req_mtx);
/*
* If the init fails, blow away the other exported routines
@@ -1515,6 +1211,18 @@ ndis_init_nic(arg)
return(ENXIO);
}
+ /*
+ * This may look really goofy, but apparently it is possible
+ * to halt a miniport too soon after it's been initialized.
+ * After MiniportInitialize() finishes, pause for 1 second
+ * to give the chip a chance to handle any short-lived timers
+ * that were set in motion. If we call MiniportHalt() too soon,
+ * some of the timers may not be cancelled, because the driver
+ * expects them to fire before the halt is called.
+ */
+
+ ndis_thsuspend(curthread->td_proc, NULL, hz);
+
return(0);
}
@@ -1630,8 +1338,9 @@ ndis_get_info(arg, oid, buf, buflen)
uint32_t byteswritten = 0, bytesneeded = 0;
int error;
uint8_t irql;
-
+
sc = arg;
+ mtx_lock(&ndis_req_mtx);
KeAcquireSpinLock(&sc->ndis_block->nmb_lock, &irql);
if (sc->ndis_block->nmb_pendingreq != NULL)
@@ -1645,6 +1354,7 @@ ndis_get_info(arg, oid, buf, buflen)
if (adapter == NULL || queryfunc == NULL) {
sc->ndis_block->nmb_pendingreq = NULL;
KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql);
+ mtx_unlock(&ndis_req_mtx);
return(ENXIO);
}
@@ -1658,13 +1368,13 @@ ndis_get_info(arg, oid, buf, buflen)
/* Wait for requests that block. */
if (rval == NDIS_STATUS_PENDING) {
- mtx_lock(&ndis_req_mtx);
error = msleep(&sc->ndis_block->nmb_getstat,
&ndis_req_mtx,
curthread->td_priority|PDROP,
"ndisget", 5 * hz);
rval = sc->ndis_block->nmb_getstat;
- }
+ } else
+ mtx_unlock(&ndis_req_mtx);
if (byteswritten)
*buflen = byteswritten;
@@ -1719,7 +1429,9 @@ NdisAddDevice(drv, pdo)
sc->ndis_block = block;
sc->ndis_chars = IoGetDriverObjectExtension(drv, (void *)1);
+ /* Give interrupt handling priority over timers. */
IoInitializeDpcRequest(fdo, kernndis_functbl[6].ipt_wrap);
+ KeSetImportanceDpc(&fdo->do_dpc, KDPC_IMPORTANCE_HIGH);
/* Finish up BSD-specific setup. */
@@ -1732,8 +1444,6 @@ NdisAddDevice(drv, pdo)
block->nmb_sendrsrc_func = kernndis_functbl[5].ipt_wrap;
block->nmb_pendingreq = NULL;
- ndis_enlarge_thrqueue(8);
-
TAILQ_INSERT_TAIL(&ndis_devhead, block, link);
return (STATUS_SUCCESS);
@@ -1753,7 +1463,6 @@ ndis_unload_driver(arg)
ndis_flush_sysctls(sc);
- ndis_shrink_thrqueue(8);
TAILQ_REMOVE(&ndis_devhead, sc->ndis_block, link);
fdo = sc->ndis_block->nmb_deviceobj;
diff --git a/sys/compat/ndis/ndis_var.h b/sys/compat/ndis/ndis_var.h
index ad67509..22e614e 100644
--- a/sys/compat/ndis/ndis_var.h
+++ b/sys/compat/ndis/ndis_var.h
@@ -898,6 +898,26 @@ struct ndis_spin_lock {
typedef struct ndis_spin_lock ndis_spin_lock;
+struct ndis_rw_lock {
+ union {
+ kspin_lock nrl_spinlock;
+ void *nrl_ctx;
+ } u;
+ uint8_t nrl_rsvd[16];
+};
+
+#define nrl_spinlock u.nrl_spinlock
+#define nrl_ctx u.nrl_ctx;
+
+typedef struct ndis_rw_lock ndis_rw_lock;
+
+struct ndis_lock_state {
+ uint16_t nls_lockstate;
+ ndis_kirql nls_oldirql;
+};
+
+typedef struct ndis_lock_state ndis_lock_state;
+
struct ndis_request {
uint8_t nr_macreserved[4*sizeof(void *)];
uint32_t nr_requesttype;
@@ -955,17 +975,11 @@ enum ndis_interrupt_mode {
typedef enum ndis_interrupt_mode ndis_interrupt_mode;
-struct ndis_work_item;
-
-typedef void (*ndis_proc)(struct ndis_work_item *, void *);
-
-struct ndis_work_item {
- void *nwi_ctx;
- void *nwi_func;
- uint8_t nwi_wraprsvd[sizeof(void *) * 8];
-};
+#define NUMBER_OF_SINGLE_WORK_ITEMS 6
-typedef struct ndis_work_item ndis_work_item;
+typedef work_queue_item ndis_work_item;
+typedef work_item_func ndis_proc;
+#define NdisInitializeWorkItem(w, f, c) ExInitializeWorkItem(w, f, c)
#ifdef notdef
struct ndis_buffer {
@@ -1130,6 +1144,8 @@ struct ndis_packet_oob {
typedef struct ndis_packet_oob ndis_packet_oob;
+#define PROTOCOL_RESERVED_SIZE_IN_PACKET (4 * sizeof(void *))
+
struct ndis_packet {
ndis_packet_private np_private;
union {
@@ -1148,7 +1164,7 @@ struct ndis_packet {
} np_macrsvd;
} u;
uint32_t *np_rsvd[2];
- uint8_t nm_protocolreserved[1];
+ uint8_t nm_protocolreserved[PROTOCOL_RESERVED_SIZE_IN_PACKET];
/*
* This next part is probably wrong, but we need some place
@@ -1164,13 +1180,12 @@ struct ndis_packet {
void *np_softc;
void *np_m0;
int np_txidx;
+ kdpc np_dpc;
kspin_lock np_lock;
};
typedef struct ndis_packet ndis_packet;
-#define PROTOCOL_RESERVED_SIZE_IN_PACKET (4 * sizeof(void *))
-
/* mbuf ext type for NDIS */
#define EXT_NDIS 0x999
@@ -1339,6 +1354,7 @@ TAILQ_HEAD(nte_head, ndis_timer_entry);
struct ndis_fh {
int nf_type;
+ char *nf_name;
void *nf_vp;
void *nf_map;
uint32_t nf_maplen;
@@ -1470,6 +1486,9 @@ struct ndis_miniport_block {
ndis_status nmb_getstat;
ndis_status nmb_setstat;
vm_offset_t nmb_img;
+ ndis_miniport_timer *nmb_timerlist;
+ io_workitem *nmb_workitems[NUMBER_OF_SINGLE_WORK_ITEMS];
+ int nmb_item_idx;
TAILQ_ENTRY(ndis_miniport_block) link;
};
@@ -1586,8 +1605,6 @@ extern int ndis_destroy_dma(void *);
extern int ndis_create_sysctls(void *);
extern int ndis_add_sysctl(void *, char *, char *, char *, int);
extern int ndis_flush_sysctls(void *);
-extern int ndis_sched(void (*)(void *), void *, int);
-extern int ndis_unsched(void (*)(void *), void *, int);
extern int ndis_thsuspend(struct proc *, struct mtx *, int);
extern void ndis_thresume(struct proc *);
extern int ndis_strcasecmp(const char *, const char *);
@@ -1603,6 +1620,7 @@ extern void NdisFreePacketPool(ndis_handle);
extern void NdisAllocatePacket(ndis_status *,
ndis_packet **, ndis_handle);
extern void NdisFreePacket(ndis_packet *);
+extern ndis_status NdisScheduleWorkItem(ndis_work_item *);
__END_DECLS
diff --git a/sys/compat/ndis/ntoskrnl_var.h b/sys/compat/ndis/ntoskrnl_var.h
index 2ef6915..db736fa 100644
--- a/sys/compat/ndis/ntoskrnl_var.h
+++ b/sys/compat/ndis/ntoskrnl_var.h
@@ -255,6 +255,9 @@ typedef struct list_entry list_entry;
l->nle_flink = e; \
} while (0)
+#define CONTAINING_RECORD(addr, type, field) \
+ ((type *)((vm_offset_t)(addr) - (vm_offset_t)(&((type *)0)->field)))
+
struct nt_dispatch_header {
uint8_t dh_type;
uint8_t dh_abs;
@@ -355,16 +358,22 @@ typedef void (*kdpc_func)(struct kdpc *, void *, void *, void *);
struct kdpc {
uint16_t k_type;
- uint8_t k_num;
- uint8_t k_importance;
+ uint8_t k_num; /* CPU number */
+ uint8_t k_importance; /* priority */
list_entry k_dpclistentry;
void *k_deferedfunc;
void *k_deferredctx;
void *k_sysarg1;
void *k_sysarg2;
- register_t k_lock;
+ void *k_lock;
};
+#define KDPC_IMPORTANCE_LOW 0
+#define KDPC_IMPORTANCE_MEDIUM 1
+#define KDPC_IMPORTANCE_HIGH 2
+
+#define KDPC_CPU_DEFAULT 255
+
typedef struct kdpc kdpc;
/*
@@ -1146,6 +1155,50 @@ typedef struct driver_object driver_object;
#define MaxPoolType 0x00000007
/*
+ * IO_WORKITEM is an opaque structures that must be allocated
+ * via IoAllocateWorkItem() and released via IoFreeWorkItem().
+ * Consequently, we can define it any way we want.
+ */
+typedef void (*io_workitem_func)(device_object *, void *);
+
+struct io_workitem {
+ io_workitem_func iw_func;
+ void *iw_ctx;
+ list_entry iw_listentry;
+ device_object *iw_dobj;
+};
+
+typedef struct io_workitem io_workitem;
+
+#define WORKQUEUE_CRITICAL 0
+#define WORKQUEUE_DELAYED 1
+#define WORKQUEUE_HUPERCRITICAL 2
+
+/*
+ * Older, deprecated work item API, needed to support NdisQueueWorkItem().
+ */
+
+struct work_queue_item;
+
+typedef void (*work_item_func)(struct work_queue_item *, void *);
+
+struct work_queue_item {
+ list_entry wqi_entry;
+ work_item_func wqi_func;
+ void *wqi_ctx;
+};
+
+typedef struct work_queue_item work_queue_item;
+
+#define ExInitializeWorkItem(w, func, ctx) \
+ do { \
+ (w)->wqi_func = (func); \
+ (w)->wqi_ctx = (ctx); \
+ INIT_LIST_HEAD(&((w)->wqi_entry)); \
+ } while (0); \
+
+
+/*
* FreeBSD's kernel stack is 2 pages in size by default. The
* Windows stack is larger, so we need to give our threads more
* stack pages. 4 should be enough, we use 8 just to extra safe.
@@ -1196,6 +1249,10 @@ extern int ntoskrnl_libfini(void);
extern void KeInitializeDpc(kdpc *, void *, void *);
extern uint8_t KeInsertQueueDpc(kdpc *, void *, void *);
extern uint8_t KeRemoveQueueDpc(kdpc *);
+extern void KeSetImportanceDpc(kdpc *, uint32_t);
+extern void KeSetTargetProcessorDpc(kdpc *, uint8_t);
+extern void KeFlushQueuedDpcs(void);
+extern uint32_t KeGetCurrentProcessorNumber(void);
extern void KeInitializeTimer(ktimer *);
extern void KeInitializeTimerEx(ktimer *, uint32_t);
extern uint8_t KeSetTimer(ktimer *, int64_t, kdpc *);
@@ -1237,8 +1294,13 @@ extern uint8_t IoCancelIrp(irp *);
extern void IoDetachDevice(device_object *);
extern device_object *IoAttachDeviceToDeviceStack(device_object *,
device_object *);
-mdl *IoAllocateMdl(void *, uint32_t, uint8_t, uint8_t, irp *);
-void IoFreeMdl(mdl *);
+extern mdl *IoAllocateMdl(void *, uint32_t, uint8_t, uint8_t, irp *);
+extern void IoFreeMdl(mdl *);
+extern io_workitem *IoAllocateWorkItem(device_object *);
+extern void ExQueueWorkItem(work_queue_item *, u_int32_t);
+extern void IoFreeWorkItem(io_workitem *);
+extern void IoQueueWorkItem(io_workitem *, io_workitem_func,
+ uint32_t, void *);
#define IoCallDriver(a, b) IofCallDriver(a, b)
#define IoCompleteRequest(a, b) IofCompleteRequest(a, b)
diff --git a/sys/compat/ndis/subr_hal.c b/sys/compat/ndis/subr_hal.c
index c992c7e..96ecd37 100644
--- a/sys/compat/ndis/subr_hal.c
+++ b/sys/compat/ndis/subr_hal.c
@@ -411,7 +411,7 @@ image_patch_table hal_functbl[] = {
* in this table.
*/
- { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_CDECL },
+ { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
/* End of list. */
diff --git a/sys/compat/ndis/subr_ndis.c b/sys/compat/ndis/subr_ndis.c
index 7c0d4ad..e3949f8 100644
--- a/sys/compat/ndis/subr_ndis.c
+++ b/sys/compat/ndis/subr_ndis.c
@@ -144,6 +144,10 @@ static void NdisAcquireSpinLock(ndis_spin_lock *);
static void NdisReleaseSpinLock(ndis_spin_lock *);
static void NdisDprAcquireSpinLock(ndis_spin_lock *);
static void NdisDprReleaseSpinLock(ndis_spin_lock *);
+static void NdisInitializeReadWriteLock(ndis_rw_lock *);
+static void NdisAcquireReadWriteLock(ndis_rw_lock *,
+ uint8_t, ndis_lock_state *);
+static void NdisReleaseReadWriteLock(ndis_rw_lock *, ndis_lock_state *);
static uint32_t NdisReadPciSlotInformation(ndis_handle, uint32_t,
uint32_t, void *, uint32_t);
static uint32_t NdisWritePciSlotInformation(ndis_handle, uint32_t,
@@ -178,7 +182,7 @@ static void NdisMFreeMapRegisters(ndis_handle);
static void ndis_mapshared_cb(void *, bus_dma_segment_t *, int, int);
static void NdisMAllocateSharedMemory(ndis_handle, uint32_t,
uint8_t, void **, ndis_physaddr *);
-static void ndis_asyncmem_complete(void *);
+static void ndis_asyncmem_complete(device_object *, void *);
static ndis_status NdisMAllocateSharedMemoryAsync(ndis_handle,
uint32_t, uint8_t, void *);
static void NdisMFreeSharedMemory(ndis_handle, uint32_t,
@@ -270,9 +274,7 @@ static uint8_t NdisSystemProcessorCount(void);
static void NdisMIndicateStatusComplete(ndis_handle);
static void NdisMIndicateStatus(ndis_handle, ndis_status,
void *, uint32_t);
-static void ndis_workfunc(void *);
static funcptr ndis_findwrap(funcptr);
-static ndis_status NdisScheduleWorkItem(ndis_work_item *);
static void NdisCopyFromPacketToPacket(ndis_packet *,
uint32_t, uint32_t, ndis_packet *, uint32_t, uint32_t *);
static void NdisCopyFromPacketToPacketSafe(ndis_packet *,
@@ -558,7 +560,6 @@ NdisOpenConfiguration(status, cfg, wrapctx)
ndis_handle *cfg;
ndis_handle wrapctx;
{
-
*cfg = wrapctx;
*status = NDIS_STATUS_SUCCESS;
@@ -574,6 +575,7 @@ NdisOpenConfigurationKeyByName(status, cfg, subkey, subhandle)
{
*subhandle = cfg;
*status = NDIS_STATUS_SUCCESS;
+
return;
}
@@ -586,6 +588,7 @@ NdisOpenConfigurationKeyByIndex(status, cfg, idx, subkey, subhandle)
ndis_handle *subhandle;
{
*status = NDIS_STATUS_FAILURE;
+
return;
}
@@ -754,6 +757,7 @@ NdisReadConfiguration(status, parm, cfg, key, type)
free(keystr, M_DEVBUF);
*status = NDIS_STATUS_FAILURE;
+
return;
}
@@ -923,6 +927,44 @@ NdisDprReleaseSpinLock(lock)
return;
}
+static void
+NdisInitializeReadWriteLock(lock)
+ ndis_rw_lock *lock;
+{
+ KeInitializeSpinLock(&lock->nrl_spinlock);
+ bzero((char *)&lock->nrl_rsvd, sizeof(lock->nrl_rsvd));
+ return;
+}
+
+static void
+NdisAcquireReadWriteLock(lock, writeacc, state)
+ ndis_rw_lock *lock;
+ uint8_t writeacc;
+ ndis_lock_state *state;
+{
+ if (writeacc == TRUE) {
+ KeAcquireSpinLock(&lock->nrl_spinlock, &state->nls_oldirql);
+ lock->nrl_rsvd[0]++;
+ } else
+ lock->nrl_rsvd[1]++;
+
+ return;
+}
+
+static void
+NdisReleaseReadWriteLock(lock, state)
+ ndis_rw_lock *lock;
+ ndis_lock_state *state;
+{
+ if (lock->nrl_rsvd[0]) {
+ lock->nrl_rsvd[0]--;
+ KeReleaseSpinLock(&lock->nrl_spinlock, state->nls_oldirql);
+ } else
+ lock->nrl_rsvd[1]--;
+
+ return;
+}
+
static uint32_t
NdisReadPciSlotInformation(adapter, slot, offset, buf, len)
ndis_handle adapter;
@@ -1011,28 +1053,36 @@ NdisWriteErrorLogEntry(ndis_handle adapter, ndis_error_code code,
char msgbuf[ERRMSGLEN];
device_t dev;
driver_object *drv;
+ struct ndis_softc *sc;
+ struct ifnet *ifp;
block = (ndis_miniport_block *)adapter;
dev = block->nmb_physdeviceobj->do_devext;
drv = block->nmb_deviceobj->do_drvobj;
+ sc = device_get_softc(dev);
+ ifp = &sc->arpcom.ac_if;
error = pe_get_message((vm_offset_t)drv->dro_driverstart,
code, &str, &i, &flags);
- if (error == 0 && flags & MESSAGE_RESOURCE_UNICODE) {
+ if (error == 0 && flags & MESSAGE_RESOURCE_UNICODE &&
+ ifp->if_flags & IFF_DEBUG) {
ustr = msgbuf;
ndis_unicode_to_ascii((uint16_t *)str,
((i / 2)) > (ERRMSGLEN - 1) ? ERRMSGLEN : i, &ustr);
str = ustr;
}
+
device_printf (dev, "NDIS ERROR: %x (%s)\n", code,
str == NULL ? "unknown error" : str);
- device_printf (dev, "NDIS NUMERRORS: %x\n", numerrors);
- va_start(ap, numerrors);
- for (i = 0; i < numerrors; i++)
- device_printf (dev, "argptr: %p\n",
- va_arg(ap, void *));
- va_end(ap);
+ if (ifp->if_flags & IFF_DEBUG) {
+ device_printf (dev, "NDIS NUMERRORS: %x\n", numerrors);
+ va_start(ap, numerrors);
+ for (i = 0; i < numerrors; i++)
+ device_printf (dev, "argptr: %p\n",
+ va_arg(ap, void *));
+ va_end(ap);
+ }
return;
}
@@ -1147,6 +1197,7 @@ NdisInitializeTimer(timer, func, ctx)
{
KeInitializeTimer(&timer->nt_ktimer);
KeInitializeDpc(&timer->nt_kdpc, func, ctx);
+ KeSetImportanceDpc(&timer->nt_kdpc, KDPC_IMPORTANCE_LOW);
return;
}
@@ -1200,6 +1251,8 @@ NdisMInitializeTimer(timer, handle, func, ctx)
ndis_timer_function func;
void *ctx;
{
+ uint8_t irql;
+
/* Save the driver's funcptr and context */
timer->nmt_timerfunc = func;
@@ -1215,6 +1268,14 @@ NdisMInitializeTimer(timer, handle, func, ctx)
KeInitializeTimer(&timer->nmt_ktimer);
KeInitializeDpc(&timer->nmt_kdpc,
ndis_findwrap((funcptr)ndis_timercall), timer);
+ timer->nmt_ktimer.k_dpc = &timer->nmt_kdpc;
+
+ KeAcquireSpinLock(&timer->nmt_block->nmb_lock, &irql);
+
+ timer->nmt_nexttimer = timer->nmt_block->nmb_timerlist;
+ timer->nmt_block->nmb_timerlist = timer;
+
+ KeReleaseSpinLock(&timer->nmt_block->nmb_lock, irql);
return;
}
@@ -1447,6 +1508,7 @@ ndis_mapshared_cb(arg, segs, nseg, error)
/*
* This maps to bus_dmamem_alloc().
*/
+
static void
NdisMAllocateSharedMemory(adapter, len, cached, vaddr, paddr)
ndis_handle adapter;
@@ -1513,6 +1575,16 @@ NdisMAllocateSharedMemory(adapter, len, cached, vaddr, paddr)
return;
}
+ /*
+ * Save the physical address along with the source address.
+ * The AirGo MIMO driver will call NdisMFreeSharedMemory()
+ * with a bogus virtual address sometimes, but with a valid
+ * physical address. To keep this from causing trouble, we
+ * use the physical address to as a sanity check in case
+ * searching based on the virtual address fails.
+ */
+
+ sh->ndis_paddr.np_quad = paddr->np_quad;
sh->ndis_saddr = *vaddr;
sh->ndis_next = sc->ndis_shlist;
sc->ndis_shlist = sh;
@@ -1521,14 +1593,15 @@ NdisMAllocateSharedMemory(adapter, len, cached, vaddr, paddr)
}
struct ndis_allocwork {
- ndis_handle na_adapter;
uint32_t na_len;
uint8_t na_cached;
void *na_ctx;
+ io_workitem *na_iw;
};
static void
-ndis_asyncmem_complete(arg)
+ndis_asyncmem_complete(dobj, arg)
+ device_object *dobj;
void *arg;
{
ndis_miniport_block *block;
@@ -1539,18 +1612,19 @@ ndis_asyncmem_complete(arg)
ndis_allocdone_handler donefunc;
w = arg;
- block = (ndis_miniport_block *)w->na_adapter;
+ block = (ndis_miniport_block *)dobj->do_devext;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
vaddr = NULL;
paddr.np_quad = 0;
donefunc = sc->ndis_chars->nmc_allocate_complete_func;
- NdisMAllocateSharedMemory(w->na_adapter, w->na_len,
+ NdisMAllocateSharedMemory(block, w->na_len,
w->na_cached, &vaddr, &paddr);
- MSCALL5(donefunc, w->na_adapter, vaddr, &paddr, w->na_len, w->na_ctx);
+ MSCALL5(donefunc, block, vaddr, &paddr, w->na_len, w->na_ctx);
- free(arg, M_DEVBUF);
+ IoFreeWorkItem(w->na_iw);
+ free(w, M_DEVBUF);
return;
}
@@ -1562,29 +1636,31 @@ NdisMAllocateSharedMemoryAsync(adapter, len, cached, ctx)
uint8_t cached;
void *ctx;
{
+ ndis_miniport_block *block;
struct ndis_allocwork *w;
+ io_workitem *iw;
+ io_workitem_func ifw;
if (adapter == NULL)
return(NDIS_STATUS_FAILURE);
+ block = adapter;
+
+ iw = IoAllocateWorkItem(block->nmb_deviceobj);
+ if (iw == NULL)
+ return(NDIS_STATUS_FAILURE);
+
w = malloc(sizeof(struct ndis_allocwork), M_TEMP, M_NOWAIT);
if (w == NULL)
return(NDIS_STATUS_FAILURE);
- w->na_adapter = adapter;
w->na_cached = cached;
w->na_len = len;
w->na_ctx = ctx;
- /*
- * Pawn this work off on the SWI thread instead of the
- * taskqueue thread, because sometimes drivers will queue
- * up work items on the taskqueue thread that will block,
- * which would prevent the memory allocation from completing
- * when we need it.
- */
- ndis_sched(ndis_asyncmem_complete, w, NDIS_SWI);
+ ifw = (io_workitem_func)ndis_findwrap((funcptr)ndis_asyncmem_complete);
+ IoQueueWorkItem(iw, ifw, WORKQUEUE_DELAYED, w);
return(NDIS_STATUS_PENDING);
}
@@ -1600,6 +1676,7 @@ NdisMFreeSharedMemory(adapter, len, cached, vaddr, paddr)
ndis_miniport_block *block;
struct ndis_softc *sc;
struct ndis_shmem *sh, *prev;
+ int checks = 0;
if (vaddr == NULL || adapter == NULL)
return;
@@ -1614,14 +1691,28 @@ NdisMFreeSharedMemory(adapter, len, cached, vaddr, paddr)
return;
while (sh) {
+ checks++;
if (sh->ndis_saddr == vaddr)
break;
+ /*
+ * Check the physaddr too, just in case the driver lied
+ * about the virtual address.
+ */
+ if (sh->ndis_paddr.np_quad == paddr.np_quad)
+ break;
prev = sh;
sh = sh->ndis_next;
}
+ if (sh == NULL) {
+ printf("NDIS: buggy driver tried to free "
+ "invalid shared memory: vaddr: %p paddr: 0x%qx\n",
+ vaddr, paddr.np_quad);
+ return;
+ }
+
bus_dmamap_unload(sh->ndis_stag, sh->ndis_smap);
- bus_dmamem_free(sh->ndis_stag, vaddr, sh->ndis_smap);
+ bus_dmamem_free(sh->ndis_stag, sh->ndis_saddr, sh->ndis_smap);
bus_dma_tag_destroy(sh->ndis_stag);
if (sh == sc->ndis_shlist)
@@ -1684,7 +1775,7 @@ static uint32_t
NdisMGetDmaAlignment(handle)
ndis_handle handle;
{
- return(128);
+ return(16);
}
/*
@@ -2132,7 +2223,6 @@ NdisInitializeEvent(event)
* events, and should be initialized to the
* not signaled state.
*/
-
KeInitializeEvent(&event->ne_event, EVENT_TYPE_NOTIFY, FALSE);
return;
}
@@ -2162,9 +2252,8 @@ NdisWaitEvent(event, msecs)
uint32_t rval;
duetime = ((int64_t)msecs * -10000);
-
rval = KeWaitForSingleObject((nt_dispatch_header *)event,
- 0, 0, TRUE, msecs ? &duetime : NULL);
+ 0, 0, TRUE, msecs ? & duetime : NULL);
if (rval == STATUS_TIMEOUT)
return(FALSE);
@@ -2354,6 +2443,8 @@ NdisMSleep(usecs)
* period does not in fact elapse. As a workaround, if the
* attempt to sleep delay fails, we do a hard DELAY() instead.
*/
+ tv.tv_sec = 0;
+ tv.tv_usec = usecs;
if (ndis_thsuspend(curthread->td_proc, NULL, tvtohz(&tv)) == 0)
DELAY(usecs);
@@ -2737,10 +2828,13 @@ NdisOpenFile(status, filehandle, filelength, filename, highestaddr)
fh = ExAllocatePoolWithTag(NonPagedPool, sizeof(ndis_fh), 0);
if (fh == NULL) {
+ free(afilename, M_DEVBUF);
*status = NDIS_STATUS_RESOURCES;
return;
}
+ fh->nf_name = afilename;
+
/*
* During system bootstrap, it's impossible to load files
* from the rootfs since it's not mounted yet. We therefore
@@ -2778,7 +2872,6 @@ NdisOpenFile(status, filehandle, filelength, filename, highestaddr)
fh->nf_type = NDIS_FH_TYPE_MODULE;
*filelength = fh->nf_maplen = (kldend - kldstart) & 0xFFFFFFFF;
*filehandle = fh;
- free(afilename, M_DEVBUF);
*status = NDIS_STATUS_SUCCESS;
return;
}
@@ -2797,6 +2890,7 @@ NdisOpenFile(status, filehandle, filelength, filename, highestaddr)
path = ExAllocatePoolWithTag(NonPagedPool, MAXPATHLEN, 0);
if (path == NULL) {
ExFreePool(fh);
+ free(afilename, M_DEVBUF);
*status = NDIS_STATUS_RESOURCES;
return;
}
@@ -2823,6 +2917,7 @@ NdisOpenFile(status, filehandle, filelength, filename, highestaddr)
ExFreePool(fh);
printf("NDIS: open file %s failed: %d\n", path, error);
ExFreePool(path);
+ free(afilename, M_DEVBUF);
return;
}
@@ -2876,7 +2971,7 @@ NdisMapFile(status, mappedbuffer, filehandle)
if (fh->nf_type == NDIS_FH_TYPE_MODULE) {
lf = fh->nf_vp;
- if (ndis_find_sym(lf, lf->filename, "_start", &kldstart)) {
+ if (ndis_find_sym(lf, fh->nf_name, "_start", &kldstart)) {
*status = NDIS_STATUS_FAILURE;
return;
}
@@ -2952,6 +3047,7 @@ NdisCloseFile(filehandle)
}
fh->nf_vp = NULL;
+ free(fh->nf_name, M_DEVBUF);
ExFreePool(fh);
return;
@@ -2998,24 +3094,20 @@ NdisMIndicateStatus(adapter, status, sbuf, slen)
return;
}
-static void
-ndis_workfunc(ctx)
- void *ctx;
-{
- ndis_work_item *work;
- ndis_proc workfunc;
-
- work = ctx;
- workfunc = work->nwi_func;
- MSCALL2(workfunc, work, work->nwi_ctx);
- return;
-}
+/*
+ * The DDK documentation says that you should use IoQueueWorkItem()
+ * instead of ExQueueWorkItem(). The problem is, IoQueueWorkItem()
+ * is fundamentally incompatible with NdisScheduleWorkItem(), which
+ * depends on the API semantics of ExQueueWorkItem(). In our world,
+ * ExQueueWorkItem() is implemented on top of IoAllocateQueueItem()
+ * anyway.
+ */
-static ndis_status
+ndis_status
NdisScheduleWorkItem(work)
ndis_work_item *work;
{
- ndis_sched(ndis_workfunc, work, NDIS_TASKQUEUE);
+ ExQueueWorkItem(work, WORKQUEUE_DELAYED);
return(NDIS_STATUS_SUCCESS);
}
@@ -3232,6 +3324,9 @@ image_patch_table ndis_functbl[] = {
IMPORT_SFUNC(NdisDprAcquireSpinLock, 1),
IMPORT_SFUNC(NdisDprReleaseSpinLock, 1),
IMPORT_SFUNC(NdisAllocateSpinLock, 1),
+ IMPORT_SFUNC(NdisInitializeReadWriteLock, 1),
+ IMPORT_SFUNC(NdisAcquireReadWriteLock, 3),
+ IMPORT_SFUNC(NdisReleaseReadWriteLock, 2),
IMPORT_SFUNC(NdisFreeSpinLock, 1),
IMPORT_SFUNC(NdisFreeMemory, 3),
IMPORT_SFUNC(NdisReadPciSlotInformation, 5),
@@ -3308,6 +3403,7 @@ image_patch_table ndis_functbl[] = {
IMPORT_SFUNC(NdisMQueryAdapterInstanceName, 2),
IMPORT_SFUNC(NdisMRegisterUnloadHandler, 2),
IMPORT_SFUNC(ndis_timercall, 4),
+ IMPORT_SFUNC(ndis_asyncmem_complete, 2),
/*
* This last entry is a catch-all for any function we haven't
@@ -3316,7 +3412,7 @@ image_patch_table ndis_functbl[] = {
* in this table.
*/
- { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_CDECL },
+ { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
/* End of list. */
diff --git a/sys/compat/ndis/subr_ntoskrnl.c b/sys/compat/ndis/subr_ntoskrnl.c
index b3b9040..e2e3838 100644
--- a/sys/compat/ndis/subr_ntoskrnl.c
+++ b/sys/compat/ndis/subr_ntoskrnl.c
@@ -51,6 +51,8 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/module.h>
+#include <sys/smp.h>
+#include <sys/sched.h>
#include <machine/atomic.h>
#include <machine/clock.h>
@@ -66,6 +68,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
@@ -74,6 +78,22 @@ __FBSDID("$FreeBSD$");
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>
+struct kdpc_queue {
+ list_entry kq_high;
+ list_entry kq_low;
+ list_entry kq_med;
+ struct thread *kq_td;
+ int kq_state;
+ int kq_cpu;
+ int kq_exit;
+ struct mtx kq_lock;
+ nt_kevent kq_proc;
+ nt_kevent kq_done;
+ nt_kevent kq_dead;
+};
+
+typedef struct kdpc_queue kdpc_queue;
+
static uint8_t RtlEqualUnicodeString(ndis_unicode_string *,
ndis_unicode_string *, uint8_t);
static void RtlCopyUnicodeString(ndis_unicode_string *,
@@ -100,6 +120,11 @@ static uint32_t KeWaitForMultipleObjects(uint32_t,
static void ntoskrnl_wakeup(void *);
static void ntoskrnl_timercall(void *);
static void ntoskrnl_run_dpc(void *);
+static void ntoskrnl_dpc_thread(void *);
+static void ntoskrnl_destroy_dpc_threads(void);
+static void ntoskrnl_workitem_thread(void *);
+static void ntoskrnl_workitem(device_object *, void *);
+static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
@@ -144,6 +169,7 @@ static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
+static uint8_t MmIsAddressValid(void *);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static void RtlInitAnsiString(ndis_ansi_string *, char *);
static void RtlInitUnicodeString(ndis_unicode_string *,
@@ -172,6 +198,7 @@ static ndis_status ObReferenceObjectByHandle(ndis_handle,
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static void *ntoskrnl_memset(void *, int, size_t);
+static char *ntoskrnl_strstr(char *, char *);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
@@ -183,18 +210,60 @@ static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
+static uma_zone_t iw_zone;
+static struct kdpc_queue *kq_queues;
+static struct kdpc_queue *wq_queue;
int
ntoskrnl_libinit()
{
image_patch_table *patch;
+ int error;
+ struct proc *p;
+ kdpc_queue *kq;
+ int i;
+ char name[64];
mtx_init(&ntoskrnl_dispatchlock,
- "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF);
+ "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
KeInitializeSpinLock(&ntoskrnl_global);
KeInitializeSpinLock(&ntoskrnl_cancellock);
TAILQ_INIT(&ntoskrnl_reflist);
+ kq_queues = ExAllocatePoolWithTag(NonPagedPool,
+ sizeof(kdpc_queue) * mp_ncpus, 0);
+
+ if (kq_queues == NULL)
+ return(ENOMEM);
+
+ wq_queue = ExAllocatePoolWithTag(NonPagedPool,
+ sizeof(kdpc_queue), 0);
+
+ if (wq_queue == NULL)
+ return(ENOMEM);
+
+ bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
+ bzero((char *)wq_queue, sizeof(kdpc_queue));
+
+ for (i = 0; i < mp_ncpus; i++) {
+ kq = kq_queues + i;
+ kq->kq_cpu = i;
+ sprintf(name, "Windows DPC %d", i);
+ error = kthread_create(ntoskrnl_dpc_thread, kq, &p,
+ RFHIGHPID, NDIS_KSTACK_PAGES, name);
+ if (error)
+ panic("failed to launch DPC thread");
+ }
+
+ /*
+ * Launch the workitem thread.
+ */
+
+ error = kthread_create(ntoskrnl_workitem_thread, wq_queue, &p,
+ RFHIGHPID, NDIS_KSTACK_PAGES, "Windows WorkItem");
+ if (error)
+ panic("failed to launch workitem thread");
+
patch = ntoskrnl_functbl;
while (patch->ipt_func != NULL) {
windrv_wrap((funcptr)patch->ipt_func,
@@ -219,6 +288,9 @@ ntoskrnl_libinit()
mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+
return(0);
}
@@ -233,7 +305,20 @@ ntoskrnl_libfini()
patch++;
}
+ /* Stop the DPC queues. */
+ ntoskrnl_destroy_dpc_threads();
+
+ /* Stop the workitem queue. */
+ wq_queue->kq_exit = 1;
+ KeSetEvent(&wq_queue->kq_proc, 0, FALSE);
+ KeWaitForSingleObject((nt_dispatch_header *)&wq_queue->kq_dead,
+ 0, 0, TRUE, NULL);
+
+ ExFreePool(kq_queues);
+ ExFreePool(wq_queue);
+
uma_zdestroy(mdl_zone);
+ uma_zdestroy(iw_zone);
mtx_destroy(&ntoskrnl_dispatchlock);
@@ -253,6 +338,27 @@ ntoskrnl_memset(buf, ch, size)
return(memset(buf, ch, size));
}
+static char *
+ntoskrnl_strstr(s, find)
+ char *s, *find;
+{
+ char c, sc;
+ size_t len;
+
+ if ((c = *find++) != 0) {
+ len = strlen(find);
+ do {
+ do {
+ if ((sc = *s++) == 0)
+ return (NULL);
+ } while (sc != c);
+ } while (strncmp(s, find, len) != 0);
+ s--;
+ }
+ return ((char *)s);
+}
+
+
static uint8_t
RtlEqualUnicodeString(str1, str2, caseinsensitive)
ndis_unicode_string *str1;
@@ -1009,8 +1115,25 @@ ntoskrnl_wakeup(arg)
obj = arg;
- obj->dh_sigstate = TRUE;
e = obj->dh_waitlisthead.nle_flink;
+
+ /*
+ * What happens if someone tells us to wake up
+ * threads waiting on an object, but nobody's
+ * waiting on it at the moment? For sync events,
+ * the signal state is supposed to be automatically
+ * reset, but this only happens in the KeWaitXXX()
+ * functions. If nobody is waiting, the state never
+ * gets cleared.
+ */
+
+ if (e == &obj->dh_waitlisthead) {
+ if (obj->dh_type == EVENT_TYPE_SYNC)
+ obj->dh_sigstate = FALSE;
+ return;
+ }
+
+ obj->dh_sigstate = TRUE;
while (e != &obj->dh_waitlisthead) {
w = (wait_block *)e;
td = w->wb_kthread;
@@ -1968,6 +2091,172 @@ MmUnmapLockedPages(vaddr, buf)
return;
}
+/*
+ * This function has a problem in that it will break if you
+ * compile this module without PAE and try to use it on a PAE
+ * kernel. Unfortunately, there's no way around this at the
+ * moment. It's slightly less broken that using pmap_kextract().
+ * You'd think the virtual memory subsystem would help us out
+ * here, but it doesn't.
+ */
+
+static uint8_t
+MmIsAddressValid(vaddr)
+ void *vaddr;
+{
+ if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
+ return(TRUE);
+
+ return(FALSE);
+}
+
+/*
+ * Workitems are unlike DPCs, in that they run in a user-mode thread
+ * context rather than at DISPATCH_LEVEL in kernel context. In our
+ * case we run them in kernel context anyway.
+ */
+static void
+ntoskrnl_workitem_thread(arg)
+ void *arg;
+{
+ kdpc_queue *kq;
+ list_entry *l;
+ io_workitem *iw;
+
+ kq = arg;
+
+ INIT_LIST_HEAD(&kq->kq_med);
+ kq->kq_td = curthread;
+ kq->kq_exit = 0;
+ kq->kq_state = NDIS_PSTATE_SLEEPING;
+ mtx_init(&kq->kq_lock, "NDIS thread lock", NULL, MTX_SPIN);
+ KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
+ KeInitializeEvent(&kq->kq_dead, EVENT_TYPE_SYNC, FALSE);
+
+ while (1) {
+
+ KeWaitForSingleObject((nt_dispatch_header *)&kq->kq_proc,
+ 0, 0, TRUE, NULL);
+
+ mtx_lock_spin(&kq->kq_lock);
+
+ if (kq->kq_exit) {
+ mtx_unlock_spin(&kq->kq_lock);
+ KeSetEvent(&kq->kq_dead, 0, FALSE);
+ break;
+ }
+
+ kq->kq_state = NDIS_PSTATE_RUNNING;
+
+ l = kq->kq_med.nle_flink;
+ while (l != & kq->kq_med) {
+ iw = CONTAINING_RECORD(l,
+ io_workitem, iw_listentry);
+ REMOVE_LIST_HEAD((&kq->kq_med));
+ if (iw->iw_func == NULL) {
+ l = kq->kq_med.nle_flink;
+ continue;
+ }
+ mtx_unlock_spin(&kq->kq_lock);
+ MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
+ mtx_lock_spin(&kq->kq_lock);
+ l = kq->kq_med.nle_flink;
+ }
+
+ kq->kq_state = NDIS_PSTATE_SLEEPING;
+
+ mtx_unlock_spin(&kq->kq_lock);
+ }
+
+ mtx_destroy(&kq->kq_lock);
+#if __FreeBSD_version < 502113
+ mtx_lock(&Giant);
+#endif
+ kthread_exit(0);
+ return; /* notreached */
+}
+
+io_workitem *
+IoAllocateWorkItem(dobj)
+ device_object *dobj;
+{
+ io_workitem *iw;
+
+ iw = uma_zalloc(iw_zone, M_NOWAIT);
+ if (iw == NULL)
+ return(NULL);
+
+ INIT_LIST_HEAD(&iw->iw_listentry);
+ iw->iw_dobj = dobj;
+
+ return(iw);
+}
+
+void
+IoFreeWorkItem(iw)
+ io_workitem *iw;
+{
+ uma_zfree(iw_zone, iw);
+ return;
+}
+
+void
+IoQueueWorkItem(iw, iw_func, qtype, ctx)
+ io_workitem *iw;
+ io_workitem_func iw_func;
+ uint32_t qtype;
+ void *ctx;
+{
+ int state;
+
+ iw->iw_func = iw_func;
+ iw->iw_ctx = ctx;
+
+ mtx_lock_spin(&wq_queue->kq_lock);
+ INSERT_LIST_TAIL((&wq_queue->kq_med), (&iw->iw_listentry));
+ state = wq_queue->kq_state;
+ mtx_unlock_spin(&wq_queue->kq_lock);
+ if (state == NDIS_PSTATE_SLEEPING)
+ KeSetEvent(&wq_queue->kq_proc, 0, FALSE);
+ return;
+}
+
+static void
+ntoskrnl_workitem(dobj, arg)
+ device_object *dobj;
+ void *arg;
+{
+ io_workitem *iw;
+ work_queue_item *w;
+ work_item_func f;
+
+ iw = arg;
+ w = (work_queue_item *)dobj;
+ f = (work_item_func)w->wqi_func;
+ uma_zfree(iw_zone, iw);
+ MSCALL2(f, w, w->wqi_ctx);
+
+ return;
+}
+
+void
+ExQueueWorkItem(w, qtype)
+ work_queue_item *w;
+ uint32_t qtype;
+{
+ io_workitem *iw;
+ io_workitem_func iwf;
+
+ iw = IoAllocateWorkItem((device_object *)w);
+ if (iw == NULL)
+ return;
+
+ iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
+ IoQueueWorkItem(iw, iwf, qtype, iw);
+
+ return;
+}
+
static size_t
RtlCompareMemory(s1, s2, len)
const void *s1;
@@ -2500,6 +2789,7 @@ KeInitializeTimerEx(timer, type)
if (timer == NULL)
return;
+ bzero((char *)timer, sizeof(ktimer));
INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
timer->k_header.dh_sigstate = FALSE;
timer->k_header.dh_inserted = FALSE;
@@ -2511,6 +2801,114 @@ KeInitializeTimerEx(timer, type)
}
/*
+ * DPC subsystem. A Windows Defered Procedure Call has the following
+ * properties:
+ * - It runs at DISPATCH_LEVEL.
+ * - It can have one of 3 importance values that control when it
+ * runs relative to other DPCs in the queue.
+ * - On SMP systems, it can be set to run on a specific processor.
+ * In order to satisfy the last property, we create a DPC thread for
+ * each CPU in the system and bind it to that CPU. Each thread
+ * maintains three queues with different importance levels, which
+ * will be processed in order from lowest to highest.
+ *
+ * In Windows, interrupt handlers run as DPCs. (Not to be confused
+ * with ISRs, which run in interrupt context and can preempt DPCs.)
+ * ISRs are given the highest importance so that they'll take
+ * precedence over timers and other things.
+ */
+
+static void
+ntoskrnl_dpc_thread(arg)
+ void *arg;
+{
+ kdpc_queue *kq;
+ kdpc *d;
+ list_entry *l;
+
+ kq = arg;
+
+ INIT_LIST_HEAD(&kq->kq_high);
+ INIT_LIST_HEAD(&kq->kq_low);
+ INIT_LIST_HEAD(&kq->kq_med);
+ kq->kq_td = curthread;
+ kq->kq_exit = 0;
+ kq->kq_state = NDIS_PSTATE_SLEEPING;
+ mtx_init(&kq->kq_lock, "NDIS thread lock", NULL, MTX_SPIN);
+ KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
+ KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
+ KeInitializeEvent(&kq->kq_dead, EVENT_TYPE_SYNC, FALSE);
+
+ sched_pin();
+
+ while (1) {
+ KeWaitForSingleObject((nt_dispatch_header *)&kq->kq_proc,
+ 0, 0, TRUE, NULL);
+
+ mtx_lock_spin(&kq->kq_lock);
+
+ if (kq->kq_exit) {
+ mtx_unlock_spin(&kq->kq_lock);
+ KeSetEvent(&kq->kq_dead, 0, FALSE);
+ break;
+ }
+
+ kq->kq_state = NDIS_PSTATE_RUNNING;
+
+ /* Process high importance list first. */
+
+ l = kq->kq_high.nle_flink;
+ while (l != &kq->kq_high) {
+ d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
+ REMOVE_LIST_ENTRY((&d->k_dpclistentry));
+ mtx_unlock_spin(&kq->kq_lock);
+ ntoskrnl_run_dpc(d);
+ mtx_lock_spin(&kq->kq_lock);
+ l = kq->kq_high.nle_flink;
+ }
+
+ /* Now the medium importance list. */
+
+ l = kq->kq_med.nle_flink;
+ while (l != &kq->kq_med) {
+ d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
+ REMOVE_LIST_ENTRY((&d->k_dpclistentry));
+ mtx_unlock_spin(&kq->kq_lock);
+ ntoskrnl_run_dpc(d);
+ mtx_lock_spin(&kq->kq_lock);
+ l = kq->kq_med.nle_flink;
+ }
+
+ /* And finally the low importance list. */
+
+ l = kq->kq_low.nle_flink;
+ while (l != &kq->kq_low) {
+ d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
+ REMOVE_LIST_ENTRY((&d->k_dpclistentry));
+ mtx_unlock_spin(&kq->kq_lock);
+ ntoskrnl_run_dpc(d);
+ mtx_lock_spin(&kq->kq_lock);
+ l = kq->kq_low.nle_flink;
+ }
+
+ kq->kq_state = NDIS_PSTATE_SLEEPING;
+
+ mtx_unlock_spin(&kq->kq_lock);
+
+ KeSetEvent(&kq->kq_done, 0, FALSE);
+
+ }
+
+ mtx_destroy(&kq->kq_lock);
+#if __FreeBSD_version < 502113
+ mtx_lock(&Giant);
+#endif
+ kthread_exit(0);
+ return; /* notreached */
+}
+
+
+/*
* This is a wrapper for Windows deferred procedure calls that
* have been placed on an NDIS thread work queue. We need it
* since the DPC could be a _stdcall function. Also, as far as
@@ -2520,12 +2918,14 @@ static void
ntoskrnl_run_dpc(arg)
void *arg;
{
- kdpc_func dpcfunc;
+ kdpc_func dpcfunc;
kdpc *dpc;
uint8_t irql;
dpc = arg;
dpcfunc = dpc->k_deferedfunc;
+ if (dpcfunc == NULL)
+ return;
irql = KeRaiseIrql(DISPATCH_LEVEL);
MSCALL4(dpcfunc, dpc, dpc->k_deferredctx,
dpc->k_sysarg1, dpc->k_sysarg2);
@@ -2534,6 +2934,49 @@ ntoskrnl_run_dpc(arg)
return;
}
+static void
+ntoskrnl_destroy_dpc_threads(void)
+{
+ kdpc_queue *kq;
+ kdpc dpc;
+ int i;
+
+ kq = kq_queues;
+ for (i = 0; i < mp_ncpus; i++) {
+ kq += i;
+
+ kq->kq_exit = 1;
+ KeInitializeDpc(&dpc, NULL, NULL);
+ KeSetTargetProcessorDpc(&dpc, i);
+ KeInsertQueueDpc(&dpc, NULL, NULL);
+
+ KeWaitForSingleObject((nt_dispatch_header *)&kq->kq_dead,
+ 0, 0, TRUE, NULL);
+ }
+
+ return;
+}
+
+static uint8_t
+ntoskrnl_insert_dpc(head, dpc)
+ list_entry *head;
+ kdpc *dpc;
+{
+ list_entry *l;
+ kdpc *d;
+
+ l = head->nle_flink;
+ while (l != head) {
+ d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
+ if (d == dpc)
+ return(FALSE);
+ l = l->nle_flink;
+ }
+
+ INSERT_LIST_TAIL((head), (&dpc->k_dpclistentry));
+ return (TRUE);
+}
+
void
KeInitializeDpc(dpc, dpcfunc, dpcctx)
kdpc *dpc;
@@ -2546,6 +2989,15 @@ KeInitializeDpc(dpc, dpcfunc, dpcctx)
dpc->k_deferedfunc = dpcfunc;
dpc->k_deferredctx = dpcctx;
+ dpc->k_num = KDPC_CPU_DEFAULT;
+ dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
+ dpc->k_num = KeGetCurrentProcessorNumber();
+ /*
+ * In case someone tries to dequeue a DPC that
+ * hasn't been queued yet.
+ */
+ dpc->k_lock = NULL /*&ntoskrnl_dispatchlock*/;
+ INIT_LIST_HEAD((&dpc->k_dpclistentry));
return;
}
@@ -2556,25 +3008,123 @@ KeInsertQueueDpc(dpc, sysarg1, sysarg2)
void *sysarg1;
void *sysarg2;
{
+ kdpc_queue *kq;
+ uint8_t r;
+ int state;
+
+ if (dpc == NULL)
+ return(FALSE);
+
dpc->k_sysarg1 = sysarg1;
dpc->k_sysarg2 = sysarg2;
- if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
- return(FALSE);
+ /*
+ * By default, the DPC is queued to run on the same CPU
+ * that scheduled it.
+ */
- return(TRUE);
+ kq = kq_queues;
+ if (dpc->k_num == KDPC_CPU_DEFAULT)
+ kq += curthread->td_oncpu;
+ else
+ kq += dpc->k_num;
+
+ /*
+ * Also by default, we put the DPC on the medium
+ * priority queue.
+ */
+
+ mtx_lock_spin(&kq->kq_lock);
+ if (dpc->k_importance == KDPC_IMPORTANCE_HIGH)
+ r = ntoskrnl_insert_dpc(&kq->kq_high, dpc);
+ else if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
+ r = ntoskrnl_insert_dpc(&kq->kq_low, dpc);
+ else
+ r = ntoskrnl_insert_dpc(&kq->kq_med, dpc);
+ dpc->k_lock = &kq->kq_lock;
+ state = kq->kq_state;
+ mtx_unlock_spin(&kq->kq_lock);
+ if (r == TRUE && state == NDIS_PSTATE_SLEEPING)
+ KeSetEvent(&kq->kq_proc, 0, FALSE);
+
+ return(r);
}
uint8_t
KeRemoveQueueDpc(dpc)
kdpc *dpc;
{
- if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
+ if (dpc == NULL)
+ return(FALSE);
+
+ if (dpc->k_lock == NULL)
+ return(FALSE);
+ mtx_lock_spin(dpc->k_lock);
+ if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
+ mtx_unlock_spin(dpc->k_lock);
return(FALSE);
+ }
+
+ REMOVE_LIST_ENTRY((&dpc->k_dpclistentry));
+ mtx_unlock_spin(dpc->k_lock);
return(TRUE);
}
+void
+KeSetImportanceDpc(dpc, imp)
+ kdpc *dpc;
+ uint32_t imp;
+{
+ if (imp != KDPC_IMPORTANCE_LOW &&
+ imp != KDPC_IMPORTANCE_MEDIUM &&
+ imp != KDPC_IMPORTANCE_HIGH)
+ return;
+
+ dpc->k_importance = (uint8_t)imp;
+ return;
+}
+
+void
+KeSetTargetProcessorDpc(dpc, cpu)
+ kdpc *dpc;
+ uint8_t cpu;
+{
+ if (cpu > mp_ncpus)
+ return;
+
+ dpc->k_num = cpu;
+ return;
+}
+
+void
+KeFlushQueuedDpcs(void)
+{
+ kdpc_queue *kq;
+ int i;
+
+ /*
+ * Poke each DPC queue and wait
+ * for them to drain.
+ */
+
+ kq = kq_queues;
+ for (i = 0; i < mp_ncpus; i++) {
+ kq += i;
+ KeSetEvent(&kq->kq_proc, 0, FALSE);
+ KeWaitForSingleObject((nt_dispatch_header *)&kq->kq_done,
+ 0, 0, TRUE, NULL);
+ }
+
+ return;
+}
+
+uint32_t
+KeGetCurrentProcessorNumber(void)
+{
+ return((uint32_t)curthread->td_oncpu);
+}
+
uint8_t
KeSetTimerEx(timer, duetime, period, dpc)
ktimer *timer;
@@ -2648,6 +3198,8 @@ KeCancelTimer(timer)
if (timer->k_header.dh_inserted == TRUE) {
untimeout(ntoskrnl_timercall, timer, timer->k_handle);
+ if (timer->k_dpc != NULL)
+ KeRemoveQueueDpc(timer->k_dpc);
pending = TRUE;
} else
pending = KeRemoveQueueDpc(timer->k_dpc);
@@ -2695,6 +3247,8 @@ image_patch_table ntoskrnl_functbl[] = {
IMPORT_CFUNC(strncpy, 0),
IMPORT_CFUNC(strcpy, 0),
IMPORT_CFUNC(strlen, 0),
+ IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
+ IMPORT_CFUNC_MAP(strchr, index, 0),
IMPORT_CFUNC(memcpy, 0),
IMPORT_CFUNC_MAP(memmove, ntoskrnl_memset, 0),
IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
@@ -2783,9 +3337,15 @@ image_patch_table ntoskrnl_functbl[] = {
IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
IMPORT_SFUNC(MmUnmapLockedPages, 2),
IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
+ IMPORT_SFUNC(MmIsAddressValid, 1),
IMPORT_SFUNC(KeInitializeSpinLock, 1),
IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
IMPORT_SFUNC(IoGetDeviceProperty, 5),
+ IMPORT_SFUNC(IoAllocateWorkItem, 1),
+ IMPORT_SFUNC(IoFreeWorkItem, 1),
+ IMPORT_SFUNC(IoQueueWorkItem, 4),
+ IMPORT_SFUNC(ExQueueWorkItem, 2),
+ IMPORT_SFUNC(ntoskrnl_workitem, 2),
IMPORT_SFUNC(KeInitializeMutex, 2),
IMPORT_SFUNC(KeReleaseMutex, 2),
IMPORT_SFUNC(KeReadStateMutex, 1),
@@ -2803,6 +3363,10 @@ image_patch_table ntoskrnl_functbl[] = {
IMPORT_SFUNC(KeInitializeDpc, 3),
IMPORT_SFUNC(KeInsertQueueDpc, 3),
IMPORT_SFUNC(KeRemoveQueueDpc, 1),
+ IMPORT_SFUNC(KeSetImportanceDpc, 2),
+ IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
+ IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
+ IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
IMPORT_FFUNC(ObfDereferenceObject, 1),
IMPORT_SFUNC(ZwClose, 1),
@@ -2816,7 +3380,7 @@ image_patch_table ntoskrnl_functbl[] = {
* in this table.
*/
- { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_CDECL },
+ { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
/* End of list. */
diff --git a/sys/compat/ndis/subr_usbd.c b/sys/compat/ndis/subr_usbd.c
index 8df78a4..3aa322e 100644
--- a/sys/compat/ndis/subr_usbd.c
+++ b/sys/compat/ndis/subr_usbd.c
@@ -149,7 +149,7 @@ image_patch_table usbd_functbl[] = {
* in this table.
*/
- { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_CDECL },
+ { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
/* End of list. */
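Finally, a sketch (not part of the commit) contrasting the two work item interfaces implemented above. In a real module the callbacks are windrv-wrapped funcptrs, as subr_ndis.c does for ndis_asyncmem_complete() via ndis_findwrap(); the raw C pointers below stand in for those, and every example_* name is invented.

static void
example_io_func(dobj, ctx)
	device_object		*dobj;
	void			*ctx;
{
	io_workitem		*iw;

	iw = ctx;		/* the work item was passed as context */

	/* ... do the deferred work on the "Windows WorkItem" kthread ... */

	/* Releasing from inside the callback is safe. */
	IoFreeWorkItem(iw);
	return;
}

static void
example_io_usage(fdo)
	device_object		*fdo;
{
	io_workitem		*iw;

	/* New-style interface: the caller owns allocation and release. */
	iw = IoAllocateWorkItem(fdo);
	if (iw == NULL)
		return;
	IoQueueWorkItem(iw, example_io_func, WORKQUEUE_DELAYED, iw);
	return;
}

static void
example_ndis_func(w, ctx)
	ndis_work_item		*w;
	void			*ctx;
{
	/* Legacy NDIS callback; 'w' is the caller-supplied work item. */
	return;
}

static ndis_work_item example_wi;

static void
example_ndis_usage(ctx)
	void			*ctx;
{
	/*
	 * Legacy path: the work item lives in caller storage, and
	 * NdisScheduleWorkItem() now rides on ExQueueWorkItem(), which
	 * borrows an io_workitem from iw_zone under the hood.
	 */
	NdisInitializeWorkItem(&example_wi, example_ndis_func, ctx);
	NdisScheduleWorkItem(&example_wi);
	return;
}

Passing the io_workitem itself as the callback context mirrors what ExQueueWorkItem() does internally, and freeing it from inside the callback is safe because the workitem thread does not touch the item after invoking it.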