author     wpaul <wpaul@FreeBSD.org>    2005-10-18 19:52:15 +0000
committer  wpaul <wpaul@FreeBSD.org>    2005-10-18 19:52:15 +0000
commit     81737fff083ffbd9a4647cdb650c88e3b2ddfb0b (patch)
tree       eb7542dc4f8dcba11ed0c854bba191337a85c784 /sys/compat
parent     71b612fc9b17c019175bbd48e522b6d5079418d7 (diff)
Another round of cleanups and fixes:
- Change ndis_return() from a DPC to a workitem so that it doesn't run at DISPATCH_LEVEL (with the dispatcher lock held).
- In if_ndis.c, submit packets to the stack via (*ifp->if_input)() in a workitem instead of doing it directly in ndis_rxeof(), because ndis_rxeof() runs in a DPC, and hence at DISPATCH_LEVEL. This implies that the 'dispatch level' mutex for the current CPU is being held, and we don't want to call if_input while holding any locks.
- Reimplement IoConnectInterrupt()/IoDisconnectInterrupt(). The original approach I used to track down the interrupt resource (by scanning the device tree starting at the nexus) is prone to problems when two devices share an interrupt. (E.g removing ndis1 might disable interrupts for ndis0.) The new approach is to multiplex all the NDIS interrupts through a common internal dispatcher (ntoskrnl_intr()) and allow IoConnectInterrupt()/IoDisconnectInterrupt() to add or remove interrupts from the dispatch list.
- Implement KeAcquireInterruptSpinLock() and KeReleaseInterruptSpinLock().
- Change the DPC and workitem threads to use the KeXXXSpinLock API instead of mtx_lock_spin()/mtx_unlock_spin().
- Simplify the NdisXXXPacket routines by creating an actual packet pool structure and using the InterlockedSList routines to manage the packet queue.
- Only honor the value returned by OID_GEN_MAXIMUM_SEND_PACKETS for serialized drivers. For deserialized drivers, we now create a packet array of 64 entries. (The Microsoft DDK documentation says that for deserialized miniports, OID_GEN_MAXIMUM_SEND_PACKETS is ignored, and the driver for the Marvell 8335 chip, which is a deserialized miniport, returns 1 when queried.)
- Clean up timer handling in subr_ntoskrnl.
- Add the following conditional debugging code:
  NTOSKRNL_DEBUG_TIMERS - add debugging and stats for timers
  NDIS_DEBUG_PACKETS - add extra sanity checking for NdisXXXPacket API
  NTOSKRNL_DEBUG_SPINLOCKS - add test for spinning too long
- In kern_ndis.c, always start the HAL first and shut it down last, since Windows spinlocks depend on it. Ntoskrnl should similarly be started second and shut down next to last.
Diffstat (limited to 'sys/compat')
-rw-r--r--   sys/compat/ndis/kern_ndis.c      118
-rw-r--r--   sys/compat/ndis/ndis_var.h        22
-rw-r--r--   sys/compat/ndis/ntoskrnl_var.h    12
-rw-r--r--   sys/compat/ndis/subr_ndis.c      185
-rw-r--r--   sys/compat/ndis/subr_ntoskrnl.c  601
5 files changed, 545 insertions, 393 deletions
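
The log message above describes funneling every NDIS interrupt through one internal dispatcher instead of wiring each kinterrupt to bus_setup_intr() individually. Below is only a minimal sketch of that pattern, with hypothetical names (isr_entry, isr_register, isr_dispatch) and the locking reduced to comments; the real implementation in subr_ntoskrnl.c further down uses kinterrupt, ki_list, ntoskrnl_intlist, ntoskrnl_intlock and MSCALL1().

/*
 * Simplified model of the shared-interrupt dispatch list. Every device's
 * bus interrupt is pointed at isr_dispatch(), which fans the interrupt
 * out to all registered Windows ISRs.
 */
struct isr_entry {
	struct isr_entry	*next;
	void			(*func)(void *);	/* driver's ISR */
	void			*ctx;			/* driver's ISR context */
};

static struct isr_entry	*isr_list;	/* guarded by one global spinlock */

/* IoConnectInterrupt() reduces to linking the ISR onto the list... */
static void
isr_register(struct isr_entry *e)
{
	/* acquire global interrupt spinlock */
	e->next = isr_list;
	isr_list = e;
	/* release global interrupt spinlock */
}

/* ...and IoDisconnectInterrupt() to unlinking it again. */

/* Common handler installed with bus_setup_intr() for every NDIS device. */
static void
isr_dispatch(void *arg)
{
	struct isr_entry *e;

	(void)arg;			/* one handler serves all devices */
	/* acquire global interrupt spinlock */
	for (e = isr_list; e != NULL; e = e->next)
		e->func(e->ctx);
	/* release global interrupt spinlock */
}

Because every registered ISR is called on every interrupt, the scheme behaves as if all interrupts were shared, which is why kern_ndis.c below now marks interrupt resources as CmResourceShareShared.
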
diff --git a/sys/compat/ndis/kern_ndis.c b/sys/compat/ndis/kern_ndis.c
index a3fd8309..a5754b6 100644
--- a/sys/compat/ndis/kern_ndis.c
+++ b/sys/compat/ndis/kern_ndis.c
@@ -84,7 +84,7 @@ static void ndis_resetdone_func(ndis_handle, ndis_status, uint8_t);
static void ndis_sendrsrcavail_func(ndis_handle);
static void ndis_intrsetup(kdpc *, device_object *,
irp *, struct ndis_softc *);
-static void ndis_return(kdpc *, void *, void *, void *);
+static void ndis_return(device_object *, void *);
static image_patch_table kernndis_functbl[] = {
IMPORT_SFUNC(ndis_status_func, 4),
@@ -106,6 +106,18 @@ static struct nd_head ndis_devhead;
* Note that we call ourselves 'ndisapi' to avoid a namespace
* collision with if_ndis.ko, which internally calls itself
* 'ndis.'
+ *
+ * Note: some of the subsystems depend on each other, so the
+ * order in which they're started is important. The order of
+ * importance is:
+ *
+ * HAL - spinlocks and IRQL manipulation
+ * ntoskrnl - DPC and workitem threads, object waiting
+ * windrv - driver/device registration
+ *
+ * The HAL should also be the last thing shut down, since
+ * the ntoskrnl subsystem will use spinlocks right up until
+ * the DPC and workitem threads are terminated.
*/
static int
@@ -117,10 +129,10 @@ ndis_modevent(module_t mod, int cmd, void *arg)
switch (cmd) {
case MOD_LOAD:
/* Initialize subsystems */
- windrv_libinit();
hal_libinit();
- ndis_libinit();
ntoskrnl_libinit();
+ windrv_libinit();
+ ndis_libinit();
usbd_libinit();
patch = kernndis_functbl;
@@ -137,11 +149,11 @@ ndis_modevent(module_t mod, int cmd, void *arg)
case MOD_SHUTDOWN:
if (TAILQ_FIRST(&ndis_devhead) == NULL) {
/* Shut down subsystems */
- hal_libfini();
ndis_libfini();
- ntoskrnl_libfini();
usbd_libfini();
windrv_libfini();
+ ntoskrnl_libfini();
+ hal_libfini();
patch = kernndis_functbl;
while (patch->ipt_func != NULL) {
@@ -152,11 +164,11 @@ ndis_modevent(module_t mod, int cmd, void *arg)
break;
case MOD_UNLOAD:
/* Shut down subsystems */
- hal_libfini();
ndis_libfini();
- ntoskrnl_libfini();
usbd_libfini();
windrv_libfini();
+ ntoskrnl_libfini();
+ hal_libfini();
patch = kernndis_functbl;
while (patch->ipt_func != NULL) {
@@ -441,32 +453,39 @@ ndis_flush_sysctls(arg)
}
static void
-ndis_return(dpc, arg, sysarg1, sysarg2)
- kdpc *dpc;
+ndis_return(dobj, arg)
+ device_object *dobj;
void *arg;
- void *sysarg1;
- void *sysarg2;
{
- struct ndis_softc *sc;
+ ndis_miniport_block *block;
+ ndis_miniport_characteristics *ch;
ndis_return_handler returnfunc;
ndis_handle adapter;
ndis_packet *p;
uint8_t irql;
+ list_entry *l;
+
+ block = arg;
+ ch = IoGetDriverObjectExtension(dobj->do_drvobj, (void *)1);
p = arg;
- sc = p->np_softc;
- adapter = sc->ndis_block->nmb_miniportadapterctx;
+ adapter = block->nmb_miniportadapterctx;
if (adapter == NULL)
return;
- returnfunc = sc->ndis_chars->nmc_return_packet_func;
+ returnfunc = ch->nmc_return_packet_func;
- if (NDIS_SERIALIZED(sc->ndis_block))
- KeAcquireSpinLock(&sc->ndis_block->nmb_lock, &irql);
- MSCALL2(returnfunc, adapter, p);
- if (NDIS_SERIALIZED(sc->ndis_block))
- KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql);
+ KeAcquireSpinLock(&block->nmb_returnlock, &irql);
+ while (!IsListEmpty(&block->nmb_returnlist)) {
+ l = RemoveHeadList((&block->nmb_returnlist));
+ p = CONTAINING_RECORD(l, ndis_packet, np_list);
+ InitializeListHead((&p->np_list));
+ KeReleaseSpinLock(&block->nmb_returnlock, irql);
+ MSCALL2(returnfunc, adapter, p);
+ KeAcquireSpinLock(&block->nmb_returnlock, &irql);
+ }
+ KeReleaseSpinLock(&block->nmb_returnlock, irql);
return;
}
@@ -477,6 +496,7 @@ ndis_return_packet(buf, arg)
void *arg;
{
ndis_packet *p;
+ ndis_miniport_block *block;
if (arg == NULL)
return;
@@ -490,8 +510,16 @@ ndis_return_packet(buf, arg)
if (p->np_refcnt)
return;
- KeInitializeDpc(&p->np_dpc, kernndis_functbl[7].ipt_wrap, p);
- KeInsertQueueDpc(&p->np_dpc, NULL, NULL);
+ block = ((struct ndis_softc *)p->np_softc)->ndis_block;
+
+ KeAcquireSpinLockAtDpcLevel(&block->nmb_returnlock);
+ InitializeListHead((&p->np_list));
+ InsertHeadList((&block->nmb_returnlist), (&p->np_list));
+ KeReleaseSpinLockFromDpcLevel(&block->nmb_returnlock);
+
+ IoQueueWorkItem(block->nmb_returnitem,
+ (io_workitem_func)kernndis_functbl[7].ipt_wrap,
+ WORKQUEUE_CRITICAL, block);
return;
}
@@ -621,8 +649,13 @@ ndis_convert_res(arg)
case SYS_RES_IRQ:
prd->cprd_type = CmResourceTypeInterrupt;
prd->cprd_flags = 0;
+ /*
+ * Always mark interrupt resources as
+ * shared, since in our implementation,
+ * they will be.
+ */
prd->cprd_sharedisp =
- CmResourceShareDeviceExclusive;
+ CmResourceShareShared;
prd->u.cprd_intr.cprd_level = brle->start;
prd->u.cprd_intr.cprd_vector = brle->start;
prd->u.cprd_intr.cprd_affinity = 0;
@@ -1087,8 +1120,12 @@ ndis_halt_nic(arg)
#ifdef NDIS_REAP_TIMERS
ndis_miniport_timer *t, *n;
#endif
+ ndis_miniport_block *block;
+ int empty = 0;
+ uint8_t irql;
sc = arg;
+ block = sc->ndis_block;
#ifdef NDIS_REAP_TIMERS
/*
@@ -1111,6 +1148,19 @@ ndis_halt_nic(arg)
if (!cold)
KeFlushQueuedDpcs();
+ /*
+ * Wait for all packets to be returned.
+ */
+
+ while (1) {
+ KeAcquireSpinLock(&block->nmb_returnlock, &irql);
+ empty = IsListEmpty(&block->nmb_returnlist);
+ KeReleaseSpinLock(&block->nmb_returnlock, irql);
+ if (empty)
+ break;
+ NdisMSleep(1000);
+ }
+
NDIS_LOCK(sc);
adapter = sc->ndis_block->nmb_miniportadapterctx;
if (adapter == NULL) {
@@ -1398,6 +1448,17 @@ NdisAddDevice(drv, pdo)
ndis_miniport_block *block;
struct ndis_softc *sc;
uint32_t status;
+ int error;
+
+ sc = device_get_softc(pdo->do_devext);
+
+ if (sc->ndis_iftype == PCMCIABus || sc->ndis_iftype == PCIBus) {
+ error = bus_setup_intr(sc->ndis_dev, sc->ndis_irq,
+ INTR_TYPE_NET | INTR_MPSAFE,
+ ntoskrnl_intr, NULL, &sc->ndis_intrhand);
+ if (error)
+ return(NDIS_STATUS_FAILURE);
+ }
status = IoCreateDevice(drv, sizeof(ndis_miniport_block), NULL,
FILE_DEVICE_UNKNOWN, 0, FALSE, &fdo);
@@ -1412,17 +1473,19 @@ NdisAddDevice(drv, pdo)
block->nmb_physdeviceobj = pdo;
block->nmb_nextdeviceobj = IoAttachDeviceToDeviceStack(fdo, pdo);
KeInitializeSpinLock(&block->nmb_lock);
- InitializeListHead(&block->nmb_parmlist);
+ KeInitializeSpinLock(&block->nmb_returnlock);
KeInitializeEvent(&block->nmb_getevent, EVENT_TYPE_NOTIFY, TRUE);
KeInitializeEvent(&block->nmb_setevent, EVENT_TYPE_NOTIFY, TRUE);
KeInitializeEvent(&block->nmb_resetevent, EVENT_TYPE_NOTIFY, TRUE);
+ InitializeListHead(&block->nmb_parmlist);
+ InitializeListHead(&block->nmb_returnlist);
+ block->nmb_returnitem = IoAllocateWorkItem(fdo);
/*
* Stash pointers to the miniport block and miniport
* characteristics info in the if_ndis softc so the
* UNIX wrapper driver can get to them later.
*/
- sc = device_get_softc(pdo->do_devext);
sc->ndis_block = block;
sc->ndis_chars = IoGetDriverObjectExtension(drv, (void *)1);
@@ -1471,6 +1534,10 @@ ndis_unload_driver(arg)
sc = arg;
+ if (sc->ndis_intrhand)
+ bus_teardown_intr(sc->ndis_dev,
+ sc->ndis_irq, sc->ndis_intrhand);
+
if (sc->ndis_block->nmb_rlist != NULL)
free(sc->ndis_block->nmb_rlist, M_DEVBUF);
@@ -1481,6 +1548,7 @@ ndis_unload_driver(arg)
if (sc->ndis_chars->nmc_transferdata_func != NULL)
NdisFreePacketPool(sc->ndis_block->nmb_rxpool);
fdo = sc->ndis_block->nmb_deviceobj;
+ IoFreeWorkItem(sc->ndis_block->nmb_returnitem);
IoDetachDevice(sc->ndis_block->nmb_nextdeviceobj);
IoDeleteDevice(fdo);
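
The kern_ndis.c changes above split packet return into a cheap enqueue done at DPC level and a deferred drain done from a workitem thread, so the miniport's return-packet handler is never entered while a dispatcher (DISPATCH_LEVEL) lock is held. A rough sketch of that shape follows, with hypothetical names and the locking reduced to comments; the real code uses nmb_returnlist/nmb_returnlock, IoQueueWorkItem() and ndis_return().

struct pkt {
	struct pkt	*next;
};

static struct pkt	*return_list;	/* guarded by a per-adapter spinlock */

/* Runs in DPC context when the stack frees an mbuf: just queue and go. */
static void
pkt_return_enqueue(struct pkt *p)
{
	/* acquire return lock (already at DPC level) */
	p->next = return_list;
	return_list = p;
	/* release return lock */
	/* queue a workitem that will run pkt_return_drain() */
}

/* Runs later from a workitem thread, outside any dispatcher lock. */
static void
pkt_return_drain(void (*miniport_return)(struct pkt *))
{
	struct pkt *p;

	/* acquire return lock */
	while ((p = return_list) != NULL) {
		return_list = p->next;
		/* release return lock */
		miniport_return(p);	/* safe: not at DISPATCH_LEVEL */
		/* reacquire return lock */
	}
	/* release return lock */
}
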
diff --git a/sys/compat/ndis/ndis_var.h b/sys/compat/ndis/ndis_var.h
index 5a3d525..6f3772d 100644
--- a/sys/compat/ndis/ndis_var.h
+++ b/sys/compat/ndis/ndis_var.h
@@ -343,6 +343,7 @@ typedef uint8_t ndis_kirql;
#define NDIS_80211_NETTYPE_11DS 0x00000001
#define NDIS_80211_NETTYPE_11OFDM5 0x00000002
#define NDIS_80211_NETTYPE_11OFDM24 0x00000003
+#define NDIS_80211_NETTYPE_AUTO 0x00000004
struct ndis_80211_nettype_list {
uint32_t ntl_items;
@@ -1312,12 +1313,24 @@ struct ndis_packet {
void *np_softc;
void *np_m0;
int np_txidx;
- kdpc np_dpc;
- kspin_lock np_lock;
+ list_entry np_list;
};
typedef struct ndis_packet ndis_packet;
+struct ndis_packet_pool {
+ slist_header np_head;
+ int np_dead;
+ nt_kevent np_event;
+ kspin_lock np_lock;
+ int np_cnt;
+ int np_len;
+ int np_protrsvd;
+ void *np_pktmem;
+};
+
+typedef struct ndis_packet_pool ndis_packet_pool;
+
/* mbuf ext type for NDIS */
#define EXT_NDIS 0x999
@@ -1617,8 +1630,11 @@ struct ndis_miniport_block {
ndis_status nmb_setstat;
nt_kevent nmb_setevent;
nt_kevent nmb_resetevent;
+ io_workitem *nmb_returnitem;
ndis_miniport_timer *nmb_timerlist;
ndis_handle nmb_rxpool;
+ list_entry nmb_returnlist;
+ kspin_lock nmb_returnlock;
TAILQ_ENTRY(ndis_miniport_block) link;
};
@@ -1747,7 +1763,7 @@ extern void NdisAllocatePacket(ndis_status *,
ndis_packet **, ndis_handle);
extern void NdisFreePacket(ndis_packet *);
extern ndis_status NdisScheduleWorkItem(ndis_work_item *);
-
+extern void NdisMSleep(uint32_t);
__END_DECLS
#endif /* _NDIS_VAR_H_ */
diff --git a/sys/compat/ndis/ntoskrnl_var.h b/sys/compat/ndis/ntoskrnl_var.h
index f954c14..5ec1f54 100644
--- a/sys/compat/ndis/ntoskrnl_var.h
+++ b/sys/compat/ndis/ntoskrnl_var.h
@@ -35,6 +35,8 @@
#ifndef _NTOSKRNL_VAR_H_
#define _NTOSKRNL_VAR_H_
+#define MTX_NTOSKRNL_SPIN_LOCK "NDIS thread lock"
+
/*
* us_buf is really a wchar_t *, but it's inconvenient to include
* all the necessary header goop needed to define it, and it's a
@@ -573,7 +575,9 @@ typedef struct custom_extension custom_extension;
*/
struct kinterrupt {
+ list_entry ki_list;
device_t ki_dev;
+ int ki_rid;
void *ki_cookie;
struct resource *ki_irq;
kspin_lock ki_lock_priv;
@@ -1304,6 +1308,12 @@ extern void ctxsw_wtou(void);
extern int ntoskrnl_libinit(void);
extern int ntoskrnl_libfini(void);
+extern void ntoskrnl_intr(void *);
+
+extern uint16_t ExQueryDepthSList(slist_header *);
+extern slist_entry
+ *InterlockedPushEntrySList(slist_header *, slist_entry *);
+extern slist_entry *InterlockedPopEntrySList(slist_header *);
extern uint32_t RtlUnicodeStringToAnsiString(ansi_string *,
unicode_string *, uint8_t);
extern uint32_t RtlAnsiStringToUnicodeString(unicode_string *,
@@ -1342,6 +1352,8 @@ extern void KeAcquireSpinLockAtDpcLevel(kspin_lock *);
extern void KeReleaseSpinLockFromDpcLevel(kspin_lock *);
#endif
extern void KeInitializeSpinLock(kspin_lock *);
+extern uint8_t KeAcquireInterruptSpinLock(kinterrupt *);
+extern void KeReleaseInterruptSpinLock(kinterrupt *, uint8_t);
extern uint8_t KeSynchronizeExecution(kinterrupt *, void *, void *);
extern uintptr_t InterlockedExchange(volatile uint32_t *,
uintptr_t);
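
KeAcquireInterruptSpinLock() and KeReleaseInterruptSpinLock(), declared above, let a driver synchronize with its ISR now that interrupt delivery is multiplexed. A short usage sketch, assuming the ntoskrnl_var.h definitions are in scope; the helper name and the kinterrupt argument are illustrative only, and in this implementation both calls operate on the single global interrupt spinlock rather than a per-object lock.

static void
touch_isr_shared_state(kinterrupt *intr)
{
	uint8_t irql;

	irql = KeAcquireInterruptSpinLock(intr);	/* spins for the lock, returns old IRQL */
	/* ... modify state also touched by the interrupt service routine ... */
	KeReleaseInterruptSpinLock(intr, irql);		/* drops back to the saved IRQL */
}
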
diff --git a/sys/compat/ndis/subr_ndis.c b/sys/compat/ndis/subr_ndis.c
index 7813515..8e4d640 100644
--- a/sys/compat/ndis/subr_ndis.c
+++ b/sys/compat/ndis/subr_ndis.c
@@ -112,7 +112,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_map.h>
static char ndis_filepath[MAXPATHLEN];
-extern struct nd_head ndis_devhead;
SYSCTL_STRING(_hw, OID_AUTO, ndis_filepath, CTLFLAG_RW, ndis_filepath,
MAXPATHLEN, "Path used by NdisOpenFile() to search for files");
@@ -238,7 +237,6 @@ static void NdisGetBufferPhysicalArraySize(ndis_buffer *,
uint32_t *);
static void NdisQueryBufferOffset(ndis_buffer *,
uint32_t *, uint32_t *);
-static void NdisMSleep(uint32_t);
static uint32_t NdisReadPcmciaAttributeMemory(ndis_handle,
uint32_t, void *, uint32_t);
static uint32_t NdisWritePcmciaAttributeMemory(ndis_handle,
@@ -421,7 +419,6 @@ NdisMRegisterMiniport(handle, characteristics, len)
if (IoAllocateDriverObjectExtension(drv, (void *)1,
sizeof(ndis_miniport_characteristics), (void **)&ch) !=
STATUS_SUCCESS) {
- printf("register error\n");
return(NDIS_STATUS_RESOURCES);
}
@@ -1846,27 +1843,43 @@ NdisAllocatePacketPool(status, pool, descnum, protrsvdlen)
uint32_t descnum;
uint32_t protrsvdlen;
{
- ndis_packet *cur;
+ ndis_packet_pool *p;
+ ndis_packet *packets;
int i;
- *pool = malloc((sizeof(ndis_packet) + protrsvdlen) *
- ((descnum + NDIS_POOL_EXTRA) + 1),
- M_DEVBUF, M_NOWAIT|M_ZERO);
-
- if (*pool == NULL) {
+ p = ExAllocatePoolWithTag(NonPagedPool, sizeof(ndis_packet_pool), 0);
+ if (p == NULL) {
*status = NDIS_STATUS_RESOURCES;
return;
}
- cur = (ndis_packet *)*pool;
- KeInitializeSpinLock(&cur->np_lock);
- cur->np_private.npp_flags = 0x1; /* mark the head of the list */
- cur->np_private.npp_totlen = 0; /* init deletetion flag */
- for (i = 0; i < (descnum + NDIS_POOL_EXTRA); i++) {
- cur->np_private.npp_head = (ndis_handle)(cur + 1);
- cur++;
+ p->np_cnt = descnum + NDIS_POOL_EXTRA;
+ p->np_protrsvd = protrsvdlen;
+ p->np_len = sizeof(ndis_packet) + protrsvdlen;
+
+ packets = ExAllocatePoolWithTag(NonPagedPool, p->np_cnt *
+ p->np_len, 0);
+
+
+ if (packets == NULL) {
+ ExFreePool(p);
+ *status = NDIS_STATUS_RESOURCES;
+ return;
}
+ p->np_pktmem = packets;
+
+ for (i = 0; i < p->np_cnt; i++)
+ InterlockedPushEntrySList(&p->np_head,
+ (struct slist_entry *)&packets[i]);
+
+#ifdef NDIS_DEBUG_PACKETS
+ p->np_dead = 0;
+ KeInitializeSpinLock(&p->np_lock);
+ KeInitializeEvent(&p->np_event, EVENT_TYPE_NOTIFY, TRUE);
+#endif
+
+ *pool = p;
*status = NDIS_STATUS_SUCCESS;
return;
}
@@ -1887,41 +1900,42 @@ uint32_t
NdisPacketPoolUsage(pool)
ndis_handle pool;
{
- ndis_packet *head;
- uint8_t irql;
- uint32_t cnt;
+ ndis_packet_pool *p;
- head = (ndis_packet *)pool;
- KeAcquireSpinLock(&head->np_lock, &irql);
- cnt = head->np_private.npp_count;
- KeReleaseSpinLock(&head->np_lock, irql);
-
- return(cnt);
+ p = (ndis_packet_pool *)pool;
+ return(p->np_cnt - ExQueryDepthSList(&p->np_head));
}
void
NdisFreePacketPool(pool)
ndis_handle pool;
{
- ndis_packet *head;
+ ndis_packet_pool *p;
+ int usage;
+#ifdef NDIS_DEBUG_PACKETS
uint8_t irql;
+#endif
- head = pool;
+ p = (ndis_packet_pool *)pool;
- /* Mark this pool as 'going away.' */
+#ifdef NDIS_DEBUG_PACKETS
+ KeAcquireSpinLock(&p->np_lock, &irql);
+#endif
- KeAcquireSpinLock(&head->np_lock, &irql);
- head->np_private.npp_totlen = 1;
+ usage = NdisPacketPoolUsage(pool);
- /* If there are no buffers loaned out, destroy the pool. */
+#ifdef NDIS_DEBUG_PACKETS
+ if (usage) {
+ p->np_dead = 1;
+ KeResetEvent(&p->np_event);
+ KeReleaseSpinLock(&p->np_lock, irql);
+ KeWaitForSingleObject(&p->np_event, 0, 0, FALSE, NULL);
+ } else
+ KeReleaseSpinLock(&p->np_lock, irql);
+#endif
- if (head->np_private.npp_count == 0) {
- KeReleaseSpinLock(&head->np_lock, irql);
- free(pool, M_DEVBUF);
- } else {
- printf("NDIS: buggy driver deleting active packet pool!\n");
- KeReleaseSpinLock(&head->np_lock, irql);
- }
+ ExFreePool(p->np_pktmem);
+ ExFreePool(p);
return;
}
@@ -1932,42 +1946,41 @@ NdisAllocatePacket(status, packet, pool)
ndis_packet **packet;
ndis_handle pool;
{
- ndis_packet *head, *pkt;
+ ndis_packet_pool *p;
+ ndis_packet *pkt;
+#ifdef NDIS_DEBUG_PACKETS
uint8_t irql;
+#endif
- head = (ndis_packet *)pool;
- KeAcquireSpinLock(&head->np_lock, &irql);
+ p = (ndis_packet_pool *)pool;
- if (head->np_private.npp_flags != 0x1) {
- *status = NDIS_STATUS_FAILURE;
- KeReleaseSpinLock(&head->np_lock, irql);
+#ifdef NDIS_DEBUG_PACKETS
+ KeAcquireSpinLock(&p->np_lock, &irql);
+ if (p->np_dead) {
+ KeReleaseSpinLock(&p->np_lock, irql);
+ printf("NDIS: tried to allocate packet from dead pool %p\n",
+ pool);
+ *status = NDIS_STATUS_RESOURCES;
return;
}
+#endif
- /*
- * If this pool is marked as 'going away' don't allocate any
- * more packets out of it.
- */
-
- if (head->np_private.npp_totlen) {
- *status = NDIS_STATUS_FAILURE;
- KeReleaseSpinLock(&head->np_lock, irql);
- return;
- }
+ pkt = (ndis_packet *)InterlockedPopEntrySList(&p->np_head);
- pkt = (ndis_packet *)head->np_private.npp_head;
+#ifdef NDIS_DEBUG_PACKETS
+ KeReleaseSpinLock(&p->np_lock, irql);
+#endif
if (pkt == NULL) {
*status = NDIS_STATUS_RESOURCES;
- KeReleaseSpinLock(&head->np_lock, irql);
return;
}
- head->np_private.npp_head = pkt->np_private.npp_head;
- pkt->np_private.npp_head = pkt->np_private.npp_tail = NULL;
+ bzero((char *)pkt, sizeof(ndis_packet));
+
/* Save pointer to the pool. */
- pkt->np_private.npp_pool = head;
+ pkt->np_private.npp_pool = pool;
/* Set the oob offset pointer. Lots of things expect this. */
pkt->np_private.npp_packetooboffset = offsetof(ndis_packet, np_oob);
@@ -1983,11 +1996,8 @@ NdisAllocatePacket(status, packet, pool)
*packet = pkt;
- head->np_private.npp_count++;
*status = NDIS_STATUS_SUCCESS;
- KeReleaseSpinLock(&head->np_lock, irql);
-
return;
}
@@ -1995,34 +2005,26 @@ void
NdisFreePacket(packet)
ndis_packet *packet;
{
- ndis_packet *head;
+ ndis_packet_pool *p;
+#ifdef NDIS_DEBUG_PACKETS
uint8_t irql;
+#endif
- if (packet == NULL || packet->np_private.npp_pool == NULL)
- return;
-
- head = packet->np_private.npp_pool;
- KeAcquireSpinLock(&head->np_lock, &irql);
+ p = (ndis_packet_pool *)packet->np_private.npp_pool;
- if (head->np_private.npp_flags != 0x1) {
- KeReleaseSpinLock(&head->np_lock, irql);
- return;
- }
-
- packet->np_private.npp_head = head->np_private.npp_head;
- head->np_private.npp_head = (ndis_buffer *)packet;
- head->np_private.npp_count--;
+#ifdef NDIS_DEBUG_PACKETS
+ KeAcquireSpinLock(&p->np_lock, &irql);
+#endif
- /*
- * If the pool has been marked for deletion and there are
- * no more packets outstanding, nuke the pool.
- */
+ InterlockedPushEntrySList(&p->np_head, (slist_entry *)packet);
- if (head->np_private.npp_totlen && head->np_private.npp_count == 0) {
- KeReleaseSpinLock(&head->np_lock, irql);
- free(head, M_DEVBUF);
- } else
- KeReleaseSpinLock(&head->np_lock, irql);
+#ifdef NDIS_DEBUG_PACKETS
+ if (p->np_dead) {
+ if (ExQueryDepthSList(&p->np_head) == p->np_cnt)
+ KeSetEvent(&p->np_event, IO_NO_INCREMENT, FALSE);
+ }
+ KeReleaseSpinLock(&p->np_lock, irql);
+#endif
return;
}
@@ -2255,7 +2257,7 @@ static void
NdisSetEvent(event)
ndis_event *event;
{
- KeSetEvent(&event->ne_event, 0, 0);
+ KeSetEvent(&event->ne_event, IO_NO_INCREMENT, FALSE);
return;
}
@@ -2276,7 +2278,7 @@ NdisWaitEvent(event, msecs)
uint32_t rval;
duetime = ((int64_t)msecs * -10000);
- rval = KeWaitForSingleObject((nt_dispatch_header *)event,
+ rval = KeWaitForSingleObject(event,
0, 0, TRUE, msecs ? & duetime : NULL);
if (rval == STATUS_TIMEOUT)
@@ -2479,8 +2481,7 @@ NdisMDeregisterInterrupt(intr)
IoDisconnectInterrupt(intr->ni_introbj);
- KeWaitForSingleObject((nt_dispatch_header *)&intr->ni_dpcevt,
- 0, 0, FALSE, NULL);
+ KeWaitForSingleObject(&intr->ni_dpcevt, 0, 0, FALSE, NULL);
KeResetEvent(&intr->ni_dpcevt);
return;
@@ -2569,7 +2570,7 @@ NdisQueryBufferOffset(buf, off, len)
return;
}
-static void
+void
NdisMSleep(usecs)
uint32_t usecs;
{
@@ -2586,9 +2587,9 @@ NdisMSleep(usecs)
else {
KeInitializeTimer(&timer);
KeSetTimer(&timer, ((int64_t)usecs * -10), NULL);
- KeWaitForSingleObject((nt_dispatch_header *)&timer,
- 0, 0, FALSE, NULL);
+ KeWaitForSingleObject(&timer, 0, 0, FALSE, NULL);
}
+
return;
}
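
From a caller's point of view, the rewritten pool routines above keep the same NDIS entry points; only the backing store (preallocated descriptors fed through an interlocked SList) changed. A hedged driver-side usage fragment, assuming the compat definitions from ndis_var.h are in scope and using an illustrative function name:

static void
packet_pool_example(void)
{
	ndis_status	status;
	ndis_handle	pool;
	ndis_packet	*pkt;

	/* 64 descriptors, no protocol-reserved area appended to each. */
	NdisAllocatePacketPool(&status, &pool, 64, 0);
	if (status != NDIS_STATUS_SUCCESS)
		return;

	/* Descriptors are popped from the pool's interlocked SList. */
	NdisAllocatePacket(&status, &pkt, pool);
	if (status == NDIS_STATUS_SUCCESS) {
		/* ... chain buffers, hand the packet to the miniport ... */
		NdisFreePacket(pkt);	/* pushed back onto the SList */
	}

	/*
	 * With NDIS_DEBUG_PACKETS, the pool waits here until every
	 * outstanding packet has been freed before tearing down.
	 */
	NdisFreePacketPool(pool);
}
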
diff --git a/sys/compat/ndis/subr_ntoskrnl.c b/sys/compat/ndis/subr_ntoskrnl.c
index 023526d..f056466 100644
--- a/sys/compat/ndis/subr_ntoskrnl.c
+++ b/sys/compat/ndis/subr_ntoskrnl.c
@@ -54,6 +54,7 @@ __FBSDID("$FreeBSD$");
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/sched.h>
+#include <sys/sysctl.h>
#include <machine/atomic.h>
#include <machine/clock.h>
@@ -78,12 +79,20 @@ __FBSDID("$FreeBSD$");
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>
+#ifdef NTOSKRNL_DEBUG_TIMERS
+static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);
+
+SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
+ sysctl_show_timers, "I", "Show ntoskrnl timer stats");
+#endif
+
struct kdpc_queue {
list_entry kq_disp;
struct thread *kq_td;
int kq_cpu;
int kq_exit;
- struct mtx kq_lock;
+ int kq_running;
+ kspin_lock kq_lock;
nt_kevent kq_proc;
nt_kevent kq_done;
nt_kevent kq_dead;
@@ -99,9 +108,25 @@ struct wb_ext {
typedef struct wb_ext wb_ext;
#define NTOSKRNL_TIMEOUTS 256
-struct callout ntoskrnl_callout[NTOSKRNL_TIMEOUTS];
-int ntoskrnl_callidx;
-#define CALLOUT_INC(i) (i) = ((i) + 1) % NTOSKRNL_TIMEOUTS
+#ifdef NTOSKRNL_DEBUG_TIMERS
+static uint64_t ntoskrnl_timer_fires;
+static uint64_t ntoskrnl_timer_sets;
+static uint64_t ntoskrnl_timer_reloads;
+static uint64_t ntoskrnl_timer_cancels;
+#endif
+
+struct callout_entry {
+ struct callout ce_callout;
+ list_entry ce_list;
+};
+
+typedef struct callout_entry callout_entry;
+
+static struct list_entry ntoskrnl_calllist;
+static struct mtx ntoskrnl_calllock;
+
+static struct list_entry ntoskrnl_intlist;
+static kspin_lock ntoskrnl_intlock;
static uint8_t RtlEqualUnicodeString(unicode_string *,
unicode_string *, uint8_t);
@@ -126,8 +151,12 @@ static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
+static void ntoskrnl_insert_timer(ktimer *, int);
+static void ntoskrnl_remove_timer(ktimer *);
+#ifdef NTOSKRNL_DEBUG_TIMERS
+static void ntoskrnl_show_timers(void);
+#endif
static void ntoskrnl_timercall(void *);
-static void ntoskrnl_run_dpc(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
@@ -136,9 +165,6 @@ static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
-static device_t ntoskrnl_finddev(device_t, uint32_t,
- uint8_t, uint8_t, struct resource **);
-static void ntoskrnl_intr(void *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
@@ -166,14 +192,10 @@ static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry
- *InterlockedPushEntrySList(slist_header *, slist_entry *);
-static slist_entry *InterlockedPopEntrySList(slist_header *);
-static slist_entry
*ExInterlockedPushEntrySList(slist_header *,
slist_entry *, kspin_lock *);
static slist_entry
*ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
-static uint16_t ExQueryDepthSList(slist_header *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
@@ -215,7 +237,7 @@ static void DbgBreakPoint(void);
static void dummy(void);
static struct mtx ntoskrnl_dispatchlock;
-static kspin_lock ntoskrnl_global;
+static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
@@ -232,15 +254,21 @@ ntoskrnl_libinit()
int error;
struct proc *p;
kdpc_queue *kq;
+ callout_entry *e;
int i;
char name[64];
mtx_init(&ntoskrnl_dispatchlock,
"ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
- KeInitializeSpinLock(&ntoskrnl_global);
+ mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
KeInitializeSpinLock(&ntoskrnl_cancellock);
+ KeInitializeSpinLock(&ntoskrnl_intlock);
TAILQ_INIT(&ntoskrnl_reflist);
+ InitializeListHead(&ntoskrnl_calllist);
+ InitializeListHead(&ntoskrnl_intlist);
+ mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
+
kq_queues = ExAllocatePoolWithTag(NonPagedPool,
sizeof(kdpc_queue) * mp_ncpus, 0);
@@ -291,8 +319,15 @@ ntoskrnl_libinit()
patch++;
}
- for (i = 0; i < NTOSKRNL_TIMEOUTS; i++)
- callout_init(&ntoskrnl_callout[i], CALLOUT_MPSAFE);
+ for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
+ e = ExAllocatePoolWithTag(NonPagedPool,
+ sizeof(callout_entry), 0);
+ if (e == NULL)
+ panic("failed to allocate timeouts");
+ mtx_lock_spin(&ntoskrnl_calllock);
+ InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
+ mtx_unlock_spin(&ntoskrnl_calllock);
+ }
/*
* MDLs are supposed to be variable size (they describe
@@ -320,6 +355,8 @@ int
ntoskrnl_libfini()
{
image_patch_table *patch;
+ callout_entry *e;
+ list_entry *l;
patch = ntoskrnl_functbl;
while (patch->ipt_func != NULL) {
@@ -338,7 +375,19 @@ ntoskrnl_libfini()
uma_zdestroy(mdl_zone);
uma_zdestroy(iw_zone);
+ mtx_lock_spin(&ntoskrnl_calllock);
+ while(!IsListEmpty(&ntoskrnl_calllist)) {
+ l = RemoveHeadList(&ntoskrnl_calllist);
+ e = CONTAINING_RECORD(l, callout_entry, ce_list);
+ mtx_unlock_spin(&ntoskrnl_calllock);
+ ExFreePool(e);
+ mtx_lock_spin(&ntoskrnl_calllock);
+ }
+ mtx_unlock_spin(&ntoskrnl_calllock);
+
mtx_destroy(&ntoskrnl_dispatchlock);
+ mtx_destroy(&ntoskrnl_interlock);
+ mtx_destroy(&ntoskrnl_calllock);
return(0);
}
@@ -1132,100 +1181,41 @@ IofCompleteRequest(ip, prioboost)
return;
}
-static device_t
-ntoskrnl_finddev(dev, vector, irql, shared, res)
- device_t dev;
- uint32_t vector;
+void
+ntoskrnl_intr(arg)
+ void *arg;
+{
+ kinterrupt *iobj;
uint8_t irql;
- uint8_t shared;
- struct resource **res;
-{
- device_t *children;
- device_t matching_dev;
- int childcnt;
- struct resource *r;
- struct resource_list *rl;
- struct resource_list_entry *rle;
- uint32_t flags;
- int i;
-
- /* We only want devices that have been successfully probed. */
-
- if (device_is_alive(dev) == FALSE)
- return(NULL);
-
- device_get_children(dev, &children, &childcnt);
-
- /*
- * If this device has no children, it's a leaf: we can
- * examine its resources. If the interrupt resource we
- * want isn't here, we return NULL, otherwise we return
- * the device to terminate the recursive search.
- */
-
- if (childcnt == 0) {
- rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
- if (rl == NULL)
- return(NULL);
-#if __FreeBSD_version < 600022
- SLIST_FOREACH(rle, rl, link) {
-#else
- STAILQ_FOREACH(rle, rl, link) {
-#endif
- r = rle->res;
-
- if (r == NULL)
- continue;
-
- flags = rman_get_flags(r);
-
- if (!(flags & RF_ACTIVE))
- continue;
-
- if (shared == TRUE && !(flags & RF_SHAREABLE))
- continue;
-
- if (rle->type == SYS_RES_IRQ &&
- rman_get_start(r) == irql) {
- *res = r;
- return(dev);
- }
- }
- /* No match. */
- return (NULL);
- }
-
- /*
- * If this device has children, do another
- * level of recursion to inspect them.
- */
+ list_entry *l;
- for (i = 0; i < childcnt; i++) {
- matching_dev = ntoskrnl_finddev(children[i],
- vector, irql, shared, res);
- if (matching_dev != NULL) {
- free(children, M_TEMP);
- return(matching_dev);
- }
+ KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
+ l = ntoskrnl_intlist.nle_flink;
+ while (l != &ntoskrnl_intlist) {
+ iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
+ MSCALL1(iobj->ki_svcfunc, iobj->ki_svcctx);
+ l = l->nle_flink;
}
+ KeReleaseSpinLock(&ntoskrnl_intlock, irql);
- free(children, M_TEMP);
- return(NULL);
+ return;
}
-static void
-ntoskrnl_intr(arg)
- void *arg;
-{
+uint8_t
+KeAcquireInterruptSpinLock(iobj)
kinterrupt *iobj;
+{
uint8_t irql;
+ KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
+ return(irql);
+}
- iobj = arg;
-
- KeAcquireSpinLock(iobj->ki_lock, &irql);
- MSCALL1(iobj->ki_svcfunc, iobj->ki_svcctx);
- KeReleaseSpinLock(iobj->ki_lock, irql);
-
+void
+KeReleaseInterruptSpinLock(iobj, irql)
+ kinterrupt *iobj;
+ uint8_t irql;
+{
+ KeReleaseSpinLock(&ntoskrnl_intlock, irql);
return;
}
@@ -1237,18 +1227,27 @@ KeSynchronizeExecution(iobj, syncfunc, syncctx)
{
uint8_t irql;
- KeAcquireSpinLock(iobj->ki_lock, &irql);
+ KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
MSCALL1(syncfunc, syncctx);
- KeReleaseSpinLock(iobj->ki_lock, irql);
+ KeReleaseSpinLock(&ntoskrnl_intlock, irql);
return(TRUE);
}
/*
- * This routine is a pain because the only thing we get passed
- * here is the interrupt request level and vector, but bus_setup_intr()
- * needs the device too. We can hack around this for now, but it's
- * awkward.
+ * IoConnectInterrupt() is passed only the interrupt vector and
+ * irql that a device wants to use, but no device-specific tag
+ * of any kind. This conflicts rather badly with FreeBSD's
+ * bus_setup_intr(), which needs the device_t for the device
+ * requesting interrupt delivery. In order to bypass this
+ * inconsistency, we implement a second level of interrupt
+ * dispatching on top of bus_setup_intr(). All devices use
+ * ntoskrnl_intr() as their ISR, and any device requesting
+ * interrupts will be registered with ntoskrnl_intr()'s interrupt
+ * dispatch list. When an interrupt arrives, we walk the list
+ * and invoke all the registered ISRs. This effectively makes all
+ * interrupts shared, but it's the only way to duplicate the
+ * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
*/
uint32_t
@@ -1266,35 +1265,12 @@ IoConnectInterrupt(iobj, svcfunc, svcctx, lock, vector, irql,
uint32_t affinity;
uint8_t savefloat;
{
- devclass_t nexus_class;
- device_t *nexus_devs, devp;
- int nexus_count = 0;
- device_t matching_dev = NULL;
- struct resource *res;
- int i, error;
-
- nexus_class = devclass_find("nexus");
- devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
-
- for (i = 0; i < nexus_count; i++) {
- devp = nexus_devs[i];
- matching_dev = ntoskrnl_finddev(devp, vector,
- irql, shared, &res);
- if (matching_dev)
- break;
- }
-
- free(nexus_devs, M_TEMP);
-
- if (matching_dev == NULL)
- return(STATUS_INVALID_PARAMETER);
+ uint8_t curirql;
*iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
if (*iobj == NULL)
return(STATUS_INSUFFICIENT_RESOURCES);
- (*iobj)->ki_dev = matching_dev;
- (*iobj)->ki_irq = res;
(*iobj)->ki_svcfunc = svcfunc;
(*iobj)->ki_svcctx = svcctx;
@@ -1304,13 +1280,9 @@ IoConnectInterrupt(iobj, svcfunc, svcctx, lock, vector, irql,
} else
(*iobj)->ki_lock = lock;
- error = bus_setup_intr(matching_dev, res, INTR_TYPE_NET | INTR_MPSAFE,
- ntoskrnl_intr, *iobj, &(*iobj)->ki_cookie);
-
- if (error) {
- ExFreePool(iobj);
- return (STATUS_INVALID_PARAMETER);
- }
+ KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
+ InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
+ KeReleaseSpinLock(&ntoskrnl_intlock, curirql);
return(STATUS_SUCCESS);
}
@@ -1319,10 +1291,15 @@ void
IoDisconnectInterrupt(iobj)
kinterrupt *iobj;
{
+ uint8_t irql;
+
if (iobj == NULL)
return;
- bus_teardown_intr(iobj->ki_dev, iobj->ki_irq, iobj->ki_cookie);
+ KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
+ RemoveEntryList((&iobj->ki_list));
+ KeReleaseSpinLock(&ntoskrnl_intlock, irql);
+
ExFreePool(iobj);
return;
@@ -1537,6 +1514,7 @@ ntoskrnl_waittest(obj, increment)
if (satisfied == TRUE)
cv_broadcastpri(&we->we_cv, w->wb_oldpri -
(increment * 4));
+
e = e->nle_flink;
}
@@ -2227,25 +2205,29 @@ ExDeleteNPagedLookasideList(lookaside)
return;
}
-static slist_entry *
+slist_entry *
InterlockedPushEntrySList(head, entry)
slist_header *head;
slist_entry *entry;
{
slist_entry *oldhead;
- oldhead = ExInterlockedPushEntrySList(head, entry, &ntoskrnl_global);
+ mtx_lock_spin(&ntoskrnl_interlock);
+ oldhead = ntoskrnl_pushsl(head, entry);
+ mtx_unlock_spin(&ntoskrnl_interlock);
return(oldhead);
}
-static slist_entry *
+slist_entry *
InterlockedPopEntrySList(head)
slist_header *head;
{
slist_entry *first;
- first = ExInterlockedPopEntrySList(head, &ntoskrnl_global);
+ mtx_lock_spin(&ntoskrnl_interlock);
+ first = ntoskrnl_popsl(head);
+ mtx_unlock_spin(&ntoskrnl_interlock);
return(first);
}
@@ -2256,14 +2238,7 @@ ExInterlockedPushEntrySList(head, entry, lock)
slist_entry *entry;
kspin_lock *lock;
{
- slist_entry *oldhead;
- uint8_t irql;
-
- KeAcquireSpinLock(lock, &irql);
- oldhead = ntoskrnl_pushsl(head, entry);
- KeReleaseSpinLock(lock, irql);
-
- return(oldhead);
+ return(InterlockedPushEntrySList(head, entry));
}
static slist_entry *
@@ -2271,37 +2246,22 @@ ExInterlockedPopEntrySList(head, lock)
slist_header *head;
kspin_lock *lock;
{
- slist_entry *first;
- uint8_t irql;
-
- KeAcquireSpinLock(lock, &irql);
- first = ntoskrnl_popsl(head);
- KeReleaseSpinLock(lock, irql);
-
- return(first);
+ return(InterlockedPopEntrySList(head));
}
-static uint16_t
+uint16_t
ExQueryDepthSList(head)
slist_header *head;
{
uint16_t depth;
- uint8_t irql;
- KeAcquireSpinLock(&ntoskrnl_global, &irql);
+ mtx_lock_spin(&ntoskrnl_interlock);
depth = head->slh_list.slh_depth;
- KeReleaseSpinLock(&ntoskrnl_global, irql);
+ mtx_unlock_spin(&ntoskrnl_interlock);
return(depth);
}
-/*
- * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
- * and KefReleaseSpinLockFromDpcLevel() appear to be analagous
- * to splnet()/splx() in their use. We can't create a new mutex
- * lock here because there is no complimentary KeFreeSpinLock()
- * function. Instead, we grab a mutex from the mutex pool.
- */
void
KeInitializeSpinLock(lock)
kspin_lock *lock;
@@ -2316,8 +2276,18 @@ void
KefAcquireSpinLockAtDpcLevel(lock)
kspin_lock *lock;
{
- while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
+#ifdef NTOSKRNL_DEBUG_SPINLOCKS
+ int i = 0;
+#endif
+
+ while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
/* sit and spin */;
+#ifdef NTOSKRNL_DEBUG_SPINLOCKS
+ i++;
+ if (i > 200000000)
+ panic("DEADLOCK!");
+#endif
+ }
return;
}
@@ -2368,13 +2338,12 @@ InterlockedExchange(dst, val)
volatile uint32_t *dst;
uintptr_t val;
{
- uint8_t irql;
uintptr_t r;
- KeAcquireSpinLock(&ntoskrnl_global, &irql);
+ mtx_lock_spin(&ntoskrnl_interlock);
r = *dst;
*dst = val;
- KeReleaseSpinLock(&ntoskrnl_global, irql);
+ mtx_unlock_spin(&ntoskrnl_interlock);
return(r);
}
@@ -2400,11 +2369,9 @@ ExInterlockedAddLargeStatistic(addend, inc)
uint64_t *addend;
uint32_t inc;
{
- uint8_t irql;
-
- KeAcquireSpinLock(&ntoskrnl_global, &irql);
+ mtx_lock_spin(&ntoskrnl_interlock);
*addend += inc;
- KeReleaseSpinLock(&ntoskrnl_global, irql);
+ mtx_unlock_spin(&ntoskrnl_interlock);
return;
};
@@ -2579,23 +2546,24 @@ ntoskrnl_workitem_thread(arg)
kdpc_queue *kq;
list_entry *l;
io_workitem *iw;
+ uint8_t irql;
kq = arg;
InitializeListHead(&kq->kq_disp);
kq->kq_td = curthread;
kq->kq_exit = 0;
- mtx_init(&kq->kq_lock, "NDIS thread lock", NULL, MTX_SPIN);
+ KeInitializeSpinLock(&kq->kq_lock);
KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
KeInitializeEvent(&kq->kq_dead, EVENT_TYPE_SYNC, FALSE);
while (1) {
KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
- mtx_lock_spin(&kq->kq_lock);
+ KeAcquireSpinLock(&kq->kq_lock, &irql);
if (kq->kq_exit) {
- mtx_unlock_spin(&kq->kq_lock);
+ KeReleaseSpinLock(&kq->kq_lock, irql);
KeSetEvent(&kq->kq_dead, IO_NO_INCREMENT, FALSE);
break;
}
@@ -2607,16 +2575,14 @@ ntoskrnl_workitem_thread(arg)
InitializeListHead((&iw->iw_listentry));
if (iw->iw_func == NULL)
continue;
- mtx_unlock_spin(&kq->kq_lock);
+ KeReleaseSpinLock(&kq->kq_lock, irql);
MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
- mtx_lock_spin(&kq->kq_lock);
+ KeAcquireSpinLock(&kq->kq_lock, &irql);
}
- mtx_unlock_spin(&kq->kq_lock);
+ KeReleaseSpinLock(&kq->kq_lock, irql);
}
- mtx_destroy(&kq->kq_lock);
-
#if __FreeBSD_version < 502113
mtx_lock(&Giant);
#endif
@@ -2679,13 +2645,11 @@ IoQueueWorkItem(iw, iw_func, qtype, ctx)
kdpc_queue *kq;
list_entry *l;
io_workitem *cur;
-
- iw->iw_func = iw_func;
- iw->iw_ctx = ctx;
+ uint8_t irql;
kq = wq_queues + iw->iw_idx;
- mtx_lock_spin(&kq->kq_lock);
+ KeAcquireSpinLock(&kq->kq_lock, &irql);
/*
* Traverse the list and make sure this workitem hasn't
@@ -2698,14 +2662,17 @@ IoQueueWorkItem(iw, iw_func, qtype, ctx)
cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
if (cur == iw) {
/* Already queued -- do nothing. */
- mtx_unlock_spin(&kq->kq_lock);
+ KeReleaseSpinLock(&kq->kq_lock, irql);
return;
}
l = l->nle_flink;
}
+ iw->iw_func = iw_func;
+ iw->iw_ctx = ctx;
+
InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
- mtx_unlock_spin(&kq->kq_lock);
+ KeReleaseSpinLock(&kq->kq_lock, irql);
KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
@@ -2765,6 +2732,7 @@ ExQueueWorkItem(w, qtype)
kdpc_queue *kq;
list_entry *l;
io_workitem *cur;
+ uint8_t irql;
/*
@@ -2777,18 +2745,18 @@ ExQueueWorkItem(w, qtype)
*/
kq = wq_queues + WORKITEM_LEGACY_THREAD;
- mtx_lock_spin(&kq->kq_lock);
+ KeAcquireSpinLock(&kq->kq_lock, &irql);
l = kq->kq_disp.nle_flink;
while (l != &kq->kq_disp) {
cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
if (cur->iw_dobj == (device_object *)w) {
/* Already queued -- do nothing. */
- mtx_unlock_spin(&kq->kq_lock);
+ KeReleaseSpinLock(&kq->kq_lock, irql);
return;
}
l = l->nle_flink;
}
- mtx_unlock_spin(&kq->kq_lock);
+ KeReleaseSpinLock(&kq->kq_lock, irql);
iw = IoAllocateWorkItem((device_object *)w);
if (iw == NULL)
@@ -3362,12 +3330,28 @@ ntoskrnl_timercall(arg)
{
ktimer *timer;
struct timeval tv;
+ kdpc *dpc;
mtx_lock(&ntoskrnl_dispatchlock);
timer = arg;
- callout_init(timer->k_callout, CALLOUT_MPSAFE);
+#ifdef NTOSKRNL_DEBUG_TIMERS
+ ntoskrnl_timer_fires++;
+#endif
+ ntoskrnl_remove_timer(timer);
+
+ /*
+ * This should never happen, but complain
+ * if it does.
+ */
+
+ if (timer->k_header.dh_inserted == FALSE) {
+ mtx_unlock(&ntoskrnl_dispatchlock);
+ printf("NTOS: timer %p fired even though "
+ "it was canceled\n", timer);
+ return;
+ }
/* Mark the timer as no longer being on the timer queue. */
@@ -3391,18 +3375,111 @@ ntoskrnl_timercall(arg)
tv.tv_sec = 0;
tv.tv_usec = timer->k_period * 1000;
timer->k_header.dh_inserted = TRUE;
- timer->k_callout = &ntoskrnl_callout[ntoskrnl_callidx];
- CALLOUT_INC(ntoskrnl_callidx);
- callout_reset(timer->k_callout, tvtohz(&tv),
- ntoskrnl_timercall, timer);
+ ntoskrnl_insert_timer(timer, tvtohz(&tv));
+#ifdef NTOSKRNL_DEBUG_TIMERS
+ ntoskrnl_timer_reloads++;
+#endif
}
+ dpc = timer->k_dpc;
+
+ mtx_unlock(&ntoskrnl_dispatchlock);
+
/* If there's a DPC associated with the timer, queue it up. */
- if (timer->k_dpc != NULL)
- KeInsertQueueDpc(timer->k_dpc, NULL, NULL);
+ if (dpc != NULL)
+ KeInsertQueueDpc(dpc, NULL, NULL);
- mtx_unlock(&ntoskrnl_dispatchlock);
+ return;
+}
+
+#ifdef NTOSKRNL_DEBUG_TIMERS
+static int
+sysctl_show_timers(SYSCTL_HANDLER_ARGS)
+{
+ int ret;
+
+ ret = 0;
+ ntoskrnl_show_timers();
+ return (sysctl_handle_int(oidp, &ret, 0, req));
+}
+
+static void
+ntoskrnl_show_timers()
+{
+ int i = 0;
+ list_entry *l;
+
+ mtx_lock_spin(&ntoskrnl_calllock);
+ l = ntoskrnl_calllist.nle_flink;
+ while(l != &ntoskrnl_calllist) {
+ i++;
+ l = l->nle_flink;
+ }
+ mtx_unlock_spin(&ntoskrnl_calllock);
+
+ printf("\n");
+ printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
+ printf("timer sets: %qu\n", ntoskrnl_timer_sets);
+ printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
+ printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
+ printf("timer fires: %qu\n", ntoskrnl_timer_fires);
+ printf("\n");
+
+ return;
+}
+#endif
+
+/*
+ * Must be called with dispatcher lock held.
+ */
+
+static void
+ntoskrnl_insert_timer(timer, ticks)
+ ktimer *timer;
+ int ticks;
+{
+ callout_entry *e;
+ list_entry *l;
+ struct callout *c;
+
+ /*
+ * Try and allocate a timer.
+ */
+ mtx_lock_spin(&ntoskrnl_calllock);
+ if (IsListEmpty(&ntoskrnl_calllist)) {
+ mtx_unlock_spin(&ntoskrnl_calllock);
+#ifdef NTOSKRNL_DEBUG_TIMERS
+ ntoskrnl_show_timers();
+#endif
+ panic("out of timers!");
+ }
+ l = RemoveHeadList(&ntoskrnl_calllist);
+ mtx_unlock_spin(&ntoskrnl_calllock);
+
+ e = CONTAINING_RECORD(l, callout_entry, ce_list);
+ c = &e->ce_callout;
+
+ timer->k_callout = c;
+
+ callout_init(c, CALLOUT_MPSAFE);
+ callout_reset(c, ticks, ntoskrnl_timercall, timer);
+
+ return;
+}
+
+static void
+ntoskrnl_remove_timer(timer)
+ ktimer *timer;
+{
+ callout_entry *e;
+
+ e = (callout_entry *)timer->k_callout;
+ callout_stop(timer->k_callout);
+
+ mtx_lock_spin(&ntoskrnl_calllock);
+ InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
+ mtx_unlock_spin(&ntoskrnl_calllock);
return;
}
@@ -3465,13 +3542,15 @@ ntoskrnl_dpc_thread(arg)
kdpc_queue *kq;
kdpc *d;
list_entry *l;
+ uint8_t irql;
kq = arg;
InitializeListHead(&kq->kq_disp);
kq->kq_td = curthread;
kq->kq_exit = 0;
- mtx_init(&kq->kq_lock, "NDIS thread lock", NULL, MTX_SPIN);
+ kq->kq_running = FALSE;
+ KeInitializeSpinLock(&kq->kq_lock);
KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
KeInitializeEvent(&kq->kq_dead, EVENT_TYPE_SYNC, FALSE);
@@ -3493,31 +3572,33 @@ ntoskrnl_dpc_thread(arg)
while (1) {
KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
- mtx_lock_spin(&kq->kq_lock);
+ KeAcquireSpinLock(&kq->kq_lock, &irql);
if (kq->kq_exit) {
- mtx_unlock_spin(&kq->kq_lock);
+ KeReleaseSpinLock(&kq->kq_lock, irql);
KeSetEvent(&kq->kq_dead, IO_NO_INCREMENT, FALSE);
break;
}
+ kq->kq_running = TRUE;
+
while (!IsListEmpty(&kq->kq_disp)) {
l = RemoveHeadList((&kq->kq_disp));
d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
InitializeListHead((&d->k_dpclistentry));
- d->k_lock = NULL;
- mtx_unlock_spin(&kq->kq_lock);
- ntoskrnl_run_dpc(d);
- mtx_lock_spin(&kq->kq_lock);
+ KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
+ MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
+ d->k_sysarg1, d->k_sysarg2);
+ KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
}
- mtx_unlock_spin(&kq->kq_lock);
+ kq->kq_running = FALSE;
- KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
+ KeReleaseSpinLock(&kq->kq_lock, irql);
+ KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
}
- mtx_destroy(&kq->kq_lock);
#if __FreeBSD_version < 502113
mtx_lock(&Giant);
#endif
@@ -3525,33 +3606,6 @@ ntoskrnl_dpc_thread(arg)
return; /* notreached */
}
-
-/*
- * This is a wrapper for Windows deferred procedure calls that
- * have been placed on an NDIS thread work queue. We need it
- * since the DPC could be a _stdcall function. Also, as far as
- * I can tell, defered procedure calls must run at DISPATCH_LEVEL.
- */
-static void
-ntoskrnl_run_dpc(arg)
- void *arg;
-{
- kdpc_func dpcfunc;
- kdpc *dpc;
- uint8_t irql;
-
- dpc = arg;
- dpcfunc = dpc->k_deferedfunc;
- if (dpcfunc == NULL)
- return;
- irql = KeRaiseIrql(DISPATCH_LEVEL);
- MSCALL4(dpcfunc, dpc, dpc->k_deferredctx,
- dpc->k_sysarg1, dpc->k_sysarg2);
- KeLowerIrql(irql);
-
- return;
-}
-
static void
ntoskrnl_destroy_dpc_threads(void)
{
@@ -3589,10 +3643,10 @@ ntoskrnl_insert_dpc(head, dpc)
l = l->nle_flink;
}
- if (dpc->k_importance == KDPC_IMPORTANCE_HIGH)
- InsertHeadList((head), (&dpc->k_dpclistentry));
- else
+ if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
InsertTailList((head), (&dpc->k_dpclistentry));
+ else
+ InsertHeadList((head), (&dpc->k_dpclistentry));
return (TRUE);
}
@@ -3611,11 +3665,6 @@ KeInitializeDpc(dpc, dpcfunc, dpcctx)
dpc->k_deferredctx = dpcctx;
dpc->k_num = KDPC_CPU_DEFAULT;
dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
- /*
- * In case someone tries to dequeue a DPC that
- * hasn't been queued yet.
- */
- dpc->k_lock = NULL;
InitializeListHead((&dpc->k_dpclistentry));
return;
@@ -3629,12 +3678,12 @@ KeInsertQueueDpc(dpc, sysarg1, sysarg2)
{
kdpc_queue *kq;
uint8_t r;
+ uint8_t irql;
if (dpc == NULL)
return(FALSE);
- dpc->k_sysarg1 = sysarg1;
- dpc->k_sysarg2 = sysarg2;
+ irql = KeRaiseIrql(DISPATCH_LEVEL);
/*
* By default, the DPC is queued to run on the same CPU
@@ -3647,16 +3696,13 @@ KeInsertQueueDpc(dpc, sysarg1, sysarg2)
else
kq += dpc->k_num;
- /*
- * Also by default, we put the DPC on the medium
- * priority queue.
- */
-
- mtx_lock_spin(&kq->kq_lock);
+ KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
- if (r == TRUE)
- dpc->k_lock = &kq->kq_lock;
- mtx_unlock_spin(&kq->kq_lock);
+ if (r == TRUE) {
+ dpc->k_sysarg1 = sysarg1;
+ dpc->k_sysarg2 = sysarg2;
+ }
+ KeReleaseSpinLock(&kq->kq_lock, irql);
if (r == FALSE)
return(r);
@@ -3670,26 +3716,27 @@ uint8_t
KeRemoveQueueDpc(dpc)
kdpc *dpc;
{
- struct mtx *lock;
+ kdpc_queue *kq;
+ uint8_t irql;
if (dpc == NULL)
return(FALSE);
- lock = dpc->k_lock;
+ irql = KeRaiseIrql(DISPATCH_LEVEL);
- if (lock == NULL)
- return(FALSE);
+ kq = kq_queues + dpc->k_num;
- mtx_lock_spin(lock);
- dpc->k_lock = NULL;
+ KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
- mtx_unlock_spin(lock);
+ KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
+ KeLowerIrql(irql);
return(FALSE);
}
RemoveEntryList((&dpc->k_dpclistentry));
InitializeListHead((&dpc->k_dpclistentry));
- mtx_unlock_spin(lock);
+
+ KeReleaseSpinLock(&kq->kq_lock, irql);
return(TRUE);
}
@@ -3763,7 +3810,10 @@ KeSetTimerEx(timer, duetime, period, dpc)
mtx_lock(&ntoskrnl_dispatchlock);
if (timer->k_header.dh_inserted == TRUE) {
- callout_stop(timer->k_callout);
+ ntoskrnl_remove_timer(timer);
+#ifdef NTOSKRNL_DEBUG_TIMERS
+ ntoskrnl_timer_cancels++;
+#endif
timer->k_header.dh_inserted = FALSE;
pending = TRUE;
} else
@@ -3790,10 +3840,10 @@ KeSetTimerEx(timer, duetime, period, dpc)
}
timer->k_header.dh_inserted = TRUE;
- timer->k_callout = &ntoskrnl_callout[ntoskrnl_callidx];
- CALLOUT_INC(ntoskrnl_callidx);
- callout_reset(timer->k_callout, tvtohz(&tv),
- ntoskrnl_timercall, timer);
+ ntoskrnl_insert_timer(timer, tvtohz(&tv));
+#ifdef NTOSKRNL_DEBUG_TIMERS
+ ntoskrnl_timer_sets++;
+#endif
mtx_unlock(&ntoskrnl_dispatchlock);
@@ -3830,7 +3880,10 @@ KeCancelTimer(timer)
if (timer->k_header.dh_inserted == TRUE) {
timer->k_header.dh_inserted = FALSE;
- callout_stop(timer->k_callout);
+ ntoskrnl_remove_timer(timer);
+#ifdef NTOSKRNL_DEBUG_TIMERS
+ ntoskrnl_timer_cancels++;
+#endif
}
mtx_unlock(&ntoskrnl_dispatchlock);
@@ -3905,6 +3958,8 @@ image_patch_table ntoskrnl_functbl[] = {
IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
IMPORT_SFUNC(IoFreeIrp, 1),
IMPORT_SFUNC(IoInitializeIrp, 3),
+ IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
+ IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
IMPORT_SFUNC(KeSynchronizeExecution, 3),
IMPORT_SFUNC(KeWaitForSingleObject, 5),
IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
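
The timer cleanup in subr_ntoskrnl.c above replaces the fixed callout array indexed by ntoskrnl_callidx with a free list of callout_entry structures: arming a ktimer pops an entry, cancellation or expiry pushes it back, and running dry is a panic. A compact model of that allocation discipline, with hypothetical names and the locking omitted (the real list is ntoskrnl_calllist under ntoskrnl_calllock):

#include <stdlib.h>

#define NSLOTS	256			/* mirrors NTOSKRNL_TIMEOUTS */

struct timer_slot {
	struct timer_slot	*next;	/* the real entry embeds a struct callout */
};

static struct timer_slot	*slot_freelist;

static void
slot_pool_init(void)
{
	int i;

	for (i = 0; i < NSLOTS; i++) {
		struct timer_slot *s = malloc(sizeof(*s));

		if (s == NULL)
			abort();	/* "failed to allocate timeouts" */
		s->next = slot_freelist;
		slot_freelist = s;
	}
}

/* Arming a timer takes a slot; an empty list means timers are leaking. */
static struct timer_slot *
slot_get(void)
{
	struct timer_slot *s = slot_freelist;

	if (s == NULL)
		abort();		/* "out of timers!" */
	slot_freelist = s->next;
	return (s);
}

/* Cancellation or expiry gives the slot back for reuse. */
static void
slot_put(struct timer_slot *s)
{
	s->next = slot_freelist;
	slot_freelist = s;
}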