3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
32 * $FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.40 2004/07/20 20:28:57 wpaul Exp $
33 * $DragonFly: src/sys/emulation/ndis/subr_ntoskrnl.c,v 1.13 2006/12/23 00:27:02 swildner Exp $
36 #include <sys/ctype.h>
37 #include <sys/unistd.h>
38 #include <sys/param.h>
39 #include <sys/types.h>
40 #include <sys/errno.h>
41 #include <sys/systm.h>
42 #include <sys/malloc.h>
45 #include <sys/callout.h>
46 #if __FreeBSD_version > 502113
49 #include <sys/kernel.h>
51 #include <sys/kthread.h>
55 #include <machine/atomic.h>
56 #include <machine/clock.h>
57 #include <machine/stdarg.h>
61 #include "resource_var.h"
62 #include "ntoskrnl_var.h"
66 #define __regparm __attribute__((regparm(3)))
68 #define FUNC void(*)(void)
70 __stdcall static uint8_t ntoskrnl_unicode_equal(ndis_unicode_string *,
71 ndis_unicode_string *, uint8_t);
72 __stdcall static void ntoskrnl_unicode_copy(ndis_unicode_string *,
73 ndis_unicode_string *);
74 __stdcall static ndis_status ntoskrnl_unicode_to_ansi(ndis_ansi_string *,
75 ndis_unicode_string *, uint8_t);
76 __stdcall static ndis_status ntoskrnl_ansi_to_unicode(ndis_unicode_string *,
77 ndis_ansi_string *, uint8_t);
78 __stdcall static void *ntoskrnl_iobuildsynchfsdreq(uint32_t, void *,
79 void *, uint32_t, uint32_t *, void *, void *);
84 __stdcall __regcall static uint32_t
85 ntoskrnl_iofcalldriver(REGARGS2(void *dobj, void *irp));
86 __stdcall __regcall static void
87 ntoskrnl_iofcompletereq(REGARGS2(void *irp, uint8_t prioboost));
88 __stdcall __regcall static slist_entry *
89 ntoskrnl_push_slist(REGARGS2(slist_header *head, slist_entry *entry));
90 __stdcall __regcall static slist_entry *
91 ntoskrnl_pop_slist(REGARGS1(slist_header *head));
92 __stdcall __regcall static slist_entry *
93 ntoskrnl_push_slist_ex(REGARGS2(slist_header *head, slist_entry *entry), kspin_lock *lock);
94 __stdcall __regcall static slist_entry *
95 ntoskrnl_pop_slist_ex(REGARGS2(slist_header *head, kspin_lock *lock));
97 __stdcall __regcall static uint32_t
98 ntoskrnl_interlock_inc(REGARGS1(volatile uint32_t *addend));
99 __stdcall __regcall static uint32_t
100 ntoskrnl_interlock_dec(REGARGS1(volatile uint32_t *addend));
101 __stdcall __regcall static void
102 ntoskrnl_interlock_addstat(REGARGS2(uint64_t *addend, uint32_t inc));
103 __stdcall __regcall static void
104 ntoskrnl_objderef(REGARGS1(void *object));
106 __stdcall static uint32_t ntoskrnl_waitforobjs(uint32_t,
107 nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
108 int64_t *, wait_block *);
109 static void ntoskrnl_wakeup(void *);
110 static void ntoskrnl_timercall(void *);
111 static void ntoskrnl_run_dpc(void *);
112 __stdcall static void ntoskrnl_writereg_ushort(uint16_t *, uint16_t);
113 __stdcall static uint16_t ntoskrnl_readreg_ushort(uint16_t *);
114 __stdcall static void ntoskrnl_writereg_ulong(uint32_t *, uint32_t);
115 __stdcall static uint32_t ntoskrnl_readreg_ulong(uint32_t *);
116 __stdcall static void ntoskrnl_writereg_uchar(uint8_t *, uint8_t);
117 __stdcall static uint8_t ntoskrnl_readreg_uchar(uint8_t *);
118 __stdcall static int64_t _allmul(int64_t, int64_t);
119 __stdcall static int64_t _alldiv(int64_t, int64_t);
120 __stdcall static int64_t _allrem(int64_t, int64_t);
121 __regparm static int64_t _allshr(int64_t, uint8_t);
122 __regparm static int64_t _allshl(int64_t, uint8_t);
123 __stdcall static uint64_t _aullmul(uint64_t, uint64_t);
124 __stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
125 __stdcall static uint64_t _aullrem(uint64_t, uint64_t);
126 __regparm static uint64_t _aullshr(uint64_t, uint8_t);
127 __regparm static uint64_t _aullshl(uint64_t, uint8_t);
128 __stdcall static void *ntoskrnl_allocfunc(uint32_t, size_t, uint32_t);
129 __stdcall static void ntoskrnl_freefunc(void *);
130 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
131 static slist_entry *ntoskrnl_popsl(slist_header *);
132 __stdcall static void ntoskrnl_init_lookaside(paged_lookaside_list *,
133 lookaside_alloc_func *, lookaside_free_func *,
134 uint32_t, size_t, uint32_t, uint16_t);
135 __stdcall static void ntoskrnl_delete_lookaside(paged_lookaside_list *);
136 __stdcall static void ntoskrnl_init_nplookaside(npaged_lookaside_list *,
137 lookaside_alloc_func *, lookaside_free_func *,
138 uint32_t, size_t, uint32_t, uint16_t);
139 __stdcall static void ntoskrnl_delete_nplookaside(npaged_lookaside_list *);
140 __stdcall static void ntoskrnl_freemdl(ndis_buffer *);
141 __stdcall static uint32_t ntoskrnl_sizeofmdl(void *, size_t);
142 __stdcall static void ntoskrnl_build_npaged_mdl(ndis_buffer *);
143 __stdcall static void *ntoskrnl_mmaplockedpages(ndis_buffer *, uint8_t);
144 __stdcall static void *ntoskrnl_mmaplockedpages_cache(ndis_buffer *,
145 uint8_t, uint32_t, void *, uint32_t, uint32_t);
146 __stdcall static void ntoskrnl_munmaplockedpages(void *, ndis_buffer *);
147 __stdcall static void ntoskrnl_init_lock(kspin_lock *);
148 __stdcall static size_t ntoskrnl_memcmp(const void *, const void *, size_t);
149 __stdcall static void ntoskrnl_init_ansi_string(ndis_ansi_string *, char *);
150 __stdcall static void ntoskrnl_init_unicode_string(ndis_unicode_string *,
152 __stdcall static void ntoskrnl_free_unicode_string(ndis_unicode_string *);
153 __stdcall static void ntoskrnl_free_ansi_string(ndis_ansi_string *);
154 __stdcall static ndis_status ntoskrnl_unicode_to_int(ndis_unicode_string *,
155 uint32_t, uint32_t *);
156 static int atoi (const char *);
157 static long atol (const char *);
158 static int rand(void);
159 static void ntoskrnl_time(uint64_t *);
160 __stdcall static uint8_t ntoskrnl_wdmver(uint8_t, uint8_t);
161 static void ntoskrnl_thrfunc(void *);
162 __stdcall static ndis_status ntoskrnl_create_thread(ndis_handle *,
163 uint32_t, void *, ndis_handle, void *, void *, void *);
164 __stdcall static ndis_status ntoskrnl_thread_exit(ndis_status);
165 __stdcall static ndis_status ntoskrnl_devprop(device_object *, uint32_t,
166 uint32_t, void *, uint32_t *);
167 __stdcall static void ntoskrnl_init_mutex(kmutant *, uint32_t);
168 __stdcall static uint32_t ntoskrnl_release_mutex(kmutant *, uint8_t);
169 __stdcall static uint32_t ntoskrnl_read_mutex(kmutant *);
170 __stdcall static ndis_status ntoskrnl_objref(ndis_handle, uint32_t, void *,
171 uint8_t, void **, void **);
172 __stdcall static uint32_t ntoskrnl_zwclose(ndis_handle);
173 static uint32_t ntoskrnl_dbgprint(char *, ...);
174 __stdcall static void ntoskrnl_debugger(void);
175 __stdcall static void dummy(void);
177 static struct lwkt_token ntoskrnl_dispatchtoken;
178 static kspin_lock ntoskrnl_global;
179 static int ntoskrnl_kth = 0;
180 static struct nt_objref_head ntoskrnl_reflist;
182 static MALLOC_DEFINE(M_NDIS, "ndis", "ndis emulation");
185 ntoskrnl_libinit(void)
187 lwkt_token_init(&ntoskrnl_dispatchtoken, 1);
188 ntoskrnl_init_lock(&ntoskrnl_global);
189 TAILQ_INIT(&ntoskrnl_reflist);
194 ntoskrnl_libfini(void)
196 lwkt_token_uninit(&ntoskrnl_dispatchtoken);
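/*
 * RtlEqualUnicodeString: compare two counted unicode strings for
 * equality. If 'caseinsensitive' is TRUE, the comparison below folds
 * each character to upper case using only the low byte, which is a
 * simplification of what Windows actually does.
 */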
200 __stdcall static uint8_t
201 ntoskrnl_unicode_equal(ndis_unicode_string *str1,
202 ndis_unicode_string *str2,
203 uint8_t caseinsensitive)
207 if (str1->nus_len != str2->nus_len)
210 for (i = 0; i < str1->nus_len; i++) {
211 if (caseinsensitive == TRUE) {
212 if (toupper((char)(str1->nus_buf[i] & 0xFF)) !=
213 toupper((char)(str2->nus_buf[i] & 0xFF)))
216 if (str1->nus_buf[i] != str2->nus_buf[i])
224 __stdcall static void
225 ntoskrnl_unicode_copy(ndis_unicode_string *dest,
226 ndis_unicode_string *src)
229 if (dest->nus_maxlen >= src->nus_len)
230 dest->nus_len = src->nus_len;
232 dest->nus_len = dest->nus_maxlen;
233 memcpy(dest->nus_buf, src->nus_buf, dest->nus_len);
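/*
 * Conversion helpers for RtlUnicodeStringToAnsiString and
 * RtlAnsiStringToUnicodeString. When 'allocate' is TRUE a fresh
 * destination buffer is allocated; otherwise the caller has supplied
 * one and the result is truncated to its maxlen.
 */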
237 __stdcall static ndis_status
238 ntoskrnl_unicode_to_ansi(ndis_ansi_string *dest,
239 ndis_unicode_string *src,
244 if (dest == NULL || src == NULL)
245 return(NDIS_STATUS_FAILURE);
247 if (allocate == TRUE) {
248 if (ndis_unicode_to_ascii(src->nus_buf, src->nus_len, &astr))
249 return(NDIS_STATUS_FAILURE);
250 dest->nas_buf = astr;
251 dest->nas_len = dest->nas_maxlen = strlen(astr);
253 dest->nas_len = src->nus_len / 2; /* XXX */
254 if (dest->nas_maxlen < dest->nas_len)
255 dest->nas_len = dest->nas_maxlen;
256 ndis_unicode_to_ascii(src->nus_buf, dest->nas_len * 2,
259 return (NDIS_STATUS_SUCCESS);
262 __stdcall static ndis_status
263 ntoskrnl_ansi_to_unicode(ndis_unicode_string *dest,
264 ndis_ansi_string *src,
267 uint16_t *ustr = NULL;
269 if (dest == NULL || src == NULL)
270 return(NDIS_STATUS_FAILURE);
272 if (allocate == TRUE) {
273 if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
274 return(NDIS_STATUS_FAILURE);
275 dest->nus_buf = ustr;
276 dest->nus_len = dest->nus_maxlen = strlen(src->nas_buf) * 2;
278 dest->nus_len = src->nas_len * 2; /* XXX */
279 if (dest->nus_maxlen < dest->nus_len)
280 dest->nus_len = dest->nus_maxlen;
281 ndis_ascii_to_unicode(src->nas_buf, &dest->nus_buf);
283 return (NDIS_STATUS_SUCCESS);
286 __stdcall static void *
287 ntoskrnl_iobuildsynchfsdreq(uint32_t func, void *dobj, void *buf,
288 uint32_t len, uint32_t *off,
289 void *event, void *status)
294 __stdcall __regcall static uint32_t
295 ntoskrnl_iofcalldriver(REGARGS2(void *dobj, void *irp))
300 __stdcall __regcall static void
301 ntoskrnl_iofcompletereq(REGARGS2(void *irp, uint8_t prioboost))
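/*
 * Mark a dispatcher object as signalled and wake up threads sleeping
 * on its wait list. For synchronization (auto-clearing) objects only
 * the first waiter is woken; notification objects wake everybody.
 */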
306 ntoskrnl_wakeup(void *arg)
308 nt_dispatch_header *obj;
315 lwkt_gettoken(&ntoskrnl_dispatchtoken);
316 obj->dh_sigstate = TRUE;
317 e = obj->dh_waitlisthead.nle_flink;
318 while (e != &obj->dh_waitlisthead) {
323 * For synchronization objects, only wake up
326 if (obj->dh_type == EVENT_TYPE_SYNC)
330 lwkt_reltoken(&ntoskrnl_dispatchtoken);
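/*
 * Return the current time in Windows format, i.e. 100-nanosecond
 * intervals since January 1, 1601; the conversion below scales the
 * timespec and (presumably) adds the 1601-to-1970 epoch offset.
 */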
334 ntoskrnl_time(uint64_t *tval)
339 *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
346 * KeWaitForSingleObject() is a tricky beast, because it can be used
347 * with several different object types: semaphores, timers, events,
348 * mutexes and threads. Semaphores don't appear very often, but the
349 * other object types are quite common. KeWaitForSingleObject() is
350 * what's normally used to acquire a mutex, and it can be used to
351 * wait for thread termination.

353 * The Windows NDIS API is implemented in terms of Windows kernel
354 * primitives, and some of the object manipulation is duplicated in
355 * NDIS. For example, NDIS has timers and events, which are actually
356 * Windows kevents and ktimers. Now, you're supposed to only use the
357 * NDIS variants of these objects within the confines of the NDIS API,
358 * but there are some naughty developers out there who will use
359 * KeWaitForSingleObject() on NDIS timer and event objects, so we
360 * have to support that as well. Consequently, our NDIS timer and event
361 * code has to be closely tied into our ntoskrnl timer and event code,
362 * just as it is in Windows.
364 * KeWaitForSingleObject() may do different things for different kinds
367 * - For events, we check if the event has been signalled. If the
368 * event is already in the signalled state, we just return immediately,
369 * otherwise we wait for it to be set to the signalled state by someone
370 * else calling KeSetEvent(). Events can be either synchronization or
371 * notification events.
373 * - For timers, if the timer has already fired and the timer is in
374 * the signalled state, we just return, otherwise we wait on the
375 * timer. Unlike an event, timers get signalled automatically when
376 * they expire rather than someone having to trip them manually.
377 * Timers initialized with KeInitializeTimer() are always notification
378 * events: KeInitializeTimerEx() lets you initialize a timer as
379 * either a notification or synchronization event.
381 * - For mutexes, we try to acquire the mutex and if we can't, we wait
382 * on the mutex until it's available and then grab it. When a mutex is
383 * released, it enters the signaled state, which wakes up one of the
384 * threads waiting to acquire it. Mutexes are always synchronization
387 * - For threads, the only thing we do is wait until the thread object
388 * enters a signalled state, which occurs when the thread terminates.
389 * Threads are always notification events.
391 * A notification event wakes up all threads waiting on an object. A
392 * synchronization event wakes up just one. Also, a synchronization event
393 * is auto-clearing, which means we automatically set the event back to
394 * the non-signalled state once the wakeup is done.
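 *
 * As a rough illustration (not taken from this file), a driver-side
 * caller typically does something like:
 *
 *	KeInitializeEvent(&event, NotificationEvent, FALSE);
 *	...
 *	KeWaitForSingleObject(&event, Executive, KernelMode,
 *	    FALSE, NULL);
 *
 * which maps onto ntoskrnl_init_event() and ntoskrnl_waitforobj()
 * here.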
398 ntoskrnl_waitforobj(nt_dispatch_header *obj, uint32_t reason,
399 uint32_t mode, uint8_t alertable, int64_t *duetime)
401 struct thread *td = curthread;
410 return(STATUS_INVALID_PARAMETER);
412 lwkt_gettoken(&ntoskrnl_dispatchtoken);
415 * See if the object is a mutex. If so, and we already own
416 * it, then just increment the acquisition count and return.
418 * For any other kind of object, see if it's already in the
419 * signalled state, and if it is, just return. If the object
420 * is marked as a synchronization event, reset the state to
424 if (obj->dh_size == OTYPE_MUTEX) {
426 if (km->km_ownerthread == NULL ||
427 km->km_ownerthread == curthread->td_proc) {
428 obj->dh_sigstate = FALSE;
430 km->km_ownerthread = curthread->td_proc;
431 lwkt_reltoken(&ntoskrnl_dispatchtoken);
432 return (STATUS_SUCCESS);
434 } else if (obj->dh_sigstate == TRUE) {
435 if (obj->dh_type == EVENT_TYPE_SYNC)
436 obj->dh_sigstate = FALSE;
437 lwkt_reltoken(&ntoskrnl_dispatchtoken);
438 return (STATUS_SUCCESS);
444 INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist));
447 * The timeout value is specified in 100 nanosecond units
448 * and can be a positive or negative number. If it's positive,
449 * then the duetime is absolute, and we need to convert it
450 * to an absolute offset relative to now in order to use it.
451 * If it's negative, then the duetime is relative and we
452 * just have to convert the units.
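 *
 * For example, a relative duetime of -10000000 (one second in
 * 100-nanosecond units) works out below to tv_sec = 1, tv_usec = 0.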
455 if (duetime != NULL) {
457 tv.tv_sec = - (*duetime) / 10000000;
458 tv.tv_usec = (- (*duetime) / 10) -
459 (tv.tv_sec * 1000000);
461 ntoskrnl_time(&curtime);
462 if (*duetime < curtime)
463 tv.tv_sec = tv.tv_usec = 0;
465 tv.tv_sec = ((*duetime) - curtime) / 10000000;
466 tv.tv_usec = ((*duetime) - curtime) / 10 -
467 (tv.tv_sec * 1000000);
472 lwkt_reltoken(&ntoskrnl_dispatchtoken);
474 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
475 error = ndis_thsuspend(td, duetime == NULL ? 0 : ticks);
477 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
479 /* We timed out. Leave the object alone and return status. */
481 if (error == EWOULDBLOCK) {
482 REMOVE_LIST_ENTRY((&w.wb_waitlist));
483 lwkt_reltoken(&ntoskrnl_dispatchtoken);
484 return(STATUS_TIMEOUT);
488 * Mutexes are always synchronization objects, which means
489 * if several threads are waiting to acquire it, only one will
490 * be woken up. If that one is us, and the mutex is up for grabs,
494 if (obj->dh_size == OTYPE_MUTEX) {
496 if (km->km_ownerthread == NULL) {
497 km->km_ownerthread = curthread->td_proc;
502 if (obj->dh_type == EVENT_TYPE_SYNC)
503 obj->dh_sigstate = FALSE;
504 REMOVE_LIST_ENTRY((&w.wb_waitlist));
506 lwkt_reltoken(&ntoskrnl_dispatchtoken);
508 return(STATUS_SUCCESS);
511 __stdcall static uint32_t
512 ntoskrnl_waitforobjs(uint32_t cnt, nt_dispatch_header *obj[],
513 uint32_t wtype, uint32_t reason, uint32_t mode,
514 uint8_t alertable, int64_t *duetime,
515 wait_block *wb_array)
517 struct thread *td = curthread;
519 wait_block _wb_array[THREAD_WAIT_OBJECTS];
522 int i, wcnt = 0, widx = 0, error = 0;
524 struct timespec t1, t2;
526 if (cnt > MAX_WAIT_OBJECTS)
527 return(STATUS_INVALID_PARAMETER);
528 if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
529 return(STATUS_INVALID_PARAMETER);
531 lwkt_gettoken(&ntoskrnl_dispatchtoken);
533 if (wb_array == NULL)
538 tv.tv_sec = 0; /* fix compiler warning */
539 tv.tv_usec = 0; /* fix compiler warning */
541 /* First pass: see if we can satisfy any waits immediately. */
543 for (i = 0; i < cnt; i++) {
544 if (obj[i]->dh_size == OTYPE_MUTEX) {
545 km = (kmutant *)obj[i];
546 if (km->km_ownerthread == NULL ||
547 km->km_ownerthread == curthread->td_proc) {
548 obj[i]->dh_sigstate = FALSE;
550 km->km_ownerthread = curthread->td_proc;
551 if (wtype == WAITTYPE_ANY) {
552 lwkt_reltoken(&ntoskrnl_dispatchtoken);
553 return (STATUS_WAIT_0 + i);
556 } else if (obj[i]->dh_sigstate == TRUE) {
557 if (obj[i]->dh_type == EVENT_TYPE_SYNC)
558 obj[i]->dh_sigstate = FALSE;
559 if (wtype == WAITTYPE_ANY) {
560 lwkt_reltoken(&ntoskrnl_dispatchtoken);
561 return (STATUS_WAIT_0 + i);
567 * Second pass: set up wait for anything we can't
568 * satisfy immediately.
571 for (i = 0; i < cnt; i++) {
572 if (obj[i]->dh_sigstate == TRUE)
574 INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead),
575 (&w[i].wb_waitlist));
576 w[i].wb_kthread = td;
577 w[i].wb_object = obj[i];
583 tv.tv_sec = -*duetime / 10000000;
584 tv.tv_usec = (-*duetime / 10) - (tv.tv_sec * 1000000);
586 ntoskrnl_time(&curtime);
587 if (*duetime < curtime) {
591 tv.tv_sec = ((*duetime) - curtime) / 10000000;
592 tv.tv_usec = ((*duetime) - curtime) / 10 -
593 (tv.tv_sec * 1000000);
600 lwkt_reltoken(&ntoskrnl_dispatchtoken);
603 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
604 error = ndis_thsuspend(td, ticks);
606 error = ndis_thsuspend(td, 0);
609 lwkt_gettoken(&ntoskrnl_dispatchtoken);
612 for (i = 0; i < cnt; i++) {
613 if (obj[i]->dh_size == OTYPE_MUTEX) {
615 if (km->km_ownerthread == NULL) {
621 if (obj[i]->dh_sigstate == TRUE) {
623 if (obj[i]->dh_type == EVENT_TYPE_SYNC)
624 obj[i]->dh_sigstate = FALSE;
625 REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
630 if (error || wtype == WAITTYPE_ANY)
634 tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
635 tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
640 for (i = 0; i < cnt; i++)
641 REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
644 if (error == EWOULDBLOCK) {
645 lwkt_reltoken(&ntoskrnl_dispatchtoken);
646 return(STATUS_TIMEOUT);
649 if (wtype == WAITTYPE_ANY && wcnt) {
650 lwkt_reltoken(&ntoskrnl_dispatchtoken);
651 return(STATUS_WAIT_0 + widx);
654 lwkt_reltoken(&ntoskrnl_dispatchtoken);
656 return(STATUS_SUCCESS);
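/*
 * Emulation of the READ_REGISTER_xxx and WRITE_REGISTER_xxx macros
 * for memory-mapped device registers. The register address handed in
 * by the driver is used directly as the bus-space offset against a
 * zero base handle.
 */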
659 __stdcall static void
660 ntoskrnl_writereg_ushort(uint16_t *reg, uint16_t val)
662 bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
666 __stdcall static uint16_t
667 ntoskrnl_readreg_ushort(uint16_t *reg)
669 return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
672 __stdcall static void
673 ntoskrnl_writereg_ulong(uint32_t *reg, uint32_t val)
675 bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
679 __stdcall static uint32_t
680 ntoskrnl_readreg_ulong(uint32_t *reg)
682 return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
685 __stdcall static uint8_t
686 ntoskrnl_readreg_uchar(uint8_t *reg)
688 return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
691 __stdcall static void
692 ntoskrnl_writereg_uchar(uint8_t *reg, uint8_t val)
694 bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
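/*
 * 64-bit arithmetic helpers. On i386, Microsoft's compiler emits
 * calls to these runtime routines (_allmul, _alldiv, _allshl and
 * friends) for 64-bit multiply, divide and shift operations, so
 * Windows binaries import them from ntoskrnl and we must supply
 * equivalents.
 */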
698 __stdcall static int64_t
699 _allmul(int64_t a, int64_t b)
704 __stdcall static int64_t
705 _alldiv(int64_t a, int64_t b)
710 __stdcall static int64_t
711 _allrem(int64_t a, int64_t b)
716 __stdcall static uint64_t
717 _aullmul(uint64_t a, uint64_t b)
722 __stdcall static uint64_t
723 _aulldiv(uint64_t a, uint64_t b)
728 __stdcall static uint64_t
729 _aullrem(uint64_t a, uint64_t b)
734 __regparm static int64_t
735 _allshl(int64_t a, uint8_t b)
740 __regparm static uint64_t
741 _aullshl(uint64_t a, uint8_t b)
746 __regparm static int64_t
747 _allshr(int64_t a, uint8_t b)
752 __regparm static uint64_t
753 _aullshr(uint64_t a, uint8_t b)
759 ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
761 slist_entry *oldhead;
763 oldhead = head->slh_list.slh_next;
764 entry->sl_next = head->slh_list.slh_next;
765 head->slh_list.slh_next = entry;
766 head->slh_list.slh_depth++;
767 head->slh_list.slh_seq++;
773 ntoskrnl_popsl(slist_header *head)
777 first = head->slh_list.slh_next;
779 head->slh_list.slh_next = first->sl_next;
780 head->slh_list.slh_depth--;
781 head->slh_list.slh_seq++;
787 __stdcall static void *
788 ntoskrnl_allocfunc(uint32_t pooltype, size_t size, uint32_t tag)
790 return(kmalloc(size, M_DEVBUF, M_WAITOK));
793 __stdcall static void
794 ntoskrnl_freefunc(void *buf)
796 kfree(buf, M_DEVBUF);
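/*
 * ExInitializePagedLookasideList emulation: record the element size,
 * tag and alloc/free callbacks (falling back to the kmalloc/kfree
 * wrappers above when the driver doesn't supply its own) and
 * initialize the list's spinlock and depth.
 */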
800 __stdcall static void
801 ntoskrnl_init_lookaside(paged_lookaside_list *lookaside,
802 lookaside_alloc_func *allocfunc,
803 lookaside_free_func *freefunc,
804 uint32_t flags, size_t size,
805 uint32_t tag, uint16_t depth)
807 bzero((char *)lookaside, sizeof(paged_lookaside_list));
809 if (size < sizeof(slist_entry))
810 lookaside->nll_l.gl_size = sizeof(slist_entry);
812 lookaside->nll_l.gl_size = size;
813 lookaside->nll_l.gl_tag = tag;
814 if (allocfunc == NULL)
815 lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
817 lookaside->nll_l.gl_allocfunc = allocfunc;
819 if (freefunc == NULL)
820 lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc;
822 lookaside->nll_l.gl_freefunc = freefunc;
824 ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
826 lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
827 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
832 __stdcall static void
833 ntoskrnl_delete_lookaside(paged_lookaside_list *lookaside)
836 __stdcall void (*freefunc)(void *);
838 freefunc = lookaside->nll_l.gl_freefunc;
839 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
845 __stdcall static void
846 ntoskrnl_init_nplookaside(npaged_lookaside_list *lookaside,
847 lookaside_alloc_func *allocfunc,
848 lookaside_free_func *freefunc,
849 uint32_t flags, size_t size,
850 uint32_t tag, uint16_t depth)
852 bzero((char *)lookaside, sizeof(npaged_lookaside_list));
854 if (size < sizeof(slist_entry))
855 lookaside->nll_l.gl_size = sizeof(slist_entry);
857 lookaside->nll_l.gl_size = size;
858 lookaside->nll_l.gl_tag = tag;
859 if (allocfunc == NULL)
860 lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
862 lookaside->nll_l.gl_allocfunc = allocfunc;
864 if (freefunc == NULL)
865 lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc;
867 lookaside->nll_l.gl_freefunc = freefunc;
869 ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
871 lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
872 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
877 __stdcall static void
878 ntoskrnl_delete_nplookaside(npaged_lookaside_list *lookaside)
881 __stdcall void (*freefunc)(void *);
883 freefunc = lookaside->nll_l.gl_freefunc;
884 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
891 * Note: the interlocked slist push and pop routines are
892 * declared to be _fastcall in Windows. gcc 3.4 is supposed
893 * to have support for this calling convention; however, we
894 * don't have that version available yet, so we kludge things
895 * up using some inline assembly.
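 * (Microsoft's _fastcall convention passes the first two arguments
 * in %ecx and %edx; the REGARGS and FASTCALL macros used below are
 * presumably where that register shuffling is hidden.)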
898 __stdcall __regcall static slist_entry *
899 ntoskrnl_push_slist(REGARGS2(slist_header *head, slist_entry *entry))
901 slist_entry *oldhead;
903 oldhead = (slist_entry *)FASTCALL3(ntoskrnl_push_slist_ex,
904 head, entry, &ntoskrnl_global);
909 __stdcall __regcall static slist_entry *
910 ntoskrnl_pop_slist(REGARGS1(slist_header *head))
914 first = (slist_entry *)FASTCALL2(ntoskrnl_pop_slist_ex,
915 head, &ntoskrnl_global);
920 __stdcall __regcall static slist_entry *
921 ntoskrnl_push_slist_ex(REGARGS2(slist_header *head, slist_entry *entry), kspin_lock *lock)
923 slist_entry *oldhead;
926 irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
927 oldhead = ntoskrnl_pushsl(head, entry);
928 FASTCALL2(hal_unlock, lock, irql);
933 __stdcall __regcall static slist_entry *
934 ntoskrnl_pop_slist_ex(REGARGS2(slist_header *head, kspin_lock *lock))
939 irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
940 first = ntoskrnl_popsl(head);
941 FASTCALL2(hal_unlock, lock, irql);
946 __stdcall __regcall void
947 ntoskrnl_lock_dpc(REGARGS1(kspin_lock *lock))
949 while (atomic_poll_acquire_int((volatile u_int *)lock) == 0)
953 __stdcall __regcall void
954 ntoskrnl_unlock_dpc(REGARGS1(kspin_lock *lock))
956 atomic_poll_release_int((volatile u_int *)lock);
959 __stdcall __regcall static uint32_t
960 ntoskrnl_interlock_inc(REGARGS1(volatile uint32_t *addend))
962 atomic_add_long((volatile u_long *)addend, 1);
966 __stdcall __regcall static uint32_t
967 ntoskrnl_interlock_dec(REGARGS1(volatile uint32_t *addend))
969 atomic_subtract_long((volatile u_long *)addend, 1);
973 __stdcall __regcall static void
974 ntoskrnl_interlock_addstat(REGARGS2(uint64_t *addend, uint32_t inc))
978 irql = FASTCALL2(hal_lock, &ntoskrnl_global, DISPATCH_LEVEL);
979 *addend += inc;
980 FASTCALL2(hal_unlock, &ntoskrnl_global, irql);
985 __stdcall static void
986 ntoskrnl_freemdl(ndis_buffer *mdl)
990 if (mdl == NULL || mdl->nb_process == NULL)
993 head = mdl->nb_process;
995 if (head->nb_flags != 0x1)
998 mdl->nb_next = head->nb_next;
1001 /* Decrement count of busy buffers. */
1003 head->nb_bytecount--;
1006 * If the pool has been marked for deletion and there are
1007 * no more buffers outstanding, nuke the pool.
1010 if (head->nb_byteoffset && head->nb_bytecount == 0)
1011 kfree(head, M_DEVBUF);
1016 __stdcall static uint32_t
1017 ntoskrnl_sizeofmdl(void *vaddr, size_t len)
1021 l = sizeof(struct ndis_buffer) +
1022 (sizeof(uint32_t) * SPAN_PAGES(vaddr, len));
1027 __stdcall static void
1028 ntoskrnl_build_npaged_mdl(ndis_buffer *mdl)
1030 mdl->nb_mappedsystemva = (char *)mdl->nb_startva + mdl->nb_byteoffset;
1034 __stdcall static void *
1035 ntoskrnl_mmaplockedpages(ndis_buffer *buf, uint8_t accessmode)
1037 return(MDL_VA(buf));
1040 __stdcall static void *
1041 ntoskrnl_mmaplockedpages_cache(ndis_buffer *buf, uint8_t accessmode,
1042 uint32_t cachetype, void *vaddr,
1043 uint32_t bugcheck, uint32_t prio)
1045 return(MDL_VA(buf));
1048 __stdcall static void
1049 ntoskrnl_munmaplockedpages(void *vaddr, ndis_buffer *buf)
1055 * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
1056 * and KefReleaseSpinLockFromDpcLevel() routines appear to be analogous
1057 * to crit_enter()/crit_exit() in their use. We can't create a new mutex
1058 * lock here because there is no complementary KeFreeSpinLock()
1059 * function. Instead, we grab a mutex from the mutex pool.
1061 __stdcall static void
1062 ntoskrnl_init_lock(kspin_lock *lock)
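/*
 * RtlCompareMemory: unlike libc memcmp(), this is expected to return
 * a count of matching bytes rather than a sign value, which is what
 * the loop below computes.
 */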
1069 __stdcall static size_t
1070 ntoskrnl_memcmp(const void *s1, const void *s2, size_t len)
1072 size_t i, total = 0;
1075 m1 = __DECONST(char *, s1);
1076 m2 = __DECONST(char *, s2);
1078 for (i = 0; i < len; i++) {
1085 __stdcall static void
1086 ntoskrnl_init_ansi_string(ndis_ansi_string *dst, char *src)
1088 ndis_ansi_string *a;
1094 a->nas_len = a->nas_maxlen = 0;
1098 a->nas_len = a->nas_maxlen = strlen(src);
1104 __stdcall static void
1105 ntoskrnl_init_unicode_string(ndis_unicode_string *dst, uint16_t *src)
1107 ndis_unicode_string *u;
1114 u->nus_len = u->nus_maxlen = 0;
1121 u->nus_len = u->nus_maxlen = i * 2;
1127 __stdcall ndis_status
1128 ntoskrnl_unicode_to_int(ndis_unicode_string *ustr, uint32_t base,
1136 uchr = ustr->nus_buf;
1137 len = ustr->nus_len;
1138 bzero(abuf, sizeof(abuf));
1140 if ((char)((*uchr) & 0xFF) == '-') {
1144 } else if ((char)((*uchr) & 0xFF) == '+') {
1151 if ((char)((*uchr) & 0xFF) == 'b') {
1155 } else if ((char)((*uchr) & 0xFF) == 'o') {
1159 } else if ((char)((*uchr) & 0xFF) == 'x') {
1173 ndis_unicode_to_ascii(uchr, len, &astr);
1174 *val = strtoul(abuf, NULL, base);
1176 return(NDIS_STATUS_SUCCESS);
1179 __stdcall static void
1180 ntoskrnl_free_unicode_string(ndis_unicode_string *ustr)
1182 if (ustr->nus_buf == NULL)
1184 kfree(ustr->nus_buf, M_DEVBUF);
1185 ustr->nus_buf = NULL;
1189 __stdcall static void
1190 ntoskrnl_free_ansi_string(ndis_ansi_string *astr)
1192 if (astr->nas_buf == NULL)
1194 kfree(astr->nas_buf, M_DEVBUF);
1195 astr->nas_buf = NULL;
1200 atoi(const char *str)
1202 return (int)strtol(str, NULL, 10);
1206 atol(const char *str)
1208 return strtol(str, NULL, 10);
1217 skrandom(tv.tv_usec);
1218 return((int)krandom());
1221 __stdcall static uint8_t
1222 ntoskrnl_wdmver(uint8_t major, uint8_t minor)
1224 if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
1229 __stdcall static ndis_status
1230 ntoskrnl_devprop(device_object *devobj, uint32_t regprop, uint32_t buflen,
1231 void *prop, uint32_t *reslen)
1233 ndis_miniport_block *block;
1235 block = devobj->do_rsvd;
1238 case DEVPROP_DRIVER_KEYNAME:
1239 ndis_ascii_to_unicode(__DECONST(char *,
1240 device_get_nameunit(block->nmb_dev)), (uint16_t **)&prop);
1241 *reslen = strlen(device_get_nameunit(block->nmb_dev)) * 2;
1244 return(STATUS_INVALID_PARAMETER_2);
1248 return(STATUS_SUCCESS);
1251 __stdcall static void
1252 ntoskrnl_init_mutex(kmutant *kmutex, uint32_t level)
1254 INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead));
1255 kmutex->km_abandoned = FALSE;
1256 kmutex->km_apcdisable = 1;
1257 kmutex->km_header.dh_sigstate = TRUE;
1258 kmutex->km_header.dh_type = EVENT_TYPE_SYNC;
1259 kmutex->km_header.dh_size = OTYPE_MUTEX;
1260 kmutex->km_acquirecnt = 0;
1261 kmutex->km_ownerthread = NULL;
1265 __stdcall static uint32_t
1266 ntoskrnl_release_mutex(kmutant *kmutex, uint8_t kwait)
1268 lwkt_gettoken(&ntoskrnl_dispatchtoken);
1269 if (kmutex->km_ownerthread != curthread->td_proc) {
1270 lwkt_reltoken(&ntoskrnl_dispatchtoken);
1271 return(STATUS_MUTANT_NOT_OWNED);
1273 kmutex->km_acquirecnt--;
1274 if (kmutex->km_acquirecnt == 0) {
1275 kmutex->km_ownerthread = NULL;
1276 lwkt_reltoken(&ntoskrnl_dispatchtoken);
1277 ntoskrnl_wakeup(&kmutex->km_header);
1279 lwkt_reltoken(&ntoskrnl_dispatchtoken);
1282 return(kmutex->km_acquirecnt);
1285 __stdcall static uint32_t
1286 ntoskrnl_read_mutex(kmutant *kmutex)
1288 return(kmutex->km_header.dh_sigstate);
1292 ntoskrnl_init_event(nt_kevent *kevent, uint32_t type, uint8_t state)
1294 INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead));
1295 kevent->k_header.dh_sigstate = state;
1296 kevent->k_header.dh_type = type;
1297 kevent->k_header.dh_size = OTYPE_EVENT;
1302 ntoskrnl_reset_event(nt_kevent *kevent)
1306 lwkt_gettoken(&ntoskrnl_dispatchtoken);
1307 prevstate = kevent->k_header.dh_sigstate;
1308 kevent->k_header.dh_sigstate = FALSE;
1309 lwkt_reltoken(&ntoskrnl_dispatchtoken);
1315 ntoskrnl_set_event(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
1319 prevstate = kevent->k_header.dh_sigstate;
1320 ntoskrnl_wakeup(&kevent->k_header);
1326 ntoskrnl_clear_event(nt_kevent *kevent)
1328 kevent->k_header.dh_sigstate = FALSE;
1333 ntoskrnl_read_event(nt_kevent *kevent)
1335 return(kevent->k_header.dh_sigstate);
1338 __stdcall static ndis_status
1339 ntoskrnl_objref(ndis_handle handle, uint32_t reqaccess, void *otype,
1340 uint8_t accessmode, void **object, void **handleinfo)
1344 nr = kmalloc(sizeof(nt_objref), M_DEVBUF, M_WAITOK|M_ZERO);
1346 INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead));
1347 nr->no_obj = handle;
1348 nr->no_dh.dh_size = OTYPE_THREAD;
1349 TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
1352 return(NDIS_STATUS_SUCCESS);
1355 __stdcall __regcall static void
1356 ntoskrnl_objderef(REGARGS1(void *object))
1361 TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
1362 kfree(nr, M_DEVBUF);
1367 __stdcall static uint32_t
1368 ntoskrnl_zwclose(ndis_handle handle)
1370 return(STATUS_SUCCESS);
1374 * This is here just in case the thread returns without calling
1375 * PsTerminateSystemThread().
1378 ntoskrnl_thrfunc(void *arg)
1380 thread_context *thrctx;
1381 __stdcall uint32_t (*tfunc)(void *);
1386 tfunc = thrctx->tc_thrfunc;
1387 tctx = thrctx->tc_thrctx;
1388 kfree(thrctx, M_TEMP);
1392 ntoskrnl_thread_exit(rval);
1393 return; /* notreached */
1396 __stdcall static ndis_status
1397 ntoskrnl_create_thread(ndis_handle *handle, uint32_t reqaccess,
1398 void *objattrs, ndis_handle phandle,
1399 void *clientid, void *thrfunc, void *thrctx)
1406 tc = kmalloc(sizeof(thread_context), M_TEMP, M_WAITOK);
1408 tc->tc_thrctx = thrctx;
1409 tc->tc_thrfunc = thrfunc;
1411 ksprintf(tname, "windows kthread %d", ntoskrnl_kth);
1412 error = kthread_create_stk(ntoskrnl_thrfunc, tc, &td,
1413 NDIS_KSTACK_PAGES * PAGE_SIZE, tname);
1422 * In Windows, the exit of a thread is an event that you're allowed
1423 * to wait on, assuming you've obtained a reference to the thread using
1424 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
1425 * simulate this behavior is to register each thread we create in a
1426 * reference list, and if someone holds a reference to us, we poke
1429 __stdcall static ndis_status
1430 ntoskrnl_thread_exit(ndis_status status)
1432 struct nt_objref *nr;
1434 TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
1435 if (nr->no_obj != curthread)
1437 ntoskrnl_wakeup(&nr->no_dh);
1444 return(0); /* notreached */
1448 ntoskrnl_dbgprint(char *fmt, ...)
1453 __va_start(ap, fmt);
1457 return(STATUS_SUCCESS);
1460 __stdcall static void
1461 ntoskrnl_debugger(void)
1464 #if __FreeBSD_version < 502113
1465 Debugger("ntoskrnl_debugger(): breakpoint");
1467 kdb_enter("ntoskrnl_debugger(): breakpoint");
1472 ntoskrnl_timercall(void *arg)
1478 timer->k_header.dh_inserted = FALSE;
1481 * If this is a periodic timer, re-arm it
1482 * so it will fire again. We do this before
1483 * running any deferred procedure calls because
1484 * it's possible the DPC might cancel the timer,
1485 * in which case it would be wrong for us to
1486 * re-arm it afterwards.
1489 if (timer->k_period) {
1490 timer->k_header.dh_inserted = TRUE;
1491 callout_reset(timer->k_handle, 1 + timer->k_period * hz / 1000,
1492 ntoskrnl_timercall, timer);
1494 callout_deactivate(timer->k_handle);
1495 kfree(timer->k_handle, M_NDIS);
1496 timer->k_handle = NULL;
1499 if (timer->k_dpc != NULL)
1500 ntoskrnl_queue_dpc(timer->k_dpc, NULL, NULL);
1502 ntoskrnl_wakeup(&timer->k_header);
1506 ntoskrnl_init_timer(ktimer *timer)
1511 ntoskrnl_init_timer_ex(timer, EVENT_TYPE_NOTIFY);
1515 ntoskrnl_init_timer_ex(ktimer *timer, uint32_t type)
1520 INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
1521 timer->k_header.dh_sigstate = FALSE;
1522 timer->k_header.dh_inserted = FALSE;
1523 timer->k_header.dh_type = type;
1524 timer->k_header.dh_size = OTYPE_TIMER;
1525 timer->k_handle = NULL;
1531 * This is a wrapper for Windows deferred procedure calls that
1532 * have been placed on an NDIS thread work queue. We need it
1533 * since the DPC could be a _stdcall function. Also, as far as
1534 * I can tell, deferred procedure calls must run at DISPATCH_LEVEL.
1537 ntoskrnl_run_dpc(void *arg)
1544 dpcfunc = (kdpc_func)dpc->k_deferedfunc;
1545 irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
1546 dpcfunc(dpc, dpc->k_deferredctx, dpc->k_sysarg1, dpc->k_sysarg2);
1547 FASTCALL1(hal_lower_irql, irql);
1553 ntoskrnl_init_dpc(kdpc *dpc, void *dpcfunc, void *dpcctx)
1558 dpc->k_deferedfunc = dpcfunc;
1559 dpc->k_deferredctx = dpcctx;
1565 ntoskrnl_queue_dpc(kdpc *dpc, void *sysarg1, void *sysarg2)
1567 dpc->k_sysarg1 = sysarg1;
1568 dpc->k_sysarg2 = sysarg2;
1569 if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
1576 ntoskrnl_dequeue_dpc(kdpc *dpc)
1578 if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
1585 ntoskrnl_set_timer_ex(ktimer *timer, int64_t duetime, uint32_t period,
1596 if (timer->k_header.dh_inserted == TRUE) {
1597 if (timer->k_handle != NULL)
1598 callout_stop(timer->k_handle);
1599 timer->k_header.dh_inserted = FALSE;
1604 timer->k_duetime = duetime;
1605 timer->k_period = period;
1606 timer->k_header.dh_sigstate = FALSE;
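/*
 * As with the wait routines above, a negative duetime is a relative
 * timeout in 100-nanosecond units and a positive one is an absolute
 * time that must be converted to an offset from the current time.
 */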
1610 tv.tv_sec = - (duetime) / 10000000;
1611 tv.tv_usec = (- (duetime) / 10) -
1612 (tv.tv_sec * 1000000);
1614 ntoskrnl_time(&curtime);
1615 if (duetime < curtime)
1616 tv.tv_sec = tv.tv_usec = 0;
1618 tv.tv_sec = ((duetime) - curtime) / 10000000;
1619 tv.tv_usec = ((duetime) - curtime) / 10 -
1620 (tv.tv_sec * 1000000);
1624 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
1625 timer->k_header.dh_inserted = TRUE;
1626 if (timer->k_handle == NULL) {
1627 timer->k_handle = kmalloc(sizeof(struct callout), M_NDIS,
1629 callout_init(timer->k_handle);
1631 callout_reset(timer->k_handle, ticks, ntoskrnl_timercall, timer);
1637 ntoskrnl_set_timer(ktimer *timer, int64_t duetime, kdpc *dpc)
1639 return (ntoskrnl_set_timer_ex(timer, duetime, 0, dpc));
1643 ntoskrnl_cancel_timer(ktimer *timer)
1650 if (timer->k_header.dh_inserted == TRUE) {
1651 if (timer->k_handle != NULL) {
1652 callout_stop(timer->k_handle);
1653 kfree(timer->k_handle, M_NDIS);
1654 timer->k_handle = NULL;
1656 if (timer->k_dpc != NULL)
1657 ntoskrnl_dequeue_dpc(timer->k_dpc);
1667 ntoskrnl_read_timer(ktimer *timer)
1669 return(timer->k_header.dh_sigstate);
1672 __stdcall static void
1675 kprintf("ntoskrnl dummy called...\n");
1680 image_patch_table ntoskrnl_functbl[] = {
1681 { "RtlCompareMemory", (FUNC)ntoskrnl_memcmp },
1682 { "RtlEqualUnicodeString", (FUNC)ntoskrnl_unicode_equal },
1683 { "RtlCopyUnicodeString", (FUNC)ntoskrnl_unicode_copy },
1684 { "RtlUnicodeStringToAnsiString", (FUNC)ntoskrnl_unicode_to_ansi },
1685 { "RtlAnsiStringToUnicodeString", (FUNC)ntoskrnl_ansi_to_unicode },
1686 { "RtlInitAnsiString", (FUNC)ntoskrnl_init_ansi_string },
1687 { "RtlInitUnicodeString", (FUNC)ntoskrnl_init_unicode_string },
1688 { "RtlFreeAnsiString", (FUNC)ntoskrnl_free_ansi_string },
1689 { "RtlFreeUnicodeString", (FUNC)ntoskrnl_free_unicode_string },
1690 { "RtlUnicodeStringToInteger", (FUNC)ntoskrnl_unicode_to_int },
1691 { "sprintf", (FUNC)ksprintf },
1692 { "vsprintf", (FUNC)kvsprintf },
1693 { "_snprintf", (FUNC)ksnprintf },
1694 { "_vsnprintf", (FUNC)kvsnprintf },
1695 { "DbgPrint", (FUNC)ntoskrnl_dbgprint },
1696 { "DbgBreakPoint", (FUNC)ntoskrnl_debugger },
1697 { "strncmp", (FUNC)strncmp },
1698 { "strcmp", (FUNC)strcmp },
1699 { "strncpy", (FUNC)strncpy },
1700 { "strcpy", (FUNC)strcpy },
1701 { "strlen", (FUNC)strlen },
1702 { "memcpy", (FUNC)memcpy },
1703 { "memmove", (FUNC)memcpy },
1704 { "memset", (FUNC)memset },
1705 { "IofCallDriver", (FUNC)ntoskrnl_iofcalldriver },
1706 { "IofCompleteRequest", (FUNC)ntoskrnl_iofcompletereq },
1707 { "IoBuildSynchronousFsdRequest", (FUNC)ntoskrnl_iobuildsynchfsdreq },
1708 { "KeWaitForSingleObject", (FUNC)ntoskrnl_waitforobj },
1709 { "KeWaitForMultipleObjects", (FUNC)ntoskrnl_waitforobjs },
1710 { "_allmul", (FUNC)_allmul },
1711 { "_alldiv", (FUNC)_alldiv },
1712 { "_allrem", (FUNC)_allrem },
1713 { "_allshr", (FUNC)_allshr },
1714 { "_allshl", (FUNC)_allshl },
1715 { "_aullmul", (FUNC)_aullmul },
1716 { "_aulldiv", (FUNC)_aulldiv },
1717 { "_aullrem", (FUNC)_aullrem },
1718 { "_aullshr", (FUNC)_aullshr },
1719 { "_aullshl", (FUNC)_aullshl },
1720 { "atoi", (FUNC)atoi },
1721 { "atol", (FUNC)atol },
1722 { "rand", (FUNC)rand },
1723 { "WRITE_REGISTER_USHORT", (FUNC)ntoskrnl_writereg_ushort },
1724 { "READ_REGISTER_USHORT", (FUNC)ntoskrnl_readreg_ushort },
1725 { "WRITE_REGISTER_ULONG", (FUNC)ntoskrnl_writereg_ulong },
1726 { "READ_REGISTER_ULONG", (FUNC)ntoskrnl_readreg_ulong },
1727 { "READ_REGISTER_UCHAR", (FUNC)ntoskrnl_readreg_uchar },
1728 { "WRITE_REGISTER_UCHAR", (FUNC)ntoskrnl_writereg_uchar },
1729 { "ExInitializePagedLookasideList", (FUNC)ntoskrnl_init_lookaside },
1730 { "ExDeletePagedLookasideList", (FUNC)ntoskrnl_delete_lookaside },
1731 { "ExInitializeNPagedLookasideList", (FUNC)ntoskrnl_init_nplookaside },
1732 { "ExDeleteNPagedLookasideList", (FUNC)ntoskrnl_delete_nplookaside },
1733 { "InterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist },
1734 { "InterlockedPushEntrySList", (FUNC)ntoskrnl_push_slist },
1735 { "ExInterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist_ex },
1736 { "ExInterlockedPushEntrySList",(FUNC)ntoskrnl_push_slist_ex },
1737 { "KefAcquireSpinLockAtDpcLevel", (FUNC)ntoskrnl_lock_dpc },
1738 { "KefReleaseSpinLockFromDpcLevel", (FUNC)ntoskrnl_unlock_dpc },
1739 { "InterlockedIncrement", (FUNC)ntoskrnl_interlock_inc },
1740 { "InterlockedDecrement", (FUNC)ntoskrnl_interlock_dec },
1741 { "ExInterlockedAddLargeStatistic",
1742 (FUNC)ntoskrnl_interlock_addstat },
1743 { "IoFreeMdl", (FUNC)ntoskrnl_freemdl },
1744 { "MmSizeOfMdl", (FUNC)ntoskrnl_sizeofmdl },
1745 { "MmMapLockedPages", (FUNC)ntoskrnl_mmaplockedpages },
1746 { "MmMapLockedPagesSpecifyCache",
1747 (FUNC)ntoskrnl_mmaplockedpages_cache },
1748 { "MmUnmapLockedPages", (FUNC)ntoskrnl_munmaplockedpages },
1749 { "MmBuildMdlForNonPagedPool", (FUNC)ntoskrnl_build_npaged_mdl },
1750 { "KeInitializeSpinLock", (FUNC)ntoskrnl_init_lock },
1751 { "IoIsWdmVersionAvailable", (FUNC)ntoskrnl_wdmver },
1752 { "IoGetDeviceProperty", (FUNC)ntoskrnl_devprop },
1753 { "KeInitializeMutex", (FUNC)ntoskrnl_init_mutex },
1754 { "KeReleaseMutex", (FUNC)ntoskrnl_release_mutex },
1755 { "KeReadStateMutex", (FUNC)ntoskrnl_read_mutex },
1756 { "KeInitializeEvent", (FUNC)ntoskrnl_init_event },
1757 { "KeSetEvent", (FUNC)ntoskrnl_set_event },
1758 { "KeResetEvent", (FUNC)ntoskrnl_reset_event },
1759 { "KeClearEvent", (FUNC)ntoskrnl_clear_event },
1760 { "KeReadStateEvent", (FUNC)ntoskrnl_read_event },
1761 { "KeInitializeTimer", (FUNC)ntoskrnl_init_timer },
1762 { "KeInitializeTimerEx", (FUNC)ntoskrnl_init_timer_ex },
1763 { "KeSetTimer", (FUNC)ntoskrnl_set_timer },
1764 { "KeSetTimerEx", (FUNC)ntoskrnl_set_timer_ex },
1765 { "KeCancelTimer", (FUNC)ntoskrnl_cancel_timer },
1766 { "KeReadStateTimer", (FUNC)ntoskrnl_read_timer },
1767 { "KeInitializeDpc", (FUNC)ntoskrnl_init_dpc },
1768 { "KeInsertQueueDpc", (FUNC)ntoskrnl_queue_dpc },
1769 { "KeRemoveQueueDpc", (FUNC)ntoskrnl_dequeue_dpc },
1770 { "ObReferenceObjectByHandle", (FUNC)ntoskrnl_objref },
1771 { "ObfDereferenceObject", (FUNC)ntoskrnl_objderef },
1772 { "ZwClose", (FUNC)ntoskrnl_zwclose },
1773 { "PsCreateSystemThread", (FUNC)ntoskrnl_create_thread },
1774 { "PsTerminateSystemThread", (FUNC)ntoskrnl_thread_exit },
1777 * This last entry is a catch-all for any function we haven't
1778 * implemented yet. The PE import list patching routine will
1779 * use it for any function that doesn't have an explicit match
1783 { NULL, (FUNC)dummy },