3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
32 * $FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.40 2004/07/20 20:28:57 wpaul Exp $
33 * $DragonFly: src/sys/emulation/ndis/subr_ntoskrnl.c,v 1.4 2004/09/20 06:32:41 dillon Exp $
36 #include <sys/ctype.h>
37 #include <sys/unistd.h>
38 #include <sys/param.h>
39 #include <sys/types.h>
40 #include <sys/errno.h>
41 #include <sys/systm.h>
42 #include <sys/malloc.h>
45 #include <sys/callout.h>
46 #if __FreeBSD_version > 502113
49 #include <sys/kernel.h>
51 #include <sys/kthread.h>
53 #include <machine/atomic.h>
54 #include <machine/clock.h>
55 #include <machine/bus_memio.h>
56 #include <machine/bus_pio.h>
57 #include <machine/bus.h>
58 #include <machine/stdarg.h>
65 #include "resource_var.h"
66 #include "ntoskrnl_var.h"
/*
 * NOTE(review): this listing appears to be a truncated dump; each line
 * carries a stray leading number and many original lines are missing.
 *
 * __regparm: pass the first three integer arguments in registers
 * (gcc regparm(3)) — used to emulate Windows register-based calling
 * conventions for the 64-bit arithmetic shims below.
 */
70 #define __regparm __attribute__((regparm(3)))
/* Generic function-pointer type used for casting handlers. */
72 #define FUNC void(*)(void)
/*
 * Forward declarations for the ntoskrnl.exe emulation entry points.
 * __stdcall marks Windows stdcall functions; __regcall/REGARGSn mark
 * the _fastcall-style (register-argument) entry points.
 */
/* Unicode/ANSI string helpers (Rtl* equivalents). */
74 __stdcall static uint8_t ntoskrnl_unicode_equal(ndis_unicode_string *,
75 ndis_unicode_string *, uint8_t);
76 __stdcall static void ntoskrnl_unicode_copy(ndis_unicode_string *,
77 ndis_unicode_string *);
78 __stdcall static ndis_status ntoskrnl_unicode_to_ansi(ndis_ansi_string *,
79 ndis_unicode_string *, uint8_t);
80 __stdcall static ndis_status ntoskrnl_ansi_to_unicode(ndis_unicode_string *,
81 ndis_ansi_string *, uint8_t);
/* IRP/driver-call stubs. */
82 __stdcall static void *ntoskrnl_iobuildsynchfsdreq(uint32_t, void *,
83 void *, uint32_t, uint32_t *, void *, void *);
88 __stdcall __regcall static uint32_t
89 ntoskrnl_iofcalldriver(REGARGS2(void *dobj, void *irp));
90 __stdcall __regcall static void
91 ntoskrnl_iofcompletereq(REGARGS2(void *irp, uint8_t prioboost));
/* Interlocked singly-linked list operations. */
92 __stdcall __regcall static slist_entry *
93 ntoskrnl_push_slist(REGARGS2(slist_header *head, slist_entry *entry));
94 __stdcall __regcall static slist_entry *
95 ntoskrnl_pop_slist(REGARGS1(slist_header *head));
96 __stdcall __regcall static slist_entry *
97 ntoskrnl_push_slist_ex(REGARGS2(slist_header *head, slist_entry *entry), kspin_lock *lock);
98 __stdcall __regcall static slist_entry *
99 ntoskrnl_pop_slist_ex(REGARGS2(slist_header *head, kspin_lock *lock));
/* Interlocked arithmetic and object dereference. */
101 __stdcall __regcall static uint32_t
102 ntoskrnl_interlock_inc(REGARGS1(volatile uint32_t *addend));
103 __stdcall __regcall static uint32_t
104 ntoskrnl_interlock_dec(REGARGS1(volatile uint32_t *addend));
105 __stdcall __regcall static void
106 ntoskrnl_interlock_addstat(REGARGS2(uint64_t *addend, uint32_t inc));
107 __stdcall __regcall static void
108 ntoskrnl_objderef(REGARGS1(void *object));
/* Dispatcher-object wait support and DPC/timer callbacks. */
110 __stdcall static uint32_t ntoskrnl_waitforobjs(uint32_t,
111 nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
112 int64_t *, wait_block *);
113 static void ntoskrnl_wakeup(void *);
114 static void ntoskrnl_timercall(void *);
115 static void ntoskrnl_run_dpc(void *);
/* Memory-mapped register accessors (READ/WRITE_REGISTER_*). */
116 __stdcall static void ntoskrnl_writereg_ushort(uint16_t *, uint16_t);
117 __stdcall static uint16_t ntoskrnl_readreg_ushort(uint16_t *);
118 __stdcall static void ntoskrnl_writereg_ulong(uint32_t *, uint32_t);
119 __stdcall static uint32_t ntoskrnl_readreg_ulong(uint32_t *);
120 __stdcall static void ntoskrnl_writereg_uchar(uint8_t *, uint8_t);
121 __stdcall static uint8_t ntoskrnl_readreg_uchar(uint8_t *);
/* 64-bit arithmetic shims matching MSVC runtime helper names. */
122 __stdcall static int64_t _allmul(int64_t, int64_t);
123 __stdcall static int64_t _alldiv(int64_t, int64_t);
124 __stdcall static int64_t _allrem(int64_t, int64_t);
125 __regparm static int64_t _allshr(int64_t, uint8_t);
126 __regparm static int64_t _allshl(int64_t, uint8_t);
127 __stdcall static uint64_t _aullmul(uint64_t, uint64_t);
128 __stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
129 __stdcall static uint64_t _aullrem(uint64_t, uint64_t);
130 __regparm static uint64_t _aullshr(uint64_t, uint8_t);
131 __regparm static uint64_t _aullshl(uint64_t, uint8_t);
/* Lookaside-list (fixed-size allocator cache) support. */
132 __stdcall static void *ntoskrnl_allocfunc(uint32_t, size_t, uint32_t);
133 __stdcall static void ntoskrnl_freefunc(void *);
134 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
135 static slist_entry *ntoskrnl_popsl(slist_header *);
136 __stdcall static void ntoskrnl_init_lookaside(paged_lookaside_list *,
137 lookaside_alloc_func *, lookaside_free_func *,
138 uint32_t, size_t, uint32_t, uint16_t);
139 __stdcall static void ntoskrnl_delete_lookaside(paged_lookaside_list *);
140 __stdcall static void ntoskrnl_init_nplookaside(npaged_lookaside_list *,
141 lookaside_alloc_func *, lookaside_free_func *,
142 uint32_t, size_t, uint32_t, uint16_t);
143 __stdcall static void ntoskrnl_delete_nplookaside(npaged_lookaside_list *);
/* MDL (memory descriptor list) routines. */
144 __stdcall static void ntoskrnl_freemdl(ndis_buffer *);
145 __stdcall static uint32_t ntoskrnl_sizeofmdl(void *, size_t);
146 __stdcall static void ntoskrnl_build_npaged_mdl(ndis_buffer *);
147 __stdcall static void *ntoskrnl_mmaplockedpages(ndis_buffer *, uint8_t);
148 __stdcall static void *ntoskrnl_mmaplockedpages_cache(ndis_buffer *,
149 uint8_t, uint32_t, void *, uint32_t, uint32_t);
150 __stdcall static void ntoskrnl_munmaplockedpages(void *, ndis_buffer *);
/* Spinlocks, memory compare, string init/free, misc libc shims. */
151 __stdcall static void ntoskrnl_init_lock(kspin_lock *);
152 __stdcall static size_t ntoskrnl_memcmp(const void *, const void *, size_t);
153 __stdcall static void ntoskrnl_init_ansi_string(ndis_ansi_string *, char *);
154 __stdcall static void ntoskrnl_init_unicode_string(ndis_unicode_string *,
156 __stdcall static void ntoskrnl_free_unicode_string(ndis_unicode_string *);
157 __stdcall static void ntoskrnl_free_ansi_string(ndis_ansi_string *);
158 __stdcall static ndis_status ntoskrnl_unicode_to_int(ndis_unicode_string *,
159 uint32_t, uint32_t *);
160 static int atoi (const char *);
161 static long atol (const char *);
162 static int rand(void);
163 static void ntoskrnl_time(uint64_t *);
164 __stdcall static uint8_t ntoskrnl_wdmver(uint8_t, uint8_t);
/* System-thread creation/exit and mutex emulation. */
165 static void ntoskrnl_thrfunc(void *);
166 __stdcall static ndis_status ntoskrnl_create_thread(ndis_handle *,
167 uint32_t, void *, ndis_handle, void *, void *, void *);
168 __stdcall static ndis_status ntoskrnl_thread_exit(ndis_status);
169 __stdcall static ndis_status ntoskrnl_devprop(device_object *, uint32_t,
170 uint32_t, void *, uint32_t *);
171 __stdcall static void ntoskrnl_init_mutex(kmutant *, uint32_t);
172 __stdcall static uint32_t ntoskrnl_release_mutex(kmutant *, uint8_t);
173 __stdcall static uint32_t ntoskrnl_read_mutex(kmutant *);
174 __stdcall static ndis_status ntoskrnl_objref(ndis_handle, uint32_t, void *,
175 uint8_t, void **, void **);
176 __stdcall static uint32_t ntoskrnl_zwclose(ndis_handle);
177 static uint32_t ntoskrnl_dbgprint(char *, ...);
178 __stdcall static void ntoskrnl_debugger(void);
179 __stdcall static void dummy(void);
/*
 * File-scope state for the ntoskrnl emulation.
 * - ntoskrnl_dispatchtoken serializes access to dispatcher-object
 *   state (sigstate, wait lists) via DragonFly LWKT tokens.
 * - ntoskrnl_global is a shared kspin_lock for the slist/stat helpers.
 * - ntoskrnl_kth numbers created "windows kthread" instances.
 * - ntoskrnl_reflist tracks thread object references for ObDereference.
 */
181 static struct lwkt_token ntoskrnl_dispatchtoken;
182 static kspin_lock ntoskrnl_global;
183 static int ntoskrnl_kth = 0;
184 static struct nt_objref_head ntoskrnl_reflist;
186 static MALLOC_DEFINE(M_NDIS, "ndis", "ndis emulation");
/*
 * NOTE(review): the lines below are the bodies of the module
 * init/uninit routines; their headers are missing from this listing.
 */
191 lwkt_token_init(&ntoskrnl_dispatchtoken);
192 ntoskrnl_init_lock(&ntoskrnl_global);
193 TAILQ_INIT(&ntoskrnl_reflist);
200 lwkt_token_uninit(&ntoskrnl_dispatchtoken);
/*
 * RtlEqualUnicodeString emulation: compare two counted UTF-16 strings,
 * optionally case-insensitively. Returns TRUE on equality.
 * NOTE(review): the comparison only looks at the low byte of each
 * 16-bit character ("& 0xFF") — presumably assumes ASCII content;
 * listing is fragmentary (several lines missing).
 */
204 __stdcall static uint8_t
205 ntoskrnl_unicode_equal(str1, str2, caseinsensitive)
206 ndis_unicode_string *str1;
207 ndis_unicode_string *str2;
208 uint8_t caseinsensitive;
/* Lengths must match before any character comparison. */
212 if (str1->nus_len != str2->nus_len)
215 for (i = 0; i < str1->nus_len; i++) {
216 if (caseinsensitive == TRUE) {
217 if (toupper((char)(str1->nus_buf[i] & 0xFF)) !=
218 toupper((char)(str2->nus_buf[i] & 0xFF)))
221 if (str1->nus_buf[i] != str2->nus_buf[i])
/*
 * RtlCopyUnicodeString emulation: copy src into dest, truncating the
 * copied length to dest's capacity (nus_maxlen) when src is longer.
 */
229 __stdcall static void
230 ntoskrnl_unicode_copy(dest, src)
231 ndis_unicode_string *dest;
232 ndis_unicode_string *src;
235 if (dest->nus_maxlen >= src->nus_len)
236 dest->nus_len = src->nus_len;
238 dest->nus_len = dest->nus_maxlen;
239 memcpy(dest->nus_buf, src->nus_buf, dest->nus_len);
/*
 * RtlUnicodeStringToAnsiString emulation. If 'allocate' is TRUE a new
 * ANSI buffer is allocated for dest; otherwise the conversion is done
 * in place into dest's existing buffer, truncated to dest->nas_maxlen.
 * Returns NDIS_STATUS_FAILURE on NULL args or conversion failure.
 */
243 __stdcall static ndis_status
244 ntoskrnl_unicode_to_ansi(dest, src, allocate)
245 ndis_ansi_string *dest;
246 ndis_unicode_string *src;
251 if (dest == NULL || src == NULL)
252 return(NDIS_STATUS_FAILURE);
254 if (allocate == TRUE) {
255 if (ndis_unicode_to_ascii(src->nus_buf, src->nus_len, &astr))
256 return(NDIS_STATUS_FAILURE);
257 dest->nas_buf = astr;
258 dest->nas_len = dest->nas_maxlen = strlen(astr);
/* Non-allocating path: ANSI length is half the UTF-16 byte length. */
260 dest->nas_len = src->nus_len / 2; /* XXX */
261 if (dest->nas_maxlen < dest->nas_len)
262 dest->nas_len = dest->nas_maxlen;
/* NOTE(review): return value of this conversion is not checked here. */
263 ndis_unicode_to_ascii(src->nus_buf, dest->nas_len * 2,
266 return (NDIS_STATUS_SUCCESS);
/*
 * RtlAnsiStringToUnicodeString emulation — mirror image of
 * ntoskrnl_unicode_to_ansi(). When 'allocate' is TRUE a fresh UTF-16
 * buffer is allocated; otherwise conversion goes into dest's existing
 * buffer, truncated to dest->nus_maxlen.
 */
269 __stdcall static ndis_status
270 ntoskrnl_ansi_to_unicode(dest, src, allocate)
271 ndis_unicode_string *dest;
272 ndis_ansi_string *src;
275 uint16_t *ustr = NULL;
277 if (dest == NULL || src == NULL)
278 return(NDIS_STATUS_FAILURE);
280 if (allocate == TRUE) {
281 if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
282 return(NDIS_STATUS_FAILURE);
283 dest->nus_buf = ustr;
/* UTF-16 byte length is twice the ANSI character count. */
284 dest->nus_len = dest->nus_maxlen = strlen(src->nas_buf) * 2;
286 dest->nus_len = src->nas_len * 2; /* XXX */
287 if (dest->nus_maxlen < dest->nus_len)
288 dest->nus_len = dest->nus_maxlen;
/* NOTE(review): return value of this conversion is not checked here. */
289 ndis_ascii_to_unicode(src->nas_buf, &dest->nus_buf);
291 return (NDIS_STATUS_SUCCESS);
/*
 * IRP-related stubs (bodies largely missing from this listing).
 */
294 __stdcall static void *
295 ntoskrnl_iobuildsynchfsdreq(func, dobj, buf, len, off, event, status)
307 __stdcall __regcall static uint32_t
308 ntoskrnl_iofcalldriver(REGARGS2(void *dobj, void *irp))
313 __stdcall __regcall static void
314 ntoskrnl_iofcompletereq(REGARGS2(void *irp, uint8_t prioboost))
/*
 * ntoskrnl_wakeup(): mark a dispatcher object signalled and wake the
 * threads parked on its wait list, under the dispatch token. For
 * EVENT_TYPE_SYNC objects only one waiter is released.
 * NOTE(review): header and several body lines missing from listing.
 */
322 nt_dispatch_header *obj;
326 struct lwkt_tokref tokref;
330 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
331 obj->dh_sigstate = TRUE;
332 e = obj->dh_waitlisthead.nle_flink;
333 while (e != &obj->dh_waitlisthead) {
338 * For synchronization objects, only wake up
341 if (obj->dh_type == EVENT_TYPE_SYNC)
345 lwkt_reltoken(&tokref);
/*
 * ntoskrnl_time() fragment: current time in Windows 100 ns units
 * (the missing tail presumably adds the 1601->1970 epoch offset).
 */
357 *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
364 * KeWaitForSingleObject() is a tricky beast, because it can be used
365 * with several different object types: semaphores, timers, events,
366 * mutexes and threads. Semaphores don't appear very often, but the
367 * other object types are quite common. KeWaitForSingleObject() is
368 * what's normally used to acquire a mutex, and it can be used to
369 * wait for a thread termination.
371 * The Windows NDIS API is implemented in terms of Windows kernel
372 * primitives, and some of the object manipulation is duplicated in
373 * NDIS. For example, NDIS has timers and events, which are actually
374 * Windows kevents and ktimers. Now, you're supposed to only use the
375 * NDIS variants of these objects within the confines of the NDIS API,
376 * but there are some naughty developers out there who will use
377 * KeWaitForSingleObject() on NDIS timer and event objects, so we
378 * have to support that as well. Consequently, our NDIS timer and event
379 * code has to be closely tied into our ntoskrnl timer and event code,
380 * just as it is in Windows.
382 * KeWaitForSingleObject() may do different things for different kinds
385 * - For events, we check if the event has been signalled. If the
386 * event is already in the signalled state, we just return immediately,
387 * otherwise we wait for it to be set to the signalled state by someone
388 * else calling KeSetEvent(). Events can be either synchronization or
389 * notification events.
391 * - For timers, if the timer has already fired and the timer is in
392 * the signalled state, we just return, otherwise we wait on the
393 * timer. Unlike an event, timers get signalled automatically when
394 * they expire rather than someone having to trip them manually.
395 * Timers initialized with KeInitializeTimer() are always notification
396 * events: KeInitializeTimerEx() lets you initialize a timer as
397 * either a notification or synchronization event.
399 * - For mutexes, we try to acquire the mutex and if we can't, we wait
400 * on the mutex until it's available and then grab it. When a mutex is
401 * released, it enters the signaled state, which wakes up one of the
402 * threads waiting to acquire it. Mutexes are always synchronization
405 * - For threads, the only thing we do is wait until the thread object
406 * enters a signalled state, which occurs when the thread terminates.
407 * Threads are always notification events.
409 * A notification event wakes up all threads waiting on an object. A
410 * synchronization event wakes up just one. Also, a synchronization event
411 * is auto-clearing, which means we automatically set the event back to
412 * the non-signalled state once the wakeup is done.
/*
 * KeWaitForSingleObject emulation (see the long comment above for the
 * per-object-type semantics). Returns STATUS_SUCCESS, STATUS_TIMEOUT,
 * or STATUS_INVALID_PARAMETER. duetime is in 100 ns units: negative
 * means relative, positive means absolute (converted against now).
 * NOTE(review): listing is fragmentary; several declarations and
 * else-branches are missing.
 */
416 ntoskrnl_waitforobj(obj, reason, mode, alertable, duetime)
417 nt_dispatch_header *obj;
423 struct thread *td = curthread;
430 struct lwkt_tokref tokref;
433 return(STATUS_INVALID_PARAMETER);
435 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
438 * See if the object is a mutex. If so, and we already own
439 * it, then just increment the acquisition count and return.
441 * For any other kind of object, see if it's already in the
442 * signalled state, and if it is, just return. If the object
443 * is marked as a synchronization event, reset the state to
/* Fast path: mutex free or recursively acquired by this process. */
447 if (obj->dh_size == OTYPE_MUTEX) {
449 if (km->km_ownerthread == NULL ||
450 km->km_ownerthread == curthread->td_proc) {
451 obj->dh_sigstate = FALSE;
453 km->km_ownerthread = curthread->td_proc;
454 lwkt_reltoken(&tokref);
455 return (STATUS_SUCCESS);
/* Fast path: object already signalled (sync events auto-clear). */
457 } else if (obj->dh_sigstate == TRUE) {
458 if (obj->dh_type == EVENT_TYPE_SYNC)
459 obj->dh_sigstate = FALSE;
460 lwkt_reltoken(&tokref);
461 return (STATUS_SUCCESS);
/* Slow path: enqueue ourselves on the object's wait list. */
467 INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist));
470 * The timeout value is specified in 100 nanosecond units
471 * and can be a positive or negative number. If it's positive,
472 * then the duetime is absolute, and we need to convert it
473 * to an absolute offset relative to now in order to use it.
474 * If it's negative, then the duetime is relative and we
475 * just have to convert the units.
478 if (duetime != NULL) {
480 tv.tv_sec = - (*duetime) / 10000000;
481 tv.tv_usec = (- (*duetime) / 10) -
482 (tv.tv_sec * 1000000);
484 ntoskrnl_time(&curtime);
485 if (*duetime < curtime)
486 tv.tv_sec = tv.tv_usec = 0;
488 tv.tv_sec = ((*duetime) - curtime) / 10000000;
489 tv.tv_usec = ((*duetime) - curtime) / 10 -
490 (tv.tv_sec * 1000000);
/* Drop the token while sleeping; +1 tick rounds the timeout up. */
495 lwkt_reltoken(&tokref);
497 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
498 error = ndis_thsuspend(td, duetime == NULL ? 0 : ticks);
500 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
502 /* We timed out. Leave the object alone and return status. */
504 if (error == EWOULDBLOCK) {
505 REMOVE_LIST_ENTRY((&w.wb_waitlist));
506 lwkt_reltoken(&tokref);
507 return(STATUS_TIMEOUT);
511 * Mutexes are always synchronization objects, which means
512 * if several threads are waiting to acquire it, only one will
513 * be woken up. If that one is us, and the mutex is up for grabs,
517 if (obj->dh_size == OTYPE_MUTEX) {
519 if (km->km_ownerthread == NULL) {
520 km->km_ownerthread = curthread->td_proc;
525 if (obj->dh_type == EVENT_TYPE_SYNC)
526 obj->dh_sigstate = FALSE;
527 REMOVE_LIST_ENTRY((&w.wb_waitlist));
529 lwkt_reltoken(&tokref);
531 return(STATUS_SUCCESS);
/*
 * KeWaitForMultipleObjects emulation. Waits on up to MAX_WAIT_OBJECTS
 * dispatcher objects, either until any one (WAITTYPE_ANY) or all of
 * them are signalled. Uses the caller-supplied wb_array when cnt
 * exceeds the on-stack THREAD_WAIT_OBJECTS array.
 * Returns STATUS_WAIT_0 + index, STATUS_SUCCESS, STATUS_TIMEOUT, or
 * STATUS_INVALID_PARAMETER.
 * NOTE(review): listing is fragmentary; several lines are missing.
 */
534 __stdcall static uint32_t
535 ntoskrnl_waitforobjs(cnt, obj, wtype, reason, mode,
536 alertable, duetime, wb_array)
538 nt_dispatch_header *obj[];
544 wait_block *wb_array;
546 struct thread *td = curthread;
548 wait_block _wb_array[THREAD_WAIT_OBJECTS];
551 int i, wcnt = 0, widx = 0, error = 0;
553 struct timespec t1, t2;
554 struct lwkt_tokref tokref;
/* Parameter validation per the KeWaitForMultipleObjects contract. */
556 if (cnt > MAX_WAIT_OBJECTS)
557 return(STATUS_INVALID_PARAMETER);
558 if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
559 return(STATUS_INVALID_PARAMETER);
561 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
563 if (wb_array == NULL)
568 /* First pass: see if we can satisfy any waits immediately. */
570 for (i = 0; i < cnt; i++) {
571 if (obj[i]->dh_size == OTYPE_MUTEX) {
572 km = (kmutant *)obj[i];
573 if (km->km_ownerthread == NULL ||
574 km->km_ownerthread == curthread->td_proc) {
575 obj[i]->dh_sigstate = FALSE;
577 km->km_ownerthread = curthread->td_proc;
578 if (wtype == WAITTYPE_ANY) {
579 lwkt_reltoken(&tokref);
580 return (STATUS_WAIT_0 + i);
583 } else if (obj[i]->dh_sigstate == TRUE) {
584 if (obj[i]->dh_type == EVENT_TYPE_SYNC)
585 obj[i]->dh_sigstate = FALSE;
586 if (wtype == WAITTYPE_ANY) {
587 lwkt_reltoken(&tokref);
588 return (STATUS_WAIT_0 + i);
594 * Second pass: set up wait for anything we can't
595 * satisfy immediately.
598 for (i = 0; i < cnt; i++) {
599 if (obj[i]->dh_sigstate == TRUE)
601 INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead),
602 (&w[i].wb_waitlist));
603 w[i].wb_kthread = td;
604 w[i].wb_object = obj[i];
/* Convert duetime (100 ns, relative if negative) into a timeval. */
608 if (duetime != NULL) {
610 tv.tv_sec = - (*duetime) / 10000000;
611 tv.tv_usec = (- (*duetime) / 10) -
612 (tv.tv_sec * 1000000);
614 ntoskrnl_time(&curtime);
615 if (*duetime < curtime)
616 tv.tv_sec = tv.tv_usec = 0;
618 tv.tv_sec = ((*duetime) - curtime) / 10000000;
619 tv.tv_usec = ((*duetime) - curtime) / 10 -
620 (tv.tv_sec * 1000000);
/* Sleep without the token; re-acquire before touching wait lists. */
627 lwkt_reltoken(&tokref);
629 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
631 error = ndis_thsuspend(td, duetime == NULL ? 0 : ticks);
633 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
/* After wakeup: collect whichever objects became signalled. */
636 for (i = 0; i < cnt; i++) {
637 if (obj[i]->dh_size == OTYPE_MUTEX) {
639 if (km->km_ownerthread == NULL) {
645 if (obj[i]->dh_sigstate == TRUE) {
647 if (obj[i]->dh_type == EVENT_TYPE_SYNC)
648 obj[i]->dh_sigstate = FALSE;
649 REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
654 if (error || wtype == WAITTYPE_ANY)
/* WAITTYPE_ALL: shrink the remaining timeout by the elapsed time. */
657 if (duetime != NULL) {
658 tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
659 tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
/* Unlink any wait blocks still queued before returning. */
664 for (i = 0; i < cnt; i++)
665 REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
668 if (error == EWOULDBLOCK) {
669 lwkt_reltoken(&tokref);
670 return(STATUS_TIMEOUT);
673 if (wtype == WAITTYPE_ANY && wcnt) {
674 lwkt_reltoken(&tokref);
675 return(STATUS_WAIT_0 + widx);
678 lwkt_reltoken(&tokref);
680 return(STATUS_SUCCESS);
/*
 * READ/WRITE_REGISTER_* emulation: memory-mapped device register
 * access via the bus_space(9) API with a zero bus handle; the
 * register pointer doubles as the bus offset.
 */
683 __stdcall static void
684 ntoskrnl_writereg_ushort(reg, val)
688 bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
692 __stdcall static uint16_t
693 ntoskrnl_readreg_ushort(reg)
696 return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
699 __stdcall static void
700 ntoskrnl_writereg_ulong(reg, val)
704 bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
708 __stdcall static uint32_t
709 ntoskrnl_readreg_ulong(reg)
712 return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
715 __stdcall static uint8_t
716 ntoskrnl_readreg_uchar(reg)
719 return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
722 __stdcall static void
723 ntoskrnl_writereg_uchar(reg, val)
727 bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
/*
 * MSVC 64-bit arithmetic runtime shims (_allmul, _alldiv, _allrem,
 * _aull*, shifts). Bodies are missing from this listing; only the
 * return-type lines survive.
 */
731 __stdcall static int64_t
739 __stdcall static int64_t
747 __stdcall static int64_t
755 __stdcall static uint64_t
763 __stdcall static uint64_t
771 __stdcall static uint64_t
779 __regparm static int64_t
787 __regparm static uint64_t
795 __regparm static int64_t
803 __regparm static uint64_t
/*
 * ntoskrnl_pushsl/popsl: unlocked primitives for Windows-style
 * sequenced singly-linked lists; depth and sequence counters are
 * maintained on every operation. Callers provide locking.
 * NOTE(review): headers/returns partially missing from this listing.
 */
812 ntoskrnl_pushsl(head, entry)
816 slist_entry *oldhead;
818 oldhead = head->slh_list.slh_next;
819 entry->sl_next = head->slh_list.slh_next;
820 head->slh_list.slh_next = entry;
821 head->slh_list.slh_depth++;
822 head->slh_list.slh_seq++;
/* popsl fragment: unlink the first entry, if any. */
833 first = head->slh_list.slh_next;
835 head->slh_list.slh_next = first->sl_next;
836 head->slh_list.slh_depth--;
837 head->slh_list.slh_seq++;
/*
 * Default lookaside-list allocator/free: ExAllocatePoolWithTag-style
 * wrappers over kernel malloc/free. Pool type and tag are ignored.
 */
843 __stdcall static void *
844 ntoskrnl_allocfunc(pooltype, size, tag)
849 return(malloc(size, M_DEVBUF, M_WAITOK));
852 __stdcall static void
853 ntoskrnl_freefunc(buf)
/*
 * ExInitializePagedLookasideList emulation. Caller-supplied alloc/free
 * hooks default to ntoskrnl_allocfunc/freefunc; the element size is
 * clamped up to sizeof(slist_entry) so freed elements can be chained.
 */
860 __stdcall static void
861 ntoskrnl_init_lookaside(lookaside, allocfunc, freefunc,
862 flags, size, tag, depth)
863 paged_lookaside_list *lookaside;
864 lookaside_alloc_func *allocfunc;
865 lookaside_free_func *freefunc;
871 bzero((char *)lookaside, sizeof(paged_lookaside_list));
873 if (size < sizeof(slist_entry))
874 lookaside->nll_l.gl_size = sizeof(slist_entry);
876 lookaside->nll_l.gl_size = size;
877 lookaside->nll_l.gl_tag = tag;
878 if (allocfunc == NULL)
879 lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
881 lookaside->nll_l.gl_allocfunc = allocfunc;
883 if (freefunc == NULL)
884 lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc;
886 lookaside->nll_l.gl_freefunc = freefunc;
888 ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
890 lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
891 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
/*
 * ExDeletePagedLookasideList emulation: drain the free list,
 * releasing each cached element through the list's free hook.
 */
896 __stdcall static void
897 ntoskrnl_delete_lookaside(lookaside)
898 paged_lookaside_list *lookaside;
901 __stdcall void (*freefunc)(void *);
903 freefunc = lookaside->nll_l.gl_freefunc;
904 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
/*
 * Non-paged variants: identical logic to the paged versions above,
 * operating on npaged_lookaside_list.
 */
910 __stdcall static void
911 ntoskrnl_init_nplookaside(lookaside, allocfunc, freefunc,
912 flags, size, tag, depth)
913 npaged_lookaside_list *lookaside;
914 lookaside_alloc_func *allocfunc;
915 lookaside_free_func *freefunc;
921 bzero((char *)lookaside, sizeof(npaged_lookaside_list));
923 if (size < sizeof(slist_entry))
924 lookaside->nll_l.gl_size = sizeof(slist_entry);
926 lookaside->nll_l.gl_size = size;
927 lookaside->nll_l.gl_tag = tag;
928 if (allocfunc == NULL)
929 lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
931 lookaside->nll_l.gl_allocfunc = allocfunc;
933 if (freefunc == NULL)
934 lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc;
936 lookaside->nll_l.gl_freefunc = freefunc;
938 ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
940 lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
941 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
946 __stdcall static void
947 ntoskrnl_delete_nplookaside(lookaside)
948 npaged_lookaside_list *lookaside;
951 __stdcall void (*freefunc)(void *);
953 freefunc = lookaside->nll_l.gl_freefunc;
954 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
961 * Note: the interlocked slist push and pop routines are
962 * declared to be _fastcall in Windows. gcc 3.4 is supposed
963 * to have support for this calling convention, however we
964 * don't have that version available yet, so we kludge things
965 * up using some inline assembly.
/*
 * InterlockedPushEntrySList: delegate to the _ex variant with the
 * module-wide ntoskrnl_global spinlock.
 */
968 __stdcall __regcall static slist_entry *
969 ntoskrnl_push_slist(REGARGS2(slist_header *head, slist_entry *entry))
971 slist_entry *oldhead;
973 oldhead = (slist_entry *)FASTCALL3(ntoskrnl_push_slist_ex,
974 head, entry, &ntoskrnl_global);
/* InterlockedPopEntrySList: same pattern, popping the first entry. */
979 __stdcall __regcall static slist_entry *
980 ntoskrnl_pop_slist(REGARGS1(slist_header *head))
984 first = (slist_entry *)FASTCALL2(ntoskrnl_pop_slist_ex,
985 head, &ntoskrnl_global);
/*
 * ExInterlockedPush/PopEntrySList: raise to DISPATCH_LEVEL via the
 * HAL spinlock, do the unlocked list op, then restore the old IRQL.
 */
990 __stdcall __regcall static slist_entry *
991 ntoskrnl_push_slist_ex(REGARGS2(slist_header *head, slist_entry *entry), kspin_lock *lock)
993 slist_entry *oldhead;
996 irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
997 oldhead = ntoskrnl_pushsl(head, entry);
998 FASTCALL2(hal_unlock, lock, irql);
1003 __stdcall __regcall static slist_entry *
1004 ntoskrnl_pop_slist_ex(REGARGS2(slist_header *head, kspin_lock *lock))
1009 irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
1010 first = ntoskrnl_popsl(head);
1011 FASTCALL2(hal_unlock, lock, irql);
/*
 * KefAcquire/ReleaseSpinLockAtDpcLevel: simple spin on an atomic
 * poll-acquire; release is a single atomic store.
 */
1016 __stdcall __regcall void
1017 ntoskrnl_lock_dpc(REGARGS1(kspin_lock *lock))
1019 while (atomic_poll_acquire_int((volatile u_int *)lock) == 0)
1023 __stdcall __regcall void
1024 ntoskrnl_unlock_dpc(REGARGS1(kspin_lock *lock))
1026 atomic_poll_release_int((volatile u_int *)lock);
/*
 * InterlockedIncrement/Decrement emulation.
 * NOTE(review): operates via atomic_*_long on a uint32_t pointer —
 * assumes sizeof(u_long) == sizeof(uint32_t) on this platform (i386).
 */
1029 __stdcall __regcall static uint32_t
1030 ntoskrnl_interlock_inc(REGARGS1(volatile uint32_t *addend))
1032 atomic_add_long((volatile u_long *)addend, 1);
1036 __stdcall __regcall static uint32_t
1037 ntoskrnl_interlock_dec(REGARGS1(volatile uint32_t *addend))
1039 atomic_subtract_long((volatile u_long *)addend, 1);
/* ExInterlockedAddLargeStatistic: 64-bit add under the global lock. */
1043 __stdcall __regcall static void
1044 ntoskrnl_interlock_addstat(REGARGS2(uint64_t *addend, uint32_t inc))
1048 irql = FASTCALL2(hal_lock, &ntoskrnl_global, DISPATCH_LEVEL);
1050 FASTCALL2(hal_unlock, &ntoskrnl_global, irql);
/*
 * IoFreeMdl emulation: return an ndis_buffer to its pool (linked via
 * nb_process as the pool head) and destroy the pool if it has been
 * marked for deletion and no buffers remain outstanding.
 * NOTE(review): listing is fragmentary; several lines are missing.
 */
1055 __stdcall static void
1056 ntoskrnl_freemdl(mdl)
1061 if (mdl == NULL || mdl->nb_process == NULL)
1064 head = mdl->nb_process;
1066 if (head->nb_flags != 0x1)
/* Put the buffer back on the pool's free list. */
1069 mdl->nb_next = head->nb_next;
1070 head->nb_next = mdl;
1072 /* Decrement count of busy buffers. */
1074 head->nb_bytecount--;
1077 * If the pool has been marked for deletion and there are
1078 * no more buffers outstanding, nuke the pool.
1081 if (head->nb_byteoffset && head->nb_bytecount == 0)
1082 free(head, M_DEVBUF);
/*
 * MmSizeOfMdl: descriptor size plus one PFN slot per spanned page.
 */
1087 __stdcall static uint32_t
1088 ntoskrnl_sizeofmdl(vaddr, len)
1094 l = sizeof(struct ndis_buffer) +
1095 (sizeof(uint32_t) * SPAN_PAGES(vaddr, len));
/*
 * MmBuildMdlForNonPagedPool: kernel VA is directly mapped, so the
 * mapped address is just start + offset.
 */
1100 __stdcall static void
1101 ntoskrnl_build_npaged_mdl(mdl)
1104 mdl->nb_mappedsystemva = (char *)mdl->nb_startva + mdl->nb_byteoffset;
/*
 * MmMapLockedPages variants: kernel memory is already mapped, so both
 * simply return the MDL's virtual address; unmap is a no-op.
 */
1108 __stdcall static void *
1109 ntoskrnl_mmaplockedpages(buf, accessmode)
1113 return(MDL_VA(buf));
1116 __stdcall static void *
1117 ntoskrnl_mmaplockedpages_cache(buf, accessmode, cachetype, vaddr,
1126 return(MDL_VA(buf));
1129 __stdcall static void
1130 ntoskrnl_munmaplockedpages(vaddr, buf)
1138 * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
1139 * and KefReleaseSpinLockFromDpcLevel() appear to be analogous
1140 * to splnet()/splx() in their use. We can't create a new mutex
1141 * lock here because there is no complementary KeFreeSpinLock()
1142 * function. Instead, we grab a mutex from the mutex pool.
/* KeInitializeSpinLock emulation (body missing from this listing). */
1144 __stdcall static void
1145 ntoskrnl_init_lock(lock)
/*
 * RtlCompareMemory emulation: returns the count of matching bytes
 * (the running 'total'), not a memcmp()-style ordering.
 */
1153 __stdcall static size_t
1154 ntoskrnl_memcmp(s1, s2, len)
1159 size_t i, total = 0;
1162 m1 = __DECONST(char *, s1);
1163 m2 = __DECONST(char *, s2);
1165 for (i = 0; i < len; i++) {
/*
 * RtlInitAnsiString emulation: point the counted string at src
 * (no copy); NULL src yields a zero-length string.
 */
1172 __stdcall static void
1173 ntoskrnl_init_ansi_string(dst, src)
1174 ndis_ansi_string *dst;
1177 ndis_ansi_string *a;
1183 a->nas_len = a->nas_maxlen = 0;
1187 a->nas_len = a->nas_maxlen = strlen(src);
/*
 * RtlInitUnicodeString emulation: lengths are byte counts, hence the
 * *2 for UTF-16 characters.
 */
1193 __stdcall static void
1194 ntoskrnl_init_unicode_string(dst, src)
1195 ndis_unicode_string *dst;
1198 ndis_unicode_string *u;
1205 u->nus_len = u->nus_maxlen = 0;
1212 u->nus_len = u->nus_maxlen = i * 2;
/*
 * RtlUnicodeStringToInteger emulation: parse an optional sign and
 * 0b/0o/0x radix prefix, convert the remaining UTF-16 digits to ASCII
 * and hand them to strtoul() with the chosen base.
 * NOTE(review): listing is fragmentary; sign handling and buffer
 * bookkeeping lines are missing.
 */
1218 __stdcall ndis_status
1219 ntoskrnl_unicode_to_int(ustr, base, val)
1220 ndis_unicode_string *ustr;
1229 uchr = ustr->nus_buf;
1230 len = ustr->nus_len;
1231 bzero(abuf, sizeof(abuf));
/* Optional leading sign (only the low byte of each wchar is used). */
1233 if ((char)((*uchr) & 0xFF) == '-') {
1237 } else if ((char)((*uchr) & 0xFF) == '+') {
/* Optional radix prefix: b=2, o=8, x=16. */
1244 if ((char)((*uchr) & 0xFF) == 'b') {
1248 } else if ((char)((*uchr) & 0xFF) == 'o') {
1252 } else if ((char)((*uchr) & 0xFF) == 'x') {
1266 ndis_unicode_to_ascii(uchr, len, &astr);
1267 *val = strtoul(abuf, NULL, base);
1269 return(NDIS_STATUS_SUCCESS);
/*
 * RtlFreeUnicodeString / RtlFreeAnsiString emulation: release the
 * buffer (if any) and NULL the pointer to prevent double free.
 */
1272 __stdcall static void
1273 ntoskrnl_free_unicode_string(ustr)
1274 ndis_unicode_string *ustr;
1276 if (ustr->nus_buf == NULL)
1278 free(ustr->nus_buf, M_DEVBUF);
1279 ustr->nus_buf = NULL;
1283 __stdcall static void
1284 ntoskrnl_free_ansi_string(astr)
1285 ndis_ansi_string *astr;
1287 if (astr->nas_buf == NULL)
1289 free(astr->nas_buf, M_DEVBUF);
1290 astr->nas_buf = NULL;
/* atoi/atol shims for drivers; implemented with strtol (base 10). */
1298 return (int)strtol(str, (char **)NULL, 10);
1305 return strtol(str, (char **)NULL, 10);
/*
 * rand() shim: reseeds from the current microsecond value on every
 * call, then returns random(). NOTE(review): per-call srandom() makes
 * this a weak PRNG — acceptable only for non-cryptographic use.
 */
1314 srandom(tv.tv_usec);
1315 return((int)random());
/* IoIsWdmVersionAvailable emulation: claims WDM for WinXP only. */
1318 __stdcall static uint8_t
1319 ntoskrnl_wdmver(major, minor)
1323 if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
/*
 * IoGetDeviceProperty emulation: only DEVPROP_DRIVER_KEYNAME is
 * supported, returning the device's nameunit as a Unicode string.
 * NOTE(review): buflen is not checked against the converted length
 * in the visible lines — possible overflow; listing fragmentary.
 */
1328 __stdcall static ndis_status
1329 ntoskrnl_devprop(devobj, regprop, buflen, prop, reslen)
1330 device_object *devobj;
1336 ndis_miniport_block *block;
1338 block = devobj->do_rsvd;
1341 case DEVPROP_DRIVER_KEYNAME:
1342 ndis_ascii_to_unicode(__DECONST(char *,
1343 device_get_nameunit(block->nmb_dev)), (uint16_t **)&prop);
1344 *reslen = strlen(device_get_nameunit(block->nmb_dev)) * 2;
1347 return(STATUS_INVALID_PARAMETER_2);
1351 return(STATUS_SUCCESS);
/*
 * KeInitializeMutex emulation: a kmutant starts signalled (free),
 * typed as a synchronization event so only one waiter is released.
 */
1354 __stdcall static void
1355 ntoskrnl_init_mutex(kmutex, level)
1359 INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead));
1360 kmutex->km_abandoned = FALSE;
1361 kmutex->km_apcdisable = 1;
1362 kmutex->km_header.dh_sigstate = TRUE;
1363 kmutex->km_header.dh_type = EVENT_TYPE_SYNC;
1364 kmutex->km_header.dh_size = OTYPE_MUTEX;
1365 kmutex->km_acquirecnt = 0;
1366 kmutex->km_ownerthread = NULL;
/*
 * KeReleaseMutex emulation: only the owning process may release;
 * the final release clears ownership and wakes a waiter.
 * Returns the remaining acquire count or STATUS_MUTANT_NOT_OWNED.
 */
1370 __stdcall static uint32_t
1371 ntoskrnl_release_mutex(kmutex, kwait)
1375 struct lwkt_tokref tokref;
1377 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
1378 if (kmutex->km_ownerthread != curthread->td_proc) {
1379 lwkt_reltoken(&tokref);
1380 return(STATUS_MUTANT_NOT_OWNED);
1382 kmutex->km_acquirecnt--;
1383 if (kmutex->km_acquirecnt == 0) {
1384 kmutex->km_ownerthread = NULL;
/* Token dropped before wakeup; ntoskrnl_wakeup retakes it. */
1385 lwkt_reltoken(&tokref);
1386 ntoskrnl_wakeup(&kmutex->km_header);
1388 lwkt_reltoken(&tokref);
1390 return(kmutex->km_acquirecnt);
/* KeReadStateMutex emulation: report the current signal state. */
1393 __stdcall static uint32_t
1394 ntoskrnl_read_mutex(kmutex)
1397 return(kmutex->km_header.dh_sigstate);
/* KeInitializeEvent emulation. */
1401 ntoskrnl_init_event(kevent, type, state)
1406 INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead));
1407 kevent->k_header.dh_sigstate = state;
1408 kevent->k_header.dh_type = type;
1409 kevent->k_header.dh_size = OTYPE_EVENT;
/* KeResetEvent emulation: clear the state, return the old value. */
1414 ntoskrnl_reset_event(kevent)
1418 struct lwkt_tokref tokref;
1420 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
1421 prevstate = kevent->k_header.dh_sigstate;
1422 kevent->k_header.dh_sigstate = FALSE;
1423 lwkt_reltoken(&tokref);
/* KeSetEvent emulation: signal the event and wake waiter(s). */
1429 ntoskrnl_set_event(kevent, increment, kwait)
1436 prevstate = kevent->k_header.dh_sigstate;
1437 ntoskrnl_wakeup(&kevent->k_header);
/* KeClearEvent emulation: clear without returning the old state. */
1443 ntoskrnl_clear_event(kevent)
1446 kevent->k_header.dh_sigstate = FALSE;
/* KeReadStateEvent emulation. */
1451 ntoskrnl_read_event(kevent)
1454 return(kevent->k_header.dh_sigstate);
1457 __stdcall static ndis_status
1458 ntoskrnl_objref(handle, reqaccess, otype, accessmode, object, handleinfo)
1468 nr = malloc(sizeof(nt_objref), M_DEVBUF, M_WAITOK|M_ZERO);
1470 return(NDIS_STATUS_FAILURE);
1472 INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead));
1473 nr->no_obj = handle;
1474 nr->no_dh.dh_size = OTYPE_THREAD;
1475 TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
1478 return(NDIS_STATUS_SUCCESS);
1481 __stdcall __regcall static void
1482 ntoskrnl_objderef(REGARGS1(void *object))
1487 TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
1493 __stdcall static uint32_t
1494 ntoskrnl_zwclose(handle)
1497 return(STATUS_SUCCESS);
/*
 * Kthread trampoline for PsCreateSystemThread: unpack the
 * heap-allocated thread_context (function pointer + argument),
 * free it, invoke the Windows thread routine, then force a proper
 * exit through ntoskrnl_thread_exit().
 *
 * This is here just in case the thread returns without calling
 * PsTerminateSystemThread().
 */
1505 ntoskrnl_thrfunc(arg)
1508 thread_context *thrctx;
1509 __stdcall uint32_t (*tfunc)(void *);
1514 tfunc = thrctx->tc_thrfunc;
1515 tctx = thrctx->tc_thrctx;
/* Context was malloc'd by ntoskrnl_create_thread(); we own it now. */
1516 free(thrctx, M_TEMP);
1520 ntoskrnl_thread_exit(rval);
1521 return; /* notreached */
/*
 * PsCreateSystemThread emulation: package the Windows thread routine
 * and its context into a heap-allocated thread_context, then spawn a
 * native kthread running ntoskrnl_thrfunc() with an enlarged stack
 * (NDIS_KSTACK_PAGES) since Windows drivers assume big kernel stacks.
 * Threads are named "windows kthread N" from a global counter.
 */
1524 __stdcall static ndis_status
1525 ntoskrnl_create_thread(handle, reqaccess, objattrs, phandle,
1526 clientid, thrfunc, thrctx)
1527 ndis_handle *handle;
1530 ndis_handle phandle;
1540 tc = malloc(sizeof(thread_context), M_TEMP, M_WAITOK);
/* NOTE(review): failure return after M_WAITOK malloc — see objref. */
1542 return(NDIS_STATUS_FAILURE);
1544 tc->tc_thrctx = thrctx;
1545 tc->tc_thrfunc = thrfunc;
1547 sprintf(tname, "windows kthread %d", ntoskrnl_kth);
1548 error = kthread_create_stk(ntoskrnl_thrfunc, tc, &td,
1549 NDIS_KSTACK_PAGES * PAGE_SIZE, tname);
/*
 * PsTerminateSystemThread emulation.
 *
 * In Windows, the exit of a thread is an event that you're allowed
 * to wait on, assuming you've obtained a reference to the thread using
 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 * simulate this behavior is to register each thread we create in a
 * reference list, and if someone holds a reference to us, we poke
 * its dispatcher header on the way out so the waiter wakes up.
 */
1565 __stdcall static ndis_status
1566 ntoskrnl_thread_exit(status)
1569 struct nt_objref *nr;
/* Wake anyone waiting on a reference to the current thread. */
1571 TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
1572 if (nr->no_obj != curthread)
1574 ntoskrnl_wakeup(&nr->no_dh);
1581 return(0); /* notreached */
/*
 * DbgPrint emulation: forward the driver's printf-style message to
 * the kernel console (the vprintf call is below this chunk —
 * presumably; verify) and always report success.
 */
1585 ntoskrnl_dbgprint(char *fmt, ...)
1590 __va_start(ap, fmt);
1594 return(STATUS_SUCCESS);
/*
 * DbgBreakPoint emulation: drop into the kernel debugger. The entry
 * API was renamed Debugger() -> kdb_enter() in FreeBSD 5.2.1-ish,
 * hence the version conditional.
 */
1597 __stdcall static void
1598 ntoskrnl_debugger(void)
1601 #if __FreeBSD_version < 502113
1602 Debugger("ntoskrnl_debugger(): breakpoint");
1604 kdb_enter("ntoskrnl_debugger(): breakpoint");
/*
 * callout(9) handler backing KeSetTimer/KeSetTimerEx: fires when a
 * Windows ktimer expires. Re-arms periodic timers, tears down the
 * callout for one-shot timers, queues the associated DPC (if any),
 * and signals the timer's dispatcher header so waiters wake.
 */
1609 ntoskrnl_timercall(arg)
1616 timer->k_header.dh_inserted = FALSE;
/*
 * If this is a periodic timer, re-arm it
 * so it will fire again. We do this before
 * calling any deferred procedure calls because
 * it's possible the DPC might cancel the timer,
 * in which case it would be wrong for us to
 * re-arm it again afterwards.
 */
1627 if (timer->k_period) {
1628 timer->k_header.dh_inserted = TRUE;
/* k_period is in milliseconds; +1 tick guards against rounding to 0. */
1629 callout_reset(timer->k_handle, 1 + timer->k_period * hz / 1000,
1630 ntoskrnl_timercall, timer);
1632 callout_deactivate(timer->k_handle);
/*
 * NOTE(review): the callout structure is freed from inside its own
 * handler here — legal only if nothing else touches k_handle
 * concurrently; confirm against callout(9) usage rules.
 */
1633 free(timer->k_handle, M_NDIS);
1634 timer->k_handle = NULL;
1637 if (timer->k_dpc != NULL)
1638 ntoskrnl_queue_dpc(timer->k_dpc, NULL, NULL);
1640 ntoskrnl_wakeup(&timer->k_header);
/*
 * KeInitializeTimer emulation: a plain timer is just a timer-ex of
 * the notification flavor.
 */
1644 ntoskrnl_init_timer(timer)
1650 ntoskrnl_init_timer_ex(timer, EVENT_TYPE_NOTIFY);
/*
 * KeInitializeTimerEx emulation: initialize the timer's dispatcher
 * header (empty wait list, unsignaled, not inserted, caller-chosen
 * type) and leave the callout handle unallocated until the timer is
 * first armed by ntoskrnl_set_timer_ex().
 */
1654 ntoskrnl_init_timer_ex(timer, type)
1661 INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
1662 timer->k_header.dh_sigstate = FALSE;
1663 timer->k_header.dh_inserted = FALSE;
1664 timer->k_header.dh_type = type;
1665 timer->k_header.dh_size = OTYPE_TIMER;
1666 timer->k_handle = NULL;
/*
 * This is a wrapper for Windows deferred procedure calls that
 * have been placed on an NDIS thread work queue. We need it
 * since the DPC could be a _stdcall function. Also, as far as
 * I can tell, defered procedure calls must run at DISPATCH_LEVEL.
 */
1678 ntoskrnl_run_dpc(arg)
1686 dpcfunc = (kdpc_func)dpc->k_deferedfunc;
/* Raise to DISPATCH_LEVEL for the duration of the DPC, then restore. */
1687 irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
1688 dpcfunc(dpc, dpc->k_deferredctx, dpc->k_sysarg1, dpc->k_sysarg2);
1689 FASTCALL1(hal_lower_irql, irql);
/*
 * KeInitializeDpc emulation: record the deferred routine and its
 * context in the DPC object for later queuing.
 */
1695 ntoskrnl_init_dpc(dpc, dpcfunc, dpcctx)
1703 dpc->k_deferedfunc = dpcfunc;
1704 dpc->k_deferredctx = dpcctx;
/*
 * KeInsertQueueDpc emulation: stash the two system arguments in the
 * DPC and hand it to the NDIS software-interrupt scheduler, which
 * will run it via ntoskrnl_run_dpc(). The ndis_sched() result is
 * checked — presumably to report whether the DPC was newly queued;
 * confirm the branch below this chunk.
 */
1710 ntoskrnl_queue_dpc(dpc, sysarg1, sysarg2)
1715 dpc->k_sysarg1 = sysarg1;
1716 dpc->k_sysarg2 = sysarg2;
1717 if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
/*
 * KeRemoveQueueDpc emulation: try to pull a pending DPC back off the
 * NDIS software-interrupt queue before it runs.
 */
1724 ntoskrnl_dequeue_dpc(dpc)
1727 if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
/*
 * KeSetTimerEx emulation. Windows due times are in 100-nanosecond
 * units: negative means relative to now, positive means an absolute
 * time since 1601. Convert to a timeval, lazily allocate the backing
 * callout on first use, and arm it; 'period' (milliseconds) makes
 * the timer re-fire from ntoskrnl_timercall().
 */
1734 ntoskrnl_set_timer_ex(timer, duetime, period, dpc)
/* A pending timer is implicitly cancelled before being re-armed. */
1748 if (timer->k_header.dh_inserted == TRUE) {
1749 if (timer->k_handle != NULL)
1750 callout_stop(timer->k_handle);
1751 timer->k_header.dh_inserted = FALSE;
1756 timer->k_duetime = duetime;
1757 timer->k_period = period;
1758 timer->k_header.dh_sigstate = FALSE;
/* Relative due time: negate and split 100 ns units into sec/usec. */
1762 tv.tv_sec = - (duetime) / 10000000;
1763 tv.tv_usec = (- (duetime) / 10) -
1764 (tv.tv_sec * 1000000);
/* Absolute due time: expire immediately if already in the past. */
1766 ntoskrnl_time(&curtime);
1767 if (duetime < curtime)
1768 tv.tv_sec = tv.tv_usec = 0;
1770 tv.tv_sec = ((duetime) - curtime) / 10000000;
1771 tv.tv_usec = ((duetime) - curtime) / 10 -
1772 (tv.tv_sec * 1000000);
/* +1 tick so a sub-tick delay does not become an immediate fire. */
1776 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
1777 timer->k_header.dh_inserted = TRUE;
1778 if (timer->k_handle == NULL) {
1779 timer->k_handle = malloc(sizeof(struct callout), M_NDIS,
1781 callout_init(timer->k_handle);
1783 callout_reset(timer->k_handle, ticks, ntoskrnl_timercall, timer);
/*
 * KeSetTimer emulation: a one-shot timer is KeSetTimerEx with a
 * zero period.
 */
1789 ntoskrnl_set_timer(timer, duetime, dpc)
1794 return (ntoskrnl_set_timer_ex(timer, duetime, 0, dpc));
/*
 * KeCancelTimer emulation: if the timer is armed, stop and free its
 * callout and pull any associated DPC back off the work queue before
 * it can run.
 */
1798 ntoskrnl_cancel_timer(timer)
1806 if (timer->k_header.dh_inserted == TRUE) {
1807 if (timer->k_handle != NULL) {
1808 callout_stop(timer->k_handle);
1809 free(timer->k_handle, M_NDIS);
1810 timer->k_handle = NULL;
1812 if (timer->k_dpc != NULL)
1813 ntoskrnl_dequeue_dpc(timer->k_dpc);
/*
 * KeReadStateTimer emulation: report whether the timer has fired
 * (dispatcher header signal state) without blocking.
 */
1823 ntoskrnl_read_timer(timer)
1826 return(timer->k_header.dh_sigstate);
/*
 * Catch-all stub patched into a driver's import table for any
 * ntoskrnl function we have not implemented; just logs the call.
 */
1829 __stdcall static void
1832 printf ("ntoskrnl dummy called...\n");
/*
 * Export table used by the PE loader to patch a Windows driver's
 * ntoskrnl.exe imports: each entry maps an imported symbol name to
 * our local emulation (or a libkern routine).
 */
1837 image_patch_table ntoskrnl_functbl[] = {
1838 { "RtlCompareMemory", (FUNC)ntoskrnl_memcmp },
1839 { "RtlEqualUnicodeString", (FUNC)ntoskrnl_unicode_equal },
1840 { "RtlCopyUnicodeString", (FUNC)ntoskrnl_unicode_copy },
1841 { "RtlUnicodeStringToAnsiString", (FUNC)ntoskrnl_unicode_to_ansi },
1842 { "RtlAnsiStringToUnicodeString", (FUNC)ntoskrnl_ansi_to_unicode },
1843 { "RtlInitAnsiString", (FUNC)ntoskrnl_init_ansi_string },
1844 { "RtlInitUnicodeString", (FUNC)ntoskrnl_init_unicode_string },
1845 { "RtlFreeAnsiString", (FUNC)ntoskrnl_free_ansi_string },
1846 { "RtlFreeUnicodeString", (FUNC)ntoskrnl_free_unicode_string },
1847 { "RtlUnicodeStringToInteger", (FUNC)ntoskrnl_unicode_to_int },
1848 { "sprintf", (FUNC)sprintf },
1849 { "vsprintf", (FUNC)vsprintf },
1850 { "_snprintf", (FUNC)snprintf },
1851 { "_vsnprintf", (FUNC)vsnprintf },
1852 { "DbgPrint", (FUNC)ntoskrnl_dbgprint },
1853 { "DbgBreakPoint", (FUNC)ntoskrnl_debugger },
1854 { "strncmp", (FUNC)strncmp },
1855 { "strcmp", (FUNC)strcmp },
1856 { "strncpy", (FUNC)strncpy },
1857 { "strcpy", (FUNC)strcpy },
1858 { "strlen", (FUNC)strlen },
1859 { "memcpy", (FUNC)memcpy },
/*
 * NOTE(review): memmove is aliased to memcpy, which is not safe for
 * overlapping regions. If the kernel provides a real memmove (or
 * bcopy wrapper), this entry should point at it instead — confirm.
 */
1860 { "memmove", (FUNC)memcpy },
1861 { "memset", (FUNC)memset },
1862 { "IofCallDriver", (FUNC)ntoskrnl_iofcalldriver },
1863 { "IofCompleteRequest", (FUNC)ntoskrnl_iofcompletereq },
1864 { "IoBuildSynchronousFsdRequest", (FUNC)ntoskrnl_iobuildsynchfsdreq },
1865 { "KeWaitForSingleObject", (FUNC)ntoskrnl_waitforobj },
1866 { "KeWaitForMultipleObjects", (FUNC)ntoskrnl_waitforobjs },
1867 { "_allmul", (FUNC)_allmul },
1868 { "_alldiv", (FUNC)_alldiv },
1869 { "_allrem", (FUNC)_allrem },
1870 { "_allshr", (FUNC)_allshr },
1871 { "_allshl", (FUNC)_allshl },
1872 { "_aullmul", (FUNC)_aullmul },
1873 { "_aulldiv", (FUNC)_aulldiv },
1874 { "_aullrem", (FUNC)_aullrem },
1875 { "_aullshr", (FUNC)_aullshr },
1876 { "_aullshl", (FUNC)_aullshl },
1877 { "atoi", (FUNC)atoi },
1878 { "atol", (FUNC)atol },
1879 { "rand", (FUNC)rand },
1880 { "WRITE_REGISTER_USHORT", (FUNC)ntoskrnl_writereg_ushort },
1881 { "READ_REGISTER_USHORT", (FUNC)ntoskrnl_readreg_ushort },
1882 { "WRITE_REGISTER_ULONG", (FUNC)ntoskrnl_writereg_ulong },
1883 { "READ_REGISTER_ULONG", (FUNC)ntoskrnl_readreg_ulong },
1884 { "READ_REGISTER_UCHAR", (FUNC)ntoskrnl_readreg_uchar },
1885 { "WRITE_REGISTER_UCHAR", (FUNC)ntoskrnl_writereg_uchar },
1886 { "ExInitializePagedLookasideList", (FUNC)ntoskrnl_init_lookaside },
1887 { "ExDeletePagedLookasideList", (FUNC)ntoskrnl_delete_lookaside },
1888 { "ExInitializeNPagedLookasideList", (FUNC)ntoskrnl_init_nplookaside },
1889 { "ExDeleteNPagedLookasideList", (FUNC)ntoskrnl_delete_nplookaside },
1890 { "InterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist },
1891 { "InterlockedPushEntrySList", (FUNC)ntoskrnl_push_slist },
1892 { "ExInterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist_ex },
1893 { "ExInterlockedPushEntrySList",(FUNC)ntoskrnl_push_slist_ex },
1894 { "KefAcquireSpinLockAtDpcLevel", (FUNC)ntoskrnl_lock_dpc },
1895 { "KefReleaseSpinLockFromDpcLevel", (FUNC)ntoskrnl_unlock_dpc },
1896 { "InterlockedIncrement", (FUNC)ntoskrnl_interlock_inc },
1897 { "InterlockedDecrement", (FUNC)ntoskrnl_interlock_dec },
1898 { "ExInterlockedAddLargeStatistic",
1899 (FUNC)ntoskrnl_interlock_addstat },
1900 { "IoFreeMdl", (FUNC)ntoskrnl_freemdl },
1901 { "MmSizeOfMdl", (FUNC)ntoskrnl_sizeofmdl },
1902 { "MmMapLockedPages", (FUNC)ntoskrnl_mmaplockedpages },
1903 { "MmMapLockedPagesSpecifyCache",
1904 (FUNC)ntoskrnl_mmaplockedpages_cache },
1905 { "MmUnmapLockedPages", (FUNC)ntoskrnl_munmaplockedpages },
1906 { "MmBuildMdlForNonPagedPool", (FUNC)ntoskrnl_build_npaged_mdl },
1907 { "KeInitializeSpinLock", (FUNC)ntoskrnl_init_lock },
1908 { "IoIsWdmVersionAvailable", (FUNC)ntoskrnl_wdmver },
1909 { "IoGetDeviceProperty", (FUNC)ntoskrnl_devprop },
1910 { "KeInitializeMutex", (FUNC)ntoskrnl_init_mutex },
1911 { "KeReleaseMutex", (FUNC)ntoskrnl_release_mutex },
1912 { "KeReadStateMutex", (FUNC)ntoskrnl_read_mutex },
1913 { "KeInitializeEvent", (FUNC)ntoskrnl_init_event },
1914 { "KeSetEvent", (FUNC)ntoskrnl_set_event },
1915 { "KeResetEvent", (FUNC)ntoskrnl_reset_event },
1916 { "KeClearEvent", (FUNC)ntoskrnl_clear_event },
1917 { "KeReadStateEvent", (FUNC)ntoskrnl_read_event },
1918 { "KeInitializeTimer", (FUNC)ntoskrnl_init_timer },
1919 { "KeInitializeTimerEx", (FUNC)ntoskrnl_init_timer_ex },
1920 { "KeSetTimer", (FUNC)ntoskrnl_set_timer },
1921 { "KeSetTimerEx", (FUNC)ntoskrnl_set_timer_ex },
1922 { "KeCancelTimer", (FUNC)ntoskrnl_cancel_timer },
1923 { "KeReadStateTimer", (FUNC)ntoskrnl_read_timer },
1924 { "KeInitializeDpc", (FUNC)ntoskrnl_init_dpc },
1925 { "KeInsertQueueDpc", (FUNC)ntoskrnl_queue_dpc },
1926 { "KeRemoveQueueDpc", (FUNC)ntoskrnl_dequeue_dpc },
1927 { "ObReferenceObjectByHandle", (FUNC)ntoskrnl_objref },
1928 { "ObfDereferenceObject", (FUNC)ntoskrnl_objderef },
1929 { "ZwClose", (FUNC)ntoskrnl_zwclose },
1930 { "PsCreateSystemThread", (FUNC)ntoskrnl_create_thread },
1931 { "PsTerminateSystemThread", (FUNC)ntoskrnl_thread_exit },
/*
 * This last entry is a catch-all for any function we haven't
 * implemented yet. The PE import list patching routine will
 * use it for any function that doesn't have an explicit match
 * in this table.
 */
1940 { NULL, (FUNC)dummy },