3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
32 * $FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.40 2004/07/20 20:28:57 wpaul Exp $
33 * $DragonFly: src/sys/emulation/ndis/subr_ntoskrnl.c,v 1.7 2005/12/10 16:06:20 swildner Exp $
36 #include <sys/ctype.h>
37 #include <sys/unistd.h>
38 #include <sys/param.h>
39 #include <sys/types.h>
40 #include <sys/errno.h>
41 #include <sys/systm.h>
42 #include <sys/malloc.h>
45 #include <sys/callout.h>
46 #if __FreeBSD_version > 502113
49 #include <sys/kernel.h>
51 #include <sys/kthread.h>
53 #include <machine/atomic.h>
54 #include <machine/clock.h>
55 #include <machine/bus_memio.h>
56 #include <machine/bus_pio.h>
57 #include <machine/bus.h>
58 #include <machine/stdarg.h>
65 #include "resource_var.h"
66 #include "ntoskrnl_var.h"
70 #define __regparm __attribute__((regparm(3)))
72 #define FUNC void(*)(void)
74 __stdcall static uint8_t ntoskrnl_unicode_equal(ndis_unicode_string *,
75 ndis_unicode_string *, uint8_t);
76 __stdcall static void ntoskrnl_unicode_copy(ndis_unicode_string *,
77 ndis_unicode_string *);
78 __stdcall static ndis_status ntoskrnl_unicode_to_ansi(ndis_ansi_string *,
79 ndis_unicode_string *, uint8_t);
80 __stdcall static ndis_status ntoskrnl_ansi_to_unicode(ndis_unicode_string *,
81 ndis_ansi_string *, uint8_t);
82 __stdcall static void *ntoskrnl_iobuildsynchfsdreq(uint32_t, void *,
83 void *, uint32_t, uint32_t *, void *, void *);
88 __stdcall __regcall static uint32_t
89 ntoskrnl_iofcalldriver(REGARGS2(void *dobj, void *irp));
90 __stdcall __regcall static void
91 ntoskrnl_iofcompletereq(REGARGS2(void *irp, uint8_t prioboost));
92 __stdcall __regcall static slist_entry *
93 ntoskrnl_push_slist(REGARGS2(slist_header *head, slist_entry *entry));
94 __stdcall __regcall static slist_entry *
95 ntoskrnl_pop_slist(REGARGS1(slist_header *head));
96 __stdcall __regcall static slist_entry *
97 ntoskrnl_push_slist_ex(REGARGS2(slist_header *head, slist_entry *entry), kspin_lock *lock);
98 __stdcall __regcall static slist_entry *
99 ntoskrnl_pop_slist_ex(REGARGS2(slist_header *head, kspin_lock *lock));
101 __stdcall __regcall static uint32_t
102 ntoskrnl_interlock_inc(REGARGS1(volatile uint32_t *addend));
103 __stdcall __regcall static uint32_t
104 ntoskrnl_interlock_dec(REGARGS1(volatile uint32_t *addend));
105 __stdcall __regcall static void
106 ntoskrnl_interlock_addstat(REGARGS2(uint64_t *addend, uint32_t inc));
107 __stdcall __regcall static void
108 ntoskrnl_objderef(REGARGS1(void *object));
110 __stdcall static uint32_t ntoskrnl_waitforobjs(uint32_t,
111 nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
112 int64_t *, wait_block *);
113 static void ntoskrnl_wakeup(void *);
114 static void ntoskrnl_timercall(void *);
115 static void ntoskrnl_run_dpc(void *);
116 __stdcall static void ntoskrnl_writereg_ushort(uint16_t *, uint16_t);
117 __stdcall static uint16_t ntoskrnl_readreg_ushort(uint16_t *);
118 __stdcall static void ntoskrnl_writereg_ulong(uint32_t *, uint32_t);
119 __stdcall static uint32_t ntoskrnl_readreg_ulong(uint32_t *);
120 __stdcall static void ntoskrnl_writereg_uchar(uint8_t *, uint8_t);
121 __stdcall static uint8_t ntoskrnl_readreg_uchar(uint8_t *);
122 __stdcall static int64_t _allmul(int64_t, int64_t);
123 __stdcall static int64_t _alldiv(int64_t, int64_t);
124 __stdcall static int64_t _allrem(int64_t, int64_t);
125 __regparm static int64_t _allshr(int64_t, uint8_t);
126 __regparm static int64_t _allshl(int64_t, uint8_t);
127 __stdcall static uint64_t _aullmul(uint64_t, uint64_t);
128 __stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
129 __stdcall static uint64_t _aullrem(uint64_t, uint64_t);
130 __regparm static uint64_t _aullshr(uint64_t, uint8_t);
131 __regparm static uint64_t _aullshl(uint64_t, uint8_t);
132 __stdcall static void *ntoskrnl_allocfunc(uint32_t, size_t, uint32_t);
133 __stdcall static void ntoskrnl_freefunc(void *);
134 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
135 static slist_entry *ntoskrnl_popsl(slist_header *);
136 __stdcall static void ntoskrnl_init_lookaside(paged_lookaside_list *,
137 lookaside_alloc_func *, lookaside_free_func *,
138 uint32_t, size_t, uint32_t, uint16_t);
139 __stdcall static void ntoskrnl_delete_lookaside(paged_lookaside_list *);
140 __stdcall static void ntoskrnl_init_nplookaside(npaged_lookaside_list *,
141 lookaside_alloc_func *, lookaside_free_func *,
142 uint32_t, size_t, uint32_t, uint16_t);
143 __stdcall static void ntoskrnl_delete_nplookaside(npaged_lookaside_list *);
144 __stdcall static void ntoskrnl_freemdl(ndis_buffer *);
145 __stdcall static uint32_t ntoskrnl_sizeofmdl(void *, size_t);
146 __stdcall static void ntoskrnl_build_npaged_mdl(ndis_buffer *);
147 __stdcall static void *ntoskrnl_mmaplockedpages(ndis_buffer *, uint8_t);
148 __stdcall static void *ntoskrnl_mmaplockedpages_cache(ndis_buffer *,
149 uint8_t, uint32_t, void *, uint32_t, uint32_t);
150 __stdcall static void ntoskrnl_munmaplockedpages(void *, ndis_buffer *);
151 __stdcall static void ntoskrnl_init_lock(kspin_lock *);
152 __stdcall static size_t ntoskrnl_memcmp(const void *, const void *, size_t);
153 __stdcall static void ntoskrnl_init_ansi_string(ndis_ansi_string *, char *);
154 __stdcall static void ntoskrnl_init_unicode_string(ndis_unicode_string *,
156 __stdcall static void ntoskrnl_free_unicode_string(ndis_unicode_string *);
157 __stdcall static void ntoskrnl_free_ansi_string(ndis_ansi_string *);
158 __stdcall static ndis_status ntoskrnl_unicode_to_int(ndis_unicode_string *,
159 uint32_t, uint32_t *);
160 static int atoi (const char *);
161 static long atol (const char *);
162 static int rand(void);
163 static void ntoskrnl_time(uint64_t *);
164 __stdcall static uint8_t ntoskrnl_wdmver(uint8_t, uint8_t);
165 static void ntoskrnl_thrfunc(void *);
166 __stdcall static ndis_status ntoskrnl_create_thread(ndis_handle *,
167 uint32_t, void *, ndis_handle, void *, void *, void *);
168 __stdcall static ndis_status ntoskrnl_thread_exit(ndis_status);
169 __stdcall static ndis_status ntoskrnl_devprop(device_object *, uint32_t,
170 uint32_t, void *, uint32_t *);
171 __stdcall static void ntoskrnl_init_mutex(kmutant *, uint32_t);
172 __stdcall static uint32_t ntoskrnl_release_mutex(kmutant *, uint8_t);
173 __stdcall static uint32_t ntoskrnl_read_mutex(kmutant *);
174 __stdcall static ndis_status ntoskrnl_objref(ndis_handle, uint32_t, void *,
175 uint8_t, void **, void **);
176 __stdcall static uint32_t ntoskrnl_zwclose(ndis_handle);
177 static uint32_t ntoskrnl_dbgprint(char *, ...);
178 __stdcall static void ntoskrnl_debugger(void);
179 __stdcall static void dummy(void);
181 static struct lwkt_token ntoskrnl_dispatchtoken;
182 static kspin_lock ntoskrnl_global;
183 static int ntoskrnl_kth = 0;
184 static struct nt_objref_head ntoskrnl_reflist;
186 static MALLOC_DEFINE(M_NDIS, "ndis", "ndis emulation");
/*
 * Module init hook: set up the dispatcher token (serializes access to all
 * dispatch-header wait lists), the global spinlock used by the interlocked
 * slist/statistics routines, and the thread object-reference list.
 */
189 ntoskrnl_libinit(void)
191 lwkt_token_init(&ntoskrnl_dispatchtoken);
192 ntoskrnl_init_lock(&ntoskrnl_global);
193 TAILQ_INIT(&ntoskrnl_reflist);
/*
 * Module teardown hook: release the dispatcher token resources.
 */
198 ntoskrnl_libfini(void)
200 lwkt_token_uninit(&ntoskrnl_dispatchtoken);
/*
 * RtlEqualUnicodeString analogue: compare two counted UTF-16 strings.
 * Lengths must match exactly; comparison is per 16-bit code unit, and
 * when 'caseinsensitive' is TRUE only the low byte of each code unit is
 * case-folded via toupper().
 * NOTE(review): casting to (char) before toupper() can pass negative
 * values for bytes >= 0x80, which is implementation-defined for the
 * <ctype.h> functions — an (unsigned char) cast would be safer; confirm
 * against callers' character set assumptions.
 */
204 __stdcall static uint8_t
205 ntoskrnl_unicode_equal(ndis_unicode_string *str1,
206 ndis_unicode_string *str2,
207 uint8_t caseinsensitive)
211 if (str1->nus_len != str2->nus_len)
214 for (i = 0; i < str1->nus_len; i++) {
215 if (caseinsensitive == TRUE) {
216 if (toupper((char)(str1->nus_buf[i] & 0xFF)) !=
217 toupper((char)(str2->nus_buf[i] & 0xFF)))
220 if (str1->nus_buf[i] != str2->nus_buf[i])
/*
 * RtlCopyUnicodeString analogue: copy 'src' into 'dest', truncating to
 * dest->nus_maxlen if the destination buffer is too small. Lengths are
 * byte counts; no NUL terminator handling is done here.
 */
228 __stdcall static void
229 ntoskrnl_unicode_copy(ndis_unicode_string *dest,
230 ndis_unicode_string *src)
233 if (dest->nus_maxlen >= src->nus_len)
234 dest->nus_len = src->nus_len;
236 dest->nus_len = dest->nus_maxlen;
237 memcpy(dest->nus_buf, src->nus_buf, dest->nus_len);
/*
 * RtlUnicodeStringToAnsiString analogue.  When 'allocate' is TRUE a new
 * ASCII buffer is allocated by ndis_unicode_to_ascii() and ownership is
 * handed to 'dest'; otherwise the conversion is done in place into
 * dest->nas_buf, truncated to dest->nas_maxlen.  Fails with
 * NDIS_STATUS_FAILURE on NULL arguments or conversion failure.
 */
241 __stdcall static ndis_status
242 ntoskrnl_unicode_to_ansi(ndis_ansi_string *dest,
243 ndis_unicode_string *src,
248 if (dest == NULL || src == NULL)
249 return(NDIS_STATUS_FAILURE);
251 if (allocate == TRUE) {
252 if (ndis_unicode_to_ascii(src->nus_buf, src->nus_len, &astr))
253 return(NDIS_STATUS_FAILURE);
254 dest->nas_buf = astr;
255 dest->nas_len = dest->nas_maxlen = strlen(astr);
/* Non-allocating path: unicode length is in bytes, ansi length in chars. */
257 dest->nas_len = src->nus_len / 2; /* XXX */
258 if (dest->nas_maxlen < dest->nas_len)
259 dest->nas_len = dest->nas_maxlen;
260 ndis_unicode_to_ascii(src->nus_buf, dest->nas_len * 2,
263 return (NDIS_STATUS_SUCCESS);
/*
 * RtlAnsiStringToUnicodeString analogue, mirror image of
 * ntoskrnl_unicode_to_ansi(): widen an ASCII counted string to UTF-16.
 * With 'allocate' TRUE a fresh buffer from ndis_ascii_to_unicode() is
 * installed in 'dest'; otherwise conversion goes into the caller-supplied
 * buffer, truncated to dest->nus_maxlen.  Unicode lengths are byte counts
 * (hence the * 2).
 */
266 __stdcall static ndis_status
267 ntoskrnl_ansi_to_unicode(ndis_unicode_string *dest,
268 ndis_ansi_string *src,
271 uint16_t *ustr = NULL;
273 if (dest == NULL || src == NULL)
274 return(NDIS_STATUS_FAILURE);
276 if (allocate == TRUE) {
277 if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
278 return(NDIS_STATUS_FAILURE);
279 dest->nus_buf = ustr;
280 dest->nus_len = dest->nus_maxlen = strlen(src->nas_buf) * 2;
282 dest->nus_len = src->nas_len * 2; /* XXX */
283 if (dest->nus_maxlen < dest->nus_len)
284 dest->nus_len = dest->nus_maxlen;
285 ndis_ascii_to_unicode(src->nas_buf, &dest->nus_buf);
287 return (NDIS_STATUS_SUCCESS);
/*
 * IoBuildSynchronousFsdRequest / IofCallDriver / IofCompleteRequest
 * placeholders.  NOTE(review): the bodies are not visible in this view of
 * the file — presumably these are unimplemented stubs; verify against the
 * full source before relying on them.
 */
290 __stdcall static void *
291 ntoskrnl_iobuildsynchfsdreq(uint32_t func, void *dobj, void *buf,
292 uint32_t len, uint32_t *off,
293 void *event, void *status)
298 __stdcall __regcall static uint32_t
299 ntoskrnl_iofcalldriver(REGARGS2(void *dobj, void *irp))
304 __stdcall __regcall static void
305 ntoskrnl_iofcompletereq(REGARGS2(void *irp, uint8_t prioboost))
/*
 * Signal a dispatcher object and wake threads blocked on it.  Under the
 * dispatcher token: mark the object signalled, then walk its wait list.
 * For synchronization (auto-clearing) objects only one waiter is woken;
 * notification objects wake everybody.
 */
310 ntoskrnl_wakeup(void *arg)
312 nt_dispatch_header *obj;
316 struct lwkt_tokref tokref;
320 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
321 obj->dh_sigstate = TRUE;
322 e = obj->dh_waitlisthead.nle_flink;
323 while (e != &obj->dh_waitlisthead) {
328 * For synchronization objects, only wake up
331 if (obj->dh_type == EVENT_TYPE_SYNC)
335 lwkt_reltoken(&tokref);
/*
 * Return the current time in Windows kernel format: 100-nanosecond
 * intervals (the epoch offset term is not visible in this view).
 */
341 ntoskrnl_time(uint64_t *tval)
346 *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
353 * KeWaitForSingleObject() is a tricky beast, because it can be used
354 * with several different object types: semaphores, timers, events,
355 * mutexes and threads. Semaphores don't appear very often, but the
356 * other object types are quite common. KeWaitForSingleObject() is
357 * what's normally used to acquire a mutex, and it can be used to
358 * wait for a thread termination.
360 * The Windows NDIS API is implemented in terms of Windows kernel
361 * primitives, and some of the object manipulation is duplicated in
362 * NDIS. For example, NDIS has timers and events, which are actually
363 * Windows kevents and ktimers. Now, you're supposed to only use the
364 * NDIS variants of these objects within the confines of the NDIS API,
365 * but there are some naughty developers out there who will use
366 * KeWaitForSingleObject() on NDIS timer and event objects, so we
367 * have to support that as well. Consequently, our NDIS timer and event
368 * code has to be closely tied into our ntoskrnl timer and event code,
369 * just as it is in Windows.
371 * KeWaitForSingleObject() may do different things for different kinds
374 * - For events, we check if the event has been signalled. If the
375 * event is already in the signalled state, we just return immediately,
376 * otherwise we wait for it to be set to the signalled state by someone
377 * else calling KeSetEvent(). Events can be either synchronization or
378 * notification events.
380 * - For timers, if the timer has already fired and the timer is in
381 * the signalled state, we just return, otherwise we wait on the
382 * timer. Unlike an event, timers get signalled automatically when
383 * they expire rather than someone having to trip them manually.
384 * Timers initialized with KeInitializeTimer() are always notification
385 * events: KeInitializeTimerEx() lets you initialize a timer as
386 * either a notification or synchronization event.
388 * - For mutexes, we try to acquire the mutex and if we can't, we wait
389 * on the mutex until it's available and then grab it. When a mutex is
390 * released, it enters the signaled state, which wakes up one of the
391 * threads waiting to acquire it. Mutexes are always synchronization
394 * - For threads, the only thing we do is wait until the thread object
395 * enters a signalled state, which occurs when the thread terminates.
396 * Threads are always notification events.
398 * A notification event wakes up all threads waiting on an object. A
399 * synchronization event wakes up just one. Also, a synchronization event
400 * is auto-clearing, which means we automatically set the event back to
401 * the non-signalled state once the wakeup is done.
/*
 * KeWaitForSingleObject analogue.  See the long comment above for the
 * semantics of each object type.  Returns STATUS_SUCCESS when the object
 * was acquired/signalled, STATUS_TIMEOUT on expiry of 'duetime', or
 * STATUS_INVALID_PARAMETER.  All dispatcher state is manipulated while
 * holding ntoskrnl_dispatchtoken.
 */
405 ntoskrnl_waitforobj(nt_dispatch_header *obj, uint32_t reason,
406 uint32_t mode, uint8_t alertable, int64_t *duetime)
408 struct thread *td = curthread;
415 struct lwkt_tokref tokref;
418 return(STATUS_INVALID_PARAMETER);
420 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
423 * See if the object is a mutex. If so, and we already own
424 * it, then just increment the acquisition count and return.
426 * For any other kind of object, see if it's already in the
427 * signalled state, and if it is, just return. If the object
428 * is marked as a synchronization event, reset the state to
/* Fast path: mutex free or recursively acquired by this process. */
432 if (obj->dh_size == OTYPE_MUTEX) {
434 if (km->km_ownerthread == NULL ||
435 km->km_ownerthread == curthread->td_proc) {
436 obj->dh_sigstate = FALSE;
438 km->km_ownerthread = curthread->td_proc;
439 lwkt_reltoken(&tokref);
440 return (STATUS_SUCCESS);
/* Fast path: object already signalled (auto-clear if sync type). */
442 } else if (obj->dh_sigstate == TRUE) {
443 if (obj->dh_type == EVENT_TYPE_SYNC)
444 obj->dh_sigstate = FALSE;
445 lwkt_reltoken(&tokref);
446 return (STATUS_SUCCESS);
/* Slow path: enqueue our wait block on the object's wait list. */
452 INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist));
455 * The timeout value is specified in 100 nanosecond units
456 * and can be a positive or negative number. If it's positive,
457 * then the duetime is absolute, and we need to convert it
458 * to an absolute offset relative to now in order to use it.
459 * If it's negative, then the duetime is relative and we
460 * just have to convert the units.
463 if (duetime != NULL) {
465 tv.tv_sec = - (*duetime) / 10000000;
466 tv.tv_usec = (- (*duetime) / 10) -
467 (tv.tv_sec * 1000000);
469 ntoskrnl_time(&curtime);
470 if (*duetime < curtime)
471 tv.tv_sec = tv.tv_usec = 0;
473 tv.tv_sec = ((*duetime) - curtime) / 10000000;
474 tv.tv_usec = ((*duetime) - curtime) / 10 -
475 (tv.tv_sec * 1000000);
/* Drop the token while sleeping; ntoskrnl_wakeup() needs it to signal us. */
480 lwkt_reltoken(&tokref);
482 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
483 error = ndis_thsuspend(td, duetime == NULL ? 0 : ticks);
485 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
487 /* We timed out. Leave the object alone and return status. */
489 if (error == EWOULDBLOCK) {
490 REMOVE_LIST_ENTRY((&w.wb_waitlist));
491 lwkt_reltoken(&tokref);
492 return(STATUS_TIMEOUT);
496 * Mutexes are always synchronization objects, which means
497 * if several threads are waiting to acquire it, only one will
498 * be woken up. If that one is us, and the mutex is up for grabs,
/* Woken normally: claim the mutex / auto-clear a sync object, dequeue. */
502 if (obj->dh_size == OTYPE_MUTEX) {
504 if (km->km_ownerthread == NULL) {
505 km->km_ownerthread = curthread->td_proc;
510 if (obj->dh_type == EVENT_TYPE_SYNC)
511 obj->dh_sigstate = FALSE;
512 REMOVE_LIST_ENTRY((&w.wb_waitlist));
514 lwkt_reltoken(&tokref);
516 return(STATUS_SUCCESS);
/*
 * KeWaitForMultipleObjects analogue.  'wtype' selects wait-any (return as
 * soon as one object of the 'cnt' objects is signalled, status encodes
 * which via STATUS_WAIT_0 + index) or wait-all (loop until every object
 * has been satisfied).  A caller-supplied wait block array is required
 * above THREAD_WAIT_OBJECTS; at most MAX_WAIT_OBJECTS are allowed.
 * Timeout handling matches ntoskrnl_waitforobj(); for wait-all the
 * remaining timeout is decremented by the elapsed time each iteration.
 */
519 __stdcall static uint32_t
520 ntoskrnl_waitforobjs(uint32_t cnt, nt_dispatch_header *obj[],
521 uint32_t wtype, uint32_t reason, uint32_t mode,
522 uint8_t alertable, int64_t *duetime,
523 wait_block *wb_array)
525 struct thread *td = curthread;
527 wait_block _wb_array[THREAD_WAIT_OBJECTS];
530 int i, wcnt = 0, widx = 0, error = 0;
532 struct timespec t1, t2;
533 struct lwkt_tokref tokref;
535 if (cnt > MAX_WAIT_OBJECTS)
536 return(STATUS_INVALID_PARAMETER);
537 if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
538 return(STATUS_INVALID_PARAMETER);
540 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
/* Use the on-stack wait blocks when the caller didn't supply any. */
542 if (wb_array == NULL)
547 /* First pass: see if we can satisfy any waits immediately. */
549 for (i = 0; i < cnt; i++) {
550 if (obj[i]->dh_size == OTYPE_MUTEX) {
551 km = (kmutant *)obj[i];
552 if (km->km_ownerthread == NULL ||
553 km->km_ownerthread == curthread->td_proc) {
554 obj[i]->dh_sigstate = FALSE;
556 km->km_ownerthread = curthread->td_proc;
557 if (wtype == WAITTYPE_ANY) {
558 lwkt_reltoken(&tokref);
559 return (STATUS_WAIT_0 + i);
562 } else if (obj[i]->dh_sigstate == TRUE) {
563 if (obj[i]->dh_type == EVENT_TYPE_SYNC)
564 obj[i]->dh_sigstate = FALSE;
565 if (wtype == WAITTYPE_ANY) {
566 lwkt_reltoken(&tokref);
567 return (STATUS_WAIT_0 + i);
573 * Second pass: set up wait for anything we can't
574 * satisfy immediately.
577 for (i = 0; i < cnt; i++) {
578 if (obj[i]->dh_sigstate == TRUE)
580 INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead),
581 (&w[i].wb_waitlist));
582 w[i].wb_kthread = td;
583 w[i].wb_object = obj[i];
/* Convert duetime exactly as in ntoskrnl_waitforobj() above. */
587 if (duetime != NULL) {
589 tv.tv_sec = - (*duetime) / 10000000;
590 tv.tv_usec = (- (*duetime) / 10) -
591 (tv.tv_sec * 1000000);
593 ntoskrnl_time(&curtime);
594 if (*duetime < curtime)
595 tv.tv_sec = tv.tv_usec = 0;
597 tv.tv_sec = ((*duetime) - curtime) / 10000000;
598 tv.tv_usec = ((*duetime) - curtime) / 10 -
599 (tv.tv_sec * 1000000);
/* Sleep without the token so wakeups can be delivered. */
606 lwkt_reltoken(&tokref);
608 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
610 error = ndis_thsuspend(td, duetime == NULL ? 0 : ticks);
612 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
/* Post-wakeup scan: account for objects that became signalled. */
615 for (i = 0; i < cnt; i++) {
616 if (obj[i]->dh_size == OTYPE_MUTEX) {
618 if (km->km_ownerthread == NULL) {
624 if (obj[i]->dh_sigstate == TRUE) {
626 if (obj[i]->dh_type == EVENT_TYPE_SYNC)
627 obj[i]->dh_sigstate = FALSE;
628 REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
633 if (error || wtype == WAITTYPE_ANY)
/* Wait-all: charge elapsed time against the remaining timeout. */
636 if (duetime != NULL) {
637 tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
638 tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
/* Unlink any wait blocks still queued before returning. */
643 for (i = 0; i < cnt; i++)
644 REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
647 if (error == EWOULDBLOCK) {
648 lwkt_reltoken(&tokref);
649 return(STATUS_TIMEOUT);
652 if (wtype == WAITTYPE_ANY && wcnt) {
653 lwkt_reltoken(&tokref);
654 return(STATUS_WAIT_0 + widx);
657 lwkt_reltoken(&tokref);
659 return(STATUS_SUCCESS);
/*
 * READ_REGISTER_*/WRITE_REGISTER_* analogues: 8/16/32-bit accesses to
 * memory-mapped device registers via bus_space.  The register pointer is
 * reinterpreted as a bus_size_t offset in the memory space, with a zero
 * bus handle.
 */
662 __stdcall static void
663 ntoskrnl_writereg_ushort(uint16_t *reg, uint16_t val)
665 bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
669 __stdcall static uint16_t
670 ntoskrnl_readreg_ushort(uint16_t *reg)
672 return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
675 __stdcall static void
676 ntoskrnl_writereg_ulong(uint32_t *reg, uint32_t val)
678 bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
682 __stdcall static uint32_t
683 ntoskrnl_readreg_ulong(uint32_t *reg)
685 return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
688 __stdcall static uint8_t
689 ntoskrnl_readreg_uchar(uint8_t *reg)
691 return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
694 __stdcall static void
695 ntoskrnl_writereg_uchar(uint8_t *reg, uint8_t val)
697 bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
/*
 * MSVC compiler-runtime helpers for 64-bit arithmetic on i386: signed and
 * unsigned multiply/divide/remainder (__stdcall) and shifts (__regparm),
 * referenced by Windows driver binaries.  Bodies are the obvious C
 * operations (not visible in this sampled view).
 */
701 __stdcall static int64_t
702 _allmul(int64_t a, int64_t b)
707 __stdcall static int64_t
708 _alldiv(int64_t a, int64_t b)
713 __stdcall static int64_t
714 _allrem(int64_t a, int64_t b)
719 __stdcall static uint64_t
720 _aullmul(uint64_t a, uint64_t b)
725 __stdcall static uint64_t
726 _aulldiv(uint64_t a, uint64_t b)
731 __stdcall static uint64_t
732 _aullrem(uint64_t a, uint64_t b)
737 __regparm static int64_t
738 _allshl(int64_t a, uint8_t b)
743 __regparm static uint64_t
744 _aullshl(uint64_t a, uint8_t b)
749 __regparm static int64_t
750 _allshr(int64_t a, uint8_t b)
755 __regparm static uint64_t
756 _aullshr(uint64_t a, uint8_t b)
/*
 * Unlocked singly-linked list primitives backing the Interlocked slist
 * API.  Push prepends 'entry', bumps depth and the ABA sequence counter,
 * and returns the previous head; pop removes and returns the head.
 * Callers must provide their own locking (see the *_ex wrappers).
 */
762 ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
764 slist_entry *oldhead;
766 oldhead = head->slh_list.slh_next;
767 entry->sl_next = head->slh_list.slh_next;
768 head->slh_list.slh_next = entry;
769 head->slh_list.slh_depth++;
770 head->slh_list.slh_seq++;
/* Pop the current head entry, or NULL if the list is empty. */
776 ntoskrnl_popsl(slist_header *head)
780 first = head->slh_list.slh_next;
782 head->slh_list.slh_next = first->sl_next;
783 head->slh_list.slh_depth--;
784 head->slh_list.slh_seq++;
/*
 * Default allocate/free callbacks for lookaside lists when the driver
 * doesn't supply its own.  The Windows pool type and tag are ignored;
 * allocations come from M_DEVBUF and may sleep (M_WAITOK).
 */
790 __stdcall static void *
791 ntoskrnl_allocfunc(uint32_t pooltype, size_t size, uint32_t tag)
793 return(malloc(size, M_DEVBUF, M_WAITOK));
796 __stdcall static void
797 ntoskrnl_freefunc(void *buf)
/*
 * ExInitializePagedLookasideList analogue: zero the list, record element
 * size (at least sizeof(slist_entry), since freed elements are threaded
 * through the list) and tag, install default alloc/free callbacks when
 * none are given, and set up the obsolete spinlock and fixed depths.
 */
803 __stdcall static void
804 ntoskrnl_init_lookaside(paged_lookaside_list *lookaside,
805 lookaside_alloc_func *allocfunc,
806 lookaside_free_func *freefunc,
807 uint32_t flags, size_t size,
808 uint32_t tag, uint16_t depth)
810 bzero((char *)lookaside, sizeof(paged_lookaside_list));
812 if (size < sizeof(slist_entry))
813 lookaside->nll_l.gl_size = sizeof(slist_entry);
815 lookaside->nll_l.gl_size = size;
816 lookaside->nll_l.gl_tag = tag;
817 if (allocfunc == NULL)
818 lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
820 lookaside->nll_l.gl_allocfunc = allocfunc;
822 if (freefunc == NULL)
823 lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc;
825 lookaside->nll_l.gl_freefunc = freefunc;
827 ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
829 lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
830 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
/*
 * ExDeletePagedLookasideList analogue: drain every cached element off the
 * internal slist and release each through the list's free callback.
 */
835 __stdcall static void
836 ntoskrnl_delete_lookaside(paged_lookaside_list *lookaside)
839 __stdcall void (*freefunc)(void *);
841 freefunc = lookaside->nll_l.gl_freefunc;
842 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
/*
 * ExInitializeNPagedLookasideList analogue.  Identical in structure to
 * ntoskrnl_init_lookaside() but operates on the non-paged list type
 * (paged vs. non-paged is moot here — all kernel memory is wired).
 */
848 __stdcall static void
849 ntoskrnl_init_nplookaside(npaged_lookaside_list *lookaside,
850 lookaside_alloc_func *allocfunc,
851 lookaside_free_func *freefunc,
852 uint32_t flags, size_t size,
853 uint32_t tag, uint16_t depth)
855 bzero((char *)lookaside, sizeof(npaged_lookaside_list));
857 if (size < sizeof(slist_entry))
858 lookaside->nll_l.gl_size = sizeof(slist_entry);
860 lookaside->nll_l.gl_size = size;
861 lookaside->nll_l.gl_tag = tag;
862 if (allocfunc == NULL)
863 lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc;
865 lookaside->nll_l.gl_allocfunc = allocfunc;
867 if (freefunc == NULL)
868 lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc;
870 lookaside->nll_l.gl_freefunc = freefunc;
872 ntoskrnl_init_lock(&lookaside->nll_obsoletelock);
874 lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH;
875 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
/*
 * ExDeleteNPagedLookasideList analogue: drain and free all cached
 * elements, same pattern as ntoskrnl_delete_lookaside().
 */
880 __stdcall static void
881 ntoskrnl_delete_nplookaside(npaged_lookaside_list *lookaside)
884 __stdcall void (*freefunc)(void *);
886 freefunc = lookaside->nll_l.gl_freefunc;
887 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
894 * Note: the interlocked slist push and pop routines are
895 * declared to be _fastcall in Windows. gcc 3.4 is supposed
896 * to have support for this calling convention, however we
897 * don't have that version available yet, so we kludge things
898 * up using some inline assembly.
/*
 * InterlockedPushEntrySList analogue: delegate to the _ex variant with
 * the module-global spinlock, returning the previous list head.
 */
901 __stdcall __regcall static slist_entry *
902 ntoskrnl_push_slist(REGARGS2(slist_header *head, slist_entry *entry))
904 slist_entry *oldhead;
906 oldhead = (slist_entry *)FASTCALL3(ntoskrnl_push_slist_ex,
907 head, entry, &ntoskrnl_global);
/*
 * InterlockedPopEntrySList analogue: pop via the _ex variant under the
 * module-global spinlock.
 */
912 __stdcall __regcall static slist_entry *
913 ntoskrnl_pop_slist(REGARGS1(slist_header *head))
917 first = (slist_entry *)FASTCALL2(ntoskrnl_pop_slist_ex,
918 head, &ntoskrnl_global);
/*
 * ExInterlockedPushEntrySList analogue: raise to DISPATCH_LEVEL, take the
 * caller's spinlock around the unlocked push, then restore the IRQL.
 */
923 __stdcall __regcall static slist_entry *
924 ntoskrnl_push_slist_ex(REGARGS2(slist_header *head, slist_entry *entry), kspin_lock *lock)
926 slist_entry *oldhead;
929 irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
930 oldhead = ntoskrnl_pushsl(head, entry);
931 FASTCALL2(hal_unlock, lock, irql);
/*
 * ExInterlockedPopEntrySList analogue: locked wrapper around
 * ntoskrnl_popsl(), same locking discipline as the push above.
 */
936 __stdcall __regcall static slist_entry *
937 ntoskrnl_pop_slist_ex(REGARGS2(slist_header *head, kspin_lock *lock))
942 irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL);
943 first = ntoskrnl_popsl(head);
944 FASTCALL2(hal_unlock, lock, irql);
/*
 * KefAcquireSpinLockAtDpcLevel analogue: spin until the lock word is
 * claimed via atomic_poll_acquire_int().
 */
949 __stdcall __regcall void
950 ntoskrnl_lock_dpc(REGARGS1(kspin_lock *lock))
952 while (atomic_poll_acquire_int((volatile u_int *)lock) == 0)
/* KefReleaseSpinLockFromDpcLevel analogue: release the lock word. */
956 __stdcall __regcall void
957 ntoskrnl_unlock_dpc(REGARGS1(kspin_lock *lock))
959 atomic_poll_release_int((volatile u_int *)lock);
/*
 * InterlockedIncrement/InterlockedDecrement analogues.
 * NOTE(review): the uint32_t addend is cast to volatile u_long * — on an
 * LP64 platform u_long is 8 bytes and this would touch adjacent memory;
 * presumably this code is i386-only, but confirm before reuse.
 */
962 __stdcall __regcall static uint32_t
963 ntoskrnl_interlock_inc(REGARGS1(volatile uint32_t *addend))
965 atomic_add_long((volatile u_long *)addend, 1);
969 __stdcall __regcall static uint32_t
970 ntoskrnl_interlock_dec(REGARGS1(volatile uint32_t *addend))
972 atomic_subtract_long((volatile u_long *)addend, 1);
/*
 * ExInterlockedAddLargeStatistic analogue: 64-bit add performed under the
 * module-global spinlock since there is no native 64-bit atomic here.
 */
976 __stdcall __regcall static void
977 ntoskrnl_interlock_addstat(REGARGS2(uint64_t *addend, uint32_t inc))
981 irql = FASTCALL2(hal_lock, &ntoskrnl_global, DISPATCH_LEVEL);
983 FASTCALL2(hal_unlock, &ntoskrnl_global, irql);
/*
 * IoFreeMdl analogue: return an MDL (ndis_buffer) to its owning pool.
 * nb_process points at the pool head buffer; freed buffers are chained
 * onto the head's nb_next list.  The pool head repurposes nb_bytecount as
 * the outstanding-buffer count and (apparently) nb_byteoffset as a
 * "pool marked for deletion" flag — confirm against the pool allocator.
 */
988 __stdcall static void
989 ntoskrnl_freemdl(ndis_buffer *mdl)
993 if (mdl == NULL || mdl->nb_process == NULL)
996 head = mdl->nb_process;
/* 0x1 in nb_flags identifies a valid pool head; bail otherwise. */
998 if (head->nb_flags != 0x1)
1001 mdl->nb_next = head->nb_next;
1002 head->nb_next = mdl;
1004 /* Decrement count of busy buffers. */
1006 head->nb_bytecount--;
1009 * If the pool has been marked for deletion and there are
1010 * no more buffers outstanding, nuke the pool.
1013 if (head->nb_byteoffset && head->nb_bytecount == 0)
1014 free(head, M_DEVBUF);
/*
 * MmSizeOfMdl analogue: size of an MDL describing 'len' bytes starting at
 * 'vaddr' — the fixed header plus one 32-bit PFN slot per spanned page.
 */
1019 __stdcall static uint32_t
1020 ntoskrnl_sizeofmdl(void *vaddr, size_t len)
1024 l = sizeof(struct ndis_buffer) +
1025 (sizeof(uint32_t) * SPAN_PAGES(vaddr, len));
/*
 * MmBuildMdlForNonPagedPool analogue: kernel VA equals the mapped
 * address, so just record startva + byteoffset.
 */
1030 __stdcall static void
1031 ntoskrnl_build_npaged_mdl(ndis_buffer *mdl)
1033 mdl->nb_mappedsystemva = (char *)mdl->nb_startva + mdl->nb_byteoffset;
/*
 * MmMapLockedPages / MmMapLockedPagesSpecifyCache analogues: kernel
 * memory is already mapped, so both simply return the MDL's VA.
 */
1037 __stdcall static void *
1038 ntoskrnl_mmaplockedpages(ndis_buffer *buf, uint8_t accessmode)
1040 return(MDL_VA(buf));
1043 __stdcall static void *
1044 ntoskrnl_mmaplockedpages_cache(ndis_buffer *buf, uint8_t accessmode,
1045 uint32_t cachetype, void *vaddr,
1046 uint32_t bugcheck, uint32_t prio)
1048 return(MDL_VA(buf));
/* MmUnmapLockedPages analogue: nothing to undo. */
1051 __stdcall static void
1052 ntoskrnl_munmaplockedpages(void *vaddr, ndis_buffer *buf)
1058 * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
1059 * and KefReleaseSpinLockFromDpcLevel() appear to be analogous
1060 * to crit_enter()/crit_exit() in their use. We can't create a new mutex
1061 * lock here because there is no complementary KeFreeSpinLock()
1062 * function. Instead, we grab a mutex from the mutex pool.
/* KeInitializeSpinLock analogue: reset the lock word (body sampled out). */
1064 __stdcall static void
1065 ntoskrnl_init_lock(kspin_lock *lock)
/*
 * RtlCompareMemory analogue: unlike memcmp(3), returns the count of
 * matching bytes accumulated over the whole range, not an ordering.
 */
1072 __stdcall static size_t
1073 ntoskrnl_memcmp(const void *s1, const void *s2, size_t len)
1075 size_t i, total = 0;
1078 m1 = __DECONST(char *, s1);
1079 m2 = __DECONST(char *, s2);
1081 for (i = 0; i < len; i++) {
/*
 * RtlInitAnsiString analogue: point the counted string at the caller's
 * NUL-terminated buffer (no copy); zero lengths for a NULL source.
 */
1088 __stdcall static void
1089 ntoskrnl_init_ansi_string(ndis_ansi_string *dst, char *src)
1091 ndis_ansi_string *a;
1097 a->nas_len = a->nas_maxlen = 0;
1101 a->nas_len = a->nas_maxlen = strlen(src);
/*
 * RtlInitUnicodeString analogue: same no-copy init for a UTF-16 source;
 * length is counted in bytes (code units * 2).
 */
1107 __stdcall static void
1108 ntoskrnl_init_unicode_string(ndis_unicode_string *dst, uint16_t *src)
1110 ndis_unicode_string *u;
1117 u->nus_len = u->nus_maxlen = 0;
1124 u->nus_len = u->nus_maxlen = i * 2;
/*
 * RtlUnicodeStringToInteger analogue: parse an optionally signed number
 * from a counted UTF-16 string.  A leading 'b'/'o'/'x' prefix overrides
 * 'base' (binary/octal/hex); the digits are narrowed to ASCII and handed
 * to strtoul().  Always reports NDIS_STATUS_SUCCESS in this view.
 */
1130 __stdcall ndis_status
1131 ntoskrnl_unicode_to_int(ndis_unicode_string *ustr, uint32_t base,
1139 uchr = ustr->nus_buf;
1140 len = ustr->nus_len;
1141 bzero(abuf, sizeof(abuf));
/* Optional sign prefix. */
1143 if ((char)((*uchr) & 0xFF) == '-') {
1147 } else if ((char)((*uchr) & 0xFF) == '+') {
/* Radix prefix letters select the conversion base. */
1154 if ((char)((*uchr) & 0xFF) == 'b') {
1158 } else if ((char)((*uchr) & 0xFF) == 'o') {
1162 } else if ((char)((*uchr) & 0xFF) == 'x') {
1176 ndis_unicode_to_ascii(uchr, len, &astr);
1177 *val = strtoul(abuf, NULL, base);
1179 return(NDIS_STATUS_SUCCESS);
/*
 * RtlFreeUnicodeString analogue: free the buffer (allocated from
 * M_DEVBUF by the conversion routines) and NULL the pointer to guard
 * against double free.
 */
1182 __stdcall static void
1183 ntoskrnl_free_unicode_string(ndis_unicode_string *ustr)
1185 if (ustr->nus_buf == NULL)
1187 free(ustr->nus_buf, M_DEVBUF);
1188 ustr->nus_buf = NULL;
/* RtlFreeAnsiString analogue: same pattern for the ANSI counted string. */
1192 __stdcall static void
1193 ntoskrnl_free_ansi_string(ndis_ansi_string *astr)
1195 if (astr->nas_buf == NULL)
1197 free(astr->nas_buf, M_DEVBUF);
1198 astr->nas_buf = NULL;
/* Minimal libc-style atoi for driver binaries, built on strtol(). */
1203 atoi(const char *str)
1205 return (int)strtol(str, (char **)NULL, 10);
/* Minimal libc-style atol, likewise via strtol(). */
1209 atol(const char *str)
1211 return strtol(str, (char **)NULL, 10);
/*
 * rand() shim: reseeds from the current microsecond count on every call
 * before drawing from random() — crude, but only used by drivers.
 */
1220 srandom(tv.tv_usec);
1221 return((int)random());
/*
 * IoIsWdmVersionAvailable analogue: we claim to be WDM 1.x as shipped
 * with Windows XP and nothing else.
 */
1224 __stdcall static uint8_t
1225 ntoskrnl_wdmver(uint8_t major, uint8_t minor)
1227 if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
/*
 * IoGetDeviceProperty analogue.  Only DEVPROP_DRIVER_KEYNAME is
 * supported: it returns the bus device's name+unit, widened to UTF-16,
 * with *reslen set to the byte length.  Everything else fails with
 * STATUS_INVALID_PARAMETER_2.
 * NOTE(review): ndis_ascii_to_unicode() is given &prop (address of the
 * local parameter) rather than converting into the caller's 'buflen'
 * sized buffer — verify the allocation/ownership contract in the full
 * source; 'buflen' is unused in this view.
 */
1232 __stdcall static ndis_status
1233 ntoskrnl_devprop(device_object *devobj, uint32_t regprop, uint32_t buflen,
1234 void *prop, uint32_t *reslen)
1236 ndis_miniport_block *block;
1238 block = devobj->do_rsvd;
1241 case DEVPROP_DRIVER_KEYNAME:
1242 ndis_ascii_to_unicode(__DECONST(char *,
1243 device_get_nameunit(block->nmb_dev)), (uint16_t **)&prop);
1244 *reslen = strlen(device_get_nameunit(block->nmb_dev)) * 2;
1247 return(STATUS_INVALID_PARAMETER_2);
1251 return(STATUS_SUCCESS);
/*
 * KeInitializeMutex analogue: mutexes are synchronization-type dispatcher
 * objects that start signalled (free) with no owner and a zero recursive
 * acquisition count.
 */
1254 __stdcall static void
1255 ntoskrnl_init_mutex(kmutant *kmutex, uint32_t level)
1257 INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead));
1258 kmutex->km_abandoned = FALSE;
1259 kmutex->km_apcdisable = 1;
1260 kmutex->km_header.dh_sigstate = TRUE;
1261 kmutex->km_header.dh_type = EVENT_TYPE_SYNC;
1262 kmutex->km_header.dh_size = OTYPE_MUTEX;
1263 kmutex->km_acquirecnt = 0;
1264 kmutex->km_ownerthread = NULL;
/*
 * KeReleaseMutex analogue: only the owning process may release; drop one
 * recursion level and, on the last release, clear ownership and wake a
 * waiter.  Returns the remaining acquisition count (or
 * STATUS_MUTANT_NOT_OWNED on a foreign release attempt).
 */
1268 __stdcall static uint32_t
1269 ntoskrnl_release_mutex(kmutant *kmutex, uint8_t kwait)
1271 struct lwkt_tokref tokref;
1273 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
1274 if (kmutex->km_ownerthread != curthread->td_proc) {
1275 lwkt_reltoken(&tokref);
1276 return(STATUS_MUTANT_NOT_OWNED);
1278 kmutex->km_acquirecnt--;
1279 if (kmutex->km_acquirecnt == 0) {
1280 kmutex->km_ownerthread = NULL;
/* Token released before the wakeup; ntoskrnl_wakeup() retakes it. */
1281 lwkt_reltoken(&tokref);
1282 ntoskrnl_wakeup(&kmutex->km_header);
1284 lwkt_reltoken(&tokref);
1286 return(kmutex->km_acquirecnt);
/* KeReadStateMutex analogue: report the raw signalled state. */
1289 __stdcall static uint32_t
1290 ntoskrnl_read_mutex(kmutant *kmutex)
1292 return(kmutex->km_header.dh_sigstate);
1296 ntoskrnl_init_event(nt_kevent *kevent, uint32_t type, uint8_t state)
1298 INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead));
1299 kevent->k_header.dh_sigstate = state;
1300 kevent->k_header.dh_type = type;
1301 kevent->k_header.dh_size = OTYPE_EVENT;
1306 ntoskrnl_reset_event(nt_kevent *kevent)
1309 struct lwkt_tokref tokref;
1311 lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken);
1312 prevstate = kevent->k_header.dh_sigstate;
1313 kevent->k_header.dh_sigstate = FALSE;
1314 lwkt_reltoken(&tokref);
1320 ntoskrnl_set_event(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
1324 prevstate = kevent->k_header.dh_sigstate;
1325 ntoskrnl_wakeup(&kevent->k_header);
/*
 * KeClearEvent: unconditionally de-signal the event.  Unlike
 * KeResetEvent, the visible code reports nothing back.
 */
1331 ntoskrnl_clear_event(nt_kevent *kevent)
1333 kevent->k_header.dh_sigstate = FALSE;
/*
 * KeReadStateEvent: report the event's current signal state without
 * modifying it.
 */
1338 ntoskrnl_read_event(nt_kevent *kevent)
1340 return(kevent->k_header.dh_sigstate);
/*
 * ObReferenceObjectByHandle: create a tracking object (nt_objref) for a
 * thread handle so callers can later wait on thread termination.  The
 * reference is appended to ntoskrnl_reflist, which
 * ntoskrnl_thread_exit() walks to wake waiters.  Always succeeds.
 * NOTE(review): the lines storing the new reference into *object are
 * not visible in this listing — confirm against the full source.
 */
1343 __stdcall static ndis_status
1344 ntoskrnl_objref(ndis_handle handle, uint32_t reqaccess, void *otype,
1345 uint8_t accessmode, void **object, void **handleinfo)
/* M_WAITOK guarantees a non-NULL return; M_ZERO pre-clears the struct. */
1349 nr = malloc(sizeof(nt_objref), M_DEVBUF, M_WAITOK|M_ZERO);
1351 INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead));
1352 nr->no_obj = handle;
1353 nr->no_dh.dh_size = OTYPE_THREAD;
1354 TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
1357 return(NDIS_STATUS_SUCCESS);
/*
 * ObfDereferenceObject: drop a reference taken by ntoskrnl_objref() by
 * unlinking it from the reference list.  REGARGS1/__regcall match the
 * Windows register-based calling convention for this export.
 * NOTE(review): the lines casting 'object' to nr and freeing the
 * nt_objref are not visible in this listing.
 */
1360 __stdcall __regcall static void
1361 ntoskrnl_objderef(REGARGS1(void *object))
1366 TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
/*
 * ZwClose: there is no backing kernel object to release for the
 * handles this emulation hands out, so closing is a successful no-op.
 */
1372 __stdcall static uint32_t
1373 ntoskrnl_zwclose(ndis_handle handle)
1375 return(STATUS_SUCCESS);
1379 * This is here just in case the thread returns without calling
1380 * PsTerminateSystemThread().
/*
 * Native kthread entry trampoline: unpack the Windows thread routine
 * and its context from the heap-allocated thread_context, free the
 * wrapper, run the routine, then force the NT-style thread exit.
 */
1383 ntoskrnl_thrfunc(void *arg)
1385 thread_context *thrctx;
/* Windows thread entry points use the _stdcall convention. */
1386 __stdcall uint32_t (*tfunc)(void *);
1391 tfunc = thrctx->tc_thrfunc;
1392 tctx = thrctx->tc_thrctx;
/* The context wrapper was allocated by ntoskrnl_create_thread; we own it. */
1393 free(thrctx, M_TEMP);
/* NOTE(review): the call assigning rval = tfunc(tctx) is not visible in
 * this listing but must occur before the exit below. */
1397 ntoskrnl_thread_exit(rval);
1398 return; /* notreached */
/*
 * PsCreateSystemThread: spawn a native kthread to run a Windows thread
 * routine.  The routine/context pair is packaged into a malloc'ed
 * thread_context that ntoskrnl_thrfunc consumes and frees.  reqaccess,
 * objattrs, phandle and clientid are accepted for API compatibility;
 * no use of them is visible in this listing, nor is the error/return
 * handling after kthread creation.
 */
1401 __stdcall static ndis_status
1402 ntoskrnl_create_thread(ndis_handle *handle, uint32_t reqaccess,
1403 void *objattrs, ndis_handle phandle,
1404 void *clientid, void *thrfunc, void *thrctx)
1411 tc = malloc(sizeof(thread_context), M_TEMP, M_WAITOK)
1413 tc->tc_thrctx = thrctx;
1414 tc->tc_thrfunc = thrfunc;
/* Name the kthread from the global windows-kthread counter. */
1416 sprintf(tname, "windows kthread %d", ntoskrnl_kth);
/* Windows drivers expect larger stacks than the kernel default. */
1417 error = kthread_create_stk(ntoskrnl_thrfunc, tc, &td,
1418 NDIS_KSTACK_PAGES * PAGE_SIZE, tname);
1427 * In Windows, the exit of a thread is an event that you're allowed
1428 * to wait on, assuming you've obtained a reference to the thread using
1429 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
1430 * simulate this behavior is to register each thread we create in a
1431 * reference list, and if someone holds a reference to us, we poke
1434 __stdcall static ndis_status
1435 ntoskrnl_thread_exit(ndis_status status)
1437 struct nt_objref *nr;
/* Wake anyone holding a reference to (waiting on) the current thread. */
1439 TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
1440 if (nr->no_obj != curthread)
1442 ntoskrnl_wakeup(&nr->no_dh);
/* NOTE(review): the actual kthread-exit call is not visible in this
 * listing; the return below is never reached. */
1449 return(0); /* notreached */
/*
 * DbgPrint: route driver debug output to the console.
 * NOTE(review): the formatted-print call and __va_end are not visible
 * in this listing; only the varargs setup and the return are.
 */
1453 ntoskrnl_dbgprint(char *fmt, ...)
1458 __va_start(ap, fmt);
1462 return(STATUS_SUCCESS);
/*
 * DbgBreakPoint: drop into the kernel debugger.  The version check
 * selects the pre-5.2 FreeBSD Debugger() API over the newer
 * kdb_enter().
 * NOTE(review): the #else/#endif lines are not visible in this listing.
 */
1465 __stdcall static void
1466 ntoskrnl_debugger(void)
1469 #if __FreeBSD_version < 502113
1470 Debugger("ntoskrnl_debugger(): breakpoint");
1472 kdb_enter("ntoskrnl_debugger(): breakpoint");
/*
 * callout(9) handler behind NT timers: mark the timer no longer
 * pending, re-arm it if periodic, queue the associated DPC (if any)
 * and signal the timer object so waiters wake up.
 */
1477 ntoskrnl_timercall(void *arg)
1483 timer->k_header.dh_inserted = FALSE;
1486 * If this is a periodic timer, re-arm it
1487 * so it will fire again. We do this before
1488 * calling any deferred procedure calls because
1489 * it's possible the DPC might cancel the timer,
1490 * in which case it would be wrong for us to
1491 * re-arm it again afterwards.
1494 if (timer->k_period) {
1495 timer->k_header.dh_inserted = TRUE;
/* k_period is treated as milliseconds; the +1 tick avoids rounding to 0. */
1496 callout_reset(timer->k_handle, 1 + timer->k_period * hz / 1000,
1497 ntoskrnl_timercall, timer);
/* One-shot timer: retire the callout and release its lazy allocation. */
1499 callout_deactivate(timer->k_handle);
1500 free(timer->k_handle, M_NDIS);
1501 timer->k_handle = NULL;
1504 if (timer->k_dpc != NULL)
1505 ntoskrnl_queue_dpc(timer->k_dpc, NULL, NULL);
/* Signal the dispatch header for KeWaitForSingleObject() waiters. */
1507 ntoskrnl_wakeup(&timer->k_header);
/*
 * KeInitializeTimer: initialize a notification-type timer — thin
 * wrapper over ntoskrnl_init_timer_ex().
 */
1511 ntoskrnl_init_timer(ktimer *timer)
1516 ntoskrnl_init_timer_ex(timer, EVENT_TYPE_NOTIFY);
/*
 * KeInitializeTimerEx: initialize a timer object of the given type
 * (notification or synchronization).  The callout handle is allocated
 * lazily by ntoskrnl_set_timer_ex(), so it starts out NULL.
 */
1520 ntoskrnl_init_timer_ex(ktimer *timer, uint32_t type)
1525 INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
1526 timer->k_header.dh_sigstate = FALSE;
1527 timer->k_header.dh_inserted = FALSE;
1528 timer->k_header.dh_type = type;
1529 timer->k_header.dh_size = OTYPE_TIMER;
1530 timer->k_handle = NULL;
1536 * This is a wrapper for Windows deferred procedure calls that
1537 * have been placed on an NDIS thread work queue. We need it
1538 * since the DPC could be a _stdcall function. Also, as far as
1539 * I can tell, defered procedure calls must run at DISPATCH_LEVEL.
1542 ntoskrnl_run_dpc(void *arg)
/* Raise to DISPATCH_LEVEL around the call, then restore the old IRQL. */
1549 dpcfunc = (kdpc_func)dpc->k_deferedfunc;
1550 irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL);
1551 dpcfunc(dpc, dpc->k_deferredctx, dpc->k_sysarg1, dpc->k_sysarg2);
1552 FASTCALL1(hal_lower_irql, irql);
/*
 * KeInitializeDpc: record the deferred-procedure function and its
 * context in the DPC object.  (Note the field name is spelled
 * "deferedfunc" but "deferredctx" — existing struct naming.)
 */
1558 ntoskrnl_init_dpc(kdpc *dpc, void *dpcfunc, void *dpcctx)
1563 dpc->k_deferedfunc = dpcfunc;
1564 dpc->k_deferredctx = dpcctx;
/*
 * KeInsertQueueDpc: stash the two system arguments in the DPC and hand
 * it to the NDIS software-interrupt scheduler, which will invoke
 * ntoskrnl_run_dpc().  The branch on ndis_sched() handles the
 * already-queued/failed case; the actual return statements are not
 * visible in this listing.
 */
1570 ntoskrnl_queue_dpc(kdpc *dpc, void *sysarg1, void *sysarg2)
1572 dpc->k_sysarg1 = sysarg1;
1573 dpc->k_sysarg2 = sysarg2;
1574 if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
/*
 * KeRemoveQueueDpc: try to pull a pending DPC back off the NDIS
 * software-interrupt queue before it runs.  The return statements are
 * not visible in this listing.
 */
1581 ntoskrnl_dequeue_dpc(kdpc *dpc)
1583 if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
/*
 * KeSetTimerEx: (re)arm a timer.  Per the visible conversions, duetime
 * is in 100ns units: a negative value is relative to now, a positive
 * value is absolute and compared against ntoskrnl_time().  period is
 * in milliseconds for periodic timers (0 = one-shot).
 * NOTE(review): the trailing parameter(s) of the signature, variable
 * declarations, and several brace lines are not visible in this
 * listing.
 */
1590 ntoskrnl_set_timer_ex(ktimer *timer, int64_t duetime, uint32_t period,
/* If the timer is already armed, stop the pending callout first. */
1601 if (timer->k_header.dh_inserted == TRUE) {
1602 if (timer->k_handle != NULL)
1603 callout_stop(timer->k_handle);
1604 timer->k_header.dh_inserted = FALSE;
1609 timer->k_duetime = duetime;
1610 timer->k_period = period;
1611 timer->k_header.dh_sigstate = FALSE;
/* Relative duetime: negate and split 100ns units into sec/usec. */
1615 tv.tv_sec = - (duetime) / 10000000;
1616 tv.tv_usec = (- (duetime) / 10) -
1617 (tv.tv_sec * 1000000);
/* Absolute duetime: fire immediately if it is already in the past. */
1619 ntoskrnl_time(&curtime);
1620 if (duetime < curtime)
1621 tv.tv_sec = tv.tv_usec = 0;
1623 tv.tv_sec = ((duetime) - curtime) / 10000000;
1624 tv.tv_usec = ((duetime) - curtime) / 10 -
1625 (tv.tv_sec * 1000000);
/* +1 tick so a sub-tick timeout still waits at least one tick. */
1629 ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000;
1630 timer->k_header.dh_inserted = TRUE;
/* Callout storage is allocated lazily on first arm, freed on expiry/cancel. */
1631 if (timer->k_handle == NULL) {
1632 timer->k_handle = malloc(sizeof(struct callout), M_NDIS,
1634 callout_init(timer->k_handle);
1636 callout_reset(timer->k_handle, ticks, ntoskrnl_timercall, timer);
/*
 * KeSetTimer: one-shot convenience wrapper around KeSetTimerEx
 * (period = 0).
 */
1642 ntoskrnl_set_timer(ktimer *timer, int64_t duetime, kdpc *dpc)
1644 return (ntoskrnl_set_timer_ex(timer, duetime, 0, dpc));
/*
 * KeCancelTimer: stop a pending timer.  If it is armed, cancel the
 * callout, free its lazily-allocated storage, and dequeue any pending
 * DPC so it will not fire.  The return value (whether the timer was
 * pending) and closing braces are not visible in this listing.
 */
1648 ntoskrnl_cancel_timer(ktimer *timer)
1655 if (timer->k_header.dh_inserted == TRUE) {
1656 if (timer->k_handle != NULL) {
1657 callout_stop(timer->k_handle);
1658 free(timer->k_handle, M_NDIS);
1659 timer->k_handle = NULL;
1661 if (timer->k_dpc != NULL)
1662 ntoskrnl_dequeue_dpc(timer->k_dpc);
/*
 * KeReadStateTimer: report whether the timer has fired (the signal
 * state of its dispatch header) without modifying it.
 */
1672 ntoskrnl_read_timer(ktimer *timer)
1674 return(timer->k_header.dh_sigstate);
/*
 * Catch-all stub patched in for every ntoskrnl import with no explicit
 * implementation (see the NULL entry at the end of ntoskrnl_functbl);
 * it only logs that it was called.  The function-name line is not
 * visible in this listing.
 */
1677 __stdcall static void
1680 printf ("ntoskrnl dummy called...\n");
/*
 * Export patch table: maps Windows kernel (ntoskrnl.exe) symbol names
 * imported by NDIS binary drivers to their local emulations or libc
 * equivalents.  The PE import-patching code rewrites driver import
 * slots using this table; the trailing NULL entry routes anything
 * unimplemented to dummy().
 * NOTE(review): the table's closing "};" falls outside this listing.
 */
1685 image_patch_table ntoskrnl_functbl[] = {
1686 { "RtlCompareMemory", (FUNC)ntoskrnl_memcmp },
1687 { "RtlEqualUnicodeString", (FUNC)ntoskrnl_unicode_equal },
1688 { "RtlCopyUnicodeString", (FUNC)ntoskrnl_unicode_copy },
1689 { "RtlUnicodeStringToAnsiString", (FUNC)ntoskrnl_unicode_to_ansi },
1690 { "RtlAnsiStringToUnicodeString", (FUNC)ntoskrnl_ansi_to_unicode },
1691 { "RtlInitAnsiString", (FUNC)ntoskrnl_init_ansi_string },
1692 { "RtlInitUnicodeString", (FUNC)ntoskrnl_init_unicode_string },
1693 { "RtlFreeAnsiString", (FUNC)ntoskrnl_free_ansi_string },
1694 { "RtlFreeUnicodeString", (FUNC)ntoskrnl_free_unicode_string },
1695 { "RtlUnicodeStringToInteger", (FUNC)ntoskrnl_unicode_to_int },
1696 { "sprintf", (FUNC)sprintf },
1697 { "vsprintf", (FUNC)vsprintf },
/* NOTE(review): Windows _snprintf returns a negative value on
 * truncation while C99 snprintf returns the would-be length; a driver
 * that checks the return value may misbehave — verify acceptable. */
1698 { "_snprintf", (FUNC)snprintf },
1699 { "_vsnprintf", (FUNC)vsnprintf },
1700 { "DbgPrint", (FUNC)ntoskrnl_dbgprint },
1701 { "DbgBreakPoint", (FUNC)ntoskrnl_debugger },
1702 { "strncmp", (FUNC)strncmp },
1703 { "strcmp", (FUNC)strcmp },
1704 { "strncpy", (FUNC)strncpy },
1705 { "strcpy", (FUNC)strcpy },
1706 { "strlen", (FUNC)strlen },
1707 { "memcpy", (FUNC)memcpy },
/* NOTE(review): memmove is mapped to memcpy, which does not support
 * overlapping regions — a driver relying on memmove's overlap
 * semantics would be silently broken.  Flagging, not changing. */
1708 { "memmove", (FUNC)memcpy },
1709 { "memset", (FUNC)memset },
1710 { "IofCallDriver", (FUNC)ntoskrnl_iofcalldriver },
1711 { "IofCompleteRequest", (FUNC)ntoskrnl_iofcompletereq },
1712 { "IoBuildSynchronousFsdRequest", (FUNC)ntoskrnl_iobuildsynchfsdreq },
1713 { "KeWaitForSingleObject", (FUNC)ntoskrnl_waitforobj },
1714 { "KeWaitForMultipleObjects", (FUNC)ntoskrnl_waitforobjs },
1715 { "_allmul", (FUNC)_allmul },
1716 { "_alldiv", (FUNC)_alldiv },
1717 { "_allrem", (FUNC)_allrem },
1718 { "_allshr", (FUNC)_allshr },
1719 { "_allshl", (FUNC)_allshl },
1720 { "_aullmul", (FUNC)_aullmul },
1721 { "_aulldiv", (FUNC)_aulldiv },
1722 { "_aullrem", (FUNC)_aullrem },
1723 { "_aullshr", (FUNC)_aullshr },
1724 { "_aullshl", (FUNC)_aullshl },
1725 { "atoi", (FUNC)atoi },
1726 { "atol", (FUNC)atol },
1727 { "rand", (FUNC)rand },
1728 { "WRITE_REGISTER_USHORT", (FUNC)ntoskrnl_writereg_ushort },
1729 { "READ_REGISTER_USHORT", (FUNC)ntoskrnl_readreg_ushort },
1730 { "WRITE_REGISTER_ULONG", (FUNC)ntoskrnl_writereg_ulong },
1731 { "READ_REGISTER_ULONG", (FUNC)ntoskrnl_readreg_ulong },
1732 { "READ_REGISTER_UCHAR", (FUNC)ntoskrnl_readreg_uchar },
1733 { "WRITE_REGISTER_UCHAR", (FUNC)ntoskrnl_writereg_uchar },
1734 { "ExInitializePagedLookasideList", (FUNC)ntoskrnl_init_lookaside },
1735 { "ExDeletePagedLookasideList", (FUNC)ntoskrnl_delete_lookaside },
1736 { "ExInitializeNPagedLookasideList", (FUNC)ntoskrnl_init_nplookaside },
1737 { "ExDeleteNPagedLookasideList", (FUNC)ntoskrnl_delete_nplookaside },
1738 { "InterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist },
1739 { "InterlockedPushEntrySList", (FUNC)ntoskrnl_push_slist },
1740 { "ExInterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist_ex },
1741 { "ExInterlockedPushEntrySList",(FUNC)ntoskrnl_push_slist_ex },
1742 { "KefAcquireSpinLockAtDpcLevel", (FUNC)ntoskrnl_lock_dpc },
1743 { "KefReleaseSpinLockFromDpcLevel", (FUNC)ntoskrnl_unlock_dpc },
1744 { "InterlockedIncrement", (FUNC)ntoskrnl_interlock_inc },
1745 { "InterlockedDecrement", (FUNC)ntoskrnl_interlock_dec },
1746 { "ExInterlockedAddLargeStatistic",
1747 (FUNC)ntoskrnl_interlock_addstat },
1748 { "IoFreeMdl", (FUNC)ntoskrnl_freemdl },
1749 { "MmSizeOfMdl", (FUNC)ntoskrnl_sizeofmdl },
1750 { "MmMapLockedPages", (FUNC)ntoskrnl_mmaplockedpages },
1751 { "MmMapLockedPagesSpecifyCache",
1752 (FUNC)ntoskrnl_mmaplockedpages_cache },
1753 { "MmUnmapLockedPages", (FUNC)ntoskrnl_munmaplockedpages },
1754 { "MmBuildMdlForNonPagedPool", (FUNC)ntoskrnl_build_npaged_mdl },
1755 { "KeInitializeSpinLock", (FUNC)ntoskrnl_init_lock },
1756 { "IoIsWdmVersionAvailable", (FUNC)ntoskrnl_wdmver },
1757 { "IoGetDeviceProperty", (FUNC)ntoskrnl_devprop },
1758 { "KeInitializeMutex", (FUNC)ntoskrnl_init_mutex },
1759 { "KeReleaseMutex", (FUNC)ntoskrnl_release_mutex },
1760 { "KeReadStateMutex", (FUNC)ntoskrnl_read_mutex },
1761 { "KeInitializeEvent", (FUNC)ntoskrnl_init_event },
1762 { "KeSetEvent", (FUNC)ntoskrnl_set_event },
1763 { "KeResetEvent", (FUNC)ntoskrnl_reset_event },
1764 { "KeClearEvent", (FUNC)ntoskrnl_clear_event },
1765 { "KeReadStateEvent", (FUNC)ntoskrnl_read_event },
1766 { "KeInitializeTimer", (FUNC)ntoskrnl_init_timer },
1767 { "KeInitializeTimerEx", (FUNC)ntoskrnl_init_timer_ex },
1768 { "KeSetTimer", (FUNC)ntoskrnl_set_timer },
1769 { "KeSetTimerEx", (FUNC)ntoskrnl_set_timer_ex },
1770 { "KeCancelTimer", (FUNC)ntoskrnl_cancel_timer },
1771 { "KeReadStateTimer", (FUNC)ntoskrnl_read_timer },
1772 { "KeInitializeDpc", (FUNC)ntoskrnl_init_dpc },
1773 { "KeInsertQueueDpc", (FUNC)ntoskrnl_queue_dpc },
1774 { "KeRemoveQueueDpc", (FUNC)ntoskrnl_dequeue_dpc },
1775 { "ObReferenceObjectByHandle", (FUNC)ntoskrnl_objref },
1776 { "ObfDereferenceObject", (FUNC)ntoskrnl_objderef },
1777 { "ZwClose", (FUNC)ntoskrnl_zwclose },
1778 { "PsCreateSystemThread", (FUNC)ntoskrnl_create_thread },
1779 { "PsTerminateSystemThread", (FUNC)ntoskrnl_thread_exit },
1782 * This last entry is a catch-all for any function we haven't
1783 * implemented yet. The PE import list patching routine will
1784 * use it for any function that doesn't have an explicit match
1788 { NULL, (FUNC)dummy },