1 /*-
2  * Copyright (c) 2003
3  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.117 2012/11/17 01:51:26 svnexp Exp $
33  */
34
35 #include <sys/ctype.h>
36 #include <sys/unistd.h>
37 #include <sys/param.h>
38 #include <sys/types.h>
39 #include <sys/errno.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/lock.h>
43 #include <sys/thread2.h>
44 #include <sys/mutex.h>
45 #include <sys/mutex2.h>
46
47 #include <sys/callout.h>
48 #include <sys/kernel.h>
49 #include <sys/proc.h>
50 #include <sys/condvar.h>
51 #include <sys/kthread.h>
52 #include <sys/module.h>
53 #include <sys/sched.h>
54 #include <sys/sysctl.h>
55
56 #include <machine/atomic.h>
57
58 #include <sys/bus.h>
59 #include <sys/rman.h>
60 #include <sys/objcache.h>
61
62 #include <vm/vm.h>
63 #include <vm/vm_param.h>
64 #include <vm/pmap.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_map.h>
67 #include <vm/vm_extern.h>
68
69 #include <emulation/ndis/pe_var.h>
70 #include <emulation/ndis/cfg_var.h>
71 #include <emulation/ndis/resource_var.h>
72 #include <emulation/ndis/ntoskrnl_var.h>
73 #include <emulation/ndis/hal_var.h>
74 #include <emulation/ndis/ndis_var.h>
75
76 #include <machine/stdarg.h>
77
78 #ifdef NTOSKRNL_DEBUG_TIMERS
79 static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);
80
81 SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLTYPE_INT | CTLFLAG_RW,
82     NULL, 0, sysctl_show_timers, "I",
83     "Show ntoskrnl timer stats");
84 #endif
85
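/*
 * Each kdpc_queue describes one DPC or workitem thread: kq_disp is the
 * list of queued DPCs/workitems, kq_lock protects it, kq_proc is the
 * event used to wake the thread when new work is queued, and kq_done
 * signals back that the queue has been drained.  kq_exit and kq_running
 * track thread shutdown and dispatch state.
 */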
86 struct kdpc_queue {
87         list_entry              kq_disp;
88         struct thread           *kq_td;
89         int                     kq_cpu;
90         int                     kq_exit;
91         int                     kq_running;
92         kspin_lock              kq_lock;
93         nt_kevent               kq_proc;
94         nt_kevent               kq_done;
95 };
96
97 typedef struct kdpc_queue kdpc_queue;
98
99 struct wb_ext {
100         struct cv               we_cv;
101         struct thread           *we_td;
102 };
103
104 typedef struct wb_ext wb_ext;
105
106 #define NTOSKRNL_TIMEOUTS       256
107 #ifdef NTOSKRNL_DEBUG_TIMERS
108 static uint64_t ntoskrnl_timer_fires;
109 static uint64_t ntoskrnl_timer_sets;
110 static uint64_t ntoskrnl_timer_reloads;
111 static uint64_t ntoskrnl_timer_cancels;
112 #endif
113
114 struct callout_entry {
115         struct callout          ce_callout;
116         list_entry              ce_list;
117 };
118
119 typedef struct callout_entry callout_entry;
120
121 static struct list_entry ntoskrnl_calllist;
122 static struct mtx ntoskrnl_calllock;
123 struct kuser_shared_data kuser_shared_data;
124
125 static struct list_entry ntoskrnl_intlist;
126 static kspin_lock ntoskrnl_intlock;
127
128 static uint8_t RtlEqualUnicodeString(unicode_string *,
129         unicode_string *, uint8_t);
130 static void RtlCopyString(ansi_string *, const ansi_string *);
131 static void RtlCopyUnicodeString(unicode_string *,
132         unicode_string *);
133 static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
134          void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
135 static irp *IoBuildAsynchronousFsdRequest(uint32_t,
136         device_object *, void *, uint32_t, uint64_t *, io_status_block *);
137 static irp *IoBuildDeviceIoControlRequest(uint32_t,
138         device_object *, void *, uint32_t, void *, uint32_t,
139         uint8_t, nt_kevent *, io_status_block *);
140 static irp *IoAllocateIrp(uint8_t, uint8_t);
141 static void IoReuseIrp(irp *, uint32_t);
142 static void IoFreeIrp(irp *);
143 static void IoInitializeIrp(irp *, uint16_t, uint8_t);
144 static irp *IoMakeAssociatedIrp(irp *, uint8_t);
145 static uint32_t KeWaitForMultipleObjects(uint32_t,
146         nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
147         int64_t *, wait_block *);
148 static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
149 static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
150 static void ntoskrnl_satisfy_multiple_waits(wait_block *);
151 static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
152 static void ntoskrnl_insert_timer(ktimer *, int);
153 static void ntoskrnl_remove_timer(ktimer *);
154 #ifdef NTOSKRNL_DEBUG_TIMERS
155 static void ntoskrnl_show_timers(void);
156 #endif
157 static void ntoskrnl_timercall(void *);
158 static void ntoskrnl_dpc_thread(void *);
159 static void ntoskrnl_destroy_dpc_threads(void);
160 static void ntoskrnl_destroy_workitem_threads(void);
161 static void ntoskrnl_workitem_thread(void *);
162 static void ntoskrnl_workitem(device_object *, void *);
163 static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
164 static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
165 static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
166 static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
167 static uint16_t READ_REGISTER_USHORT(uint16_t *);
168 static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
169 static uint32_t READ_REGISTER_ULONG(uint32_t *);
170 static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
171 static uint8_t READ_REGISTER_UCHAR(uint8_t *);
172 static int64_t _allmul(int64_t, int64_t);
173 static int64_t _alldiv(int64_t, int64_t);
174 static int64_t _allrem(int64_t, int64_t);
175 static int64_t _allshr(int64_t, uint8_t);
176 static int64_t _allshl(int64_t, uint8_t);
177 static uint64_t _aullmul(uint64_t, uint64_t);
178 static uint64_t _aulldiv(uint64_t, uint64_t);
179 static uint64_t _aullrem(uint64_t, uint64_t);
180 static uint64_t _aullshr(uint64_t, uint8_t);
181 static uint64_t _aullshl(uint64_t, uint8_t);
182 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
183 static void InitializeSListHead(slist_header *);
184 static slist_entry *ntoskrnl_popsl(slist_header *);
185 static void ExFreePoolWithTag(void *, uint32_t);
186 static void ExInitializePagedLookasideList(paged_lookaside_list *,
187         lookaside_alloc_func *, lookaside_free_func *,
188         uint32_t, size_t, uint32_t, uint16_t);
189 static void ExDeletePagedLookasideList(paged_lookaside_list *);
190 static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
191         lookaside_alloc_func *, lookaside_free_func *,
192         uint32_t, size_t, uint32_t, uint16_t);
193 static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
194 static slist_entry
195         *ExInterlockedPushEntrySList(slist_header *,
196         slist_entry *, kspin_lock *);
197 static slist_entry
198         *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
199 static uint32_t InterlockedIncrement(volatile uint32_t *);
200 static uint32_t InterlockedDecrement(volatile uint32_t *);
201 static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
202 static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
203 static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
204         uint64_t, uint64_t, uint64_t, enum nt_caching_type);
205 static void MmFreeContiguousMemory(void *);
206 static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t,
207         enum nt_caching_type);
208 static uint32_t MmSizeOfMdl(void *, size_t);
209 static void *MmMapLockedPages(mdl *, uint8_t);
210 static void *MmMapLockedPagesSpecifyCache(mdl *,
211         uint8_t, uint32_t, void *, uint32_t, uint32_t);
212 static void MmUnmapLockedPages(void *, mdl *);
213 static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
214 static void RtlZeroMemory(void *, size_t);
215 static void RtlSecureZeroMemory(void *, size_t);
216 static void RtlFillMemory(void *, size_t, uint8_t);
217 static void RtlMoveMemory(void *, const void *, size_t);
218 static ndis_status RtlCharToInteger(const char *, uint32_t, uint32_t *);
219 static void RtlCopyMemory(void *, const void *, size_t);
220 static size_t RtlCompareMemory(const void *, const void *, size_t);
221 static ndis_status RtlUnicodeStringToInteger(unicode_string *,
222         uint32_t, uint32_t *);
223 static int atoi (const char *);
224 static long atol (const char *);
225 static int rand(void);
226 static void srand(unsigned int);
227 static void KeQuerySystemTime(uint64_t *);
228 static uint32_t KeTickCount(void);
229 static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
230 static int32_t IoOpenDeviceRegistryKey(struct device_object *, uint32_t,
231     uint32_t, void **);
232 static void ntoskrnl_thrfunc(void *);
233 static ndis_status PsCreateSystemThread(ndis_handle *,
234         uint32_t, void *, ndis_handle, void *, void *, void *);
235 static ndis_status PsTerminateSystemThread(ndis_status);
236 static ndis_status IoGetDeviceObjectPointer(unicode_string *,
237         uint32_t, void *, device_object *);
238 static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
239         uint32_t, void *, uint32_t *);
240 static void KeInitializeMutex(kmutant *, uint32_t);
241 static uint32_t KeReleaseMutex(kmutant *, uint8_t);
242 static uint32_t KeReadStateMutex(kmutant *);
243 static ndis_status ObReferenceObjectByHandle(ndis_handle,
244         uint32_t, void *, uint8_t, void **, void **);
245 static void ObfDereferenceObject(void *);
246 static uint32_t ZwClose(ndis_handle);
247 static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
248         uint32_t, void *);
249 static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
250 static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
251 static void *ntoskrnl_memset(void *, int, size_t);
252 static void *ntoskrnl_memmove(void *, void *, size_t);
253 static void *ntoskrnl_memchr(void *, unsigned char, size_t);
254 static char *ntoskrnl_strstr(char *, char *);
255 static char *ntoskrnl_strncat(char *, char *, size_t);
256 static int ntoskrnl_toupper(int);
257 static int ntoskrnl_tolower(int);
258 static funcptr ntoskrnl_findwrap(funcptr);
259 static uint32_t DbgPrint(char *, ...) __printflike(1, 2);
260 static void DbgBreakPoint(void);
261 static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
262 static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
263 static int32_t KeSetPriorityThread(struct thread *, int32_t);
264 static void dummy(void);
265
266 static struct lock ntoskrnl_dispatchlock;
267 static struct mtx ntoskrnl_interlock;
268 static kspin_lock ntoskrnl_cancellock;
269 static int ntoskrnl_kth = 0;
270 static struct nt_objref_head ntoskrnl_reflist;
271 static struct objcache *mdl_cache;
272 static struct objcache *iw_cache;
273 static struct kdpc_queue *kq_queues;
274 static struct kdpc_queue *wq_queues;
275 static int wq_idx = 0;
276
277 static struct objcache_malloc_args mdl_alloc_args = {
278         MDL_ZONE_SIZE, M_DEVBUF
279 };
280 static struct objcache_malloc_args iw_alloc_args = {
281         sizeof(io_workitem), M_DEVBUF
282 };
283
284 int
285 ntoskrnl_libinit(void)
286 {
287         image_patch_table       *patch;
288         int                     error;
289         struct thread           *p;
290         kdpc_queue              *kq;
291         callout_entry           *e;
292         int                     i;
293
294         lockinit(&ntoskrnl_dispatchlock, MTX_NDIS_LOCK, 0, LK_CANRECURSE);
295         mtx_init(&ntoskrnl_interlock, "ndis1");
296         KeInitializeSpinLock(&ntoskrnl_cancellock);
297         KeInitializeSpinLock(&ntoskrnl_intlock);
298         TAILQ_INIT(&ntoskrnl_reflist);
299
300         InitializeListHead(&ntoskrnl_calllist);
301         InitializeListHead(&ntoskrnl_intlist);
302         mtx_init(&ntoskrnl_calllock, "ndis2");
303
304         kq_queues = ExAllocatePoolWithTag(NonPagedPool,
305 #ifdef NTOSKRNL_MULTIPLE_DPCS
306             sizeof(kdpc_queue) * ncpus, 0);
307 #else
308             sizeof(kdpc_queue), 0);
309 #endif
310
311         if (kq_queues == NULL)
312                 return (ENOMEM);
313
314         wq_queues = ExAllocatePoolWithTag(NonPagedPool,
315             sizeof(kdpc_queue) * WORKITEM_THREADS, 0);
316
317         if (wq_queues == NULL)
318                 return (ENOMEM);
319
320 #ifdef NTOSKRNL_MULTIPLE_DPCS
321         bzero((char *)kq_queues, sizeof(kdpc_queue) * ncpus);
322 #else
323         bzero((char *)kq_queues, sizeof(kdpc_queue));
324 #endif
325         bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);
326
327         /*
328          * Launch the DPC threads.
329          */
330
331 #ifdef NTOSKRNL_MULTIPLE_DPCS
332         for (i = 0; i < ncpus; i++) {
333 #else
334         for (i = 0; i < 1; i++) {
335 #endif
336                 kq = kq_queues + i;
337                 kq->kq_cpu = i;
338                 error = kthread_create_cpu(ntoskrnl_dpc_thread, kq, &p, i,
339                     "Win DPC %d", i);
340                 if (error)
341                         panic("failed to launch DPC thread");
342         }
343
344         /*
345          * Launch the workitem threads.
346          */
347
348         for (i = 0; i < WORKITEM_THREADS; i++) {
349                 kq = wq_queues + i;
350                 error = kthread_create(ntoskrnl_workitem_thread, kq, &p,
351                     "Win Workitem %d", i);
352                 if (error)
353                         panic("failed to launch workitem thread");
354         }
355
356         patch = ntoskrnl_functbl;
357         while (patch->ipt_func != NULL) {
358                 windrv_wrap((funcptr)patch->ipt_func,
359                     (funcptr *)&patch->ipt_wrap,
360                     patch->ipt_argcnt, patch->ipt_ftype);
361                 patch++;
362         }
363
364         for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
365                 e = ExAllocatePoolWithTag(NonPagedPool,
366                     sizeof(callout_entry), 0);
367                 if (e == NULL)
368                         panic("failed to allocate timeouts");
369                 mtx_spinlock(&ntoskrnl_calllock);
370                 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
371                 mtx_spinunlock(&ntoskrnl_calllock);
372         }
373
374         /*
375          * MDLs are supposed to be variable size (they describe
376          * buffers containing some number of pages, but we don't
377          * know ahead of time how many pages that will be). But
378          * always allocating them off the heap is very slow. As
379          * a compromise, we create an MDL UMA zone big enough to
380          * handle any buffer requiring up to 16 pages, and we
381          * use those for any MDLs for buffers of 16 pages or less
382          * in size. For buffers larger than that (which we assume
383          * will be few and far between), we allocate the MDLs off
384          * the heap.
385          *
386          * CHANGED TO USING objcache(9) IN DRAGONFLY
387          */
388
389         mdl_cache = objcache_create("Windows MDL", 0, 0,
390             NULL, NULL, NULL, objcache_malloc_alloc, objcache_malloc_free,
391             &mdl_alloc_args);
392
393         iw_cache = objcache_create("Windows WorkItem", 0, 0,
394             NULL, NULL, NULL, objcache_malloc_alloc, objcache_malloc_free,
395             &iw_alloc_args);
396
397         return (0);
398 }
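/*
 * As a rough illustration of the zone/heap split described above (the
 * actual IoAllocateMdl() implementation lives elsewhere), an MDL
 * allocation would look approximately like this, assuming MDL_ZONE_SIZE
 * is large enough to hold a 16-page MDL:
 *
 *	size_t len = MmSizeOfMdl(vaddr, buflen);
 *	mdl *m = (len > MDL_ZONE_SIZE) ?
 *	    ExAllocatePoolWithTag(NonPagedPool, len, 0) :
 *	    objcache_get(mdl_cache, M_NOWAIT);
 */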
399
400 int
401 ntoskrnl_libfini(void)
402 {
403         image_patch_table       *patch;
404         callout_entry           *e;
405         list_entry              *l;
406
407         patch = ntoskrnl_functbl;
408         while (patch->ipt_func != NULL) {
409                 windrv_unwrap(patch->ipt_wrap);
410                 patch++;
411         }
412
413         /* Stop the workitem queues. */
414         ntoskrnl_destroy_workitem_threads();
415         /* Stop the DPC queues. */
416         ntoskrnl_destroy_dpc_threads();
417
418         ExFreePool(kq_queues);
419         ExFreePool(wq_queues);
420
421         objcache_destroy(mdl_cache);
422         objcache_destroy(iw_cache);
423
424         mtx_spinlock(&ntoskrnl_calllock);
425         while(!IsListEmpty(&ntoskrnl_calllist)) {
426                 l = RemoveHeadList(&ntoskrnl_calllist);
427                 e = CONTAINING_RECORD(l, callout_entry, ce_list);
428                 mtx_spinunlock(&ntoskrnl_calllock);
429                 ExFreePool(e);
430                 mtx_spinlock(&ntoskrnl_calllock);
431         }
432         mtx_spinunlock(&ntoskrnl_calllock);
433
434         lockuninit(&ntoskrnl_dispatchlock);
435         mtx_uninit(&ntoskrnl_interlock);
436         mtx_uninit(&ntoskrnl_calllock);
437
438         return (0);
439 }
440
441 /*
442  * We need to be able to reference this externally from the wrapper;
443  * GCC only generates a local implementation of memset.
444  */
445 static void *
446 ntoskrnl_memset(void *buf, int ch, size_t size)
447 {
448         return (memset(buf, ch, size));
449 }
450
451 static void *
452 ntoskrnl_memmove(void *dst, void *src, size_t size)
453 {
454         bcopy(src, dst, size);
455         return (dst);
456 }
457
458 static void *
459 ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
460 {
461         if (len != 0) {
462                 unsigned char *p = buf;
463
464                 do {
465                         if (*p++ == ch)
466                                 return (p - 1);
467                 } while (--len != 0);
468         }
469         return (NULL);
470 }
471
472 static char *
473 ntoskrnl_strstr(char *s, char *find)
474 {
475         char c, sc;
476         size_t len;
477
478         if ((c = *find++) != 0) {
479                 len = strlen(find);
480                 do {
481                         do {
482                                 if ((sc = *s++) == 0)
483                                         return (NULL);
484                         } while (sc != c);
485                 } while (strncmp(s, find, len) != 0);
486                 s--;
487         }
488         return (s);
489 }
490
491 /* Taken from libc */
492 static char *
493 ntoskrnl_strncat(char *dst, char *src, size_t n)
494 {
495         if (n != 0) {
496                 char *d = dst;
497                 const char *s = src;
498
499                 while (*d != 0)
500                         d++;
501                 do {
502                         if ((*d = *s++) == 0)
503                                 break;
504                         d++;
505                 } while (--n != 0);
506                 *d = 0;
507         }
508         return (dst);
509 }
510
511 static int
512 ntoskrnl_toupper(int c)
513 {
514         return (toupper(c));
515 }
516
517 static int
518 ntoskrnl_tolower(int c)
519 {
520         return (tolower(c));
521 }
522
523 static uint8_t
524 RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
525         uint8_t caseinsensitive)
526 {
527         int                     i;
528
529         if (str1->us_len != str2->us_len)
530                 return (FALSE);
531
532         for (i = 0; i < str1->us_len; i++) {
533                 if (caseinsensitive == TRUE) {
534                         if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
535                             toupper((char)(str2->us_buf[i] & 0xFF)))
536                                 return (FALSE);
537                 } else {
538                         if (str1->us_buf[i] != str2->us_buf[i])
539                                 return (FALSE);
540                 }
541         }
542
543         return (TRUE);
544 }
545
546 static void
547 RtlCopyString(ansi_string *dst, const ansi_string *src)
548 {
549         if (src != NULL && src->as_buf != NULL && dst->as_buf != NULL) {
550                 dst->as_len = min(src->as_len, dst->as_maxlen);
551                 memcpy(dst->as_buf, src->as_buf, dst->as_len);
552                 if (dst->as_len < dst->as_maxlen)
553                         dst->as_buf[dst->as_len] = 0;
554         } else
555                 dst->as_len = 0;
556 }
557
558 static void
559 RtlCopyUnicodeString(unicode_string *dest, unicode_string *src)
560 {
561
562         if (dest->us_maxlen >= src->us_len)
563                 dest->us_len = src->us_len;
564         else
565                 dest->us_len = dest->us_maxlen;
566         memcpy(dest->us_buf, src->us_buf, dest->us_len);
567 }
568
569 static void
570 ntoskrnl_ascii_to_unicode(char *ascii, uint16_t *unicode, int len)
571 {
572         int                     i;
573         uint16_t                *ustr;
574
575         ustr = unicode;
576         for (i = 0; i < len; i++) {
577                 *ustr = (uint16_t)ascii[i];
578                 ustr++;
579         }
580 }
581
582 static void
583 ntoskrnl_unicode_to_ascii(uint16_t *unicode, char *ascii, int len)
584 {
585         int                     i;
586         uint8_t                 *astr;
587
588         astr = ascii;
589         for (i = 0; i < len / 2; i++) {
590                 *astr = (uint8_t)unicode[i];
591                 astr++;
592         }
593 }
594
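/*
 * Note on length units in the conversions below: unicode_string lengths
 * (us_len/us_maxlen) are byte counts of 16-bit characters, while
 * ansi_string lengths (as_len/as_maxlen) count 8-bit characters, hence
 * the divisions and multiplications by two.
 */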
595 uint32_t
596 RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate)
597 {
598         if (dest == NULL || src == NULL)
599                 return (STATUS_INVALID_PARAMETER);
600
601         dest->as_len = src->us_len / 2;
602         if (dest->as_maxlen < dest->as_len)
603                 dest->as_len = dest->as_maxlen;
604
605         if (allocate == TRUE) {
606                 dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
607                     (src->us_len / 2) + 1, 0);
608                 if (dest->as_buf == NULL)
609                         return (STATUS_INSUFFICIENT_RESOURCES);
610                 dest->as_len = dest->as_maxlen = src->us_len / 2;
611         } else {
612                 dest->as_len = src->us_len / 2; /* XXX */
613                 if (dest->as_maxlen < dest->as_len)
614                         dest->as_len = dest->as_maxlen;
615         }
616
617         ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
618             dest->as_len * 2);
619
620         return (STATUS_SUCCESS);
621 }
622
623 uint32_t
624 RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
625         uint8_t allocate)
626 {
627         if (dest == NULL || src == NULL)
628                 return (STATUS_INVALID_PARAMETER);
629
630         if (allocate == TRUE) {
631                 dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
632                     src->as_len * 2, 0);
633                 if (dest->us_buf == NULL)
634                         return (STATUS_INSUFFICIENT_RESOURCES);
635                 dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
636         } else {
637                 dest->us_len = src->as_len * 2; /* XXX */
638                 if (dest->us_maxlen < dest->us_len)
639                         dest->us_len = dest->us_maxlen;
640         }
641
642         ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
643             dest->us_len / 2);
644
645         return (STATUS_SUCCESS);
646 }
647
648 void *
649 ExAllocatePoolWithTag(uint32_t pooltype, size_t len, uint32_t tag)
650 {
651         void                    *buf;
652
653         buf = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
654         if (buf == NULL)
655                 return (NULL);
656
657         return (buf);
658 }
659
660 static void
661 ExFreePoolWithTag(void *buf, uint32_t tag)
662 {
663         ExFreePool(buf);
664 }
665
666 void
667 ExFreePool(void *buf)
668 {
669         kfree(buf, M_DEVBUF);
670 }
671
672 uint32_t
673 IoAllocateDriverObjectExtension(driver_object *drv, void *clid,
674     uint32_t extlen, void **ext)
675 {
676         custom_extension        *ce;
677
678         ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
679             + extlen, 0);
680
681         if (ce == NULL)
682                 return (STATUS_INSUFFICIENT_RESOURCES);
683
684         ce->ce_clid = clid;
685         InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));
686
687         *ext = (void *)(ce + 1);
688
689         return (STATUS_SUCCESS);
690 }
691
692 void *
693 IoGetDriverObjectExtension(driver_object *drv, void *clid)
694 {
695         list_entry              *e;
696         custom_extension        *ce;
697
698         /*
699          * Sanity check. Our dummy bus drivers don't have
700          * any driver extensions.
701          */
702
703         if (drv->dro_driverext == NULL)
704                 return (NULL);
705
706         e = drv->dro_driverext->dre_usrext.nle_flink;
707         while (e != &drv->dro_driverext->dre_usrext) {
708                 ce = (custom_extension *)e;
709                 if (ce->ce_clid == clid)
710                         return ((void *)(ce + 1));
711                 e = e->nle_flink;
712         }
713
714         return (NULL);
715 }
716
717
718 uint32_t
719 IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
720         uint32_t devtype, uint32_t devchars, uint8_t exclusive,
721         device_object **newdev)
722 {
723         device_object           *dev;
724
725         dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
726         if (dev == NULL)
727                 return (STATUS_INSUFFICIENT_RESOURCES);
728
729         dev->do_type = devtype;
730         dev->do_drvobj = drv;
731         dev->do_currirp = NULL;
732         dev->do_flags = 0;
733
734         if (devextlen) {
735                 dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
736                     devextlen, 0);
737
738                 if (dev->do_devext == NULL) {
739                         ExFreePool(dev);
740                         return (STATUS_INSUFFICIENT_RESOURCES);
741                 }
742
743                 bzero(dev->do_devext, devextlen);
744         } else
745                 dev->do_devext = NULL;
746
747         dev->do_size = sizeof(device_object) + devextlen;
748         dev->do_refcnt = 1;
749         dev->do_attacheddev = NULL;
750         dev->do_nextdev = NULL;
751         dev->do_devtype = devtype;
752         dev->do_stacksize = 1;
753         dev->do_alignreq = 1;
754         dev->do_characteristics = devchars;
755         dev->do_iotimer = NULL;
756         KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);
757
758         /*
759          * Vpd is used for disk/tape devices,
760          * but we don't support those. (Yet.)
761          */
762         dev->do_vpb = NULL;
763
764         dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
765             sizeof(devobj_extension), 0);
766
767         if (dev->do_devobj_ext == NULL) {
768                 if (dev->do_devext != NULL)
769                         ExFreePool(dev->do_devext);
770                 ExFreePool(dev);
771                 return (STATUS_INSUFFICIENT_RESOURCES);
772         }
773
774         dev->do_devobj_ext->dve_type = 0;
775         dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
776         dev->do_devobj_ext->dve_devobj = dev;
777
778         /*
779          * Attach this device to the driver object's list
780          * of devices. Note: this is not the same as attaching
781          * the device to the device stack. The driver's AddDevice
782          * routine must explicitly call IoAttachDeviceToDeviceStack()
783          * to do that.
784          */
785
786         if (drv->dro_devobj == NULL) {
787                 drv->dro_devobj = dev;
788                 dev->do_nextdev = NULL;
789         } else {
790                 dev->do_nextdev = drv->dro_devobj;
791                 drv->dro_devobj = dev;
792         }
793
794         *newdev = dev;
795
796         return (STATUS_SUCCESS);
797 }
798
799 void
800 IoDeleteDevice(device_object *dev)
801 {
802         device_object           *prev;
803
804         if (dev == NULL)
805                 return;
806
807         if (dev->do_devobj_ext != NULL)
808                 ExFreePool(dev->do_devobj_ext);
809
810         if (dev->do_devext != NULL)
811                 ExFreePool(dev->do_devext);
812
813         /* Unlink the device from the driver's device list. */
814
815         prev = dev->do_drvobj->dro_devobj;
816         if (prev == dev)
817                 dev->do_drvobj->dro_devobj = dev->do_nextdev;
818         else {
819                 while (prev->do_nextdev != dev)
820                         prev = prev->do_nextdev;
821                 prev->do_nextdev = dev->do_nextdev;
822         }
823
824         ExFreePool(dev);
825 }
826
827 device_object *
828 IoGetAttachedDevice(device_object *dev)
829 {
830         device_object           *d;
831
832         if (dev == NULL)
833                 return (NULL);
834
835         d = dev;
836
837         while (d->do_attacheddev != NULL)
838                 d = d->do_attacheddev;
839
840         return (d);
841 }
842
843 static irp *
844 IoBuildSynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
845     uint32_t len, uint64_t *off, nt_kevent *event, io_status_block *status)
846 {
847         irp                     *ip;
848
849         ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
850         if (ip == NULL)
851                 return (NULL);
852         ip->irp_usrevent = event;
853
854         return (ip);
855 }
856
857 static irp *
858 IoBuildAsynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
859     uint32_t len, uint64_t *off, io_status_block *status)
860 {
861         irp                     *ip;
862         io_stack_location       *sl;
863
864         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
865         if (ip == NULL)
866                 return (NULL);
867
868         ip->irp_usriostat = status;
869         ip->irp_tail.irp_overlay.irp_thread = NULL;
870
871         sl = IoGetNextIrpStackLocation(ip);
872         sl->isl_major = func;
873         sl->isl_minor = 0;
874         sl->isl_flags = 0;
875         sl->isl_ctl = 0;
876         sl->isl_devobj = dobj;
877         sl->isl_fileobj = NULL;
878         sl->isl_completionfunc = NULL;
879
880         ip->irp_userbuf = buf;
881
882         if (dobj->do_flags & DO_BUFFERED_IO) {
883                 ip->irp_assoc.irp_sysbuf =
884                     ExAllocatePoolWithTag(NonPagedPool, len, 0);
885                 if (ip->irp_assoc.irp_sysbuf == NULL) {
886                         IoFreeIrp(ip);
887                         return (NULL);
888                 }
889                 bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
890         }
891
892         if (dobj->do_flags & DO_DIRECT_IO) {
893                 ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
894                 if (ip->irp_mdl == NULL) {
895                         if (ip->irp_assoc.irp_sysbuf != NULL)
896                                 ExFreePool(ip->irp_assoc.irp_sysbuf);
897                         IoFreeIrp(ip);
898                         return (NULL);
899                 }
900                 ip->irp_userbuf = NULL;
901                 ip->irp_assoc.irp_sysbuf = NULL;
902         }
903
904         if (func == IRP_MJ_READ) {
905                 sl->isl_parameters.isl_read.isl_len = len;
906                 if (off != NULL)
907                         sl->isl_parameters.isl_read.isl_byteoff = *off;
908                 else
909                         sl->isl_parameters.isl_read.isl_byteoff = 0;
910         }
911
912         if (func == IRP_MJ_WRITE) {
913                 sl->isl_parameters.isl_write.isl_len = len;
914                 if (off != NULL)
915                         sl->isl_parameters.isl_write.isl_byteoff = *off;
916                 else
917                         sl->isl_parameters.isl_write.isl_byteoff = 0;
918         }
919
920         return (ip);
921 }
922
923 static irp *
924 IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
925         uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
926         nt_kevent *event, io_status_block *status)
927 {
928         irp                     *ip;
929         io_stack_location       *sl;
930         uint32_t                buflen;
931
932         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
933         if (ip == NULL)
934                 return (NULL);
935         ip->irp_usrevent = event;
936         ip->irp_usriostat = status;
937         ip->irp_tail.irp_overlay.irp_thread = NULL;
938
939         sl = IoGetNextIrpStackLocation(ip);
940         sl->isl_major = isinternal == TRUE ?
941             IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
942         sl->isl_minor = 0;
943         sl->isl_flags = 0;
944         sl->isl_ctl = 0;
945         sl->isl_devobj = dobj;
946         sl->isl_fileobj = NULL;
947         sl->isl_completionfunc = NULL;
948         sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
949         sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
950         sl->isl_parameters.isl_ioctl.isl_obuflen = olen;
951
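        /*
         * The transfer type encoded in the I/O control code decides how
         * the buffers are staged: METHOD_BUFFERED uses one system buffer
         * sized to the larger of the input and output lengths,
         * METHOD_IN_DIRECT/METHOD_OUT_DIRECT copy the input into a system
         * buffer and describe the output buffer with an MDL, and
         * METHOD_NEITHER passes the caller's pointers through unchanged.
         */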
952         switch(IO_METHOD(iocode)) {
953         case METHOD_BUFFERED:
954                 if (ilen > olen)
955                         buflen = ilen;
956                 else
957                         buflen = olen;
958                 if (buflen) {
959                         ip->irp_assoc.irp_sysbuf =
960                             ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
961                         if (ip->irp_assoc.irp_sysbuf == NULL) {
962                                 IoFreeIrp(ip);
963                                 return (NULL);
964                         }
965                 }
966                 if (ilen && ibuf != NULL) {
967                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
968                         bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
969                             buflen - ilen);
970                 } else
971                         bzero(ip->irp_assoc.irp_sysbuf, ilen);
972                 ip->irp_userbuf = obuf;
973                 break;
974         case METHOD_IN_DIRECT:
975         case METHOD_OUT_DIRECT:
976                 if (ilen && ibuf != NULL) {
977                         ip->irp_assoc.irp_sysbuf =
978                             ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
979                         if (ip->irp_assoc.irp_sysbuf == NULL) {
980                                 IoFreeIrp(ip);
981                                 return (NULL);
982                         }
983                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
984                 }
985                 if (olen && obuf != NULL) {
986                         ip->irp_mdl = IoAllocateMdl(obuf, olen,
987                             FALSE, FALSE, ip);
988                         /*
989                          * Normally we would MmProbeAndLockPages()
990                          * here, but we don't have to in our
991                          * implementation.
992                          */
993                 }
994                 break;
995         case METHOD_NEITHER:
996                 ip->irp_userbuf = obuf;
997                 sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
998                 break;
999         default:
1000                 break;
1001         }
1002
1003         /*
1004          * Ideally, we should associate this IRP with the calling
1005          * thread here.
1006          */
1007
1008         return (ip);
1009 }
1010
1011 static irp *
1012 IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
1013 {
1014         irp                     *i;
1015
1016         i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
1017         if (i == NULL)
1018                 return (NULL);
1019
1020         IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);
1021
1022         return (i);
1023 }
1024
1025 static irp *
1026 IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
1027 {
1028         irp                     *associrp;
1029
1030         associrp = IoAllocateIrp(stsize, FALSE);
1031         if (associrp == NULL)
1032                 return (NULL);
1033
1034         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1035         associrp->irp_flags |= IRP_ASSOCIATED_IRP;
1036         associrp->irp_tail.irp_overlay.irp_thread =
1037             ip->irp_tail.irp_overlay.irp_thread;
1038         associrp->irp_assoc.irp_master = ip;
1039         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1040
1041         return (associrp);
1042 }
1043
1044 static void
1045 IoFreeIrp(irp *ip)
1046 {
1047         ExFreePool(ip);
1048 }
1049
1050 static void
1051 IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
1052 {
1053         bzero((char *)io, IoSizeOfIrp(ssize));
1054         io->irp_size = psize;
1055         io->irp_stackcnt = ssize;
1056         io->irp_currentstackloc = ssize;
1057         InitializeListHead(&io->irp_thlist);
1058         io->irp_tail.irp_overlay.irp_csl =
1059             (io_stack_location *)(io + 1) + ssize;
1060 }
1061
1062 static void
1063 IoReuseIrp(irp *ip, uint32_t status)
1064 {
1065         uint8_t                 allocflags;
1066
1067         allocflags = ip->irp_allocflags;
1068         IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
1069         ip->irp_iostat.isb_status = status;
1070         ip->irp_allocflags = allocflags;
1071 }
1072
1073 void
1074 IoAcquireCancelSpinLock(uint8_t *irql)
1075 {
1076         KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
1077 }
1078
1079 void
1080 IoReleaseCancelSpinLock(uint8_t irql)
1081 {
1082         KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
1083 }
1084
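/*
 * IoCancelIrp() takes the global cancel spinlock, clears the IRP's
 * cancel routine and marks the IRP cancelled.  If no cancel routine was
 * set, the lock is dropped and FALSE is returned; otherwise the cancel
 * routine is invoked with the cancel spinlock still held and is
 * responsible for releasing it via IoReleaseCancelSpinLock().
 */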
1085 uint8_t
1086 IoCancelIrp(irp *ip)
1087 {
1088         cancel_func             cfunc;
1089         uint8_t                 cancelirql;
1090
1091         IoAcquireCancelSpinLock(&cancelirql);
1092         cfunc = IoSetCancelRoutine(ip, NULL);
1093         ip->irp_cancel = TRUE;
1094         if (cfunc == NULL) {
1095                 IoReleaseCancelSpinLock(cancelirql);
1096                 return (FALSE);
1097         }
1098         ip->irp_cancelirql = cancelirql;
1099         MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
1100         return (uint8_t)IoSetCancelValue(ip, TRUE);
1101 }
1102
1103 uint32_t
1104 IofCallDriver(device_object *dobj, irp *ip)
1105 {
1106         driver_object           *drvobj;
1107         io_stack_location       *sl;
1108         uint32_t                status;
1109         driver_dispatch         disp;
1110
1111         drvobj = dobj->do_drvobj;
1112
1113         if (ip->irp_currentstackloc <= 0)
1114                 panic("IoCallDriver(): out of stack locations");
1115
1116         IoSetNextIrpStackLocation(ip);
1117         sl = IoGetCurrentIrpStackLocation(ip);
1118
1119         sl->isl_devobj = dobj;
1120
1121         disp = drvobj->dro_dispatch[sl->isl_major];
1122         status = MSCALL2(disp, dobj, ip);
1123
1124         return (status);
1125 }
1126
1127 void
1128 IofCompleteRequest(irp *ip, uint8_t prioboost)
1129 {
1130         uint32_t                status;
1131         device_object           *dobj;
1132         io_stack_location       *sl;
1133         completion_func         cf;
1134
1135         KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
1136             ("incorrect IRP(%p) status (STATUS_PENDING)", ip));
1137
1138         sl = IoGetCurrentIrpStackLocation(ip);
1139         IoSkipCurrentIrpStackLocation(ip);
1140
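        /*
         * Walk the remaining stack locations from the current one toward
         * the top of the stack, invoking each completion routine whose
         * SL_INVOKE_ON_* flags match the IRP's final status.  A completion
         * routine that returns STATUS_MORE_PROCESSING_REQUIRED stops the
         * unwind here.
         */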
1141         do {
1142                 if (sl->isl_ctl & SL_PENDING_RETURNED)
1143                         ip->irp_pendingreturned = TRUE;
1144
1145                 if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
1146                         dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
1147                 else
1148                         dobj = NULL;
1149
1150                 if (sl->isl_completionfunc != NULL &&
1151                     ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
1152                     sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
1153                     (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
1154                     sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
1155                     (ip->irp_cancel == TRUE &&
1156                     sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
1157                         cf = sl->isl_completionfunc;
1158                         status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
1159                         if (status == STATUS_MORE_PROCESSING_REQUIRED)
1160                                 return;
1161                 } else {
1162                         if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
1163                             (ip->irp_pendingreturned == TRUE))
1164                                 IoMarkIrpPending(ip);
1165                 }
1166
1167                 /* Move on to the next stack location. */
1168                 IoSkipCurrentIrpStackLocation(ip);
1169                 sl++;
1170         } while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));
1171
1172         if (ip->irp_usriostat != NULL)
1173                 *ip->irp_usriostat = ip->irp_iostat;
1174         if (ip->irp_usrevent != NULL)
1175                 KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
1176
1177         /* Handle any associated IRPs. */
1178
1179         if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
1180                 uint32_t                masterirpcnt;
1181                 irp                     *masterirp;
1182                 mdl                     *m;
1183
1184                 masterirp = ip->irp_assoc.irp_master;
1185                 masterirpcnt =
1186                     InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);
1187
1188                 while ((m = ip->irp_mdl) != NULL) {
1189                         ip->irp_mdl = m->mdl_next;
1190                         IoFreeMdl(m);
1191                 }
1192                 IoFreeIrp(ip);
1193                 if (masterirpcnt == 0)
1194                         IoCompleteRequest(masterirp, IO_NO_INCREMENT);
1195                 return;
1196         }
1197
1198         /* With any luck, these conditions will never arise. */
1199
1200         if (ip->irp_flags & IRP_PAGING_IO) {
1201                 if (ip->irp_mdl != NULL)
1202                         IoFreeMdl(ip->irp_mdl);
1203                 IoFreeIrp(ip);
1204         }
1205 }
1206
1207 void
1208 ntoskrnl_intr(void *arg)
1209 {
1210         kinterrupt              *iobj;
1211         uint8_t                 irql;
1212         uint8_t                 claimed;
1213         list_entry              *l;
1214
1215         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1216         l = ntoskrnl_intlist.nle_flink;
1217         while (l != &ntoskrnl_intlist) {
1218                 iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
1219                 claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
1220                 if (claimed == TRUE)
1221                         break;
1222                 l = l->nle_flink;
1223         }
1224         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1225 }
1226
1227 uint8_t
1228 KeAcquireInterruptSpinLock(kinterrupt *iobj)
1229 {
1230         uint8_t                 irql;
1231         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1232         return (irql);
1233 }
1234
1235 void
1236 KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)
1237 {
1238         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1239 }
1240
1241 uint8_t
1242 KeSynchronizeExecution(kinterrupt *iobj, void *syncfunc, void *syncctx)
1243 {
1244         uint8_t                 irql;
1245
1246         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1247         MSCALL1(syncfunc, syncctx);
1248         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1249
1250         return (TRUE);
1251 }
1252
1253 /*
1254  * IoConnectInterrupt() is passed only the interrupt vector and
1255  * irql that a device wants to use, but no device-specific tag
1256  * of any kind. This conflicts rather badly with FreeBSD's
1257  * bus_setup_intr(), which needs the device_t for the device
1258  * requesting interrupt delivery. In order to bypass this
1259  * inconsistency, we implement a second level of interrupt
1260  * dispatching on top of bus_setup_intr(). All devices use
1261  * ntoskrnl_intr() as their ISR, and any device requesting
1262  * interrupts will be registered with ntoskrnl_intr()'s interrupt
1263  * dispatch list. When an interrupt arrives, we walk the list
1264  * and invoke all the registered ISRs. This effectively makes all
1265  * interrupts shared, but it's the only way to duplicate the
1266  * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
1267  */
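/*
 * The resulting dispatch path is: hardware interrupt -> bus_setup_intr()
 * handler -> ntoskrnl_intr() -> walk ntoskrnl_intlist under
 * ntoskrnl_intlock -> MSCALL2(ki_svcfunc, iobj, ki_svcctx) for each
 * registered kinterrupt until one of them claims the interrupt.
 */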
1268
1269 uint32_t
1270 IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
1271         kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
1272         uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
1273 {
1274         uint8_t                 curirql;
1275
1276         *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
1277         if (*iobj == NULL)
1278                 return (STATUS_INSUFFICIENT_RESOURCES);
1279
1280         (*iobj)->ki_svcfunc = svcfunc;
1281         (*iobj)->ki_svcctx = svcctx;
1282
1283         if (lock == NULL) {
1284                 KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
1285                 (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
1286         } else
1287                 (*iobj)->ki_lock = lock;
1288
1289         KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
1290         InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
1291         KeReleaseSpinLock(&ntoskrnl_intlock, curirql);
1292
1293         return (STATUS_SUCCESS);
1294 }
1295
1296 void
1297 IoDisconnectInterrupt(kinterrupt *iobj)
1298 {
1299         uint8_t                 irql;
1300
1301         if (iobj == NULL)
1302                 return;
1303
1304         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1305         RemoveEntryList((&iobj->ki_list));
1306         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1307
1308         ExFreePool(iobj);
1309 }
1310
1311 device_object *
1312 IoAttachDeviceToDeviceStack(device_object *src, device_object *dst)
1313 {
1314         device_object           *attached;
1315
1316         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1317         attached = IoGetAttachedDevice(dst);
1318         attached->do_attacheddev = src;
1319         src->do_attacheddev = NULL;
1320         src->do_stacksize = attached->do_stacksize + 1;
1321         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1322
1323         return (attached);
1324 }
1325
1326 void
1327 IoDetachDevice(device_object *topdev)
1328 {
1329         device_object           *tail;
1330
1331         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1332
1333         /* First, break the chain. */
1334         tail = topdev->do_attacheddev;
1335         if (tail == NULL) {
1336                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1337                 return;
1338         }
1339         topdev->do_attacheddev = tail->do_attacheddev;
1340         topdev->do_refcnt--;
1341
1342         /* Now reduce the stacksize count for the tail objects. */
1343
1344         tail = topdev->do_attacheddev;
1345         while (tail != NULL) {
1346                 tail->do_stacksize--;
1347                 tail = tail->do_attacheddev;
1348         }
1349
1350         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1351 }
1352
1353 /*
1354  * For the most part, an object is considered signalled if
1355  * dh_sigstate == TRUE. The exception is for mutant objects
1356  * (mutexes), where the logic works like this:
1357  *
1358  * - If the thread already owns the object and sigstate is
1359  *   less than or equal to 0, then the object is considered
1360  *   signalled (recursive acquisition).
1361  * - If dh_sigstate == 1, the object is also considered
1362  *   signalled.
1363  */
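/*
 * For example, a mutant with dh_sigstate == 1 is free; acquiring it
 * drops dh_sigstate to 0 and records the owner in km_ownerthread, and a
 * recursive acquisition by the same owner drops it below zero but still
 * counts as signalled.
 */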
1364
1365 static int
1366 ntoskrnl_is_signalled(nt_dispatch_header *obj, struct thread *td)
1367 {
1368         kmutant                 *km;
1369
1370         if (obj->dh_type == DISP_TYPE_MUTANT) {
1371                 km = (kmutant *)obj;
1372                 if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
1373                     obj->dh_sigstate == 1)
1374                         return (TRUE);
1375                 return (FALSE);
1376         }
1377
1378         if (obj->dh_sigstate > 0)
1379                 return (TRUE);
1380         return (FALSE);
1381 }
1382
1383 static void
1384 ntoskrnl_satisfy_wait(nt_dispatch_header *obj, struct thread *td)
1385 {
1386         kmutant                 *km;
1387
1388         switch (obj->dh_type) {
1389         case DISP_TYPE_MUTANT:
1390                 km = (struct kmutant *)obj;
1391                 obj->dh_sigstate--;
1392                 /*
1393                  * If sigstate reaches 0, the mutex is now
1394                  * non-signalled (the new thread owns it).
1395                  */
1396                 if (obj->dh_sigstate == 0) {
1397                         km->km_ownerthread = td;
1398                         if (km->km_abandoned == TRUE)
1399                                 km->km_abandoned = FALSE;
1400                 }
1401                 break;
1402         /* Synchronization objects get reset to unsignalled. */
1403         case DISP_TYPE_SYNCHRONIZATION_EVENT:
1404         case DISP_TYPE_SYNCHRONIZATION_TIMER:
1405                 obj->dh_sigstate = 0;
1406                 break;
1407         case DISP_TYPE_SEMAPHORE:
1408                 obj->dh_sigstate--;
1409                 break;
1410         default:
1411                 break;
1412         }
1413 }
1414
1415 static void
1416 ntoskrnl_satisfy_multiple_waits(wait_block *wb)
1417 {
1418         wait_block              *cur;
1419         struct thread           *td;
1420
1421         cur = wb;
1422         td = wb->wb_kthread;
1423
1424         do {
1425                 ntoskrnl_satisfy_wait(cur->wb_object, td);
1426                 cur->wb_awakened = TRUE;
1427                 cur = cur->wb_next;
1428         } while (cur != wb);
1429 }
1430
1431 /* Always called with dispatcher lock held. */
1432 static void
1433 ntoskrnl_waittest(nt_dispatch_header *obj, uint32_t increment)
1434 {
1435         wait_block              *w, *next;
1436         list_entry              *e;
1437         struct thread           *td;
1438         wb_ext                  *we;
1439         int                     satisfied;
1440
1441         /*
1442          * Once an object has been signalled, we walk its list of
1443          * wait blocks. If a wait block can be awakened, then satisfy
1444          * waits as necessary and wake the thread.
1445          *
1446          * The rules work like this:
1447          *
1448          * If a wait block is marked as WAITTYPE_ANY, then
1449          * we can satisfy the wait conditions on the current
1450          * object and wake the thread right away. Satisfying
1451          * the wait also has the effect of breaking us out
1452          * of the search loop.
1453          *
1454          * If the wait block is marked as WAITTYPE_ALL, then the
1455          * wait block will be part of a circularly linked
1456          * list of wait blocks belonging to a waiting thread
1457          * that's sleeping in KeWaitForMultipleObjects(). In
1458          * order to wake the thread, all the objects in the
1459          * wait list must be in the signalled state. If they
1460          * are, we then satisfy all of them and wake the
1461          * thread.
1462          *
1463          */
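        /*
         * For example, a thread sleeping in KeWaitForMultipleObjects()
         * with WAITTYPE_ALL on an event and a semaphore is not woken when
         * only the event is signalled; once the semaphore is signalled as
         * well, all of its waits are satisfied and the thread is woken
         * through cv_broadcastpri() below.
         */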
1464
1465         e = obj->dh_waitlisthead.nle_flink;
1466
1467         while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
1468                 w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
1469                 we = w->wb_ext;
1470                 td = we->we_td;
1471                 satisfied = FALSE;
1472                 if (w->wb_waittype == WAITTYPE_ANY) {
1473                         /*
1474                          * Thread can be awakened if
1475                          * any wait is satisfied.
1476                          */
1477                         ntoskrnl_satisfy_wait(obj, td);
1478                         satisfied = TRUE;
1479                         w->wb_awakened = TRUE;
1480                 } else {
1481                         /*
1482                          * Thread can only be woken up
1483                          * if all waits are satisfied.
1484                          * If the thread is waiting on multiple
1485                          * objects, they should all be linked
1486                          * through the wb_next pointers in the
1487                          * wait blocks.
1488                          */
1489                         satisfied = TRUE;
1490                         next = w->wb_next;
1491                         while (next != w) {
1492                                 if (ntoskrnl_is_signalled(next->wb_object, td) == FALSE) {
1493                                         satisfied = FALSE;
1494                                         break;
1495                                 }
1496                                 next = next->wb_next;
1497                         }
1498                         ntoskrnl_satisfy_multiple_waits(w);
1499                 }
1500
1501                 if (satisfied == TRUE)
1502                         cv_broadcastpri(&we->we_cv,
1503                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
1504                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
1505
1506                 e = e->nle_flink;
1507         }
1508 }
1509
1510 /*
1511  * Return the number of 100 nanosecond intervals since
1512  * January 1, 1601. (?!?!)
1513  */
1514 void
1515 ntoskrnl_time(uint64_t *tval)
1516 {
1517         struct timespec         ts;
1518
1519         nanotime(&ts);
1520         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
1521             11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
1522 }
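/*
 * For example, ts = { 0, 0 } (the Unix epoch) yields
 * 11644473600 * 10000000 = 116444736000000000, the number of 100 ns
 * ticks between 1601-01-01 and 1970-01-01.
 */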
1523
1524 static void
1525 KeQuerySystemTime(uint64_t *current_time)
1526 {
1527         ntoskrnl_time(current_time);
1528 }
1529
1530 static uint32_t
1531 KeTickCount(void)
1532 {
1533         struct timeval tv;
1534         getmicrouptime(&tv);
1535         return tvtohz_high(&tv);
1536 }
1537
1538
1539 /*
1540  * KeWaitForSingleObject() is a tricky beast, because it can be used
1541  * with several different object types: semaphores, timers, events,
1542  * mutexes and threads. Semaphores don't appear very often, but the
1543  * other object types are quite common. KeWaitForSingleObject() is
1544  * what's normally used to acquire a mutex, and it can be used to
1545  * wait for a thread termination.
1546  *
1547  * The Windows NDIS API is implemented in terms of Windows kernel
1548  * primitives, and some of the object manipulation is duplicated in
1549  * NDIS. For example, NDIS has timers and events, which are actually
1550  * Windows kevents and ktimers. Now, you're supposed to only use the
1551  * NDIS variants of these objects within the confines of the NDIS API,
1552  * but there are some naughty developers out there who will use
1553  * KeWaitForSingleObject() on NDIS timer and event objects, so we
1554  * have to support that as well. Consequently, our NDIS timer and event
1555  * code has to be closely tied into our ntoskrnl timer and event code,
1556  * just as it is in Windows.
1557  *
1558  * KeWaitForSingleObject() may do different things for different kinds
1559  * of objects:
1560  *
1561  * - For events, we check if the event has been signalled. If the
1562  *   event is already in the signalled state, we just return immediately,
1563  *   otherwise we wait for it to be set to the signalled state by someone
1564  *   else calling KeSetEvent(). Events can be either synchronization or
1565  *   notification events.
1566  *
1567  * - For timers, if the timer has already fired and the timer is in
1568  *   the signalled state, we just return, otherwise we wait on the
1569  *   timer. Unlike an event, timers get signalled automatically when
1570  *   they expire rather than someone having to trip them manually.
1571  *   Timers initialized with KeInitializeTimer() are always notification
1572  *   events: KeInitializeTimerEx() lets you initialize a timer as
1573  *   either a notification or synchronization event.
1574  *
1575  * - For mutexes, we try to acquire the mutex and if we can't, we wait
1576  *   on the mutex until it's available and then grab it. When a mutex is
1577  *   released, it enters the signalled state, which wakes up one of the
1578  *   threads waiting to acquire it. Mutexes are always synchronization
1579  *   events.
1580  *
1581  * - For threads, the only thing we do is wait until the thread object
1582  *   enters a signalled state, which occurs when the thread terminates.
1583  *   Threads are always notification events.
1584  *
1585  * A notification event wakes up all threads waiting on an object. A
1586  * synchronization event wakes up just one. Also, a synchronization event
1587  * is auto-clearing, which means we automatically set the event back to
1588  * the non-signalled state once the wakeup is done.
1589  */
1590
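     /*
      * For reference, a minimal driver-side usage sketch (the event
      * name is hypothetical): initialize an event and block on it
      * with no timeout.
      *
      *      nt_kevent ev;
      *      KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
      *      ...
      *      KeWaitForSingleObject(&ev, 0, 0, FALSE, NULL);
      */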
1591 uint32_t
1592 KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
1593     uint8_t alertable, int64_t *duetime)
1594 {
1595         wait_block              w;
1596         struct thread           *td = curthread;
1597         struct timeval          tv;
1598         int                     error = 0;
1599         uint64_t                curtime;
1600         wb_ext                  we;
1601         nt_dispatch_header      *obj;
1602
1603         obj = arg;
1604
1605         if (obj == NULL)
1606                 return (STATUS_INVALID_PARAMETER);
1607
1608         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1609
1610         cv_init(&we.we_cv, "KeWFS");
1611         we.we_td = td;
1612
1613         /*
1614          * Check to see if this object is already signalled,
1615          * and just return without waiting if it is.
1616          */
1617         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
1618                 /* Sanity check the signal state value. */
1619                 if (obj->dh_sigstate != INT32_MIN) {
1620                         ntoskrnl_satisfy_wait(obj, curthread);
1621                         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1622                         return (STATUS_SUCCESS);
1623                 } else {
1624                         /*
1625                          * There's a limit to how many times we can
1626                          * recursively acquire a mutant. If we hit
1627                          * the limit, something is very wrong.
1628                          */
1629                         if (obj->dh_type == DISP_TYPE_MUTANT) {
1630                                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1631                                 panic("mutant limit exceeded");
1632                         }
1633                 }
1634         }
1635
1636         bzero((char *)&w, sizeof(wait_block));
1637         w.wb_object = obj;
1638         w.wb_ext = &we;
1639         w.wb_waittype = WAITTYPE_ANY;
1640         w.wb_next = &w;
1641         w.wb_waitkey = 0;
1642         w.wb_awakened = FALSE;
1643         w.wb_oldpri = td->td_pri;
1644
1645         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
1646
1647         /*
1648          * The timeout value is specified in 100 nanosecond units
1649          * and can be a positive or negative number. If it's positive,
1650          * then the duetime is absolute, and we need to convert it
1651          * to an offset relative to now in order to use it.
1652          * If it's negative, then the duetime is relative and we
1653          * just have to convert the units.
1654          */
1655
1656         if (duetime != NULL) {
1657                 if (*duetime < 0) {
1658                         tv.tv_sec = - (*duetime) / 10000000;
1659                         tv.tv_usec = (- (*duetime) / 10) -
1660                             (tv.tv_sec * 1000000);
1661                 } else {
1662                         ntoskrnl_time(&curtime);
1663                         if (*duetime < curtime)
1664                                 tv.tv_sec = tv.tv_usec = 0;
1665                         else {
1666                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1667                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1668                                     (tv.tv_sec * 1000000);
1669                         }
1670                 }
1671         }
1672
1673         if (duetime == NULL)
1674                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1675         else
1676                 error = cv_timedwait(&we.we_cv,
1677                     &ntoskrnl_dispatchlock, tvtohz_high(&tv));
1678
1679         RemoveEntryList(&w.wb_waitlist);
1680
1681         cv_destroy(&we.we_cv);
1682
1683         /* We timed out. Leave the object alone and return status. */
1684
1685         if (error == EWOULDBLOCK) {
1686                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1687                 return (STATUS_TIMEOUT);
1688         }
1689
1690         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1691
1692         return (STATUS_SUCCESS);
1693 /*
1694         return (KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
1695             mode, alertable, duetime, &w));
1696 */
1697 }
1698
1699 static uint32_t
1700 KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
1701         uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
1702         wait_block *wb_array)
1703 {
1704         struct thread           *td = curthread;
1705         wait_block              *whead, *w;
1706         wait_block              _wb_array[MAX_WAIT_OBJECTS];
1707         nt_dispatch_header      *cur;
1708         struct timeval          tv;
1709         int                     i, wcnt = 0, error = 0;
1710         uint64_t                curtime;
1711         struct timespec         t1, t2;
1712         uint32_t                status = STATUS_SUCCESS;
1713         wb_ext                  we;
1714
1715         if (cnt > MAX_WAIT_OBJECTS)
1716                 return (STATUS_INVALID_PARAMETER);
1717         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
1718                 return (STATUS_INVALID_PARAMETER);
1719
1720         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1721
1722         cv_init(&we.we_cv, "KeWFM");
1723         we.we_td = td;
1724
1725         if (wb_array == NULL)
1726                 whead = _wb_array;
1727         else
1728                 whead = wb_array;
1729
1730         bzero((char *)whead, sizeof(wait_block) * cnt);
1731
1732         /* First pass: see if we can satisfy any waits immediately. */
1733
1734         wcnt = 0;
1735         w = whead;
1736
1737         for (i = 0; i < cnt; i++) {
1738                 InsertTailList((&obj[i]->dh_waitlisthead),
1739                     (&w->wb_waitlist));
1740                 w->wb_ext = &we;
1741                 w->wb_object = obj[i];
1742                 w->wb_waittype = wtype;
1743                 w->wb_waitkey = i;
1744                 w->wb_awakened = FALSE;
1745                 w->wb_oldpri = td->td_pri;
1746                 w->wb_next = w + 1;
1747                 w++;
1748                 wcnt++;
1749                 if (ntoskrnl_is_signalled(obj[i], td)) {
1750                         /*
1751                          * There's a limit to how many times
1752                          * we can recursively acquire a mutant.
1753                          * If we hit the limit, something
1754                          * is very wrong.
1755                          */
1756                         if (obj[i]->dh_sigstate == INT32_MIN &&
1757                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
1758                                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1759                                 panic("mutant limit exceeded");
1760                         }
1761
1762                         /*
1763                          * If this is a WAITTYPE_ANY wait, then
1764                          * satisfy the waited object and exit
1765                          * right now.
1766                          */
1767
1768                         if (wtype == WAITTYPE_ANY) {
1769                                 ntoskrnl_satisfy_wait(obj[i], td);
1770                                 status = STATUS_WAIT_0 + i;
1771                                 goto wait_done;
1772                         } else {
1773                                 w--;
1774                                 wcnt--;
1775                                 w->wb_object = NULL;
1776                                 RemoveEntryList(&w->wb_waitlist);
1777                         }
1778                 }
1779         }
1780
1781         /*
1782          * If this is a WAITTYPE_ALL wait and all objects are
1783          * already signalled, satisfy the waits and exit now.
1784          */
1785
1786         if (wtype == WAITTYPE_ALL && wcnt == 0) {
1787                 for (i = 0; i < cnt; i++)
1788                         ntoskrnl_satisfy_wait(obj[i], td);
1789                 status = STATUS_SUCCESS;
1790                 goto wait_done;
1791         }
1792
1793         /*
1794          * Create a circular waitblock list. The waitcount
1795          * must always be non-zero when we get here.
1796          */
1797
1798         (w - 1)->wb_next = whead;
1799
1800         /* Wait on any objects that aren't yet signalled. */
1801
1802         /* Calculate timeout, if any. */
1803
1804         if (duetime != NULL) {
1805                 if (*duetime < 0) {
1806                         tv.tv_sec = - (*duetime) / 10000000;
1807                         tv.tv_usec = (- (*duetime) / 10) -
1808                             (tv.tv_sec * 1000000);
1809                 } else {
1810                         ntoskrnl_time(&curtime);
1811                         if (*duetime < curtime)
1812                                 tv.tv_sec = tv.tv_usec = 0;
1813                         else {
1814                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1815                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1816                                     (tv.tv_sec * 1000000);
1817                         }
1818                 }
1819         }
1820
1821         while (wcnt) {
1822                 nanotime(&t1);
1823
1824                 if (duetime == NULL)
1825                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1826                 else
1827                         error = cv_timedwait(&we.we_cv,
1828                             &ntoskrnl_dispatchlock, tvtohz_high(&tv));
1829
1830                 /* The wait timed out. */
1831
1832                 if (error) {
1833                         status = STATUS_TIMEOUT;
1834                         goto wait_done;
1835                 }
1836
1837                 nanotime(&t2);
1838
1839                 /* See what's been signalled. */
1840
1841                 w = whead;
1842                 do {
1843                         cur = w->wb_object;
1844                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
1845                             w->wb_awakened == TRUE) {
1846                                 /* Sanity check the signal state value. */
1847                                 if (cur->dh_sigstate == INT32_MIN &&
1848                                     cur->dh_type == DISP_TYPE_MUTANT) {
1849                                         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1850                                         panic("mutant limit exceeded");
1851                                 }
1852                                 wcnt--;
1853                                 if (wtype == WAITTYPE_ANY) {
1854                                         status = STATUS_WAIT_0 +
1855                                             w->wb_waitkey;
1856                                         goto wait_done;
1857                                 }
1858                         }
1859                         w = w->wb_next;
1860                 } while (w != whead);
1861
1862                 /*
1863                  * If all objects have been signalled, or if this
1864                  * is a WAITTYPE_ANY wait and we were woken up by
1865                  * someone, we can bail.
1866                  */
1867
1868                 if (wcnt == 0) {
1869                         status = STATUS_SUCCESS;
1870                         goto wait_done;
1871                 }
1872
1873                 /*
1874                  * If this is a WAITTYPE_ALL wait and there are still
1875                  * objects that haven't been signalled, deduct the
1876                  * time that's elapsed so far from the timeout and
1877                  * wait again (or continue waiting indefinitely if
1878                  * there's no timeout).
1879                  */
1880
1881                 if (duetime != NULL) {
1882                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1883                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1884                 }
1885         }
1886
1887
1888 wait_done:
1889
1890         cv_destroy(&we.we_cv);
1891
1892         for (i = 0; i < cnt; i++) {
1893                 if (whead[i].wb_object != NULL)
1894                         RemoveEntryList(&whead[i].wb_waitlist);
1895
1896         }
1897         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1898
1899         return (status);
1900 }
1901
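     /*
      * Memory-mapped register access routines. Drivers use these to
      * read and write device registers; we translate each access into
      * the corresponding bus_space operation on the memory bus space.
      */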
1902 static void
1903 WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
1904 {
1905         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1906 }
1907
1908 static uint16_t
1909 READ_REGISTER_USHORT(uint16_t *reg)
1910 {
1911         return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1912 }
1913
1914 static void
1915 WRITE_REGISTER_ULONG(uint32_t *reg, uint32_t val)
1916 {
1917         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1918 }
1919
1920 static uint32_t
1921 READ_REGISTER_ULONG(uint32_t *reg)
1922 {
1923         return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1924 }
1925
1926 static uint8_t
1927 READ_REGISTER_UCHAR(uint8_t *reg)
1928 {
1929         return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1930 }
1931
1932 static void
1933 WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
1934 {
1935         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1936 }
1937
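     /*
      * 64-bit arithmetic helpers. On x86 the Microsoft compiler emits
      * calls to these routines for 64-bit multiply, divide, modulus
      * and shift operations, and drivers expect to resolve them from
      * ntoskrnl, so we supply trivial C implementations.
      */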
1938 static int64_t
1939 _allmul(int64_t a, int64_t b)
1940 {
1941         return (a * b);
1942 }
1943
1944 static int64_t
1945 _alldiv(int64_t a, int64_t b)
1946 {
1947         return (a / b);
1948 }
1949
1950 static int64_t
1951 _allrem(int64_t a, int64_t b)
1952 {
1953         return (a % b);
1954 }
1955
1956 static uint64_t
1957 _aullmul(uint64_t a, uint64_t b)
1958 {
1959         return (a * b);
1960 }
1961
1962 static uint64_t
1963 _aulldiv(uint64_t a, uint64_t b)
1964 {
1965         return (a / b);
1966 }
1967
1968 static uint64_t
1969 _aullrem(uint64_t a, uint64_t b)
1970 {
1971         return (a % b);
1972 }
1973
1974 static int64_t
1975 _allshl(int64_t a, uint8_t b)
1976 {
1977         return (a << b);
1978 }
1979
1980 static uint64_t
1981 _aullshl(uint64_t a, uint8_t b)
1982 {
1983         return (a << b);
1984 }
1985
1986 static int64_t
1987 _allshr(int64_t a, uint8_t b)
1988 {
1989         return (a >> b);
1990 }
1991
1992 static uint64_t
1993 _aullshr(uint64_t a, uint8_t b)
1994 {
1995         return (a >> b);
1996 }
1997
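     /*
      * Unlocked push/pop primitives for Windows interlocked
      * singly-linked lists. The Interlocked*EntrySList() wrappers
      * below add the ntoskrnl_interlock spinlock around these.
      */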
1998 static slist_entry *
1999 ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
2000 {
2001         slist_entry             *oldhead;
2002
2003         oldhead = head->slh_list.slh_next;
2004         entry->sl_next = head->slh_list.slh_next;
2005         head->slh_list.slh_next = entry;
2006         head->slh_list.slh_depth++;
2007         head->slh_list.slh_seq++;
2008
2009         return (oldhead);
2010 }
2011
2012 static void
2013 InitializeSListHead(slist_header *head)
2014 {
2015         memset(head, 0, sizeof(*head));
2016 }
2017
2018 static slist_entry *
2019 ntoskrnl_popsl(slist_header *head)
2020 {
2021         slist_entry             *first;
2022
2023         first = head->slh_list.slh_next;
2024         if (first != NULL) {
2025                 head->slh_list.slh_next = first->sl_next;
2026                 head->slh_list.slh_depth--;
2027                 head->slh_list.slh_seq++;
2028         }
2029
2030         return (first);
2031 }
2032
2033 /*
2034  * We need this to make lookaside lists work for amd64.
2035  * We store a pointer to ExAllocatePoolWithTag() in the lookaside
2036  * list structure. For amd64 to work right, this has to be a
2037  * pointer to the wrapped version of the routine, not the
2038  * original. Letting the Windows driver invoke the original
2039  * function directly will result in a calling convention
2040  * mismatch and a crash. On x86, this effectively
2041  * becomes a no-op since ipt_func and ipt_wrap are the same.
2042  */
2043
2044 static funcptr
2045 ntoskrnl_findwrap(funcptr func)
2046 {
2047         image_patch_table       *patch;
2048
2049         patch = ntoskrnl_functbl;
2050         while (patch->ipt_func != NULL) {
2051                 if ((funcptr)patch->ipt_func == func)
2052                         return ((funcptr)patch->ipt_wrap);
2053                 patch++;
2054         }
2055
2056         return (NULL);
2057 }
2058
2059 static void
2060 ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
2061         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2062         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2063 {
2064         bzero((char *)lookaside, sizeof(paged_lookaside_list));
2065
2066         if (size < sizeof(slist_entry))
2067                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2068         else
2069                 lookaside->nll_l.gl_size = size;
2070         lookaside->nll_l.gl_tag = tag;
2071         if (allocfunc == NULL)
2072                 lookaside->nll_l.gl_allocfunc =
2073                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2074         else
2075                 lookaside->nll_l.gl_allocfunc = allocfunc;
2076
2077         if (freefunc == NULL)
2078                 lookaside->nll_l.gl_freefunc =
2079                     ntoskrnl_findwrap((funcptr)ExFreePool);
2080         else
2081                 lookaside->nll_l.gl_freefunc = freefunc;
2082
2083         lookaside->nll_l.gl_type = NonPagedPool;
2084         lookaside->nll_l.gl_depth = depth;
2085         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2086 }
2087
2088 static void
2089 ExDeletePagedLookasideList(paged_lookaside_list *lookaside)
2090 {
2091         void                    *buf;
2092         void            (*freefunc)(void *);
2093
2094         freefunc = lookaside->nll_l.gl_freefunc;
2095         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2096                 MSCALL1(freefunc, buf);
2097 }
2098
2099 static void
2100 ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
2101         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2102         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2103 {
2104         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2105
2106         if (size < sizeof(slist_entry))
2107                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2108         else
2109                 lookaside->nll_l.gl_size = size;
2110         lookaside->nll_l.gl_tag = tag;
2111         if (allocfunc == NULL)
2112                 lookaside->nll_l.gl_allocfunc =
2113                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2114         else
2115                 lookaside->nll_l.gl_allocfunc = allocfunc;
2116
2117         if (freefunc == NULL)
2118                 lookaside->nll_l.gl_freefunc =
2119                     ntoskrnl_findwrap((funcptr)ExFreePool);
2120         else
2121                 lookaside->nll_l.gl_freefunc = freefunc;
2122
2123         lookaside->nll_l.gl_type = NonPagedPool;
2124         lookaside->nll_l.gl_depth = depth;
2125         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2126 }
2127
2128 static void
2129 ExDeleteNPagedLookasideList(npaged_lookaside_list *lookaside)
2130 {
2131         void                    *buf;
2132         void            (*freefunc)(void *);
2133
2134         freefunc = lookaside->nll_l.gl_freefunc;
2135         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2136                 MSCALL1(freefunc, buf);
2137 }
2138
2139 slist_entry *
2140 InterlockedPushEntrySList(slist_header *head, slist_entry *entry)
2141 {
2142         slist_entry             *oldhead;
2143
2144         mtx_spinlock(&ntoskrnl_interlock);
2145         oldhead = ntoskrnl_pushsl(head, entry);
2146         mtx_spinunlock(&ntoskrnl_interlock);
2147
2148         return (oldhead);
2149 }
2150
2151 slist_entry *
2152 InterlockedPopEntrySList(slist_header *head)
2153 {
2154         slist_entry             *first;
2155
2156         mtx_spinlock(&ntoskrnl_interlock);
2157         first = ntoskrnl_popsl(head);
2158         mtx_spinunlock(&ntoskrnl_interlock);
2159
2160         return (first);
2161 }
2162
2163 static slist_entry *
2164 ExInterlockedPushEntrySList(slist_header *head, slist_entry *entry,
2165     kspin_lock *lock)
2166 {
2167         return (InterlockedPushEntrySList(head, entry));
2168 }
2169
2170 static slist_entry *
2171 ExInterlockedPopEntrySList(slist_header *head, kspin_lock *lock)
2172 {
2173         return (InterlockedPopEntrySList(head));
2174 }
2175
2176 uint16_t
2177 ExQueryDepthSList(slist_header *head)
2178 {
2179         uint16_t                depth;
2180
2181         mtx_spinlock(&ntoskrnl_interlock);
2182         depth = head->slh_list.slh_depth;
2183         mtx_spinunlock(&ntoskrnl_interlock);
2184
2185         return (depth);
2186 }
2187
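     /*
      * Windows kernel spinlocks. A kspin_lock is a single word:
      * acquiring it spins on an atomic compare-and-set, and releasing
      * it stores zero with release semantics.
      */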
2188 void
2189 KeInitializeSpinLock(kspin_lock *lock)
2190 {
2191         *lock = 0;
2192 }
2193
2194 void
2195 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2196 {
2197         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2198                 /* sit and spin */;
2199 }
2200
2201 void
2202 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2203 {
2204         atomic_store_rel_int((volatile u_int *)lock, 0);
2205 }
2206
2207 uintptr_t
2208 InterlockedExchange(volatile uint32_t *dst, uintptr_t val)
2209 {
2210         uintptr_t               r;
2211
2212         mtx_spinlock(&ntoskrnl_interlock);
2213         r = *dst;
2214         *dst = val;
2215         mtx_spinunlock(&ntoskrnl_interlock);
2216
2217         return (r);
2218 }
2219
2220 static uint32_t
2221 InterlockedIncrement(volatile uint32_t *addend)
2222 {
2223         atomic_add_int((volatile u_int *)addend, 1);
2224         return (*addend);
2225 }
2226
2227 static uint32_t
2228 InterlockedDecrement(volatile uint32_t *addend)
2229 {
2230         atomic_subtract_int((volatile u_int *)addend, 1);
2231         return (*addend);
2232 }
2233
2234 static void
2235 ExInterlockedAddLargeStatistic(uint64_t *addend, uint32_t inc)
2236 {
2237         mtx_spinlock(&ntoskrnl_interlock);
2238         *addend += inc;
2239         mtx_spinunlock(&ntoskrnl_interlock);
2240 }
2241
2242 mdl *
2243 IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
2244         uint8_t chargequota, irp *iopkt)
2245 {
2246         mdl                     *m;
2247         int                     zone = 0;
2248
2249         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2250                 m = ExAllocatePoolWithTag(NonPagedPool,
2251                     MmSizeOfMdl(vaddr, len), 0);
2252         else {
2253                 m = objcache_get(mdl_cache, M_NOWAIT);
2254                 if (m != NULL)
                             bzero(m, sizeof(mdl));
2255                 zone++;
2256         }
2257
2258         if (m == NULL)
2259                 return (NULL);
2260
2261         MmInitializeMdl(m, vaddr, len);
2262
2263         /*
2264          * MmInitializeMdl() clears the flags field, so we
2265          * have to set this here. If the MDL came from the
2266          * MDL objcache, tag it so we can release it to
2267          * the right place later.
2268          */
2269         if (zone)
2270                 m->mdl_flags = MDL_ZONE_ALLOCED;
2271
2272         if (iopkt != NULL) {
2273                 if (secondarybuf == TRUE) {
2274                         mdl                     *last;
2275                         last = iopkt->irp_mdl;
2276                         while (last->mdl_next != NULL)
2277                                 last = last->mdl_next;
2278                         last->mdl_next = m;
2279                 } else {
2280                         if (iopkt->irp_mdl != NULL)
2281                                 panic("leaking an MDL in IoAllocateMdl()");
2282                         iopkt->irp_mdl = m;
2283                 }
2284         }
2285
2286         return (m);
2287 }
2288
2289 void
2290 IoFreeMdl(mdl *m)
2291 {
2292         if (m == NULL)
2293                 return;
2294
2295         if (m->mdl_flags & MDL_ZONE_ALLOCED)
2296                 objcache_put(mdl_cache, m);
2297         else
2298                 ExFreePool(m);
2299 }
2300
2301 static void *
2302 MmAllocateContiguousMemory(uint32_t size, uint64_t highest)
2303 {
2304         void *addr;
2305         size_t pagelength = roundup(size, PAGE_SIZE);
2306
2307         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2308
2309         return (addr);
2310 }
2311
2312 #if 0 /* XXX swildner */
2313 static void *
2314 MmAllocateContiguousMemorySpecifyCache(uint32_t size, uint64_t lowest,
2315     uint64_t highest, uint64_t boundary, enum nt_caching_type cachetype)
2316 {
2317         vm_memattr_t            memattr;
2318         void                    *ret;
2319
2320         switch (cachetype) {
2321         case MmNonCached:
2322                 memattr = VM_MEMATTR_UNCACHEABLE;
2323                 break;
2324         case MmWriteCombined:
2325                 memattr = VM_MEMATTR_WRITE_COMBINING;
2326                 break;
2327         case MmNonCachedUnordered:
2328                 memattr = VM_MEMATTR_UNCACHEABLE;
2329                 break;
2330         case MmCached:
2331         case MmHardwareCoherentCached:
2332         case MmUSWCCached:
2333         default:
2334                 memattr = VM_MEMATTR_DEFAULT;
2335                 break;
2336         }
2337
2338         ret = (void *)kmem_alloc_contig(kernel_map, size, M_ZERO | M_NOWAIT,
2339             lowest, highest, PAGE_SIZE, boundary, memattr);
2340         if (ret != NULL)
2341                 malloc_type_allocated(M_DEVBUF, round_page(size));
2342         return (ret);
2343 }
2344 #else
2345 static void *
2346 MmAllocateContiguousMemorySpecifyCache(uint32_t size, uint64_t lowest,
2347     uint64_t highest, uint64_t boundary, enum nt_caching_type cachetype)
2348 {
2349 #if 0
2350         void *addr;
2351         size_t pagelength = roundup(size, PAGE_SIZE);
2352
2353         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2354
2355         return(addr);
2356 #else
2357         panic("%s", __func__);
2358 #endif
2359 }
2360 #endif
2361
2362 static void
2363 MmFreeContiguousMemory(void *base)
2364 {
2365         ExFreePool(base);
2366 }
2367
2368 static void
2369 MmFreeContiguousMemorySpecifyCache(void *base, uint32_t size,
2370     enum nt_caching_type cachetype)
2371 {
2372         contigfree(base, size, M_DEVBUF);
2373 }
2374
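     /*
      * Return the number of bytes needed for an MDL describing the
      * given buffer: the MDL header plus one page pointer slot for
      * every page the buffer spans.
      */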
2375 static uint32_t
2376 MmSizeOfMdl(void *vaddr, size_t len)
2377 {
2378         uint32_t                l;
2379
2380         l = sizeof(struct mdl) +
2381             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2382
2383         return (l);
2384 }
2385
2386 /*
2387  * The Microsoft documentation says this routine fills in the
2388  * page array of an MDL with the _physical_ page addresses that
2389  * comprise the buffer, but we don't really want to do that here.
2390  * Instead, we just fill in the page array with the kernel virtual
2391  * addresses of the buffers.
2392  */
2393 void
2394 MmBuildMdlForNonPagedPool(mdl *m)
2395 {
2396         vm_offset_t             *mdl_pages;
2397         int                     pagecnt, i;
2398
2399         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2400
2401         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2402                 panic("not enough pages in MDL to describe buffer");
2403
2404         mdl_pages = MmGetMdlPfnArray(m);
2405
2406         for (i = 0; i < pagecnt; i++)
2407                 mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2408
2409         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2410         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2411 }
2412
2413 static void *
2414 MmMapLockedPages(mdl *buf, uint8_t accessmode)
2415 {
2416         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2417         return (MmGetMdlVirtualAddress(buf));
2418 }
2419
2420 static void *
2421 MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
2422         void *vaddr, uint32_t bugcheck, uint32_t prio)
2423 {
2424         return (MmMapLockedPages(buf, accessmode));
2425 }
2426
2427 static void
2428 MmUnmapLockedPages(void *vaddr, mdl *buf)
2429 {
2430         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2431 }
2432
2433 /*
2434  * This function has a problem in that it will break if you
2435  * compile this module without PAE and try to use it on a PAE
2436  * kernel. Unfortunately, there's no way around this at the
2437  * moment. It's slightly less broken than using pmap_kextract().
2438  * You'd think the virtual memory subsystem would help us out
2439  * here, but it doesn't.
2440  */
2441
2442 static uint64_t
2443 MmGetPhysicalAddress(void *base)
2444 {
2445         return (pmap_extract(kernel_map.pmap, (vm_offset_t)base));
2446 }
2447
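     /*
      * Look up an exported ntoskrnl routine by name: convert the
      * unicode name to ANSI and search our function table for it.
      */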
2448 void *
2449 MmGetSystemRoutineAddress(unicode_string *ustr)
2450 {
2451         ansi_string             astr;
2452
2453         if (RtlUnicodeStringToAnsiString(&astr, ustr, TRUE))
2454                 return (NULL);
2455         return (ndis_get_routine_address(ntoskrnl_functbl, astr.as_buf));
2456 }
2457
2458 uint8_t
2459 MmIsAddressValid(void *vaddr)
2460 {
2461         if (pmap_extract(kernel_map.pmap, (vm_offset_t)vaddr))
2462                 return (TRUE);
2463
2464         return (FALSE);
2465 }
2466
2467 void *
2468 MmMapIoSpace(uint64_t paddr, uint32_t len, uint32_t cachetype)
2469 {
2470         devclass_t              nexus_class;
2471         device_t                *nexus_devs, devp;
2472         int                     nexus_count = 0;
2473         device_t                matching_dev = NULL;
2474         struct resource         *res;
2475         int                     i;
2476         vm_offset_t             v;
2477
2478         /* There will always be at least one nexus. */
2479
2480         nexus_class = devclass_find("nexus");
2481         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2482
2483         for (i = 0; i < nexus_count; i++) {
2484                 devp = nexus_devs[i];
2485                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2486                 if (matching_dev)
2487                         break;
2488         }
2489
2490         kfree(nexus_devs, M_TEMP);
2491
2492         if (matching_dev == NULL)
2493                 return (NULL);
2494
2495         v = (vm_offset_t)rman_get_virtual(res);
2496         if (paddr > rman_get_start(res))
2497                 v += paddr - rman_get_start(res);
2498
2499         return ((void *)v);
2500 }
2501
2502 void
2503 MmUnmapIoSpace(void *vaddr, size_t len)
2504 {
2505 }
2506
2507
2508 static device_t
2509 ntoskrnl_finddev(device_t dev, uint64_t paddr, struct resource **res)
2510 {
2511         device_t                *children = NULL;
2512         device_t                matching_dev;
2513         int                     childcnt;
2514         struct resource         *r;
2515         struct resource_list    *rl;
2516         struct resource_list_entry      *rle;
2517         uint32_t                flags;
2518         int                     i;
2519
2520         /* We only want devices that have been successfully probed. */
2521
2522         if (device_is_alive(dev) == FALSE)
2523                 return (NULL);
2524
2525         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2526         if (rl != NULL) {
2527                 SLIST_FOREACH(rle, rl, link) {
2528                         r = rle->res;
2529
2530                         if (r == NULL)
2531                                 continue;
2532
2533                         flags = rman_get_flags(r);
2534
2535                         if (rle->type == SYS_RES_MEMORY &&
2536                             paddr >= rman_get_start(r) &&
2537                             paddr <= rman_get_end(r)) {
2538                                 if (!(flags & RF_ACTIVE))
2539                                         bus_activate_resource(dev,
2540                                             SYS_RES_MEMORY, 0, r);
2541                                 *res = r;
2542                                 return (dev);
2543                         }
2544                 }
2545         }
2546
2547         /*
2548          * If this device has children, do another
2549          * level of recursion to inspect them.
2550          */
2551
2552         device_get_children(dev, &children, &childcnt);
2553
2554         for (i = 0; i < childcnt; i++) {
2555                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2556                 if (matching_dev != NULL) {
2557                         kfree(children, M_TEMP);
2558                         return (matching_dev);
2559                 }
2560         }
2561
2562
2563         /* Won't somebody please think of the children! */
2564
2565         if (children != NULL)
2566                 kfree(children, M_TEMP);
2567
2568         return (NULL);
2569 }
2570
2571 /*
2572  * Workitems are unlike DPCs, in that they run in a thread context
2573  * at PASSIVE_LEVEL rather than at DISPATCH_LEVEL. Here we run them
2574  * in dedicated kernel threads.
2575  */
2576 static void
2577 ntoskrnl_workitem_thread(void *arg)
2578 {
2579         kdpc_queue              *kq;
2580         list_entry              *l;
2581         io_workitem             *iw;
2582         uint8_t                 irql;
2583
2584         kq = arg;
2585
2586         InitializeListHead(&kq->kq_disp);
2587         kq->kq_td = curthread;
2588         kq->kq_exit = 0;
2589         KeInitializeSpinLock(&kq->kq_lock);
2590         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2591
2592         while (1) {
2593                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2594
2595                 KeAcquireSpinLock(&kq->kq_lock, &irql);
2596
2597                 if (kq->kq_exit) {
2598                         kq->kq_exit = 0;
2599                         KeReleaseSpinLock(&kq->kq_lock, irql);
2600                         break;
2601                 }
2602
2603                 while (!IsListEmpty(&kq->kq_disp)) {
2604                         l = RemoveHeadList(&kq->kq_disp);
2605                         iw = CONTAINING_RECORD(l,
2606                             io_workitem, iw_listentry);
2607                         InitializeListHead((&iw->iw_listentry));
2608                         if (iw->iw_func == NULL)
2609                                 continue;
2610                         KeReleaseSpinLock(&kq->kq_lock, irql);
2611                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2612                         KeAcquireSpinLock(&kq->kq_lock, &irql);
2613                 }
2614
2615                 KeReleaseSpinLock(&kq->kq_lock, irql);
2616         }
2617
2618         wakeup(curthread);
2619         kthread_exit();
2620         return; /* notreached */
2621 }
2622
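     /*
      * Convert an ASCII string to an unsigned integer. A base of 0
      * means auto-detect: a leading 0b, 0o or 0x selects base 2, 8
      * or 16, otherwise base 10 is used. A leading '-' negates the
      * result.
      */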
2623 static ndis_status
2624 RtlCharToInteger(const char *src, uint32_t base, uint32_t *val)
2625 {
2626         int negative = 0;
2627         uint32_t res;
2628
2629         if (!src || !val)
2630                 return (STATUS_ACCESS_VIOLATION);
2631         while (*src != '\0' && *src <= ' ')
2632                 src++;
2633         if (*src == '+')
2634                 src++;
2635         else if (*src == '-') {
2636                 src++;
2637                 negative = 1;
2638         }
2639         if (base == 0) {
2640                 base = 10;
2641                 if (*src == '0') {
2642                         src++;
2643                         if (*src == 'b') {
2644                                 base = 2;
2645                                 src++;
2646                         } else if (*src == 'o') {
2647                                 base = 8;
2648                                 src++;
2649                         } else if (*src == 'x') {
2650                                 base = 16;
2651                                 src++;
2652                         }
2653                 }
2654         } else if (!(base == 2 || base == 8 || base == 10 || base == 16))
2655                 return (STATUS_INVALID_PARAMETER);
2656
2657         for (res = 0; *src; src++) {
2658                 int v;
2659                 if (isdigit(*src))
2660                         v = *src - '0';
2661                 else if (isxdigit(*src))
2662                         v = tolower(*src) - 'a' + 10;
2663                 else
2664                         v = base;
2665                 if (v >= base)
2666                         return (STATUS_INVALID_PARAMETER);
2667                 res = res * base + v;
2668         }
2669         *val = negative ? -res : res;
2670         return (STATUS_SUCCESS);
2671 }
2672
2673 static void
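     /*
      * Tell each workitem thread to exit and wait for it to
      * acknowledge by clearing kq_exit.
      */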
2674 ntoskrnl_destroy_workitem_threads(void)
2675 {
2676         kdpc_queue              *kq;
2677         int                     i;
2678
2679         for (i = 0; i < WORKITEM_THREADS; i++) {
2680                 kq = wq_queues + i;
2681                 kq->kq_exit = 1;
2682                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2683                 while (kq->kq_exit)
2684                         tsleep(kq->kq_td, 0, "waitiw", hz/10);
2685         }
2686 }
2687
2688 io_workitem *
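     /*
      * Allocate a workitem and bind it to one of the workitem
      * threads, chosen via the rotating wq_idx index.
      */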
2689 IoAllocateWorkItem(device_object *dobj)
2690 {
2691         io_workitem             *iw;
2692
2693         iw = objcache_get(iw_cache, M_NOWAIT);
2694         if (iw == NULL)
2695                 return (NULL);
2696
2697         InitializeListHead(&iw->iw_listentry);
2698         iw->iw_dobj = dobj;
2699
2700         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
2701         iw->iw_idx = wq_idx;
2702         WORKIDX_INC(wq_idx);
2703         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
2704
2705         return (iw);
2706 }
2707
2708 void
2709 IoFreeWorkItem(io_workitem *iw)
2710 {
2711         objcache_put(iw_cache, iw);
2712 }
2713
2714 void
2715 IoQueueWorkItem(io_workitem *iw, io_workitem_func iw_func, uint32_t qtype,
2716     void *ctx)
2717 {
2718         kdpc_queue              *kq;
2719         list_entry              *l;
2720         io_workitem             *cur;
2721         uint8_t                 irql;
2722
2723         kq = wq_queues + iw->iw_idx;
2724
2725         KeAcquireSpinLock(&kq->kq_lock, &irql);
2726
2727         /*
2728          * Traverse the list and make sure this workitem hasn't
2729          * already been inserted. Queuing the same workitem
2730          * twice will hose the list but good.
2731          */
2732
2733         l = kq->kq_disp.nle_flink;
2734         while (l != &kq->kq_disp) {
2735                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2736                 if (cur == iw) {
2737                         /* Already queued -- do nothing. */
2738                         KeReleaseSpinLock(&kq->kq_lock, irql);
2739                         return;
2740                 }
2741                 l = l->nle_flink;
2742         }
2743
2744         iw->iw_func = iw_func;
2745         iw->iw_ctx = ctx;
2746
2747         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2748         KeReleaseSpinLock(&kq->kq_lock, irql);
2749
2750         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2751 }
2752
2753 static void
2754 ntoskrnl_workitem(device_object *dobj, void *arg)
2755 {
2756         io_workitem             *iw;
2757         work_queue_item         *w;
2758         work_item_func          f;
2759
2760         iw = arg;
2761         w = (work_queue_item *)dobj;
2762         f = (work_item_func)w->wqi_func;
2763         objcache_put(iw_cache, iw);
2764         MSCALL2(f, w, w->wqi_ctx);
2765 }
2766
2767 /*
2768  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2769  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2770  * problem with ExQueueWorkItem() is that it can't guard against
2771  * the condition where a driver submits a job to the work queue and
2772  * is then unloaded before the job is able to run. IoQueueWorkItem()
2773  * acquires a reference to the device's device_object via the
2774  * object manager and retains it until after the job has completed,
2775  * which prevents the driver from being unloaded before the job
2776  * runs. (We don't currently support this behavior, though hopefully
2777  * that will change once the object manager API is fleshed out a bit.)
2778  *
2779  * Having said all that, the ExQueueWorkItem() API remains, because
2780  * there are still other parts of Windows that use it, including
2781  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2782  * We fake up the ExQueueWorkItem() API on top of our implementation
2783  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2784  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2785  * queue item (provided by the caller) into IoAllocateWorkItem()
2786  * instead of the device_object. We need to save this pointer so
2787  * we can apply a sanity check: as with the DPC queue and other
2788  * workitem queues, we can't allow the same work queue item to
2789  * be queued twice. If it's already pending, we silently return.
2790  */
2791
2792 void
2793 ExQueueWorkItem(work_queue_item *w, uint32_t qtype)
2794 {
2795         io_workitem             *iw;
2796         io_workitem_func        iwf;
2797         kdpc_queue              *kq;
2798         list_entry              *l;
2799         io_workitem             *cur;
2800         uint8_t                 irql;
2801
2802
2803         /*
2804          * We need to do a special sanity test to make sure
2805          * the ExQueueWorkItem() API isn't used to queue
2806          * the same workitem twice. Rather than checking the
2807          * io_workitem pointer itself, we test the attached
2808          * device object, which is really a pointer to the
2809          * legacy work queue item structure.
2810          */
2811
2812         kq = wq_queues + WORKITEM_LEGACY_THREAD;
2813         KeAcquireSpinLock(&kq->kq_lock, &irql);
2814         l = kq->kq_disp.nle_flink;
2815         while (l != &kq->kq_disp) {
2816                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2817                 if (cur->iw_dobj == (device_object *)w) {
2818                         /* Already queued -- do nothing. */
2819                         KeReleaseSpinLock(&kq->kq_lock, irql);
2820                         return;
2821                 }
2822                 l = l->nle_flink;
2823         }
2824         KeReleaseSpinLock(&kq->kq_lock, irql);
2825
2826         iw = IoAllocateWorkItem((device_object *)w);
2827         if (iw == NULL)
2828                 return;
2829
2830         iw->iw_idx = WORKITEM_LEGACY_THREAD;
2831         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2832         IoQueueWorkItem(iw, iwf, qtype, iw);
2833 }
2834
2835 static void
2836 RtlZeroMemory(void *dst, size_t len)
2837 {
2838         bzero(dst, len);
2839 }
2840
2841 static void
2842 RtlSecureZeroMemory(void *dst, size_t len)
2843 {
2844         memset(dst, 0, len);
2845 }
2846
2847 static void
2848 RtlFillMemory(void *dst, size_t len, uint8_t c)
2849 {
2850         memset(dst, c, len);
2851 }
2852
2853 static void
2854 RtlMoveMemory(void *dst, const void *src, size_t len)
2855 {
2856         memmove(dst, src, len);
2857 }
2858
2859 static void
2860 RtlCopyMemory(void *dst, const void *src, size_t len)
2861 {
2862         bcopy(src, dst, len);
2863 }
2864
2865 static size_t
2866 RtlCompareMemory(const void *s1, const void *s2, size_t len)
2867 {
2868         size_t                  i;
2869         uint8_t                 *m1, *m2;
2870
2871         m1 = __DECONST(uint8_t *, s1);
2872         m2 = __DECONST(uint8_t *, s2);
2873
2874         for (i = 0; i < len && m1[i] == m2[i]; i++);
2875         return (i);
2876 }
2877
2878 void
2879 RtlInitAnsiString(ansi_string *dst, char *src)
2880 {
2881         ansi_string             *a;
2882
2883         a = dst;
2884         if (a == NULL)
2885                 return;
2886         if (src == NULL) {
2887                 a->as_len = a->as_maxlen = 0;
2888                 a->as_buf = NULL;
2889         } else {
2890                 a->as_buf = src;
2891                 a->as_len = a->as_maxlen = strlen(src);
2892         }
2893 }
2894
2895 void
2896 RtlInitUnicodeString(unicode_string *dst, uint16_t *src)
2897 {
2898         unicode_string          *u;
2899         int                     i;
2900
2901         u = dst;
2902         if (u == NULL)
2903                 return;
2904         if (src == NULL) {
2905                 u->us_len = u->us_maxlen = 0;
2906                 u->us_buf = NULL;
2907         } else {
2908                 i = 0;
2909                 while(src[i] != 0)
2910                         i++;
2911                 u->us_buf = src;
2912                 u->us_len = u->us_maxlen = i * 2;
2913         }
2914 }
2915
2916 ndis_status
2917 RtlUnicodeStringToInteger(unicode_string *ustr, uint32_t base, uint32_t *val)
2918 {
2919         uint16_t                *uchr;
2920         int                     len, neg = 0;
2921         char                    abuf[64];
2922         char                    *astr;
2923
2924         uchr = ustr->us_buf;
2925         len = ustr->us_len;
2926         bzero(abuf, sizeof(abuf));
2927
2928         if ((char)((*uchr) & 0xFF) == '-') {
2929                 neg = 1;
2930                 uchr++;
2931                 len -= 2;
2932         } else if ((char)((*uchr) & 0xFF) == '+') {
2933                 neg = 0;
2934                 uchr++;
2935                 len -= 2;
2936         }
2937
2938         if (base == 0) {
2939                 if ((char)((*uchr) & 0xFF) == 'b') {
2940                         base = 2;
2941                         uchr++;
2942                         len -= 2;
2943                 } else if ((char)((*uchr) & 0xFF) == 'o') {
2944                         base = 8;
2945                         uchr++;
2946                         len -= 2;
2947                 } else if ((char)((*uchr) & 0xFF) == 'x') {
2948                         base = 16;
2949                         uchr++;
2950                         len -= 2;
2951                 } else
2952                         base = 10;
2953         }
2954
2955         astr = abuf;
2956         if (neg) {
2957                 strcpy(astr, "-");
2958                 astr++;
2959         }
2960
2961         ntoskrnl_unicode_to_ascii(uchr, astr, len);
2962         *val = strtoul(abuf, NULL, base);
2963
2964         return (STATUS_SUCCESS);
2965 }
2966
2967 void
2968 RtlFreeUnicodeString(unicode_string *ustr)
2969 {
2970         if (ustr->us_buf == NULL)
2971                 return;
2972         ExFreePool(ustr->us_buf);
2973         ustr->us_buf = NULL;
2974 }
2975
2976 void
2977 RtlFreeAnsiString(ansi_string *astr)
2978 {
2979         if (astr->as_buf == NULL)
2980                 return;
2981         ExFreePool(astr->as_buf);
2982         astr->as_buf = NULL;
2983 }
2984
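     /*
      * Minimal libc-style helpers (atoi, atol, rand, srand) for
      * drivers that import them by name.
      */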
2985 static int
2986 atoi(const char *str)
2987 {
2988         return (int)strtol(str, NULL, 10);
2989 }
2990
2991 static long
2992 atol(const char *str)
2993 {
2994         return strtol(str, NULL, 10);
2995 }
2996
2997 static int
2998 rand(void)
2999 {
3000         struct timeval          tv;
3001
3002         microtime(&tv);
3003         skrandom(tv.tv_usec);
3004         return ((int)krandom());
3005 }
3006
3007 static void
3008 srand(unsigned int seed)
3009 {
3010         skrandom(seed);
3011 }
3012
3013 static uint8_t
3014 IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
3015 {
3016         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3017                 return (TRUE);
3018         return (FALSE);
3019 }
3020
3021 static int32_t
3022 IoOpenDeviceRegistryKey(struct device_object *devobj, uint32_t type,
3023     uint32_t mask, void **key)
3024 {
3025         return (NDIS_STATUS_INVALID_DEVICE_REQUEST);
3026 }
3027
3028 static ndis_status
3029 IoGetDeviceObjectPointer(unicode_string *name, uint32_t reqaccess,
3030     void *fileobj, device_object *devobj)
3031 {
3032         return (STATUS_SUCCESS);
3033 }
3034
3035 static ndis_status
3036 IoGetDeviceProperty(device_object *devobj, uint32_t regprop, uint32_t buflen,
3037     void *prop, uint32_t *reslen)
3038 {
3039         driver_object           *drv;
3040         uint16_t                **name;
3041
3042         drv = devobj->do_drvobj;
3043
3044         switch (regprop) {
3045         case DEVPROP_DRIVER_KEYNAME:
3046                 name = prop;
3047                 *name = drv->dro_drivername.us_buf;
3048                 *reslen = drv->dro_drivername.us_len;
3049                 break;
3050         default:
3051                 return (STATUS_INVALID_PARAMETER_2);
3052                 break;
3053         }
3054
3055         return (STATUS_SUCCESS);
3056 }
3057
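     /*
      * Initialize a kmutant (Windows kernel mutex) dispatcher object.
      * The signal state starts at 1, meaning the mutex is free.
      */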
3058 static void
3059 KeInitializeMutex(kmutant *kmutex, uint32_t level)
3060 {
3061         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3062         kmutex->km_abandoned = FALSE;
3063         kmutex->km_apcdisable = 1;
3064         kmutex->km_header.dh_sigstate = 1;
3065         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3066         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3067         kmutex->km_ownerthread = NULL;
3068 }
3069
3070 static uint32_t
3071 KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
3072 {
3073         uint32_t                prevstate;
3074
3075         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3076         prevstate = kmutex->km_header.dh_sigstate;
3077         if (kmutex->km_ownerthread != curthread) {
3078                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3079                 return (STATUS_MUTANT_NOT_OWNED);
3080         }
3081
3082         kmutex->km_header.dh_sigstate++;
3083         kmutex->km_abandoned = FALSE;
3084
3085         if (kmutex->km_header.dh_sigstate == 1) {
3086                 kmutex->km_ownerthread = NULL;
3087                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3088         }
3089
3090         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3091
3092         return (prevstate);
3093 }
3094
3095 static uint32_t
3096 KeReadStateMutex(kmutant *kmutex)
3097 {
3098         return (kmutex->km_header.dh_sigstate);
3099 }
3100
3101 void
3102 KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
3103 {
3104         InitializeListHead((&kevent->k_header.dh_waitlisthead));
3105         kevent->k_header.dh_sigstate = state;
3106         if (type == EVENT_TYPE_NOTIFY)
3107                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3108         else
3109                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3110         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3111 }
3112
3113 uint32_t
3114 KeResetEvent(nt_kevent *kevent)
3115 {
3116         uint32_t                prevstate;
3117
3118         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3119         prevstate = kevent->k_header.dh_sigstate;
3120         kevent->k_header.dh_sigstate = FALSE;
3121         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3122
3123         return (prevstate);
3124 }
3125
3126 uint32_t
3127 KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
3128 {
3129         uint32_t                prevstate;
3130         wait_block              *w;
3131         nt_dispatch_header      *dh;
3132         wb_ext                  *we;
3133
3134         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3135         prevstate = kevent->k_header.dh_sigstate;
3136         dh = &kevent->k_header;
3137
3138         if (IsListEmpty(&dh->dh_waitlisthead))
3139                 /*
3140                  * If there's nobody in the waitlist, just set
3141                  * the state to signalled.
3142                  */
3143                 dh->dh_sigstate = 1;
3144         else {
3145                 /*
3146                  * Get the first waiter. If this is a synchronization
3147                  * event, just wake up that one thread (don't bother
3148                  * setting the state to signalled since we're supposed
3149                  * to automatically clear synchronization events anyway).
3150                  *
3151                  * If it's a notification event, or the first
3152                  * waiter is doing a WAITTYPE_ALL wait, go through
3153                  * the full wait satisfaction process.
3154                  */
3155                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3156                     wait_block, wb_waitlist);
3157                 we = w->wb_ext;
3158                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3159                     w->wb_waittype == WAITTYPE_ALL) {
3160                         if (prevstate == 0) {
3161                                 dh->dh_sigstate = 1;
3162                                 ntoskrnl_waittest(dh, increment);
3163                         }
3164                 } else {
3165                         w->wb_awakened |= TRUE;
3166                         cv_broadcastpri(&we->we_cv,
3167                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
3168                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
3169                 }
3170         }
3171
3172         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3173
3174         return (prevstate);
3175 }
3176
3177 void
3178 KeClearEvent(nt_kevent *kevent)
3179 {
3180         kevent->k_header.dh_sigstate = FALSE;
3181 }
3182
3183 uint32_t
3184 KeReadStateEvent(nt_kevent *kevent)
3185 {
3186         return (kevent->k_header.dh_sigstate);
3187 }
3188
3189 /*
3190  * The object manager in Windows is responsible for managing
3191  * references and access to various types of objects, including
3192  * device_objects, events, threads, timers and so on. However,
3193  * there's a difference in the way objects are handled in user
3194  * mode versus kernel mode.
3195  *
3196  * In user mode (i.e. Win32 applications), all objects are
3197  * managed by the object manager. For example, when you create
3198  * a timer or event object, you actually end up with an
3199  * object_header (for the object manager's bookkeeping
3200  * purposes) and an object body (which contains the actual object
3201  * structure, e.g. ktimer, kevent, etc...). This allows Windows
3202  * to manage resource quotas and to enforce access restrictions
3203  * on basically every kind of system object handled by the kernel.
3204  *
3205  * However, in kernel mode, you only end up using the object
3206  * manager some of the time. For example, in a driver, you create
3207  * a timer object by simply allocating the memory for a ktimer
3208  * structure and initializing it with KeInitializeTimer(). Hence,
3209  * the timer has no object_header and no reference counting or
3210  * security/resource checks are done on it. The assumption in
3211  * this case is that if you're running in kernel mode, you know
3212  * what you're doing, and you're already running at an elevated
3213  * privilege level anyway.
3214  *
3215  * There are some exceptions to this. The two most important ones
3216  * for our purposes are device_objects and threads. We need to use
3217  * the object manager to do reference counting on device_objects,
3218  * and for threads, you can only get a pointer to a thread's
3219  * dispatch header by using ObReferenceObjectByHandle() on the
3220  * handle returned by PsCreateSystemThread().
3221  */
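
/*
 * As a concrete illustration of the kernel-mode case described above
 * (a sketch only, not compiled into the emulator; the example_ name is
 * hypothetical): a driver-created timer is nothing but a bare ktimer,
 * with no object_header and no reference counting around it.
 */
#if 0
static void
example_bare_timer(void)
{
        ktimer                  timer;          /* no object_header */

        KeInitializeTimer(&timer);

        /* Negative due time: relative, in 100ns units (here 500ms). */
        KeSetTimer(&timer, (int64_t)-5000000, NULL);
        KeWaitForSingleObject(&timer, 0, 0, FALSE, NULL);
}
#endif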
3222
3223 static ndis_status
3224 ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
3225         uint8_t accessmode, void **object, void **handleinfo)
3226 {
3227         nt_objref               *nr;
3228
3229         nr = kmalloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3230         if (nr == NULL)
3231                 return (STATUS_INSUFFICIENT_RESOURCES);
3232
3233         InitializeListHead((&nr->no_dh.dh_waitlisthead));
3234         nr->no_obj = handle;
3235         nr->no_dh.dh_type = DISP_TYPE_THREAD;
3236         nr->no_dh.dh_sigstate = 0;
3237         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3238             sizeof(uint32_t));
3239         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3240         *object = nr;
3241
3242         return (STATUS_SUCCESS);
3243 }
3244
3245 static void
3246 ObfDereferenceObject(void *object)
3247 {
3248         nt_objref               *nr;
3249
3250         nr = object;
3251         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3252         kfree(nr, M_DEVBUF);
3253 }
3254
3255 static uint32_t
3256 ZwClose(ndis_handle handle)
3257 {
3258         return (STATUS_SUCCESS);
3259 }
3260
3261 static uint32_t
3262 WmiQueryTraceInformation(uint32_t traceclass, void *traceinfo,
3263     uint32_t infolen, uint32_t reqlen, void *buf)
3264 {
3265         return (STATUS_NOT_FOUND);
3266 }
3267
3268 static uint32_t
3269 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3270         void *guid, uint16_t messagenum, ...)
3271 {
3272         return (STATUS_SUCCESS);
3273 }
3274
3275 static uint32_t
3276 IoWMIRegistrationControl(device_object *dobj, uint32_t action)
3277 {
3278         return (STATUS_SUCCESS);
3279 }
3280
3281 /*
3282  * This is here just in case the thread returns without calling
3283  * PsTerminateSystemThread().
3284  */
3285 static void
3286 ntoskrnl_thrfunc(void *arg)
3287 {
3288         thread_context          *thrctx;
3289         uint32_t (*tfunc)(void *);
3290         void                    *tctx;
3291         uint32_t                rval;
3292
3293         thrctx = arg;
3294         tfunc = thrctx->tc_thrfunc;
3295         tctx = thrctx->tc_thrctx;
3296         kfree(thrctx, M_TEMP);
3297
3298         rval = MSCALL1(tfunc, tctx);
3299
3300         PsTerminateSystemThread(rval);
3301         return; /* notreached */
3302 }
3303
3304 static ndis_status
3305 PsCreateSystemThread(ndis_handle *handle, uint32_t reqaccess, void *objattrs,
3306     ndis_handle phandle, void *clientid, void *thrfunc, void *thrctx)
3307 {
3308         int                     error;
3309         thread_context          *tc;
3310         struct thread           *p;
3311
3312         tc = kmalloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3313         if (tc == NULL)
3314                 return (STATUS_INSUFFICIENT_RESOURCES);
3315
3316         tc->tc_thrctx = thrctx;
3317         tc->tc_thrfunc = thrfunc;
3318
3319         error = kthread_create(ntoskrnl_thrfunc, tc, &p, "Win kthread %d",
3320             ntoskrnl_kth);
3321
3322         if (error) {
3323                 kfree(tc, M_TEMP);
3324                 return (STATUS_INSUFFICIENT_RESOURCES);
3325         }
3326
3327         *handle = p;
3328         ntoskrnl_kth++;
3329
3330         return (STATUS_SUCCESS);
3331 }
3332
3333 /*
3334  * In Windows, the exit of a thread is an event that you're allowed
3335  * to wait on, assuming you've obtained a reference to the thread using
3336  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3337  * simulate this behavior is to register each thread we create in a
3338  * reference list, and if someone holds a reference to us, we poke
3339  * them.
3340  */
3341 static ndis_status
3342 PsTerminateSystemThread(ndis_status status)
3343 {
3344         struct nt_objref        *nr;
3345
3346         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3347         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3348                 if (nr->no_obj != curthread)
3349                         continue;
3350                 nr->no_dh.dh_sigstate = 1;
3351                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3352                 break;
3353         }
3354         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3355
3356         ntoskrnl_kth--;
3357
3358         wakeup(curthread);
3359         kthread_exit();
3360         return (0);     /* notreached */
3361 }
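
/*
 * Illustrative sketch (not compiled): how a driver would combine
 * PsCreateSystemThread(), ObReferenceObjectByHandle() and
 * KeWaitForSingleObject() to wait for a worker thread to exit, as
 * described in the comments above. The example_ names are hypothetical;
 * a real Windows driver reaches these routines through the import
 * patching machinery rather than by direct C calls.
 */
#if 0
static uint32_t
example_worker(void *ctx)
{
        /* ... do work ... */
        PsTerminateSystemThread(STATUS_SUCCESS);
        return (STATUS_SUCCESS);        /* notreached */
}

static void
example_wait_for_worker(void)
{
        ndis_handle             handle;
        void                    *tobj;

        PsCreateSystemThread(&handle, 0, NULL, NULL, NULL,
            (void *)example_worker, NULL);

        /* Only the referenced object has a dispatch header to wait on. */
        ObReferenceObjectByHandle(handle, 0, NULL, 0, &tobj, NULL);
        KeWaitForSingleObject(tobj, 0, 0, FALSE, NULL);
        ObfDereferenceObject(tobj);
        ZwClose(handle);
}
#endif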
3362
3363 static uint32_t
3364 DbgPrint(char *fmt, ...)
3365 {
3366         __va_list               ap;
3367
3368         if (bootverbose) {
3369                 __va_start(ap, fmt);
3370                 kvprintf(fmt, ap);
3371                 __va_end(ap);
3372         }
3373
3374         return (STATUS_SUCCESS);
3375 }
3376
3377 static void
3378 DbgBreakPoint(void)
3379 {
3380
3381         Debugger("DbgBreakPoint(): breakpoint");
3382 }
3383
3384 static void
3385 KeBugCheckEx(uint32_t code, u_long param1, u_long param2, u_long param3,
3386     u_long param4)
3387 {
3388         panic("KeBugCheckEx: STOP 0x%X", code);
3389 }
3390
3391 static void
3392 ntoskrnl_timercall(void *arg)
3393 {
3394         ktimer                  *timer;
3395         struct timeval          tv;
3396         kdpc                    *dpc;
3397
3398         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3399
3400         timer = arg;
3401
3402 #ifdef NTOSKRNL_DEBUG_TIMERS
3403         ntoskrnl_timer_fires++;
3404 #endif
3405         ntoskrnl_remove_timer(timer);
3406
3407         /*
3408          * This should never happen, but complain
3409          * if it does.
3410          */
3411
3412         if (timer->k_header.dh_inserted == FALSE) {
3413                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3414                 kprintf("NTOS: timer %p fired even though "
3415                     "it was canceled\n", timer);
3416                 return;
3417         }
3418
3419         /* Mark the timer as no longer being on the timer queue. */
3420
3421         timer->k_header.dh_inserted = FALSE;
3422
3423         /* Now signal the object and satisfy any waits on it. */
3424
3425         timer->k_header.dh_sigstate = 1;
3426         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3427
3428         /*
3429          * If this is a periodic timer, re-arm it
3430          * so it will fire again. We do this before
3431          * queueing any associated DPC because the
3432          * DPC might cancel the timer, in which case
3433          * it would be wrong for us to re-arm it
3434          * afterwards.
3435          */
3436
3437         if (timer->k_period) {
3438                 tv.tv_sec = 0;
3439                 tv.tv_usec = timer->k_period * 1000;
3440                 timer->k_header.dh_inserted = TRUE;
3441                 ntoskrnl_insert_timer(timer, tvtohz_high(&tv));
3442 #ifdef NTOSKRNL_DEBUG_TIMERS
3443                 ntoskrnl_timer_reloads++;
3444 #endif
3445         }
3446
3447         dpc = timer->k_dpc;
3448
3449         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3450
3451         /* If there's a DPC associated with the timer, queue it up. */
3452
3453         if (dpc != NULL)
3454                 KeInsertQueueDpc(dpc, NULL, NULL);
3455 }
3456
3457 #ifdef NTOSKRNL_DEBUG_TIMERS
3458 static int
3459 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3460 {
3461         int                     ret;
3462
3463         ret = 0;
3464         ntoskrnl_show_timers();
3465         return (sysctl_handle_int(oidp, &ret, 0, req));
3466 }
3467
3468 static void
3469 ntoskrnl_show_timers(void)
3470 {
3471         int                     i = 0;
3472         list_entry              *l;
3473
3474         mtx_spinlock(&ntoskrnl_calllock);
3475         l = ntoskrnl_calllist.nle_flink;
3476         while(l != &ntoskrnl_calllist) {
3477                 i++;
3478                 l = l->nle_flink;
3479         }
3480         mtx_spinunlock(&ntoskrnl_calllock);
3481
3482         kprintf("\n");
3483         kprintf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3484         kprintf("timer sets: %qu\n", ntoskrnl_timer_sets);
3485         kprintf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3486         kprintf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3487         kprintf("timer fires: %qu\n", ntoskrnl_timer_fires);
3488         kprintf("\n");
3489 }
3490 #endif
3491
3492 /*
3493  * Must be called with dispatcher lock held.
3494  */
3495
3496 static void
3497 ntoskrnl_insert_timer(ktimer *timer, int ticks)
3498 {
3499         callout_entry           *e;
3500         list_entry              *l;
3501         struct callout          *c;
3502
3503         /*
3504          * Try to grab a callout entry from the preallocated pool.
3505          */
3506         mtx_spinlock(&ntoskrnl_calllock);
3507         if (IsListEmpty(&ntoskrnl_calllist)) {
3508                 mtx_spinunlock(&ntoskrnl_calllock);
3509 #ifdef NTOSKRNL_DEBUG_TIMERS
3510                 ntoskrnl_show_timers();
3511 #endif
3512                 panic("out of timers!");
3513         }
3514         l = RemoveHeadList(&ntoskrnl_calllist);
3515         mtx_spinunlock(&ntoskrnl_calllock);
3516
3517         e = CONTAINING_RECORD(l, callout_entry, ce_list);
3518         c = &e->ce_callout;
3519
3520         timer->k_callout = c;
3521
3522         callout_init_mp(c);
3523         callout_reset(c, ticks, ntoskrnl_timercall, timer);
3524 }
3525
3526 static void
3527 ntoskrnl_remove_timer(ktimer *timer)
3528 {
3529         callout_entry           *e;
3530
3531         e = (callout_entry *)timer->k_callout;
3532         callout_stop(timer->k_callout);
3533
3534         mtx_spinlock(&ntoskrnl_calllock);
3535         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3536         mtx_spinunlock(&ntoskrnl_calllock);
3537 }
3538
3539 void
3540 KeInitializeTimer(ktimer *timer)
3541 {
3542         if (timer == NULL)
3543                 return;
3544
3545         KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3546 }
3547
3548 void
3549 KeInitializeTimerEx(ktimer *timer, uint32_t type)
3550 {
3551         if (timer == NULL)
3552                 return;
3553
3554         bzero((char *)timer, sizeof(ktimer));
3555         InitializeListHead((&timer->k_header.dh_waitlisthead));
3556         timer->k_header.dh_sigstate = FALSE;
3557         timer->k_header.dh_inserted = FALSE;
3558         if (type == EVENT_TYPE_NOTIFY)
3559                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3560         else
3561                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3562         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3563 }
3564
3565 /*
3566  * DPC subsystem. A Windows Deferred Procedure Call has the following
3567  * properties:
3568  * - It runs at DISPATCH_LEVEL.
3569  * - It can have one of 3 importance values that control when it
3570  *   runs relative to other DPCs in the queue.
3571  * - On SMP systems, it can be set to run on a specific processor.
3572  * In order to satisfy the last property, we create a DPC thread for
3573  * each CPU in the system and bind it to that CPU. Each thread
3574  * maintains a single dispatch queue; a DPC's importance decides
3575  * whether it is inserted at the head or the tail of that queue.
3576  *
3577  * In Windows, interrupt handlers run as DPCs. (Not to be confused
3578  * with ISRs, which run in interrupt context and can preempt DPCs.)
3579  * Interrupt DPCs are given the highest importance so that they'll
3580  * take precedence over timers and other things.
3581  */
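
/*
 * Illustrative sketch (not compiled into the emulator) of how a driver
 * sets up and queues a DPC using the routines below. The example_
 * names are hypothetical.
 */
#if 0
static void
example_dpc_func(kdpc *dpc, void *ctx, void *sysarg1, void *sysarg2)
{
        /* Runs in the per-CPU DPC thread, i.e. at "DISPATCH_LEVEL". */
}

static void
example_dpc_usage(void)
{
        static kdpc             dpc;    /* must remain valid until it runs */

        KeInitializeDpc(&dpc, (void *)example_dpc_func, NULL);
        KeSetImportanceDpc(&dpc, KDPC_IMPORTANCE_HIGH);
        KeSetTargetProcessorDpc(&dpc, 0);
        KeInsertQueueDpc(&dpc, NULL, NULL);

        /* A DPC that has not started executing can still be pulled back. */
        if (KeRemoveQueueDpc(&dpc) == TRUE) {
                /* it was still queued and will not run */
        }
}
#endif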
3582
3583 static void
3584 ntoskrnl_dpc_thread(void *arg)
3585 {
3586         kdpc_queue              *kq;
3587         kdpc                    *d;
3588         list_entry              *l;
3589         uint8_t                 irql;
3590
3591         kq = arg;
3592
3593         InitializeListHead(&kq->kq_disp);
3594         kq->kq_td = curthread;
3595         kq->kq_exit = 0;
3596         kq->kq_running = FALSE;
3597         KeInitializeSpinLock(&kq->kq_lock);
3598         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3599         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3600
3601         /*
3602          * Elevate our priority. DPCs are used to run interrupt
3603          * handlers, and they should trigger as soon as possible
3604          * once scheduled by an ISR.
3605          */
3606
3607 #ifdef NTOSKRNL_MULTIPLE_DPCS
3608         sched_bind(curthread, kq->kq_cpu);
3609 #endif
3610         lwkt_setpri_self(TDPRI_INT_HIGH);
3611
3612         while (1) {
3613                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3614
3615                 KeAcquireSpinLock(&kq->kq_lock, &irql);
3616
3617                 if (kq->kq_exit) {
3618                         kq->kq_exit = 0;
3619                         KeReleaseSpinLock(&kq->kq_lock, irql);
3620                         break;
3621                 }
3622
3623                 kq->kq_running = TRUE;
3624
3625                 while (!IsListEmpty(&kq->kq_disp)) {
3626                         l = RemoveHeadList((&kq->kq_disp));
3627                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3628                         InitializeListHead((&d->k_dpclistentry));
3629                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3630                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3631                             d->k_sysarg1, d->k_sysarg2);
3632                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3633                 }
3634
3635                 kq->kq_running = FALSE;
3636
3637                 KeReleaseSpinLock(&kq->kq_lock, irql);
3638
3639                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3640         }
3641
3642         wakeup(curthread);
3643         kthread_exit();
3644         return; /* notreached */
3645 }
3646
3647 static void
3648 ntoskrnl_destroy_dpc_threads(void)
3649 {
3650         kdpc_queue              *kq;
3651         kdpc                    dpc;
3652         int                     i;
3653
3654         kq = kq_queues;
3655 #ifdef NTOSKRNL_MULTIPLE_DPCS
3656         for (i = 0; i < ncpus; i++) {
3657 #else
3658         for (i = 0; i < 1; i++) {
3659 #endif
3660                 kq = kq_queues + i;
3661
3662                 kq->kq_exit = 1;
3663                 KeInitializeDpc(&dpc, NULL, NULL);
3664                 KeSetTargetProcessorDpc(&dpc, i);
3665                 KeInsertQueueDpc(&dpc, NULL, NULL);
3666                 while (kq->kq_exit)
3667                         tsleep(kq->kq_td, 0, "dpcw", hz/10);
3668         }
3669 }
3670
3671 static uint8_t
3672 ntoskrnl_insert_dpc(list_entry *head, kdpc *dpc)
3673 {
3674         list_entry              *l;
3675         kdpc                    *d;
3676
3677         l = head->nle_flink;
3678         while (l != head) {
3679                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3680                 if (d == dpc)
3681                         return (FALSE);
3682                 l = l->nle_flink;
3683         }
3684
3685         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3686                 InsertTailList((head), (&dpc->k_dpclistentry));
3687         else
3688                 InsertHeadList((head), (&dpc->k_dpclistentry));
3689
3690         return (TRUE);
3691 }
3692
3693 void
3694 KeInitializeDpc(kdpc *dpc, void *dpcfunc, void *dpcctx)
3695 {
3696
3697         if (dpc == NULL)
3698                 return;
3699
3700         dpc->k_deferedfunc = dpcfunc;
3701         dpc->k_deferredctx = dpcctx;
3702         dpc->k_num = KDPC_CPU_DEFAULT;
3703         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3704         InitializeListHead((&dpc->k_dpclistentry));
3705 }
3706
3707 uint8_t
3708 KeInsertQueueDpc(kdpc *dpc, void *sysarg1, void *sysarg2)
3709 {
3710         kdpc_queue              *kq;
3711         uint8_t                 r;
3712         uint8_t                 irql;
3713
3714         if (dpc == NULL)
3715                 return (FALSE);
3716
3717         kq = kq_queues;
3718
3719 #ifdef NTOSKRNL_MULTIPLE_DPCS
3720         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3721
3722         /*
3723          * By default, the DPC is queued to run on the same CPU
3724          * that scheduled it.
3725          */
3726
3727         if (dpc->k_num == KDPC_CPU_DEFAULT)
3728                 kq += curthread->td_oncpu;
3729         else
3730                 kq += dpc->k_num;
3731         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3732 #else
3733         KeAcquireSpinLock(&kq->kq_lock, &irql);
3734 #endif
3735
3736         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3737         if (r == TRUE) {
3738                 dpc->k_sysarg1 = sysarg1;
3739                 dpc->k_sysarg2 = sysarg2;
3740         }
3741         KeReleaseSpinLock(&kq->kq_lock, irql);
3742
3743         if (r == FALSE)
3744                 return (r);
3745
3746         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3747
3748         return (r);
3749 }
3750
3751 uint8_t
3752 KeRemoveQueueDpc(kdpc *dpc)
3753 {
3754         kdpc_queue              *kq;
3755         uint8_t                 irql;
3756
3757         if (dpc == NULL)
3758                 return (FALSE);
3759
3760 #ifdef NTOSKRNL_MULTIPLE_DPCS
3761         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3762
3763         kq = kq_queues + dpc->k_num;
3764
3765         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3766 #else
3767         kq = kq_queues;
3768         KeAcquireSpinLock(&kq->kq_lock, &irql);
3769 #endif
3770
3771         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
3772                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3773                 KeLowerIrql(irql);
3774                 return (FALSE);
3775         }
3776
3777         RemoveEntryList((&dpc->k_dpclistentry));
3778         InitializeListHead((&dpc->k_dpclistentry));
3779
3780         KeReleaseSpinLock(&kq->kq_lock, irql);
3781
3782         return (TRUE);
3783 }
3784
3785 void
3786 KeSetImportanceDpc(kdpc *dpc, uint32_t imp)
3787 {
3788         if (imp != KDPC_IMPORTANCE_LOW &&
3789             imp != KDPC_IMPORTANCE_MEDIUM &&
3790             imp != KDPC_IMPORTANCE_HIGH)
3791                 return;
3792
3793         dpc->k_importance = (uint8_t)imp;
3794 }
3795
3796 void
3797 KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
3798 {
3799         if (cpu >= ncpus)
3800                 return;
3801
3802         dpc->k_num = cpu;
3803 }
3804
3805 void
3806 KeFlushQueuedDpcs(void)
3807 {
3808         kdpc_queue              *kq;
3809         int                     i;
3810
3811         /*
3812          * Poke each DPC queue and wait
3813          * for it to drain.
3814          */
3815
3816 #ifdef NTOSKRNL_MULTIPLE_DPCS
3817         for (i = 0; i < ncpus; i++) {
3818 #else
3819         for (i = 0; i < 1; i++) {
3820 #endif
3821                 kq = kq_queues + i;
3822                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3823                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
3824         }
3825 }
3826
3827 uint32_t
3828 KeGetCurrentProcessorNumber(void)
3829 {
3830         return (curthread->td_gd->gd_cpuid);
3831 }
3832
3833 uint8_t
3834 KeSetTimerEx(ktimer *timer, int64_t duetime, uint32_t period, kdpc *dpc)
3835 {
3836         struct timeval          tv;
3837         uint64_t                curtime;
3838         uint8_t                 pending;
3839
3840         if (timer == NULL)
3841                 return (FALSE);
3842
3843         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3844
3845         if (timer->k_header.dh_inserted == TRUE) {
3846                 ntoskrnl_remove_timer(timer);
3847 #ifdef NTOSKRNL_DEBUG_TIMERS
3848                 ntoskrnl_timer_cancels++;
3849 #endif
3850                 timer->k_header.dh_inserted = FALSE;
3851                 pending = TRUE;
3852         } else
3853                 pending = FALSE;
3854
3855         timer->k_duetime = duetime;
3856         timer->k_period = period;
3857         timer->k_header.dh_sigstate = FALSE;
3858         timer->k_dpc = dpc;
3859
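        /*
         * Windows due times are in 100-nanosecond units: a negative
         * value is an interval relative to now, a positive value is
         * an absolute system time (since January 1, 1601), which is
         * why it is compared against ntoskrnl_time() below.
         */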
3860         if (duetime < 0) {
3861                 tv.tv_sec = - (duetime) / 10000000;
3862                 tv.tv_usec = (- (duetime) / 10) -
3863                     (tv.tv_sec * 1000000);
3864         } else {
3865                 ntoskrnl_time(&curtime);
3866                 if (duetime < curtime)
3867                         tv.tv_sec = tv.tv_usec = 0;
3868                 else {
3869                         tv.tv_sec = ((duetime) - curtime) / 10000000;
3870                         tv.tv_usec = ((duetime) - curtime) / 10 -
3871                             (tv.tv_sec * 1000000);
3872                 }
3873         }
3874
3875         timer->k_header.dh_inserted = TRUE;
3876         ntoskrnl_insert_timer(timer, tvtohz_high(&tv));
3877 #ifdef NTOSKRNL_DEBUG_TIMERS
3878         ntoskrnl_timer_sets++;
3879 #endif
3880
3881         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3882
3883         return (pending);
3884 }
3885
3886 uint8_t
3887 KeSetTimer(ktimer *timer, int64_t duetime, kdpc *dpc)
3888 {
3889         return (KeSetTimerEx(timer, duetime, 0, dpc));
3890 }
3891
3892 /*
3893  * The Windows DDK documentation seems to say that cancelling a
3894  * timer that has a DPC also cancels the DPC, but it doesn't: a DPC
3895  * already queued by the timer still runs (see the sketch below).
3896  */
3897
3898 uint8_t
3899 KeCancelTimer(ktimer *timer)
3900 {
3901         uint8_t                 pending;
3902
3903         if (timer == NULL)
3904                 return (FALSE);
3905
3906         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3907
3908         pending = timer->k_header.dh_inserted;
3909
3910         if (timer->k_header.dh_inserted == TRUE) {
3911                 timer->k_header.dh_inserted = FALSE;
3912                 ntoskrnl_remove_timer(timer);
3913 #ifdef NTOSKRNL_DEBUG_TIMERS
3914                 ntoskrnl_timer_cancels++;
3915 #endif
3916         }
3917
3918         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3919
3920         return (pending);
3921 }
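
/*
 * Illustrative sketch (not compiled): because of the behavior noted
 * above, a driver that wants its timer DPC to be neither queued nor
 * running has to do the extra work itself. The example_ name is
 * hypothetical.
 */
#if 0
static void
example_timer_teardown(ktimer *timer, kdpc *dpc)
{
        /* Stop the timer; this does not touch an already-queued DPC. */
        KeCancelTimer(timer);

        /* Pull the DPC off its queue if it has not started yet. */
        KeRemoveQueueDpc(dpc);

        /* Wait for a DPC that may already be executing to finish. */
        KeFlushQueuedDpcs();
}
#endif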
3922
3923 uint8_t
3924 KeReadStateTimer(ktimer *timer)
3925 {
3926         return (timer->k_header.dh_sigstate);
3927 }
3928
3929 static int32_t
3930 KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)
3931 {
3932         ktimer                  timer;
3933
3934         if (wait_mode != 0)
3935                 panic("invalid wait_mode %d", wait_mode);
3936
3937         KeInitializeTimer(&timer);
3938         KeSetTimer(&timer, *interval, NULL);
3939         KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);
3940
3941         return STATUS_SUCCESS;
3942 }
3943
3944 static uint64_t
3945 KeQueryInterruptTime(void)
3946 {
3947         int ticks;
3948         struct timeval tv;
3949
3950         getmicrouptime(&tv);
3951
3952         ticks = tvtohz_high(&tv);
3953
3954         return ((uint64_t)ticks * ((10000000 + hz - 1) / hz));
3955 }
3956
3957 static struct thread *
3958 KeGetCurrentThread(void)
3959 {
3960
3961         return curthread;
3962 }
3963
3964 static int32_t
3965 KeSetPriorityThread(struct thread *td, int32_t pri)
3966 {
3967         int32_t old;
3968
3969         if (td == NULL)
3970                 return LOW_REALTIME_PRIORITY;
3971
3972         if (td->td_pri >= TDPRI_INT_HIGH)
3973                 old = HIGH_PRIORITY;
3974         else if (td->td_pri <= TDPRI_IDLE_WORK)
3975                 old = LOW_PRIORITY;
3976         else
3977                 old = LOW_REALTIME_PRIORITY;
3978
3979         if (pri == HIGH_PRIORITY)
3980                 lwkt_setpri(td, TDPRI_INT_HIGH);
3981         if (pri == LOW_REALTIME_PRIORITY)
3982                 lwkt_setpri(td, TDPRI_SOFT_TIMER);
3983         if (pri == LOW_PRIORITY)
3984                 lwkt_setpri(td, TDPRI_IDLE_WORK);
3985
3986         return old;
3987 }
3988
3989 static void
3990 dummy(void)
3991 {
3992         kprintf("ntoskrnl dummy called...\n");
3993 }
3994
3995
3996 image_patch_table ntoskrnl_functbl[] = {
3997         IMPORT_SFUNC(RtlZeroMemory, 2),
3998         IMPORT_SFUNC(RtlSecureZeroMemory, 2),
3999         IMPORT_SFUNC(RtlFillMemory, 3),
4000         IMPORT_SFUNC(RtlMoveMemory, 3),
4001         IMPORT_SFUNC(RtlCharToInteger, 3),
4002         IMPORT_SFUNC(RtlCopyMemory, 3),
4003         IMPORT_SFUNC(RtlCopyString, 2),
4004         IMPORT_SFUNC(RtlCompareMemory, 3),
4005         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4006         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4007         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4008         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4009         IMPORT_SFUNC(RtlInitAnsiString, 2),
4010         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4011         IMPORT_SFUNC(RtlInitUnicodeString, 2),
4012         IMPORT_SFUNC(RtlFreeAnsiString, 1),
4013         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4014         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4015         IMPORT_CFUNC_MAP(sprintf, ksprintf, 0),
4016         IMPORT_CFUNC_MAP(vsprintf, kvsprintf, 0),
4017         IMPORT_CFUNC_MAP(_snprintf, ksnprintf, 0),
4018         IMPORT_CFUNC_MAP(_vsnprintf, kvsnprintf, 0),
4019         IMPORT_CFUNC(DbgPrint, 0),
4020         IMPORT_SFUNC(DbgBreakPoint, 0),
4021         IMPORT_SFUNC(KeBugCheckEx, 5),
4022         IMPORT_CFUNC(strncmp, 0),
4023         IMPORT_CFUNC(strcmp, 0),
4024         IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4025         IMPORT_CFUNC(strncpy, 0),
4026         IMPORT_CFUNC(strcpy, 0),
4027         IMPORT_CFUNC(strlen, 0),
4028         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4029         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4030         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4031         IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
4032         IMPORT_CFUNC_MAP(strchr, index, 0),
4033         IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4034         IMPORT_CFUNC(memcpy, 0),
4035         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4036         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4037         IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4038         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4039         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4040         IMPORT_FFUNC(IofCallDriver, 2),
4041         IMPORT_FFUNC(IofCompleteRequest, 2),
4042         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4043         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4044         IMPORT_SFUNC(IoCancelIrp, 1),
4045         IMPORT_SFUNC(IoConnectInterrupt, 11),
4046         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4047         IMPORT_SFUNC(IoCreateDevice, 7),
4048         IMPORT_SFUNC(IoDeleteDevice, 1),
4049         IMPORT_SFUNC(IoGetAttachedDevice, 1),
4050         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4051         IMPORT_SFUNC(IoDetachDevice, 1),
4052         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4053         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4054         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4055         IMPORT_SFUNC(IoAllocateIrp, 2),
4056         IMPORT_SFUNC(IoReuseIrp, 2),
4057         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4058         IMPORT_SFUNC(IoFreeIrp, 1),
4059         IMPORT_SFUNC(IoInitializeIrp, 3),
4060         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4061         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4062         IMPORT_SFUNC(KeSynchronizeExecution, 3),
4063         IMPORT_SFUNC(KeWaitForSingleObject, 5),
4064         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4065         IMPORT_SFUNC(_allmul, 4),
4066         IMPORT_SFUNC(_alldiv, 4),
4067         IMPORT_SFUNC(_allrem, 4),
4068         IMPORT_RFUNC(_allshr, 0),
4069         IMPORT_RFUNC(_allshl, 0),
4070         IMPORT_SFUNC(_aullmul, 4),
4071         IMPORT_SFUNC(_aulldiv, 4),
4072         IMPORT_SFUNC(_aullrem, 4),
4073         IMPORT_RFUNC(_aullshr, 0),
4074         IMPORT_RFUNC(_aullshl, 0),
4075         IMPORT_CFUNC(atoi, 0),
4076         IMPORT_CFUNC(atol, 0),
4077         IMPORT_CFUNC(rand, 0),
4078         IMPORT_CFUNC(srand, 0),
4079         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4080         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4081         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4082         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4083         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4084         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4085         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4086         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4087         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4088         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4089         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4090         IMPORT_FFUNC(InitializeSListHead, 1),
4091         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4092         IMPORT_SFUNC(ExQueryDepthSList, 1),
4093         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4094                 InterlockedPopEntrySList, 1),
4095         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4096                 InterlockedPushEntrySList, 2),
4097         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4098         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4099         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4100         IMPORT_SFUNC(ExFreePoolWithTag, 2),
4101         IMPORT_SFUNC(ExFreePool, 1),
4102         /*
4103          * For AMD64, we can get away with just mapping
4104          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4105          * because the calling conventions end up being the same.
4106          */
4107         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4108         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4109         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4110         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4111         IMPORT_FFUNC(InterlockedIncrement, 1),
4112         IMPORT_FFUNC(InterlockedDecrement, 1),
4113         IMPORT_FFUNC(InterlockedExchange, 2),
4114         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4115         IMPORT_SFUNC(IoAllocateMdl, 5),
4116         IMPORT_SFUNC(IoFreeMdl, 1),
4117         IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1),
4118         IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3),
4119         IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4120         IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4121         IMPORT_SFUNC(MmSizeOfMdl, 1),
4122         IMPORT_SFUNC(MmMapLockedPages, 2),
4123         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4124         IMPORT_SFUNC(MmUnmapLockedPages, 2),
4125         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4126         IMPORT_SFUNC(MmGetPhysicalAddress, 1),
4127         IMPORT_SFUNC(MmGetSystemRoutineAddress, 1),
4128         IMPORT_SFUNC(MmIsAddressValid, 1),
4129         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4130         IMPORT_SFUNC(MmUnmapIoSpace, 2),
4131         IMPORT_SFUNC(KeInitializeSpinLock, 1),
4132         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4133         IMPORT_SFUNC(IoOpenDeviceRegistryKey, 4),
4134         IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
4135         IMPORT_SFUNC(IoGetDeviceProperty, 5),
4136         IMPORT_SFUNC(IoAllocateWorkItem, 1),
4137         IMPORT_SFUNC(IoFreeWorkItem, 1),
4138         IMPORT_SFUNC(IoQueueWorkItem, 4),
4139         IMPORT_SFUNC(ExQueueWorkItem, 2),
4140         IMPORT_SFUNC(ntoskrnl_workitem, 2),
4141         IMPORT_SFUNC(KeInitializeMutex, 2),
4142         IMPORT_SFUNC(KeReleaseMutex, 2),
4143         IMPORT_SFUNC(KeReadStateMutex, 1),
4144         IMPORT_SFUNC(KeInitializeEvent, 3),
4145         IMPORT_SFUNC(KeSetEvent, 3),
4146         IMPORT_SFUNC(KeResetEvent, 1),
4147         IMPORT_SFUNC(KeClearEvent, 1),
4148         IMPORT_SFUNC(KeReadStateEvent, 1),
4149         IMPORT_SFUNC(KeInitializeTimer, 1),
4150         IMPORT_SFUNC(KeInitializeTimerEx, 2),
4151         IMPORT_SFUNC(KeSetTimer, 3),
4152         IMPORT_SFUNC(KeSetTimerEx, 4),
4153         IMPORT_SFUNC(KeCancelTimer, 1),
4154         IMPORT_SFUNC(KeReadStateTimer, 1),
4155         IMPORT_SFUNC(KeInitializeDpc, 3),
4156         IMPORT_SFUNC(KeInsertQueueDpc, 3),
4157         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4158         IMPORT_SFUNC(KeSetImportanceDpc, 2),
4159         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4160         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4161         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4162         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4163         IMPORT_FFUNC(ObfDereferenceObject, 1),
4164         IMPORT_SFUNC(ZwClose, 1),
4165         IMPORT_SFUNC(PsCreateSystemThread, 7),
4166         IMPORT_SFUNC(PsTerminateSystemThread, 1),
4167         IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4168         IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4169         IMPORT_CFUNC(WmiTraceMessage, 0),
4170         IMPORT_SFUNC(KeQuerySystemTime, 1),
4171         IMPORT_CFUNC(KeTickCount, 0),
4172         IMPORT_SFUNC(KeDelayExecutionThread, 3),
4173         IMPORT_SFUNC(KeQueryInterruptTime, 0),
4174         IMPORT_SFUNC(KeGetCurrentThread, 0),
4175         IMPORT_SFUNC(KeSetPriorityThread, 2),
4176
4177         /*
4178          * This last entry is a catch-all for any function we haven't
4179          * implemented yet. The PE import list patching routine will
4180          * use it for any function that doesn't have an explicit match
4181          * in this table.
4182          */
4183
4184         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4185
4186         /* End of list. */
4187
4188         { NULL, NULL, NULL }
4189 };