1 /*-
2  * Copyright (c) 2003
3  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.117 2012/11/17 01:51:26 svnexp Exp $
33  */
34
35 #include <sys/ctype.h>
36 #include <sys/unistd.h>
37 #include <sys/param.h>
38 #include <sys/types.h>
39 #include <sys/errno.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/lock.h>
43 #include <sys/thread2.h>
44 #include <sys/mutex.h>
45 #include <sys/mutex2.h>
46
47 #include <sys/callout.h>
48 #include <sys/kernel.h>
49 #include <sys/proc.h>
50 #include <sys/condvar.h>
51 #include <sys/kthread.h>
52 #include <sys/module.h>
53 #include <sys/sched.h>
54 #include <sys/sysctl.h>
55
56 #include <machine/atomic.h>
57 #include <machine/stdarg.h>
58
59 #include <sys/bus.h>
60 #include <sys/rman.h>
61 #include <sys/objcache.h>
62
63 #include <vm/vm.h>
64 #include <vm/vm_param.h>
65 #include <vm/pmap.h>
66 #include <vm/vm_kern.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_extern.h>
69
70 #include <emulation/ndis/pe_var.h>
71 #include <emulation/ndis/cfg_var.h>
72 #include <emulation/ndis/resource_var.h>
73 #include <emulation/ndis/ntoskrnl_var.h>
74 #include <emulation/ndis/hal_var.h>
75 #include <emulation/ndis/ndis_var.h>
76
77 #include <stdarg.h>
78
79 #ifdef NTOSKRNL_DEBUG_TIMERS
80 static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);
81
82 SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLTYPE_INT | CTLFLAG_RW,
83     NULL, 0, sysctl_show_timers, "I",
84     "Show ntoskrnl timer stats");
85 #endif
86
87 struct kdpc_queue {
88         list_entry              kq_disp;
89         struct thread           *kq_td;
90         int                     kq_cpu;
91         int                     kq_exit;
92         int                     kq_running;
93         kspin_lock              kq_lock;
94         nt_kevent               kq_proc;
95         nt_kevent               kq_done;
96 };
97
98 typedef struct kdpc_queue kdpc_queue;
99
100 struct wb_ext {
101         struct cv               we_cv;
102         struct thread           *we_td;
103 };
104
105 typedef struct wb_ext wb_ext;
106
107 #define NTOSKRNL_TIMEOUTS       256
108 #ifdef NTOSKRNL_DEBUG_TIMERS
109 static uint64_t ntoskrnl_timer_fires;
110 static uint64_t ntoskrnl_timer_sets;
111 static uint64_t ntoskrnl_timer_reloads;
112 static uint64_t ntoskrnl_timer_cancels;
113 #endif
114
115 struct callout_entry {
116         struct callout          ce_callout;
117         list_entry              ce_list;
118 };
119
120 typedef struct callout_entry callout_entry;
121
122 static struct list_entry ntoskrnl_calllist;
123 static struct mtx ntoskrnl_calllock;
124 struct kuser_shared_data kuser_shared_data;
125
126 static struct list_entry ntoskrnl_intlist;
127 static kspin_lock ntoskrnl_intlock;
128
129 static uint8_t RtlEqualUnicodeString(unicode_string *,
130         unicode_string *, uint8_t);
131 static void RtlCopyString(ansi_string *, const ansi_string *);
132 static void RtlCopyUnicodeString(unicode_string *,
133         unicode_string *);
134 static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
135          void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
136 static irp *IoBuildAsynchronousFsdRequest(uint32_t,
137         device_object *, void *, uint32_t, uint64_t *, io_status_block *);
138 static irp *IoBuildDeviceIoControlRequest(uint32_t,
139         device_object *, void *, uint32_t, void *, uint32_t,
140         uint8_t, nt_kevent *, io_status_block *);
141 static irp *IoAllocateIrp(uint8_t, uint8_t);
142 static void IoReuseIrp(irp *, uint32_t);
143 static void IoFreeIrp(irp *);
144 static void IoInitializeIrp(irp *, uint16_t, uint8_t);
145 static irp *IoMakeAssociatedIrp(irp *, uint8_t);
146 static uint32_t KeWaitForMultipleObjects(uint32_t,
147         nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
148         int64_t *, wait_block *);
149 static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
150 static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
151 static void ntoskrnl_satisfy_multiple_waits(wait_block *);
152 static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
153 static void ntoskrnl_insert_timer(ktimer *, int);
154 static void ntoskrnl_remove_timer(ktimer *);
155 #ifdef NTOSKRNL_DEBUG_TIMERS
156 static void ntoskrnl_show_timers(void);
157 #endif
158 static void ntoskrnl_timercall(void *);
159 static void ntoskrnl_dpc_thread(void *);
160 static void ntoskrnl_destroy_dpc_threads(void);
161 static void ntoskrnl_destroy_workitem_threads(void);
162 static void ntoskrnl_workitem_thread(void *);
163 static void ntoskrnl_workitem(device_object *, void *);
164 static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
165 static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
166 static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
167 static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
168 static uint16_t READ_REGISTER_USHORT(uint16_t *);
169 static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
170 static uint32_t READ_REGISTER_ULONG(uint32_t *);
171 static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
172 static uint8_t READ_REGISTER_UCHAR(uint8_t *);
173 static int64_t _allmul(int64_t, int64_t);
174 static int64_t _alldiv(int64_t, int64_t);
175 static int64_t _allrem(int64_t, int64_t);
176 static int64_t _allshr(int64_t, uint8_t);
177 static int64_t _allshl(int64_t, uint8_t);
178 static uint64_t _aullmul(uint64_t, uint64_t);
179 static uint64_t _aulldiv(uint64_t, uint64_t);
180 static uint64_t _aullrem(uint64_t, uint64_t);
181 static uint64_t _aullshr(uint64_t, uint8_t);
182 static uint64_t _aullshl(uint64_t, uint8_t);
183 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
184 static void InitializeSListHead(slist_header *);
185 static slist_entry *ntoskrnl_popsl(slist_header *);
186 static void ExFreePoolWithTag(void *, uint32_t);
187 static void ExInitializePagedLookasideList(paged_lookaside_list *,
188         lookaside_alloc_func *, lookaside_free_func *,
189         uint32_t, size_t, uint32_t, uint16_t);
190 static void ExDeletePagedLookasideList(paged_lookaside_list *);
191 static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
192         lookaside_alloc_func *, lookaside_free_func *,
193         uint32_t, size_t, uint32_t, uint16_t);
194 static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
195 static slist_entry
196         *ExInterlockedPushEntrySList(slist_header *,
197         slist_entry *, kspin_lock *);
198 static slist_entry
199         *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
200 static uint32_t InterlockedIncrement(volatile uint32_t *);
201 static uint32_t InterlockedDecrement(volatile uint32_t *);
202 static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
203 static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
204 static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
205         uint64_t, uint64_t, uint64_t, enum nt_caching_type);
206 static void MmFreeContiguousMemory(void *);
207 static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t,
208         enum nt_caching_type);
209 static uint32_t MmSizeOfMdl(void *, size_t);
210 static void *MmMapLockedPages(mdl *, uint8_t);
211 static void *MmMapLockedPagesSpecifyCache(mdl *,
212         uint8_t, uint32_t, void *, uint32_t, uint32_t);
213 static void MmUnmapLockedPages(void *, mdl *);
214 static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
215 static void RtlZeroMemory(void *, size_t);
216 static void RtlSecureZeroMemory(void *, size_t);
217 static void RtlFillMemory(void *, size_t, uint8_t);
218 static void RtlMoveMemory(void *, const void *, size_t);
219 static ndis_status RtlCharToInteger(const char *, uint32_t, uint32_t *);
220 static void RtlCopyMemory(void *, const void *, size_t);
221 static size_t RtlCompareMemory(const void *, const void *, size_t);
222 static ndis_status RtlUnicodeStringToInteger(unicode_string *,
223         uint32_t, uint32_t *);
224 static int atoi (const char *);
225 static long atol (const char *);
226 static int rand(void);
227 static void srand(unsigned int);
228 static void KeQuerySystemTime(uint64_t *);
229 static uint32_t KeTickCount(void);
230 static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
231 static int32_t IoOpenDeviceRegistryKey(struct device_object *, uint32_t,
232     uint32_t, void **);
233 static void ntoskrnl_thrfunc(void *);
234 static ndis_status PsCreateSystemThread(ndis_handle *,
235         uint32_t, void *, ndis_handle, void *, void *, void *);
236 static ndis_status PsTerminateSystemThread(ndis_status);
237 static ndis_status IoGetDeviceObjectPointer(unicode_string *,
238         uint32_t, void *, device_object *);
239 static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
240         uint32_t, void *, uint32_t *);
241 static void KeInitializeMutex(kmutant *, uint32_t);
242 static uint32_t KeReleaseMutex(kmutant *, uint8_t);
243 static uint32_t KeReadStateMutex(kmutant *);
244 static ndis_status ObReferenceObjectByHandle(ndis_handle,
245         uint32_t, void *, uint8_t, void **, void **);
246 static void ObfDereferenceObject(void *);
247 static uint32_t ZwClose(ndis_handle);
248 static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
249         uint32_t, void *);
250 static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
251 static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
252 static void *ntoskrnl_memset(void *, int, size_t);
253 static void *ntoskrnl_memmove(void *, void *, size_t);
254 static void *ntoskrnl_memchr(void *, unsigned char, size_t);
255 static char *ntoskrnl_strstr(char *, char *);
256 static char *ntoskrnl_strncat(char *, char *, size_t);
257 static int ntoskrnl_toupper(int);
258 static int ntoskrnl_tolower(int);
259 static funcptr ntoskrnl_findwrap(funcptr);
260 static uint32_t DbgPrint(char *, ...) __printflike(1, 2);
261 static void DbgBreakPoint(void);
262 static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
263 static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
264 static int32_t KeSetPriorityThread(struct thread *, int32_t);
265 static void dummy(void);
266
267 static struct lock ntoskrnl_dispatchlock;
268 static struct mtx ntoskrnl_interlock;
269 static kspin_lock ntoskrnl_cancellock;
270 static int ntoskrnl_kth = 0;
271 static struct nt_objref_head ntoskrnl_reflist;
272 static struct objcache *mdl_cache;
273 static struct objcache *iw_cache;
274 static struct kdpc_queue *kq_queues;
275 static struct kdpc_queue *wq_queues;
276 static int wq_idx = 0;
277
278 static struct objcache_malloc_args mdl_alloc_args = {
279         MDL_ZONE_SIZE, M_DEVBUF
280 };
281 static struct objcache_malloc_args iw_alloc_args = {
282         sizeof(io_workitem), M_DEVBUF
283 };
284
285 int
286 ntoskrnl_libinit(void)
287 {
288         image_patch_table       *patch;
289         int                     error;
290         struct thread           *p;
291         kdpc_queue              *kq;
292         callout_entry           *e;
293         int                     i;
294
295         lockinit(&ntoskrnl_dispatchlock, MTX_NDIS_LOCK, 0, LK_CANRECURSE);
296         mtx_init(&ntoskrnl_interlock, "ndis1");
297         KeInitializeSpinLock(&ntoskrnl_cancellock);
298         KeInitializeSpinLock(&ntoskrnl_intlock);
299         TAILQ_INIT(&ntoskrnl_reflist);
300
301         InitializeListHead(&ntoskrnl_calllist);
302         InitializeListHead(&ntoskrnl_intlist);
303         mtx_init(&ntoskrnl_calllock, "ndis2");
304
305         kq_queues = ExAllocatePoolWithTag(NonPagedPool,
306 #ifdef NTOSKRNL_MULTIPLE_DPCS
307             sizeof(kdpc_queue) * ncpus, 0);
308 #else
309             sizeof(kdpc_queue), 0);
310 #endif
311
312         if (kq_queues == NULL)
313                 return (ENOMEM);
314
315         wq_queues = ExAllocatePoolWithTag(NonPagedPool,
316             sizeof(kdpc_queue) * WORKITEM_THREADS, 0);
317
318         if (wq_queues == NULL)
319                 return (ENOMEM);
320
321 #ifdef NTOSKRNL_MULTIPLE_DPCS
322         bzero((char *)kq_queues, sizeof(kdpc_queue) * ncpus);
323 #else
324         bzero((char *)kq_queues, sizeof(kdpc_queue));
325 #endif
326         bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);
327
328         /*
329          * Launch the DPC threads.
330          */
331
332 #ifdef NTOSKRNL_MULTIPLE_DPCS
333         for (i = 0; i < ncpus; i++) {
334 #else
335         for (i = 0; i < 1; i++) {
336 #endif
337                 kq = kq_queues + i;
338                 kq->kq_cpu = i;
339                 error = kthread_create_cpu(ntoskrnl_dpc_thread, kq, &p, i,
340                     "Win DPC %d", i);
341                 if (error)
342                         panic("failed to launch DPC thread");
343         }
344
345         /*
346          * Launch the workitem threads.
347          */
348
349         for (i = 0; i < WORKITEM_THREADS; i++) {
350                 kq = wq_queues + i;
351                 error = kthread_create(ntoskrnl_workitem_thread, kq, &p,
352                     "Win Workitem %d", i);
353                 if (error)
354                         panic("failed to launch workitem thread");
355         }
356
357         patch = ntoskrnl_functbl;
358         while (patch->ipt_func != NULL) {
359                 windrv_wrap((funcptr)patch->ipt_func,
360                     (funcptr *)&patch->ipt_wrap,
361                     patch->ipt_argcnt, patch->ipt_ftype);
362                 patch++;
363         }
364
365         for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
366                 e = ExAllocatePoolWithTag(NonPagedPool,
367                     sizeof(callout_entry), 0);
368                 if (e == NULL)
369                         panic("failed to allocate timeouts");
370                 mtx_spinlock(&ntoskrnl_calllock);
371                 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
372                 mtx_spinunlock(&ntoskrnl_calllock);
373         }
374
375         /*
376          * MDLs are supposed to be variable size (they describe
377          * buffers containing some number of pages, but we don't
378          * know ahead of time how many pages that will be). But
379          * always allocating them off the heap is very slow. As
380          * a compromise, we create an MDL UMA zone big enough to
381          * handle any buffer requiring up to 16 pages, and we
382          * use those for any MDLs for buffers of 16 pages or less
383          * in size. For buffers larger than that (which we assume
384  * will be few and far between), we allocate the MDLs off
385          * the heap.
386          *
387  * CHANGED TO USING objcache(9) IN DRAGONFLY (see the sketch after this function)
388          */
389
390         mdl_cache = objcache_create("Windows MDL", 0, 0,
391             NULL, NULL, NULL, objcache_malloc_alloc, objcache_malloc_free,
392             &mdl_alloc_args);
393
394         iw_cache = objcache_create("Windows WorkItem", 0, 0,
395             NULL, NULL, NULL, objcache_malloc_alloc, objcache_malloc_free,
396             &iw_alloc_args);
397
398         return (0);
399 }
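/*
 * Illustrative sketch only (not part of the original file): how the
 * mdl_cache created above is typically consumed, per the MDL sizing
 * strategy described in the comment inside ntoskrnl_libinit(). MDLs
 * that fit within MDL_ZONE_SIZE come from the objcache; anything
 * larger falls back to the heap. The helper name and its arguments
 * are hypothetical.
 */
#if 0
static mdl *
example_alloc_mdl(void *vaddr, size_t len)
{
        mdl                     *m;

        if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
                m = ExAllocatePoolWithTag(NonPagedPool,
                    MmSizeOfMdl(vaddr, len), 0);
        else
                m = objcache_get(mdl_cache, M_NOWAIT);
        return (m);
}
#endif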
400
401 int
402 ntoskrnl_libfini(void)
403 {
404         image_patch_table       *patch;
405         callout_entry           *e;
406         list_entry              *l;
407
408         patch = ntoskrnl_functbl;
409         while (patch->ipt_func != NULL) {
410                 windrv_unwrap(patch->ipt_wrap);
411                 patch++;
412         }
413
414         /* Stop the workitem queues. */
415         ntoskrnl_destroy_workitem_threads();
416         /* Stop the DPC queues. */
417         ntoskrnl_destroy_dpc_threads();
418
419         ExFreePool(kq_queues);
420         ExFreePool(wq_queues);
421
422         objcache_destroy(mdl_cache);
423         objcache_destroy(iw_cache);
424
425         mtx_spinlock(&ntoskrnl_calllock);
426         while(!IsListEmpty(&ntoskrnl_calllist)) {
427                 l = RemoveHeadList(&ntoskrnl_calllist);
428                 e = CONTAINING_RECORD(l, callout_entry, ce_list);
429                 mtx_spinunlock(&ntoskrnl_calllock);
430                 ExFreePool(e);
431                 mtx_spinlock(&ntoskrnl_calllock);
432         }
433         mtx_spinunlock(&ntoskrnl_calllock);
434
435         lockuninit(&ntoskrnl_dispatchlock);
436         mtx_uninit(&ntoskrnl_interlock);
437         mtx_uninit(&ntoskrnl_calllock);
438
439         return (0);
440 }
441
442 /*
443  * We need to be able to reference this externally from the wrapper;
444  * GCC only generates a local implementation of memset.
445  */
446 static void *
447 ntoskrnl_memset(void *buf, int ch, size_t size)
448 {
449         return (memset(buf, ch, size));
450 }
451
452 static void *
453 ntoskrnl_memmove(void *dst, void *src, size_t size)
454 {
455         bcopy(src, dst, size);
456         return (dst);
457 }
458
459 static void *
460 ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
461 {
462         if (len != 0) {
463                 unsigned char *p = buf;
464
465                 do {
466                         if (*p++ == ch)
467                                 return (p - 1);
468                 } while (--len != 0);
469         }
470         return (NULL);
471 }
472
473 static char *
474 ntoskrnl_strstr(char *s, char *find)
475 {
476         char c, sc;
477         size_t len;
478
479         if ((c = *find++) != 0) {
480                 len = strlen(find);
481                 do {
482                         do {
483                                 if ((sc = *s++) == 0)
484                                         return (NULL);
485                         } while (sc != c);
486                 } while (strncmp(s, find, len) != 0);
487                 s--;
488         }
489         return (s);
490 }
491
492 /* Taken from libc */
493 static char *
494 ntoskrnl_strncat(char *dst, char *src, size_t n)
495 {
496         if (n != 0) {
497                 char *d = dst;
498                 const char *s = src;
499
500                 while (*d != 0)
501                         d++;
502                 do {
503                         if ((*d = *s++) == 0)
504                                 break;
505                         d++;
506                 } while (--n != 0);
507                 *d = 0;
508         }
509         return (dst);
510 }
511
512 static int
513 ntoskrnl_toupper(int c)
514 {
515         return (toupper(c));
516 }
517
518 static int
519 ntoskrnl_tolower(int c)
520 {
521         return (tolower(c));
522 }
523
524 static uint8_t
525 RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
526         uint8_t caseinsensitive)
527 {
528         int                     i;
529
530         if (str1->us_len != str2->us_len)
531                 return (FALSE);
532
533         for (i = 0; i < str1->us_len; i++) {
534                 if (caseinsensitive == TRUE) {
535                         if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
536                             toupper((char)(str2->us_buf[i] & 0xFF)))
537                                 return (FALSE);
538                 } else {
539                         if (str1->us_buf[i] != str2->us_buf[i])
540                                 return (FALSE);
541                 }
542         }
543
544         return (TRUE);
545 }
546
547 static void
548 RtlCopyString(ansi_string *dst, const ansi_string *src)
549 {
550         if (src != NULL && src->as_buf != NULL && dst->as_buf != NULL) {
551                 dst->as_len = min(src->as_len, dst->as_maxlen);
552                 memcpy(dst->as_buf, src->as_buf, dst->as_len);
553                 if (dst->as_len < dst->as_maxlen)
554                         dst->as_buf[dst->as_len] = 0;
555         } else
556                 dst->as_len = 0;
557 }
558
559 static void
560 RtlCopyUnicodeString(unicode_string *dest, unicode_string *src)
561 {
562
563         if (dest->us_maxlen >= src->us_len)
564                 dest->us_len = src->us_len;
565         else
566                 dest->us_len = dest->us_maxlen;
567         memcpy(dest->us_buf, src->us_buf, dest->us_len);
568 }
569
570 static void
571 ntoskrnl_ascii_to_unicode(char *ascii, uint16_t *unicode, int len)
572 {
573         int                     i;
574         uint16_t                *ustr;
575
576         ustr = unicode;
577         for (i = 0; i < len; i++) {
578                 *ustr = (uint16_t)ascii[i];
579                 ustr++;
580         }
581 }
582
583 static void
584 ntoskrnl_unicode_to_ascii(uint16_t *unicode, char *ascii, int len)
585 {
586         int                     i;
587         uint8_t                 *astr;
588
589         astr = ascii;
590         for (i = 0; i < len / 2; i++) {
591                 *astr = (uint8_t)unicode[i];
592                 astr++;
593         }
594 }
595
596 uint32_t
597 RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate)
598 {
599         if (dest == NULL || src == NULL)
600                 return (STATUS_INVALID_PARAMETER);
601
602         dest->as_len = src->us_len / 2;
603         if (dest->as_maxlen < dest->as_len)
604                 dest->as_len = dest->as_maxlen;
605
606         if (allocate == TRUE) {
607                 dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
608                     (src->us_len / 2) + 1, 0);
609                 if (dest->as_buf == NULL)
610                         return (STATUS_INSUFFICIENT_RESOURCES);
611                 dest->as_len = dest->as_maxlen = src->us_len / 2;
612         } else {
613                 dest->as_len = src->us_len / 2; /* XXX */
614                 if (dest->as_maxlen < dest->as_len)
615                         dest->as_len = dest->as_maxlen;
616         }
617
618         ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
619             dest->as_len * 2);
620
621         return (STATUS_SUCCESS);
622 }
623
624 uint32_t
625 RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
626         uint8_t allocate)
627 {
628         if (dest == NULL || src == NULL)
629                 return (STATUS_INVALID_PARAMETER);
630
631         if (allocate == TRUE) {
632                 dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
633                     src->as_len * 2, 0);
634                 if (dest->us_buf == NULL)
635                         return (STATUS_INSUFFICIENT_RESOURCES);
636                 dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
637         } else {
638                 dest->us_len = src->as_len * 2; /* XXX */
639                 if (dest->us_maxlen < dest->us_len)
640                         dest->us_len = dest->us_maxlen;
641         }
642
643         ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
644             dest->us_len / 2);
645
646         return (STATUS_SUCCESS);
647 }
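/*
 * Illustrative sketch only (not part of the original file): a typical
 * round trip through the two conversion routines above. The string
 * contents and names are hypothetical; with allocate == TRUE the
 * destination buffer comes from ExAllocatePoolWithTag() and must be
 * released with ExFreePool().
 */
#if 0
static void
example_string_conversion(void)
{
        ansi_string             as;
        unicode_string          us;

        as.as_buf = __DECONST(char *, "ndis0");
        as.as_len = as.as_maxlen = 5;   /* length in bytes */

        if (RtlAnsiStringToUnicodeString(&us, &as, TRUE) == STATUS_SUCCESS) {
                /* us.us_buf now holds UTF-16; us.us_len is in bytes. */
                ExFreePool(us.us_buf);
        }
}
#endif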
648
649 void *
650 ExAllocatePoolWithTag(uint32_t pooltype, size_t len, uint32_t tag)
651 {
652         void                    *buf;
653
654         buf = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
655         if (buf == NULL)
656                 return (NULL);
657
658         return (buf);
659 }
660
661 static void
662 ExFreePoolWithTag(void *buf, uint32_t tag)
663 {
664         ExFreePool(buf);
665 }
666
667 void
668 ExFreePool(void *buf)
669 {
670         kfree(buf, M_DEVBUF);
671 }
672
673 uint32_t
674 IoAllocateDriverObjectExtension(driver_object *drv, void *clid,
675     uint32_t extlen, void **ext)
676 {
677         custom_extension        *ce;
678
679         ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
680             + extlen, 0);
681
682         if (ce == NULL)
683                 return (STATUS_INSUFFICIENT_RESOURCES);
684
685         ce->ce_clid = clid;
686         InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));
687
688         *ext = (void *)(ce + 1);
689
690         return (STATUS_SUCCESS);
691 }
692
693 void *
694 IoGetDriverObjectExtension(driver_object *drv, void *clid)
695 {
696         list_entry              *e;
697         custom_extension        *ce;
698
699         /*
700          * Sanity check. Our dummy bus drivers don't have
701          * any driver extensions.
702          */
703
704         if (drv->dro_driverext == NULL)
705                 return (NULL);
706
707         e = drv->dro_driverext->dre_usrext.nle_flink;
708         while (e != &drv->dro_driverext->dre_usrext) {
709                 ce = (custom_extension *)e;
710                 if (ce->ce_clid == clid)
711                         return ((void *)(ce + 1));
712                 e = e->nle_flink;
713         }
714
715         return (NULL);
716 }
717
718
719 uint32_t
720 IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
721         uint32_t devtype, uint32_t devchars, uint8_t exclusive,
722         device_object **newdev)
723 {
724         device_object           *dev;
725
726         dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
727         if (dev == NULL)
728                 return (STATUS_INSUFFICIENT_RESOURCES);
729
730         dev->do_type = devtype;
731         dev->do_drvobj = drv;
732         dev->do_currirp = NULL;
733         dev->do_flags = 0;
734
735         if (devextlen) {
736                 dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
737                     devextlen, 0);
738
739                 if (dev->do_devext == NULL) {
740                         ExFreePool(dev);
741                         return (STATUS_INSUFFICIENT_RESOURCES);
742                 }
743
744                 bzero(dev->do_devext, devextlen);
745         } else
746                 dev->do_devext = NULL;
747
748         dev->do_size = sizeof(device_object) + devextlen;
749         dev->do_refcnt = 1;
750         dev->do_attacheddev = NULL;
751         dev->do_nextdev = NULL;
752         dev->do_devtype = devtype;
753         dev->do_stacksize = 1;
754         dev->do_alignreq = 1;
755         dev->do_characteristics = devchars;
756         dev->do_iotimer = NULL;
757         KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);
758
759         /*
760          * Vpb is used for disk/tape devices,
761          * but we don't support those. (Yet.)
762          */
763         dev->do_vpb = NULL;
764
765         dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
766             sizeof(devobj_extension), 0);
767
768         if (dev->do_devobj_ext == NULL) {
769                 if (dev->do_devext != NULL)
770                         ExFreePool(dev->do_devext);
771                 ExFreePool(dev);
772                 return (STATUS_INSUFFICIENT_RESOURCES);
773         }
774
775         dev->do_devobj_ext->dve_type = 0;
776         dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
777         dev->do_devobj_ext->dve_devobj = dev;
778
779         /*
780          * Attach this device to the driver object's list
781          * of devices. Note: this is not the same as attaching
782          * the device to the device stack. The driver's AddDevice
783          * routine must explicitly call IoAddDeviceToDeviceStack()
784          * to do that.
785          */
786
787         if (drv->dro_devobj == NULL) {
788                 drv->dro_devobj = dev;
789                 dev->do_nextdev = NULL;
790         } else {
791                 dev->do_nextdev = drv->dro_devobj;
792                 drv->dro_devobj = dev;
793         }
794
795         *newdev = dev;
796
797         return (STATUS_SUCCESS);
798 }
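/*
 * Illustrative sketch only (not part of the original file): the pattern
 * a driver's AddDevice routine follows, per the comment above -- create
 * the device with IoCreateDevice() and then explicitly attach it to the
 * device stack with IoAttachDeviceToDeviceStack(). The extension
 * structure and names are hypothetical; FILE_DEVICE_UNKNOWN is assumed
 * to be defined in ntoskrnl_var.h.
 */
#if 0
static uint32_t
example_adddevice(driver_object *drv, device_object *pdo)
{
        device_object           *fdo;
        uint32_t                status;

        status = IoCreateDevice(drv, sizeof(struct example_devext), NULL,
            FILE_DEVICE_UNKNOWN, 0, FALSE, &fdo);
        if (status != STATUS_SUCCESS)
                return (status);

        /* Attach on top of the lower device; bumps fdo->do_stacksize. */
        IoAttachDeviceToDeviceStack(fdo, pdo);

        return (STATUS_SUCCESS);
}
#endif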
799
800 void
801 IoDeleteDevice(device_object *dev)
802 {
803         device_object           *prev;
804
805         if (dev == NULL)
806                 return;
807
808         if (dev->do_devobj_ext != NULL)
809                 ExFreePool(dev->do_devobj_ext);
810
811         if (dev->do_devext != NULL)
812                 ExFreePool(dev->do_devext);
813
814         /* Unlink the device from the driver's device list. */
815
816         prev = dev->do_drvobj->dro_devobj;
817         if (prev == dev)
818                 dev->do_drvobj->dro_devobj = dev->do_nextdev;
819         else {
820                 while (prev->do_nextdev != dev)
821                         prev = prev->do_nextdev;
822                 prev->do_nextdev = dev->do_nextdev;
823         }
824
825         ExFreePool(dev);
826 }
827
828 device_object *
829 IoGetAttachedDevice(device_object *dev)
830 {
831         device_object           *d;
832
833         if (dev == NULL)
834                 return (NULL);
835
836         d = dev;
837
838         while (d->do_attacheddev != NULL)
839                 d = d->do_attacheddev;
840
841         return (d);
842 }
843
844 static irp *
845 IoBuildSynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
846     uint32_t len, uint64_t *off, nt_kevent *event, io_status_block *status)
847 {
848         irp                     *ip;
849
850         ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
851         if (ip == NULL)
852                 return (NULL);
853         ip->irp_usrevent = event;
854
855         return (ip);
856 }
857
858 static irp *
859 IoBuildAsynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
860     uint32_t len, uint64_t *off, io_status_block *status)
861 {
862         irp                     *ip;
863         io_stack_location       *sl;
864
865         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
866         if (ip == NULL)
867                 return (NULL);
868
869         ip->irp_usriostat = status;
870         ip->irp_tail.irp_overlay.irp_thread = NULL;
871
872         sl = IoGetNextIrpStackLocation(ip);
873         sl->isl_major = func;
874         sl->isl_minor = 0;
875         sl->isl_flags = 0;
876         sl->isl_ctl = 0;
877         sl->isl_devobj = dobj;
878         sl->isl_fileobj = NULL;
879         sl->isl_completionfunc = NULL;
880
881         ip->irp_userbuf = buf;
882
883         if (dobj->do_flags & DO_BUFFERED_IO) {
884                 ip->irp_assoc.irp_sysbuf =
885                     ExAllocatePoolWithTag(NonPagedPool, len, 0);
886                 if (ip->irp_assoc.irp_sysbuf == NULL) {
887                         IoFreeIrp(ip);
888                         return (NULL);
889                 }
890                 bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
891         }
892
893         if (dobj->do_flags & DO_DIRECT_IO) {
894                 ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
895                 if (ip->irp_mdl == NULL) {
896                         if (ip->irp_assoc.irp_sysbuf != NULL)
897                                 ExFreePool(ip->irp_assoc.irp_sysbuf);
898                         IoFreeIrp(ip);
899                         return (NULL);
900                 }
901                 ip->irp_userbuf = NULL;
902                 ip->irp_assoc.irp_sysbuf = NULL;
903         }
904
905         if (func == IRP_MJ_READ) {
906                 sl->isl_parameters.isl_read.isl_len = len;
907                 if (off != NULL)
908                         sl->isl_parameters.isl_read.isl_byteoff = *off;
909                 else
910                         sl->isl_parameters.isl_read.isl_byteoff = 0;
911         }
912
913         if (func == IRP_MJ_WRITE) {
914                 sl->isl_parameters.isl_write.isl_len = len;
915                 if (off != NULL)
916                         sl->isl_parameters.isl_write.isl_byteoff = *off;
917                 else
918                         sl->isl_parameters.isl_write.isl_byteoff = 0;
919         }
920
921         return (ip);
922 }
923
924 static irp *
925 IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
926         uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
927         nt_kevent *event, io_status_block *status)
928 {
929         irp                     *ip;
930         io_stack_location       *sl;
931         uint32_t                buflen;
932
933         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
934         if (ip == NULL)
935                 return (NULL);
936         ip->irp_usrevent = event;
937         ip->irp_usriostat = status;
938         ip->irp_tail.irp_overlay.irp_thread = NULL;
939
940         sl = IoGetNextIrpStackLocation(ip);
941         sl->isl_major = isinternal == TRUE ?
942             IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
943         sl->isl_minor = 0;
944         sl->isl_flags = 0;
945         sl->isl_ctl = 0;
946         sl->isl_devobj = dobj;
947         sl->isl_fileobj = NULL;
948         sl->isl_completionfunc = NULL;
949         sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
950         sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
951         sl->isl_parameters.isl_ioctl.isl_obuflen = olen;
952
953         switch(IO_METHOD(iocode)) {
954         case METHOD_BUFFERED:
955                 if (ilen > olen)
956                         buflen = ilen;
957                 else
958                         buflen = olen;
959                 if (buflen) {
960                         ip->irp_assoc.irp_sysbuf =
961                             ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
962                         if (ip->irp_assoc.irp_sysbuf == NULL) {
963                                 IoFreeIrp(ip);
964                                 return (NULL);
965                         }
966                 }
967                 if (ilen && ibuf != NULL) {
968                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
969                         bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
970                             buflen - ilen);
971                 } else
972                         bzero(ip->irp_assoc.irp_sysbuf, ilen);
973                 ip->irp_userbuf = obuf;
974                 break;
975         case METHOD_IN_DIRECT:
976         case METHOD_OUT_DIRECT:
977                 if (ilen && ibuf != NULL) {
978                         ip->irp_assoc.irp_sysbuf =
979                             ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
980                         if (ip->irp_assoc.irp_sysbuf == NULL) {
981                                 IoFreeIrp(ip);
982                                 return (NULL);
983                         }
984                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
985                 }
986                 if (olen && obuf != NULL) {
987                         ip->irp_mdl = IoAllocateMdl(obuf, olen,
988                             FALSE, FALSE, ip);
989                         /*
990                          * Normally we would MmProbeAndLockPages()
991                          * here, but we don't have to in our
992                          * implementation.
993                          */
994                 }
995                 break;
996         case METHOD_NEITHER:
997                 ip->irp_userbuf = obuf;
998                 sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
999                 break;
1000         default:
1001                 break;
1002         }
1003
1004         /*
1005          * Ideally, we should associate this IRP with the calling
1006          * thread here.
1007          */
1008
1009         return (ip);
1010 }
1011
1012 static irp *
1013 IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
1014 {
1015         irp                     *i;
1016
1017         i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
1018         if (i == NULL)
1019                 return (NULL);
1020
1021         IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);
1022
1023         return (i);
1024 }
1025
1026 static irp *
1027 IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
1028 {
1029         irp                     *associrp;
1030
1031         associrp = IoAllocateIrp(stsize, FALSE);
1032         if (associrp == NULL)
1033                 return (NULL);
1034
1035         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1036         associrp->irp_flags |= IRP_ASSOCIATED_IRP;
1037         associrp->irp_tail.irp_overlay.irp_thread =
1038             ip->irp_tail.irp_overlay.irp_thread;
1039         associrp->irp_assoc.irp_master = ip;
1040         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1041
1042         return (associrp);
1043 }
1044
1045 static void
1046 IoFreeIrp(irp *ip)
1047 {
1048         ExFreePool(ip);
1049 }
1050
1051 static void
1052 IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
1053 {
1054         bzero((char *)io, IoSizeOfIrp(ssize));
1055         io->irp_size = psize;
1056         io->irp_stackcnt = ssize;
1057         io->irp_currentstackloc = ssize;
1058         InitializeListHead(&io->irp_thlist);
1059         io->irp_tail.irp_overlay.irp_csl =
1060             (io_stack_location *)(io + 1) + ssize;
1061 }
1062
1063 static void
1064 IoReuseIrp(irp *ip, uint32_t status)
1065 {
1066         uint8_t                 allocflags;
1067
1068         allocflags = ip->irp_allocflags;
1069         IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
1070         ip->irp_iostat.isb_status = status;
1071         ip->irp_allocflags = allocflags;
1072 }
1073
1074 void
1075 IoAcquireCancelSpinLock(uint8_t *irql)
1076 {
1077         KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
1078 }
1079
1080 void
1081 IoReleaseCancelSpinLock(uint8_t irql)
1082 {
1083         KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
1084 }
1085
1086 uint8_t
1087 IoCancelIrp(irp *ip)
1088 {
1089         cancel_func             cfunc;
1090         uint8_t                 cancelirql;
1091
1092         IoAcquireCancelSpinLock(&cancelirql);
1093         cfunc = IoSetCancelRoutine(ip, NULL);
1094         ip->irp_cancel = TRUE;
1095         if (cfunc == NULL) {
1096                 IoReleaseCancelSpinLock(cancelirql);
1097                 return (FALSE);
1098         }
1099         ip->irp_cancelirql = cancelirql;
1100         MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
1101         return (uint8_t)IoSetCancelValue(ip, TRUE);
1102 }
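/*
 * Illustrative sketch only (not part of the original file): the shape of
 * a driver-supplied cancel routine as IoCancelIrp() invokes it. The
 * routine is entered with the cancel spinlock held and must drop it with
 * IoReleaseCancelSpinLock() using the IRQL saved in irp_cancelirql.
 * STATUS_CANCELLED and the isb_info field are assumed to come from
 * ntoskrnl_var.h; the queue handling is hypothetical.
 */
#if 0
static void
example_cancel_routine(device_object *dobj, irp *ip)
{
        IoReleaseCancelSpinLock(ip->irp_cancelirql);

        /* ... remove the IRP from the driver's pending queue ... */

        ip->irp_iostat.isb_status = STATUS_CANCELLED;
        ip->irp_iostat.isb_info = 0;
        IoCompleteRequest(ip, IO_NO_INCREMENT);
}
#endif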
1103
1104 uint32_t
1105 IofCallDriver(device_object *dobj, irp *ip)
1106 {
1107         driver_object           *drvobj;
1108         io_stack_location       *sl;
1109         uint32_t                status;
1110         driver_dispatch         disp;
1111
1112         drvobj = dobj->do_drvobj;
1113
1114         if (ip->irp_currentstackloc <= 0)
1115                 panic("IoCallDriver(): out of stack locations");
1116
1117         IoSetNextIrpStackLocation(ip);
1118         sl = IoGetCurrentIrpStackLocation(ip);
1119
1120         sl->isl_devobj = dobj;
1121
1122         disp = drvobj->dro_dispatch[sl->isl_major];
1123         status = MSCALL2(disp, dobj, ip);
1124
1125         return (status);
1126 }
1127
1128 void
1129 IofCompleteRequest(irp *ip, uint8_t prioboost)
1130 {
1131         uint32_t                status;
1132         device_object           *dobj;
1133         io_stack_location       *sl;
1134         completion_func         cf;
1135
1136         KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
1137             ("incorrect IRP(%p) status (STATUS_PENDING)", ip));
1138
1139         sl = IoGetCurrentIrpStackLocation(ip);
1140         IoSkipCurrentIrpStackLocation(ip);
1141
1142         do {
1143                 if (sl->isl_ctl & SL_PENDING_RETURNED)
1144                         ip->irp_pendingreturned = TRUE;
1145
1146                 if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
1147                         dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
1148                 else
1149                         dobj = NULL;
1150
1151                 if (sl->isl_completionfunc != NULL &&
1152                     ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
1153                     sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
1154                     (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
1155                     sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
1156                     (ip->irp_cancel == TRUE &&
1157                     sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
1158                         cf = sl->isl_completionfunc;
1159                         status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
1160                         if (status == STATUS_MORE_PROCESSING_REQUIRED)
1161                                 return;
1162                 } else {
1163                         if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
1164                             (ip->irp_pendingreturned == TRUE))
1165                                 IoMarkIrpPending(ip);
1166                 }
1167
1168                 /* Move to the next stack location. */
1169                 IoSkipCurrentIrpStackLocation(ip);
1170                 sl++;
1171         } while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));
1172
1173         if (ip->irp_usriostat != NULL)
1174                 *ip->irp_usriostat = ip->irp_iostat;
1175         if (ip->irp_usrevent != NULL)
1176                 KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
1177
1178         /* Handle any associated IRPs. */
1179
1180         if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
1181                 uint32_t                masterirpcnt;
1182                 irp                     *masterirp;
1183                 mdl                     *m;
1184
1185                 masterirp = ip->irp_assoc.irp_master;
1186                 masterirpcnt =
1187                     InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);
1188
1189                 while ((m = ip->irp_mdl) != NULL) {
1190                         ip->irp_mdl = m->mdl_next;
1191                         IoFreeMdl(m);
1192                 }
1193                 IoFreeIrp(ip);
1194                 if (masterirpcnt == 0)
1195                         IoCompleteRequest(masterirp, IO_NO_INCREMENT);
1196                 return;
1197         }
1198
1199         /* With any luck, these conditions will never arise. */
1200
1201         if (ip->irp_flags & IRP_PAGING_IO) {
1202                 if (ip->irp_mdl != NULL)
1203                         IoFreeMdl(ip->irp_mdl);
1204                 IoFreeIrp(ip);
1205         }
1206 }
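/*
 * Illustrative sketch only (not part of the original file): how a caller
 * interacts with the completion loop above. A completion routine hooked
 * onto a stack location with IoSetCompletionRoutine() (assumed to be the
 * macro from ntoskrnl_var.h) can return STATUS_MORE_PROCESSING_REQUIRED
 * to stop IofCompleteRequest() from unwinding any further, e.g. so the
 * originator can wait on an event and then reuse or free the IRP itself.
 * Names here are hypothetical.
 */
#if 0
static uint32_t
example_completion(device_object *dobj, irp *ip, void *ctx)
{
        nt_kevent               *ev = ctx;

        /* Wake the originator and keep the IRP from being unwound. */
        KeSetEvent(ev, IO_NO_INCREMENT, FALSE);
        return (STATUS_MORE_PROCESSING_REQUIRED);
}
#endif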
1207
1208 void
1209 ntoskrnl_intr(void *arg)
1210 {
1211         kinterrupt              *iobj;
1212         uint8_t                 irql;
1213         uint8_t                 claimed;
1214         list_entry              *l;
1215
1216         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1217         l = ntoskrnl_intlist.nle_flink;
1218         while (l != &ntoskrnl_intlist) {
1219                 iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
1220                 claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
1221                 if (claimed == TRUE)
1222                         break;
1223                 l = l->nle_flink;
1224         }
1225         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1226 }
1227
1228 uint8_t
1229 KeAcquireInterruptSpinLock(kinterrupt *iobj)
1230 {
1231         uint8_t                 irql;
1232         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1233         return (irql);
1234 }
1235
1236 void
1237 KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)
1238 {
1239         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1240 }
1241
1242 uint8_t
1243 KeSynchronizeExecution(kinterrupt *iobj, void *syncfunc, void *syncctx)
1244 {
1245         uint8_t                 irql;
1246
1247         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1248         MSCALL1(syncfunc, syncctx);
1249         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1250
1251         return (TRUE);
1252 }
1253
1254 /*
1255  * IoConnectInterrupt() is passed only the interrupt vector and
1256  * irql that a device wants to use, but no device-specific tag
1257  * of any kind. This conflicts rather badly with FreeBSD's
1258  * bus_setup_intr(), which needs the device_t for the device
1259  * requesting interrupt delivery. In order to bypass this
1260  * inconsistency, we implement a second level of interrupt
1261  * dispatching on top of bus_setup_intr(). All devices use
1262  * ntoskrnl_intr() as their ISR, and any device requesting
1263  * interrupts will be registered with ntoskrnl_intr()'s interrupt
1264  * dispatch list. When an interrupt arrives, we walk the list
1265  * and invoke all the registered ISRs. This effectively makes all
1266  * interrupts shared, but it's the only way to duplicate the
1267  * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
1268  */
1269
1270 uint32_t
1271 IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
1272         kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
1273         uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
1274 {
1275         uint8_t                 curirql;
1276
1277         *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
1278         if (*iobj == NULL)
1279                 return (STATUS_INSUFFICIENT_RESOURCES);
1280
1281         (*iobj)->ki_svcfunc = svcfunc;
1282         (*iobj)->ki_svcctx = svcctx;
1283
1284         if (lock == NULL) {
1285                 KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
1286                 (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
1287         } else
1288                 (*iobj)->ki_lock = lock;
1289
1290         KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
1291         InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
1292         KeReleaseSpinLock(&ntoskrnl_intlock, curirql);
1293
1294         return (STATUS_SUCCESS);
1295 }
1296
1297 void
1298 IoDisconnectInterrupt(kinterrupt *iobj)
1299 {
1300         uint8_t                 irql;
1301
1302         if (iobj == NULL)
1303                 return;
1304
1305         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1306         RemoveEntryList((&iobj->ki_list));
1307         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1308
1309         ExFreePool(iobj);
1310 }
1311
1312 device_object *
1313 IoAttachDeviceToDeviceStack(device_object *src, device_object *dst)
1314 {
1315         device_object           *attached;
1316
1317         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1318         attached = IoGetAttachedDevice(dst);
1319         attached->do_attacheddev = src;
1320         src->do_attacheddev = NULL;
1321         src->do_stacksize = attached->do_stacksize + 1;
1322         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1323
1324         return (attached);
1325 }
1326
1327 void
1328 IoDetachDevice(device_object *topdev)
1329 {
1330         device_object           *tail;
1331
1332         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1333
1334         /* First, break the chain. */
1335         tail = topdev->do_attacheddev;
1336         if (tail == NULL) {
1337                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1338                 return;
1339         }
1340         topdev->do_attacheddev = tail->do_attacheddev;
1341         topdev->do_refcnt--;
1342
1343         /* Now reduce the stacksize count for the tail objects. */
1344
1345         tail = topdev->do_attacheddev;
1346         while (tail != NULL) {
1347                 tail->do_stacksize--;
1348                 tail = tail->do_attacheddev;
1349         }
1350
1351         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1352 }
1353
1354 /*
1355  * For the most part, an object is considered signalled if
1356  * dh_sigstate == TRUE. The exception is for mutant objects
1357  * (mutexes), where the logic works like this:
1358  *
1359  * - If the thread already owns the object and sigstate is
1360  *   less than or equal to 0, then the object is considered
1361  *   signalled (recursive acquisition).
1362  * - If dh_sigstate == 1, the object is also considered
1363  *   signalled.
1364  */
1365
1366 static int
1367 ntoskrnl_is_signalled(nt_dispatch_header *obj, struct thread *td)
1368 {
1369         kmutant                 *km;
1370
1371         if (obj->dh_type == DISP_TYPE_MUTANT) {
1372                 km = (kmutant *)obj;
1373                 if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
1374                     obj->dh_sigstate == 1)
1375                         return (TRUE);
1376                 return (FALSE);
1377         }
1378
1379         if (obj->dh_sigstate > 0)
1380                 return (TRUE);
1381         return (FALSE);
1382 }
1383
1384 static void
1385 ntoskrnl_satisfy_wait(nt_dispatch_header *obj, struct thread *td)
1386 {
1387         kmutant                 *km;
1388
1389         switch (obj->dh_type) {
1390         case DISP_TYPE_MUTANT:
1391                 km = (struct kmutant *)obj;
1392                 obj->dh_sigstate--;
1393                 /*
1394                  * If sigstate reaches 0, the mutex is now
1395                  * non-signalled (the new thread owns it).
1396                  */
1397                 if (obj->dh_sigstate == 0) {
1398                         km->km_ownerthread = td;
1399                         if (km->km_abandoned == TRUE)
1400                                 km->km_abandoned = FALSE;
1401                 }
1402                 break;
1403         /* Synchronization objects get reset to unsignalled. */
1404         case DISP_TYPE_SYNCHRONIZATION_EVENT:
1405         case DISP_TYPE_SYNCHRONIZATION_TIMER:
1406                 obj->dh_sigstate = 0;
1407                 break;
1408         case DISP_TYPE_SEMAPHORE:
1409                 obj->dh_sigstate--;
1410                 break;
1411         default:
1412                 break;
1413         }
1414 }
1415
1416 static void
1417 ntoskrnl_satisfy_multiple_waits(wait_block *wb)
1418 {
1419         wait_block              *cur;
1420         struct thread           *td;
1421
1422         cur = wb;
1423         td = wb->wb_kthread;
1424
1425         do {
1426                 ntoskrnl_satisfy_wait(wb->wb_object, td);
1427                 cur->wb_awakened = TRUE;
1428                 cur = cur->wb_next;
1429         } while (cur != wb);
1430 }
1431
1432 /* Always called with dispatcher lock held. */
1433 static void
1434 ntoskrnl_waittest(nt_dispatch_header *obj, uint32_t increment)
1435 {
1436         wait_block              *w, *next;
1437         list_entry              *e;
1438         struct thread           *td;
1439         wb_ext                  *we;
1440         int                     satisfied;
1441
1442         /*
1443          * Once an object has been signalled, we walk its list of
1444          * wait blocks. If a wait block can be awakened, then satisfy
1445          * waits as necessary and wake the thread.
1446          *
1447          * The rules work like this:
1448          *
1449          * If a wait block is marked as WAITTYPE_ANY, then
1450          * we can satisfy the wait conditions on the current
1451          * object and wake the thread right away. Satisfying
1452          * the wait also has the effect of breaking us out
1453          * of the search loop.
1454          *
1455          * If the wait block is marked as WAITTYPE_ALL, then the
1456          * wait block will be part of a circularly linked
1457          * list of wait blocks belonging to a waiting thread
1458          * that's sleeping in KeWaitForMultipleObjects(). In
1459          * order to wake the thread, all the objects in the
1460          * wait list must be in the signalled state. If they
1461          * are, we then satisfy all of them and wake the
1462          * thread.
1463          *
1464          */
1465
1466         e = obj->dh_waitlisthead.nle_flink;
1467
1468         while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
1469                 w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
1470                 we = w->wb_ext;
1471                 td = we->we_td;
1472                 satisfied = FALSE;
1473                 if (w->wb_waittype == WAITTYPE_ANY) {
1474                         /*
1475                          * Thread can be awakened if
1476                          * any wait is satisfied.
1477                          */
1478                         ntoskrnl_satisfy_wait(obj, td);
1479                         satisfied = TRUE;
1480                         w->wb_awakened = TRUE;
1481                 } else {
1482                         /*
1483                          * Thread can only be woken up
1484                          * if all waits are satisfied.
1485                          * If the thread is waiting on multiple
1486                          * objects, they should all be linked
1487                          * through the wb_next pointers in the
1488                          * wait blocks.
1489                          */
1490                         satisfied = TRUE;
1491                         next = w->wb_next;
1492                         while (next != w) {
1493                                 if (ntoskrnl_is_signalled(obj, td) == FALSE) {
1494                                         satisfied = FALSE;
1495                                         break;
1496                                 }
1497                                 next = next->wb_next;
1498                         }
1499                         ntoskrnl_satisfy_multiple_waits(w);
1500                 }
1501
1502                 if (satisfied == TRUE)
1503                         cv_broadcastpri(&we->we_cv,
1504                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
1505                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
1506
1507                 e = e->nle_flink;
1508         }
1509 }
1510
1511 /*
1512  * Return the number of 100 nanosecond intervals since
1513  * January 1, 1601. (?!?!)
1514  */
1515 void
1516 ntoskrnl_time(uint64_t *tval)
1517 {
1518         struct timespec         ts;
1519
1520         nanotime(&ts);
1521         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
1522             11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
1523 }
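/*
 * Illustrative sketch only (not part of the original file): the inverse
 * conversion, from Windows 100ns ticks back to a timespec. The constant
 * is the same 11644473600-second offset between the 1601 and 1970 epochs
 * used above (11644473600 * 10000000 = 116444736000000000 ticks). The
 * helper name is hypothetical.
 */
#if 0
static void
example_wintime_to_timespec(uint64_t tval, struct timespec *ts)
{
        tval -= (uint64_t)11644473600 * 10000000;
        ts->tv_sec = tval / 10000000;
        ts->tv_nsec = (tval % 10000000) * 100;
}
#endif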
1524
1525 static void
1526 KeQuerySystemTime(uint64_t *current_time)
1527 {
1528         ntoskrnl_time(current_time);
1529 }
1530
1531 static uint32_t
1532 KeTickCount(void)
1533 {
1534         struct timeval tv;
1535         getmicrouptime(&tv);
1536         return tvtohz_high(&tv);
1537 }
1538
1539
1540 /*
1541  * KeWaitForSingleObject() is a tricky beast, because it can be used
1542  * with several different object types: semaphores, timers, events,
1543  * mutexes and threads. Semaphores don't appear very often, but the
1544  * other object types are quite common. KeWaitForSingleObject() is
1545  * what's normally used to acquire a mutex, and it can be used to
1546  * wait for a thread termination.
1547  *
1548  * The Windows NDIS API is implemented in terms of Windows kernel
1549  * primitives, and some of the object manipulation is duplicated in
1550  * NDIS. For example, NDIS has timers and events, which are actually
1551  * Windows kevents and ktimers. Now, you're supposed to only use the
1552  * NDIS variants of these objects within the confines of the NDIS API,
1553  * but there are some naughty developers out there who will use
1554  * KeWaitForSingleObject() on NDIS timer and event objects, so we
1555  * have to support that as well. Consequently, our NDIS timer and event
1556  * code has to be closely tied into our ntoskrnl timer and event code,
1557  * just as it is in Windows.
1558  *
1559  * KeWaitForSingleObject() may do different things for different kinds
1560  * of objects:
1561  *
1562  * - For events, we check if the event has been signalled. If the
1563  *   event is already in the signalled state, we just return immediately,
1564  *   otherwise we wait for it to be set to the signalled state by someone
1565  *   else calling KeSetEvent(). Events can be either synchronization or
1566  *   notification events.
1567  *
1568  * - For timers, if the timer has already fired and the timer is in
1569  *   the signalled state, we just return, otherwise we wait on the
1570  *   timer. Unlike an event, timers get signalled automatically when
1571  *   they expire rather than someone having to trip them manually.
1572  *   Timers initialized with KeInitializeTimer() are always notification
1573  *   events: KeInitializeTimerEx() lets you initialize a timer as
1574  *   either a notification or synchronization event.
1575  *
1576  * - For mutexes, we try to acquire the mutex and if we can't, we wait
1577  *   on the mutex until it's available and then grab it. When a mutex is
1578  *   released, it enters the signalled state, which wakes up one of the
1579  *   threads waiting to acquire it. Mutexes are always synchronization
1580  *   events.
1581  *
1582  * - For threads, the only thing we do is wait until the thread object
1583  *   enters a signalled state, which occurs when the thread terminates.
1584  *   Threads are always notification events.
1585  *
1586  * A notification event wakes up all threads waiting on an object. A
1587  * synchronization event wakes up just one. Also, a synchronization event
1588  * is auto-clearing, which means we automatically set the event back to
1589  * the non-signalled state once the wakeup is done.
1590  */
1591
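/*
 * A minimal driver-side usage sketch (the names are illustrative):
 * initialize an event and wait at most one second for somebody to
 * set it. A negative duetime is a relative timeout in 100ns units.
 *
 *        nt_kevent event;
 *        int64_t duetime = -10000000;
 *
 *        KeInitializeEvent(&event, EVENT_TYPE_NOTIFY, FALSE);
 *        if (KeWaitForSingleObject(&event, 0, 0, FALSE, &duetime) ==
 *            STATUS_TIMEOUT)
 *                handle_the_timeout();
 */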
1592 uint32_t
1593 KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
1594     uint8_t alertable, int64_t *duetime)
1595 {
1596         wait_block              w;
1597         struct thread           *td = curthread;
1598         struct timeval          tv;
1599         int                     error = 0;
1600         uint64_t                curtime;
1601         wb_ext                  we;
1602         nt_dispatch_header      *obj;
1603
1604         obj = arg;
1605
1606         if (obj == NULL)
1607                 return (STATUS_INVALID_PARAMETER);
1608
1609         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1610
1611         cv_init(&we.we_cv, "KeWFS");
1612         we.we_td = td;
1613
1614         /*
1615          * Check to see if this object is already signalled,
1616          * and just return without waiting if it is.
1617          */
1618         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
1619                 /* Sanity check the signal state value. */
1620                 if (obj->dh_sigstate != INT32_MIN) {
1621                         ntoskrnl_satisfy_wait(obj, curthread);
1622                         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1623                         return (STATUS_SUCCESS);
1624                 } else {
1625                         /*
1626                          * There's a limit to how many times we can
1627                          * recursively acquire a mutant. If we hit
1628                          * the limit, something is very wrong.
1629                          */
1630                         if (obj->dh_type == DISP_TYPE_MUTANT) {
1631                                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1632                                 panic("mutant limit exceeded");
1633                         }
1634                 }
1635         }
1636
1637         bzero((char *)&w, sizeof(wait_block));
1638         w.wb_object = obj;
1639         w.wb_ext = &we;
1640         w.wb_waittype = WAITTYPE_ANY;
1641         w.wb_next = &w;
1642         w.wb_waitkey = 0;
1643         w.wb_awakened = FALSE;
1644         w.wb_oldpri = td->td_pri;
1645
1646         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
1647
1648         /*
1649          * The timeout value is specified in 100 nanosecond units
1650          * and can be a positive or negative number. If it's positive,
1651          * then the duetime is absolute, and we need to convert it
1652          * to an offset relative to now in order to use it.
1653          * If it's negative, then the duetime is relative and we
1654          * just have to convert the units.
1655          */
1656
1657         if (duetime != NULL) {
1658                 if (*duetime < 0) {
1659                         tv.tv_sec = - (*duetime) / 10000000;
1660                         tv.tv_usec = (- (*duetime) / 10) -
1661                             (tv.tv_sec * 1000000);
1662                 } else {
1663                         ntoskrnl_time(&curtime);
1664                         if (*duetime < curtime)
1665                                 tv.tv_sec = tv.tv_usec = 0;
1666                         else {
1667                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1668                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1669                                     (tv.tv_sec * 1000000);
1670                         }
1671                 }
1672         }
1673
1674         if (duetime == NULL)
1675                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1676         else
1677                 error = cv_timedwait(&we.we_cv,
1678                     &ntoskrnl_dispatchlock, tvtohz_high(&tv));
1679
1680         RemoveEntryList(&w.wb_waitlist);
1681
1682         cv_destroy(&we.we_cv);
1683
1684         /* We timed out. Leave the object alone and return status. */
1685
1686         if (error == EWOULDBLOCK) {
1687                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1688                 return (STATUS_TIMEOUT);
1689         }
1690
1691         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1692
1693         return (STATUS_SUCCESS);
1694 /*
1695         return (KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
1696             mode, alertable, duetime, &w));
1697 */
1698 }
1699
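/*
 * KeWaitForMultipleObjects() waits on up to MAX_WAIT_OBJECTS dispatcher
 * objects at once. A WAITTYPE_ANY wait is satisfied (and STATUS_WAIT_0
 * plus the object's index returned) as soon as any one object is
 * signalled; a WAITTYPE_ALL wait requires every object to be signalled.
 * As in Windows, a caller waiting on more than THREAD_WAIT_OBJECTS
 * objects must supply its own wait block array.
 */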
1700 static uint32_t
1701 KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
1702         uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
1703         wait_block *wb_array)
1704 {
1705         struct thread           *td = curthread;
1706         wait_block              *whead, *w;
1707         wait_block              _wb_array[MAX_WAIT_OBJECTS];
1708         nt_dispatch_header      *cur;
1709         struct timeval          tv;
1710         int                     i, wcnt = 0, error = 0;
1711         uint64_t                curtime;
1712         struct timespec         t1, t2;
1713         uint32_t                status = STATUS_SUCCESS;
1714         wb_ext                  we;
1715
1716         if (cnt > MAX_WAIT_OBJECTS)
1717                 return (STATUS_INVALID_PARAMETER);
1718         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
1719                 return (STATUS_INVALID_PARAMETER);
1720
1721         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1722
1723         cv_init(&we.we_cv, "KeWFM");
1724         we.we_td = td;
1725
1726         if (wb_array == NULL)
1727                 whead = _wb_array;
1728         else
1729                 whead = wb_array;
1730
1731         bzero((char *)whead, sizeof(wait_block) * cnt);
1732
1733         /* First pass: see if we can satisfy any waits immediately. */
1734
1735         wcnt = 0;
1736         w = whead;
1737
1738         for (i = 0; i < cnt; i++) {
1739                 InsertTailList((&obj[i]->dh_waitlisthead),
1740                     (&w->wb_waitlist));
1741                 w->wb_ext = &we;
1742                 w->wb_object = obj[i];
1743                 w->wb_waittype = wtype;
1744                 w->wb_waitkey = i;
1745                 w->wb_awakened = FALSE;
1746                 w->wb_oldpri = td->td_pri;
1747                 w->wb_next = w + 1;
1748                 w++;
1749                 wcnt++;
1750                 if (ntoskrnl_is_signalled(obj[i], td)) {
1751                         /*
1752                          * There's a limit to how many times
1753                          * we can recursively acquire a mutant.
1754                          * If we hit the limit, something
1755                          * is very wrong.
1756                          */
1757                         if (obj[i]->dh_sigstate == INT32_MIN &&
1758                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
1759                                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1760                                 panic("mutant limit exceeded");
1761                         }
1762
1763                         /*
1764                          * If this is a WAITTYPE_ANY wait, then
1765                          * satisfy the waited object and exit
1766                          * right now.
1767                          */
1768
1769                         if (wtype == WAITTYPE_ANY) {
1770                                 ntoskrnl_satisfy_wait(obj[i], td);
1771                                 status = STATUS_WAIT_0 + i;
1772                                 goto wait_done;
1773                         } else {
1774                                 w--;
1775                                 wcnt--;
1776                                 w->wb_object = NULL;
1777                                 RemoveEntryList(&w->wb_waitlist);
1778                         }
1779                 }
1780         }
1781
1782         /*
1783          * If this is a WAITTYPE_ALL wait and all objects are
1784          * already signalled, satisfy the waits and exit now.
1785          */
1786
1787         if (wtype == WAITTYPE_ALL && wcnt == 0) {
1788                 for (i = 0; i < cnt; i++)
1789                         ntoskrnl_satisfy_wait(obj[i], td);
1790                 status = STATUS_SUCCESS;
1791                 goto wait_done;
1792         }
1793
1794         /*
1795          * Create a circular waitblock list. The waitcount
1796          * must always be non-zero when we get here.
1797          */
1798
1799         (w - 1)->wb_next = whead;
1800
1801         /* Wait on any objects that aren't yet signalled. */
1802
1803         /* Calculate timeout, if any. */
1804
1805         if (duetime != NULL) {
1806                 if (*duetime < 0) {
1807                         tv.tv_sec = - (*duetime) / 10000000;
1808                         tv.tv_usec = (- (*duetime) / 10) -
1809                             (tv.tv_sec * 1000000);
1810                 } else {
1811                         ntoskrnl_time(&curtime);
1812                         if (*duetime < curtime)
1813                                 tv.tv_sec = tv.tv_usec = 0;
1814                         else {
1815                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1816                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1817                                     (tv.tv_sec * 1000000);
1818                         }
1819                 }
1820         }
1821
1822         while (wcnt) {
1823                 nanotime(&t1);
1824
1825                 if (duetime == NULL)
1826                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1827                 else
1828                         error = cv_timedwait(&we.we_cv,
1829                             &ntoskrnl_dispatchlock, tvtohz_high(&tv));
1830
1831                 /* Wait with timeout expired. */
1832
1833                 if (error) {
1834                         status = STATUS_TIMEOUT;
1835                         goto wait_done;
1836                 }
1837
1838                 nanotime(&t2);
1839
1840                 /* See what's been signalled. */
1841
1842                 w = whead;
1843                 do {
1844                         cur = w->wb_object;
1845                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
1846                             w->wb_awakened == TRUE) {
1847                                 /* Sanity check the signal state value. */
1848                                 if (cur->dh_sigstate == INT32_MIN &&
1849                                     cur->dh_type == DISP_TYPE_MUTANT) {
1850                                         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1851                                         panic("mutant limit exceeded");
1852                                 }
1853                                 wcnt--;
1854                                 if (wtype == WAITTYPE_ANY) {
1855                                         status = STATUS_WAIT_0 +
1856                                             w->wb_waitkey;
1857                                         goto wait_done;
1858                                 }
1859                         }
1860                         w = w->wb_next;
1861                 } while (w != whead);
1862
1863                 /*
1864                  * If all objects have been signalled, or if this
1865          * is a WAITTYPE_ANY wait and we were woken up by
1866                  * someone, we can bail.
1867                  */
1868
1869                 if (wcnt == 0) {
1870                         status = STATUS_SUCCESS;
1871                         goto wait_done;
1872                 }
1873
1874                 /*
1875                  * If this is WAITTYPE_ALL wait, and there's still
1876                  * objects that haven't been signalled, deduct the
1877                  * time that's elapsed so far from the timeout and
1878                  * wait again (or continue waiting indefinitely if
1879                  * there's no timeout).
1880                  */
1881
1882                 if (duetime != NULL) {
1883                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1884                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1885                 }
1886         }
1887
1888
1889 wait_done:
1890
1891         cv_destroy(&we.we_cv);
1892
1893         for (i = 0; i < cnt; i++) {
1894                 if (whead[i].wb_object != NULL)
1895                         RemoveEntryList(&whead[i].wb_waitlist);
1896
1897         }
1898         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1899
1900         return (status);
1901 }
1902
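/*
 * Register access helpers. NDIS hands us kernel virtual addresses for
 * memory-mapped device registers, so these simply issue bus_space
 * accesses with a zero bus handle and the register address as the
 * offset.
 */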
1903 static void
1904 WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
1905 {
1906         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1907 }
1908
1909 static uint16_t
1910 READ_REGISTER_USHORT(uint16_t *reg)
1911 {
1912         return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1913 }
1914
1915 static void
1916 WRITE_REGISTER_ULONG(uint32_t *reg, uint32_t val)
1917 {
1918         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1919 }
1920
1921 static uint32_t
1922 READ_REGISTER_ULONG(uint32_t *reg)
1923 {
1924         return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1925 }
1926
1927 static uint8_t
1928 READ_REGISTER_UCHAR(uint8_t *reg)
1929 {
1930         return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1931 }
1932
1933 static void
1934 WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
1935 {
1936         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1937 }
1938
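/*
 * 64-bit integer arithmetic helpers. Windows drivers built for 32-bit
 * x86 import these compiler support routines (_allmul(), _aulldiv(),
 * _allshr() and friends) from ntoskrnl rather than open-coding 64-bit
 * math, so we have to supply them as well.
 */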
1939 static int64_t
1940 _allmul(int64_t a, int64_t b)
1941 {
1942         return (a * b);
1943 }
1944
1945 static int64_t
1946 _alldiv(int64_t a, int64_t b)
1947 {
1948         return (a / b);
1949 }
1950
1951 static int64_t
1952 _allrem(int64_t a, int64_t b)
1953 {
1954         return (a % b);
1955 }
1956
1957 static uint64_t
1958 _aullmul(uint64_t a, uint64_t b)
1959 {
1960         return (a * b);
1961 }
1962
1963 static uint64_t
1964 _aulldiv(uint64_t a, uint64_t b)
1965 {
1966         return (a / b);
1967 }
1968
1969 static uint64_t
1970 _aullrem(uint64_t a, uint64_t b)
1971 {
1972         return (a % b);
1973 }
1974
1975 static int64_t
1976 _allshl(int64_t a, uint8_t b)
1977 {
1978         return (a << b);
1979 }
1980
1981 static uint64_t
1982 _aullshl(uint64_t a, uint8_t b)
1983 {
1984         return (a << b);
1985 }
1986
1987 static int64_t
1988 _allshr(int64_t a, uint8_t b)
1989 {
1990         return (a >> b);
1991 }
1992
1993 static uint64_t
1994 _aullshr(uint64_t a, uint8_t b)
1995 {
1996         return (a >> b);
1997 }
1998
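/*
 * Interlocked singly-linked list support. The push/pop helpers below
 * keep the depth and sequence fields of the list header up to date;
 * the exported InterlockedPush/PopEntrySList() wrappers serialize
 * access with the ntoskrnl_interlock spinlock rather than using the
 * lock-free compare-and-swap scheme Windows employs.
 */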
1999 static slist_entry *
2000 ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
2001 {
2002         slist_entry             *oldhead;
2003
2004         oldhead = head->slh_list.slh_next;
2005         entry->sl_next = head->slh_list.slh_next;
2006         head->slh_list.slh_next = entry;
2007         head->slh_list.slh_depth++;
2008         head->slh_list.slh_seq++;
2009
2010         return (oldhead);
2011 }
2012
2013 static void
2014 InitializeSListHead(slist_header *head)
2015 {
2016         memset(head, 0, sizeof(*head));
2017 }
2018
2019 static slist_entry *
2020 ntoskrnl_popsl(slist_header *head)
2021 {
2022         slist_entry             *first;
2023
2024         first = head->slh_list.slh_next;
2025         if (first != NULL) {
2026                 head->slh_list.slh_next = first->sl_next;
2027                 head->slh_list.slh_depth--;
2028                 head->slh_list.slh_seq++;
2029         }
2030
2031         return (first);
2032 }
2033
2034 /*
2035  * We need this to make lookaside lists work for amd64.
2036  * We store a pointer to ExAllocatePoolWithTag() in the lookaside
2037  * list structure. For amd64 to work right, this has to be a
2038  * pointer to the wrapped version of the routine, not the
2039  * original. Letting the Windows driver invoke the original
2040  * function directly will result in a calling convention
2041  * mismatch and a crash. On x86, this effectively
2042  * becomes a no-op since ipt_func and ipt_wrap are the same.
2043  */
2044
2045 static funcptr
2046 ntoskrnl_findwrap(funcptr func)
2047 {
2048         image_patch_table       *patch;
2049
2050         patch = ntoskrnl_functbl;
2051         while (patch->ipt_func != NULL) {
2052                 if ((funcptr)patch->ipt_func == func)
2053                         return ((funcptr)patch->ipt_wrap);
2054                 patch++;
2055         }
2056
2057         return (NULL);
2058 }
2059
2060 static void
2061 ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
2062         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2063         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2064 {
2065         bzero((char *)lookaside, sizeof(paged_lookaside_list));
2066
2067         if (size < sizeof(slist_entry))
2068                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2069         else
2070                 lookaside->nll_l.gl_size = size;
2071         lookaside->nll_l.gl_tag = tag;
2072         if (allocfunc == NULL)
2073                 lookaside->nll_l.gl_allocfunc =
2074                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2075         else
2076                 lookaside->nll_l.gl_allocfunc = allocfunc;
2077
2078         if (freefunc == NULL)
2079                 lookaside->nll_l.gl_freefunc =
2080                     ntoskrnl_findwrap((funcptr)ExFreePool);
2081         else
2082                 lookaside->nll_l.gl_freefunc = freefunc;
2083
2084         lookaside->nll_l.gl_type = NonPagedPool;
2085         lookaside->nll_l.gl_depth = depth;
2086         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2087 }
2088
2089 static void
2090 ExDeletePagedLookasideList(paged_lookaside_list *lookaside)
2091 {
2092         void                    *buf;
2093         void            (*freefunc)(void *);
2094
2095         freefunc = lookaside->nll_l.gl_freefunc;
2096         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2097                 MSCALL1(freefunc, buf);
2098 }
2099
2100 static void
2101 ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
2102         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2103         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2104 {
2105         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2106
2107         if (size < sizeof(slist_entry))
2108                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2109         else
2110                 lookaside->nll_l.gl_size = size;
2111         lookaside->nll_l.gl_tag = tag;
2112         if (allocfunc == NULL)
2113                 lookaside->nll_l.gl_allocfunc =
2114                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2115         else
2116                 lookaside->nll_l.gl_allocfunc = allocfunc;
2117
2118         if (freefunc == NULL)
2119                 lookaside->nll_l.gl_freefunc =
2120                     ntoskrnl_findwrap((funcptr)ExFreePool);
2121         else
2122                 lookaside->nll_l.gl_freefunc = freefunc;
2123
2124         lookaside->nll_l.gl_type = NonPagedPool;
2125         lookaside->nll_l.gl_depth = depth;
2126         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2127 }
2128
2129 static void
2130 ExDeleteNPagedLookasideList(npaged_lookaside_list *lookaside)
2131 {
2132         void                    *buf;
2133         void            (*freefunc)(void *);
2134
2135         freefunc = lookaside->nll_l.gl_freefunc;
2136         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2137                 MSCALL1(freefunc, buf);
2138 }
2139
2140 slist_entry *
2141 InterlockedPushEntrySList(slist_header *head, slist_entry *entry)
2142 {
2143         slist_entry             *oldhead;
2144
2145         mtx_spinlock(&ntoskrnl_interlock);
2146         oldhead = ntoskrnl_pushsl(head, entry);
2147         mtx_spinunlock(&ntoskrnl_interlock);
2148
2149         return (oldhead);
2150 }
2151
2152 slist_entry *
2153 InterlockedPopEntrySList(slist_header *head)
2154 {
2155         slist_entry             *first;
2156
2157         mtx_spinlock(&ntoskrnl_interlock);
2158         first = ntoskrnl_popsl(head);
2159         mtx_spinunlock(&ntoskrnl_interlock);
2160
2161         return (first);
2162 }
2163
2164 static slist_entry *
2165 ExInterlockedPushEntrySList(slist_header *head, slist_entry *entry,
2166     kspin_lock *lock)
2167 {
2168         return (InterlockedPushEntrySList(head, entry));
2169 }
2170
2171 static slist_entry *
2172 ExInterlockedPopEntrySList(slist_header *head, kspin_lock *lock)
2173 {
2174         return (InterlockedPopEntrySList(head));
2175 }
2176
2177 uint16_t
2178 ExQueryDepthSList(slist_header *head)
2179 {
2180         uint16_t                depth;
2181
2182         mtx_spinlock(&ntoskrnl_interlock);
2183         depth = head->slh_list.slh_depth;
2184         mtx_spinunlock(&ntoskrnl_interlock);
2185
2186         return (depth);
2187 }
2188
2189 void
2190 KeInitializeSpinLock(kspin_lock *lock)
2191 {
2192         *lock = 0;
2193 }
2194
2195 void
2196 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2197 {
2198         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2199                 /* sit and spin */;
2200 }
2201
2202 void
2203 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2204 {
2205         atomic_store_rel_int((volatile u_int *)lock, 0);
2206 }
2207
2208 uintptr_t
2209 InterlockedExchange(volatile uint32_t *dst, uintptr_t val)
2210 {
2211         uintptr_t               r;
2212
2213         mtx_spinlock(&ntoskrnl_interlock);
2214         r = *dst;
2215         *dst = val;
2216         mtx_spinunlock(&ntoskrnl_interlock);
2217
2218         return (r);
2219 }
2220
2221 static uint32_t
2222 InterlockedIncrement(volatile uint32_t *addend)
2223 {
2224         atomic_add_int((volatile u_int *)addend, 1);
2225         return (*addend);
2226 }
2227
2228 static uint32_t
2229 InterlockedDecrement(volatile uint32_t *addend)
2230 {
2231         atomic_subtract_int((volatile u_int *)addend, 1);
2232         return (*addend);
2233 }
2234
2235 static void
2236 ExInterlockedAddLargeStatistic(uint64_t *addend, uint32_t inc)
2237 {
2238         mtx_spinlock(&ntoskrnl_interlock);
2239         *addend += inc;
2240         mtx_spinunlock(&ntoskrnl_interlock);
2241 }
2242
2243 mdl *
2244 IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
2245         uint8_t chargequota, irp *iopkt)
2246 {
2247         mdl                     *m;
2248         int                     zone = 0;
2249
2250         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2251                 m = ExAllocatePoolWithTag(NonPagedPool,
2252                     MmSizeOfMdl(vaddr, len), 0);
2253         else {
2254                 m = objcache_get(mdl_cache, M_NOWAIT);
2255                 if (m != NULL)
2256                         bzero(m, sizeof(mdl));
2257                 zone++;
2257         }
2258
2259         if (m == NULL)
2260                 return (NULL);
2261
2262         MmInitializeMdl(m, vaddr, len);
2263
2264         /*
2265          * MmInitializeMdl() clears the flags field, so we
2266          * have to set this here. If the MDL came from the
2267          * mdl objcache, tag it so we can release it to
2268          * the right place later.
2269          */
2270         if (zone)
2271                 m->mdl_flags = MDL_ZONE_ALLOCED;
2272
2273         if (iopkt != NULL) {
2274                 if (secondarybuf == TRUE) {
2275                         mdl                     *last;
2276                         last = iopkt->irp_mdl;
2277                         while (last->mdl_next != NULL)
2278                                 last = last->mdl_next;
2279                         last->mdl_next = m;
2280                 } else {
2281                         if (iopkt->irp_mdl != NULL)
2282                                 panic("leaking an MDL in IoAllocateMdl()");
2283                         iopkt->irp_mdl = m;
2284                 }
2285         }
2286
2287         return (m);
2288 }
2289
2290 void
2291 IoFreeMdl(mdl *m)
2292 {
2293         if (m == NULL)
2294                 return;
2295
2296         if (m->mdl_flags & MDL_ZONE_ALLOCED)
2297                 objcache_put(mdl_cache, m);
2298         else
2299                 ExFreePool(m);
2300 }
2301
2302 static void *
2303 MmAllocateContiguousMemory(uint32_t size, uint64_t highest)
2304 {
2305         void *addr;
2306         size_t pagelength = roundup(size, PAGE_SIZE);
2307
2308         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2309
2310         return (addr);
2311 }
2312
2313 #if 0 /* XXX swildner */
2314 static void *
2315 MmAllocateContiguousMemorySpecifyCache(uint32_t size, uint64_t lowest,
2316     uint64_t highest, uint64_t boundary, enum nt_caching_type cachetype)
2317 {
2318         vm_memattr_t            memattr;
2319         void                    *ret;
2320
2321         switch (cachetype) {
2322         case MmNonCached:
2323                 memattr = VM_MEMATTR_UNCACHEABLE;
2324                 break;
2325         case MmWriteCombined:
2326                 memattr = VM_MEMATTR_WRITE_COMBINING;
2327                 break;
2328         case MmNonCachedUnordered:
2329                 memattr = VM_MEMATTR_UNCACHEABLE;
2330                 break;
2331         case MmCached:
2332         case MmHardwareCoherentCached:
2333         case MmUSWCCached:
2334         default:
2335                 memattr = VM_MEMATTR_DEFAULT;
2336                 break;
2337         }
2338
2339         ret = (void *)kmem_alloc_contig(kernel_map, size, M_ZERO | M_NOWAIT,
2340             lowest, highest, PAGE_SIZE, boundary, memattr);
2341         if (ret != NULL)
2342                 malloc_type_allocated(M_DEVBUF, round_page(size));
2343         return (ret);
2344 }
2345 #else
2346 static void *
2347 MmAllocateContiguousMemorySpecifyCache(uint32_t size, uint64_t lowest,
2348     uint64_t highest, uint64_t boundary, enum nt_caching_type cachetype)
2349 {
2350 #if 0
2351         void *addr;
2352         size_t pagelength = roundup(size, PAGE_SIZE);
2353
2354         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2355
2356         return(addr);
2357 #else
2358         panic("%s", __func__);
2359 #endif
2360 }
2361 #endif
2362
2363 static void
2364 MmFreeContiguousMemory(void *base)
2365 {
2366         ExFreePool(base);
2367 }
2368
2369 static void
2370 MmFreeContiguousMemorySpecifyCache(void *base, uint32_t size,
2371     enum nt_caching_type cachetype)
2372 {
2373         contigfree(base, size, M_DEVBUF);
2374 }
2375
2376 static uint32_t
2377 MmSizeOfMdl(void *vaddr, size_t len)
2378 {
2379         uint32_t                l;
2380
2381         l = sizeof(struct mdl) +
2382             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2383
2384         return (l);
2385 }
2386
2387 /*
2388  * The Microsoft documentation says this routine fills in the
2389  * page array of an MDL with the _physical_ page addresses that
2390  * comprise the buffer, but we don't really want to do that here.
2391  * Instead, we just fill in the page array with the kernel virtual
2392  * addresses of the buffers.
2393  */
2394 void
2395 MmBuildMdlForNonPagedPool(mdl *m)
2396 {
2397         vm_offset_t             *mdl_pages;
2398         int                     pagecnt, i;
2399
2400         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2401
2402         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2403                 panic("not enough pages in MDL to describe buffer");
2404
2405         mdl_pages = MmGetMdlPfnArray(m);
2406
2407         for (i = 0; i < pagecnt; i++)
2408                 mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2409
2410         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2411         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2412 }
2413
2414 static void *
2415 MmMapLockedPages(mdl *buf, uint8_t accessmode)
2416 {
2417         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2418         return (MmGetMdlVirtualAddress(buf));
2419 }
2420
2421 static void *
2422 MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
2423         void *vaddr, uint32_t bugcheck, uint32_t prio)
2424 {
2425         return (MmMapLockedPages(buf, accessmode));
2426 }
2427
2428 static void
2429 MmUnmapLockedPages(void *vaddr, mdl *buf)
2430 {
2431         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2432 }
2433
2434 /*
2435  * This function has a problem in that it will break if you
2436  * compile this module without PAE and try to use it on a PAE
2437  * kernel. Unfortunately, there's no way around this at the
2438  * moment. It's slightly less broken than using pmap_kextract().
2439  * You'd think the virtual memory subsystem would help us out
2440  * here, but it doesn't.
2441  */
2442
2443 static uint64_t
2444 MmGetPhysicalAddress(void *base)
2445 {
2446         return (pmap_extract(kernel_map.pmap, (vm_offset_t)base));
2447 }
2448
2449 void *
2450 MmGetSystemRoutineAddress(unicode_string *ustr)
2451 {
2452         ansi_string             astr;
2453
2454         if (RtlUnicodeStringToAnsiString(&astr, ustr, TRUE))
2455                 return (NULL);
2456         return (ndis_get_routine_address(ntoskrnl_functbl, astr.as_buf));
2457 }
2458
2459 uint8_t
2460 MmIsAddressValid(void *vaddr)
2461 {
2462         if (pmap_extract(kernel_map.pmap, (vm_offset_t)vaddr))
2463                 return (TRUE);
2464
2465         return (FALSE);
2466 }
2467
2468 void *
2469 MmMapIoSpace(uint64_t paddr, uint32_t len, uint32_t cachetype)
2470 {
2471         devclass_t              nexus_class;
2472         device_t                *nexus_devs, devp;
2473         int                     nexus_count = 0;
2474         device_t                matching_dev = NULL;
2475         struct resource         *res;
2476         int                     i;
2477         vm_offset_t             v;
2478
2479         /* There will always be at least one nexus. */
2480
2481         nexus_class = devclass_find("nexus");
2482         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2483
2484         for (i = 0; i < nexus_count; i++) {
2485                 devp = nexus_devs[i];
2486                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2487                 if (matching_dev)
2488                         break;
2489         }
2490
2491         kfree(nexus_devs, M_TEMP);
2492
2493         if (matching_dev == NULL)
2494                 return (NULL);
2495
2496         v = (vm_offset_t)rman_get_virtual(res);
2497         if (paddr > rman_get_start(res))
2498                 v += paddr - rman_get_start(res);
2499
2500         return ((void *)v);
2501 }
2502
2503 void
2504 MmUnmapIoSpace(void *vaddr, size_t len)
2505 {
2506 }
2507
2508
2509 static device_t
2510 ntoskrnl_finddev(device_t dev, uint64_t paddr, struct resource **res)
2511 {
2512         device_t                *children = NULL;
2513         device_t                matching_dev;
2514         int                     childcnt;
2515         struct resource         *r;
2516         struct resource_list    *rl;
2517         struct resource_list_entry      *rle;
2518         uint32_t                flags;
2519         int                     i;
2520
2521         /* We only want devices that have been successfully probed. */
2522
2523         if (device_is_alive(dev) == FALSE)
2524                 return (NULL);
2525
2526         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2527         if (rl != NULL) {
2528                 SLIST_FOREACH(rle, rl, link) {
2529                         r = rle->res;
2530
2531                         if (r == NULL)
2532                                 continue;
2533
2534                         flags = rman_get_flags(r);
2535
2536                         if (rle->type == SYS_RES_MEMORY &&
2537                             paddr >= rman_get_start(r) &&
2538                             paddr <= rman_get_end(r)) {
2539                                 if (!(flags & RF_ACTIVE))
2540                                         bus_activate_resource(dev,
2541                                             SYS_RES_MEMORY, 0, r);
2542                                 *res = r;
2543                                 return (dev);
2544                         }
2545                 }
2546         }
2547
2548         /*
2549          * If this device has children, do another
2550          * level of recursion to inspect them.
2551          */
2552
2553         device_get_children(dev, &children, &childcnt);
2554
2555         for (i = 0; i < childcnt; i++) {
2556                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2557                 if (matching_dev != NULL) {
2558                         kfree(children, M_TEMP);
2559                         return (matching_dev);
2560                 }
2561         }
2562
2563
2564         /* Won't somebody please think of the children! */
2565
2566         if (children != NULL)
2567                 kfree(children, M_TEMP);
2568
2569         return (NULL);
2570 }
2571
2572 /*
2573  * Workitems are unlike DPCs, in that they run in a worker thread at
2574  * PASSIVE_LEVEL rather than at DISPATCH_LEVEL in an arbitrary thread
2575  * context. In our case we run them in dedicated kernel threads.
2576  */
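/*
 * A minimal sketch of how a driver uses the workitem API implemented
 * below (the callback and context names are illustrative; the callback
 * must be a Windows-calling-convention function, and the queue type
 * argument is ignored by this implementation):
 *
 *        io_workitem *iw;
 *
 *        iw = IoAllocateWorkItem(dobj);
 *        if (iw != NULL)
 *                IoQueueWorkItem(iw, my_wrapped_callback, 0, my_ctx);
 *        ...
 *        IoFreeWorkItem(iw);
 */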
2577 static void
2578 ntoskrnl_workitem_thread(void *arg)
2579 {
2580         kdpc_queue              *kq;
2581         list_entry              *l;
2582         io_workitem             *iw;
2583         uint8_t                 irql;
2584
2585         kq = arg;
2586
2587         InitializeListHead(&kq->kq_disp);
2588         kq->kq_td = curthread;
2589         kq->kq_exit = 0;
2590         KeInitializeSpinLock(&kq->kq_lock);
2591         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2592
2593         while (1) {
2594                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2595
2596                 KeAcquireSpinLock(&kq->kq_lock, &irql);
2597
2598                 if (kq->kq_exit) {
2599                         kq->kq_exit = 0;
2600                         KeReleaseSpinLock(&kq->kq_lock, irql);
2601                         break;
2602                 }
2603
2604                 while (!IsListEmpty(&kq->kq_disp)) {
2605                         l = RemoveHeadList(&kq->kq_disp);
2606                         iw = CONTAINING_RECORD(l,
2607                             io_workitem, iw_listentry);
2608                         InitializeListHead((&iw->iw_listentry));
2609                         if (iw->iw_func == NULL)
2610                                 continue;
2611                         KeReleaseSpinLock(&kq->kq_lock, irql);
2612                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2613                         KeAcquireSpinLock(&kq->kq_lock, &irql);
2614                 }
2615
2616                 KeReleaseSpinLock(&kq->kq_lock, irql);
2617         }
2618
2619         wakeup(curthread);
2620         kthread_exit();
2621         return; /* notreached */
2622 }
2623
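/*
 * RtlCharToInteger() converts an ASCII string to an unsigned integer.
 * Leading whitespace and an optional sign are skipped; if base is 0
 * it is inferred from a 0b/0o/0x prefix (defaulting to 10), otherwise
 * only bases 2, 8, 10 and 16 are accepted. A NULL argument yields
 * STATUS_ACCESS_VIOLATION and an invalid digit STATUS_INVALID_PARAMETER.
 */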
2624 static ndis_status
2625 RtlCharToInteger(const char *src, uint32_t base, uint32_t *val)
2626 {
2627         int negative = 0;
2628         uint32_t res;
2629
2630         if (!src || !val)
2631                 return (STATUS_ACCESS_VIOLATION);
2632         while (*src != '\0' && *src <= ' ')
2633                 src++;
2634         if (*src == '+')
2635                 src++;
2636         else if (*src == '-') {
2637                 src++;
2638                 negative = 1;
2639         }
2640         if (base == 0) {
2641                 base = 10;
2642                 if (*src == '0') {
2643                         src++;
2644                         if (*src == 'b') {
2645                                 base = 2;
2646                                 src++;
2647                         } else if (*src == 'o') {
2648                                 base = 8;
2649                                 src++;
2650                         } else if (*src == 'x') {
2651                                 base = 16;
2652                                 src++;
2653                         }
2654                 }
2655         } else if (!(base == 2 || base == 8 || base == 10 || base == 16))
2656                 return (STATUS_INVALID_PARAMETER);
2657
2658         for (res = 0; *src; src++) {
2659                 int v;
2660                 if (isdigit(*src))
2661                         v = *src - '0';
2662                 else if (isxdigit(*src))
2663                         v = tolower(*src) - 'a' + 10;
2664                 else
2665                         v = base;
2666                 if (v >= base)
2667                         return (STATUS_INVALID_PARAMETER);
2668                 res = res * base + v;
2669         }
2670         *val = negative ? -res : res;
2671         return (STATUS_SUCCESS);
2672 }
2673
2674 static void
2675 ntoskrnl_destroy_workitem_threads(void)
2676 {
2677         kdpc_queue              *kq;
2678         int                     i;
2679
2680         for (i = 0; i < WORKITEM_THREADS; i++) {
2681                 kq = wq_queues + i;
2682                 kq->kq_exit = 1;
2683                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2684                 while (kq->kq_exit)
2685                         tsleep(kq->kq_td, 0, "waitiw", hz/10);
2686         }
2687 }
2688
2689 io_workitem *
2690 IoAllocateWorkItem(device_object *dobj)
2691 {
2692         io_workitem             *iw;
2693
2694         iw = objcache_get(iw_cache, M_NOWAIT);
2695         if (iw == NULL)
2696                 return (NULL);
2697
2698         InitializeListHead(&iw->iw_listentry);
2699         iw->iw_dobj = dobj;
2700
2701         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
2702         iw->iw_idx = wq_idx;
2703         WORKIDX_INC(wq_idx);
2704         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
2705
2706         return (iw);
2707 }
2708
2709 void
2710 IoFreeWorkItem(io_workitem *iw)
2711 {
2712         objcache_put(iw_cache, iw);
2713 }
2714
2715 void
2716 IoQueueWorkItem(io_workitem *iw, io_workitem_func iw_func, uint32_t qtype,
2717     void *ctx)
2718 {
2719         kdpc_queue              *kq;
2720         list_entry              *l;
2721         io_workitem             *cur;
2722         uint8_t                 irql;
2723
2724         kq = wq_queues + iw->iw_idx;
2725
2726         KeAcquireSpinLock(&kq->kq_lock, &irql);
2727
2728         /*
2729          * Traverse the list and make sure this workitem hasn't
2730          * already been inserted. Queuing the same workitem
2731          * twice will hose the list but good.
2732          */
2733
2734         l = kq->kq_disp.nle_flink;
2735         while (l != &kq->kq_disp) {
2736                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2737                 if (cur == iw) {
2738                         /* Already queued -- do nothing. */
2739                         KeReleaseSpinLock(&kq->kq_lock, irql);
2740                         return;
2741                 }
2742                 l = l->nle_flink;
2743         }
2744
2745         iw->iw_func = iw_func;
2746         iw->iw_ctx = ctx;
2747
2748         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2749         KeReleaseSpinLock(&kq->kq_lock, irql);
2750
2751         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2752 }
2753
2754 static void
2755 ntoskrnl_workitem(device_object *dobj, void *arg)
2756 {
2757         io_workitem             *iw;
2758         work_queue_item         *w;
2759         work_item_func          f;
2760
2761         iw = arg;
2762         w = (work_queue_item *)dobj;
2763         f = (work_item_func)w->wqi_func;
2764         objcache_put(iw_cache, iw);
2765         MSCALL2(f, w, w->wqi_ctx);
2766 }
2767
2768 /*
2769  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2770  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2771  * problem with ExQueueWorkItem() is that it can't guard against
2772  * the condition where a driver submits a job to the work queue and
2773  * is then unloaded before the job is able to run. IoQueueWorkItem()
2774  * acquires a reference to the device's device_object via the
2775  * object manager and retains it until after the job has completed,
2776  * which prevents the driver from being unloaded before the job
2777  * runs. (We don't currently support this behavior, though hopefully
2778  * that will change once the object manager API is fleshed out a bit.)
2779  *
2780  * Having said all that, the ExQueueWorkItem() API remains, because
2781  * there are still other parts of Windows that use it, including
2782  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2783  * We fake up the ExQueueWorkItem() API on top of our implementation
2784  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2785  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2786  * queue item (provided by the caller) in to IoAllocateWorkItem()
2787  * instead of the device_object. We need to save this pointer so
2788  * we can apply a sanity check: as with the DPC queue and other
2789  * workitem queues, we can't allow the same work queue item to
2790  * be queued twice. If it's already pending, we silently return.
2791  */
2792
2793 void
2794 ExQueueWorkItem(work_queue_item *w, uint32_t qtype)
2795 {
2796         io_workitem             *iw;
2797         io_workitem_func        iwf;
2798         kdpc_queue              *kq;
2799         list_entry              *l;
2800         io_workitem             *cur;
2801         uint8_t                 irql;
2802
2803
2804         /*
2805          * We need to do a special sanity test to make sure
2806          * the ExQueueWorkItem() API isn't used to queue
2807          * the same workitem twice. Rather than checking the
2808          * io_workitem pointer itself, we test the attached
2809          * device object, which is really a pointer to the
2810          * legacy work queue item structure.
2811          */
2812
2813         kq = wq_queues + WORKITEM_LEGACY_THREAD;
2814         KeAcquireSpinLock(&kq->kq_lock, &irql);
2815         l = kq->kq_disp.nle_flink;
2816         while (l != &kq->kq_disp) {
2817                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2818                 if (cur->iw_dobj == (device_object *)w) {
2819                         /* Already queued -- do nothing. */
2820                         KeReleaseSpinLock(&kq->kq_lock, irql);
2821                         return;
2822                 }
2823                 l = l->nle_flink;
2824         }
2825         KeReleaseSpinLock(&kq->kq_lock, irql);
2826
2827         iw = IoAllocateWorkItem((device_object *)w);
2828         if (iw == NULL)
2829                 return;
2830
2831         iw->iw_idx = WORKITEM_LEGACY_THREAD;
2832         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2833         IoQueueWorkItem(iw, iwf, qtype, iw);
2834 }
2835
2836 static void
2837 RtlZeroMemory(void *dst, size_t len)
2838 {
2839         bzero(dst, len);
2840 }
2841
2842 static void
2843 RtlSecureZeroMemory(void *dst, size_t len)
2844 {
2845         memset(dst, 0, len);
2846 }
2847
2848 static void
2849 RtlFillMemory(void *dst, size_t len, uint8_t c)
2850 {
2851         memset(dst, c, len);
2852 }
2853
2854 static void
2855 RtlMoveMemory(void *dst, const void *src, size_t len)
2856 {
2857         memmove(dst, src, len);
2858 }
2859
2860 static void
2861 RtlCopyMemory(void *dst, const void *src, size_t len)
2862 {
2863         bcopy(src, dst, len);
2864 }
2865
2866 static size_t
2867 RtlCompareMemory(const void *s1, const void *s2, size_t len)
2868 {
2869         size_t                  i;
2870         uint8_t                 *m1, *m2;
2871
2872         m1 = __DECONST(char *, s1);
2873         m2 = __DECONST(char *, s2);
2874
2875         for (i = 0; i < len && m1[i] == m2[i]; i++);
2876         return (i);
2877 }
2878
2879 void
2880 RtlInitAnsiString(ansi_string *dst, char *src)
2881 {
2882         ansi_string             *a;
2883
2884         a = dst;
2885         if (a == NULL)
2886                 return;
2887         if (src == NULL) {
2888                 a->as_len = a->as_maxlen = 0;
2889                 a->as_buf = NULL;
2890         } else {
2891                 a->as_buf = src;
2892                 a->as_len = a->as_maxlen = strlen(src);
2893         }
2894 }
2895
2896 void
2897 RtlInitUnicodeString(unicode_string *dst, uint16_t *src)
2898 {
2899         unicode_string          *u;
2900         int                     i;
2901
2902         u = dst;
2903         if (u == NULL)
2904                 return;
2905         if (src == NULL) {
2906                 u->us_len = u->us_maxlen = 0;
2907                 u->us_buf = NULL;
2908         } else {
2909                 i = 0;
2910                 while(src[i] != 0)
2911                         i++;
2912                 u->us_buf = src;
2913                 u->us_len = u->us_maxlen = i * 2;
2914         }
2915 }
2916
2917 ndis_status
2918 RtlUnicodeStringToInteger(unicode_string *ustr, uint32_t base, uint32_t *val)
2919 {
2920         uint16_t                *uchr;
2921         int                     len, neg = 0;
2922         char                    abuf[64];
2923         char                    *astr;
2924
2925         uchr = ustr->us_buf;
2926         len = ustr->us_len;
2927         bzero(abuf, sizeof(abuf));
2928
2929         if ((char)((*uchr) & 0xFF) == '-') {
2930                 neg = 1;
2931                 uchr++;
2932                 len -= 2;
2933         } else if ((char)((*uchr) & 0xFF) == '+') {
2934                 neg = 0;
2935                 uchr++;
2936                 len -= 2;
2937         }
2938
2939         if (base == 0) {
2940                 if ((char)((*uchr) & 0xFF) == 'b') {
2941                         base = 2;
2942                         uchr++;
2943                         len -= 2;
2944                 } else if ((char)((*uchr) & 0xFF) == 'o') {
2945                         base = 8;
2946                         uchr++;
2947                         len -= 2;
2948                 } else if ((char)((*uchr) & 0xFF) == 'x') {
2949                         base = 16;
2950                         uchr++;
2951                         len -= 2;
2952                 } else
2953                         base = 10;
2954         }
2955
2956         astr = abuf;
2957         if (neg) {
2958                 strcpy(astr, "-");
2959                 astr++;
2960         }
2961
2962         ntoskrnl_unicode_to_ascii(uchr, astr, len);
2963         *val = strtoul(abuf, NULL, base);
2964
2965         return (STATUS_SUCCESS);
2966 }
2967
2968 void
2969 RtlFreeUnicodeString(unicode_string *ustr)
2970 {
2971         if (ustr->us_buf == NULL)
2972                 return;
2973         ExFreePool(ustr->us_buf);
2974         ustr->us_buf = NULL;
2975 }
2976
2977 void
2978 RtlFreeAnsiString(ansi_string *astr)
2979 {
2980         if (astr->as_buf == NULL)
2981                 return;
2982         ExFreePool(astr->as_buf);
2983         astr->as_buf = NULL;
2984 }
2985
2986 static int
2987 atoi(const char *str)
2988 {
2989         return (int)strtol(str, NULL, 10);
2990 }
2991
2992 static long
2993 atol(const char *str)
2994 {
2995         return strtol(str, NULL, 10);
2996 }
2997
2998 static int
2999 rand(void)
3000 {
3001         struct timeval          tv;
3002
3003         microtime(&tv);
3004         skrandom(tv.tv_usec);
3005         return ((int)krandom());
3006 }
3007
3008 static void
3009 srand(unsigned int seed)
3010 {
3011         skrandom(seed);
3012 }
3013
3014 static uint8_t
3015 IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
3016 {
3017         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3018                 return (TRUE);
3019         return (FALSE);
3020 }
3021
3022 static int32_t
3023 IoOpenDeviceRegistryKey(struct device_object *devobj, uint32_t type,
3024     uint32_t mask, void **key)
3025 {
3026         return (NDIS_STATUS_INVALID_DEVICE_REQUEST);
3027 }
3028
3029 static ndis_status
3030 IoGetDeviceObjectPointer(unicode_string *name, uint32_t reqaccess,
3031     void *fileobj, device_object *devobj)
3032 {
3033         return (STATUS_SUCCESS);
3034 }
3035
3036 static ndis_status
3037 IoGetDeviceProperty(device_object *devobj, uint32_t regprop, uint32_t buflen,
3038     void *prop, uint32_t *reslen)
3039 {
3040         driver_object           *drv;
3041         uint16_t                **name;
3042
3043         drv = devobj->do_drvobj;
3044
3045         switch (regprop) {
3046         case DEVPROP_DRIVER_KEYNAME:
3047                 name = prop;
3048                 *name = drv->dro_drivername.us_buf;
3049                 *reslen = drv->dro_drivername.us_len;
3050                 break;
3051         default:
3052                 return (STATUS_INVALID_PARAMETER_2);
3053                 break;
3054         }
3055
3056         return (STATUS_SUCCESS);
3057 }
3058
3059 static void
3060 KeInitializeMutex(kmutant *kmutex, uint32_t level)
3061 {
3062         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3063         kmutex->km_abandoned = FALSE;
3064         kmutex->km_apcdisable = 1;
3065         kmutex->km_header.dh_sigstate = 1;
3066         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3067         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3068         kmutex->km_ownerthread = NULL;
3069 }
3070
3071 static uint32_t
3072 KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
3073 {
3074         uint32_t                prevstate;
3075
3076         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3077         prevstate = kmutex->km_header.dh_sigstate;
3078         if (kmutex->km_ownerthread != curthread) {
3079                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3080                 return (STATUS_MUTANT_NOT_OWNED);
3081         }
3082
3083         kmutex->km_header.dh_sigstate++;
3084         kmutex->km_abandoned = FALSE;
3085
3086         if (kmutex->km_header.dh_sigstate == 1) {
3087                 kmutex->km_ownerthread = NULL;
3088                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3089         }
3090
3091         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3092
3093         return (prevstate);
3094 }
3095
3096 static uint32_t
3097 KeReadStateMutex(kmutant *kmutex)
3098 {
3099         return (kmutex->km_header.dh_sigstate);
3100 }
3101
3102 void
3103 KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
3104 {
3105         InitializeListHead((&kevent->k_header.dh_waitlisthead));
3106         kevent->k_header.dh_sigstate = state;
3107         if (type == EVENT_TYPE_NOTIFY)
3108                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3109         else
3110                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3111         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3112 }
3113
3114 uint32_t
3115 KeResetEvent(nt_kevent *kevent)
3116 {
3117         uint32_t                prevstate;
3118
3119         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3120         prevstate = kevent->k_header.dh_sigstate;
3121         kevent->k_header.dh_sigstate = FALSE;
3122         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3123
3124         return (prevstate);
3125 }
3126
3127 uint32_t
3128 KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
3129 {
3130         uint32_t                prevstate;
3131         wait_block              *w;
3132         nt_dispatch_header      *dh;
3133         wb_ext                  *we;
3134
3135         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3136         prevstate = kevent->k_header.dh_sigstate;
3137         dh = &kevent->k_header;
3138
3139         if (IsListEmpty(&dh->dh_waitlisthead))
3140                 /*
3141                  * If there's nobody in the waitlist, just set
3142                  * the state to signalled.
3143                  */
3144                 dh->dh_sigstate = 1;
3145         else {
3146                 /*
3147                  * Get the first waiter. If this is a synchronization
3148                  * event, just wake up that one thread (don't bother
3149                  * setting the state to signalled since we're supposed
3150                  * to automatically clear synchronization events anyway).
3151                  *
3152                  * If it's a notification event, or the first
3153                  * waiter is doing a WAITTYPE_ALL wait, go through
3154                  * the full wait satisfaction process.
3155                  */
3156                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3157                     wait_block, wb_waitlist);
3158                 we = w->wb_ext;
3159                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3160                     w->wb_waittype == WAITTYPE_ALL) {
3161                         if (prevstate == 0) {
3162                                 dh->dh_sigstate = 1;
3163                                 ntoskrnl_waittest(dh, increment);
3164                         }
3165                 } else {
3166                         w->wb_awakened |= TRUE;
3167                         cv_broadcastpri(&we->we_cv,
3168                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
3169                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
3170                 }
3171         }
3172
3173         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3174
3175         return (prevstate);
3176 }
3177
3178 void
3179 KeClearEvent(nt_kevent *kevent)
3180 {
3181         kevent->k_header.dh_sigstate = FALSE;
3182 }
3183
3184 uint32_t
3185 KeReadStateEvent(nt_kevent *kevent)
3186 {
3187         return (kevent->k_header.dh_sigstate);
3188 }
3189
3190 /*
3191  * The object manager in Windows is responsible for managing
3192  * references and access to various types of objects, including
3193  * device_objects, events, threads, timers and so on. However,
3194  * there's a difference in the way objects are handled in user
3195  * mode versus kernel mode.
3196  *
3197  * In user mode (i.e. Win32 applications), all objects are
3198  * managed by the object manager. For example, when you create
3199  * a timer or event object, you actually end up with an
3200  * object_header (for the object manager's bookkeeping
3201  * purposes) and an object body (which contains the actual object
3202  * structure, e.g. ktimer, kevent, etc...). This allows Windows
3203  * to manage resource quotas and to enforce access restrictions
3204  * on basically every kind of system object handled by the kernel.
3205  *
3206  * However, in kernel mode, you only end up using the object
3207  * manager some of the time. For example, in a driver, you create
3208  * a timer object by simply allocating the memory for a ktimer
3209  * structure and initializing it with KeInitializeTimer(). Hence,
3210  * the timer has no object_header and no reference counting or
3211  * security/resource checks are done on it. The assumption in
3212  * this case is that if you're running in kernel mode, you know
3213  * what you're doing, and you're already at an elevated privilege
3214  * anyway.
3215  *
3216  * There are some exceptions to this. The two most important ones
3217  * for our purposes are device_objects and threads. We need to use
3218  * the object manager to do reference counting on device_objects,
3219  * and for threads, you can only get a pointer to a thread's
3220  * dispatch header by using ObReferenceObjectByHandle() on the
3221  * handle returned by PsCreateSystemThread().
3222  */
3223
3224 static ndis_status
3225 ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
3226         uint8_t accessmode, void **object, void **handleinfo)
3227 {
3228         nt_objref               *nr;
3229
3230         nr = kmalloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3231         if (nr == NULL)
3232                 return (STATUS_INSUFFICIENT_RESOURCES);
3233
3234         InitializeListHead((&nr->no_dh.dh_waitlisthead));
3235         nr->no_obj = handle;
3236         nr->no_dh.dh_type = DISP_TYPE_THREAD;
3237         nr->no_dh.dh_sigstate = 0;
3238         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3239             sizeof(uint32_t));
3240         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3241         *object = nr;
3242
3243         return (STATUS_SUCCESS);
3244 }
3245
3246 static void
3247 ObfDereferenceObject(void *object)
3248 {
3249         nt_objref               *nr;
3250
3251         nr = object;
3252         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3253         kfree(nr, M_DEVBUF);
3254 }
3255
3256 static uint32_t
3257 ZwClose(ndis_handle handle)
3258 {
3259         return (STATUS_SUCCESS);
3260 }
3261
3262 static uint32_t
3263 WmiQueryTraceInformation(uint32_t traceclass, void *traceinfo,
3264     uint32_t infolen, uint32_t reqlen, void *buf)
3265 {
3266         return (STATUS_NOT_FOUND);
3267 }
3268
3269 static uint32_t
3270 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3271         void *guid, uint16_t messagenum, ...)
3272 {
3273         return (STATUS_SUCCESS);
3274 }
3275
3276 static uint32_t
3277 IoWMIRegistrationControl(device_object *dobj, uint32_t action)
3278 {
3279         return (STATUS_SUCCESS);
3280 }
3281
3282 /*
3283  * This is here just in case the thread returns without calling
3284  * PsTerminateSystemThread().
3285  */
3286 static void
3287 ntoskrnl_thrfunc(void *arg)
3288 {
3289         thread_context          *thrctx;
3290         uint32_t (*tfunc)(void *);
3291         void                    *tctx;
3292         uint32_t                rval;
3293
3294         thrctx = arg;
3295         tfunc = thrctx->tc_thrfunc;
3296         tctx = thrctx->tc_thrctx;
3297         kfree(thrctx, M_TEMP);
3298
3299         rval = MSCALL1(tfunc, tctx);
3300
3301         PsTerminateSystemThread(rval);
3302         return; /* notreached */
3303 }
3304
3305 static ndis_status
3306 PsCreateSystemThread(ndis_handle *handle, uint32_t reqaccess, void *objattrs,
3307     ndis_handle phandle, void *clientid, void *thrfunc, void *thrctx)
3308 {
3309         int                     error;
3310         thread_context          *tc;
3311         struct thread           *p;
3312
3313         tc = kmalloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3314         if (tc == NULL)
3315                 return (STATUS_INSUFFICIENT_RESOURCES);
3316
3317         tc->tc_thrctx = thrctx;
3318         tc->tc_thrfunc = thrfunc;
3319
3320         error = kthread_create(ntoskrnl_thrfunc, tc, &p, "Win kthread %d",
3321             ntoskrnl_kth);
3322
3323         if (error) {
3324                 kfree(tc, M_TEMP);
3325                 return (STATUS_INSUFFICIENT_RESOURCES);
3326         }
3327
3328         *handle = p;
3329         ntoskrnl_kth++;
3330
3331         return (STATUS_SUCCESS);
3332 }
3333
3334 /*
3335  * In Windows, the exit of a thread is an event that you're allowed
3336  * to wait on, assuming you've obtained a reference to the thread using
3337  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3338  * simulate this behavior is to register each thread we create in a
3339  * reference list; when the thread exits, we signal the reference's
3340  * dispatch header so that any waiters are woken.
3341  */
3342 static ndis_status
3343 PsTerminateSystemThread(ndis_status status)
3344 {
3345         struct nt_objref        *nr;
3346
3347         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3348         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3349                 if (nr->no_obj != curthread)
3350                         continue;
3351                 nr->no_dh.dh_sigstate = 1;
3352                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3353                 break;
3354         }
3355         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3356
3357         ntoskrnl_kth--;
3358
3359         wakeup(curthread);
3360         kthread_exit();
3361         return (0);     /* notreached */
3362 }
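
/*
 * Illustrative sketch (hypothetical driver-side code, not compiled
 * here): the pattern described by the object manager and thread-exit
 * comments above.  A driver creates a system thread, turns the
 * returned handle into a waitable object with
 * ObReferenceObjectByHandle(), and later waits for the thread to call
 * PsTerminateSystemThread().  All example names are made up, error
 * handling is omitted and calling-convention wrapping is glossed over.
 */
#if 0
static uint32_t
example_thread(void *ctx)
{
	/* ... do work ... */
	PsTerminateSystemThread(STATUS_SUCCESS);
	return (0);		/* notreached */
}

static void
example_spawn_and_join(void *ctx)
{
	ndis_handle		handle;
	void			*thread_obj;

	/*
	 * The access, object-type and mode arguments are ignored by
	 * this emulation, hence the zeros/NULLs below.
	 */
	PsCreateSystemThread(&handle, 0, NULL, NULL, NULL,
	    example_thread, ctx);
	ObReferenceObjectByHandle(handle, 0, NULL, 0, &thread_obj, NULL);
	ZwClose(handle);

	/* Blocks until example_thread() terminates. */
	KeWaitForSingleObject(thread_obj, 0, 0, FALSE, NULL);
	ObfDereferenceObject(thread_obj);
}
#endif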
3363
3364 static uint32_t
3365 DbgPrint(char *fmt, ...)
3366 {
3367         va_list                 ap;
3368
3369         if (bootverbose) {
3370                 va_start(ap, fmt);
3371                 kvprintf(fmt, ap);
3372                 va_end(ap);
3373         }
3374
3375         return (STATUS_SUCCESS);
3376 }
3377
3378 static void
3379 DbgBreakPoint(void)
3380 {
3381
3382         Debugger("DbgBreakPoint(): breakpoint");
3383 }
3384
3385 static void
3386 KeBugCheckEx(uint32_t code, u_long param1, u_long param2, u_long param3,
3387     u_long param4)
3388 {
3389         panic("KeBugCheckEx: STOP 0x%X", code);
3390 }
3391
3392 static void
3393 ntoskrnl_timercall(void *arg)
3394 {
3395         ktimer                  *timer;
3396         struct timeval          tv;
3397         kdpc                    *dpc;
3398
3399         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3400
3401         timer = arg;
3402
3403 #ifdef NTOSKRNL_DEBUG_TIMERS
3404         ntoskrnl_timer_fires++;
3405 #endif
3406         ntoskrnl_remove_timer(timer);
3407
3408         /*
3409          * This should never happen, but complain
3410          * if it does.
3411          */
3412
3413         if (timer->k_header.dh_inserted == FALSE) {
3414                 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3415                 kprintf("NTOS: timer %p fired even though "
3416                     "it was canceled\n", timer);
3417                 return;
3418         }
3419
3420         /* Mark the timer as no longer being on the timer queue. */
3421
3422         timer->k_header.dh_inserted = FALSE;
3423
3424         /* Now signal the object and satisfy any waits on it. */
3425
3426         timer->k_header.dh_sigstate = 1;
3427         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3428
3429         /*
3430          * If this is a periodic timer, re-arm it
3431          * so it will fire again. We do this before
3432          * calling any deferred procedure calls because
3433          * it's possible the DPC might cancel the timer,
3434          * in which case it would be wrong for us to
3435          * re-arm it again afterwards.
3436          */
3437
3438         if (timer->k_period) {
3439                 tv.tv_sec = 0;
3440                 tv.tv_usec = timer->k_period * 1000;
3441                 timer->k_header.dh_inserted = TRUE;
3442                 ntoskrnl_insert_timer(timer, tvtohz_high(&tv));
3443 #ifdef NTOSKRNL_DEBUG_TIMERS
3444                 ntoskrnl_timer_reloads++;
3445 #endif
3446         }
3447
3448         dpc = timer->k_dpc;
3449
3450         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3451
3452         /* If there's a DPC associated with the timer, queue it up. */
3453
3454         if (dpc != NULL)
3455                 KeInsertQueueDpc(dpc, NULL, NULL);
3456 }
3457
3458 #ifdef NTOSKRNL_DEBUG_TIMERS
3459 static int
3460 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3461 {
3462         int                     ret;
3463
3464         ret = 0;
3465         ntoskrnl_show_timers();
3466         return (sysctl_handle_int(oidp, &ret, 0, req));
3467 }
3468
3469 static void
3470 ntoskrnl_show_timers(void)
3471 {
3472         int                     i = 0;
3473         list_entry              *l;
3474
3475         mtx_spinlock(&ntoskrnl_calllock);
3476         l = ntoskrnl_calllist.nle_flink;
3477         while(l != &ntoskrnl_calllist) {
3478                 i++;
3479                 l = l->nle_flink;
3480         }
3481         mtx_spinunlock(&ntoskrnl_calllock);
3482
3483         kprintf("\n");
3484         kprintf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3485         kprintf("timer sets: %qu\n", ntoskrnl_timer_sets);
3486         kprintf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3487         kprintf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3488         kprintf("timer fires: %qu\n", ntoskrnl_timer_fires);
3489         kprintf("\n");
3490 }
3491 #endif
3492
3493 /*
3494  * Must be called with dispatcher lock held.
3495  */
3496
3497 static void
3498 ntoskrnl_insert_timer(ktimer *timer, int ticks)
3499 {
3500         callout_entry           *e;
3501         list_entry              *l;
3502         struct callout          *c;
3503
3504         /*
3505          * Try and allocate a timer.
3506          */
3507         mtx_spinlock(&ntoskrnl_calllock);
3508         if (IsListEmpty(&ntoskrnl_calllist)) {
3509                 mtx_spinunlock(&ntoskrnl_calllock);
3510 #ifdef NTOSKRNL_DEBUG_TIMERS
3511                 ntoskrnl_show_timers();
3512 #endif
3513                 panic("out of timers!");
3514         }
3515         l = RemoveHeadList(&ntoskrnl_calllist);
3516         mtx_spinunlock(&ntoskrnl_calllock);
3517
3518         e = CONTAINING_RECORD(l, callout_entry, ce_list);
3519         c = &e->ce_callout;
3520
3521         timer->k_callout = c;
3522
3523         callout_init_mp(c);
3524         callout_reset(c, ticks, ntoskrnl_timercall, timer);
3525 }
3526
3527 static void
3528 ntoskrnl_remove_timer(ktimer *timer)
3529 {
3530         callout_entry           *e;
3531
3532         e = (callout_entry *)timer->k_callout;
3533         callout_stop(timer->k_callout);
3534
3535         mtx_spinlock(&ntoskrnl_calllock);
3536         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3537         mtx_spinunlock(&ntoskrnl_calllock);
3538 }
3539
3540 void
3541 KeInitializeTimer(ktimer *timer)
3542 {
3543         if (timer == NULL)
3544                 return;
3545
3546         KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3547 }
3548
3549 void
3550 KeInitializeTimerEx(ktimer *timer, uint32_t type)
3551 {
3552         if (timer == NULL)
3553                 return;
3554
3555         bzero((char *)timer, sizeof(ktimer));
3556         InitializeListHead((&timer->k_header.dh_waitlisthead));
3557         timer->k_header.dh_sigstate = FALSE;
3558         timer->k_header.dh_inserted = FALSE;
3559         if (type == EVENT_TYPE_NOTIFY)
3560                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3561         else
3562                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3563         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3564 }
3565
3566 /*
3567  * DPC subsystem. A Windows Deferred Procedure Call has the following
3568  * properties:
3569  * - It runs at DISPATCH_LEVEL.
3570  * - It can have one of 3 importance values that control when it
3571  *   runs relative to other DPCs in the queue.
3572  * - On SMP systems, it can be set to run on a specific processor.
3573  * In order to satisfy the last property, we create a DPC thread for
3574  * each CPU in the system and bind it to that CPU. Each thread
3575  * maintains a single dispatch queue; a DPC's importance determines
3576  * whether it is inserted at the head or the tail of that queue.
3577  *
3578  * In Windows, interrupt handlers run as DPCs. (Not to be confused
3579  * with ISRs, which run in interrupt context and can preempt DPCs.)
3580  * ISRs are given the highest importance so that they'll take
3581  * precedence over timers and other things.
3582  */
3583
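/*
 * Illustrative sketch (hypothetical driver-side code, not compiled
 * here): how a DPC is typically set up and queued against this API.
 * The callback signature mirrors the MSCALL4() invocation in
 * ntoskrnl_dpc_thread() below; calling-convention wrapping is glossed
 * over and all example names are made up.
 */
#if 0
static kdpc example_dpc;

static void
example_dpc_func(kdpc *dpc, void *ctx, void *sysarg1, void *sysarg2)
{
	/* Runs in the DPC thread at elevated LWKT priority. */
}

static void
example_isr(void *ctx)
{
	/* One-time setup, normally done at attach time. */
	KeInitializeDpc(&example_dpc, example_dpc_func, ctx);
	KeSetImportanceDpc(&example_dpc, KDPC_IMPORTANCE_HIGH);
	KeSetTargetProcessorDpc(&example_dpc, 0);

	/* Defer the real work; a duplicate queue attempt returns FALSE. */
	KeInsertQueueDpc(&example_dpc, NULL, NULL);
}
#endif
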
3584 static void
3585 ntoskrnl_dpc_thread(void *arg)
3586 {
3587         kdpc_queue              *kq;
3588         kdpc                    *d;
3589         list_entry              *l;
3590         uint8_t                 irql;
3591
3592         kq = arg;
3593
3594         InitializeListHead(&kq->kq_disp);
3595         kq->kq_td = curthread;
3596         kq->kq_exit = 0;
3597         kq->kq_running = FALSE;
3598         KeInitializeSpinLock(&kq->kq_lock);
3599         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3600         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3601
3602         /*
3603          * Elevate our priority. DPCs are used to run interrupt
3604          * handlers, and they should trigger as soon as possible
3605          * once scheduled by an ISR.
3606          */
3607
3608 #ifdef NTOSKRNL_MULTIPLE_DPCS
3609         sched_bind(curthread, kq->kq_cpu);
3610 #endif
3611         lwkt_setpri_self(TDPRI_INT_HIGH);
3612
3613         while (1) {
3614                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3615
3616                 KeAcquireSpinLock(&kq->kq_lock, &irql);
3617
3618                 if (kq->kq_exit) {
3619                         kq->kq_exit = 0;
3620                         KeReleaseSpinLock(&kq->kq_lock, irql);
3621                         break;
3622                 }
3623
3624                 kq->kq_running = TRUE;
3625
3626                 while (!IsListEmpty(&kq->kq_disp)) {
3627                         l = RemoveHeadList((&kq->kq_disp));
3628                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3629                         InitializeListHead((&d->k_dpclistentry));
3630                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3631                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3632                             d->k_sysarg1, d->k_sysarg2);
3633                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3634                 }
3635
3636                 kq->kq_running = FALSE;
3637
3638                 KeReleaseSpinLock(&kq->kq_lock, irql);
3639
3640                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3641         }
3642
3643         wakeup(curthread);
3644         kthread_exit();
3645         return; /* notreached */
3646 }
3647
3648 static void
3649 ntoskrnl_destroy_dpc_threads(void)
3650 {
3651         kdpc_queue              *kq;
3652         kdpc                    dpc;
3653         int                     i;
3654
3655         kq = kq_queues;
3656 #ifdef NTOSKRNL_MULTIPLE_DPCS
3657         for (i = 0; i < ncpus; i++) {
3658 #else
3659         for (i = 0; i < 1; i++) {
3660 #endif
3661                 kq = kq_queues + i;
3662
3663                 kq->kq_exit = 1;
3664                 KeInitializeDpc(&dpc, NULL, NULL);
3665                 KeSetTargetProcessorDpc(&dpc, i);
3666                 KeInsertQueueDpc(&dpc, NULL, NULL);
3667                 while (kq->kq_exit)
3668                         tsleep(kq->kq_td, 0, "dpcw", hz/10);
3669         }
3670 }
3671
3672 static uint8_t
3673 ntoskrnl_insert_dpc(list_entry *head, kdpc *dpc)
3674 {
3675         list_entry              *l;
3676         kdpc                    *d;
3677
3678         l = head->nle_flink;
3679         while (l != head) {
3680                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3681                 if (d == dpc)
3682                         return (FALSE);
3683                 l = l->nle_flink;
3684         }
3685
3686         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3687                 InsertTailList((head), (&dpc->k_dpclistentry));
3688         else
3689                 InsertHeadList((head), (&dpc->k_dpclistentry));
3690
3691         return (TRUE);
3692 }
3693
3694 void
3695 KeInitializeDpc(kdpc *dpc, void *dpcfunc, void *dpcctx)
3696 {
3697
3698         if (dpc == NULL)
3699                 return;
3700
3701         dpc->k_deferedfunc = dpcfunc;
3702         dpc->k_deferredctx = dpcctx;
3703         dpc->k_num = KDPC_CPU_DEFAULT;
3704         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3705         InitializeListHead((&dpc->k_dpclistentry));
3706 }
3707
3708 uint8_t
3709 KeInsertQueueDpc(kdpc *dpc, void *sysarg1, void *sysarg2)
3710 {
3711         kdpc_queue              *kq;
3712         uint8_t                 r;
3713         uint8_t                 irql;
3714
3715         if (dpc == NULL)
3716                 return (FALSE);
3717
3718         kq = kq_queues;
3719
3720 #ifdef NTOSKRNL_MULTIPLE_DPCS
3721         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3722
3723         /*
3724          * By default, the DPC is queued to run on the same CPU
3725          * that scheduled it.
3726          */
3727
3728         if (dpc->k_num == KDPC_CPU_DEFAULT)
3729                 kq += curthread->td_oncpu;
3730         else
3731                 kq += dpc->k_num;
3732         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3733 #else
3734         KeAcquireSpinLock(&kq->kq_lock, &irql);
3735 #endif
3736
3737         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3738         if (r == TRUE) {
3739                 dpc->k_sysarg1 = sysarg1;
3740                 dpc->k_sysarg2 = sysarg2;
3741         }
3742         KeReleaseSpinLock(&kq->kq_lock, irql);
3743
3744         if (r == FALSE)
3745                 return (r);
3746
3747         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3748
3749         return (r);
3750 }
3751
3752 uint8_t
3753 KeRemoveQueueDpc(kdpc *dpc)
3754 {
3755         kdpc_queue              *kq;
3756         uint8_t                 irql;
3757
3758         if (dpc == NULL)
3759                 return (FALSE);
3760
3761 #ifdef NTOSKRNL_MULTIPLE_DPCS
3762         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3763
3764         kq = kq_queues + dpc->k_num;
3765
3766         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3767 #else
3768         kq = kq_queues;
3769         KeAcquireSpinLock(&kq->kq_lock, &irql);
3770 #endif
3771
3772         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
3773                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3774                 KeLowerIrql(irql);
3775                 return (FALSE);
3776         }
3777
3778         RemoveEntryList((&dpc->k_dpclistentry));
3779         InitializeListHead((&dpc->k_dpclistentry));
3780
3781         KeReleaseSpinLock(&kq->kq_lock, irql);
3782
3783         return (TRUE);
3784 }
3785
3786 void
3787 KeSetImportanceDpc(kdpc *dpc, uint32_t imp)
3788 {
3789         if (imp != KDPC_IMPORTANCE_LOW &&
3790             imp != KDPC_IMPORTANCE_MEDIUM &&
3791             imp != KDPC_IMPORTANCE_HIGH)
3792                 return;
3793
3794         dpc->k_importance = (uint8_t)imp;
3795 }
3796
3797 void
3798 KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
3799 {
3800         if (cpu >= ncpus)
3801                 return;
3802
3803         dpc->k_num = cpu;
3804 }
3805
3806 void
3807 KeFlushQueuedDpcs(void)
3808 {
3809         kdpc_queue              *kq;
3810         int                     i;
3811
3812         /*
3813          * Poke each DPC queue and wait
3814          * for them to drain.
3815          */
3816
3817 #ifdef NTOSKRNL_MULTIPLE_DPCS
3818         for (i = 0; i < ncpus; i++) {
3819 #else
3820         for (i = 0; i < 1; i++) {
3821 #endif
3822                 kq = kq_queues + i;
3823                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3824                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
3825         }
3826 }
3827
3828 uint32_t
3829 KeGetCurrentProcessorNumber(void)
3830 {
3831         return (curthread->td_gd->gd_cpuid);
3832 }
3833
3834 uint8_t
3835 KeSetTimerEx(ktimer *timer, int64_t duetime, uint32_t period, kdpc *dpc)
3836 {
3837         struct timeval          tv;
3838         uint64_t                curtime;
3839         uint8_t                 pending;
3840
3841         if (timer == NULL)
3842                 return (FALSE);
3843
3844         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3845
3846         if (timer->k_header.dh_inserted == TRUE) {
3847                 ntoskrnl_remove_timer(timer);
3848 #ifdef NTOSKRNL_DEBUG_TIMERS
3849                 ntoskrnl_timer_cancels++;
3850 #endif
3851                 timer->k_header.dh_inserted = FALSE;
3852                 pending = TRUE;
3853         } else
3854                 pending = FALSE;
3855
3856         timer->k_duetime = duetime;
3857         timer->k_period = period;
3858         timer->k_header.dh_sigstate = FALSE;
3859         timer->k_dpc = dpc;
3860
3861         if (duetime < 0) {
3862                 tv.tv_sec = - (duetime) / 10000000;
3863                 tv.tv_usec = (- (duetime) / 10) -
3864                     (tv.tv_sec * 1000000);
3865         } else {
3866                 ntoskrnl_time(&curtime);
3867                 if (duetime < curtime)
3868                         tv.tv_sec = tv.tv_usec = 0;
3869                 else {
3870                         tv.tv_sec = ((duetime) - curtime) / 10000000;
3871                         tv.tv_usec = ((duetime) - curtime) / 10 -
3872                             (tv.tv_sec * 1000000);
3873                 }
3874         }
3875
3876         timer->k_header.dh_inserted = TRUE;
3877         ntoskrnl_insert_timer(timer, tvtohz_high(&tv));
3878 #ifdef NTOSKRNL_DEBUG_TIMERS
3879         ntoskrnl_timer_sets++;
3880 #endif
3881
3882         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3883
3884         return (pending);
3885 }
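
/*
 * Illustrative sketch (hypothetical driver-side code, not compiled
 * here): arming a periodic timer with an associated DPC.  The due time
 * is in 100-nanosecond units, negative for an interval relative to now
 * and positive for an absolute system time; the period is in
 * milliseconds.  All example names are made up.
 */
#if 0
static ktimer	example_timer;
static kdpc	example_timer_dpc;
static void	example_timer_dpc_func(kdpc *, void *, void *, void *);

static void
example_arm_timer(void *ctx)
{
	/* 500 ms expressed in 100 ns units, relative to the present. */
	int64_t		duetime = -(500 * 10000LL);

	KeInitializeTimer(&example_timer);
	KeInitializeDpc(&example_timer_dpc, example_timer_dpc_func, ctx);

	/* First expiry after 500 ms, then every 1000 ms thereafter. */
	KeSetTimerEx(&example_timer, duetime, 1000, &example_timer_dpc);
}
#endif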
3886
3887 uint8_t
3888 KeSetTimer(ktimer *timer, int64_t duetime, kdpc *dpc)
3889 {
3890         return (KeSetTimerEx(timer, duetime, 0, dpc));
3891 }
3892
3893 /*
3894  * The Windows DDK documentation seems to say that cancelling
3895  * a timer that has a DPC will result in the DPC also being
3896  * cancelled, but this isn't really the case.
3897  */
3898
3899 uint8_t
3900 KeCancelTimer(ktimer *timer)
3901 {
3902         uint8_t                 pending;
3903
3904         if (timer == NULL)
3905                 return (FALSE);
3906
3907         lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3908
3909         pending = timer->k_header.dh_inserted;
3910
3911         if (timer->k_header.dh_inserted == TRUE) {
3912                 timer->k_header.dh_inserted = FALSE;
3913                 ntoskrnl_remove_timer(timer);
3914 #ifdef NTOSKRNL_DEBUG_TIMERS
3915                 ntoskrnl_timer_cancels++;
3916 #endif
3917         }
3918
3919         lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3920
3921         return (pending);
3922 }
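
/*
 * Illustrative sketch (hypothetical driver-side code, not compiled
 * here): because of the behavior described above, a driver tearing
 * down a timer usually also dequeues or drains its DPC explicitly.
 * The names carry over from the hypothetical sketch following
 * KeSetTimerEx() above.
 */
#if 0
static void
example_stop_timer(void)
{
	KeCancelTimer(&example_timer);

	/* Pull the DPC back out if the timer already queued it... */
	KeRemoveQueueDpc(&example_timer_dpc);

	/* ...and wait for any instance that is currently running. */
	KeFlushQueuedDpcs();
}
#endif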
3923
3924 uint8_t
3925 KeReadStateTimer(ktimer *timer)
3926 {
3927         return (timer->k_header.dh_sigstate);
3928 }
3929
3930 static int32_t
3931 KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)
3932 {
3933         ktimer                  timer;
3934
3935         if (wait_mode != 0)
3936                 panic("invalid wait_mode %d", wait_mode);
3937
3938         KeInitializeTimer(&timer);
3939         KeSetTimer(&timer, *interval, NULL);
3940         KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);
3941
3942         return STATUS_SUCCESS;
3943 }
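
/*
 * Illustrative sketch (hypothetical usage, not compiled here): the
 * interval argument uses the same convention as timer due times,
 * i.e. 100-nanosecond units, with negative values meaning "relative
 * to now".  A 100 ms kernel-mode sleep therefore looks like this:
 */
#if 0
static void
example_sleep_100ms(void)
{
	int64_t		interval = -(100 * 10000LL);	/* 100 ms */

	KeDelayExecutionThread(0 /* KernelMode */, FALSE, &interval);
}
#endif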
3944
3945 static uint64_t
3946 KeQueryInterruptTime(void)
3947 {
3948         int ticks;
3949         struct timeval tv;
3950
3951         getmicrouptime(&tv);
3952
3953         ticks = tvtohz_high(&tv);
3954
3955         return ticks * ((10000000 + hz - 1) / hz);
3956 }
3957
3958 static struct thread *
3959 KeGetCurrentThread(void)
3960 {
3961
3962         return curthread;
3963 }
3964
3965 static int32_t
3966 KeSetPriorityThread(struct thread *td, int32_t pri)
3967 {
3968         int32_t old;
3969
3970         if (td == NULL)
3971                 return LOW_REALTIME_PRIORITY;
3972
3973         if (td->td_pri >= TDPRI_INT_HIGH)
3974                 old = HIGH_PRIORITY;
3975         else if (td->td_pri <= TDPRI_IDLE_WORK)
3976                 old = LOW_PRIORITY;
3977         else
3978                 old = LOW_REALTIME_PRIORITY;
3979
3980         if (pri == HIGH_PRIORITY)
3981                 lwkt_setpri(td, TDPRI_INT_HIGH);
3982         if (pri == LOW_REALTIME_PRIORITY)
3983                 lwkt_setpri(td, TDPRI_SOFT_TIMER);
3984         if (pri == LOW_PRIORITY)
3985                 lwkt_setpri(td, TDPRI_IDLE_WORK);
3986
3987         return old;
3988 }
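
/*
 * Illustrative sketch (hypothetical usage, not compiled here): a
 * driver worker thread temporarily bumping itself to the highest of
 * the three priority levels this shim understands.
 */
#if 0
static void
example_raise_self(void)
{
	int32_t		old;

	old = KeSetPriorityThread(KeGetCurrentThread(), HIGH_PRIORITY);
	/* ... time-critical work ... */
	KeSetPriorityThread(KeGetCurrentThread(), old);
}
#endif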
3989
3990 static void
3991 dummy(void)
3992 {
3993         kprintf("ntoskrnl dummy called...\n");
3994 }
3995
3996
3997 image_patch_table ntoskrnl_functbl[] = {
3998         IMPORT_SFUNC(RtlZeroMemory, 2),
3999         IMPORT_SFUNC(RtlSecureZeroMemory, 2),
4000         IMPORT_SFUNC(RtlFillMemory, 3),
4001         IMPORT_SFUNC(RtlMoveMemory, 3),
4002         IMPORT_SFUNC(RtlCharToInteger, 3),
4003         IMPORT_SFUNC(RtlCopyMemory, 3),
4004         IMPORT_SFUNC(RtlCopyString, 2),
4005         IMPORT_SFUNC(RtlCompareMemory, 3),
4006         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4007         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4008         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4009         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4010         IMPORT_SFUNC(RtlInitAnsiString, 2),
4011         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4012         IMPORT_SFUNC(RtlInitUnicodeString, 2),
4013         IMPORT_SFUNC(RtlFreeAnsiString, 1),
4014         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4015         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4016         IMPORT_CFUNC_MAP(sprintf, ksprintf, 0),
4017         IMPORT_CFUNC_MAP(vsprintf, kvsprintf, 0),
4018         IMPORT_CFUNC_MAP(_snprintf, ksnprintf, 0),
4019         IMPORT_CFUNC_MAP(_vsnprintf, kvsnprintf, 0),
4020         IMPORT_CFUNC(DbgPrint, 0),
4021         IMPORT_SFUNC(DbgBreakPoint, 0),
4022         IMPORT_SFUNC(KeBugCheckEx, 5),
4023         IMPORT_CFUNC(strncmp, 0),
4024         IMPORT_CFUNC(strcmp, 0),
4025         IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4026         IMPORT_CFUNC(strncpy, 0),
4027         IMPORT_CFUNC(strcpy, 0),
4028         IMPORT_CFUNC(strlen, 0),
4029         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4030         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4031         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4032         IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
4033         IMPORT_CFUNC_MAP(strchr, index, 0),
4034         IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4035         IMPORT_CFUNC(memcpy, 0),
4036         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4037         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4038         IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4039         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4040         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4041         IMPORT_FFUNC(IofCallDriver, 2),
4042         IMPORT_FFUNC(IofCompleteRequest, 2),
4043         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4044         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4045         IMPORT_SFUNC(IoCancelIrp, 1),
4046         IMPORT_SFUNC(IoConnectInterrupt, 11),
4047         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4048         IMPORT_SFUNC(IoCreateDevice, 7),
4049         IMPORT_SFUNC(IoDeleteDevice, 1),
4050         IMPORT_SFUNC(IoGetAttachedDevice, 1),
4051         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4052         IMPORT_SFUNC(IoDetachDevice, 1),
4053         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4054         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4055         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4056         IMPORT_SFUNC(IoAllocateIrp, 2),
4057         IMPORT_SFUNC(IoReuseIrp, 2),
4058         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4059         IMPORT_SFUNC(IoFreeIrp, 1),
4060         IMPORT_SFUNC(IoInitializeIrp, 3),
4061         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4062         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4063         IMPORT_SFUNC(KeSynchronizeExecution, 3),
4064         IMPORT_SFUNC(KeWaitForSingleObject, 5),
4065         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4066         IMPORT_SFUNC(_allmul, 4),
4067         IMPORT_SFUNC(_alldiv, 4),
4068         IMPORT_SFUNC(_allrem, 4),
4069         IMPORT_RFUNC(_allshr, 0),
4070         IMPORT_RFUNC(_allshl, 0),
4071         IMPORT_SFUNC(_aullmul, 4),
4072         IMPORT_SFUNC(_aulldiv, 4),
4073         IMPORT_SFUNC(_aullrem, 4),
4074         IMPORT_RFUNC(_aullshr, 0),
4075         IMPORT_RFUNC(_aullshl, 0),
4076         IMPORT_CFUNC(atoi, 0),
4077         IMPORT_CFUNC(atol, 0),
4078         IMPORT_CFUNC(rand, 0),
4079         IMPORT_CFUNC(srand, 0),
4080         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4081         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4082         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4083         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4084         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4085         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4086         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4087         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4088         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4089         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4090         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4091         IMPORT_FFUNC(InitializeSListHead, 1),
4092         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4093         IMPORT_SFUNC(ExQueryDepthSList, 1),
4094         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4095                 InterlockedPopEntrySList, 1),
4096         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4097                 InterlockedPushEntrySList, 2),
4098         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4099         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4100         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4101         IMPORT_SFUNC(ExFreePoolWithTag, 2),
4102         IMPORT_SFUNC(ExFreePool, 1),
4103         /*
4104          * For AMD64, we can get away with just mapping
4105          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4106          * because the calling conventions end up being the same.
4107          */
4108         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4109         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4110         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4111         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4112         IMPORT_FFUNC(InterlockedIncrement, 1),
4113         IMPORT_FFUNC(InterlockedDecrement, 1),
4114         IMPORT_FFUNC(InterlockedExchange, 2),
4115         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4116         IMPORT_SFUNC(IoAllocateMdl, 5),
4117         IMPORT_SFUNC(IoFreeMdl, 1),
4118         IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1),
4119         IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3),
4120         IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4121         IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4122         IMPORT_SFUNC(MmSizeOfMdl, 1),
4123         IMPORT_SFUNC(MmMapLockedPages, 2),
4124         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4125         IMPORT_SFUNC(MmUnmapLockedPages, 2),
4126         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4127         IMPORT_SFUNC(MmGetPhysicalAddress, 1),
4128         IMPORT_SFUNC(MmGetSystemRoutineAddress, 1),
4129         IMPORT_SFUNC(MmIsAddressValid, 1),
4130         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4131         IMPORT_SFUNC(MmUnmapIoSpace, 2),
4132         IMPORT_SFUNC(KeInitializeSpinLock, 1),
4133         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4134         IMPORT_SFUNC(IoOpenDeviceRegistryKey, 4),
4135         IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
4136         IMPORT_SFUNC(IoGetDeviceProperty, 5),
4137         IMPORT_SFUNC(IoAllocateWorkItem, 1),
4138         IMPORT_SFUNC(IoFreeWorkItem, 1),
4139         IMPORT_SFUNC(IoQueueWorkItem, 4),
4140         IMPORT_SFUNC(ExQueueWorkItem, 2),
4141         IMPORT_SFUNC(ntoskrnl_workitem, 2),
4142         IMPORT_SFUNC(KeInitializeMutex, 2),
4143         IMPORT_SFUNC(KeReleaseMutex, 2),
4144         IMPORT_SFUNC(KeReadStateMutex, 1),
4145         IMPORT_SFUNC(KeInitializeEvent, 3),
4146         IMPORT_SFUNC(KeSetEvent, 3),
4147         IMPORT_SFUNC(KeResetEvent, 1),
4148         IMPORT_SFUNC(KeClearEvent, 1),
4149         IMPORT_SFUNC(KeReadStateEvent, 1),
4150         IMPORT_SFUNC(KeInitializeTimer, 1),
4151         IMPORT_SFUNC(KeInitializeTimerEx, 2),
4152         IMPORT_SFUNC(KeSetTimer, 3),
4153         IMPORT_SFUNC(KeSetTimerEx, 4),
4154         IMPORT_SFUNC(KeCancelTimer, 1),
4155         IMPORT_SFUNC(KeReadStateTimer, 1),
4156         IMPORT_SFUNC(KeInitializeDpc, 3),
4157         IMPORT_SFUNC(KeInsertQueueDpc, 3),
4158         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4159         IMPORT_SFUNC(KeSetImportanceDpc, 2),
4160         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4161         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4162         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4163         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4164         IMPORT_FFUNC(ObfDereferenceObject, 1),
4165         IMPORT_SFUNC(ZwClose, 1),
4166         IMPORT_SFUNC(PsCreateSystemThread, 7),
4167         IMPORT_SFUNC(PsTerminateSystemThread, 1),
4168         IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4169         IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4170         IMPORT_CFUNC(WmiTraceMessage, 0),
4171         IMPORT_SFUNC(KeQuerySystemTime, 1),
4172         IMPORT_CFUNC(KeTickCount, 0),
4173         IMPORT_SFUNC(KeDelayExecutionThread, 3),
4174         IMPORT_SFUNC(KeQueryInterruptTime, 0),
4175         IMPORT_SFUNC(KeGetCurrentThread, 0),
4176         IMPORT_SFUNC(KeSetPriorityThread, 2),
4177
4178         /*
4179          * This last entry is a catch-all for any function we haven't
4180          * implemented yet. The PE import list patching routine will
4181          * use it for any function that doesn't have an explicit match
4182          * in this table.
4183          */
4184
4185         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4186
4187         /* End of list. */
4188
4189         { NULL, NULL, NULL }
4190 };