vkernel - enhance the pidfile option and fix memimg file scanning
[dragonfly.git] / sys / platform / vkernel64 / platform / init.c
1/*
2 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/platform/vkernel/platform/init.c,v 1.56 2008/05/27 07:48:00 dillon Exp $
35 */
36
37#include <sys/types.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/stat.h>
41#include <sys/mman.h>
42#include <sys/cons.h>
43#include <sys/random.h>
44#include <sys/vkernel.h>
45#include <sys/tls.h>
46#include <sys/reboot.h>
47#include <sys/proc.h>
48#include <sys/msgbuf.h>
49#include <sys/vmspace.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/sysctl.h>
53#include <sys/un.h>
54#include <vm/vm_page.h>
55#include <vm/vm_map.h>
56#include <sys/mplock2.h>
57
58#include <machine/cpu.h>
59#include <machine/globaldata.h>
60#include <machine/tls.h>
61#include <machine/md_var.h>
62#include <machine/vmparam.h>
63#include <cpu/specialreg.h>
64
65#include <net/if.h>
66#include <net/if_arp.h>
67#include <net/ethernet.h>
68#include <net/bridge/if_bridgevar.h>
69#include <netinet/in.h>
70#include <arpa/inet.h>
71
72#include <stdio.h>
73#include <stdlib.h>
74#include <stdarg.h>
75#include <stdbool.h>
76#include <unistd.h>
77#include <fcntl.h>
78#include <string.h>
79#include <err.h>
80#include <errno.h>
81#include <assert.h>
82#include <sysexits.h>
83
84vm_paddr_t phys_avail[16];
85vm_paddr_t Maxmem;
86vm_paddr_t Maxmem_bytes;
87long physmem;
88int MemImageFd = -1;
89struct vkdisk_info DiskInfo[VKDISK_MAX];
90int DiskNum;
91struct vknetif_info NetifInfo[VKNETIF_MAX];
92int NetifNum;
93char *pid_file;
94vm_offset_t KvaStart;
95vm_offset_t KvaEnd;
96vm_offset_t KvaSize;
97vm_offset_t virtual_start;
98vm_offset_t virtual_end;
99vm_offset_t virtual2_start;
100vm_offset_t virtual2_end;
101vm_offset_t kernel_vm_end;
102vm_offset_t crashdumpmap;
103vm_offset_t clean_sva;
104vm_offset_t clean_eva;
105struct msgbuf *msgbufp;
106caddr_t ptvmmap;
107vpte_t *KernelPTD;
108vpte_t *KernelPTA; /* Warning: Offset for direct VA translation */
109void *dmap_min_address;
110u_int cpu_feature; /* XXX */
111int tsc_present;
112int64_t tsc_frequency;
113int optcpus; /* number of cpus - see mp_start() */
114int lwp_cpu_lock; /* if/how to lock virtual CPUs to real CPUs */
115int real_ncpus; /* number of real CPUs */
116int next_cpu; /* next real CPU to lock a virtual CPU to */
117
118struct privatespace *CPU_prvspace;
119
120static struct trapframe proc0_tf;
121static void *proc0paddr;
122
123static void init_sys_memory(char *imageFile);
124static void init_kern_memory(void);
125static void init_globaldata(void);
126static void init_vkernel(void);
127static void init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type);
128static void init_netif(char *netifExp[], int netifFileNum);
129static void writepid(void);
130static void cleanpid(void);
131static int unix_connect(const char *path);
132static void usage_err(const char *ctl, ...);
133static void usage_help(_Bool);
134
135static int save_ac;
136static char **save_av;
137
138/*
139 * Kernel startup for virtual kernels - standard main()
140 */
141int
142main(int ac, char **av)
143{
144 char *memImageFile = NULL;
145 char *netifFile[VKNETIF_MAX];
146 char *diskFile[VKDISK_MAX];
147 char *cdFile[VKDISK_MAX];
148 char *suffix;
149 char *endp;
150 int netifFileNum = 0;
151 int diskFileNum = 0;
152 int cdFileNum = 0;
153 int bootOnDisk = -1; /* set below to vcd (0) or vkd (1) */
154 int c;
155 int i;
156 int j;
157 int n;
158 int isq;
159 int real_vkernel_enable;
160 int supports_sse;
161 size_t vsize;
162
163 save_ac = ac;
164 save_av = av;
165
166 /*
167 * Process options
168 */
169 kernel_mem_readonly = 1;
170#ifdef SMP
171 optcpus = 2;
172#endif
173 lwp_cpu_lock = LCL_NONE;
174
175 real_vkernel_enable = 0;
176 vsize = sizeof(real_vkernel_enable);
177 sysctlbyname("vm.vkernel_enable", &real_vkernel_enable, &vsize, NULL,0);
178
179 if (real_vkernel_enable == 0) {
180 errx(1, "vm.vkernel_enable is 0, must be set "
181 "to 1 to execute a vkernel!");
182 }
183
184 real_ncpus = 1;
185 vsize = sizeof(real_ncpus);
186 sysctlbyname("hw.ncpu", &real_ncpus, &vsize, NULL, 0);
187
188 if (ac < 2)
189 usage_help(false);
190
191 while ((c = getopt(ac, av, "c:hsvl:m:n:r:e:i:p:I:U")) != -1) {
192 switch(c) {
193 case 'e':
194 /*
195 * name=value:name=value:name=value...
196 * name="value"...
197 *
198 * Allow values to be quoted but note that shells
199 * may remove the quotes, so using this feature
200 * to embed colons may require a backslash.
201 */
202 n = strlen(optarg);
203 isq = 0;
204 kern_envp = malloc(n + 2);
205 for (i = j = 0; i < n; ++i) {
206 if (optarg[i] == '"')
207 isq ^= 1;
208 else if (optarg[i] == '\'')
209 isq ^= 2;
210 else if (isq == 0 && optarg[i] == ':')
211 kern_envp[j++] = 0;
212 else
213 kern_envp[j++] = optarg[i];
214 }
215 kern_envp[j++] = 0;
216 kern_envp[j++] = 0;
217 break;
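			/*
			 * Example (illustrative) for -e above: the argument
			 * var1=1:var2="a:b" becomes the double-NUL-terminated
			 * list
			 *
			 *	var1=1\0var2=a:b\0\0
			 *
			 * i.e. the quoted colon stays literal (assuming the
			 * shell passes the quotes through) while the unquoted
			 * colon separates the two entries.  Variable names are
			 * made up for the example.
			 */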
218 case 's':
219 boothowto |= RB_SINGLE;
220 break;
221 case 'v':
222 bootverbose = 1;
223 break;
224 case 'i':
225 memImageFile = optarg;
226 break;
227 case 'I':
228 if (netifFileNum < VKNETIF_MAX)
229 netifFile[netifFileNum++] = strdup(optarg);
230 break;
231 case 'r':
232 if (bootOnDisk < 0)
233 bootOnDisk = 1;
234 if (diskFileNum + cdFileNum < VKDISK_MAX)
235 diskFile[diskFileNum++] = strdup(optarg);
236 break;
237 case 'c':
238 if (bootOnDisk < 0)
239 bootOnDisk = 0;
240 if (diskFileNum + cdFileNum < VKDISK_MAX)
241 cdFile[cdFileNum++] = strdup(optarg);
242 break;
243 case 'm':
244 Maxmem_bytes = strtoull(optarg, &suffix, 0);
245 if (suffix) {
246 switch(*suffix) {
247 case 'g':
248 case 'G':
249 Maxmem_bytes <<= 30;
250 break;
251 case 'm':
252 case 'M':
253 Maxmem_bytes <<= 20;
254 break;
255 case 'k':
256 case 'K':
257 Maxmem_bytes <<= 10;
258 break;
259 default:
260 Maxmem_bytes = 0;
261 usage_err("Bad maxmem option");
262 /* NOT REACHED */
263 break;
264 }
265 }
266 break;
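			/*
			 * Example (illustrative) for -m above: "-m 64m"
			 * requests 64MB and "-m 1g" requests 1GB of vkernel
			 * memory.  The result must be at least 64MB and a
			 * multiple of SEG_SIZE (checked in init_sys_memory()).
			 */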
267 case 'l':
268 next_cpu = -1;
269 if (strncmp("map", optarg, 3) == 0) {
270 lwp_cpu_lock = LCL_PER_CPU;
271 if (optarg[3] == ',') {
272 next_cpu = strtol(optarg+4, &endp, 0);
273 if (*endp != '\0')
274 usage_err("Bad target CPU number at '%s'", endp);
275 } else {
276 next_cpu = 0;
277 }
278 if (next_cpu < 0 || next_cpu > real_ncpus - 1)
279 usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
280 } else if (strncmp("any", optarg, 3) == 0) {
281 lwp_cpu_lock = LCL_NONE;
282 } else {
283 lwp_cpu_lock = LCL_SINGLE_CPU;
284 next_cpu = strtol(optarg, &endp, 0);
285 if (*endp != '\0')
286 usage_err("Bad target CPU number at '%s'", endp);
287 if (next_cpu < 0 || next_cpu > real_ncpus - 1)
288 usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
289 }
290 break;
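			/*
			 * Examples (illustrative) for -l above: "-l any"
			 * leaves virtual cpus unlocked, "-l 1" locks every
			 * virtual cpu to real cpu 1, "-l map" distributes
			 * virtual cpus round-robin over the real cpus starting
			 * at 0, and "-l map,2" starts that rotation at real
			 * cpu 2 (see setrealcpu()).
			 */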
291 case 'n':
292 /*
293 * This value is set up by mp_start(), don't just
294 * set ncpus here.
295 */
296#ifdef SMP
297 optcpus = strtol(optarg, NULL, 0);
298 if (optcpus < 1 || optcpus > MAXCPU)
299 usage_err("Bad ncpus, valid range is 1-%d", MAXCPU);
300#else
301 if (strtol(optarg, NULL, 0) != 1) {
302 usage_err("You built a UP vkernel, only 1 cpu!");
303 }
304#endif
305
306 break;
307 case 'p':
308 pid_file = optarg;
309 break;
310 case 'U':
311 kernel_mem_readonly = 0;
312 break;
313 case 'h':
314 usage_help(true);
315 break;
316 default:
317 usage_help(false);
318 }
319 }
320
321 writepid();
322 cpu_disable_intr();
323 init_sys_memory(memImageFile);
324 init_kern_memory();
325 init_globaldata();
326 init_vkernel();
327 setrealcpu();
328 init_kqueue();
329
330 /*
331 * Check TSC
332 */
333 vsize = sizeof(tsc_present);
334 sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
335 vsize = sizeof(tsc_frequency);
336 sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
337 if (tsc_present)
338 cpu_feature |= CPUID_TSC;
339
340 /*
341 * Check SSE
342 */
343 vsize = sizeof(supports_sse);
344 supports_sse = 0;
345 sysctlbyname("hw.instruction_sse", &supports_sse, &vsize, NULL, 0);
346 init_fpu(supports_sse);
347 if (supports_sse)
348 cpu_feature |= CPUID_SSE | CPUID_FXSR;
349
350 /*
351 * We boot from the first installed disk.
352 */
353 if (bootOnDisk == 1) {
354 init_disk(diskFile, diskFileNum, VKD_DISK);
355 init_disk(cdFile, cdFileNum, VKD_CD);
356 } else {
357 init_disk(cdFile, cdFileNum, VKD_CD);
358 init_disk(diskFile, diskFileNum, VKD_DISK);
359 }
360 init_netif(netifFile, netifFileNum);
361 init_exceptions();
362 mi_startup();
363 /* NOT REACHED */
364 exit(EX_SOFTWARE);
365}
366
367/*
368 * Initialize system memory. This is the virtual kernel's 'RAM'.
369 */
370static
371void
372init_sys_memory(char *imageFile)
373{
374 struct stat st;
375 int i;
376 int fd;
377
378 /*
379 * Figure out the system memory image size. If an image file was
380 * specified and -m was not specified, use the image file's size.
381 */
382 if (imageFile && stat(imageFile, &st) == 0 && Maxmem_bytes == 0)
383 Maxmem_bytes = (vm_paddr_t)st.st_size;
384 if ((imageFile == NULL || stat(imageFile, &st) < 0) &&
385 Maxmem_bytes == 0) {
386 errx(1, "Cannot create new memory file %s unless "
387 "system memory size is specified with -m",
388 imageFile);
389 /* NOT REACHED */
390 }
391
392 /*
393 * Maxmem must be known at this time
394 */
395 if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
396 errx(1, "Bad maxmem specification: 64MB minimum, "
397 "multiples of %dMB only",
398 SEG_SIZE / 1024 / 1024);
399 /* NOT REACHED */
400 }
401
402 /*
403 * Generate an image file name if necessary, then open/create the
404 * file exclusively locked. Do not allow multiple virtual kernels
405 * to use the same image file.
406 *
407 * Don't iterate through a million files if we do not have write
408 * access to the directory; stop if our open() failed on a
409 * non-existent file; opens can otherwise fail for any number of reasons.
410 */
411 if (imageFile == NULL) {
412 for (i = 0; i < 1000000; ++i) {
413 asprintf(&imageFile, "/var/vkernel/memimg.%06d", i);
414 fd = open(imageFile,
415 O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
416 if (fd < 0 && stat(imageFile, &st) == 0) {
417 free(imageFile);
418 continue;
419 }
420 break;
421 }
422 } else {
423 fd = open(imageFile, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
424 }
425 fprintf(stderr, "Using memory file: %s\n", imageFile);
426 if (fd < 0 || fstat(fd, &st) < 0) {
427 err(1, "Unable to open/create %s", imageFile);
428 /* NOT REACHED */
429 }
430
431 /*
432 * Truncate or extend the file as necessary. Clean out the contents
433 * of the file, we want it to be full of holes so we don't waste
434 * time reading in data from an old file that we no longer care
435 * about.
436 */
437 ftruncate(fd, 0);
438 ftruncate(fd, Maxmem_bytes);
439
440 MemImageFd = fd;
441 Maxmem = Maxmem_bytes >> PAGE_SHIFT;
442 physmem = Maxmem;
443}
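/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * exclusive-lock probe used by the memory image scan above, in isolation.
 * A candidate slot is skipped only when open() fails but the file already
 * exists, i.e. another vkernel holds the lock; failing on a non-existent
 * path means we cannot create files in the directory and the scan stops.
 * Assumes the BSD O_EXLOCK open(2) extension used above.
 */
#if 0
static int
probe_memimg_slot(const char *path)
{
	struct stat st;
	int fd;

	fd = open(path, O_RDWR | O_CREAT | O_EXLOCK | O_NONBLOCK, 0644);
	if (fd >= 0)
		return (fd);	/* free slot, now locked by us */
	if (stat(path, &st) == 0)
		return (-2);	/* exists but is locked, try the next slot */
	return (-1);		/* cannot create here, stop scanning */
}
#endif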
444
445/*
446 * Initialize kernel memory. This reserves kernel virtual memory by using
447 * MAP_VPAGETABLE
448 */
449
450static
451void
452init_kern_memory(void)
453{
454 void *base;
455 void *try;
456 char dummy;
457 char *topofstack = &dummy;
458 int i;
459 void *firstfree;
460
461 /*
462 * Memory map our kernel virtual memory space. Note that the
463 * kernel image itself is not made part of this memory for the
464 * moment.
465 *
466 * The memory map must be segment-aligned so we can properly
467 * offset KernelPTD.
468 *
469 * If the system kernel has a different MAXDSIZ, it might not
470 * be possible to map kernel memory in its preferred location.
471 * Try a number of different locations.
472 */
473 try = (void *)(512UL << 30);
474 base = NULL;
475 while ((char *)try + KERNEL_KVA_SIZE < topofstack) {
476 base = mmap(try, KERNEL_KVA_SIZE, PROT_READ|PROT_WRITE,
477 MAP_FILE|MAP_SHARED|MAP_VPAGETABLE,
478 MemImageFd, (off_t)try);
479 if (base == try)
480 break;
481 if (base != MAP_FAILED)
482 munmap(base, KERNEL_KVA_SIZE);
483 try = (char *)try + (512UL << 30);
484 }
485 if (base != try) {
486 err(1, "Unable to mmap() kernel virtual memory!");
487 /* NOT REACHED */
488 }
489 madvise(base, KERNEL_KVA_SIZE, MADV_NOSYNC);
490 KvaStart = (vm_offset_t)base;
491 KvaSize = KERNEL_KVA_SIZE;
492 KvaEnd = KvaStart + KvaSize;
493
494 /* cannot use kprintf yet */
495 printf("KVM mapped at %p-%p\n", (void *)KvaStart, (void *)KvaEnd);
496
497 /* MAP_FILE? */
498 dmap_min_address = mmap(0, DMAP_SIZE, PROT_READ|PROT_WRITE,
499 MAP_NOCORE|MAP_NOSYNC|MAP_SHARED,
500 MemImageFd, 0);
501 if (dmap_min_address == MAP_FAILED) {
502 err(1, "Unable to mmap() kernel DMAP region!");
503 /* NOT REACHED */
504 }
505
506 firstfree = 0;
507 pmap_bootstrap((vm_paddr_t *)&firstfree, (int64_t)base);
508
509 mcontrol(base, KERNEL_KVA_SIZE, MADV_SETMAP,
510 0 | VPTE_R | VPTE_W | VPTE_V);
511
512 /*
513 * phys_avail[] represents unallocated physical memory. MI code
514 * will use phys_avail[] to create the vm_page array.
515 */
516 phys_avail[0] = (vm_paddr_t)firstfree;
517 phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
518 phys_avail[1] = Maxmem_bytes;
519
520#if JGV
521 /*
522 * (virtual_start, virtual_end) represent unallocated kernel virtual
523 * memory. MI code will create kernel_map using these parameters.
524 */
525 virtual_start = KvaStart + (long)firstfree;
526 virtual_start = (virtual_start + PAGE_MASK) & ~(vm_offset_t)PAGE_MASK;
527 virtual_end = KvaStart + KERNEL_KVA_SIZE;
528#endif
529
530 /*
531 * pmap_growkernel() will set the correct value.
532 */
533 kernel_vm_end = 0;
534
535 /*
536 * Allocate space for process 0's UAREA.
537 */
538 proc0paddr = (void *)virtual_start;
539 for (i = 0; i < UPAGES; ++i) {
540 pmap_kenter_quick(virtual_start, phys_avail[0]);
541 virtual_start += PAGE_SIZE;
542 phys_avail[0] += PAGE_SIZE;
543 }
544
545 /*
546 * crashdumpmap
547 */
548 crashdumpmap = virtual_start;
549 virtual_start += MAXDUMPPGS * PAGE_SIZE;
550
551 /*
552 * msgbufp maps the system message buffer
553 */
554 assert((MSGBUF_SIZE & PAGE_MASK) == 0);
555 msgbufp = (void *)virtual_start;
556 for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
557 pmap_kenter_quick(virtual_start, phys_avail[0]);
558 virtual_start += PAGE_SIZE;
559 phys_avail[0] += PAGE_SIZE;
560 }
561 msgbufinit(msgbufp, MSGBUF_SIZE);
562
563 /*
564 * used by kern_memio for /dev/mem access
565 */
566 ptvmmap = (caddr_t)virtual_start;
567 virtual_start += PAGE_SIZE;
568
569 /*
570 * Bootstrap the kernel_pmap
571 */
572#if JGV
573 pmap_bootstrap();
574#endif
575}
576
577/*
578 * Map the per-cpu globaldata for cpu #0. Allocate the space using
579 * virtual_start and phys_avail[0]
580 */
581static
582void
583init_globaldata(void)
584{
585 int i;
586 vm_paddr_t pa;
587 vm_offset_t va;
588
589 /*
590 * Reserve enough KVA to cover possible cpus. This is a considerable
591 * amount of KVA since the privatespace structure includes two
592 * whole page table mappings.
593 */
594 virtual_start = (virtual_start + SEG_MASK) & ~(vm_offset_t)SEG_MASK;
595 CPU_prvspace = (void *)virtual_start;
596 virtual_start += sizeof(struct privatespace) * SMP_MAXCPU;
597
598 /*
599 * Allocate enough physical memory to cover the mdglobaldata
600 * portion of the space and the idle stack and map the pages
601 * into KVA. For cpu #0 only.
602 */
603 for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
604 pa = phys_avail[0];
605 va = (vm_offset_t)&CPU_prvspace[0].mdglobaldata + i;
606 pmap_kenter_quick(va, pa);
607 phys_avail[0] += PAGE_SIZE;
608 }
609 for (i = 0; i < sizeof(CPU_prvspace[0].idlestack); i += PAGE_SIZE) {
610 pa = phys_avail[0];
611 va = (vm_offset_t)&CPU_prvspace[0].idlestack + i;
612 pmap_kenter_quick(va, pa);
613 phys_avail[0] += PAGE_SIZE;
614 }
615
616 /*
617 * Setup the %gs for cpu #0. The mycpu macro works after this
618 * point. Note that %fs is used by pthreads.
619 */
620 tls_set_gs(&CPU_prvspace[0], sizeof(struct privatespace));
621}
622
623/*
624 * Initialize very low level systems including thread0, proc0, etc.
625 */
626static
627void
628init_vkernel(void)
629{
630 struct mdglobaldata *gd;
631
632 gd = &CPU_prvspace[0].mdglobaldata;
633 bzero(gd, sizeof(*gd));
634
635 gd->mi.gd_curthread = &thread0;
636 thread0.td_gd = &gd->mi;
637 ncpus = 1;
638 ncpus2 = 1; /* rounded down power of 2 */
639 ncpus_fit = 1; /* rounded up power of 2 */
640 /* ncpus2_mask and ncpus_fit_mask are 0 */
641 init_param1();
642 gd->mi.gd_prvspace = &CPU_prvspace[0];
643 mi_gdinit(&gd->mi, 0);
644 cpu_gdinit(gd, 0);
645 mi_proc0init(&gd->mi, proc0paddr);
646 lwp0.lwp_md.md_regs = &proc0_tf;
647
648 /*init_locks();*/
649#ifdef SMP
650 /*
651 * Get the initial mplock with a count of 1 for the BSP.
652 * This uses a LOGICAL cpu ID, ie BSP == 0.
653 */
654 cpu_get_initial_mplock();
655#endif
656 cninit();
657 rand_initialize();
658#if 0 /* #ifdef DDB */
659 kdb_init();
660 if (boothowto & RB_KDB)
661 Debugger("Boot flags requested debugger");
662#endif
663 identcpu();
664#if 0
665 initializecpu(); /* Initialize CPU registers */
666#endif
667 init_param2((phys_avail[1] - phys_avail[0]) / PAGE_SIZE);
668
669#if 0
670 /*
671 * Map the message buffer
672 */
673 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
674 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
675 msgbufinit(msgbufp, MSGBUF_SIZE);
676#endif
677#if 0
678 thread0.td_pcb_cr3 ... MMU
679 lwp0.lwp_md.md_regs = &proc0_tf;
680#endif
681}
682
683/*
684 * Filesystem image paths for the virtual kernel are optional.
685 * If specified, each should point to a disk image,
686 * the first of which will become the root disk.
687 *
688 * The virtual kernel caches data from our 'disk' just like a normal kernel,
689 * so we do not really want the real kernel to cache the data too. Use
690 * O_DIRECT to remove the duplication.
691 */
692static
693void
694init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type)
695{
696 int i;
697
698 if (diskFileNum == 0)
699 return;
700
701 for(i=0; i < diskFileNum; i++){
702 char *fname;
703 fname = diskExp[i];
704
705 if (fname == NULL) {
706 warnx("Invalid argument to '-r'");
707 continue;
708 }
709
710 if (DiskNum < VKDISK_MAX) {
711 struct stat st;
712 struct vkdisk_info* info = NULL;
713 int fd;
714 size_t l = 0;
715
716 if (type == VKD_DISK)
717 fd = open(fname, O_RDWR|O_DIRECT, 0644);
718 else
719 fd = open(fname, O_RDONLY|O_DIRECT, 0644);
720 if (fd < 0 || fstat(fd, &st) < 0) {
721 err(1, "Unable to open/create %s", fname);
722 /* NOT REACHED */
723 }
724 if (S_ISREG(st.st_mode)) {
725 if (flock(fd, LOCK_EX|LOCK_NB) < 0) {
726 errx(1, "Disk image %s is already "
727 "in use\n", fname);
728 /* NOT REACHED */
729 }
730 }
731
732 info = &DiskInfo[DiskNum];
733 l = strlen(fname);
734
735 info->unit = i;
736 info->fd = fd;
737 info->type = type;
738 memcpy(info->fname, fname, l);
739
740 if (DiskNum == 0) {
741 if (type == VKD_CD) {
742 rootdevnames[0] = "cd9660:vcd0a";
743 } else if (type == VKD_DISK) {
744 rootdevnames[0] = "ufs:vkd0s0a";
745 rootdevnames[1] = "ufs:vkd0s1a";
746 }
747 }
748
749 DiskNum++;
750 } else {
751 warnx("vkd%d (%s) > VKDISK_MAX", DiskNum, fname);
752 continue;
753 }
754 }
755}
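/*
 * Example (illustrative paths) for -r/-c: "-r /var/vkernel/rootimg.01"
 * attaches the image as vkd0 and, as the first configured disk, selects
 * "ufs:vkd0s0a" (falling back to "ufs:vkd0s1a") as the root device, while
 * booting with only "-c /usr/release.iso" would use "cd9660:vcd0a" instead.
 */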
756
757static
758int
759netif_set_tapflags(int tap_unit, int f, int s)
760{
761 struct ifreq ifr;
762 int flags;
763
764 bzero(&ifr, sizeof(ifr));
765
766 snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
767 if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
768 warn("tap%d: ioctl(SIOCGIFFLAGS) failed", tap_unit);
769 return -1;
770 }
771
772 /*
773 * Adjust if_flags
774 *
775 * If the flags are already set/cleared, then we return
776 * immediately to avoid extra syscalls
777 */
778 flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
779 if (f < 0) {
780 /* Turn off flags */
781 f = -f;
782 if ((flags & f) == 0)
783 return 0;
784 flags &= ~f;
785 } else {
786 /* Turn on flags */
787 if (flags & f)
788 return 0;
789 flags |= f;
790 }
791
792 /*
793 * Fix up ifreq.ifr_name, since it may be trashed
794 * in the previous ioctl(SIOCGIFFLAGS)
795 */
796 snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
797
798 ifr.ifr_flags = flags & 0xffff;
799 ifr.ifr_flagshigh = flags >> 16;
800 if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
801 warn("tap%d: ioctl(SIOCSIFFLAGS) failed", tap_unit);
802 return -1;
803 }
804 return 0;
805}
806
807static
808int
809netif_set_tapaddr(int tap_unit, in_addr_t addr, in_addr_t mask, int s)
810{
811 struct ifaliasreq ifra;
812 struct sockaddr_in *in;
813
814 bzero(&ifra, sizeof(ifra));
815 snprintf(ifra.ifra_name, sizeof(ifra.ifra_name), "tap%d", tap_unit);
816
817 /* Setup address */
818 in = (struct sockaddr_in *)&ifra.ifra_addr;
819 in->sin_family = AF_INET;
820 in->sin_len = sizeof(*in);
821 in->sin_addr.s_addr = addr;
822
823 if (mask != 0) {
824 /* Setup netmask */
825 in = (struct sockaddr_in *)&ifra.ifra_mask;
826 in->sin_len = sizeof(*in);
827 in->sin_addr.s_addr = mask;
828 }
829
830 if (ioctl(s, SIOCAIFADDR, &ifra) < 0) {
831 warn("tap%d: ioctl(SIOCAIFADDR) failed", tap_unit);
832 return -1;
833 }
834 return 0;
835}
836
837static
838int
839netif_add_tap2brg(int tap_unit, const char *ifbridge, int s)
840{
841 struct ifbreq ifbr;
842 struct ifdrv ifd;
843
844 bzero(&ifbr, sizeof(ifbr));
845 snprintf(ifbr.ifbr_ifsname, sizeof(ifbr.ifbr_ifsname),
846 "tap%d", tap_unit);
847
848 bzero(&ifd, sizeof(ifd));
849 strlcpy(ifd.ifd_name, ifbridge, sizeof(ifd.ifd_name));
850 ifd.ifd_cmd = BRDGADD;
851 ifd.ifd_len = sizeof(ifbr);
852 ifd.ifd_data = &ifbr;
853
854 if (ioctl(s, SIOCSDRVSPEC, &ifd) < 0) {
855 /*
856 * 'errno == EEXIST' means that the tap(4) is already
857 * a member of the bridge(4)
858 */
859 if (errno != EEXIST) {
860 warn("ioctl(%s, SIOCSDRVSPEC) failed", ifbridge);
861 return -1;
862 }
863 }
864 return 0;
865}
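/*
 * Note: the SIOCSDRVSPEC/BRDGADD sequence above is the programmatic
 * equivalent of "ifconfig bridgeX addm tapN"; EEXIST is tolerated so an
 * already-configured bridge membership is not treated as an error.
 */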
866
867#define TAPDEV_OFLAGS (O_RDWR | O_NONBLOCK)
868
869/*
870 * Locate the first unused tap(4) device file if auto mode is requested,
871 * or open the user supplied device file, and bring up the corresponding
872 * tap(4) interface.
873 *
874 * NOTE: Only tap(4) device files are supported currently
875 */
876static
877int
878netif_open_tap(const char *netif, int *tap_unit, int s)
879{
880 char tap_dev[MAXPATHLEN];
881 int tap_fd, failed;
882 struct stat st;
883 char *dname;
884
885 *tap_unit = -1;
886
887 if (strcmp(netif, "auto") == 0) {
888 /*
889 * Find first unused tap(4) device file
890 */
891 tap_fd = open("/dev/tap", TAPDEV_OFLAGS);
892 if (tap_fd < 0) {
893 warnc(errno, "Unable to find a free tap(4)");
894 return -1;
895 }
896 } else {
897 /*
898 * User supplied tap(4) device file or unix socket.
899 */
900 if (netif[0] == '/') /* Absolute path */
901 strlcpy(tap_dev, netif, sizeof(tap_dev));
902 else
903 snprintf(tap_dev, sizeof(tap_dev), "/dev/%s", netif);
904
905 tap_fd = open(tap_dev, TAPDEV_OFLAGS);
906
907 /*
908 * If we cannot open normally try to connect to it.
909 */
910 if (tap_fd < 0)
911 tap_fd = unix_connect(tap_dev);
912
913 if (tap_fd < 0) {
914 warn("Unable to open %s", tap_dev);
915 return -1;
916 }
917 }
918
919 /*
920 * Check whether the device file is a tap(4)
921 */
922 if (fstat(tap_fd, &st) < 0) {
923 failed = 1;
924 } else if (S_ISCHR(st.st_mode)) {
925 dname = fdevname(tap_fd);
926 if (dname)
927 dname = strstr(dname, "tap");
928 if (dname) {
929 /*
930 * Bring up the corresponding tap(4) interface
931 */
932 *tap_unit = strtol(dname + 3, NULL, 10);
933 printf("TAP UNIT %d\n", *tap_unit);
934 if (netif_set_tapflags(*tap_unit, IFF_UP, s) == 0)
935 failed = 0;
936 else
937 failed = 1;
938 } else {
939 failed = 1;
940 }
941 } else if (S_ISSOCK(st.st_mode)) {
942 /*
943 * Special socket connection (typically to vknet). We
944 * do not have to do anything.
945 */
946 failed = 0;
947 } else {
948 failed = 1;
949 }
950
951 if (failed) {
952 warnx("%s is not a tap(4) device or socket", tap_dev);
953 close(tap_fd);
954 tap_fd = -1;
955 *tap_unit = -1;
956 }
957 return tap_fd;
958}
959
960static int
961unix_connect(const char *path)
962{
963 struct sockaddr_un sunx;
964 int len;
965 int net_fd;
966 int sndbuf = 262144;
967 struct stat st;
968
969 snprintf(sunx.sun_path, sizeof(sunx.sun_path), "%s", path);
970 len = offsetof(struct sockaddr_un, sun_path[strlen(sunx.sun_path)]);
971 ++len; /* include nul */
972 sunx.sun_family = AF_UNIX;
973 sunx.sun_len = len;
974
975 net_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
976 if (net_fd < 0)
977 return(-1);
978 if (connect(net_fd, (void *)&sunx, len) < 0) {
979 close(net_fd);
980 return(-1);
981 }
982 setsockopt(net_fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
983 if (fstat(net_fd, &st) == 0)
984 printf("Network socket buffer: %d bytes\n", st.st_blksize);
985 fcntl(net_fd, F_SETFL, O_NONBLOCK);
986 return(net_fd);
987}
988
989#undef TAPDEV_MAJOR
990#undef TAPDEV_MINOR
991#undef TAPDEV_OFLAGS
992
993/*
994 * The following syntax is supported:
995 * 1) x.x.x.x tap(4)'s address is x.x.x.x
996 *
997 * 2) x.x.x.x/z tap(4)'s address is x.x.x.x
998 * tap(4)'s netmask len is z
999 *
1000 * 3) x.x.x.x:y.y.y.y tap(4)'s address is x.x.x.x
1001 * pseudo netif's address is y.y.y.y
1002 *
1003 * 4) x.x.x.x:y.y.y.y/z tap(4)'s address is x.x.x.x
1004 * pseudo netif's address is y.y.y.y
1005 * tap(4) and pseudo netif's netmask len are z
1006 *
1007 * 5) bridgeX tap(4) will be added to bridgeX
1008 *
1009 * 6) bridgeX:y.y.y.y tap(4) will be added to bridgeX
1010 * pseudo netif's address is y.y.y.y
1011 *
1012 * 7) bridgeX:y.y.y.y/z tap(4) will be added to bridgeX
1013 * pseudo netif's address is y.y.y.y
1014 * pseudo netif's netmask len is z
1015 */
1016static
1017int
1018netif_init_tap(int tap_unit, in_addr_t *addr, in_addr_t *mask, int s)
1019{
1020 in_addr_t tap_addr, netmask, netif_addr;
1021 int next_netif_addr;
1022 char *tok, *masklen_str, *ifbridge;
1023
1024 *addr = 0;
1025 *mask = 0;
1026
1027 tok = strtok(NULL, ":/");
1028 if (tok == NULL) {
1029 /*
1030 * Nothing special, simply use tap(4) as backend
1031 */
1032 return 0;
1033 }
1034
1035 if (inet_pton(AF_INET, tok, &tap_addr) > 0) {
1036 /*
1037 * tap(4)'s address is supplied
1038 */
1039 ifbridge = NULL;
1040
1041 /*
1042 * If there is a next token, it may be the pseudo
1043 * netif's address or the netmask len for tap(4)
1044 */
1045 next_netif_addr = 0;
1046 } else {
1047 /*
1048 * Not tap(4)'s address, assume it as a bridge(4)
1049 * iface name
1050 */
1051 tap_addr = 0;
1052 ifbridge = tok;
1053
1054 /*
1055 * If there is a next token, it must be the pseudo
1056 * netif's address
1057 */
1058 next_netif_addr = 1;
1059 }
1060
1061 netmask = netif_addr = 0;
1062
1063 tok = strtok(NULL, ":/");
1064 if (tok == NULL)
1065 goto back;
1066
1067 if (inet_pton(AF_INET, tok, &netif_addr) <= 0) {
1068 if (next_netif_addr) {
1069 warnx("Invalid pseudo netif address: %s", tok);
1070 return -1;
1071 }
1072 netif_addr = 0;
1073
1074 /*
1075 * Current token is not an address, so it must be the netmask len
1076 */
1077 masklen_str = tok;
1078 } else {
1079 /*
1080 * Current token is the pseudo netif address; if there is a next token
1081 * it must be the netmask len
1082 */
1083 masklen_str = strtok(NULL, "/");
1084 }
1085
1086 /* Calculate netmask */
1087 if (masklen_str != NULL) {
1088 u_long masklen;
1089
1090 masklen = strtoul(masklen_str, NULL, 10);
1091 if (masklen < 32 && masklen > 0) {
1092 netmask = htonl(~((1LL << (32 - masklen)) - 1)
1093 & 0xffffffff);
1094 } else {
1095 warnx("Invalid netmask len: %lu", masklen);
1096 return -1;
1097 }
1098 }
1099
1100 /* Make sure there are no more tokens left */
1101 if (strtok(NULL, ":/") != NULL) {
1102 warnx("Invalid argument to '-I'");
1103 return -1;
1104 }
1105
1106back:
1107 if (tap_unit < 0) {
1108 /* Do nothing */
1109 } else if (ifbridge == NULL) {
1110 /* Set tap(4) address/netmask */
1111 if (netif_set_tapaddr(tap_unit, tap_addr, netmask, s) < 0)
1112 return -1;
1113 } else {
1114 /* Tie tap(4) to bridge(4) */
1115 if (netif_add_tap2brg(tap_unit, ifbridge, s) < 0)
1116 return -1;
1117 }
1118
1119 *addr = netif_addr;
1120 *mask = netmask;
1121 return 0;
1122}
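/*
 * Examples (illustrative addresses) for -I: "-I tap0:10.26.0.1:10.26.0.2/24"
 * brings up tap0 with address 10.26.0.1 and gives the vkernel's pseudo netif
 * 10.26.0.2/24, while "-I auto:bridge0" attaches the first free tap(4) to
 * bridge0.  The leading netif portion ("auto", a tap device, or a socket
 * path) is split off by init_netif() below before the rest is parsed here.
 */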
1123
1124/*
1125 * NetifInfo[] will be filled for pseudo netif initialization.
1126 * NetifNum will be bumped to reflect the number of valid entries
1127 * in NetifInfo[].
1128 */
1129static
1130void
1131init_netif(char *netifExp[], int netifExpNum)
1132{
1133 int i, s;
1134
1135 if (netifExpNum == 0)
1136 return;
1137
1138 s = socket(AF_INET, SOCK_DGRAM, 0); /* for ioctl(SIOC) */
1139 if (s < 0)
1140 return;
1141
1142 for (i = 0; i < netifExpNum; ++i) {
1143 struct vknetif_info *info;
1144 in_addr_t netif_addr, netif_mask;
1145 int tap_fd, tap_unit;
1146 char *netif;
1147
1148 netif = strtok(netifExp[i], ":");
1149 if (netif == NULL) {
1150 warnx("Invalid argument to '-I'");
1151 continue;
1152 }
1153
1154 /*
1155 * Open tap(4) device file and bring up the
1156 * corresponding interface
1157 */
1158 tap_fd = netif_open_tap(netif, &tap_unit, s);
1159 if (tap_fd < 0)
1160 continue;
1161
1162 /*
1163 * Initialize tap(4) and get address/netmask
1164 * for pseudo netif
1165 *
1166 * NB: The rest of netifExp[i] is passed
1167 * to netif_init_tap() implicitly.
1168 */
1169 if (netif_init_tap(tap_unit, &netif_addr, &netif_mask, s) < 0) {
1170 /*
1171 * NB: Closing tap(4) device file will bring
1172 * down the corresponding interface
1173 */
1174 close(tap_fd);
1175 continue;
1176 }
1177
1178 info = &NetifInfo[NetifNum];
1179 info->tap_fd = tap_fd;
1180 info->tap_unit = tap_unit;
1181 info->netif_addr = netif_addr;
1182 info->netif_mask = netif_mask;
1183
1184 NetifNum++;
1185 if (NetifNum >= VKNETIF_MAX) /* XXX will this happen? */
1186 break;
1187 }
1188 close(s);
1189}
1190
1191/*
1192 * Create the pid file and leave it open and locked while the vkernel is
1193 * running. This allows a script to use /usr/bin/lockf to probe whether
1194 * a vkernel is still running (so as not to accidentally kill an unrelated
1195 * process from a stale pid file).
1196 */
1197static
1198void
1199writepid(void)
1200{
1201 char buf[32];
1202 int fd;
1203
1204 if (pid_file != NULL) {
1205 snprintf(buf, sizeof(buf), "%ld\n", (long)getpid());
1206 fd = open(pid_file, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0666);
1207 if (fd < 0) {
1208 if (errno == EWOULDBLOCK) {
1209 perror("Failed to lock pidfile, "
1210 "vkernel already running");
1211 } else {
1212 perror("Failed to create pidfile");
1213 }
1214 exit(EX_SOFTWARE);
1215 }
1216 ftruncate(fd, 0);
1217 write(fd, buf, strlen(buf));
1218 /* leave the file open to maintain the lock */
1219 }
1220}
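/*
 * Illustrative sketch (hypothetical helper, not part of this file): an
 * external monitor could use the same exclusive-lock convention to test
 * whether the vkernel behind a pid file is still running, mirroring the
 * open() above.  Assumes the BSD O_EXLOCK open(2) extension.
 */
#if 0
static int
vkernel_is_running(const char *pidfile)
{
	int fd;

	fd = open(pidfile, O_RDWR | O_EXLOCK | O_NONBLOCK);
	if (fd < 0)
		return (errno == EWOULDBLOCK);	/* locked: still running */
	close(fd);				/* we got the lock: it is not */
	return (0);
}
#endif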
1221
1222static
1223void
1224cleanpid(void)
1225{
1226 if (pid_file != NULL) {
1227 if (unlink(pid_file) < 0)
1228 perror("Warning: couldn't remove pidfile");
1229 }
1230}
1231
1232static
1233void
1234usage_err(const char *ctl, ...)
1235{
1236 va_list va;
1237
1238 va_start(va, ctl);
1239 vfprintf(stderr, ctl, va);
1240 va_end(va);
1241 fprintf(stderr, "\n");
1242 exit(EX_USAGE);
1243}
1244
1245static
1246void
1247usage_help(_Bool help)
1248{
1249 fprintf(stderr, "Usage: %s [-hsUv] [-c file] [-e name=value:name=value:...]\n"
1250 "\t[-i file] [-I interface[:address1[:address2][/netmask]]] [-l cpulock]\n"
1251 "\t[-m size] [-n numcpus] [-p file] [-r file]\n", save_av[0]);
1252
1253 if (help)
1254 fprintf(stderr, "\nArguments:\n"
1255 "\t-c\tSpecify a readonly CD-ROM image file to be used by the kernel.\n"
1256 "\t-e\tSpecify an environment to be used by the kernel.\n"
1257 "\t-h\tThis list of options.\n"
1258 "\t-i\tSpecify a memory image file to be used by the virtual kernel.\n"
1259 "\t-I\tCreate a virtual network device.\n"
1260 "\t-l\tSpecify which, if any, real CPUs to lock virtual CPUs to.\n"
1261 "\t-m\tSpecify the amount of memory to be used by the kernel in bytes.\n"
1262 "\t-n\tSpecify the number of CPUs you wish to emulate.\n"
1263 "\t-p\tSpecify a file in which to store the process ID.\n"
1264 "\t-r\tSpecify a R/W disk image file to be used by the kernel.\n"
1265 "\t-s\tBoot into single-user mode.\n"
1266 "\t-U\tEnable writing to kernel memory and module loading.\n"
1267 "\t-v\tTurn on verbose booting.\n");
1268
1269 exit(EX_USAGE);
1270}
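/*
 * Example invocation (illustrative binary name, paths and addresses):
 *
 *	./kernel -m 512m -r /var/vkernel/rootimg.01 \
 *		-I auto:10.26.0.1/24 -p /var/run/vkernel.pid
 *
 * boots from the given disk image with 512MB of memory, allocates the first
 * free tap(4) at 10.26.0.1/24 and records/locks the process ID in the pid
 * file.
 */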
1271
1272void
1273cpu_reset(void)
1274{
1275 kprintf("cpu reset, rebooting vkernel\n");
1276 closefrom(3);
1277 cleanpid();
1278 execv(save_av[0], save_av);
1279}
1280
1281void
1282cpu_halt(void)
1283{
1284 kprintf("cpu halt, exiting vkernel\n");
1285 cleanpid();
1286 exit(EX_OK);
1287}
1288
1289void
1290setrealcpu(void)
1291{
1292 switch(lwp_cpu_lock) {
1293 case LCL_PER_CPU:
1294 if (bootverbose)
1295 kprintf("Locking CPU%d to real cpu %d\n",
1296 mycpuid, next_cpu);
1297 usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1298 next_cpu++;
1299 if (next_cpu >= real_ncpus)
1300 next_cpu = 0;
1301 break;
1302 case LCL_SINGLE_CPU:
1303 if (bootverbose)
1304 kprintf("Locking CPU%d to real cpu %d\n",
1305 mycpuid, next_cpu);
1306 usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1307 break;
1308 default:
1309 /* do not map virtual cpus to real cpus */
1310 break;
1311 }
1312}
1313
1314/*
1315 * Allocate and free memory for module loading. The loaded module
1316 * has to be placed somewhere near the current kernel binary load
1317 * point or the relocations will not work.
1318 *
1319 * I'm not sure why this isn't working.
1320 */
1321int
1322vkernel_module_memory_alloc(vm_offset_t *basep, size_t bytes)
1323{
1324 kprintf("module loading for vkernel64's not currently supported\n");
1325 *basep = 0;
1326 return ENOMEM;
1327#if 0
1328#if 1
1329 size_t xtra;
1330 xtra = (PAGE_SIZE - (vm_offset_t)sbrk(0)) & PAGE_MASK;
1331 *basep = (vm_offset_t)sbrk(xtra + bytes) + xtra;
1332 bzero((void *)*basep, bytes);
1333#else
1334 *basep = (vm_offset_t)mmap((void *)0x000000000, bytes,
1335 PROT_READ|PROT_WRITE|PROT_EXEC,
1336 MAP_ANON|MAP_SHARED, -1, 0);
1337 if ((void *)*basep == MAP_FAILED)
1338 return ENOMEM;
1339#endif
1340 kprintf("basep %p %p %zd\n",
1341 (void *)vkernel_module_memory_alloc, (void *)*basep, bytes);
1342 return 0;
1343#endif
1344}
1345
1346void
1347vkernel_module_memory_free(vm_offset_t base, size_t bytes)
1348{
1349#if 0
1350#if 0
1351 munmap((void *)base, bytes);
1352#endif
1353#endif
1354}