From: Matthew Dillon Date: Sun, 1 Jul 2007 02:51:45 +0000 (+0000) Subject: Bring in all of Joe Talbott's SMP virtual kernel work to date, which makes X-Git-Tag: v2.0.1~2712 X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/24eb47e054b150944d54923f1822282b2da8a3bb Bring in all of Joe Talbott's SMP virtual kernel work to date, which makes virtual kernel builds with SMP almost get through a full boot. This work includes: * Creation of 'cpu' threads via libthread_xu * Globaldata initialization * AP synchronization * Bootstrapping to the idle thread * SMP pmap (mmu) functions * IPI handling My part of this commit: * Bring all the signal interrupts under DragonFly's machine independent interrupt handler API. This will properly deal with the MP lock and critical section handling. * Some additional pmap bits to handle SMP invalidation issues. Submitted-by: Joe Talbott Additional-bits-by: Matt Dillon --- diff --git a/sys/platform/vkernel/conf/Makefile b/sys/platform/vkernel/conf/Makefile index cdf0330e8a..1505dc1a7d 100644 --- a/sys/platform/vkernel/conf/Makefile +++ b/sys/platform/vkernel/conf/Makefile @@ -1,4 +1,4 @@ -# $DragonFly: src/sys/platform/vkernel/conf/Makefile,v 1.6 2007/01/19 07:23:42 dillon Exp $ +# $DragonFly: src/sys/platform/vkernel/conf/Makefile,v 1.7 2007/07/01 02:51:41 dillon Exp $ # # Which version of config(8) is required. %VERSREQ= 400026 @@ -15,7 +15,7 @@ S= ../.. # Override the normal kernel link and link as a normal user program # -SYSTEM_LD= @${CC} -g -export-dynamic -o ${.TARGET} ${SYSTEM_OBJS} vers.o +SYSTEM_LD= @${CC} -g -export-dynamic -o ${.TARGET} ${SYSTEM_OBJS} vers.o -lthread_xu %BEFORE_DEPEND diff --git a/sys/platform/vkernel/conf/files b/sys/platform/vkernel/conf/files index 906ff7ac7a..7f7d97f21a 100644 --- a/sys/platform/vkernel/conf/files +++ b/sys/platform/vkernel/conf/files @@ -1,7 +1,7 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. 
# -# $DragonFly: src/sys/platform/vkernel/conf/files,v 1.18 2007/06/18 18:57:12 josepht Exp $ +# $DragonFly: src/sys/platform/vkernel/conf/files,v 1.19 2007/07/01 02:51:41 dillon Exp $ # bf_enc.o optional ipsec ipsec_esp \ dependency "$S/crypto/blowfish/arch/i386/bf_enc.S $S/crypto/blowfish/arch/i386/bf_enc_586.S $S/crypto/blowfish/arch/i386/bf_enc_686.S" \ @@ -31,7 +31,8 @@ vfs/smbfs/smbfs_vnops.c optional smbfs cpu/i386/misc/atomic.c standard \ compile-with "${CC} -c ${CFLAGS} ${DEFINED_PROF:S/^$/-fomit-frame-pointer/} ${.IMPSRC}" platform/vkernel/i386/autoconf.c standard -platform/vkernel/i386/mp.c optional smp +platform/vkernel/i386/mp.c optional smp \ + compile-with "${CC} -c -pthread ${CFLAGS} -I/usr/include ${.IMPSRC}" platform/vkernel/i386/mplock.s optional smp # # DDB XXX @@ -69,6 +70,7 @@ platform/vkernel/i386/fork_tramp.s standard platform/vkernel/platform/init.c standard platform/vkernel/platform/globaldata.c standard platform/vkernel/platform/kqueue.c standard +platform/vkernel/platform/shutdown.c standard platform/vkernel/platform/machintr.c standard platform/vkernel/platform/copyio.c standard platform/vkernel/platform/pmap.c standard diff --git a/sys/platform/vkernel/i386/cpu_regs.c b/sys/platform/vkernel/i386/cpu_regs.c index b4cc0cb61e..f6da94d967 100644 --- a/sys/platform/vkernel/i386/cpu_regs.c +++ b/sys/platform/vkernel/i386/cpu_regs.c @@ -37,7 +37,7 @@ * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $ - * $DragonFly: src/sys/platform/vkernel/i386/cpu_regs.c,v 1.17 2007/07/01 01:11:36 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/i386/cpu_regs.c,v 1.18 2007/07/01 02:51:43 dillon Exp $ */ #include "use_ether.h" @@ -699,7 +699,6 @@ cpu_idle(void) if (cpu_idle_hlt && !lwkt_runnable() && (td->td_flags & TDF_IDLE_NOHLT) == 0) { splz(); - signalmailbox(NULL); if (!lwkt_runnable()) { sigpause(0); } @@ -712,7 +711,6 @@ cpu_idle(void) } else { td->td_flags &= 
~TDF_IDLE_NOHLT; splz(); - signalmailbox(NULL); #ifdef SMP /*__asm __volatile("sti; pause");*/ __asm __volatile("pause"); diff --git a/sys/platform/vkernel/i386/exception.c b/sys/platform/vkernel/i386/exception.c index ddc9931fdf..ac8deb761f 100644 --- a/sys/platform/vkernel/i386/exception.c +++ b/sys/platform/vkernel/i386/exception.c @@ -32,12 +32,13 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $DragonFly: src/sys/platform/vkernel/i386/exception.c,v 1.6 2007/06/17 16:46:15 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/i386/exception.c,v 1.7 2007/07/01 02:51:43 dillon Exp $ */ #include "opt_ddb.h" #include #include +#include #include #include #include @@ -48,7 +49,11 @@ #include #include #include +#include + +#include #include +#include int _ucodesel = LSEL(LUCODE_SEL, SEL_UPL); int _udatasel = LSEL(LUDATA_SEL, SEL_UPL); @@ -58,13 +63,24 @@ static void exc_segfault(int signo, siginfo_t *info, void *ctx); static void exc_debugger(int signo, siginfo_t *info, void *ctx); #endif -/* signal shutdown thread misc. */ - -static void sigshutdown_daemon( void ); -static struct thread *sigshutdown_thread; -static struct kproc_desc sigshut_kp = { - "sigshutdown", sigshutdown_daemon, &sigshutdown_thread -}; +/* + * IPIs are 'fast' interrupts, so we deal with them directly from our + * signal handler. 
+ */ +static +void +ipi(int nada, siginfo_t *info, void *ctxp) +{ + ++mycpu->gd_intr_nesting_level; + if (curthread->td_pri < TDPRI_CRIT) { + curthread->td_pri += TDPRI_CRIT; + lwkt_process_ipiq(); + curthread->td_pri -= TDPRI_CRIT; + } else { + need_ipiq(); + } + --mycpu->gd_intr_nesting_level; +} void init_exceptions(void) @@ -83,11 +99,12 @@ init_exceptions(void) sigaction(SIGQUIT, &sa, NULL); #endif - bzero(&sa, sizeof(sa)); - sigemptyset(&sa.sa_mask); - sa.sa_flags |= SA_MAILBOX | SA_NODEFER; - sa.sa_mailbox = &mdcpu->gd_shutdown; - sigaction(SIGTERM, &sa, NULL); + sa.sa_sigaction = ipi; + if (sigaction(SIGUSR1, &sa, NULL) != 0) + { + warn("ipi handler setup failed"); + panic("IPI setup failed"); + } } /* @@ -112,31 +129,6 @@ exc_segfault(int signo, siginfo_t *info, void *ctxp) splz(); } -/* - * This function runs in a thread dedicated to external shutdown signals. - * - * Currently, when a vkernel recieves a SIGTERM, either the VKERNEL init(8) - * is signaled with SIGUSR2, or the VKERNEL simply shuts down, preventing - * fsck's when the VKERNEL is restarted. - */ -static void -sigshutdown_daemon( void ) -{ - while (mdcpu->gd_shutdown == 0) { - tsleep(&mdcpu->gd_shutdown, 0, "sswait", 0); - } - mdcpu->gd_shutdown = 0; - kprintf("Caught SIGTERM from host system. Shutting down...\n"); - if (initproc != NULL) { - ksignal(initproc, SIGUSR2); - } - else { - reboot(RB_POWEROFF); - } -} -SYSINIT(sigshutdown, SI_BOOT2_PROC0, SI_ORDER_ANY, - kproc_start, &sigshut_kp); - #ifdef DDB static void diff --git a/sys/platform/vkernel/i386/mp.c b/sys/platform/vkernel/i386/mp.c index eadab732a5..8e3acec142 100644 --- a/sys/platform/vkernel/i386/mp.c +++ b/sys/platform/vkernel/i386/mp.c @@ -31,67 +31,161 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $DragonFly: src/sys/platform/vkernel/i386/mp.c,v 1.1 2007/06/18 18:57:12 josepht Exp $ + * $DragonFly: src/sys/platform/vkernel/i386/mp.c,v 1.2 2007/07/01 02:51:43 dillon Exp $ */ + #include +#include +#include +#include #include +#include +#include +#include +#include + #include +#include +#include +#include #include +#include +#include +#include +#include +#include + +extern pt_entry_t *KPTphys; + +volatile u_int stopped_cpus; +cpumask_t smp_active_mask = 1; /* which cpus are ready for IPIs etc? */ +static int boot_address; +static cpumask_t smp_startup_mask = 1; /* which cpus have been started */ +int mp_naps; /* # of Applications processors */ +static int mp_finish; + +/* function prototypes XXX these should go elsewhere */ +void bootstrap_idle(void); +void single_cpu_ipi(int, int, int); +void selected_cpu_ipi(u_int, int, int); #if 0 -volatile lapic_t lapic; /* needed for kern/kern_shutdown.c */ -#endif -volatile u_int stopped_cpus; -cpumask_t smp_active_mask = 1; /* which cpus are ready for IPIs etc? */ -#if 0 -u_int mp_lock; +void ipi_handler(int); #endif -void -mp_start(void) -{ - panic("XXX mp_start()"); -} +pt_entry_t *SMPpt; -void -mp_announce(void) -{ - panic("XXX mp_announce()"); -} +/* AP uses this during bootstrap. Do not staticize. */ +char *bootSTK; +static int bootAP; -#if 0 + +/* XXX these need to go into the appropriate header file */ +static int start_all_aps(u_int); +void init_secondary(void); +void *start_ap(void *); + +/* + * Get SMP fully working before we start initializing devices. + */ +static void -get_mplock(void) +ap_finish(void) { - panic("XXX get_mplock()"); + int i; + cpumask_t ncpus_mask = 0; + + for (i = 1; i <= ncpus; i++) + ncpus_mask |= (1 << i); + + mp_finish = 1; + if (bootverbose) + kprintf("Finish MP startup\n"); + + /* build our map of 'other' CPUs */ + mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid); + + /* + * Let the other cpu's finish initializing and build their map + * of 'other' CPUs. 
+ */ + rel_mplock(); + while (smp_active_mask != smp_startup_mask) + cpu_lfence(); + + while (try_mplock() == 0) + ; + if (bootverbose) + kprintf("Active CPU Mask: %08x\n", smp_active_mask); } -int -try_mplock(void) +SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL) + + +void * +start_ap(void *arg __unused) { - panic("XXX try_mplock()"); + init_secondary(); + + bootstrap_idle(); + + return(NULL); /* NOTREACHED */ } +/* storage for AP thread IDs */ +pthread_t ap_tids[MAXCPU]; + void -rel_mplock(void) +mp_start(void) { - panic("XXX rel_mplock()"); -} + int shift; + + /* XXX testing 2 cpus */ + ncpus = 2; + + mp_naps = ncpus - 1; + + /* ncpus2 -- ncpus rounded down to the nearest power of 2 */ + for (shift = 0; (1 << shift) <= ncpus; ++shift) + ; + --shift; + ncpus2_shift = shift; + ncpus2 = 1 << shift; + ncpus2_mask = ncpus2 - 1; + + /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */ + if ((1 << shift) < ncpus) + ++shift; + ncpus_fit = 1 << shift; + ncpus_fit_mask = ncpus_fit - 1; + + /* + * cpu0 initialization + */ + mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, + sizeof(lwkt_ipiq) * ncpus); + bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus); + + /* + * cpu 1-(n-1) + */ + start_all_aps(boot_address); -int -cpu_try_mplock(void) -{ - panic("XXX cpu_try_mplock()"); } + void -cpu_get_initial_mplock(void) +mp_announce(void) { - panic("XXX cpu_get_initial_mplock()"); -} -#endif + int x; + kprintf("DragonFly/MP: Multiprocessor\n"); + kprintf(" cpu0 (BSP)\n"); + + for (x = 1; x <= mp_naps; ++x) + kprintf(" cpu%d (AP)\n", x); +} void forward_fastint_remote(void *arg) @@ -102,33 +196,260 @@ forward_fastint_remote(void *arg) void cpu_send_ipiq(int dcpu) { + if ((1 << dcpu) & smp_active_mask) + if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0) + panic("pthread_kill failed in cpu_send_ipiq"); +#if 0 panic("XXX cpu_send_ipiq()"); +#endif } void smp_invltlb(void) { #ifdef SMP - panic("XXX smp_invltlb()"); #endif } +void +single_cpu_ipi(int cpu, 
int vector, int delivery_mode) +{ + kprintf("XXX single_cpu_ipi\n"); +} + +void +selected_cpu_ipi(u_int target, int vector, int delivery_mode) +{ + crit_enter(); + while (target) { + int n = bsfl(target); + target &= ~(1 << n); + single_cpu_ipi(n, vector, delivery_mode); + } + crit_exit(); +} + int stop_cpus(u_int map) { + map &= smp_active_mask; + + crit_enter(); + while (map) { + int n = bsfl(map); + map &= ~(1 << n); + if (pthread_kill(ap_tids[n], SIGSTOP) != 0) + panic("stop_cpus: pthread_kill failed"); + } + crit_exit(); +#if 0 panic("XXX stop_cpus()"); +#endif + + return(1); } int restart_cpus(u_int map) { + map &= smp_active_mask; + + crit_enter(); + while (map) { + int n = bsfl(map); + map &= ~(1 << n); + if (pthread_kill(ap_tids[n], SIGCONT) != 0) + panic("restart_cpus: pthread_kill failed"); + } + crit_exit(); +#if 0 panic("XXX restart_cpus()"); +#endif + + return(1); } void ap_init(void) { - panic("XXX ap_init()"); + /* + * Adjust smp_startup_mask to signal the BSP that we have started + * up successfully. Note that we do not yet hold the BGL. The BSP + * is waiting for our signal. + * + * We can't set our bit in smp_active_mask yet because we are holding + * interrupts physically disabled and remote cpus could deadlock + * trying to send us an IPI. + */ + smp_startup_mask |= 1 << mycpu->gd_cpuid; + cpu_mfence(); + + /* + * Interlock for finalization. Wait until mp_finish is non-zero, + * then get the MP lock. + * + * Note: We are in a critical section. + * + * Note: We have to synchronize td_mpcount to our desired MP state + * before calling cpu_try_mplock(). + * + * Note: we are the idle thread, we can only spin. + * + * Note: The load fence is memory volatile and prevents the compiler + * from improperly caching mp_finish, and the cpu from improperly + * caching it. 
+ */ + + while (mp_finish == 0) { + cpu_lfence(); + } + ++curthread->td_mpcount; + while (cpu_try_mplock() == 0) + ; + + /* BSP may have changed PTD while we're waiting for the lock */ + cpu_invltlb(); + + /* Build our map of 'other' CPUs. */ + mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid); + + kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid); + + + /* Set memory range attributes for this CPU to match the BSP */ + mem_range_AP_init(); + /* + * Once we go active we must process any IPIQ messages that may + * have been queued, because no actual IPI will occur until we + * set our bit in the smp_active_mask. If we don't the IPI + * message interlock could be left set which would also prevent + * further IPIs. + * + * The idle loop doesn't expect the BGL to be held and while + * lwkt_switch() normally cleans things up this is a special case + * because we returning almost directly into the idle loop. + * + * The idle thread is never placed on the runq, make sure + * nothing we've done put it there. + */ + KKASSERT(curthread->td_mpcount == 1); + smp_active_mask |= 1 << mycpu->gd_cpuid; + + mdcpu->gd_fpending = 0; + mdcpu->gd_ipending = 0; + initclocks_pcpu(); /* clock interrupts (via IPIs) */ + lwkt_process_ipiq(); + + /* + * Releasing the mp lock lets the BSP finish up the SMP init + */ + rel_mplock(); + KKASSERT((curthread->td_flags & TDF_RUNQ) == 0); +} + +void +init_secondary(void) +{ + int myid = bootAP; + struct mdglobaldata *md; + struct privatespace *ps; + + ps = &CPU_prvspace[myid]; + + KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps); + + /* + * Setup the %gs for cpu #n. The mycpu macro works after this + * point. 
+ */ + tls_set_fs(&CPU_prvspace[myid], sizeof(struct privatespace)); + + md = mdcpu; /* loaded through %fs:0 (mdglobaldata.mi.gd_prvspace)*/ + + md->gd_common_tss.tss_esp0 = 0; /* not used until after switch */ + md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); + md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16; + + /* + * Set to a known state: + * Set by mpboot.s: CR0_PG, CR0_PE + * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM + */ } +static int +start_all_aps(u_int boot_addr) +{ + int x, i; + struct mdglobaldata *gd; + struct privatespace *ps; + vm_page_t m; + vm_offset_t va; +#if 0 + struct lwp_params params; +#endif + + /* + * needed for ipis to initial thread + * FIXME: rename ap_tids? + */ + ap_tids[0] = pthread_self(); + + for (x = 1; x <= mp_naps; x++) + { + /* Allocate space for the CPU's private space. */ + va = (vm_offset_t)&CPU_prvspace[x]; + for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) { + va =(vm_offset_t)&CPU_prvspace[x].mdglobaldata + i; + m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM); + pmap_kenter_quick(va, m->phys_addr); + } + for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) { + va =(vm_offset_t)&CPU_prvspace[x].idlestack + i; + m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM); + pmap_kenter_quick(va, m->phys_addr); + } + + gd = &CPU_prvspace[x].mdglobaldata; /* official location */ + bzero(gd, sizeof(*gd)); + gd->mi.gd_prvspace = ps = &CPU_prvspace[x]; + + /* prime data page for it to use */ + mi_gdinit(&gd->mi, x); + cpu_gdinit(gd, x); + +#if 0 + gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1); + gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2); + gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3); + gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1); + gd->gd_CADDR1 = ps->CPAGE1; + gd->gd_CADDR2 = ps->CPAGE2; + gd->gd_CADDR3 = ps->CPAGE3; + gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1; +#endif + + gd->mi.gd_ipiq = (void 
*)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1)); + bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1)); + + /* + * Setup the AP boot stack + */ + bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2]; + bootAP = x; + + /* + * Setup the AP's lwp, this is the 'cpu' + */ + pthread_create(&ap_tids[x], NULL, start_ap, NULL); + + while((smp_startup_mask & (1 << x)) == 0) + cpu_lfence(); /* XXX spin until the AP has started */ + + /* XXX hack, sleep for a second to let the APs start up */ + sleep(1); + } + + return(ncpus - 1); +} diff --git a/sys/platform/vkernel/i386/mplock.s b/sys/platform/vkernel/i386/mplock.s index d2ae09c701..63ded8548f 100644 --- a/sys/platform/vkernel/i386/mplock.s +++ b/sys/platform/vkernel/i386/mplock.s @@ -1,6 +1,6 @@ /* * $FreeBSD: src/sys/i386/i386/mplock.s,v 1.29.2.2 2000/05/16 06:58:06 dillon Exp $ - * $DragonFly: src/sys/platform/vkernel/i386/mplock.s,v 1.1 2007/06/18 18:57:12 josepht Exp $ + * $DragonFly: src/sys/platform/vkernel/i386/mplock.s,v 1.2 2007/07/01 02:51:43 dillon Exp $ * * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved. * @@ -151,9 +151,6 @@ NON_GPROF_ENTRY(get_mplock) movl $-1,%eax lock cmpxchgl %ecx,mp_lock jnz 2f -#ifdef PARANOID_INVLTLB - movl %cr3,%eax; movl %eax,%cr3 /* YYY check and remove */ -#endif NON_GPROF_RET /* success */ /* diff --git a/sys/platform/vkernel/i386/swtch.s b/sys/platform/vkernel/i386/swtch.s index 8b53677cfc..40705a3030 100644 --- a/sys/platform/vkernel/i386/swtch.s +++ b/sys/platform/vkernel/i386/swtch.s @@ -66,7 +66,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $ - * $DragonFly: src/sys/platform/vkernel/i386/swtch.s,v 1.7 2007/06/29 21:54:11 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/i386/swtch.s,v 1.8 2007/07/01 02:51:43 dillon Exp $ */ #include "use_npx.h" @@ -610,3 +610,13 @@ ENTRY(cpu_lwkt_restore) popl %ebp ret +/* + * bootstrap_idle() + * + * Make AP become the idle loop. 
+ */ +ENTRY(bootstrap_idle) + movl PCPU(curthread),%eax + movl %eax,%ebx + movl TD_SP(%eax),%esp + ret diff --git a/sys/platform/vkernel/i386/trap.c b/sys/platform/vkernel/i386/trap.c index 018114dd8d..d36b2719d2 100644 --- a/sys/platform/vkernel/i386/trap.c +++ b/sys/platform/vkernel/i386/trap.c @@ -36,7 +36,7 @@ * * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $ - * $DragonFly: src/sys/platform/vkernel/i386/trap.c,v 1.26 2007/07/01 01:11:36 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/i386/trap.c,v 1.27 2007/07/01 02:51:43 dillon Exp $ */ /* @@ -1430,17 +1430,6 @@ go_user(struct intrframe *frame) tf->tf_xflags |= PGEX_FPFAULT; } - /* - * We must poll the mailbox prior to making the system call - * to properly interlock new mailbox signals against the - * system call. - * - * Passing a NULL frame causes the interrupt code to assume - * the supervisor. - */ - if (mdcpu->gd_mailbox) - signalmailbox(NULL); - /* * Run emulated user process context. This call interlocks * with new mailbox signals. @@ -1457,12 +1446,9 @@ go_user(struct intrframe *frame) tf->tf_xflags, frame->if_xflags); #endif if (r < 0) { - if (errno == EINTR) - signalmailbox(frame); - else + if (errno != EINTR) panic("vmspace_ctl failed"); } else { - signalmailbox(frame); if (tf->tf_trapno) { user_trap(tf); } else if (mycpu->gd_reqflags & RQF_AST_MASK) { diff --git a/sys/platform/vkernel/include/globaldata.h b/sys/platform/vkernel/include/globaldata.h index 537efd8dcb..2a05e38837 100644 --- a/sys/platform/vkernel/include/globaldata.h +++ b/sys/platform/vkernel/include/globaldata.h @@ -28,7 +28,7 @@ * should not include this file. 
* * $FreeBSD: src/sys/i386/include/globaldata.h,v 1.11.2.1 2000/05/16 06:58:10 dillon Exp $ - * $DragonFly: src/sys/platform/vkernel/include/globaldata.h,v 1.6 2007/06/17 16:46:16 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/include/globaldata.h,v 1.7 2007/07/01 02:51:44 dillon Exp $ */ #ifndef _MACHINE_GLOBALDATA_H_ @@ -83,8 +83,8 @@ struct mdglobaldata { int gd_spending; /* software interrupt pending */ int gd_sdelayed; /* delayed software ints */ int gd_currentldt; - int gd_mailbox; /* I/O signal delivery mailbox */ - int gd_shutdown; /* Shutdown signal delivery mailbox */ + int unused003; + int unused002; u_int unused001; u_int gd_other_cpus; u_int gd_ss_eflags; diff --git a/sys/platform/vkernel/include/md_var.h b/sys/platform/vkernel/include/md_var.h index a59eaaac57..f0e4323224 100644 --- a/sys/platform/vkernel/include/md_var.h +++ b/sys/platform/vkernel/include/md_var.h @@ -31,7 +31,7 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $DragonFly: src/sys/platform/vkernel/include/md_var.h,v 1.19 2007/05/25 02:21:18 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/include/md_var.h,v 1.20 2007/07/01 02:51:44 dillon Exp $ */ #ifndef _MACHINE_MD_VAR_H_ @@ -99,9 +99,10 @@ void kern_trap(struct trapframe *); void user_trap(struct trapframe *); void syscall2 (struct trapframe *); void vcons_set_mode(int); -void signalmailbox(struct intrframe *); int npxdna(struct trapframe *); +void signalintr(int intr); + struct kqueue_info; struct kqueue_info *kqueue_add(int, void (*)(void *, struct intrframe *), void *); void kqueue_del(struct kqueue_info *); diff --git a/sys/platform/vkernel/include/pmap.h b/sys/platform/vkernel/include/pmap.h index 6111b3229a..42240c84b9 100644 --- a/sys/platform/vkernel/include/pmap.h +++ b/sys/platform/vkernel/include/pmap.h @@ -43,7 +43,7 @@ * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 
2001/10/03 07:15:37 peter Exp $ - * $DragonFly: src/sys/platform/vkernel/include/pmap.h,v 1.3 2007/01/15 09:28:39 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/include/pmap.h,v 1.4 2007/07/01 02:51:44 dillon Exp $ */ #ifndef _MACHINE_PMAP_H_ @@ -123,6 +123,7 @@ struct pmap { pd_entry_t *pm_pdir; /* KVA of page directory */ vpte_t pm_pdirpte; /* pte mapping phys page */ struct vm_object *pm_pteobj; /* Container for pte's */ + cpumask_t pm_cpucachemask;/* Invalidate cpu mappings */ TAILQ_ENTRY(pmap) pm_pmnode; /* list of pmaps */ TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ int pm_count; /* reference count */ diff --git a/sys/platform/vkernel/platform/globaldata.c b/sys/platform/vkernel/platform/globaldata.c index d78333c064..1015ece9d8 100644 --- a/sys/platform/vkernel/platform/globaldata.c +++ b/sys/platform/vkernel/platform/globaldata.c @@ -31,7 +31,7 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $DragonFly: src/sys/platform/vkernel/platform/globaldata.c,v 1.3 2007/01/06 19:40:55 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/platform/globaldata.c,v 1.4 2007/07/01 02:51:45 dillon Exp $ */ #include @@ -95,8 +95,6 @@ cpu_gdinit(struct mdglobaldata *gd, int cpu) * Whole page table mappings and where we have to store the related * pde's. 
*/ - KKASSERT(((vm_offset_t)gd->gd_PT1map & SEG_MASK) == 0); - KKASSERT(((vm_offset_t)gd->gd_PT2map & SEG_MASK) == 0); gd->gd_PT1map = gd->mi.gd_prvspace->PT1MAP; gd->gd_PT1pdir = NULL; @@ -107,6 +105,9 @@ cpu_gdinit(struct mdglobaldata *gd, int cpu) gd->gd_PT2pdir = NULL; gd->gd_PT2pde = &KernelPTD[((vm_offset_t)gd->gd_PT2map - KvaStart) / SEG_SIZE]; + + KKASSERT(((vm_offset_t)gd->gd_PT1map & SEG_MASK) == 0); + KKASSERT(((vm_offset_t)gd->gd_PT2map & SEG_MASK) == 0); } int diff --git a/sys/platform/vkernel/platform/kqueue.c b/sys/platform/vkernel/platform/kqueue.c index b64d489193..2b52a08ac7 100644 --- a/sys/platform/vkernel/platform/kqueue.c +++ b/sys/platform/vkernel/platform/kqueue.c @@ -31,7 +31,7 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $DragonFly: src/sys/platform/vkernel/platform/kqueue.c,v 1.4 2007/06/17 16:46:17 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/platform/kqueue.c,v 1.5 2007/07/01 02:51:45 dillon Exp $ */ #include @@ -62,7 +62,11 @@ struct kqueue_info { int fd; }; -int KQueueFd = -1; +static void kqueuesig(int signo); +static void kqueue_intr(void *arg __unused, void *frame __unused); + +static int KQueueFd = -1; +static void *VIntr1; /* * Initialize kqueue based I/O @@ -78,8 +82,9 @@ init_kqueue(void) struct sigaction sa; bzero(&sa, sizeof(sa)); - sa.sa_mailbox = &mdcpu->gd_mailbox; - sa.sa_flags = SA_MAILBOX | SA_NODEFER; + /*sa.sa_mailbox = &mdcpu->gd_mailbox;*/ + sa.sa_flags = SA_NODEFER; + sa.sa_handler = kqueuesig; sigemptyset(&sa.sa_mask); sigaction(SIGIO, &sa, NULL); KQueueFd = kqueue(); @@ -90,49 +95,12 @@ init_kqueue(void) } /* - * A SIGIO mailbox event cause a system call interruption and a timely - * poll. If the mailbox is active we clean out all pending kqueue events. - * It is really that simple. - * - * We specify EV_CLEAR for all events to ensure no requeues before their - * time. A new SIGIO is not generated unless all events are cleared from - * the kqueue. 
+ * Signal handler dispatches interrupt thread. Use interrupt #1 */ -void -signalmailbox(struct intrframe *frame) +static void +kqueuesig(int signo) { - struct mdglobaldata *gd = mdcpu; - struct timespec ts; - struct kevent kevary[8]; - int n; - int i; - - /* - * we only need to wake up our shutdown thread once. - * Keep it non-zero so the shutdown thread can detect it. - */ - - if (mdcpu->gd_shutdown > 0) { - mdcpu->gd_shutdown = -1; - wakeup(&mdcpu->gd_shutdown); - } - - if (gd->gd_mailbox == 0) - return; - gd->gd_mailbox = 0; - ts.tv_sec = 0; - ts.tv_nsec = 0; - crit_enter(); - do { - n = kevent(KQueueFd, NULL, 0, kevary, 8, &ts); - for (i = 0; i < n; ++i) { - struct kevent *kev = &kevary[i]; - struct kqueue_info *info = (void *)kev->udata; - - info->func(info->data, frame); - } - } while (n == 8); - crit_exit(); + signalintr(1); } /* @@ -145,6 +113,9 @@ kqueue_add(int fd, void (*func)(void *, struct intrframe *), void *data) struct kqueue_info *info; struct kevent kev; + if (VIntr1 == NULL) + VIntr1 = register_int(1, kqueue_intr, NULL, "kqueue", NULL, 0); + info = kmalloc(sizeof(*info), M_DEVBUF, M_ZERO|M_INTWAIT); info->func = func; info->data = data; @@ -201,3 +172,31 @@ kqueue_del(struct kqueue_info *info) kfree(info, M_DEVBUF); } +/* + * Safely called via DragonFly's normal interrupt handling mechanism. + * + * Calleld with the MP lock held. Note that this is still an interrupt + * thread context. 
+ */ +static +void +kqueue_intr(void *arg __unused, void *frame __unused) +{ + struct timespec ts; + struct kevent kevary[8]; + int n; + int i; + + ts.tv_sec = 0; + ts.tv_nsec = 0; + do { + n = kevent(KQueueFd, NULL, 0, kevary, 8, &ts); + for (i = 0; i < n; ++i) { + struct kevent *kev = &kevary[i]; + struct kqueue_info *info = (void *)kev->udata; + + info->func(info->data, frame); + } + } while (n == 8); +} + diff --git a/sys/platform/vkernel/platform/machintr.c b/sys/platform/vkernel/platform/machintr.c index d606937958..64eb810a2f 100644 --- a/sys/platform/vkernel/platform/machintr.c +++ b/sys/platform/vkernel/platform/machintr.c @@ -31,7 +31,7 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $DragonFly: src/sys/platform/vkernel/platform/machintr.c,v 1.11 2007/04/30 16:45:59 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/platform/machintr.c,v 1.12 2007/07/01 02:51:45 dillon Exp $ */ #include @@ -45,6 +45,8 @@ #include #include #include +#include +#include /* * Interrupt Subsystem ABI @@ -115,19 +117,50 @@ void splz(void) { struct mdglobaldata *gd = mdcpu; + thread_t td = gd->mi.gd_curthread; int irq; - atomic_clear_int_nonlocked(&gd->mi.gd_reqflags, RQF_INTPEND); - while ((irq = ffs(gd->gd_spending)) != 0) { - --irq; - atomic_clear_int(&gd->gd_spending, 1 << irq); - irq += FIRST_SOFTINT; - sched_ithd(irq); + while (gd->mi.gd_reqflags & (RQF_IPIQ|RQF_INTPEND)) { + crit_enter_quick(td); + if (gd->mi.gd_reqflags & RQF_IPIQ) { + atomic_clear_int_nonlocked(&gd->mi.gd_reqflags, + RQF_IPIQ); + lwkt_process_ipiq(); + } + if (gd->mi.gd_reqflags & RQF_INTPEND) { + atomic_clear_int_nonlocked(&gd->mi.gd_reqflags, + RQF_INTPEND); + while ((irq = ffs(gd->gd_spending)) != 0) { + --irq; + atomic_clear_int(&gd->gd_spending, 1 << irq); + irq += FIRST_SOFTINT; + sched_ithd(irq); + } + while ((irq = ffs(gd->gd_fpending)) != 0) { + --irq; + atomic_clear_int(&gd->gd_fpending, 1 << irq); + sched_ithd(irq); + } + } + 
crit_exit_noyield(td); } - while ((irq = ffs(gd->gd_fpending)) != 0) { - --irq; - atomic_clear_int(&gd->gd_fpending, 1 << irq); - sched_ithd(irq); +} + +/* + * Allows an unprotected signal handler or mailbox to signal an interrupt + */ +void +signalintr(int intr) +{ + struct mdglobaldata *gd = mdcpu; + thread_t td = gd->mi.gd_curthread; + + if (td->td_pri >= TDPRI_CRIT) { + atomic_set_int_nonlocked(&gd->gd_fpending, 1 << intr); + atomic_set_int_nonlocked(&gd->mi.gd_reqflags, RQF_INTPEND); + } else { + atomic_clear_int(&gd->gd_fpending, 1 << intr); + sched_ithd(intr); } } diff --git a/sys/platform/vkernel/platform/pmap.c b/sys/platform/vkernel/platform/pmap.c index d760f09bdf..cbb878cfdd 100644 --- a/sys/platform/vkernel/platform/pmap.c +++ b/sys/platform/vkernel/platform/pmap.c @@ -38,7 +38,7 @@ * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $ - * $DragonFly: src/sys/platform/vkernel/platform/pmap.c,v 1.23 2007/06/29 21:54:12 dillon Exp $ + * $DragonFly: src/sys/platform/vkernel/platform/pmap.c,v 1.24 2007/07/01 02:51:45 dillon Exp $ */ /* * NOTE: PMAP_INVAL_ADD: In pc32 this function is called prior to adjusting @@ -227,6 +227,7 @@ pmap_pinit(struct pmap *pmap) pmap->pm_count = 1; pmap->pm_active = 0; pmap->pm_ptphint = NULL; + pmap->pm_cpucachemask = 0; TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); } @@ -283,16 +284,11 @@ pmap_release(struct pmap *pmap) #if defined(DIAGNOSTIC) if (object->ref_count != 1) panic("pmap_release: pteobj reference count != 1"); -#endif -#ifdef SMP - panic("Must write code to clear PTxpdir cache across all CPUs"); -#if 0 -#error "Must write code to clear PTxpdir cache across all CPUs" -#endif #endif /* * Once we destroy the page table, the mapping becomes invalid. - * Rather then waste time doing a madvise + * Don't waste time doing a madvise to invalidate the mapping, just + * set cpucachemask to 0. 
*/ if (pmap->pm_pdir == gd->gd_PT1pdir) { gd->gd_PT1pdir = NULL; @@ -330,6 +326,7 @@ pmap_release(struct pmap *pmap) * Leave the KVA reservation for pm_pdir cached for later reuse. */ pmap->pm_pdirpte = 0; + pmap->pm_cpucachemask = 0; } static int @@ -463,8 +460,18 @@ get_ptbase(struct pmap *pmap, vm_offset_t va) KKASSERT(va >= KvaStart && va < KvaEnd); return(KernelPTA + (va >> PAGE_SHIFT)); } else if (pmap->pm_pdir == gd->gd_PT1pdir) { + if ((pmap->pm_cpucachemask & gd->mi.gd_cpumask) == 0) { + *gd->gd_PT1pde = pmap->pm_pdirpte; + madvise(gd->gd_PT1map, SEG_SIZE, MADV_INVAL); + atomic_set_int(&pmap->pm_cpucachemask, gd->mi.gd_cpumask); + } return(gd->gd_PT1map + (va >> PAGE_SHIFT)); } else if (pmap->pm_pdir == gd->gd_PT2pdir) { + if ((pmap->pm_cpucachemask & gd->mi.gd_cpumask) == 0) { + *gd->gd_PT2pde = pmap->pm_pdirpte; + madvise(gd->gd_PT2map, SEG_SIZE, MADV_INVAL); + atomic_set_int(&pmap->pm_cpucachemask, gd->mi.gd_cpumask); + } return(gd->gd_PT2map + (va >> PAGE_SHIFT)); } @@ -479,11 +486,13 @@ get_ptbase(struct pmap *pmap, vm_offset_t va) gd->gd_PT1pdir = pmap->pm_pdir; *gd->gd_PT1pde = pmap->pm_pdirpte; madvise(gd->gd_PT1map, SEG_SIZE, MADV_INVAL); + atomic_set_int(&pmap->pm_cpucachemask, gd->mi.gd_cpumask); return(gd->gd_PT1map + (va >> PAGE_SHIFT)); } else { gd->gd_PT2pdir = pmap->pm_pdir; *gd->gd_PT2pde = pmap->pm_pdirpte; madvise(gd->gd_PT2map, SEG_SIZE, MADV_INVAL); + atomic_set_int(&pmap->pm_cpucachemask, gd->mi.gd_cpumask); return(gd->gd_PT2map + (va >> PAGE_SHIFT)); } } @@ -536,12 +545,6 @@ inval_ptbase_pagedir(pmap_t pmap, vm_pindex_t pindex) struct mdglobaldata *gd = mdcpu; vm_offset_t va; -#ifdef SMP - panic("Must inval self-mappings in all gd's"); -#if 0 -#error "Must inval self-mappings in all gd's" -#endif -#endif if (pmap == &kernel_pmap) { va = (vm_offset_t)KernelPTA + (pindex << PAGE_SHIFT); madvise((void *)va, PAGE_SIZE, MADV_INVAL); @@ -553,14 +556,29 @@ inval_ptbase_pagedir(pmap_t pmap, vm_pindex_t pindex) va = (vm_offset_t)pindex << 
PAGE_SHIFT; vmspace_mcontrol(pmap, (void *)va, SEG_SIZE, MADV_INVAL, 0); } - if (pmap->pm_pdir == gd->gd_PT1pdir) { - va = (vm_offset_t)gd->gd_PT1map + (pindex << PAGE_SHIFT); - madvise((void *)va, PAGE_SIZE, MADV_INVAL); - } - if (pmap->pm_pdir == gd->gd_PT2pdir) { - va = (vm_offset_t)gd->gd_PT2map + (pindex << PAGE_SHIFT); - madvise((void *)va, PAGE_SIZE, MADV_INVAL); + + /* + * Do a selective invalidation if we have a valid cache of this + * page table. + */ + if (pmap->pm_cpucachemask & gd->mi.gd_cpumask) { + if (pmap->pm_pdir == gd->gd_PT1pdir) { + va = (vm_offset_t)gd->gd_PT1map + + (pindex << PAGE_SHIFT); + madvise((void *)va, PAGE_SIZE, MADV_INVAL); + } + if (pmap->pm_pdir == gd->gd_PT2pdir) { + va = (vm_offset_t)gd->gd_PT2map + + (pindex << PAGE_SHIFT); + madvise((void *)va, PAGE_SIZE, MADV_INVAL); + } } + + /* + * Invalidate any other cpu's cache mappings of this page table, + * leaving only ours. + */ + atomic_clear_int(&pmap->pm_cpucachemask, gd->gd_other_cpus); } /* @@ -794,7 +812,6 @@ pmap_qenter(vm_offset_t va, struct vm_page **m, int count) va += PAGE_SIZE; } #ifdef SMP - panic("XXX smp_invltlb()"); smp_invltlb(); #endif } @@ -849,7 +866,6 @@ pmap_qremove(vm_offset_t va, int count) va += PAGE_SIZE; } #ifdef SMP - panic("XXX smp_invltlb()"); smp_invltlb(); #endif } diff --git a/sys/platform/vkernel/i386/mp.c b/sys/platform/vkernel/platform/shutdown.c similarity index 56% copy from sys/platform/vkernel/i386/mp.c copy to sys/platform/vkernel/platform/shutdown.c index eadab732a5..5870290147 100644 --- a/sys/platform/vkernel/i386/mp.c +++ b/sys/platform/vkernel/platform/shutdown.c @@ -1,13 +1,13 @@ /* * Copyright (c) 2007 The DragonFly Project. All rights reserved. - * + * * This code is derived from software contributed to The DragonFly Project * by Matthew Dillon - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright @@ -17,7 +17,7 @@ * 3. Neither the name of The DragonFly Project nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific, prior written permission. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS @@ -30,105 +30,75 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $DragonFly: src/sys/platform/vkernel/i386/mp.c,v 1.1 2007/06/18 18:57:12 josepht Exp $ + * + * $DragonFly: src/sys/platform/vkernel/platform/shutdown.c,v 1.1 2007/07/01 02:51:45 dillon Exp $ + */ +/* + * Install a signal handler for SIGTERM which shuts down the virtual kernel */ -#include +#include "opt_ddb.h" #include +#include +#include +#include +#include +#include +#include +#include -#include -#include +#include -#if 0 -volatile lapic_t lapic; /* needed for kern/kern_shutdown.c */ -#endif -volatile u_int stopped_cpus; -cpumask_t smp_active_mask = 1; /* which cpus are ready for IPIs etc? 
*/ -#if 0 -u_int mp_lock; -#endif +#include +#include +#include +#include -void -mp_start(void) -{ - panic("XXX mp_start()"); -} +#include +#include +#include -void -mp_announce(void) -{ - panic("XXX mp_announce()"); -} - -#if 0 -void -get_mplock(void) -{ - panic("XXX get_mplock()"); -} - -int -try_mplock(void) -{ - panic("XXX try_mplock()"); -} - -void -rel_mplock(void) -{ - panic("XXX rel_mplock()"); -} +static void shutdownsig(int signo); +static void shutdown_intr(void *arg __unused, void *frame __unused); -int -cpu_try_mplock(void) -{ - panic("XXX cpu_try_mplock()"); -} +static void -cpu_get_initial_mplock(void) +initshutdown(void *arg __unused) { - panic("XXX cpu_get_initial_mplock()"); -} -#endif + struct sigaction sa; + bzero(&sa, sizeof(sa)); + sigemptyset(&sa.sa_mask); + sa.sa_flags |= SA_NODEFER; + sa.sa_handler = shutdownsig; + sigaction(SIGTERM, &sa, NULL); -void -forward_fastint_remote(void *arg) -{ - panic("XXX forward_fastint_remote()"); + register_int(2, shutdown_intr, NULL, "shutdown", NULL, 0); } +static void -cpu_send_ipiq(int dcpu) +shutdownsig(int signo) { - panic("XXX cpu_send_ipiq()"); + signalintr(2); } -void -smp_invltlb(void) -{ -#ifdef SMP - panic("XXX smp_invltlb()"); -#endif -} - -int -stop_cpus(u_int map) -{ - panic("XXX stop_cpus()"); -} - -int -restart_cpus(u_int map) -{ - panic("XXX restart_cpus()"); -} +SYSINIT(initshutdown, SI_BOOT2_PROC0, SI_ORDER_ANY, + initshutdown, NULL); +/* + * DragonFly-safe interrupt thread. We are the only handler on interrupt + * #2 so we can just steal the thread's context forever. + */ +static void -ap_init(void) +shutdown_intr(void *arg __unused, void *frame __unused) { - panic("XXX ap_init()"); + kprintf("Caught SIGTERM from host system. Shutting down...\n"); + if (initproc != NULL) { + ksignal(initproc, SIGUSR2); + } else { + reboot(RB_POWEROFF); + } } -