From 1918fc5c9c2c4fb606fcb4387002c740729cc2c4 Mon Sep 17 00:00:00 2001 From: Sascha Wildner Date: Wed, 24 Oct 2012 18:04:05 +0200 Subject: [PATCH] kernel: Make SMP support default (and non-optional). The 'SMP' kernel option gets removed with this commit, so it has to be removed from everybody's configs. Reviewed-by: sjg Approved-by: many --- UPDATING | 11 + sys/bus/pci/fixup_pci.c | 8 - sys/bus/pci/i386/pci_cfgreg.c | 8 - sys/conf/options | 3 - sys/config/GENERIC | 5 - sys/config/LINT | 17 -- sys/config/LINT64 | 15 -- sys/config/VKERNEL | 2 - sys/config/VKERNEL64 | 2 - sys/config/X86_64_GENERIC | 6 - sys/cpu/i386/include/atomic.h | 31 --- sys/cpu/i386/include/cpufunc.h | 14 -- sys/cpu/i386/include/param.h | 5 - sys/cpu/i386/include/profile.h | 6 - sys/cpu/x86_64/include/atomic.h | 31 --- sys/cpu/x86_64/include/cpufunc.h | 14 -- sys/cpu/x86_64/include/param.h | 4 - sys/cpu/x86_64/include/profile.h | 5 - sys/ddb/db_ps.c | 3 - sys/dev/acpica5/acpi_cpu_cstate.c | 7 - sys/dev/netif/aue/if_aue.c | 4 +- sys/dev/netif/lgue/if_lgue.c | 2 - sys/dev/serial/cy/cy.c | 8 - sys/dev/serial/sio/sio.c | 3 - sys/emulation/linux/i386/linux_support.s | 8 - sys/kern/kern_clock.c | 6 - sys/kern/kern_intr.c | 22 -- sys/kern/kern_kinfo.c | 4 +- sys/kern/kern_ktr.c | 20 +- sys/kern/kern_memio.c | 2 - sys/kern/kern_mib.c | 5 - sys/kern/kern_shutdown.c | 18 -- sys/kern/kern_sig.c | 14 -- sys/kern/kern_slaballoc.c | 17 -- sys/kern/kern_spinlock.c | 3 - sys/kern/kern_synch.c | 13 - sys/kern/kern_systimer.c | 6 - sys/kern/kern_timeout.c | 40 +-- sys/kern/kern_upcall.c | 8 - sys/kern/lwkt_ipiq.c | 22 -- sys/kern/lwkt_msgport.c | 23 -- sys/kern/lwkt_serialize.c | 15 -- sys/kern/lwkt_thread.c | 52 ---- sys/kern/lwkt_token.c | 2 - sys/kern/subr_cpu_topology.c | 3 - sys/kern/sys_pipe.c | 7 +- sys/kern/usched_bsd4.c | 99 -------- sys/kern/usched_dfly.c | 93 ------- sys/kern/usched_dummy.c | 13 - sys/kern/vfs_vnops.c | 3 +- sys/net/altq/altq_subr.c | 2 +- sys/net/altq/if_altq.h | 8 - sys/net/if.c | 8 - sys/net/netisr.c | 14 -- sys/net/pf/pf.c | 14 +- sys/net/route.c | 41 ---- sys/netinet/if_ether.c | 16 -- sys/netinet/ip_divert.c | 8 - sys/netinet/ip_flow.c | 6 - sys/netinet/ip_input.c | 12 +- sys/netinet/ip_var.h | 4 - sys/netinet/tcp_subr.c | 24 +- sys/netinet/tcp_usrreq.c | 22 -- sys/netinet/tcp_var.h | 5 - sys/netinet/udp_usrreq.c | 6 - sys/netproto/mpls/mpls_input.c | 8 - sys/netproto/mpls/mpls_var.h | 6 - sys/platform/pc32/apic/apic_vector.s | 8 - sys/platform/pc32/apic/lapic.c | 14 -- sys/platform/pc32/apic/lapic.h | 5 - sys/platform/pc32/conf/files | 6 +- sys/platform/pc32/i386/busdma_machdep.c | 21 -- sys/platform/pc32/i386/db_interface.c | 4 - sys/platform/pc32/i386/genassym.c | 3 - sys/platform/pc32/i386/i686_mem.c | 22 -- sys/platform/pc32/i386/initcpu.c | 10 - sys/platform/pc32/i386/ipl.s | 8 - sys/platform/pc32/i386/locore.s | 14 +- sys/platform/pc32/i386/machdep.c | 12 - sys/platform/pc32/i386/perfmon.c | 87 ------- sys/platform/pc32/i386/pmap.c | 55 ----- sys/platform/pc32/i386/pmap_inval.c | 6 - sys/platform/pc32/i386/support.s | 2 - sys/platform/pc32/i386/swtch.s | 12 - sys/platform/pc32/i386/sys_machdep.c | 14 -- sys/platform/pc32/i386/trap.c | 30 --- sys/platform/pc32/i386/vm_machdep.c | 8 - sys/platform/pc32/include/intr_machdep.h | 3 - sys/platform/pc32/include/lock.h | 28 --- sys/platform/pc32/include/pmap.h | 3 - sys/platform/pc32/include/smp.h | 9 - sys/platform/pc32/include/thread.h | 5 - sys/platform/pc32/isa/clock.c | 34 --- sys/platform/pc32/isa/npx.c | 227 ------------------ 
sys/platform/pc32/isa/prof_machdep.c | 65 +---- sys/platform/pc64/apic/apic_vector.s | 8 - sys/platform/pc64/apic/lapic.c | 14 -- sys/platform/pc64/apic/lapic.h | 5 - sys/platform/pc64/conf/files | 4 +- sys/platform/pc64/include/intr_machdep.h | 3 - sys/platform/pc64/include/lock.h | 27 --- sys/platform/pc64/include/pmap.h | 3 - sys/platform/pc64/include/smp.h | 9 - sys/platform/pc64/include/thread.h | 5 - sys/platform/pc64/isa/clock.c | 34 --- sys/platform/pc64/isa/prof_machdep.c | 65 +---- sys/platform/pc64/x86_64/amd64_mem.c | 12 - sys/platform/pc64/x86_64/busdma_machdep.c | 21 -- sys/platform/pc64/x86_64/db_interface.c | 4 - sys/platform/pc64/x86_64/genassym.c | 2 - sys/platform/pc64/x86_64/ipl.s | 8 - sys/platform/pc64/x86_64/machdep.c | 12 - sys/platform/pc64/x86_64/npx.c | 6 - sys/platform/pc64/x86_64/pmap.c | 53 +--- sys/platform/pc64/x86_64/pmap_inval.c | 6 - sys/platform/pc64/x86_64/support.s | 4 - sys/platform/pc64/x86_64/swtch.s | 11 - sys/platform/pc64/x86_64/trap.c | 20 -- sys/platform/vkernel/conf/files | 2 +- sys/platform/vkernel/i386/autoconf.c | 2 - sys/platform/vkernel/i386/cpu_regs.c | 12 - sys/platform/vkernel/i386/db_interface.c | 4 - sys/platform/vkernel/i386/exception.c | 8 - sys/platform/vkernel/i386/genassym.c | 3 - sys/platform/vkernel/i386/mp.c | 2 - sys/platform/vkernel/i386/npx.c | 7 - sys/platform/vkernel/i386/swtch.s | 7 - sys/platform/vkernel/i386/trap.c | 26 -- sys/platform/vkernel/include/clock.h | 3 - sys/platform/vkernel/include/pmap.h | 3 - sys/platform/vkernel/include/smp.h | 9 - sys/platform/vkernel/include/thread.h | 5 - .../vkernel/platform/busdma_machdep.c | 11 - sys/platform/vkernel/platform/init.c | 26 -- sys/platform/vkernel/platform/machintr.c | 4 - sys/platform/vkernel/platform/pmap.c | 12 - sys/platform/vkernel/platform/systimer.c | 7 - sys/platform/vkernel64/conf/files | 2 +- sys/platform/vkernel64/include/clock.h | 3 - sys/platform/vkernel64/include/smp.h | 9 - sys/platform/vkernel64/include/thread.h | 5 - .../vkernel64/platform/busdma_machdep.c | 11 - sys/platform/vkernel64/platform/init.c | 24 -- sys/platform/vkernel64/platform/machintr.c | 4 - sys/platform/vkernel64/platform/pmap.c | 12 - sys/platform/vkernel64/platform/systimer.c | 7 - sys/platform/vkernel64/x86_64/autoconf.c | 2 - sys/platform/vkernel64/x86_64/cpu_regs.c | 10 - sys/platform/vkernel64/x86_64/db_interface.c | 4 - sys/platform/vkernel64/x86_64/exception.c | 8 - sys/platform/vkernel64/x86_64/mp.c | 2 - sys/platform/vkernel64/x86_64/npx.c | 6 - sys/platform/vkernel64/x86_64/swtch.s | 6 - sys/platform/vkernel64/x86_64/trap.c | 26 -- sys/sys/callout.h | 5 - sys/sys/kinfo.h | 6 - sys/sys/mplock2.h | 16 -- sys/sys/serialize.h | 2 - sys/sys/spinlock2.h | 37 --- sys/sys/thread.h | 8 - sys/sys/thread2.h | 3 - sys/vfs/nfs/nfs_vfsops.c | 5 - sys/vfs/nwfs/nwfs_vfsops.c | 4 - sys/vm/vm_object.c | 9 - sys/vm/vm_page.c | 15 -- 165 files changed, 34 insertions(+), 2357 deletions(-) diff --git a/UPDATING b/UPDATING index 016e02fcac..1e8d3df0a3 100644 --- a/UPDATING +++ b/UPDATING @@ -8,6 +8,17 @@ # If you discover any problem, please contact the bugs@lists.dragonflybsd.org # mailing list with the details. ++-----------------------------------------------------------------------+ ++ UPGRADING DRAGONFLY FROM 3.2 to later versions + ++-----------------------------------------------------------------------+ + +SMP OPTION REMOVED +------------------ + +The SMP kernel option has been removed. All kernels now feature SMP +support. 
If you have 'options SMP' in your kernel config, you'll have +to remove it. + +-----------------------------------------------------------------------+ + UPGRADING DRAGONFLY FROM 3.0 to later versions + +-----------------------------------------------------------------------+ diff --git a/sys/bus/pci/fixup_pci.c b/sys/bus/pci/fixup_pci.c index ed1aa059f3..53c40bf33f 100644 --- a/sys/bus/pci/fixup_pci.c +++ b/sys/bus/pci/fixup_pci.c @@ -90,19 +90,11 @@ fixwsc_natoma(device_t dev) int pmccfg; pmccfg = pci_read_config(dev, 0x50, 2); -#if defined(SMP) if (pmccfg & 0x8000) { kprintf("Correcting Natoma config for SMP\n"); pmccfg &= ~0x8000; pci_write_config(dev, 0x50, pmccfg, 2); } -#else - if ((pmccfg & 0x8000) == 0) { - kprintf("Correcting Natoma config for non-SMP\n"); - pmccfg |= 0x8000; - pci_write_config(dev, 0x50, pmccfg, 2); - } -#endif } /* diff --git a/sys/bus/pci/i386/pci_cfgreg.c b/sys/bus/pci/i386/pci_cfgreg.c index 1f72301f71..0aea827793 100644 --- a/sys/bus/pci/i386/pci_cfgreg.c +++ b/sys/bus/pci/i386/pci_cfgreg.c @@ -519,9 +519,7 @@ pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus) #ifdef PCIE_CFG_MECH struct pcie_cfg_list *pcielist; struct pcie_cfg_elem *pcie_array, *elem; -#ifdef SMP struct pcpu *pc; -#endif vm_offset_t va; uint32_t val1, val2; int i, slot; @@ -544,9 +542,7 @@ pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus) kprintf("PCIe: Memory Mapped configuration base @ 0x%jx\n", (uintmax_t)base); -#ifdef SMP SLIST_FOREACH(pc, &cpuhead, pc_allcpu) -#endif { pcie_array = kmalloc(sizeof(struct pcie_cfg_elem) * PCIE_CACHE, @@ -561,11 +557,7 @@ pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus) return (0); } -#ifdef SMP pcielist = &pcie_list[pc->pc_cpuid]; -#else - pcielist = &pcie_list[0]; -#endif TAILQ_INIT(pcielist); for (i = 0; i < PCIE_CACHE; i++) { elem = &pcie_array[i]; diff --git a/sys/conf/options b/sys/conf/options index d183b2b56a..452a0ce760 100644 --- a/sys/conf/options +++ b/sys/conf/options @@ -487,9 +487,6 @@ DEBUG_PCTRACK opt_pctrack.h # These are VM related options NO_SWAPPING opt_vm.h -# Standard SMP options -SMP opt_global.h - # sys/netkey KEY diff --git a/sys/config/GENERIC b/sys/config/GENERIC index 7beab925c1..85892dd5db 100644 --- a/sys/config/GENERIC +++ b/sys/config/GENERIC @@ -67,11 +67,6 @@ options ALTQ_FAIRQ #fair queueing #options ALTQ_NOPCC #don't use processor cycle counter #options ALTQ_DEBUG #for debugging -# Kernels configured with 'options SMP' should generally boot on both -# SMP and UP boxes. -# -options SMP # Symmetric MultiProcessor Kernel - # Debugging for Development options DDB options DDB_TRACE diff --git a/sys/config/LINT b/sys/config/LINT index 08aba7b801..8a5f89f917 100644 --- a/sys/config/LINT +++ b/sys/config/LINT @@ -104,23 +104,6 @@ options INCLUDE_CONFIG_FILE # Include this file in kernel # options ROOTDEVNAME=\"ufs:da0s2e\" -##################################################################### -# SMP OPTIONS: -# -# SMP enables building of a Symmetric MultiProcessor Kernel. It will -# boot on both SMP and UP boxes. -# -# Notes: -# -# An SMP kernel will ONLY run on an Intel MP spec. qualified motherboard. -# -# Be sure to disable 'cpu I486_CPU' for SMP kernels. -# -# Check the 'Rogue SMP hardware' section to see if additional options -# are required by your hardware. 
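[Editor's note: for anyone tracking this change, the edit to a private kernel config is the same one-line deletion shown in the GENERIC hunk above. A config that previously carried

        options         SMP             # Symmetric MultiProcessor Kernel

simply drops that line; no replacement option is needed, since SMP is now unconditional.]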
-# -#options SMP # Symmetric MultiProcessor Kernel - ##################################################################### # CPU OPTIONS diff --git a/sys/config/LINT64 b/sys/config/LINT64 index 78631dc9d5..4f8a3ee30a 100644 --- a/sys/config/LINT64 +++ b/sys/config/LINT64 @@ -104,21 +104,6 @@ options INCLUDE_CONFIG_FILE # Include this file in kernel # options ROOTDEVNAME=\"ufs:da0s2e\" -##################################################################### -# SMP OPTIONS: -# -# SMP enables building of a Symmetric MultiProcessor Kernel. It will -# boot on both SMP and UP boxes. -# -# Notes: -# -# An SMP kernel will ONLY run on an Intel MP spec. qualified motherboard. -# -# Check the 'Rogue SMP hardware' section to see if additional options -# are required by your hardware. -# -#options SMP # Symmetric MultiProcessor Kernel - ##################################################################### # CPU OPTIONS diff --git a/sys/config/VKERNEL b/sys/config/VKERNEL index f427e9bca0..36d6206899 100644 --- a/sys/config/VKERNEL +++ b/sys/config/VKERNEL @@ -67,8 +67,6 @@ options P1003_1B #Posix P1003_1B real-time extensions options _KPOSIX_PRIORITY_SCHEDULING options ICMP_BANDLIM #Rate limit bad replies -options SMP # Symmetric MultiProcessor Kernel - # Debugging for Development options DDB options DDB_TRACE diff --git a/sys/config/VKERNEL64 b/sys/config/VKERNEL64 index 6c3ead2ae7..e02faaad20 100644 --- a/sys/config/VKERNEL64 +++ b/sys/config/VKERNEL64 @@ -66,8 +66,6 @@ options P1003_1B #Posix P1003_1B real-time extensions options _KPOSIX_PRIORITY_SCHEDULING options ICMP_BANDLIM #Rate limit bad replies -options SMP # Symmetric MultiProcessor Kernel - # Debugging for Development options DDB options DDB_TRACE diff --git a/sys/config/X86_64_GENERIC b/sys/config/X86_64_GENERIC index 9f27754e5d..f132db1809 100644 --- a/sys/config/X86_64_GENERIC +++ b/sys/config/X86_64_GENERIC @@ -61,12 +61,6 @@ options ALTQ_FAIRQ #fair queueing #options ALTQ_NOPCC #don't use processor cycle counter #options ALTQ_DEBUG #for debugging - -# Kernels configured with 'options SMP' should generally boot on both -# SMP and UP boxes. -# -options SMP # Symmetric MultiProcessor Kernel - # Debugging for Development options DDB options DDB_TRACE diff --git a/sys/cpu/i386/include/atomic.h b/sys/cpu/i386/include/atomic.h index 9ddaff1789..ab641780c0 100644 --- a/sys/cpu/i386/include/atomic.h +++ b/sys/cpu/i386/include/atomic.h @@ -24,7 +24,6 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/include/atomic.h,v 1.9.2.1 2000/07/07 00:38:47 obrien Exp $ - * $DragonFly: src/sys/cpu/i386/include/atomic.h,v 1.25 2008/06/26 23:06:50 dillon Exp $ */ #ifndef _CPU_ATOMIC_H_ #define _CPU_ATOMIC_H_ @@ -71,11 +70,7 @@ extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \ extern void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v); #else /* !KLD_MODULE */ -#if defined(SMP) || !defined(_KERNEL) #define MPLOCKED "lock ; " -#else -#define MPLOCKED -#endif /* * The assembly is volatilized to demark potential before-and-after side @@ -432,30 +427,6 @@ extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v); #else /* !KLD_MODULE */ -#if defined(_KERNEL) && !defined(SMP) -/* - * We assume that a = b will do atomic loads and stores. However, on a - * PentiumPro or higher, reads may pass writes, so for that case we have - * to use a serializing instruction (i.e. with LOCK) to do the load in - * SMP kernels. 
For UP kernels, however, the cache of the single processor - * is always consistent, so we don't need any memory barriers. - */ -#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \ -static __inline u_##TYPE \ -atomic_load_acq_##TYPE(volatile u_##TYPE *p) \ -{ \ - return (*p); \ -} \ - \ -static __inline void \ -atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\ -{ \ - *p = v; \ -} \ -struct __hack - -#else /* !(_KERNEL && !SMP) */ - #define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \ static __inline u_##TYPE \ atomic_load_acq_##TYPE(volatile u_##TYPE *p) \ @@ -484,8 +455,6 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\ } \ struct __hack -#endif /* _KERNEL && !SMP */ - #endif /* !KLD_MODULE */ ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0"); diff --git a/sys/cpu/i386/include/cpufunc.h b/sys/cpu/i386/include/cpufunc.h index f1a61ebe13..0dc9acf0d2 100644 --- a/sys/cpu/i386/include/cpufunc.h +++ b/sys/cpu/i386/include/cpufunc.h @@ -58,9 +58,7 @@ __BEGIN_DECLS #ifdef __GNUC__ -#ifdef SMP #include /* XXX */ -#endif #ifdef SWTCH_OPTIM_STATS extern int tlb_flush_count; /* XXX */ @@ -194,15 +192,11 @@ cpu_enable_intr(void) static __inline void cpu_mfence(void) { -#ifdef SMP #ifdef CPU_HAS_SSE2 __asm __volatile("mfence" : : : "memory"); #else __asm __volatile("lock; addl $0,(%%esp)" : : : "memory"); #endif -#else - __asm __volatile("" : : : "memory"); -#endif } /* @@ -216,15 +210,11 @@ cpu_mfence(void) static __inline void cpu_lfence(void) { -#ifdef SMP #ifdef CPU_HAS_SSE2 __asm __volatile("lfence" : : : "memory"); #else __asm __volatile("lock; addl $0,(%%esp)" : : : "memory"); #endif -#else - __asm __volatile("" : : : "memory"); -#endif } /* @@ -422,12 +412,8 @@ invd(void) * will cause the invl*() functions to be equivalent to the cpu_invl*() * functions. 
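[Editor's note: with the UP branch gone, MPLOCKED above is unconditionally "lock ; " in kernel builds. A minimal sketch of how that string pastes into one of the generated atomic ops (simplified; the real definitions come from the ATOMIC_ASM machinery, and this literal function is hypothetical):

static __inline void
atomic_add_int_sketch(volatile u_int *p, u_int v)
{
        /* assembles to "lock ; addl %1,%0" -- an atomic read-modify-write */
        __asm __volatile(MPLOCKED "addl %1,%0"
                         : "+m" (*p)
                         : "ir" (v));
}

The point of the commit is visible here: there is no longer a build variant in which the lock prefix quietly disappears.]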
*/ -#ifdef SMP void smp_invltlb(void); void smp_invltlb_intr(void); -#else -#define smp_invltlb() -#endif #ifndef _CPU_INVLPG_DEFINED diff --git a/sys/cpu/i386/include/param.h b/sys/cpu/i386/include/param.h index d792843aaa..d6547ce984 100644 --- a/sys/cpu/i386/include/param.h +++ b/sys/cpu/i386/include/param.h @@ -35,7 +35,6 @@ * * from: @(#)param.h 5.8 (Berkeley) 6/28/91 * $FreeBSD: src/sys/i386/include/param.h,v 1.54.2.8 2002/08/31 21:15:55 dillon Exp $ - * $DragonFly: src/sys/cpu/i386/include/param.h,v 1.15 2008/08/25 17:01:35 dillon Exp $ */ #ifndef _CPU_PARAM_H_ @@ -95,11 +94,7 @@ #ifndef SMP_MAXCPU #define SMP_MAXCPU 16 #endif -#ifdef SMP #define MAXCPU SMP_MAXCPU -#else -#define MAXCPU 1 -#endif /* SMP */ #define ALIGNBYTES _ALIGNBYTES #define ALIGN(p) _ALIGN(p) diff --git a/sys/cpu/i386/include/profile.h b/sys/cpu/i386/include/profile.h index ef507ccb20..02bdd016b7 100644 --- a/sys/cpu/i386/include/profile.h +++ b/sys/cpu/i386/include/profile.h @@ -32,7 +32,6 @@ * * @(#)profile.h 8.1 (Berkeley) 6/11/93 * $FreeBSD: src/sys/i386/include/profile.h,v 1.20 1999/12/29 04:33:05 peter Exp $ - * $DragonFly: src/sys/cpu/i386/include/profile.h,v 1.9 2006/11/07 17:51:21 dillon Exp $ */ #ifndef _CPU_PROFILE_H_ @@ -71,7 +70,6 @@ #define PC_TO_I(p, pc) ((uintfptr_t)(pc) - (uintfptr_t)(p)->lowpc) #else #define MCOUNT_DECL(s) u_long s; -#ifdef SMP struct spinlock_deprecated; extern struct spinlock_deprecated mcount_spinlock; void spin_lock_np(struct spinlock_deprecated *sp); @@ -80,10 +78,6 @@ void spin_unlock_np(struct spinlock_deprecated *sp); __asm __volatile("cli" : : : "memory"); \ spin_lock_np(&mcount_spinlock); } #define MCOUNT_EXIT(s) { spin_unlock_np(&mcount_spinlock); write_eflags(s); } -#else -#define MCOUNT_ENTER(s) { s = read_eflags(); cpu_disable_intr(); } -#define MCOUNT_EXIT(s) (write_eflags(s)) -#endif #endif /* GUPROF */ #else /* !_KERNEL */ diff --git a/sys/cpu/x86_64/include/atomic.h b/sys/cpu/x86_64/include/atomic.h index 4850c5e252..c848bdb1aa 100644 --- a/sys/cpu/x86_64/include/atomic.h +++ b/sys/cpu/x86_64/include/atomic.h @@ -24,7 +24,6 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/include/atomic.h,v 1.9.2.1 2000/07/07 00:38:47 obrien Exp $ - * $DragonFly: src/sys/cpu/i386/include/atomic.h,v 1.25 2008/06/26 23:06:50 dillon Exp $ */ #ifndef _CPU_ATOMIC_H_ #define _CPU_ATOMIC_H_ @@ -73,11 +72,7 @@ extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \ extern void atomic_##NAME##_##TYPE##_nonlocked(volatile u_##TYPE *p, u_##TYPE v); #else /* !KLD_MODULE */ -#if defined(SMP) || !defined(_KERNEL) #define MPLOCKED "lock ; " -#else -#define MPLOCKED -#endif /* * The assembly is volatilized to demark potential before-and-after side @@ -463,30 +458,6 @@ extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v); #else /* !KLD_MODULE */ -#if defined(_KERNEL) && !defined(SMP) -/* - * We assume that a = b will do atomic loads and stores. However, on a - * PentiumPro or higher, reads may pass writes, so for that case we have - * to use a serializing instruction (i.e. with LOCK) to do the load in - * SMP kernels. For UP kernels, however, the cache of the single processor - * is always consistent, so we don't need any memory barriers. 
- */ -#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \ -static __inline u_##TYPE \ -atomic_load_acq_##TYPE(volatile u_##TYPE *p) \ -{ \ - return (*p); \ -} \ - \ -static __inline void \ -atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\ -{ \ - *p = v; \ -} \ -struct __hack - -#else /* !(_KERNEL && !SMP) */ - #define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \ static __inline u_##TYPE \ atomic_load_acq_##TYPE(volatile u_##TYPE *p) \ @@ -515,8 +486,6 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\ } \ struct __hack -#endif /* _KERNEL && !SMP */ - #endif /* !KLD_MODULE */ ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0"); diff --git a/sys/cpu/x86_64/include/cpufunc.h b/sys/cpu/x86_64/include/cpufunc.h index 1163f3f2a7..7a8fbcfda5 100644 --- a/sys/cpu/x86_64/include/cpufunc.h +++ b/sys/cpu/x86_64/include/cpufunc.h @@ -63,9 +63,7 @@ __BEGIN_DECLS #ifdef __GNUC__ -#ifdef SMP #include /* XXX */ -#endif static __inline void breakpoint(void) @@ -177,11 +175,7 @@ cpu_enable_intr(void) static __inline void cpu_mfence(void) { -#ifdef SMP __asm __volatile("mfence" : : : "memory"); -#else - __asm __volatile("" : : : "memory"); -#endif } /* @@ -195,11 +189,7 @@ cpu_mfence(void) static __inline void cpu_lfence(void) { -#ifdef SMP __asm __volatile("lfence" : : : "memory"); -#else - __asm __volatile("" : : : "memory"); -#endif } /* @@ -424,12 +414,8 @@ invd(void) * will cause the invl*() functions to be equivalent to the cpu_invl*() * functions. */ -#ifdef SMP void smp_invltlb(void); void smp_invltlb_intr(void); -#else -#define smp_invltlb() -#endif #ifndef _CPU_INVLPG_DEFINED diff --git a/sys/cpu/x86_64/include/param.h b/sys/cpu/x86_64/include/param.h index c6d8ca2ba2..15a489dc47 100644 --- a/sys/cpu/x86_64/include/param.h +++ b/sys/cpu/x86_64/include/param.h @@ -89,11 +89,7 @@ * remain compatible between UP and SMP builds. */ #define SMP_MAXCPU 63 -#ifdef SMP #define MAXCPU SMP_MAXCPU -#else -#define MAXCPU 1 -#endif /* SMP */ #define ALIGNBYTES _ALIGNBYTES #define ALIGN(p) _ALIGN(p) diff --git a/sys/cpu/x86_64/include/profile.h b/sys/cpu/x86_64/include/profile.h index 38efa66119..40c6c70efa 100644 --- a/sys/cpu/x86_64/include/profile.h +++ b/sys/cpu/x86_64/include/profile.h @@ -63,17 +63,12 @@ #define PC_TO_I(p, pc) ((uintfptr_t)(pc) - (uintfptr_t)(p)->lowpc) #else #define MCOUNT_DECL(s) u_long s; -#ifdef SMP extern int mcount_lock; #define MCOUNT_ENTER(s) { s = read_rflags(); disable_intr(); \ while (!atomic_cmpset_acq_int(&mcount_lock, 0, 1)) \ /* nothing */ ; } #define MCOUNT_EXIT(s) { atomic_store_rel_int(&mcount_lock, 0); \ write_rflags(s); } -#else -#define MCOUNT_ENTER(s) { s = read_rflags(); disable_intr(); } -#define MCOUNT_EXIT(s) (write_rflags(s)) -#endif #endif /* GUPROF */ #else /* !_KERNEL */ diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c index 8d93a15d72..57c2e330c3 100644 --- a/sys/ddb/db_ps.c +++ b/sys/ddb/db_ps.c @@ -31,7 +31,6 @@ * SUCH DAMAGE. 
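[Editor's note: the MCOUNT_ENTER/MCOUNT_EXIT pair that survives above is an interrupt-disabled spin gate around mcount_lock. The acquire/release shape in isolation, as a sketch ('gate' and both functions are hypothetical stand-ins; the real macro spins with an empty body rather than cpu_pause()):

static volatile u_int gate;

static void
gate_enter(void)
{
        while (!atomic_cmpset_acq_int(&gate, 0, 1))
                cpu_pause();            /* lost the CAS race; spin and retry */
}

static void
gate_exit(void)
{
        atomic_store_rel_int(&gate, 0); /* release store reopens the gate */
}
]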
* * $FreeBSD: src/sys/ddb/db_ps.c,v 1.20 1999/08/28 00:41:09 peter Exp $ - * $DragonFly: src/sys/ddb/db_ps.c,v 1.25 2008/06/29 21:38:21 dillon Exp $ */ #include #include @@ -199,10 +198,8 @@ db_dump_td_tokens(thread_t td) tok = ref->tr_tok; db_printf(" %p[tok=%p", ref, ref->tr_tok); -#ifdef SMP if (tok->t_ref && td == tok->t_ref->tr_owner) db_printf(",held"); -#endif db_printf("]"); } db_printf("\n"); diff --git a/sys/dev/acpica5/acpi_cpu_cstate.c b/sys/dev/acpica5/acpi_cpu_cstate.c index 028d77b146..f70c2c5b76 100644 --- a/sys/dev/acpica5/acpi_cpu_cstate.c +++ b/sys/dev/acpica5/acpi_cpu_cstate.c @@ -1119,16 +1119,9 @@ acpi_cpu_c1(void) ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0); #else splz(); -#ifdef SMP if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) __asm __volatile("sti; hlt"); else __asm __volatile("sti; pause"); -#else - if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) - __asm __volatile("sti; hlt"); - else - __asm __volatile("sti"); -#endif #endif /* !__ia64__ */ } diff --git a/sys/dev/netif/aue/if_aue.c b/sys/dev/netif/aue/if_aue.c index 92e254f0f4..88e56a1113 100644 --- a/sys/dev/netif/aue/if_aue.c +++ b/sys/dev/netif/aue/if_aue.c @@ -981,15 +981,13 @@ aue_start_ipifunc(void *arg) static void aue_start_schedule(struct ifnet *ifp) { -#ifdef SMP int cpu; cpu = ifp->if_start_cpuid(ifp); if (cpu != mycpuid) lwkt_send_ipiq(globaldata_find(cpu), aue_start_ipifunc, ifp); else -#endif - aue_start_ipifunc(ifp); + aue_start_ipifunc(ifp); } /* diff --git a/sys/dev/netif/lgue/if_lgue.c b/sys/dev/netif/lgue/if_lgue.c index ffe10227c0..6b53eea245 100644 --- a/sys/dev/netif/lgue/if_lgue.c +++ b/sys/dev/netif/lgue/if_lgue.c @@ -479,14 +479,12 @@ lgue_start_ipifunc(void *arg) static void lgue_start_schedule(struct ifnet *ifp) { -#ifdef SMP int cpu; cpu = ifp->if_start_cpuid(ifp); if (cpu != mycpuid) lwkt_send_ipiq(globaldata_find(cpu), lgue_start_ipifunc, ifp); else -#endif lgue_start_ipifunc(ifp); } diff --git a/sys/dev/serial/cy/cy.c b/sys/dev/serial/cy/cy.c index 1da54b02d1..3151430fff 100644 --- a/sys/dev/serial/cy/cy.c +++ b/sys/dev/serial/cy/cy.c @@ -83,22 +83,14 @@ #include #include #include -#ifndef SMP -#include -#endif #include #include #include "cyreg.h" #include -#ifdef SMP #define disable_intr() com_lock() #define enable_intr() com_unlock() -#else -#define disable_intr() ((void)0) -#define enable_intr() ((void)0) -#endif /* SMP */ /* * Dictionary so that I can name everything *sio* or *com* to compare with diff --git a/sys/dev/serial/sio/sio.c b/sys/dev/serial/sio/sio.c index b5938b6eed..21aacf9716 100644 --- a/sys/dev/serial/sio/sio.c +++ b/sys/dev/serial/sio/sio.c @@ -89,9 +89,6 @@ #include #include -#ifndef SMP -#include -#endif #include "sioreg.h" #include "sio_private.h" diff --git a/sys/emulation/linux/i386/linux_support.s b/sys/emulation/linux/i386/linux_support.s index 2e4ad44e91..6cdfee1dac 100644 --- a/sys/emulation/linux/i386/linux_support.s +++ b/sys/emulation/linux/i386/linux_support.s @@ -84,9 +84,7 @@ ENTRY(futex_addl) movl 12(%esp),%edx cmpl $VM_MAX_USER_ADDRESS-4,%edx ja futex_fault_pop -#ifdef SMP lock -#endif xaddl %eax,(%edx) movl 16(%esp),%edx movl %eax,(%edx) @@ -105,9 +103,7 @@ ENTRY(futex_orl) movl (%edx),%eax 1: movl %eax,%ecx orl 8(%esp),%ecx -#ifdef SMP lock -#endif cmpxchgl %ecx,(%edx) jnz 1b futex_tail: @@ -130,9 +126,7 @@ ENTRY(futex_andl) movl (%edx),%eax 1: movl %eax,%ecx andl 8(%esp),%ecx -#ifdef SMP lock -#endif cmpxchgl %ecx,(%edx) jnz 1b jmp futex_tail @@ -148,9 +142,7 @@ ENTRY(futex_xorl) movl (%edx),%eax 1: movl %eax,%ecx xorl 
8(%esp),%ecx -#ifdef SMP lock -#endif cmpxchgl %ecx,(%edx) jnz 1b jmp futex_tail diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c index 7d4cc7c269..407ef00670 100644 --- a/sys/kern/kern_clock.c +++ b/sys/kern/kern_clock.c @@ -70,7 +70,6 @@ * * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $ - * $DragonFly: src/sys/kern/kern_clock.c,v 1.62 2008/09/09 04:06:13 dillon Exp $ */ #include "opt_ntp.h" @@ -136,7 +135,6 @@ struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE }; struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE]; #endif -#ifdef SMP static int sysctl_cputime(SYSCTL_HANDLER_ARGS) { @@ -152,10 +150,6 @@ sysctl_cputime(SYSCTL_HANDLER_ARGS) } SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0, sysctl_cputime, "S,kinfo_cputime", "CPU time statistics"); -#else -SYSCTL_STRUCT(_kern, OID_AUTO, cputime, CTLFLAG_RD, &cpu_time, kinfo_cputime, - "CPU time statistics"); -#endif static int sysctl_cp_time(SYSCTL_HANDLER_ARGS) diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c index e5fa84c478..30a77ec7e1 100644 --- a/sys/kern/kern_intr.c +++ b/sys/kern/kern_intr.c @@ -511,16 +511,12 @@ next_registered_randintr(int intr) * We are NOT in a critical section, which will allow the scheduled * interrupt to preempt us. The MP lock might *NOT* be held here. */ -#ifdef SMP - static void sched_ithd_remote(void *arg) { sched_ithd_intern(arg); } -#endif - static void sched_ithd_intern(struct intr_info *info) { @@ -529,7 +525,6 @@ sched_ithd_intern(struct intr_info *info) if (info->i_reclist == NULL) { report_stray_interrupt(info, "sched_ithd"); } else { -#ifdef SMP if (info->i_thread.td_gd == mycpu) { if (info->i_running == 0) { info->i_running = 1; @@ -539,13 +534,6 @@ sched_ithd_intern(struct intr_info *info) } else { lwkt_send_ipiq(info->i_thread.td_gd, sched_ithd_remote, info); } -#else - if (info->i_running == 0) { - info->i_running = 1; - if (info->i_state != ISTATE_LIVELOCKED) - lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */ - } -#endif } } else { report_stray_interrupt(info, "sched_ithd"); @@ -671,9 +659,7 @@ ithread_fast_handler(struct intrframe *frame) struct intr_info *info; struct intrec **list; int must_schedule; -#ifdef SMP int got_mplock; -#endif TD_INVARIANTS_DECLARE; intrec_t rec, nrec; globaldata_t gd; @@ -715,9 +701,7 @@ ithread_fast_handler(struct intrframe *frame) ++gd->gd_intr_nesting_level; ++gd->gd_cnt.v_intr; must_schedule = info->i_slow; -#ifdef SMP got_mplock = 0; -#endif TD_INVARIANTS_GET(td); list = &info->i_reclist; @@ -727,7 +711,6 @@ ithread_fast_handler(struct intrframe *frame) nrec = rec->next; if (rec->intr_flags & INTR_CLOCK) { -#ifdef SMP if ((rec->intr_flags & INTR_MPSAFE) == 0 && got_mplock == 0) { if (try_mplock() == 0) { /* Couldn't get the MP lock; just schedule it. */ @@ -736,7 +719,6 @@ ithread_fast_handler(struct intrframe *frame) } got_mplock = 1; } -#endif if (rec->serializer) { must_schedule += lwkt_serialize_handler_try( rec->serializer, rec->handler, @@ -752,10 +734,8 @@ ithread_fast_handler(struct intrframe *frame) * Cleanup */ --gd->gd_intr_nesting_level; -#ifdef SMP if (got_mplock) rel_mplock(); -#endif /* * If we had a problem, or mixed fast and slow interrupt handlers are @@ -824,7 +804,6 @@ ithread_handler(void *arg) * are MPSAFE. However, if intr_mpsafe has been turned off we * always operate with the BGL. 
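[Editor's note: the futex_orl/futex_andl/futex_xorl stubs above all share one shape: snapshot the word, compute the new value, lock cmpxchg it in, and retry if another cpu raced. The same loop written in C (illustrative only; not code from this commit):

static u_int
atomic_or_return_old(volatile u_int *p, u_int bits)
{
        u_int old, new;

        do {
                old = *p;               /* snapshot current value */
                new = old | bits;       /* compute update from snapshot */
        } while (!atomic_cmpset_int(p, old, new));      /* raced; retry */
        return (old);
}
]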
*/ -#ifdef SMP if (info->i_mplock_required != mpheld) { if (info->i_mplock_required) { KKASSERT(mpheld == 0); @@ -836,7 +815,6 @@ ithread_handler(void *arg) mpheld = 0; } } -#endif TD_INVARIANTS_GET(gd->gd_curthread); diff --git a/sys/kern/kern_kinfo.c b/sys/kern/kern_kinfo.c index bc8aa2759e..876f197506 100644 --- a/sys/kern/kern_kinfo.c +++ b/sys/kern/kern_kinfo.c @@ -205,7 +205,7 @@ fill_kinfo_lwp(struct lwp *lwp, struct kinfo_lwp *kl) kl->kl_stat = LSSLEEP; } } -#if defined(_KERNEL) && defined(SMP) +#ifdef _KERNEL kl->kl_mpcount = get_mplock_count(lwp->lwp_thread); #else kl->kl_mpcount = 0; @@ -258,7 +258,7 @@ fill_kinfo_proc_kthread(struct thread *td, struct kinfo_proc *kp) kp->kp_lwp.kl_pid = -1; kp->kp_lwp.kl_tid = -1; kp->kp_lwp.kl_tdflags = td->td_flags; -#if defined(_KERNEL) && defined(SMP) +#ifdef _KERNEL kp->kp_lwp.kl_mpcount = get_mplock_count(td); #else kp->kp_lwp.kl_mpcount = 0; diff --git a/sys/kern/kern_ktr.c b/sys/kern/kern_ktr.c index 8c087022ac..1909ebf180 100644 --- a/sys/kern/kern_ktr.c +++ b/sys/kern/kern_ktr.c @@ -114,14 +114,12 @@ KTR_INFO(KTR_TESTLOG, testlog, test3, 2, "test3 %d %d %d %d", int dummy1, int du KTR_INFO(KTR_TESTLOG, testlog, test4, 3, "test4"); KTR_INFO(KTR_TESTLOG, testlog, test5, 4, "test5"); KTR_INFO(KTR_TESTLOG, testlog, test6, 5, "test6"); -#ifdef SMP KTR_INFO(KTR_TESTLOG, testlog, pingpong, 6, "pingpong"); KTR_INFO(KTR_TESTLOG, testlog, pipeline, 7, "pipeline"); KTR_INFO(KTR_TESTLOG, testlog, crit_beg, 8, "crit_beg"); KTR_INFO(KTR_TESTLOG, testlog, crit_end, 9, "crit_end"); KTR_INFO(KTR_TESTLOG, testlog, spin_beg, 10, "spin_beg"); KTR_INFO(KTR_TESTLOG, testlog, spin_end, 11, "spin_end"); -#endif #define logtest(name) KTR_LOG(testlog_ ## name, 0, 0, 0, 0) #define logtest_noargs(name) KTR_LOG(testlog_ ## name) #endif @@ -148,9 +146,7 @@ SYSCTL_INT(_debug_ktr, OID_AUTO, resynchronize, CTLFLAG_RW, static int ktr_testlogcnt = 0; SYSCTL_INT(_debug_ktr, OID_AUTO, testlogcnt, CTLFLAG_RW, &ktr_testlogcnt, 0, ""); static int ktr_testipicnt = 0; -#ifdef SMP static int ktr_testipicnt_remainder; -#endif SYSCTL_INT(_debug_ktr, OID_AUTO, testipicnt, CTLFLAG_RW, &ktr_testipicnt, 0, ""); static int ktr_testcritcnt = 0; SYSCTL_INT(_debug_ktr, OID_AUTO, testcritcnt, CTLFLAG_RW, &ktr_testcritcnt, 0, ""); @@ -168,9 +164,7 @@ struct ktr_cpu ktr_cpu[MAXCPU] = { { .core.ktr_buf = &ktr_buf0[0] } }; -#ifdef SMP static int64_t ktr_sync_tsc; -#endif struct callout ktr_resync_callout; #ifdef KTR_VERBOSE @@ -210,13 +204,11 @@ SYSINIT(ktr_sysinit, SI_BOOT2_KLD, SI_ORDER_ANY, ktr_sysinit, NULL); * This callback occurs on cpu0. 
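[Editor's note: a pattern this commit leaves behind all over the tree (aue_start_schedule and lgue_start_schedule earlier, sched_ithd_intern above, lwp_signotify and sys_upc_control below): run the handler directly when already on the owning cpu, otherwise forward it with an IPI. Distilled into a sketch (my_work and my_work_schedule are hypothetical names):

static void my_work(void *arg);         /* the cpu-local handler */

static void
my_work_remote(void *arg)
{
        my_work(arg);                   /* executes on the target cpu */
}

static void
my_work_schedule(globaldata_t gd, void *arg)
{
        if (gd == mycpu)
                my_work(arg);
        else
                lwkt_send_ipiq(gd, my_work_remote, arg);
}
]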
*/ #if KTR_TESTLOG -#ifdef SMP static void ktr_pingpong_remote(void *dummy); static void ktr_pipeline_remote(void *dummy); #endif -#endif -#if defined(SMP) && defined(_RDTSC_SUPPORTED_) +#ifdef _RDTSC_SUPPORTED_ static void ktr_resync_remote(void *dummy); extern cpumask_t smp_active_mask; @@ -374,7 +366,7 @@ ktr_pipeline_remote(void *dummy __unused) #endif -#else /* !SMP */ +#else /* !_RDTSC_SUPPORTED_ */ /* * The resync callback for UP doesn't do anything other then run the test @@ -429,11 +421,7 @@ ktr_begin_write_entry(struct ktr_info *info, const char *file, int line) ++kcpu->ktr_idx; #ifdef _RDTSC_SUPPORTED_ if (cpu_feature & CPUID_TSC) { -#ifdef SMP entry->ktr_timestamp = rdtsc() - tsc_offsets[cpu]; -#else - entry->ktr_timestamp = rdtsc(); -#endif } else #endif { @@ -453,9 +441,7 @@ ktr_finish_write_entry(struct ktr_info *info, struct ktr_entry *entry) cpu_ktr_caller(entry); #ifdef KTR_VERBOSE if (ktr_verbose && info->kf_format) { -#ifdef SMP kprintf("cpu%d ", mycpu->gd_cpuid); -#endif if (ktr_verbose > 1) { kprintf("%s.%d\t", entry->ktr_file, entry->ktr_line); } @@ -585,9 +571,7 @@ db_mach_vtrace(int cpu, struct ktr_entry *kp, int idx) { if (kp->ktr_info == NULL) return(0); -#ifdef SMP db_printf("cpu%d ", cpu); -#endif db_printf("%d: ", idx); if (db_ktr_verbose) { db_printf("%10.10lld %s.%d\t", (long long)kp->ktr_timestamp, diff --git a/sys/kern/kern_memio.c b/sys/kern/kern_memio.c index 6dbae93503..c678505de0 100644 --- a/sys/kern/kern_memio.c +++ b/sys/kern/kern_memio.c @@ -477,14 +477,12 @@ mem_range_attr_set(struct mem_range_desc *mrd, int *arg) return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg)); } -#ifdef SMP void mem_range_AP_init(void) { if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP) return (mem_range_softc.mr_op->initAP(&mem_range_softc)); } -#endif static int random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred) diff --git a/sys/kern/kern_mib.c b/sys/kern/kern_mib.c index fa5387bb31..16f3963271 100644 --- a/sys/kern/kern_mib.c +++ b/sys/kern/kern_mib.c @@ -134,13 +134,8 @@ char kernelname[MAXPATHLEN] = "/kernel"; /* XXX bloat */ SYSCTL_STRING(_kern, KERN_BOOTFILE, bootfile, CTLFLAG_RW, kernelname, sizeof kernelname, "Name of kernel file booted"); -#ifdef SMP SYSCTL_INT(_hw, HW_NCPU, ncpu, CTLFLAG_RD, &ncpus, 0, "Number of active CPUs"); -#else -SYSCTL_INT(_hw, HW_NCPU, ncpu, CTLFLAG_RD, - 0, 1, "Number of active CPUs"); -#endif SYSCTL_INT(_hw, HW_BYTEORDER, byteorder, CTLFLAG_RD, 0, BYTE_ORDER, "System byte order"); diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c index 25f7372c39..c27f91b0e9 100644 --- a/sys/kern/kern_shutdown.c +++ b/sys/kern/kern_shutdown.c @@ -261,7 +261,6 @@ boot(int howto) /* collect extra flags that shutdown_nice might have set */ howto |= shutdown_howto; -#ifdef SMP /* * We really want to shutdown on the BSP. Subsystems such as ACPI * can't power-down the box otherwise. @@ -273,7 +272,6 @@ boot(int howto) kprintf("Switching to cpu #0 for shutdown\n"); lwkt_setcpu_self(globaldata_find(0)); } -#endif /* * Do any callouts that should be done BEFORE syncing the filesystems. */ @@ -706,7 +704,6 @@ panic(const char *fmt, ...) __va_list ap; static char buf[256]; -#ifdef SMP /* * If a panic occurs on multiple cpus before the first is able to * halt the other cpus, only one cpu is allowed to take the panic. @@ -759,9 +756,6 @@ panic(const char *fmt, ...) 
if (atomic_cmpset_ptr(&panic_cpu_gd, NULL, gd)) break; } -#else - panic_cpu_gd = gd; -#endif /* * Try to get the system into a working state. Save information * we are about to destroy. @@ -797,10 +791,8 @@ panic(const char *fmt, ...) panicstr = buf; __va_end(ap); kprintf("panic: %s\n", buf); -#ifdef SMP /* two separate prints in case of an unmapped page and trap */ kprintf("cpuid = %d\n", mycpu->gd_cpuid); -#endif #if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC) led_switch("error", 1); @@ -822,12 +814,8 @@ panic(const char *fmt, ...) Debugger("panic"); else #endif -#ifdef SMP if (newpanic) stop_cpus(mycpu->gd_other_cpus); -#else - ; -#endif boot(bootopt); } @@ -928,28 +916,23 @@ dumpsys(void) int dump_stop_usertds = 0; -#ifdef SMP static void need_user_resched_remote(void *dummy) { need_user_resched(); } -#endif void dump_reactivate_cpus(void) { -#ifdef SMP globaldata_t gd; int cpu, seq; -#endif dump_stop_usertds = 1; need_user_resched(); -#ifdef SMP for (cpu = 0; cpu < ncpus; cpu++) { gd = globaldata_find(cpu); seq = lwkt_send_ipiq(gd, need_user_resched_remote, NULL); @@ -957,5 +940,4 @@ dump_reactivate_cpus(void) } restart_cpus(stopped_cpus); -#endif } diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index ae88060f19..d64abab264 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -80,9 +80,7 @@ static int dokillpg(int sig, int pgid, int all); static int sig_ffs(sigset_t *set); static int sigprop(int sig); static void lwp_signotify(struct lwp *lp); -#ifdef SMP static void lwp_signotify_remote(void *arg); -#endif static int kern_sigtimedwait(sigset_t set, siginfo_t *info, struct timespec *timeout); @@ -1417,7 +1415,6 @@ lwp_signotify(struct lwp *lp) /* * lwp is sitting in tsleep() with PCATCH set */ -#ifdef SMP if (lp->lwp_thread->td_gd == mycpu) { setrunnable(lp); } else { @@ -1431,14 +1428,10 @@ lwp_signotify(struct lwp *lp) lwkt_send_ipiq(lp->lwp_thread->td_gd, lwp_signotify_remote, lp); } -#else - setrunnable(lp); -#endif } else if (lp->lwp_thread->td_flags & TDF_SINTR) { /* * lwp is sitting in lwkt_sleep() with PCATCH set. 
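[Editor's note: the panic() hunk above keeps only the multi-cpu form: the first cpu to install its globaldata in panic_cpu_gd owns the panic and may stop the rest. That first-one-wins election by itself, as a sketch ('owner' and the function are hypothetical stand-ins; the real code also loops under a critical section):

static globaldata_t owner;

static int
try_own_panic(globaldata_t gd)
{
        if (owner == gd)
                return (1);             /* recursive panic on the owning cpu */
        return (atomic_cmpset_ptr(&owner, NULL, gd));
}
]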
*/ -#ifdef SMP if (lp->lwp_thread->td_gd == mycpu) { setrunnable(lp); } else { @@ -1452,9 +1445,6 @@ lwp_signotify(struct lwp *lp) lwkt_send_ipiq(lp->lwp_thread->td_gd, lwp_signotify_remote, lp); } -#else - setrunnable(lp); -#endif } else { /* * Otherwise the lwp is either in some uninterruptable state @@ -1465,8 +1455,6 @@ lwp_signotify(struct lwp *lp) crit_exit(); } -#ifdef SMP - /* * This function is called via an IPI so we cannot call setrunnable() here * (because while we hold the lp we don't own its token, and can't get it @@ -1497,8 +1485,6 @@ lwp_signotify_remote(void *arg) } } -#endif - /* * Caller must hold p->p_token */ diff --git a/sys/kern/kern_slaballoc.c b/sys/kern/kern_slaballoc.c index 237365e731..fef0f518a8 100644 --- a/sys/kern/kern_slaballoc.c +++ b/sys/kern/kern_slaballoc.c @@ -137,11 +137,9 @@ KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS); KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS); KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS); KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS); -#ifdef SMP KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS); KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS); KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS); -#endif KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin"); KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end"); @@ -336,10 +334,8 @@ malloc_uninit(void *data) if (type->ks_limit == 0) panic("malloc_uninit on uninitialized type"); -#ifdef SMP /* Make sure that all pending kfree()s are finished. */ lwkt_synchronize_ipiqs("muninit"); -#endif #ifdef INVARIANTS /* @@ -543,9 +539,7 @@ kmalloc(unsigned long size, struct malloc_type *type, int flags) { SLZone *z; SLChunk *chunk; -#ifdef SMP SLChunk *bchunk; -#endif SLGlobalData *slgd; struct globaldata *gd; unsigned long align; @@ -701,7 +695,6 @@ kmalloc(unsigned long size, struct malloc_type *type, int flags) if (--z->z_NFree <= 0) { KKASSERT(z->z_NFree == 0); -#ifdef SMP /* * WARNING! This code competes with other cpus. It is ok * for us to not drain RChunks here but we might as well, and @@ -728,7 +721,6 @@ kmalloc(unsigned long size, struct malloc_type *type, int flags) break; } } -#endif /* * Remove from the zone list if no free chunks remain. * Clear RSignal @@ -1015,7 +1007,6 @@ kstrdup(const char *str, struct malloc_type *type) return(nstr); } -#ifdef SMP /* * Notify our cpu that a remote cpu has freed some chunks in a zone that * we own. RCount will be bumped so the memory should be good, but validate @@ -1108,8 +1099,6 @@ kfree_remote(void *ptr) logmemory(free_rem_end, z, bchunk, 0L, 0); } -#endif - /* * free (SLAB ALLOCATOR) * @@ -1128,10 +1117,8 @@ kfree(void *ptr, struct malloc_type *type) struct globaldata *gd; int *kup; unsigned long size; -#ifdef SMP SLChunk *bchunk; int rsignal; -#endif logmemory_quick(free_beg); gd = mycpu; @@ -1223,7 +1210,6 @@ kfree(void *ptr, struct malloc_type *type) * (no critical section needed) */ if (z->z_CpuGd != gd) { -#ifdef SMP /* * Making these adjustments now allow us to avoid passing (type) * to the remote cpu. 
Note that ks_inuse/ks_memuse is being @@ -1279,9 +1265,6 @@ kfree(void *ptr, struct malloc_type *type) atomic_subtract_int(&z->z_RCount, 1); /* z can get ripped out from under us from this point on */ } -#else - panic("Corrupt SLZone"); -#endif logmemory_quick(free_end); return; } diff --git a/sys/kern/kern_spinlock.c b/sys/kern/kern_spinlock.c index 0cea05dda3..b0b0c21fee 100644 --- a/sys/kern/kern_spinlock.c +++ b/sys/kern/kern_spinlock.c @@ -68,8 +68,6 @@ struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin); -#ifdef SMP - struct indefinite_info { sysclock_t base; int secs; @@ -437,4 +435,3 @@ SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT, 0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code"); #endif /* INVARIANTS */ -#endif /* SMP */ diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index a21cc40db0..53f6c44217 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -37,7 +37,6 @@ * * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $ - * $DragonFly: src/sys/kern/kern_synch.c,v 1.91 2008/09/09 04:06:13 dillon Exp $ */ #include "opt_ktrace.h" @@ -879,9 +878,7 @@ _wakeup(void *ident, int domain) struct thread *td; struct thread *ntd; globaldata_t gd; -#ifdef SMP cpumask_t mask; -#endif int id; crit_enter(); @@ -907,7 +904,6 @@ restart: } } -#ifdef SMP /* * We finished checking the current cpu but there still may be * more work to do. Either wakeup_one was requested and no matching @@ -930,7 +926,6 @@ restart: lwkt_send_ipiq2_mask(mask, _wakeup, ident, domain | PWAKEUP_MYCPU); } -#endif done: logtsleep1(wakeup_end); crit_exit(); @@ -996,7 +991,6 @@ wakeup_mycpu_one(const volatile void *ident) void wakeup_oncpu(globaldata_t gd, const volatile void *ident) { -#ifdef SMP globaldata_t mygd = mycpu; if (gd == mycpu) { _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) | @@ -1006,9 +1000,6 @@ wakeup_oncpu(globaldata_t gd, const volatile void *ident) PWAKEUP_ENCODE(0, mygd->gd_cpuid) | PWAKEUP_MYCPU); } -#else - _wakeup(__DEALL(ident), PWAKEUP_MYCPU); -#endif } /* @@ -1018,7 +1009,6 @@ wakeup_oncpu(globaldata_t gd, const volatile void *ident) void wakeup_oncpu_one(globaldata_t gd, const volatile void *ident) { -#ifdef SMP globaldata_t mygd = mycpu; if (gd == mygd) { _wakeup(__DEALL(ident), PWAKEUP_ENCODE(0, mygd->gd_cpuid) | @@ -1028,9 +1018,6 @@ wakeup_oncpu_one(globaldata_t gd, const volatile void *ident) PWAKEUP_ENCODE(0, mygd->gd_cpuid) | PWAKEUP_MYCPU | PWAKEUP_ONE); } -#else - _wakeup(__DEALL(ident), PWAKEUP_MYCPU | PWAKEUP_ONE); -#endif } /* diff --git a/sys/kern/kern_systimer.c b/sys/kern/kern_systimer.c index 2492b3f3d1..b0632917ff 100644 --- a/sys/kern/kern_systimer.c +++ b/sys/kern/kern_systimer.c @@ -30,8 +30,6 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
- * - * $DragonFly: src/sys/kern/kern_systimer.c,v 1.12 2007/10/16 11:12:59 sephe Exp $ */ /* @@ -179,13 +177,9 @@ systimer_add(systimer_t info) info->flags = (info->flags | SYSTF_ONQUEUE) & ~SYSTF_IPIRUNNING; info->queue = &gd->gd_systimerq; } else { -#ifdef SMP KKASSERT((info->flags & SYSTF_IPIRUNNING) == 0); info->flags |= SYSTF_IPIRUNNING; lwkt_send_ipiq(info->gd, (ipifunc1_t)systimer_add, info); -#else - panic("systimer_add: bad gd in info %p", info); -#endif } crit_exit(); } diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c index 68d2f0534a..f899406421 100644 --- a/sys/kern/kern_timeout.c +++ b/sys/kern/kern_timeout.c @@ -245,9 +245,7 @@ softclock_handler(void *arg) struct callout_tailq *bucket; void (*c_func)(void *); void *c_arg; -#ifdef SMP int mpsafe = 1; -#endif /* * Run the callout thread at the same priority as other kernel @@ -266,7 +264,6 @@ loop: sc->next = TAILQ_NEXT(c, c_links.tqe); continue; } -#ifdef SMP if (c->c_flags & CALLOUT_MPSAFE) { if (mpsafe == 0) { mpsafe = 1; @@ -287,7 +284,6 @@ loop: continue; } } -#endif sc->next = TAILQ_NEXT(c, c_links.tqe); TAILQ_REMOVE(bucket, c, c_links.tqe); @@ -371,17 +367,13 @@ callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); c->c_func = ftn; c->c_time = sc->curticks + to_ticks; -#ifdef SMP c->c_gd = gd; -#endif TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & callwheelmask], c, c_links.tqe); crit_exit_gd(gd); } -#ifdef SMP - struct callout_remote_arg { struct callout *c; void (*ftn)(void *); @@ -397,17 +389,12 @@ callout_reset_ipi(void *arg) callout_reset(rmt->c, rmt->to_ticks, rmt->ftn, rmt->arg); } -#endif - void callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg, int cpuid) { KASSERT(cpuid >= 0 && cpuid < ncpus, ("invalid cpuid %d", cpuid)); -#ifndef SMP - callout_reset(c, to_ticks, ftn, arg); -#else if (cpuid == mycpuid) { callout_reset(c, to_ticks, ftn, arg); } else { @@ -425,7 +412,6 @@ callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *), seq = lwkt_send_ipiq(target_gd, callout_reset_ipi, &rmt); lwkt_wait_ipiq(target_gd, seq); } -#endif } /* @@ -450,9 +436,7 @@ int callout_stop(struct callout *c) { globaldata_t gd = mycpu; -#ifdef SMP globaldata_t tgd; -#endif softclock_pcpu_t sc; #ifdef INVARIANTS @@ -482,7 +466,6 @@ callout_stop(struct callout *c) * cpu. */ if ((c->c_flags & CALLOUT_PENDING) == 0) { -#ifdef SMP if ((c->c_flags & CALLOUT_ACTIVE) == 0) { crit_exit_gd(gd); return (0); @@ -492,14 +475,7 @@ callout_stop(struct callout *c) crit_exit_gd(gd); return (0); } - /* fall-through to the cpu-localization code. 
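[Editor's note: callout_reset_bycpu() and the cross-cpu branch of callout_stop() above both use the synchronous flavor of the IPI queue: send, then block on the returned sequence number. The idiom in isolation (run_on_cpu is a hypothetical wrapper):

static void
run_on_cpu(globaldata_t tgd, ipifunc1_t func, void *arg)
{
        int seq;

        if (tgd == mycpu) {
                func(arg);              /* already on the target cpu */
        } else {
                seq = lwkt_send_ipiq(tgd, func, arg);
                lwkt_wait_ipiq(tgd, seq);   /* wait until the target ran it */
        }
}
]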
*/ -#else - c->c_flags &= ~CALLOUT_ACTIVE; - crit_exit_gd(gd); - return (0); -#endif } -#ifdef SMP if ((tgd = c->c_gd) != gd) { /* * If the callout is owned by a different CPU we have to @@ -510,9 +486,7 @@ callout_stop(struct callout *c) cpu_ccfence(); /* don't let tgd alias c_gd */ seq = lwkt_send_ipiq(tgd, (void *)callout_stop, c); lwkt_wait_ipiq(tgd, seq); - } else -#endif - { + } else { /* * If the callout is owned by the same CPU we can * process it directly, but if we are racing our helper @@ -544,7 +518,6 @@ callout_stop_sync(struct callout *c) while (c->c_flags & CALLOUT_DID_INIT) { callout_stop(c); -#ifdef SMP if (c->c_gd) { sc = &softclock_pcpu_ary[c->c_gd->gd_cpuid]; if (sc->running == c) { @@ -552,13 +525,6 @@ callout_stop_sync(struct callout *c) tsleep(&sc->running, 0, "crace", 1); } } -#else - sc = &softclock_pcpu_ary[0]; - if (sc->running == c) { - while (sc->running == c) - tsleep(&sc->running, 0, "crace", 1); - } -#endif if ((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) == 0) break; kprintf("Warning: %s: callout race\n", curthread->td_comm); @@ -585,11 +551,7 @@ callout_terminate(struct callout *c) if (c->c_flags & CALLOUT_DID_INIT) { callout_stop(c); -#ifdef SMP sc = &softclock_pcpu_ary[c->c_gd->gd_cpuid]; -#else - sc = &softclock_pcpu_ary[0]; -#endif if (sc->running == c) { while (sc->running == c) tsleep(&sc->running, 0, "crace", 1); diff --git a/sys/kern/kern_upcall.c b/sys/kern/kern_upcall.c index 623a124d15..4bc8eb9ab8 100644 --- a/sys/kern/kern_upcall.c +++ b/sys/kern/kern_upcall.c @@ -59,8 +59,6 @@ MALLOC_DEFINE(M_UPCALL, "upcalls", "upcall registration structures"); -#ifdef SMP - static void sigupcall_remote(void *arg) { @@ -69,8 +67,6 @@ sigupcall_remote(void *arg) sigupcall(); } -#endif - /* * upc_register: * @@ -157,14 +153,10 @@ sys_upc_control(struct upc_control_args *uap) targlp->lwp_proc->p_flags |= P_UPCALLPEND; /* XXX lwp flags */ if (targlp->lwp_proc->p_flags & P_UPCALLWAIT) wakeup(&targlp->lwp_upcall); -#ifdef SMP if (targlp->lwp_thread->td_gd != mycpu) lwkt_send_ipiq(targlp->lwp_thread->td_gd, sigupcall_remote, targlp); else sigupcall(); -#else - sigupcall(); -#endif break; } } diff --git a/sys/kern/lwkt_ipiq.c b/sys/kern/lwkt_ipiq.c index 9972d46443..11b0374dca 100644 --- a/sys/kern/lwkt_ipiq.c +++ b/sys/kern/lwkt_ipiq.c @@ -66,7 +66,6 @@ #include #include -#ifdef SMP static __int64_t ipiq_count; /* total calls to lwkt_send_ipiq*() */ static __int64_t ipiq_fifofull; /* number of fifo full conditions detected */ static __int64_t ipiq_avoided; /* interlock with target avoids cpu ipi */ @@ -77,9 +76,7 @@ static int ipiq_debug; /* set to 1 for debug */ static int panic_ipiq_cpu = -1; static int panic_ipiq_count = 100; #endif -#endif -#ifdef SMP SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0, "Number of IPI's sent"); SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0, @@ -119,10 +116,6 @@ KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARGS); #define logipiq2(name, arg) \ KTR_LOG(ipiq_ ## name, arg) -#endif /* SMP */ - -#ifdef SMP - static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip, struct intrframe *frame); static void lwkt_cpusync_remote1(lwkt_cpusync_t cs); @@ -726,8 +719,6 @@ lwkt_synchronize_ipiqs(const char *wmesg) } } -#endif - /* * CPU Synchronization Support * @@ -752,7 +743,6 @@ lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg) void lwkt_cpusync_interlock(lwkt_cpusync_t cs) { -#ifdef SMP #if 0 const char *smsg = "SMPSYNL"; #endif @@ -787,9 +777,6 @@ 
lwkt_cpusync_interlock(lwkt_cpusync_t cs) #endif DEBUG_POP_INFO(); } -#else - cs->cs_mack = 0; -#endif } /* @@ -802,7 +789,6 @@ void lwkt_cpusync_deinterlock(lwkt_cpusync_t cs) { globaldata_t gd = mycpu; -#ifdef SMP #if 0 const char *smsg = "SMPSYNU"; #endif @@ -848,14 +834,8 @@ lwkt_cpusync_deinterlock(lwkt_cpusync_t cs) logipiq2(sync_end, (long)mask); } crit_exit_id("cpusync"); -#else - if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask)) - cs->cs_func(cs->cs_data); -#endif } -#ifdef SMP - /* * helper IPI remote messaging function. * @@ -908,5 +888,3 @@ lwkt_cpusync_remote2(lwkt_cpusync_t cs) } } } - -#endif diff --git a/sys/kern/lwkt_msgport.c b/sys/kern/lwkt_msgport.c index 36c17b7e65..33a4eda89c 100644 --- a/sys/kern/lwkt_msgport.c +++ b/sys/kern/lwkt_msgport.c @@ -65,9 +65,7 @@ #include #include -#ifdef SMP #include -#endif #include MALLOC_DEFINE(M_LWKTMSG, "lwkt message", "lwkt message"); @@ -445,8 +443,6 @@ _lwkt_enqueue_reply(lwkt_port_t port, lwkt_msg_t msg) * message were headed to a different cpu. */ -#ifdef SMP - /* * This function completes reply processing for the default case in the * context of the originating cpu. @@ -485,8 +481,6 @@ lwkt_thread_replyport_remote(lwkt_msg_t msg) _lwkt_schedule_msg(port->mpu_td, flags); } -#endif - /* * lwkt_thread_replyport() - Backend to lwkt_replymsg() * @@ -512,15 +506,12 @@ lwkt_thread_replyport(lwkt_port_t port, lwkt_msg_t msg) * Assume the target thread is non-preemptive, so no critical * section is required. */ -#ifdef SMP if (port->mpu_td->td_gd == mycpu) { -#endif flags = msg->ms_flags; cpu_sfence(); msg->ms_flags |= MSGF_DONE | MSGF_REPLY; if (port->mp_flags & MSGPORTF_WAITING) _lwkt_schedule_msg(port->mpu_td, flags); -#ifdef SMP } else { #ifdef INVARIANTS msg->ms_flags |= MSGF_INTRANSIT; @@ -529,7 +520,6 @@ lwkt_thread_replyport(lwkt_port_t port, lwkt_msg_t msg) lwkt_send_ipiq(port->mpu_td->td_gd, (ipifunc1_t)lwkt_thread_replyport_remote, msg); } -#endif } else { /* * If an asynchronous completion has been requested the message @@ -537,15 +527,12 @@ lwkt_thread_replyport(lwkt_port_t port, lwkt_msg_t msg) * * A critical section is required to interlock the port queue. 
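[Editor's note: for callers that only need "run this on every cpu in a mask and wait", lwkt_cpusync_simple() -- shown near the top of the lwkt_ipiq.c hunks -- wraps the interlock/deinterlock pair above. A usage sketch (do_flush and flush_everywhere are hypothetical):

static void
do_flush(void *arg __unused)
{
        cpu_invltlb();          /* example per-cpu action, run on each cpu */
}

static void
flush_everywhere(void)
{
        lwkt_cpusync_simple(smp_active_mask, do_flush, NULL);
}
]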
*/ -#ifdef SMP if (port->mpu_td->td_gd == mycpu) { -#endif crit_enter(); _lwkt_enqueue_reply(port, msg); if (port->mp_flags & MSGPORTF_WAITING) _lwkt_schedule_msg(port->mpu_td, msg->ms_flags); crit_exit(); -#ifdef SMP } else { #ifdef INVARIANTS msg->ms_flags |= MSGF_INTRANSIT; @@ -554,7 +541,6 @@ lwkt_thread_replyport(lwkt_port_t port, lwkt_msg_t msg) lwkt_send_ipiq(port->mpu_td->td_gd, (ipifunc1_t)lwkt_thread_replyport_remote, msg); } -#endif } } @@ -585,9 +571,6 @@ lwkt_thread_dropmsg(lwkt_port_t port, lwkt_msg_t msg) * * The message must already have cleared MSGF_DONE and MSGF_REPLY */ - -#ifdef SMP - static void lwkt_thread_putport_remote(lwkt_msg_t msg) @@ -615,8 +598,6 @@ lwkt_thread_putport_remote(lwkt_msg_t msg) _lwkt_schedule_msg(port->mpu_td, msg->ms_flags); } -#endif - static int lwkt_thread_putport(lwkt_port_t port, lwkt_msg_t msg) @@ -624,15 +605,12 @@ lwkt_thread_putport(lwkt_port_t port, lwkt_msg_t msg) KKASSERT((msg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0); msg->ms_target_port = port; -#ifdef SMP if (port->mpu_td->td_gd == mycpu) { -#endif crit_enter(); _lwkt_pushmsg(port, msg); if (port->mp_flags & MSGPORTF_WAITING) _lwkt_schedule_msg(port->mpu_td, msg->ms_flags); crit_exit(); -#ifdef SMP } else { #ifdef INVARIANTS msg->ms_flags |= MSGF_INTRANSIT; @@ -640,7 +618,6 @@ lwkt_thread_putport(lwkt_port_t port, lwkt_msg_t msg) lwkt_send_ipiq(port->mpu_td->td_gd, (ipifunc1_t)lwkt_thread_putport_remote, msg); } -#endif return (EASYNC); } diff --git a/sys/kern/lwkt_serialize.c b/sys/kern/lwkt_serialize.c index 4c7f555242..3ee90df5d9 100644 --- a/sys/kern/lwkt_serialize.c +++ b/sys/kern/lwkt_serialize.c @@ -81,22 +81,16 @@ KTR_INFO(KTR_SERIALIZER, slz, wakeup_end, 5, SLZ_KTR_STRING, SLZ_KTR_ARGS); KTR_INFO(KTR_SERIALIZER, slz, try, 6, SLZ_KTR_STRING, SLZ_KTR_ARGS); KTR_INFO(KTR_SERIALIZER, slz, tryfail, 7, SLZ_KTR_STRING, SLZ_KTR_ARGS); KTR_INFO(KTR_SERIALIZER, slz, tryok, 8, SLZ_KTR_STRING, SLZ_KTR_ARGS); -#ifdef SMP KTR_INFO(KTR_SERIALIZER, slz, spinbo, 9, "slz=%p bo1=%d bo=%d", lwkt_serialize_t slz, int backoff1, int backoff); -#endif KTR_INFO(KTR_SERIALIZER, slz, enter_end, 10, SLZ_KTR_STRING, SLZ_KTR_ARGS); KTR_INFO(KTR_SERIALIZER, slz, exit_beg, 11, SLZ_KTR_STRING, SLZ_KTR_ARGS); #define logslz(name, slz) KTR_LOG(slz_ ## name, slz) -#ifdef SMP #define logslz_spinbo(slz, bo1, bo) KTR_LOG(slz_spinbo, slz, bo1, bo) -#endif static void lwkt_serialize_sleep(void *info); static void lwkt_serialize_wakeup(void *info); - -#ifdef SMP static void lwkt_serialize_adaptive_sleep(void *bo); static int slz_backoff_limit = 128; @@ -112,7 +106,6 @@ TUNABLE_INT("debug.serialize_boround", &slz_backoff_round); SYSCTL_INT(_debug, OID_AUTO, serialize_boround, CTLFLAG_RW, &slz_backoff_round, 0, "Backoff rounding"); -#endif /* SMP */ void lwkt_serialize_init(lwkt_serialize_t s) @@ -123,7 +116,6 @@ lwkt_serialize_init(lwkt_serialize_t s) #endif } -#ifdef SMP void lwkt_serialize_adaptive_enter(lwkt_serialize_t s) { @@ -142,7 +134,6 @@ lwkt_serialize_adaptive_enter(lwkt_serialize_t s) s->last_td = curthread; #endif } -#endif /* SMP */ void lwkt_serialize_enter(lwkt_serialize_t s) @@ -293,8 +284,6 @@ lwkt_serialize_sleep(void *info) } } -#ifdef SMP - static void lwkt_serialize_adaptive_sleep(void *arg) { @@ -341,8 +330,6 @@ lwkt_serialize_adaptive_sleep(void *arg) } } -#endif /* SMP */ - static void lwkt_serialize_wakeup(void *info) { @@ -351,7 +338,6 @@ lwkt_serialize_wakeup(void *info) logslz(wakeup_end, info); } -#ifdef SMP static void lwkt_serialize_sysinit(void *dummy __unused) { @@ -360,4 +346,3 
@@ lwkt_serialize_sysinit(void *dummy __unused) } SYSINIT(lwkt_serialize, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, lwkt_serialize_sysinit, NULL); -#endif diff --git a/sys/kern/lwkt_thread.c b/sys/kern/lwkt_thread.c index 5fc20617fa..f5f8f4ff1d 100644 --- a/sys/kern/lwkt_thread.c +++ b/sys/kern/lwkt_thread.c @@ -92,10 +92,8 @@ static __int64_t preempt_weird = 0; static int lwkt_use_spin_port; static struct objcache *thread_cache; -#ifdef SMP static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame); static void lwkt_setcpu_remote(void *arg); -#endif extern void cpu_heavy_restore(void); extern void cpu_lwkt_restore(void); @@ -406,8 +404,6 @@ lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags) * NOTE! we have to be careful in regards to creating threads for other cpus * if SMP has not yet been activated. */ -#ifdef SMP - static void lwkt_init_thread_remote(void *arg) { @@ -419,8 +415,6 @@ lwkt_init_thread_remote(void *arg) TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq); } -#endif - /* * lwkt core thread structural initialization. * @@ -447,7 +441,6 @@ lwkt_init_thread(thread_t td, void *stack, int stksize, int flags, else lwkt_initport_thread(&td->td_msgport, td); pmap_init_thread(td); -#ifdef SMP /* * Normally initializing a thread for a remote cpu requires sending an * IPI. However, the idlethread is setup before the other cpus are @@ -461,12 +454,6 @@ lwkt_init_thread(thread_t td, void *stack, int stksize, int flags, } else { lwkt_send_ipiq(gd, lwkt_init_thread_remote, td); } -#else - crit_enter_gd(mygd); - TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq); - crit_exit_gd(mygd); -#endif - dsched_new_thread(td); } @@ -630,7 +617,6 @@ lwkt_switch(void) gd->gd_spinlocks)); -#ifdef SMP #ifdef INVARIANTS if (td->td_cscount) { kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n", @@ -638,7 +624,6 @@ lwkt_switch(void) if (panic_on_cscount) panic("switching while mastering cpusync"); } -#endif #endif /* @@ -688,10 +673,8 @@ lwkt_switch(void) * Runq is empty, switch to idle to allow it to halt. */ ntd = &gd->gd_idlethread; -#ifdef SMP if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) ASSERT_NO_TOKENS_HELD(ntd); -#endif cpu_time.cp_msg[0] = 0; cpu_time.cp_stallpc = 0; goto haveidle; @@ -763,11 +746,9 @@ skip: */ cpu_pause(); ntd = &gd->gd_idlethread; -#ifdef SMP if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) ASSERT_NO_TOKENS_HELD(ntd); /* contention case, do not clear contention mask */ -#endif /* * We are going to have to retry but if the current thread is not @@ -797,7 +778,6 @@ skip: if (spinning < 0x7FFFFFFF) ++spinning; -#ifdef SMP /* * lwkt_getalltokens() failed in sorted token mode, we can use * monitor/mwait in this case. @@ -810,7 +790,6 @@ skip: (gd->gd_reqflags | RQF_SPINNING) & ~RQF_IDLECHECK_WK_MASK); } -#endif /* * We already checked that td is still scheduled so this should be @@ -909,7 +888,6 @@ haveidle: void lwkt_switch_return(thread_t otd) { -#ifdef SMP globaldata_t rgd; /* @@ -934,9 +912,6 @@ lwkt_switch_return(thread_t otd) } else { otd->td_flags &= ~TDF_RUNNING; } -#else - otd->td_flags &= ~TDF_RUNNING; -#endif /* * Final exit validations (see lwp_wait()). Note that otd becomes @@ -1020,7 +995,6 @@ lwkt_preempt(thread_t ntd, int critcount) ++preempt_miss; return; } -#ifdef SMP if (td->td_cscount) { ++preempt_miss; return; @@ -1029,7 +1003,6 @@ lwkt_preempt(thread_t ntd, int critcount) ++preempt_miss; return; } -#endif /* * We don't have to check spinlocks here as they will also bump * td_critcount. 
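[Editor's note: the preemption checks above lean on td_critcount alone; spinlocks bump the same counter, so a single test covers both. The guard being relied on is the usual critical-section idiom (sketch; the function is hypothetical):

static void
touch_percpu_state(void)
{
        crit_enter();           /* raises td_critcount; blocks preemption */
        /* per-cpu (mycpu) data may be used safely here */
        crit_exit();
}
]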
@@ -1307,17 +1280,12 @@ _lwkt_schedule(thread_t td) * critical section). If we do not own the thread there might * be a race but the target cpu will deal with it. */ -#ifdef SMP if (td->td_gd == mygd) { _lwkt_enqueue(td); _lwkt_schedule_post(mygd, td, 1); } else { lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0); } -#else - _lwkt_enqueue(td); - _lwkt_schedule_post(mygd, td, 1); -#endif } crit_exit_gd(mygd); } @@ -1334,8 +1302,6 @@ lwkt_schedule_noresched(thread_t td) /* XXX not impl */ _lwkt_schedule(td); } -#ifdef SMP - /* * When scheduled remotely if frame != NULL the IPIQ is being * run via doreti or an interrupt then preemption can be allowed. @@ -1401,9 +1367,7 @@ lwkt_acquire(thread_t td) crit_enter_gd(mygd); DEBUG_PUSH_INFO("lwkt_acquire"); while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) { -#ifdef SMP lwkt_process_ipiq(); -#endif cpu_lfence(); if (--retry == 0) { kprintf("lwkt_acquire: stuck: td %p td->td_flags %08x\n", @@ -1425,8 +1389,6 @@ lwkt_acquire(thread_t td) } } -#endif - /* * Generic deschedule. Descheduling threads other than your own should be * done only in carefully controlled circumstances. Descheduling is @@ -1438,7 +1400,6 @@ void lwkt_deschedule(thread_t td) { crit_enter(); -#ifdef SMP if (td == curthread) { _lwkt_dequeue(td); } else { @@ -1448,9 +1409,6 @@ lwkt_deschedule(thread_t td) lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td); } } -#else - _lwkt_dequeue(td); -#endif crit_exit(); } @@ -1572,7 +1530,6 @@ lwkt_schedulerclock(thread_t td) void lwkt_setcpu_self(globaldata_t rgd) { -#ifdef SMP thread_t td = curthread; if (td->td_gd != rgd) { @@ -1602,21 +1559,17 @@ lwkt_setcpu_self(globaldata_t rgd) TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq); crit_exit_quick(td); } -#endif } void lwkt_migratecpu(int cpuid) { -#ifdef SMP globaldata_t rgd; rgd = globaldata_find(cpuid); lwkt_setcpu_self(rgd); -#endif } -#ifdef SMP /* * Remote IPI for cpu migration (called while in a critical section so we * do not have to enter another one). @@ -1642,7 +1595,6 @@ lwkt_setcpu_remote(void *arg) (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); _lwkt_enqueue(td); } -#endif struct lwp * lwkt_preempted_proc(void) @@ -1797,8 +1749,6 @@ crit_panic(void) /* NOT REACHED */ } -#ifdef SMP - /* * Called from debugger/panic on cpus which have been stopped. We must still * process the IPIQ while stopped, even if we were stopped while in a critical @@ -1822,5 +1772,3 @@ lwkt_smp_stopped(void) } crit_exit_gd(gd); } - -#endif diff --git a/sys/kern/lwkt_token.c b/sys/kern/lwkt_token.c index 95a40ac676..12bc374815 100644 --- a/sys/kern/lwkt_token.c +++ b/sys/kern/lwkt_token.c @@ -175,7 +175,6 @@ SYSCTL_LONG(_debug, OID_AUTO, tokens_add_latency, CTLFLAG_RW, static int _lwkt_getalltokens_sorted(thread_t td); -#ifdef SMP /* * Acquire the initial mplock * @@ -188,7 +187,6 @@ cpu_get_initial_mplock(void) if (lwkt_trytoken(&mp_token) == FALSE) panic("cpu_get_initial_mplock"); } -#endif /* * Return a pool token given an address.
Use a prime number to reduce diff --git a/sys/kern/subr_cpu_topology.c b/sys/kern/subr_cpu_topology.c index 186f1c8872..5294b3e469 100644 --- a/sys/kern/subr_cpu_topology.c +++ b/sys/kern/subr_cpu_topology.c @@ -39,8 +39,6 @@ #include -#ifdef SMP - #ifndef NAPICID #define NAPICID 256 #endif @@ -569,4 +567,3 @@ init_cpu_topology(void) } SYSINIT(cpu_topology, SI_BOOT2_CPU_TOPOLOGY, SI_ORDER_FIRST, init_cpu_topology, NULL) -#endif diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c index da5605448b..0be7ad7256 100644 --- a/sys/kern/sys_pipe.c +++ b/sys/kern/sys_pipe.c @@ -17,7 +17,6 @@ * are met. * * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $ - * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $ */ /* @@ -136,11 +135,9 @@ SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache, CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu"); SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig, CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes"); -#ifdef SMP static int pipe_delay = 5000; /* 5uS default */ SYSCTL_INT(_kern_pipe, OID_AUTO, delay, CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns"); -#endif #if !defined(NO_PIPE_SYSCTL_STATS) SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc, CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache"); @@ -519,7 +516,7 @@ pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) continue; -#if defined(SMP) && defined(_RDTSC_SUPPORTED_) +#ifdef _RDTSC_SUPPORTED_ if (pipe_delay) { int64_t tsc_target; int good = 0; @@ -798,7 +795,6 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) if (segsize > space) segsize = space; -#ifdef SMP /* * If this is the first loop and the reader is * blocked, do a preemptive wakeup of the reader. @@ -813,7 +809,6 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) */ if ((wpipe->pipe_state & PIPE_WANTR)) wakeup(wpipe); -#endif /* * Transfer segment, which may include a wrap-around. 
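With the UP branch gone, pipe_read() keeps only the _RDTSC_SUPPORTED_ spin: rather than sleeping at once, the reader busy-waits until a TSC deadline in the hope that the writer catches up (the budget is tunable through kern.pipe.delay). Below is a self-contained sketch of such a bounded TSC wait, assuming an x86 target with GCC/Clang intrinsics; the function name and the cycle budget are invented for illustration. Once the budget expires the caller is expected to fall back to a normal sleep, much as the pipe code does.

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>

/* Spin until `cond` becomes true or roughly `cycles` TSC ticks elapse. */
static int tsc_bounded_wait(volatile int *cond, uint64_t cycles)
{
	uint64_t target = __rdtsc() + cycles;

	while (__rdtsc() < target) {
		if (*cond)
			return 1;	/* condition met before the deadline */
		_mm_pause();		/* give a sibling hyperthread a chance */
	}
	return 0;			/* budget expired, caller should sleep */
}

int main(void)
{
	volatile int flag = 0;

	printf("condition met before deadline: %d\n",
	    tsc_bounded_wait(&flag, 100000));
	return 0;
}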
diff --git a/sys/kern/usched_bsd4.c b/sys/kern/usched_bsd4.c index 1dc67e663f..1ea8afcf79 100644 --- a/sys/kern/usched_bsd4.c +++ b/sys/kern/usched_bsd4.c @@ -101,13 +101,10 @@ static void bsd4_forking(struct lwp *plp, struct lwp *lp); static void bsd4_exiting(struct lwp *lp, struct proc *); static void bsd4_uload_update(struct lwp *lp); static void bsd4_yield(struct lwp *lp); - -#ifdef SMP static void bsd4_need_user_resched_remote(void *dummy); static int bsd4_batchy_looser_pri_test(struct lwp* lp); static struct lwp *bsd4_chooseproc_locked_cache_coherent(struct lwp *chklp); static void bsd4_kick_helper(struct lwp *lp); -#endif static struct lwp *bsd4_chooseproc_locked(struct lwp *chklp); static void bsd4_remrunqueue_locked(struct lwp *lp); static void bsd4_setrunqueue_locked(struct lwp *lp); @@ -136,9 +133,7 @@ struct usched_bsd4_pcpu { short upri; struct lwp *uschedcp; struct lwp *old_uschedcp; -#ifdef SMP cpu_node_t *cpunode; -#endif }; typedef struct usched_bsd4_pcpu *bsd4_pcpu_t; @@ -164,9 +159,7 @@ static u_int32_t bsd4_idqueuebits; static cpumask_t bsd4_curprocmask = -1; /* currently running a user process */ static cpumask_t bsd4_rdyprocmask; /* ready to accept a user process */ static int bsd4_runqcount; -#ifdef SMP static volatile int bsd4_scancpu; -#endif static struct spinlock bsd4_spin; static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU]; static struct sysctl_ctx_list usched_bsd4_sysctl_ctx; @@ -189,14 +182,12 @@ SYSCTL_INT(_debug, OID_AUTO, bsd4_pid_debug, CTLFLAG_RW, "Print KTR debug information for this pid"); /* Tuning usched_bsd4 - configurable through kern.usched_bsd4.* */ -#ifdef SMP static int usched_bsd4_smt = 0; static int usched_bsd4_cache_coherent = 0; static int usched_bsd4_upri_affinity = 16; /* 32 queues - half-way */ static int usched_bsd4_queue_checks = 5; static int usched_bsd4_stick_to_level = 0; static long usched_bsd4_kicks; -#endif static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10; static int usched_bsd4_decay = 8; static int usched_bsd4_batch_time = 10; @@ -236,7 +227,6 @@ KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_select_curproc, 0, "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)", pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr); -#ifdef SMP KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_false, 0, "USCHED_BSD4(batchy_looser_pri_test false: pid %d, " "cpuid %d, verify_mask %lu)", @@ -270,12 +260,10 @@ KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found_best_cpuid, 0, "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, " "mask %lu, found_cpuid %d, curr_cpuid %d)", pid_t pid, int cpuid, cpumask_t mask, int found_cpuid, int curr); -#endif KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc, 0, "USCHED_BSD4(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)", pid_t pid, int old_cpuid, int curr); -#ifdef SMP KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc, 0, "USCHED_BSD4(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)", pid_t pid, int old_cpuid, int curr); @@ -297,7 +285,6 @@ KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_process, 0, KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process_found, 0, "USCHED_BSD4(sched_thread %d no process found; tmpmask %lu)", int id, cpumask_t tmpmask); -#endif /* * Initialize the run queues at boot time.
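bsd4_curprocmask and bsd4_rdyprocmask above are one-bit-per-cpu masks; the scheduler sets, clears, and scans bits as cpus pick up or release user processes. A toy userspace model of that bookkeeping is sketched here, with the mask modeled as a plain 32-bit integer, ffs() standing in for the kernel's bit scan, and the atomic_set_cpumask()/atomic_clear_cpumask() operations reduced to ordinary assignments.

#include <stdio.h>
#include <strings.h>

typedef unsigned int cpumask_t;		/* modeled as a 32-bit mask */
#define CPUMASK(cpu)	((cpumask_t)1 << (cpu))

int main(void)
{
	cpumask_t rdyprocmask = 0;	/* cpus ready to accept a user process */

	rdyprocmask |= CPUMASK(2) | CPUMASK(5);	/* cpus 2 and 5 become ready */
	rdyprocmask &= ~CPUMASK(2);		/* cpu 2 picks up a process */

	int first = ffs((int)rdyprocmask);	/* 1-based index of lowest set bit */
	if (first)
		printf("next ready cpu: %d\n", first - 1);	/* prints 5 */
	return 0;
}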
@@ -533,11 +520,9 @@ bsd4_select_curproc(globaldata_t gd) crit_enter_gd(gd); spin_lock(&bsd4_spin); -#ifdef SMP if(usched_bsd4_cache_coherent) nlp = bsd4_chooseproc_locked_cache_coherent(dd->uschedcp); else -#endif nlp = bsd4_chooseproc_locked(dd->uschedcp); if (nlp) { @@ -555,9 +540,7 @@ bsd4_select_curproc(globaldata_t gd) dd->uschedcp = nlp; dd->rrcount = 0; /* reset round robin */ spin_unlock(&bsd4_spin); -#ifdef SMP lwkt_acquire(nlp->lwp_thread); -#endif lwkt_schedule(nlp->lwp_thread); } else { spin_unlock(&bsd4_spin); @@ -574,7 +557,6 @@ bsd4_select_curproc(globaldata_t gd) #endif crit_exit_gd(gd); } -#ifdef SMP /* * batchy_looser_pri_test() - determine if a process is batchy or not @@ -616,7 +598,6 @@ bsd4_batchy_looser_pri_test(struct lwp* lp) return 1; } -#endif /* * * BSD4_SETRUNQUEUE @@ -633,11 +614,9 @@ bsd4_setrunqueue(struct lwp *lp) { globaldata_t gd; bsd4_pcpu_t dd; -#ifdef SMP int cpuid; cpumask_t mask; cpumask_t tmpmask; -#endif /* * First validate the process state relative to the current cpu. @@ -664,28 +643,6 @@ bsd4_setrunqueue(struct lwp *lp) */ KKASSERT(dd->uschedcp != lp); -#ifndef SMP - /* - * If we are not SMP we do not have a scheduler helper to kick - * and must directly activate the process if none are scheduled. - * - * This is really only an issue when bootstrapping init since - * the caller in all other cases will be a user process, and - * even if released (dd->uschedcp == NULL), that process will - * kickstart the scheduler when it returns to user mode from - * the kernel. - */ - if (dd->uschedcp == NULL) { - atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask); - dd->uschedcp = lp; - dd->upri = lp->lwp_priority; - lwkt_schedule(lp->lwp_thread); - crit_exit(); - return; - } -#endif - -#ifdef SMP /* * XXX fixme. Could be part of a remrunqueue/setrunqueue * operation when the priority is recalculated, so TDF_MIGRATING @@ -693,7 +650,6 @@ bsd4_setrunqueue(struct lwp *lp) */ if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0) lwkt_giveaway(lp->lwp_thread); -#endif /* * We lose control of lp the moment we release the spinlock after @@ -705,7 +661,6 @@ bsd4_setrunqueue(struct lwp *lp) bsd4_setrunqueue_locked(lp); lp->lwp_rebal_ticks = sched_ticks; -#ifdef SMP /* * Kick the scheduler helper on one of the other cpu's * and request a reschedule if appropriate. @@ -906,15 +861,6 @@ found: else wakeup(&dd->helper_thread); } -#else - /* - * Request a reschedule if appropriate. 
- */ - spin_unlock(&bsd4_spin); - if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) { - need_user_resched(); - } -#endif crit_exit(); } @@ -1223,7 +1169,6 @@ bsd4_resetpriority(struct lwp *lp) if ((bsd4_rdyprocmask & CPUMASK(reschedcpu)) && (checkpri == 0 || (dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) { -#ifdef SMP if (reschedcpu == mycpu->gd_cpuid) { spin_unlock(&bsd4_spin); need_user_resched(); @@ -1235,10 +1180,6 @@ bsd4_resetpriority(struct lwp *lp) bsd4_need_user_resched_remote, NULL); } -#else - spin_unlock(&bsd4_spin); - need_user_resched(); -#endif } else { spin_unlock(&bsd4_spin); } @@ -1349,9 +1290,7 @@ bsd4_chooseproc_locked(struct lwp *chklp) cpumask = mycpu->gd_cpumask; -#ifdef SMP again: -#endif if (rtqbits) { pri = bsfl(rtqbits); q = &bsd4_rtqueues[pri]; @@ -1373,7 +1312,6 @@ again: lp = TAILQ_FIRST(q); KASSERT(lp, ("chooseproc: no lwp on busy queue")); -#ifdef SMP while ((lp->lwp_cpumask & cpumask) == 0) { lp = TAILQ_NEXT(lp, lwp_procq); if (lp == NULL) { @@ -1381,7 +1319,6 @@ again: goto again; } } -#endif /* * If the passed lwp is reasonably close to the selected @@ -1395,7 +1332,6 @@ again: return(NULL); } -#ifdef SMP /* * If the chosen lwp does not reside on this cpu spend a few * cycles looking for a better candidate at the same priority level. @@ -1409,7 +1345,6 @@ again: lp = chklp; } } -#endif KTR_COND_LOG(usched_chooseproc, lp->lwp_proc->p_pid == usched_bsd4_pid_debug, @@ -1427,7 +1362,6 @@ again: return lp; } -#ifdef SMP /* * chooseproc() - with a cache coherence heuristic. Try to pull a process that * has its home on the current CPU. If the process doesn't have its home here @@ -1642,8 +1576,6 @@ bsd4_need_user_resched_remote(void *dummy) wakeup_mycpu(&dd->helper_thread); } -#endif - /* * bsd4_remrunqueue_locked() removes a given process from the run queue * that it is on, clearing the queue busy bit if it becomes empty.
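bsd4_chooseproc_locked() above scans rtqbits/queuebits with bsfl to find the highest-priority non-empty queue; the setrunqueue side sets a queue's bit on insert and the chooser clears it when the queue drains. A compact userspace model of that bitmap discipline follows, with ffs() substituting for bsfl and each TAILQ reduced to a counter; names are invented for the sketch.

#include <stdio.h>
#include <strings.h>

#define NQS 32			/* 32 priority queues, one status bit each */

static unsigned int queuebits;	/* bit n set => queue n is non-empty */
static int queues[NQS];		/* per-queue element count, standing in for TAILQs */

static void setrunqueue(int pri)
{
	queues[pri]++;
	queuebits |= 1u << pri;		/* mirrors "*which |= 1 << pri" above */
}

static int chooseproc(void)
{
	int pri;

	if (queuebits == 0)
		return -1;		/* nothing runnable */
	pri = ffs((int)queuebits) - 1;	/* userspace stand-in for bsfl */
	if (--queues[pri] == 0)
		queuebits &= ~(1u << pri);	/* queue drained: clear its bit */
	return pri;
}

int main(void)
{
	setrunqueue(7);
	setrunqueue(3);
	int a = chooseproc();	/* lower index = higher priority, so 3 first */
	int b = chooseproc();
	printf("picked pri %d, then pri %d\n", a, b);
	return 0;
}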
@@ -1748,8 +1680,6 @@ bsd4_setrunqueue_locked(struct lwp *lp) *which |= 1 << pri; } -#ifdef SMP - /* * For SMP systems a user scheduler helper thread is created for each * cpu and is used to allow one cpu to wakeup another for the purposes of @@ -2076,32 +2006,3 @@ sched_thread_cpu_init(void) } SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND, sched_thread_cpu_init, NULL) - -#else /* No SMP options - just add the configurable parameters to sysctl */ - -static void -sched_sysctl_tree_init(void) -{ - sysctl_ctx_init(&usched_bsd4_sysctl_ctx); - usched_bsd4_sysctl_tree = - SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx, - SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO, - "usched_bsd4", CTLFLAG_RD, 0, ""); - - /* usched_bsd4 sysctl configurable parameters */ - SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx, - SYSCTL_CHILDREN(usched_bsd4_sysctl_tree), - OID_AUTO, "rrinterval", CTLFLAG_RW, - &usched_bsd4_rrinterval, 0, ""); - SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx, - SYSCTL_CHILDREN(usched_bsd4_sysctl_tree), - OID_AUTO, "decay", CTLFLAG_RW, - &usched_bsd4_decay, 0, "Extra decay when not running"); - SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx, - SYSCTL_CHILDREN(usched_bsd4_sysctl_tree), - OID_AUTO, "batch_time", CTLFLAG_RW, - &usched_bsd4_batch_time, 0, "Min batch counter value"); -} -SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND, - sched_sysctl_tree_init, NULL) -#endif diff --git a/sys/kern/usched_dfly.c b/sys/kern/usched_dfly.c index 6e5fd8c1a8..ed02f87353 100644 --- a/sys/kern/usched_dfly.c +++ b/sys/kern/usched_dfly.c @@ -101,11 +101,7 @@ TAILQ_HEAD(rq, lwp); struct usched_dfly_pcpu { struct spinlock spin; -#ifdef SMP struct thread helper_thread; -#else - struct thread helper_thread_UNUSED; /* field unused */ -#endif short unusde01; short upri; int uload; @@ -120,9 +116,7 @@ struct usched_dfly_pcpu { int runqcount; int cpuid; cpumask_t cpumask; -#ifdef SMP cpu_node_t *cpunode; -#endif }; typedef struct usched_dfly_pcpu *dfly_pcpu_t; @@ -140,17 +134,12 @@ static void dfly_forking(struct lwp *plp, struct lwp *lp); static void dfly_exiting(struct lwp *lp, struct proc *); static void dfly_uload_update(struct lwp *lp); static void dfly_yield(struct lwp *lp); -#ifdef SMP static void dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd); static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp); static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd); static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp); -#endif - -#ifdef SMP static void dfly_need_user_resched_remote(void *dummy); -#endif static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd, struct lwp *chklp, int worst); static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp); @@ -188,9 +177,7 @@ struct usched usched_dfly = { */ static cpumask_t dfly_curprocmask = -1; /* currently running a user process */ static cpumask_t dfly_rdyprocmask; /* ready to accept a user process */ -#ifdef SMP static volatile int dfly_scancpu; -#endif static volatile int dfly_ucount; /* total running on whole system */ static struct usched_dfly_pcpu dfly_pcpu[MAXCPU]; static struct sysctl_ctx_list usched_dfly_sysctl_ctx; @@ -256,7 +243,6 @@ SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW, * 0x40 choose current cpu for forked process * 0x80 choose random cpu for forked process (default) */ -#ifdef SMP static int usched_dfly_smt = 0; static int usched_dfly_cache_coherent = 0; static int usched_dfly_weight1 = 200; /* keep thread on current cpu */ @@ -264,7 +250,6 @@ static int usched_dfly_weight2 = 180; /* 
synchronous peer's current cpu */ static int usched_dfly_weight3 = 40; /* number of threads on queue */ static int usched_dfly_weight4 = 160; /* availability of idle cores */ static int usched_dfly_features = 0x8F; /* allow pulls */ -#endif static int usched_dfly_fast_resched = 0;/* delta priority / resched */ static int usched_dfly_swmask = ~PPQMASK; /* allow pulls */ static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10; @@ -302,9 +287,7 @@ dfly_acquire_curproc(struct lwp *lp) { globaldata_t gd; dfly_pcpu_t dd; -#ifdef SMP dfly_pcpu_t rdd; -#endif thread_t td; int force_resched; @@ -359,7 +342,6 @@ dfly_acquire_curproc(struct lwp *lp) * (if a reschedule was not requested we want to move this * step after the uschedcp tests). */ -#ifdef SMP if (force_resched && (usched_dfly_features & 0x08) && (rdd = dfly_choose_best_queue(lp)) != dd) { @@ -372,7 +354,6 @@ dfly_acquire_curproc(struct lwp *lp) dd = &dfly_pcpu[gd->gd_cpuid]; continue; } -#endif /* * Either no reschedule was requested or the best queue was @@ -420,7 +401,6 @@ dfly_acquire_curproc(struct lwp *lp) spin_unlock(&dd->spin); break; } -#ifdef SMP /* * We are not the current lwp, figure out the best cpu * to run on (our current cpu will be given significant @@ -438,7 +418,6 @@ dfly_acquire_curproc(struct lwp *lp) dd = &dfly_pcpu[gd->gd_cpuid]; continue; } -#endif /* * We cannot become the current lwp, place the lp on the @@ -544,9 +523,7 @@ dfly_select_curproc(globaldata_t gd) dd->rrcount = 0; /* reset round robin */ #endif spin_unlock(&dd->spin); -#ifdef SMP lwkt_acquire(nlp->lwp_thread); -#endif lwkt_schedule(nlp->lwp_thread); } else { spin_unlock(&dd->spin); @@ -590,34 +567,6 @@ dfly_setrunqueue(struct lwp *lp) */ KKASSERT(rdd->uschedcp != lp); -#ifndef SMP - /* - * If we are not SMP we do not have a scheduler helper to kick - * and must directly activate the process if none are scheduled. - * - * This is really only an issue when bootstrapping init since - * the caller in all other cases will be a user process, and - * even if released (rdd->uschedcp == NULL), that process will - * kickstart the scheduler when it returns to user mode from - * the kernel. - * - * NOTE: On SMP we can't just set some other cpu's uschedcp. - */ - if (rdd->uschedcp == NULL) { - spin_lock(&rdd->spin); - if (rdd->uschedcp == NULL) { - atomic_set_cpumask(&dfly_curprocmask, 1); - rdd->uschedcp = lp; - rdd->upri = lp->lwp_priority; - spin_unlock(&rdd->spin); - lwkt_schedule(lp->lwp_thread); - return; - } - spin_unlock(&rdd->spin); - } -#endif - -#ifdef SMP /* * Ok, we have to setrunqueue some target cpu and request a reschedule * if necessary. @@ -655,12 +604,9 @@ dfly_setrunqueue(struct lwp *lp) dfly_changeqcpu_locked(lp, dd, rdd); spin_unlock(&dd->spin); } -#endif dfly_setrunqueue_dd(rdd, lp); } -#ifdef SMP - /* * Change qcpu to rdd->cpuid. The dd the lp is CURRENTLY on must be * spin-locked on-call. rdd does not have to be. @@ -679,8 +625,6 @@ dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd) } } -#endif - /* * Place lp on rdd's runqueue. Nothing is locked on call. This function * also performs all necessary ancillary notification actions. 
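dfly_changeqcpu_locked() retags an lwp to another cpu while the current cpu's spinlock is held; the actual insertion into the target queue happens afterwards under the target's own lock. A simplified pthread sketch of that two-step handoff is below. The structure names and the load counter are invented, only one lock is held at a time, and the real code's lock-ordering and critical-section rules are deliberately ignored.

#include <pthread.h>
#include <stdio.h>

struct pcpu {
	pthread_mutex_t	spin;	/* stand-in for the per-cpu spinlock */
	int		uload;	/* stand-in for the per-cpu load accounting */
};

struct toy_lwp {
	int	qcpu;		/* which cpu's queue the lwp belongs to */
};

static void
changeqcpu(struct pcpu *dd, struct pcpu *rdd, struct toy_lwp *lp, int rcpu)
{
	/* step 1: under the old cpu's lock, drop its accounting and retag */
	pthread_mutex_lock(&dd->spin);
	dd->uload--;
	lp->qcpu = rcpu;
	pthread_mutex_unlock(&dd->spin);

	/* step 2: under the target's lock only, pick the accounting back up */
	pthread_mutex_lock(&rdd->spin);
	rdd->uload++;
	pthread_mutex_unlock(&rdd->spin);
}

int main(void)
{
	struct pcpu cpu0 = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct pcpu cpu1 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct toy_lwp lp = { 0 };

	changeqcpu(&cpu0, &cpu1, &lp, 1);
	printf("lwp now on cpu %d, loads %d/%d\n", lp.qcpu, cpu0.uload, cpu1.uload);
	return 0;
}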
@@ -688,7 +632,6 @@ dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd) static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp) { -#ifdef SMP globaldata_t rgd; /* @@ -756,17 +699,6 @@ dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp) spin_unlock(&rdd->spin); lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL); } -#else - /* - * Request a reschedule if appropriate. - */ - spin_lock(&rdd->spin); - dfly_setrunqueue_locked(rdd, lp); - spin_unlock(&rdd->spin); - if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) { - need_user_resched(); - } -#endif } /* @@ -779,9 +711,7 @@ void dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp) { globaldata_t gd = mycpu; -#ifdef SMP dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid]; -#endif /* * Spinlocks also hold a critical section so there should not be @@ -830,7 +760,6 @@ dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp) * likely to be able to remain in place. Hopefully then any pairings, * if applicable, migrate to where these threads are. */ -#ifdef SMP if ((usched_dfly_features & 0x04) && ((u_int)sched_ticks & 7) == 0 && (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) { @@ -876,7 +805,6 @@ dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp) spin_unlock(&dd->spin); } } -#endif } /* @@ -1172,7 +1100,6 @@ dfly_resetpriority(struct lwp *lp) (checkpri == 0 || (rdd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) { -#ifdef SMP if (rcpu == mycpu->gd_cpuid) { spin_unlock(&rdd->spin); need_user_resched(); @@ -1182,10 +1109,6 @@ dfly_resetpriority(struct lwp *lp) dfly_need_user_resched_remote, NULL); } -#else - spin_unlock(&rdd->spin); - need_user_resched(); -#endif } else { spin_unlock(&rdd->spin); } @@ -1423,8 +1346,6 @@ dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd, return lp; } -#ifdef SMP - /* * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU. * @@ -1853,8 +1774,6 @@ dfly_need_user_resched_remote(void *dummy) } } -#endif - /* * dfly_remrunqueue_locked() removes a given process from the run queue * that it is on, clearing the queue busy bit if it becomes empty. @@ -1991,8 +1910,6 @@ dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp) *which |= 1 << pri; } -#ifdef SMP - /* * For SMP systems a user scheduler helper thread is created for each * cpu and is used to allow one cpu to wakeup another for the purposes of @@ -2144,8 +2061,6 @@ sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS) } #endif -#endif - /* * Setup the queues and scheduler helpers (scheduler helpers are SMP only). * Note that curprocmask bit 0 has already been cleared by rqinit() and @@ -2156,11 +2071,9 @@ usched_dfly_cpu_init(void) { int i; int j; -#ifdef SMP int cpuid; int smt_not_supported = 0; int cache_coherent_not_supported = 0; -#endif if (bootverbose) kprintf("Start scheduler helpers on cpus:\n"); @@ -2179,9 +2092,7 @@ usched_dfly_cpu_init(void) continue; spin_init(&dd->spin); -#ifdef SMP dd->cpunode = get_cpu_node_by_cpuid(i); -#endif dd->cpuid = i; dd->cpumask = CPUMASK(i); for (j = 0; j < NQS; j++) { @@ -2191,7 +2102,6 @@ usched_dfly_cpu_init(void) } atomic_clear_cpumask(&dfly_curprocmask, 1); -#ifdef SMP if (dd->cpunode == NULL) { smt_not_supported = 1; cache_coherent_not_supported = 1; @@ -2249,7 +2159,6 @@ usched_dfly_cpu_init(void) lwkt_create(dfly_helper_thread, NULL, NULL, &dd->helper_thread, 0, i, "usched %d", i); -#endif /* * Allow user scheduling on the target cpu. 
cpu #0 has already @@ -2273,7 +2182,6 @@ usched_dfly_cpu_init(void) OID_AUTO, "decay", CTLFLAG_RW, &usched_dfly_decay, 0, "Extra decay when not running"); -#ifdef SMP /* Add enable/disable option for SMT scheduling if supported */ if (smt_not_supported) { usched_dfly_smt = 0; @@ -2361,7 +2269,6 @@ usched_dfly_cpu_init(void) "parameter hw.cpu_topology.level_description"); #endif } -#endif /* SMP */ } SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND, usched_dfly_cpu_init, NULL) diff --git a/sys/kern/usched_dummy.c b/sys/kern/usched_dummy.c index de79499da1..85413748cd 100644 --- a/sys/kern/usched_dummy.c +++ b/sys/kern/usched_dummy.c @@ -30,8 +30,6 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $DragonFly: src/sys/kern/usched_dummy.c,v 1.9 2008/04/21 15:24:46 dillon Exp $ */ #include @@ -252,9 +250,7 @@ dummy_select_curproc(globaldata_t gd) dd->uschedcp = lp; atomic_set_cpumask(&dummy_curprocmask, gd->gd_cpumask); spin_unlock(&dummy_spin); -#ifdef SMP lwkt_acquire(lp->lwp_thread); -#endif lwkt_schedule(lp->lwp_thread); } } @@ -293,9 +289,7 @@ dummy_setrunqueue(struct lwp *lp) ++dummy_runqcount; TAILQ_INSERT_TAIL(&dummy_runq, lp, lwp_procq); atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ); -#ifdef SMP lwkt_giveaway(lp->lwp_thread); -#endif /* lp = TAILQ_FIRST(&dummy_runq); */ @@ -459,8 +453,6 @@ dummy_uload_update(struct lwp *lp) * * MPSAFE */ -#ifdef SMP - static void dummy_sched_thread(void *dummy) { @@ -504,9 +496,7 @@ dummy_sched_thread(void *dummy) dd->uschedcp = lp; atomic_set_cpumask(&dummy_curprocmask, cpumask); spin_unlock(&dummy_spin); -#ifdef SMP lwkt_acquire(lp->lwp_thread); -#endif lwkt_schedule(lp->lwp_thread); } else { spin_unlock(&dummy_spin); } @@ -553,6 +543,3 @@ dummy_sched_thread_cpu_init(void) } SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND, dummy_sched_thread_cpu_init, NULL) - -#endif - diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c index bc74f95a51..4858c8dc85 100644 --- a/sys/kern/vfs_vnops.c +++ b/sys/kern/vfs_vnops.c @@ -37,7 +37,6 @@ * * @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $ - * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.58 2008/06/28 17:59:49 dillon Exp $ */ #include @@ -529,7 +528,7 @@ vn_set_fpf_offset(struct file *fp, off_t offset) static __inline off_t vn_poll_fpf_offset(struct file *fp) { -#if defined(__x86_64__) || !defined(SMP) +#if defined(__x86_64__) return(fp->f_offset); #else off_t off = vn_get_fpf_offset(fp); diff --git a/sys/net/altq/altq_subr.c b/sys/net/altq/altq_subr.c index 1ca6f68d24..dab44930c3 100644 --- a/sys/net/altq/altq_subr.c +++ b/sys/net/altq/altq_subr.c @@ -802,7 +802,7 @@ init_machclk(void) #if !defined(__i386__) || defined(ALTQ_NOPCC) machclk_usepcc = 0; -#elif defined(__DragonFly__) && defined(SMP) +#elif defined(__DragonFly__) machclk_usepcc = 0; #elif defined(__i386__) /* check if TSC is available */ diff --git a/sys/net/altq/if_altq.h b/sys/net/altq/if_altq.h index 9b0da4b5d6..94b189d505 100644 --- a/sys/net/altq/if_altq.h +++ b/sys/net/altq/if_altq.h @@ -1,5 +1,4 @@ /* $KAME: if_altq.h,v 1.11 2003/07/10 12:07:50 kjc Exp $ */ -/* $DragonFly: src/sys/net/altq/if_altq.h,v 1.4 2008/05/14 11:59:23 sephe Exp $ */ /* * Copyright (C) 1997-2003 @@ -70,17 +69,10 @@ struct ifaltq { int altq_started; /* ifnet.if_start interlock */ }; -#ifdef SMP #define ALTQ_ASSERT_LOCKED(ifq) ASSERT_SERIALIZED(&(ifq)->altq_lock) #define
ALTQ_LOCK_INIT(ifq) lwkt_serialize_init(&(ifq)->altq_lock) #define ALTQ_LOCK(ifq) lwkt_serialize_adaptive_enter(&(ifq)->altq_lock) #define ALTQ_UNLOCK(ifq) lwkt_serialize_exit(&(ifq)->altq_lock) -#else -#define ALTQ_ASSERT_LOCKED(ifq) ((void)0) /* XXX */ -#define ALTQ_LOCK_INIT(ifq) ((void)0) -#define ALTQ_LOCK(ifq) crit_enter() -#define ALTQ_UNLOCK(ifq) crit_exit() -#endif #ifdef _KERNEL diff --git a/sys/net/if.c b/sys/net/if.c index c1ee3847e3..11935a1d48 100644 --- a/sys/net/if.c +++ b/sys/net/if.c @@ -171,10 +171,8 @@ KTR_INFO(KTR_IF_START, if_start, avoid, 2, IF_START_KTR_STRING, IF_START_KTR_ARGS); KTR_INFO(KTR_IF_START, if_start, contend_sched, 3, IF_START_KTR_STRING, IF_START_KTR_ARGS); -#ifdef SMP KTR_INFO(KTR_IF_START, if_start, chase_sched, 4, IF_START_KTR_STRING, IF_START_KTR_ARGS); -#endif #define logifstart(name, arg) KTR_LOG(if_start_ ## name, arg) TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head); @@ -255,14 +253,12 @@ if_start_ipifunc(void *arg) static void if_start_schedule(struct ifnet *ifp) { -#ifdef SMP int cpu; cpu = ifp->if_start_cpuid(ifp); if (cpu != mycpuid) lwkt_send_ipiq(globaldata_find(cpu), if_start_ipifunc, ifp); else -#endif if_start_ipifunc(ifp); } @@ -317,7 +313,6 @@ if_start_dispatch(netmsg_t msg) lwkt_replymsg(lmsg, 0); /* reply ASAP */ crit_exit(); -#ifdef SMP if (mycpuid != ifp->if_start_cpuid(ifp)) { /* * If the ifnet is still up, we need to @@ -331,7 +326,6 @@ if_start_dispatch(netmsg_t msg) goto check; } } -#endif if (ifp->if_flags & IFF_UP) { ifnet_serialize_tx(ifp); /* XXX try? */ @@ -344,9 +338,7 @@ if_start_dispatch(netmsg_t msg) } ifnet_deserialize_tx(ifp); } -#ifdef SMP check: -#endif if (if_start_need_schedule(ifq, running)) { crit_enter(); if (lmsg->ms_flags & MSGF_DONE) { /* XXX necessary? 
*/ diff --git a/sys/net/netisr.c b/sys/net/netisr.c index 0b4f7f9120..7550d7c6ed 100644 --- a/sys/net/netisr.c +++ b/sys/net/netisr.c @@ -590,7 +590,6 @@ schednetisr(int num) KASSERT((num > 0 && num <= NELEM(netisrs)), ("schednetisr: bad isr %d", num)); KKASSERT(netisrs[num].ni_handler != NULL); -#ifdef SMP if (mycpu->gd_cpuid != 0) { lwkt_send_ipiq(globaldata_find(0), schednetisr_remote, (void *)(intptr_t)num); @@ -599,15 +598,8 @@ schednetisr(int num) schednetisr_remote((void *)(intptr_t)num); crit_exit(); } -#else - crit_enter(); - schednetisr_remote((void *)(intptr_t)num); - crit_exit(); -#endif } -#ifdef SMP - static void netisr_barrier_dispatch(netmsg_t nmsg) { @@ -633,8 +625,6 @@ netisr_barrier_dispatch(netmsg_t nmsg) lwkt_replymsg(&nmsg->lmsg, 0); } -#endif - struct netisr_barrier * netisr_barrier_create(void) { @@ -647,7 +637,6 @@ netisr_barrier_create(void) void netisr_barrier_set(struct netisr_barrier *br) { -#ifdef SMP volatile cpumask_t other_cpumask; int i, cur_cpuid; @@ -685,14 +674,12 @@ netisr_barrier_set(struct netisr_barrier *br) if (other_cpumask != 0) tsleep(&other_cpumask, PINTERLOCKED, "nbrset", 0); } -#endif br->br_isset = 1; } void netisr_barrier_rem(struct netisr_barrier *br) { -#ifdef SMP int i, cur_cpuid; KKASSERT(&curthread->td_msgport == netisr_portfn(0)); @@ -713,7 +700,6 @@ netisr_barrier_rem(struct netisr_barrier *br) if (done & NETISR_BR_WAITDONE) wakeup(&msg->br_done); } -#endif br->br_isset = 0; } diff --git a/sys/net/pf/pf.c b/sys/net/pf/pf.c index d09567621d..3562cace4e 100644 --- a/sys/net/pf/pf.c +++ b/sys/net/pf/pf.c @@ -2812,7 +2812,6 @@ pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction, return (r); } -#ifdef SMP struct netmsg_hashlookup { struct netmsg_base base; struct inpcb **nm_pinp; @@ -2844,8 +2843,6 @@ in_pcblookup_hash_handler(netmsg_t msg) } #endif /* PF_SOCKET_LOOKUP_DOMSG */ -#endif /* SMP */ - int pf_socket_lookup(int direction, struct pf_pdesc *pd) { @@ -2853,11 +2850,9 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) u_int16_t sport, dport; struct inpcbinfo *pi; struct inpcb *inp; -#ifdef SMP struct netmsg_hashlookup *msg = NULL; #ifdef PF_SOCKET_LOOKUP_DOMSG struct netmsg_hashlookup msg0; -#endif #endif int pi_cpu = 0; @@ -2882,7 +2877,6 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport, daddr->v4.s_addr, dport); pi = &tcbinfo[pi_cpu]; -#ifdef SMP /* * Our netstack runs lockless on MP systems * (only for TCP connections at the moment). @@ -2927,7 +2921,6 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) return -1; #endif /* PF_SOCKET_LOOKUP_DOMSG */ } -#endif /* SMP */ break; case IPPROTO_UDP: if (pd->hdr.udp == NULL) @@ -2949,7 +2942,6 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) switch (pd->af) { #ifdef INET6 case AF_INET6: -#ifdef SMP /* * Query other CPU, second part * @@ -2959,9 +2951,7 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) * * Use some switch/case magic to avoid code duplication. 
*/ - if (msg == NULL) -#endif /* SMP */ - { + if (msg == NULL) { inp = in6_pcblookup_hash(pi, &saddr->v6, sport, &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL); @@ -2972,12 +2962,10 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) /* FALLTHROUGH if SMP and on other CPU */ #endif /* INET6 */ case AF_INET: -#ifdef SMP if (msg != NULL) { lwkt_domsg(netisr_portfn(pi_cpu), &msg->base.lmsg, 0); } else -#endif /* SMP */ { inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4, dport, INPLOOKUP_WILDCARD, NULL); diff --git a/sys/net/route.c b/sys/net/route.c index 698dec8d88..9b2237e12b 100644 --- a/sys/net/route.c +++ b/sys/net/route.c @@ -96,11 +96,7 @@ #endif static struct rtstatistics rtstatistics_percpu[MAXCPU]; -#ifdef SMP #define rtstat rtstatistics_percpu[mycpuid] -#else -#define rtstat rtstatistics_percpu[0] -#endif struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1]; struct lwkt_port *rt_ports[MAXCPU]; @@ -112,10 +108,8 @@ static void rtable_service_loop(void *dummy); static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *, struct rtentry *, void *); -#ifdef SMP static void rtredirect_msghandler(netmsg_t msg); static void rtrequest1_msghandler(netmsg_t msg); -#endif static void rtsearch_msghandler(netmsg_t msg); static void rtmask_add_msghandler(netmsg_t msg); @@ -208,7 +202,6 @@ rtable_service_loop(void *dummy __unused) /* * Routing statistics. */ -#ifdef SMP static int sysctl_rtstatistics(SYSCTL_HANDLER_ARGS) { @@ -227,10 +220,6 @@ sysctl_rtstatistics(SYSCTL_HANDLER_ARGS) } SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW), 0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics"); -#else -SYSCTL_STRUCT(_net_route, OID_AUTO, stats, CTLFLAG_RW, &rtstat, rtstatistics, -"Routing statistics"); -#endif /* * Packet routing routines. @@ -499,8 +488,6 @@ out: return error; } -#ifdef SMP - struct netmsg_rtredirect { struct netmsg_base base; struct sockaddr *dst; @@ -510,8 +497,6 @@ struct netmsg_rtredirect { struct sockaddr *src; }; -#endif - /* * Force a routing table entry to the specified * destination to go through the given gateway. @@ -526,7 +511,6 @@ rtredirect(struct sockaddr *dst, struct sockaddr *gateway, { struct rt_addrinfo rtinfo; int error; -#ifdef SMP struct netmsg_rtredirect msg; netmsg_init(&msg.base, NULL, &curthread->td_msgport, @@ -537,9 +521,6 @@ rtredirect(struct sockaddr *dst, struct sockaddr *gateway, msg.flags = flags; msg.src = src; error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0); -#else - error = rtredirect_oncpu(dst, gateway, netmask, flags, src); -#endif bzero(&rtinfo, sizeof(struct rt_addrinfo)); rtinfo.rti_info[RTAX_DST] = dst; rtinfo.rti_info[RTAX_GATEWAY] = gateway; @@ -548,8 +529,6 @@ rtredirect(struct sockaddr *dst, struct sockaddr *gateway, rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error); } -#ifdef SMP - static void rtredirect_msghandler(netmsg_t msg) { @@ -565,8 +544,6 @@ rtredirect_msghandler(netmsg_t msg) lwkt_replymsg(&msg->lmsg, 0); } -#endif - /* * Routing table ioctl interface. 
*/ @@ -729,8 +706,6 @@ rtrequest_global( return rtrequest1_global(req, &rtinfo, NULL, NULL); } -#ifdef SMP - struct netmsg_rtq { struct netmsg_base base; int req; @@ -739,14 +714,11 @@ struct netmsg_rtq { void *arg; }; -#endif - int rtrequest1_global(int req, struct rt_addrinfo *rtinfo, rtrequest1_callback_func_t callback, void *arg) { int error; -#ifdef SMP struct netmsg_rtq msg; netmsg_init(&msg.base, NULL, &curthread->td_msgport, @@ -757,15 +729,6 @@ rtrequest1_global(int req, struct rt_addrinfo *rtinfo, msg.callback = callback; msg.arg = arg; error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0); -#else - struct rtentry *rt = NULL; - - error = rtrequest1(req, rtinfo, &rt); - if (rt) - --rt->rt_refcnt; - if (callback) - callback(req, error, rtinfo, rt, arg); -#endif return (error); } @@ -774,8 +737,6 @@ rtrequest1_global(int req, struct rt_addrinfo *rtinfo, * are supposed to be identical on each cpu, an error occurring later in the * message chain is considered system-fatal. */ -#ifdef SMP - static void rtrequest1_msghandler(netmsg_t msg) { @@ -822,8 +783,6 @@ rtrequest1_msghandler(netmsg_t msg) } } -#endif - int rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt) { diff --git a/sys/netinet/if_ether.c b/sys/netinet/if_ether.c index 21b9047f92..a751bbbbe0 100644 --- a/sys/netinet/if_ether.c +++ b/sys/netinet/if_ether.c @@ -64,7 +64,6 @@ * * @(#)if_ether.c 8.1 (Berkeley) 6/10/93 * $FreeBSD: src/sys/netinet/if_ether.c,v 1.64.2.23 2003/04/11 07:23:15 fjoe Exp $ - * $DragonFly: src/sys/netinet/if_ether.c,v 1.59 2008/11/22 11:03:35 sephe Exp $ */ /* @@ -822,8 +821,6 @@ arp_update_oncpu(struct mbuf *m, in_addr_t saddr, boolean_t create, } } -#ifdef SMP - struct netmsg_arp_update { struct netmsg_base base; struct mbuf *m; @@ -833,8 +830,6 @@ struct netmsg_arp_update { static void arp_update_msghandler(netmsg_t msg); -#endif - /* * Called from arpintr() - this routine is run from a single cpu.
*/ @@ -850,9 +845,7 @@ in_arpinput(struct mbuf *m) struct in_ifaddr *ia = NULL; struct sockaddr sa; struct in_addr isaddr, itaddr, myaddr; -#ifdef SMP struct netmsg_arp_update msg; -#endif uint8_t *enaddr = NULL; int op; int req_len; @@ -1036,17 +1029,12 @@ match: return; } -#ifdef SMP netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, arp_update_msghandler); msg.m = m; msg.saddr = isaddr.s_addr; msg.create = (itaddr.s_addr == myaddr.s_addr); lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0); -#else - arp_update_oncpu(m, isaddr.s_addr, (itaddr.s_addr == myaddr.s_addr), - RTL_REPORTMSG, TRUE); -#endif reply: if (op != ARPOP_REQUEST) { m_freem(m); @@ -1124,8 +1112,6 @@ reply: ifp->if_output(ifp, m, &sa, NULL); } -#ifdef SMP - static void arp_update_msghandler(netmsg_t msg) { @@ -1147,8 +1133,6 @@ arp_update_msghandler(netmsg_t msg) lwkt_replymsg(&rmsg->base.lmsg, 0); } -#endif /* SMP */ - #endif /* INET */ /* diff --git a/sys/netinet/ip_divert.c b/sys/netinet/ip_divert.c index c15f4678bb..0bde56ba29 100644 --- a/sys/netinet/ip_divert.c +++ b/sys/netinet/ip_divert.c @@ -259,8 +259,6 @@ div_packet(struct mbuf *m, int incoming, int port) lwkt_reltoken(&div_token); } -#ifdef SMP - static void div_packet_handler(netmsg_t msg) { @@ -276,8 +274,6 @@ div_packet_handler(netmsg_t msg) /* no reply, msg embedded in mbuf */ } -#endif /* SMP */ - static void divert_packet(struct mbuf *m, int incoming) { @@ -299,7 +295,6 @@ divert_packet(struct mbuf *m, int incoming) port = divinfo->port; KASSERT(port != 0, ("%s: port=0", __func__)); -#ifdef SMP if (mycpuid != 0) { struct netmsg_packet *nmp; @@ -318,9 +313,6 @@ divert_packet(struct mbuf *m, int incoming) } else { div_packet(m, incoming, port); } -#else - div_packet(m, incoming, port); -#endif } /* diff --git a/sys/netinet/ip_flow.c b/sys/netinet/ip_flow.c index 110525451c..885eea313f 100644 --- a/sys/netinet/ip_flow.c +++ b/sys/netinet/ip_flow.c @@ -34,7 +34,6 @@ * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD: src/sys/netinet/ip_flow.c,v 1.9.2.2 2001/11/04 17:35:31 luigi Exp $ - * $DragonFly: src/sys/netinet/ip_flow.c,v 1.27 2008/10/28 07:09:26 sephe Exp $ */ #include @@ -432,7 +431,6 @@ ipflow_timo_ipi(void *arg __unused) void ipflow_slowtimo(void) { -#ifdef SMP cpumask_t mask = 0; int i; @@ -443,10 +441,6 @@ ipflow_slowtimo(void) mask &= smp_active_mask; if (mask != 0) lwkt_send_ipiq_mask(mask, ipflow_timo_ipi, NULL); -#else - if (ipflow_inuse) - ipflow_timo_ipi(NULL); -#endif } void diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c index b4569ba0a5..7dad28f21c 100644 --- a/sys/netinet/ip_input.c +++ b/sys/netinet/ip_input.c @@ -234,7 +234,7 @@ struct in_ifaddrhashhead *in_ifaddrhashtbls[MAXCPU]; u_long in_ifaddrhmask; /* mask for hash table */ struct ip_stats ipstats_percpu[MAXCPU]; -#ifdef SMP + static int sysctl_ipstats(SYSCTL_HANDLER_ARGS) { @@ -253,10 +253,6 @@ sysctl_ipstats(SYSCTL_HANDLER_ARGS) } SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW), 0, 0, sysctl_ipstats, "S,ip_stats", "IP statistics"); -#else -SYSCTL_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW, - &ipstat, ip_stats, "IP statistics"); -#endif /* Packet reassembly stuff */ #define IPREASS_NHASH_LOG2 6 @@ -326,9 +322,7 @@ ip_init(void) { struct protosw *pr; int i; -#ifdef SMP int cpu; -#endif /* * Make sure we can handle a reasonable number of fragments but @@ -373,13 +367,9 @@ ip_init(void) * Initialize IP statistics counters for each CPU. 
* */ -#ifdef SMP for (cpu = 0; cpu < ncpus; ++cpu) { bzero(&ipstats_percpu[cpu], sizeof(struct ip_stats)); } -#else - bzero(&ipstat, sizeof(struct ip_stats)); -#endif netisr_register(NETISR_IP, ip_input_handler, ip_cpufn_in); netisr_register_hashcheck(NETISR_IP, ip_hashcheck); diff --git a/sys/netinet/ip_var.h b/sys/netinet/ip_var.h index eb5afc666f..004abdc689 100644 --- a/sys/netinet/ip_var.h +++ b/sys/netinet/ip_var.h @@ -151,11 +151,7 @@ struct ip_stats { #ifdef _KERNEL -#if defined(SMP) #define ipstat ipstats_percpu[mycpuid] -#else /* !SMP */ -#define ipstat ipstats_percpu[0] -#endif extern struct ip_stats ipstats_percpu[MAXCPU]; diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c index 2cb88fc667..fdfff6b65b 100644 --- a/sys/netinet/tcp_subr.c +++ b/sys/netinet/tcp_subr.c @@ -278,7 +278,7 @@ static void tcp_willblock(void); static void tcp_notify (struct inpcb *, int); struct tcp_stats tcpstats_percpu[MAXCPU]; -#ifdef SMP + static int sysctl_tcpstats(SYSCTL_HANDLER_ARGS) { @@ -297,10 +297,6 @@ sysctl_tcpstats(SYSCTL_HANDLER_ARGS) } SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW), 0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics"); -#else -SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW, - &tcpstat, tcp_stats, "TCP statistics"); -#endif /* * Target size of TCP PCB hash tables. Must be a power of two. @@ -410,13 +406,9 @@ tcp_init(void) /* * Initialize TCP statistics counters for each CPU. */ -#ifdef SMP for (cpu = 0; cpu < ncpus; ++cpu) { bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats)); } -#else - bzero(&tcpstat, sizeof(struct tcp_stats)); -#endif syncache_init(); netisr_register_rollup(tcp_willblock); @@ -786,8 +778,6 @@ tcp_drop(struct tcpcb *tp, int error) return (tcp_close(tp)); } -#ifdef SMP - struct netmsg_listen_detach { struct netmsg_base base; struct tcpcb *nm_tp; @@ -812,8 +802,6 @@ tcp_listen_detach_handler(netmsg_t msg) lwkt_replymsg(&nmsg->base.lmsg, 0); } -#endif - /* * Close a TCP control block: * discard all space held by the tcp @@ -835,7 +823,6 @@ tcp_close(struct tcpcb *tp) const boolean_t isipv6 = FALSE; #endif -#ifdef SMP /* * INP_WILDCARD_MP indicates that listen(2) has been called on * this socket. This implies: @@ -868,7 +855,6 @@ tcp_close(struct tcpcb *tp) inp->inp_flags &= ~INP_WILDCARD_MP; } -#endif KKASSERT(tp->t_state != TCPS_TERMINATING); tp->t_state = TCPS_TERMINATING; @@ -1061,7 +1047,6 @@ tcp_drain_oncpu(struct inpcbhead *head) kfree(marker, M_TEMP); } -#ifdef SMP struct netmsg_tcp_drain { struct netmsg_base base; struct inpcbhead *nm_head; @@ -1075,14 +1060,11 @@ tcp_drain_handler(netmsg_t msg) tcp_drain_oncpu(nm->nm_head); lwkt_replymsg(&nm->base.lmsg, 0); } -#endif void tcp_drain(void) { -#ifdef SMP int cpu; -#endif if (!do_tcpdrain) return; @@ -1095,7 +1077,6 @@ tcp_drain(void) * where we're really low on mbufs, this is potentially * useful. 
*/ -#ifdef SMP for (cpu = 0; cpu < ncpus2; cpu++) { struct netmsg_tcp_drain *nm; @@ -1112,9 +1093,6 @@ tcp_drain(void) lwkt_sendmsg(netisr_portfn(cpu), &nm->base.lmsg); } } -#else - tcp_drain_oncpu(&tcbinfo[0].pcblisthead); -#endif } /* diff --git a/sys/netinet/tcp_usrreq.c b/sys/netinet/tcp_usrreq.c index 1a7e448786..46f0302bf4 100644 --- a/sys/netinet/tcp_usrreq.c +++ b/sys/netinet/tcp_usrreq.c @@ -357,8 +357,6 @@ tcp6_usr_bind(netmsg_t msg) } #endif /* INET6 */ -#ifdef SMP - struct netmsg_inswildcard { struct netmsg_base base; struct inpcb *nm_inp; @@ -379,8 +377,6 @@ in_pcbinswildcardhash_handler(netmsg_t msg) lwkt_replymsg(&nm->base.lmsg, 0); } -#endif - /* * Prepare to accept connections. */ @@ -392,9 +388,7 @@ tcp_usr_listen(netmsg_t msg) int error = 0; struct inpcb *inp; struct tcpcb *tp; -#ifdef SMP struct netmsg_inswildcard nm; -#endif COMMON_START(so, inp, 0); @@ -411,7 +405,6 @@ tcp_usr_listen(netmsg_t msg) tp->t_flags |= TF_LISTEN; tp->tt_msg = NULL; /* Catch any invalid timer usage */ -#ifdef SMP if (ncpus > 1) { /* * We have to set the flag because we can't have other cpus @@ -434,7 +427,6 @@ tcp_usr_listen(netmsg_t msg) nm.nm_inp = inp; lwkt_domsg(netisr_portfn(1), &nm.base.lmsg, 0); } -#endif in_pcbinswildcardhash(inp); COMMON_END(PRU_LISTEN); } @@ -449,9 +441,7 @@ tcp6_usr_listen(netmsg_t msg) int error = 0; struct inpcb *inp; struct tcpcb *tp; -#ifdef SMP struct netmsg_inswildcard nm; -#endif COMMON_START(so, inp, 0); @@ -472,7 +462,6 @@ tcp6_usr_listen(netmsg_t msg) tp->t_flags |= TF_LISTEN; tp->tt_msg = NULL; /* Catch any invalid timer usage */ -#ifdef SMP if (ncpus > 1) { /* * We have to set the flag because we can't have other cpus @@ -495,7 +484,6 @@ tcp6_usr_listen(netmsg_t msg) nm.nm_inp = inp; lwkt_domsg(netisr_portfn(1), &nm.base.lmsg, 0); } -#endif in_pcbinswildcardhash(inp); COMMON_END(PRU_LISTEN); } @@ -1050,9 +1038,7 @@ tcp_connect(netmsg_t msg) struct inpcb *inp; struct tcpcb *tp; int error, calc_laddr = 1; -#ifdef SMP lwkt_port_t port; -#endif COMMON_START(so, inp, 0); @@ -1101,7 +1087,6 @@ tcp_connect(netmsg_t msg) } KKASSERT(inp->inp_socket == so); -#ifdef SMP port = tcp_addrport(sin->sin_addr.s_addr, sin->sin_port, (inp->inp_laddr.s_addr ? 
inp->inp_laddr.s_addr : if_sin->sin_addr.s_addr), @@ -1133,9 +1118,6 @@ tcp_connect(netmsg_t msg) /* msg invalid now */ return; } -#else - KKASSERT(so->so_port == &curthread->td_msgport); -#endif error = tcp_connect_oncpu(tp, msg->connect.nm_flags, msg->connect.nm_m, sin, if_sin); msg->connect.nm_m = NULL; @@ -1164,9 +1146,7 @@ tcp6_connect(netmsg_t msg) struct inpcb *inp; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam; struct in6_addr *addr6; -#ifdef SMP lwkt_port_t port; -#endif int error; COMMON_START(so, inp, 0); @@ -1197,7 +1177,6 @@ tcp6_connect(netmsg_t msg) if (error) goto out; -#ifdef SMP port = tcp6_addrport(); /* XXX hack for now, always cpu0 */ if (port != &curthread->td_msgport) { @@ -1221,7 +1200,6 @@ tcp6_connect(netmsg_t msg) /* msg invalid now */ return; } -#endif error = tcp6_connect_oncpu(tp, msg->connect.nm_flags, &msg->connect.nm_m, sin6, addr6); /* nm_m may still be intact */ diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h index 49cae36d3a..0fb0ed5a76 100644 --- a/sys/netinet/tcp_var.h +++ b/sys/netinet/tcp_var.h @@ -65,7 +65,6 @@ * * @(#)tcp_var.h 8.4 (Berkeley) 5/24/95 * $FreeBSD: src/sys/netinet/tcp_var.h,v 1.56.2.13 2003/02/03 02:34:07 hsu Exp $ - * $DragonFly: src/sys/netinet/tcp_var.h,v 1.42 2008/10/27 02:56:30 sephe Exp $ */ #ifndef _NETINET_TCP_VAR_H_ @@ -442,11 +441,7 @@ struct tcp_stats { #ifdef _KERNEL -#if defined(SMP) #define tcpstat tcpstats_percpu[mycpuid] -#else -#define tcpstat tcpstats_percpu[0] -#endif struct sockopt; diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c index 53c19e1ae7..4ad44c2258 100644 --- a/sys/netinet/udp_usrreq.c +++ b/sys/netinet/udp_usrreq.c @@ -646,7 +646,6 @@ udp_notifyall_oncpu(netmsg_t msg) static void udp_rtchange(struct inpcb *inp, int err) { -#ifdef SMP /* XXX Nuke this, once UDP inpcbs are CPU localized */ if (inp->inp_route.ro_rt && inp->inp_route.ro_rt->rt_cpuid == mycpuid) { rtfree(inp->inp_route.ro_rt); @@ -656,9 +655,6 @@ udp_rtchange(struct inpcb *inp, int err) * output is attempted. */ } -#else - in_rtchange(inp, err); -#endif } void @@ -1102,7 +1098,6 @@ udp_connect(netmsg_t msg) port = udp_addrport(sin->sin_addr.s_addr, sin->sin_port, inp->inp_laddr.s_addr, inp->inp_lport); -#ifdef SMP if (port != &curthread->td_msgport) { #ifdef notyet struct route *ro = &inp->inp_route; @@ -1134,7 +1129,6 @@ udp_connect(netmsg_t msg) panic("UDP activity should only be in netisr0"); #endif } -#endif KKASSERT(port == &curthread->td_msgport); error = udp_connect_oncpu(so, td, sin, if_sin); out: diff --git a/sys/netproto/mpls/mpls_input.c b/sys/netproto/mpls/mpls_input.c index 1af58943fb..e50688c14b 100644 --- a/sys/netproto/mpls/mpls_input.c +++ b/sys/netproto/mpls/mpls_input.c @@ -27,8 +27,6 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $DragonFly: src/sys/netproto/mpls/mpls_input.c,v 1.4 2008/09/24 14:26:39 sephe Exp $ */ #include @@ -63,21 +61,15 @@ static void mpls_forward(struct mbuf *); void mpls_init(void) { -#ifdef SMP int cpu; -#endif /* * Initialize MPLS statistics counters for each CPU. 
* */ -#ifdef SMP for (cpu = 0; cpu < ncpus; ++cpu) { bzero(&mplsstats_percpu[cpu], sizeof(struct mpls_stats)); } -#else - bzero(&mplsstat, sizeof(struct mpls_stats)); -#endif netisr_register(NETISR_MPLS, mpls_input_handler, mpls_cpufn); } diff --git a/sys/netproto/mpls/mpls_var.h b/sys/netproto/mpls/mpls_var.h index 46f2a420e4..45ee5eb43c 100644 --- a/sys/netproto/mpls/mpls_var.h +++ b/sys/netproto/mpls/mpls_var.h @@ -27,8 +27,6 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $DragonFly: src/sys/netproto/mpls/mpls_var.h,v 1.2 2008/08/05 15:11:32 nant Exp $ */ #ifndef _NETMPLS_MPLS_VAR_H_ @@ -51,11 +49,7 @@ struct mpls_stats { #ifdef _KERNEL -#if defined(SMP) #define mplsstat mplsstats_percpu[mycpuid] -#else /* !SMP */ -#define mplsstat mplsstats_percpu[0] -#endif extern struct mpls_stats mplsstats_percpu[MAXCPU]; diff --git a/sys/platform/pc32/apic/apic_vector.s b/sys/platform/pc32/apic/apic_vector.s index a9be21ef84..edadfce4cb 100644 --- a/sys/platform/pc32/apic/apic_vector.s +++ b/sys/platform/pc32/apic/apic_vector.s @@ -25,11 +25,7 @@ /* convert an absolute IRQ# into ipending index */ #define IRQ_LIDX(irq_num) ((irq_num) >> 5) -#ifdef SMP #define MPLOCKED lock ; -#else -#define MPLOCKED -#endif /* * Push an interrupt frame in a format acceptable to doreti, reload @@ -183,8 +179,6 @@ Xspuriousint: iret -#ifdef SMP - /* * Handle TLB shootdowns. * @@ -324,8 +318,6 @@ Xipiq: MEXITCOUNT jmp doreti_syscall_ret -#endif /* SMP */ - .text SUPERALIGN_TEXT .globl Xtimer diff --git a/sys/platform/pc32/apic/lapic.c b/sys/platform/pc32/apic/lapic.c index 174a8301e2..1bdf72dc98 100644 --- a/sys/platform/pc32/apic/lapic.c +++ b/sys/platform/pc32/apic/lapic.c @@ -153,7 +153,6 @@ lapic_init(boolean_t bsp) setidt(XTIMER_OFFSET, Xtimer, SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); -#ifdef SMP /* Install an inter-CPU IPI for TLB invalidation */ setidt(XINVLTLB_OFFSET, Xinvltlb, SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); @@ -165,7 +164,6 @@ lapic_init(boolean_t bsp) /* Install an inter-CPU IPI for CPU stop/restart */ setidt(XCPUSTOP_OFFSET, Xcpustop, SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); -#endif } /* @@ -447,22 +445,14 @@ lapic_timer_restart_handler(void *dummy __unused) static void lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused) { -#ifdef SMP lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_fixup_handler, NULL); -#else - lapic_timer_fixup_handler(NULL); -#endif } static void lapic_timer_intr_restart(struct cputimer_intr *cti __unused) { -#ifdef SMP lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL); -#else - lapic_timer_restart_handler(NULL); -#endif } @@ -477,8 +467,6 @@ apic_dump(char* str) lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr); } -#ifdef SMP - /* * Inter Processor Interrupt functions. */ @@ -605,8 +593,6 @@ selected_apic_ipi(cpumask_t target, int vector, int delivery_mode) crit_exit(); } -#endif /* SMP */ - /* * Timer code, in development... * - suggested by rgrimes@gndrsh.aac.dev.com diff --git a/sys/platform/pc32/apic/lapic.h b/sys/platform/pc32/apic/lapic.h index cae676e0e4..9a06308920 100644 --- a/sys/platform/pc32/apic/lapic.h +++ b/sys/platform/pc32/apic/lapic.h @@ -23,7 +23,6 @@ * SUCH DAMAGE. 
* * $FreeBSD: src/sys/i386/include/mpapic.h,v 1.14.2.2 2000/09/30 02:49:34 ps Exp $ - * $DragonFly: src/sys/platform/pc32/apic/mpapic.h,v 1.12 2008/06/07 11:37:23 mneumann Exp $ */ #ifndef _ARCH_APIC_LAPIC_H_ @@ -70,8 +69,6 @@ void lapic_map(vm_paddr_t); int lapic_unused_apic_id(int); void lapic_fixup_noioapic(void); -#ifdef SMP - #ifndef _MACHINE_SMP_H_ #include #endif @@ -92,6 +89,4 @@ all_but_self_ipi(int vector) return apic_ipi(APIC_DEST_ALLESELF, vector, APIC_DELMODE_FIXED); } -#endif /* SMP */ - #endif /* _ARCH_APIC_LAPIC_H_ */ diff --git a/sys/platform/pc32/conf/files b/sys/platform/pc32/conf/files index c1cf5eb0eb..33491d4b66 100644 --- a/sys/platform/pc32/conf/files +++ b/sys/platform/pc32/conf/files @@ -228,9 +228,9 @@ platform/pc32/apic/ioapic.c standard platform/pc32/apic/ioapic_abi.c standard platform/pc32/apic/ioapic_ipl.s standard platform/pc32/apic/apic_vector.s standard -platform/pc32/i386/mpboot.s optional smp -platform/pc32/i386/mp_clock.c optional smp -platform/pc32/i386/mp_machdep.c optional smp +platform/pc32/i386/mpboot.s standard +platform/pc32/i386/mp_clock.c standard +platform/pc32/i386/mp_machdep.c standard platform/pc32/i386/mptable.c standard platform/pc32/i386/nexus.c standard platform/pc32/i386/p4tcc.c optional cpu_enable_tcc diff --git a/sys/platform/pc32/i386/busdma_machdep.c b/sys/platform/pc32/i386/busdma_machdep.c index f9dd80e10b..6a5431cffb 100644 --- a/sys/platform/pc32/i386/busdma_machdep.c +++ b/sys/platform/pc32/i386/busdma_machdep.c @@ -76,11 +76,7 @@ struct bus_dma_tag { int map_count; bus_dma_segment_t *segments; struct bounce_zone *bounce_zone; -#ifdef SMP struct spinlock spin; -#else - int unused0; -#endif }; /* @@ -109,11 +105,7 @@ struct bounce_zone { STAILQ_ENTRY(bounce_zone) links; STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; -#ifdef SMP struct spinlock spin; -#else - int unused0; -#endif int total_bpages; int free_bpages; int reserved_bpages; @@ -129,13 +121,8 @@ struct bounce_zone { struct sysctl_oid *sysctl_tree; }; -#ifdef SMP #define BZ_LOCK(bz) spin_lock(&(bz)->spin) #define BZ_UNLOCK(bz) spin_unlock(&(bz)->spin) -#else -#define BZ_LOCK(bz) crit_enter() -#define BZ_UNLOCK(bz) crit_exit() -#endif static struct lwkt_token bounce_zone_tok = LWKT_TOKEN_INITIALIZER(bounce_zone_tok); @@ -220,9 +207,7 @@ bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache) if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS) return(cache); -#ifdef SMP spin_lock(&tag->spin); -#endif return(tag->segments); } @@ -230,13 +215,11 @@ static __inline void bus_dma_tag_unlock(bus_dma_tag_t tag) { -#ifdef SMP if (tag->flags & BUS_DMA_PROTECTED) return; if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS) spin_unlock(&tag->spin); -#endif } /* @@ -276,9 +259,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO); -#ifdef SMP spin_init(&newtag->spin); -#endif newtag->parent = parent; newtag->alignment = alignment; newtag->boundary = boundary; @@ -1123,9 +1104,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat) } bz = new_bz; -#ifdef SMP spin_init(&bz->spin); -#endif STAILQ_INIT(&bz->bounce_page_list); STAILQ_INIT(&bz->bounce_map_waitinglist); bz->free_bpages = 0; diff --git a/sys/platform/pc32/i386/db_interface.c b/sys/platform/pc32/i386/db_interface.c index f0285b0784..d9b49e7d8f 100644 --- a/sys/platform/pc32/i386/db_interface.c +++ b/sys/platform/pc32/i386/db_interface.c @@ -133,7 +133,6 @@ kdb_trap(int type, int code, struct i386_saved_state *regs) 
} crit_enter(); -#ifdef SMP db_printf("\nCPU%d stopping CPUs: 0x%08x\n", mycpu->gd_cpuid, mycpu->gd_other_cpus); @@ -141,7 +140,6 @@ kdb_trap(int type, int code, struct i386_saved_state *regs) stop_cpus(mycpu->gd_other_cpus); db_printf(" stopped\n"); -#endif /* SMP */ setjmp(db_global_jmpbuf); db_global_jmpbuf_valid = TRUE; @@ -155,7 +153,6 @@ kdb_trap(int type, int code, struct i386_saved_state *regs) db_active--; db_global_jmpbuf_valid = FALSE; -#ifdef SMP db_printf("\nCPU%d restarting CPUs: 0x%08x\n", mycpu->gd_cpuid, stopped_cpus); @@ -168,7 +165,6 @@ kdb_trap(int type, int code, struct i386_saved_state *regs) restart_cpus(stopped_cpus); db_printf(" restarted\n"); -#endif /* SMP */ crit_exit(); regs->tf_eip = ddb_regs.tf_eip; diff --git a/sys/platform/pc32/i386/genassym.c b/sys/platform/pc32/i386/genassym.c index 53d1b26170..bdeb6a5edf 100644 --- a/sys/platform/pc32/i386/genassym.c +++ b/sys/platform/pc32/i386/genassym.c @@ -35,7 +35,6 @@ * * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 * $FreeBSD: src/sys/i386/i386/genassym.c,v 1.86.2.3 2002/03/03 05:42:49 nyan Exp $ - * $DragonFly: src/sys/platform/pc32/i386/genassym.c,v 1.58 2008/05/09 06:35:11 dillon Exp $ */ #include @@ -97,10 +96,8 @@ ASSYM(MACHINTR_INTREN, offsetof(struct machintr_abi, intr_enable)); ASSYM(TD_SAVEFPU, offsetof(struct thread, td_mach) + offsetof(struct md_thread, mtd_savefpu)); ASSYM(TDPRI_INT_SUPPORT, TDPRI_INT_SUPPORT); -#ifdef SMP ASSYM(CPUMASK_LOCK, CPUMASK_LOCK); ASSYM(CPUMASK_BIT, CPUMASK_BIT); -#endif ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap)); ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall)); diff --git a/sys/platform/pc32/i386/i686_mem.c b/sys/platform/pc32/i386/i686_mem.c index d903058cb4..26189d6b1d 100644 --- a/sys/platform/pc32/i386/i686_mem.c +++ b/sys/platform/pc32/i386/i686_mem.c @@ -96,10 +96,8 @@ static int i686_mrt2mtrr(int flags, int oldval); static int i686_mtrrconflict(int flag1, int flag2); static void i686_mrstore(struct mem_range_softc *sc); static void i686_mrstoreone(void *arg); -#ifdef SMP static void i686_mrstoreone_cpusync(void *arg); static void i686_mrAPinit_cpusync(void *arg); -#endif static struct mem_range_desc *i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr); static int i686_mrsetlow(struct mem_range_softc *sc, @@ -278,7 +276,6 @@ i686_mrt2mtrr(int flags, int oldval) static void i686_mrstore(struct mem_range_softc *sc) { -#ifdef SMP /* * We should use ipi_all_but_self() to call other CPUs into a * locking gate, then call a target function to do this work. @@ -286,23 +283,14 @@ i686_mrstore(struct mem_range_softc *sc) * implementation, not ready yet. */ lwkt_cpusync_simple(-1, i686_mrstoreone_cpusync, sc); -#else - mpintr_lock(); - i686_mrstoreone(sc); - mpintr_unlock(); -#endif } -#ifdef SMP - static void i686_mrstoreone_cpusync(void *arg) { i686_mrstoreone(arg); } -#endif - /* * Update the current CPU's MTRRs with those represented in the * descriptor list. Note that we do this wholesale rather than just @@ -673,16 +661,12 @@ i686_mrinit(struct mem_range_softc *sc) } } -#ifdef SMP - static void i686_mrAPinit_cpusync(void *arg) { i686_mrAPinit(arg); } -#endif - /* * Initialise MTRRs on an AP after the BSP has run the init code. */ @@ -703,7 +687,6 @@ i686_mrAPinit(struct mem_range_softc *sc) static void i686_mrreinit(struct mem_range_softc *sc) { -#ifdef SMP /* * We should use ipi_all_but_self() to call other CPUs into a * locking gate, then call a target function to do this work. 
@@ -711,11 +694,6 @@ i686_mrreinit(struct mem_range_softc *sc) * implementation, not ready yet. */ lwkt_cpusync_simple(-1, i686_mrAPinit_cpusync, sc); -#else - mpintr_lock(); - i686_mrAPinit(sc); - mpintr_unlock(); -#endif } static void diff --git a/sys/platform/pc32/i386/initcpu.c b/sys/platform/pc32/i386/initcpu.c index 6407f4eda9..0a49ed86d1 100644 --- a/sys/platform/pc32/i386/initcpu.c +++ b/sys/platform/pc32/i386/initcpu.c @@ -475,16 +475,6 @@ init_6x86MX(void) static void init_ppro(void) { -#ifndef SMP - u_int64_t apicbase; - - /* - * Local APIC should be diabled in UP kernel. - */ - apicbase = rdmsr(0x1b); - apicbase &= ~0x800LL; - wrmsr(0x1b, apicbase); -#endif } /* diff --git a/sys/platform/pc32/i386/ipl.s b/sys/platform/pc32/i386/ipl.s index 01f232456b..7d4ec0c6e8 100644 --- a/sys/platform/pc32/i386/ipl.s +++ b/sys/platform/pc32/i386/ipl.s @@ -116,10 +116,8 @@ doreti_next: cli /* re-assert cli on loop */ movl %eax,%ecx /* irq mask unavailable due to BGL */ notl %ecx -#ifdef SMP testl $RQF_IPIQ,PCPU(reqflags) jnz doreti_ipiq -#endif testl $RQF_TIMER,PCPU(reqflags) jnz doreti_timer /* @@ -302,7 +300,6 @@ doreti_ast: movl %esi,%eax /* restore cpl for loop */ jmp doreti_next -#ifdef SMP /* * IPIQ message pending. We clear RQF_IPIQ automatically. */ @@ -318,7 +315,6 @@ doreti_ipiq: decl PCPU(intr_nesting_level) movl %esi,%eax /* restore cpl for loop */ jmp doreti_next -#endif doreti_timer: movl %eax,%esi /* save cpl (can't use stack) */ @@ -354,10 +350,8 @@ splz_next: cli movl %eax,%ecx /* ecx = ~CPL */ notl %ecx -#ifdef SMP testl $RQF_IPIQ,PCPU(reqflags) jnz splz_ipiq -#endif testl $RQF_TIMER,PCPU(reqflags) jnz splz_timer @@ -452,7 +446,6 @@ splz_soft: popl %eax jmp splz_next -#ifdef SMP splz_ipiq: andl $~RQF_IPIQ,PCPU(reqflags) sti @@ -460,7 +453,6 @@ splz_ipiq: call lwkt_process_ipiq popl %eax jmp splz_next -#endif splz_timer: andl $~RQF_TIMER,PCPU(reqflags) diff --git a/sys/platform/pc32/i386/locore.s b/sys/platform/pc32/i386/locore.s index 37d28fbf20..19c6eb88c6 100644 --- a/sys/platform/pc32/i386/locore.s +++ b/sys/platform/pc32/i386/locore.s @@ -35,7 +35,6 @@ * * from: @(#)locore.s 7.3 (Berkeley) 5/13/91 * $FreeBSD: src/sys/i386/i386/locore.s,v 1.132.2.10 2003/02/03 20:54:49 jhb Exp $ - * $DragonFly: src/sys/platform/pc32/i386/locore.s,v 1.13 2007/01/08 03:33:42 dillon Exp $ * * originally from: locore.s, by William F. Jolitz * @@ -718,12 +717,6 @@ no_kernend: #endif xorl %edx,%edx -#if !defined(SMP) - testl $CPUID_PGE, R(cpu_feature) - jz 2f - orl $PG_G,%edx -#endif - 2: movl $R(etext),%ecx addl $PAGE_MASK,%ecx shrl $PAGE_SHIFT,%ecx @@ -735,12 +728,7 @@ no_kernend: andl $~PAGE_MASK, %eax map_read_write: movl $PG_RW,%edx -#if !defined(SMP) - testl $CPUID_PGE, R(cpu_feature) - jz 1f - orl $PG_G,%edx -#endif - + 1: movl R(KERNend),%ecx subl %eax,%ecx shrl $PAGE_SHIFT,%ecx diff --git a/sys/platform/pc32/i386/machdep.c b/sys/platform/pc32/i386/machdep.c index 5f7e68a738..9cc82a6601 100644 --- a/sys/platform/pc32/i386/machdep.c +++ b/sys/platform/pc32/i386/machdep.c @@ -153,11 +153,7 @@ SYSINIT(cpu_finish, SI_BOOT2_FINISH_CPU, SI_ORDER_FIRST, cpu_finish, NULL) int _udatasel, _ucodesel; u_int atdevbase; -#ifdef SMP int64_t tsc_offsets[MAXCPU]; -#else -int64_t tsc_offsets[1]; -#endif #if defined(SWTCH_OPTIM_STATS) extern int swtch_optim_stats; @@ -993,8 +989,6 @@ cpu_idle(void) } } -#ifdef SMP - /* * This routine is called if a spinlock has been held through the * exponential backoff period and is seriously contested. 
On a real cpu @@ -1006,8 +1000,6 @@ cpu_spinlock_contested(void) cpu_pause(); } -#endif - /* * Clear registers on exec */ @@ -1680,10 +1672,8 @@ physmap_done: */ base_memory = physmap[1]; -#ifdef SMP /* make hole for AP bootstrap code YYY */ physmap[1] = mp_bootaddress(base_memory); -#endif /* Save EBDA address, if any */ ebda_addr = (u_long)(*(u_short *)(KERNBASE + 0x40e)); @@ -2669,13 +2659,11 @@ struct spinlock_deprecated smp_rv_spinlock; static void init_locks(void) { -#ifdef SMP /* * Get the initial mplock with a count of 1 for the BSP. * This uses a LOGICAL cpu ID, ie BSP == 0. */ cpu_get_initial_mplock(); -#endif /* DEPRECATED */ spin_lock_init(&mcount_spinlock); spin_lock_init(&intr_spinlock); diff --git a/sys/platform/pc32/i386/perfmon.c b/sys/platform/pc32/i386/perfmon.c index 6055c4607b..4faab0dbb5 100644 --- a/sys/platform/pc32/i386/perfmon.c +++ b/sys/platform/pc32/i386/perfmon.c @@ -37,25 +37,15 @@ #include #include -#ifndef SMP -#include -#endif #include #include static int perfmon_inuse; static int perfmon_cpuok; -#ifndef SMP -static int msr_ctl[NPMC]; -#endif static int msr_pmc[NPMC]; static unsigned int ctl_shadow[NPMC]; static quad_t pmc_shadow[NPMC]; /* used when ctr is stopped on P5 */ static int (*writectl)(int); -#ifndef SMP -static int writectl5(int); -static int writectl6(int); -#endif static d_close_t perfmon_close; static d_open_t perfmon_open; @@ -89,30 +79,6 @@ SYSINIT(perfmondrv, SI_SUB_DRIVERS, SI_ORDER_ANY, perfmon_driver_init, NULL) void perfmon_init(void) { -#ifndef SMP - switch(cpu_class) { - case CPUCLASS_586: - perfmon_cpuok = 1; - msr_ctl[0] = 0x11; - msr_ctl[1] = 0x11; - msr_pmc[0] = 0x12; - msr_pmc[1] = 0x13; - writectl = writectl5; - break; - case CPUCLASS_686: - perfmon_cpuok = 1; - msr_ctl[0] = 0x186; - msr_ctl[1] = 0x187; - msr_pmc[0] = 0xc1; - msr_pmc[1] = 0xc2; - writectl = writectl6; - break; - - default: - perfmon_cpuok = 0; - break; - } -#endif /* SMP */ } int @@ -229,59 +195,6 @@ perfmon_reset(int pmc) return EBUSY; } -#ifndef SMP -/* - * Unfortunately, the performance-monitoring registers are laid out - * differently in the P5 and P6. We keep everything in P6 format - * internally (except for the event code), and convert to P5 - * format as needed on those CPUs. The writectl function pointer - * is set up to point to one of these functions by perfmon_init(). - */ -int -writectl6(int pmc) -{ - if (pmc > 0 && !(ctl_shadow[pmc] & (PMCF_EN << 16))) { - wrmsr(msr_ctl[pmc], 0); - } else { - wrmsr(msr_ctl[pmc], ctl_shadow[pmc]); - } - return 0; -} - -#define P5FLAG_P 0x200 -#define P5FLAG_E 0x100 -#define P5FLAG_USR 0x80 -#define P5FLAG_OS 0x40 - -int -writectl5(int pmc) -{ - quad_t newval = 0; - - if (ctl_shadow[1] & (PMCF_EN << 16)) { - if (ctl_shadow[1] & (PMCF_USR << 16)) - newval |= P5FLAG_USR << 16; - if (ctl_shadow[1] & (PMCF_OS << 16)) - newval |= P5FLAG_OS << 16; - if (!(ctl_shadow[1] & (PMCF_E << 16))) - newval |= P5FLAG_E << 16; - newval |= (ctl_shadow[1] & 0x3f) << 16; - } - if (ctl_shadow[0] & (PMCF_EN << 16)) { - if (ctl_shadow[0] & (PMCF_USR << 16)) - newval |= P5FLAG_USR; - if (ctl_shadow[0] & (PMCF_OS << 16)) - newval |= P5FLAG_OS; - if (!(ctl_shadow[0] & (PMCF_E << 16))) - newval |= P5FLAG_E; - newval |= ctl_shadow[0] & 0x3f; - } - - wrmsr(msr_ctl[0], newval); - return 0; /* XXX should check for unimplemented bits */ -} -#endif /* !SMP */ - /* * Now the user-mode interface, called from a subdevice of mem.c. 
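cpu_spinlock_contested() is now always compiled in: once a spinlock has
outlasted the exponential backoff, the loser just spins politely with
cpu_pause(). A minimal user-space analogue of that contested loop, assuming
x86 and GCC/Clang builtins (the function and variable names are illustrative,
not from the source):

	#include <stdatomic.h>

	/* Spin until the lock word clears, executing PAUSE each
	 * iteration so the core stops hammering the cacheline and
	 * yields resources to a sibling hyperthread. */
	static void
	spin_contested(atomic_int *lock)
	{
		while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
			__builtin_ia32_pause();	/* analogue of cpu_pause() */
	}

Relaxed ordering suffices in the wait loop because the subsequent lock
attempt performs the acquiring atomic.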
*/ diff --git a/sys/platform/pc32/i386/pmap.c b/sys/platform/pc32/i386/pmap.c index 1381a84ac7..1f42bf045b 100644 --- a/sys/platform/pc32/i386/pmap.c +++ b/sys/platform/pc32/i386/pmap.c @@ -426,12 +426,7 @@ pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr) * cases rather then invl1pg. Actually, I don't even know why it * works under UP because self-referential page table mappings */ -#ifdef SMP pgeflag = 0; -#else - if (cpu_feature & CPUID_PGE) - pgeflag = PG_G; -#endif /* * Initialize the 4MB page size flag @@ -454,28 +449,6 @@ pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr) ptditmp &= ~(NBPDR - 1); ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag; pdir4mb = ptditmp; - -#ifndef SMP - /* - * Enable the PSE mode. If we are SMP we can't do this - * now because the APs will not be able to use it when - * they boot up. - */ - load_cr4(rcr4() | CR4_PSE); - - /* - * We can do the mapping here for the single processor - * case. We simply ignore the old page table page from - * now on. - */ - /* - * For SMP, we still need 4K pages to bootstrap APs, - * PSE will be enabled as soon as all APs are up. - */ - PTD[KPTDI] = (pd_entry_t)ptditmp; - kernel_pmap.pm_pdir[KPTDI] = (pd_entry_t)ptditmp; - cpu_invltlb(); -#endif } #endif @@ -500,7 +473,6 @@ pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr) cpu_invltlb(); } -#ifdef SMP /* * Set 4mb pdir for mp startup */ @@ -516,7 +488,6 @@ pmap_set_opt(void) } } } -#endif /* * Initialize the pmap module, called by vm_init() @@ -949,9 +920,7 @@ pmap_qenter(vm_offset_t va, vm_page_t *m, int count) va += PAGE_SIZE; m++; } -#ifdef SMP smp_invltlb(); /* XXX */ -#endif } /* @@ -974,9 +943,7 @@ pmap_qremove(vm_offset_t va, int count) cpu_invlpg((void *)va); va += PAGE_SIZE; } -#ifdef SMP smp_invltlb(); -#endif } /* @@ -2778,17 +2745,10 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired, * the pmap_inval_*() API that is)... it's ok to do this for simple * wiring changes. */ -#ifdef SMP if (wired) atomic_set_int(pte, PG_W); else atomic_clear_int(pte, PG_W); -#else - if (wired) - atomic_set_int_nonlocked(pte, PG_W); - else - atomic_clear_int_nonlocked(pte, PG_W); -#endif lwkt_reltoken(&vm_token); } @@ -3285,11 +3245,7 @@ pmap_ts_referenced(vm_page_t m) pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); if (pte && (*pte & PG_A)) { -#ifdef SMP atomic_clear_int(pte, PG_A); -#else - atomic_clear_int_nonlocked(pte, PG_A); -#endif rtval++; if (rtval > 4) { break; @@ -3574,30 +3530,21 @@ pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm) lp->lwp_vmspace = newvm; if (curthread->td_lwp == lp) { pmap = vmspace_pmap(newvm); -#if defined(SMP) atomic_set_cpumask(&pmap->pm_active, mycpu->gd_cpumask); if (pmap->pm_active & CPUMASK_LOCK) pmap_interlock_wait(newvm); -#else - pmap->pm_active |= 1; -#endif #if defined(SWTCH_OPTIM_STATS) tlb_flush_count++; #endif curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pdir); load_cr3(curthread->td_pcb->pcb_cr3); pmap = vmspace_pmap(oldvm); -#if defined(SMP) atomic_clear_cpumask(&pmap->pm_active, mycpu->gd_cpumask); -#else - pmap->pm_active &= ~(cpumask_t)1; -#endif } } } -#ifdef SMP /* * Called when switching to a locked pmap, used to interlock against pmaps * undergoing modifications to prevent us from activating the MMU for the @@ -3624,8 +3571,6 @@ pmap_interlock_wait(struct vmspace *vm) } } -#endif - /* * Return a page-directory alignment hint for device mappings which will * allow the use of super-pages for the mapping. 
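The wiring-bit updates above lose their *_nonlocked UP variants: with SMP
unconditional, PTE bit flips must be locked atomics, since another CPU may
concurrently walk or modify the same page tables. A user-space C11 sketch of
the same set/clear-under-concurrency pattern (the PG_W value and names are
illustrative):

	#include <stdatomic.h>
	#include <stdint.h>

	#define PG_W	0x200u		/* illustrative "wired" PTE bit */

	static void
	pte_set_wired(_Atomic uint32_t *pte, int wired)
	{
		if (wired)
			atomic_fetch_or_explicit(pte, PG_W,
			    memory_order_relaxed);
		else
			atomic_fetch_and_explicit(pte, ~PG_W,
			    memory_order_relaxed);
	}

On x86 these compile to lock or / lock and, which is what the kernel's
atomic_set_int()/atomic_clear_int() expand to as well.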
diff --git a/sys/platform/pc32/i386/pmap_inval.c b/sys/platform/pc32/i386/pmap_inval.c index 05e65e56e8..a57840df29 100644 --- a/sys/platform/pc32/i386/pmap_inval.c +++ b/sys/platform/pc32/i386/pmap_inval.c @@ -84,7 +84,6 @@ void pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va) { cpumask_t oactive; -#ifdef SMP cpumask_t nactive; DEBUG_PUSH_INFO("pmap_inval_interlock"); @@ -97,9 +96,6 @@ pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va) cpu_pause(); } DEBUG_POP_INFO(); -#else - oactive = pmap->pm_active & ~CPUMASK_LOCK; -#endif KKASSERT((info->pir_flags & PIRF_CPUSYNC) == 0); info->pir_va = va; info->pir_flags = PIRF_CPUSYNC; @@ -111,9 +107,7 @@ void pmap_inval_deinterlock(pmap_inval_info_t info, pmap_t pmap) { KKASSERT(info->pir_flags & PIRF_CPUSYNC); -#ifdef SMP atomic_clear_cpumask(&pmap->pm_active, CPUMASK_LOCK); -#endif lwkt_cpusync_deinterlock(&info->pir_cpusync); info->pir_flags = 0; } diff --git a/sys/platform/pc32/i386/support.s b/sys/platform/pc32/i386/support.s index cbb2043763..34f20febcc 100644 --- a/sys/platform/pc32/i386/support.s +++ b/sys/platform/pc32/i386/support.s @@ -308,9 +308,7 @@ ENTRY(casuword) cmpl $VM_MAX_USER_ADDRESS-4,%edx /* verify address is valid */ ja fusufault -#ifdef SMP lock -#endif cmpxchgl %ecx,(%edx) /* Compare and set. */ /* diff --git a/sys/platform/pc32/i386/swtch.s b/sys/platform/pc32/i386/swtch.s index 0a6d4adc3e..1b0fb931a5 100644 --- a/sys/platform/pc32/i386/swtch.s +++ b/sys/platform/pc32/i386/swtch.s @@ -66,7 +66,6 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $ - * $DragonFly: src/sys/platform/pc32/i386/swtch.s,v 1.47 2007/06/29 21:54:10 dillon Exp $ */ #include "use_npx.h" @@ -82,11 +81,7 @@ #include "assym.s" -#if defined(SMP) #define MPLOCKED lock ; -#else -#define MPLOCKED -#endif .data @@ -309,7 +304,6 @@ ENTRY(cpu_heavy_restore) * wait for it to complete before we can continue. */ movl LWP_VMSPACE(%ecx), %ecx /* ECX = vmspace */ -#ifdef SMP pushl %eax /* save curthread */ 1: movl VM_PMAP+PM_ACTIVE(%ecx),%eax /* old value for cmpxchgl */ @@ -336,10 +330,6 @@ ENTRY(cpu_heavy_restore) jmp 2f 1: popl %eax -#else - movl PCPU(cpumask), %esi - orl %esi, VM_PMAP+PM_ACTIVE(%ecx) -#endif /* * Restore the MMU address space. If it is the same as the last @@ -559,12 +549,10 @@ ENTRY(cpu_idle_restore) movl %ecx,%cr3 andl $~TDF_RUNNING,TD_FLAGS(%ebx) orl $TDF_RUNNING,TD_FLAGS(%eax) /* manual, no switch_return */ -#ifdef SMP cmpl $0,PCPU(cpuid) je 1f call ap_init 1: -#endif /* * ap_init can decide to enable interrupts early, but otherwise, or if * we are UP, do it here. 
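casuword() keeps its lock prefix unconditionally; on a uniprocessor the
prefix is redundant but harmless, and dropping the #ifdef removes one kernel
flavor. In C11 terms the cmpxchg sequence implements a compare-and-swap that
reports the value it actually observed (a hedged sketch; names are
illustrative):

	#include <stdatomic.h>

	/* Atomically replace *p with newv iff *p still equals oldv.
	 * Returns the value found; the caller compares it against
	 * oldv to detect success, just as the assembly leaves the
	 * observed value in %eax/%rax. */
	static unsigned long
	cas_word(_Atomic unsigned long *p, unsigned long oldv,
	    unsigned long newv)
	{
		atomic_compare_exchange_strong(p, &oldv, newv);
		return (oldv);	/* updated to the observed value on failure */
	}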
diff --git a/sys/platform/pc32/i386/sys_machdep.c b/sys/platform/pc32/i386/sys_machdep.c index b7d86f6801..79c4228b00 100644 --- a/sys/platform/pc32/i386/sys_machdep.c +++ b/sys/platform/pc32/i386/sys_machdep.c @@ -255,23 +255,17 @@ set_user_TLS(void) { struct thread *td = curthread; int i; -#ifdef SMP int off = GTLS_START + mycpu->gd_cpuid * NGDT; -#else - const int off = GTLS_START; -#endif for (i = 0; i < NGTLS; ++i) gdt[off + i].sd = td->td_tls.tls[i]; } -#ifdef SMP static void set_user_ldt_cpusync(void *arg) { set_user_ldt(arg); } -#endif /* * Update the GDT entry pointing to the LDT to point to the LDT of the @@ -289,11 +283,7 @@ set_user_ldt(struct pcb *pcb) return; pcb_ldt = pcb->pcb_ldt; -#ifdef SMP gdt[mycpu->gd_cpuid * NGDT + GUSERLDT_SEL].sd = pcb_ldt->ldt_sd; -#else - gdt[GUSERLDT_SEL].sd = pcb_ldt->ldt_sd; -#endif lldt(GSEL(GUSERLDT_SEL, SEL_KPL)); mdcpu->gd_currentldt = GSEL(GUSERLDT_SEL, SEL_KPL); } @@ -448,11 +438,7 @@ ki386_set_ldt(struct lwp *lp, char *args, int *res) * reload it. XXX we need to track which cpus might be * using the shared ldt and only signal those. */ -#ifdef SMP lwkt_cpusync_simple(-1, set_user_ldt_cpusync, pcb); -#else - set_user_ldt(pcb); -#endif } descs_size = uap->num * sizeof(union descriptor); diff --git a/sys/platform/pc32/i386/trap.c b/sys/platform/pc32/i386/trap.c index 5e429d81e0..92ca39e267 100644 --- a/sys/platform/pc32/i386/trap.c +++ b/sys/platform/pc32/i386/trap.c @@ -109,20 +109,12 @@ #include #include -#ifdef SMP - #define MAKEMPSAFE(have_mplock) \ if (have_mplock == 0) { \ get_mplock(); \ have_mplock = 1; \ } -#else - -#define MAKEMPSAFE(have_mplock) - -#endif - int (*pmath_emulate) (struct trapframe *); extern void trap (struct trapframe *frame); @@ -415,9 +407,7 @@ trap(struct trapframe *frame) struct proc *p; int sticks = 0; int i = 0, ucode = 0, type, code; -#ifdef SMP int have_mplock = 0; -#endif #ifdef INVARIANTS int crit_count = td->td_critcount; lwkt_tokref_t curstop = td->td_toks_stop; @@ -495,23 +485,15 @@ restart: if (in_vm86call) { if (frame->tf_eflags & PSL_VM && (type == T_PROTFLT || type == T_STKFLT)) { -#ifdef SMP KKASSERT(get_mplock_count(curthread) > 0); -#endif i = vm86_emulate((struct vm86frame *)frame); -#ifdef SMP KKASSERT(get_mplock_count(curthread) > 0); -#endif if (i != 0) { /* * returns to original process */ -#ifdef SMP vm86_trap((struct vm86frame *)frame, have_mplock); -#else - vm86_trap((struct vm86frame *)frame, 0); -#endif KKASSERT(0); /* NOT REACHED */ } goto out2; @@ -929,10 +911,8 @@ out: userret(lp, frame, sticks); userexit(lp); out2: ; -#ifdef SMP if (have_mplock) rel_mplock(); -#endif if (p != NULL && lp != NULL) KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid); #ifdef INVARIANTS @@ -1068,11 +1048,9 @@ trap_fatal(struct trapframe *frame, vm_offset_t eva) type, trap_msg[type], frame->tf_eflags & PSL_VM ? "vm86" : ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel"); -#ifdef SMP /* three separate prints in case of a trap on an unmapped page */ kprintf("cpuid = %d; ", mycpu->gd_cpuid); kprintf("lapic.id = %08x\n", lapic->id); -#endif if (type == T_PAGEFLT) { kprintf("fault virtual address = %p\n", (void *)eva); kprintf("fault code = %s %s, %s\n", @@ -1120,14 +1098,12 @@ trap_fatal(struct trapframe *frame, vm_offset_t eva) if (curthread->td_critcount) kprintf("(CRIT)"); kprintf("\n"); -#ifdef SMP /** * XXX FIXME: * we probably SHOULD have stopped the other CPUs before now! * another CPU COULD have been touching cpl at this moment... 
*/ kprintf(" <- SMP: XXX"); -#endif kprintf("\n"); #ifdef KDB @@ -1184,11 +1160,9 @@ dblfault_handler(void) kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip); kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp); kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp); -#ifdef SMP /* three separate prints in case of a trap on an unmapped page */ kprintf("cpuid = %d; ", gd->mi.gd_cpuid); kprintf("lapic.id = %08x\n", lapic->id); -#endif panic("double fault"); } @@ -1216,9 +1190,7 @@ syscall2(struct trapframe *frame) #ifdef INVARIANTS int crit_count = td->td_critcount; #endif -#ifdef SMP int have_mplock = 0; -#endif u_int code; union sysunion args; @@ -1417,13 +1389,11 @@ bad: STOPEVENT(p, S_SCX, code); userexit(lp); -#ifdef SMP /* * Release the MP lock if we had to get it */ if (have_mplock) rel_mplock(); -#endif KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error); #ifdef INVARIANTS KASSERT(crit_count == td->td_critcount, diff --git a/sys/platform/pc32/i386/vm_machdep.c b/sys/platform/pc32/i386/vm_machdep.c index 4e34100990..c16f4fde69 100644 --- a/sys/platform/pc32/i386/vm_machdep.c +++ b/sys/platform/pc32/i386/vm_machdep.c @@ -82,11 +82,9 @@ #include static void cpu_reset_real (void); -#ifdef SMP static void cpu_reset_proxy (void); static u_int cpu_reset_proxyid; static volatile u_int cpu_reset_proxy_active; -#endif extern int _ucodesel, _udatasel; @@ -357,7 +355,6 @@ kvtop(void *addr) * Force reset the processor by invalidating the entire address space! */ -#ifdef SMP static void cpu_reset_proxy(void) { @@ -373,12 +370,10 @@ cpu_reset_proxy(void) DELAY(1000000); cpu_reset_real(); } -#endif void cpu_reset(void) { -#ifdef SMP if (smp_active_mask == 1) { cpu_reset_real(); /* NOTREACHED */ @@ -426,9 +421,6 @@ cpu_reset(void) /* NOTREACHED */ } } -#else - cpu_reset_real(); -#endif } static void diff --git a/sys/platform/pc32/include/intr_machdep.h b/sys/platform/pc32/include/intr_machdep.h index f4b7dbdc78..af3b322b16 100644 --- a/sys/platform/pc32/include/intr_machdep.h +++ b/sys/platform/pc32/include/intr_machdep.h @@ -31,7 +31,6 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/isa/intr_machdep.h,v 1.19.2.2 2001/10/14 20:05:50 luigi Exp $ - * $DragonFly: src/sys/platform/pc32/isa/intr_machdep.h,v 1.25 2006/10/23 21:50:31 dillon Exp $ */ #ifndef _ARCH_ISA_INTR_MACHDEP_H_ @@ -144,12 +143,10 @@ inthand_t Xspuriousint, /* handle APIC "spurious INTs" */ Xtimer; /* handle LAPIC timer INT */ -#ifdef SMP inthand_t Xcpustop, /* CPU stops & waits for another CPU to restart it */ Xinvltlb, /* TLB shootdowns */ Xipiq; /* handle lwkt_send_ipiq() requests */ -#endif #endif /* LOCORE */ diff --git a/sys/platform/pc32/include/lock.h b/sys/platform/pc32/include/lock.h index b12bd6af14..c69ad3ee67 100644 --- a/sys/platform/pc32/include/lock.h +++ b/sys/platform/pc32/include/lock.h @@ -32,7 +32,6 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/include/lock.h,v 1.11.2.2 2000/09/30 02:49:34 ps Exp $ - * $DragonFly: src/sys/platform/pc32/include/lock.h,v 1.17 2008/06/19 21:32:55 aggelos Exp $ */ #ifndef _MACHINE_LOCK_H_ @@ -52,10 +51,6 @@ * Under UP the spinlock routines still serve to disable/restore * interrupts. 
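With the empty UP stub gone, MAKEMPSAFE is always the lazy
take-the-MP-lock-once helper, and every trap()/syscall2() path now carries
the have_mplock bookkeeping. Pieced together from the trap.c hunks above:

	#define MAKEMPSAFE(have_mplock)			\
		if (have_mplock == 0) {			\
			get_mplock();			\
			have_mplock = 1;		\
		}

	/* usage pattern inside trap() / syscall2() */
	int have_mplock = 0;
	/* ... MAKEMPSAFE(have_mplock) at each non-MPSAFE spot ... */
	if (have_mplock)		/* release only if we took it */
		rel_mplock();

The lock is taken at most once per entry and released exactly once on the
way out, so fully MPSAFE paths never touch it at all.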
*/ - - -#ifdef SMP - #define SPIN_INIT(mem) \ movl $0,mem ; \ @@ -102,29 +97,6 @@ #define SPIN_UNLOCK_NOREG(mem) \ SPIN_UNLOCK(mem) ; \ -#else - -#define SPIN_LOCK(mem) \ - pushfl ; \ - cli ; \ - orl $PSL_C,(%esp) ; \ - popl mem ; \ - -#define SPIN_LOCK_PUSH_RESG -#define SPIN_LOCK_POP_REGS -#define SPIN_LOCK_FRAME_SIZE 0 - -#define SPIN_UNLOCK(mem) \ - pushl mem ; \ - movl $0,mem ; \ - popfl ; \ - -#define SPIN_UNLOCK_PUSH_REGS -#define SPIN_UNLOCK_POP_REGS -#define SPIN_UNLOCK_FRAME_SIZE 0 - -#endif /* SMP */ - #else /* !LOCORE */ #ifdef _KERNEL diff --git a/sys/platform/pc32/include/pmap.h b/sys/platform/pc32/include/pmap.h index f52a54cb53..ca929957f2 100644 --- a/sys/platform/pc32/include/pmap.h +++ b/sys/platform/pc32/include/pmap.h @@ -43,7 +43,6 @@ * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 2001/10/03 07:15:37 peter Exp $ - * $DragonFly: src/sys/platform/pc32/include/pmap.h,v 1.7 2007/06/08 00:57:04 dillon Exp $ */ #ifndef _MACHINE_PMAP_H_ @@ -304,9 +303,7 @@ void pmap_unmapdev (vm_offset_t, vm_size_t); unsigned *pmap_kernel_pte (vm_offset_t) __pure2; struct vm_page *pmap_use_pt (pmap_t, vm_offset_t); int pmap_get_pgeflag(void); -#ifdef SMP void pmap_set_opt (void); -#endif #endif /* _KERNEL */ diff --git a/sys/platform/pc32/include/smp.h b/sys/platform/pc32/include/smp.h index a46df3a882..f1f97288ab 100644 --- a/sys/platform/pc32/include/smp.h +++ b/sys/platform/pc32/include/smp.h @@ -7,8 +7,6 @@ * ---------------------------------------------------------------------------- * * $FreeBSD: src/sys/i386/include/smp.h,v 1.50.2.5 2001/02/13 22:32:45 tegge Exp $ - * $DragonFly: src/sys/platform/pc32/include/smp.h,v 1.20 2006/11/07 06:43:24 dillon Exp $ - * */ #ifndef _MACHINE_SMP_H_ @@ -16,8 +14,6 @@ #ifdef _KERNEL -#if defined(SMP) - #ifndef LOCORE /* @@ -93,11 +89,6 @@ int get_cpuid_from_apicid(int cpuid) { } #endif /* !LOCORE */ -#else /* !SMP */ - -#define smp_active_mask 1 /* smp_active_mask always 1 on UP machines */ - -#endif #endif /* _KERNEL */ #endif /* _MACHINE_SMP_H_ */ diff --git a/sys/platform/pc32/include/thread.h b/sys/platform/pc32/include/thread.h index f2b429c8de..b311634ac1 100644 --- a/sys/platform/pc32/include/thread.h +++ b/sys/platform/pc32/include/thread.h @@ -81,12 +81,7 @@ _get_mycpu(void) } #define mycpu _get_mycpu() - -#ifdef SMP #define mycpuid (_get_mycpu()->gd_cpuid) -#else -#define mycpuid 0 -#endif /* * note: curthread is never NULL, but curproc can be. Also note that diff --git a/sys/platform/pc32/isa/clock.c b/sys/platform/pc32/isa/clock.c index ad4a63b135..a51432b079 100644 --- a/sys/platform/pc32/isa/clock.c +++ b/sys/platform/pc32/isa/clock.c @@ -57,9 +57,6 @@ #include #include #include -#ifndef SMP -#include -#endif #include #include #include @@ -184,10 +181,8 @@ clkintr(void *dummy, void *frame_arg) { static sysclock_t sysclock_count; /* NOTE! Must be static */ struct globaldata *gd = mycpu; -#ifdef SMP struct globaldata *gscan; int n; -#endif /* * SWSTROBE mode is a one-shot, the timer is no longer running @@ -200,7 +195,6 @@ clkintr(void *dummy, void *frame_arg) * usually *ALL* of them. We need to use the LAPIC timer for this. 
*/ sysclock_count = sys_cputimer->count(); -#ifdef SMP for (n = 0; n < ncpus; ++n) { gscan = globaldata_find(n); if (TAILQ_FIRST(&gscan->gd_systimerq) == NULL) @@ -212,10 +206,6 @@ clkintr(void *dummy, void *frame_arg) systimer_intr(&sysclock_count, 0, frame_arg); } } -#else - if (TAILQ_FIRST(&gd->gd_systimerq) != NULL) - systimer_intr(&sysclock_count, 0, frame_arg); -#endif } @@ -826,30 +816,6 @@ startrtclock(void) } EVENTHANDLER_REGISTER(shutdown_post_sync, resettodr_on_shutdown, NULL, SHUTDOWN_PRI_LAST); - -#if !defined(SMP) - /* - * We can not use the TSC in SMP mode, until we figure out a - * cheap (impossible), reliable and precise (yeah right!) way - * to synchronize the TSCs of all the CPUs. - * Curse Intel for leaving the counter out of the I/O APIC. - */ - -#if NAPM > 0 - /* - * We can not use the TSC if we support APM. Precise timekeeping - * on an APM'ed machine is at best a fools pursuit, since - * any and all of the time spent in various SMM code can't - * be reliably accounted for. Reading the RTC is your only - * source of reliable time info. The i8254 looses too of course - * but we need to have some kind of time... - * We don't know at this point whether APM is going to be used - * or not, nor when it might be activated. Play it safe. - */ - return; -#endif /* NAPM > 0 */ - -#endif /* !defined(SMP) */ } /* diff --git a/sys/platform/pc32/isa/npx.c b/sys/platform/pc32/isa/npx.c index 449fcfc491..e2c8ed7eea 100644 --- a/sys/platform/pc32/isa/npx.c +++ b/sys/platform/pc32/isa/npx.c @@ -56,28 +56,16 @@ #include #include -#ifndef SMP -#include -#endif #include #include #include #include #include #include -#ifndef SMP -#include -#endif #include #include #include -#ifndef SMP -#include -#include -#include -#endif - /* * 387 and 287 Numeric Coprocessor Extension (NPX) Driver. */ @@ -151,56 +139,11 @@ SYSCTL_INT(_kern, OID_AUTO, mmxopt, CTLFLAG_RD, &mmxopt, 0, "MMX/XMM optimized bcopy/copyin/copyout support"); #endif -#ifndef SMP -static u_int npx0_imask; -static struct gate_descriptor npx_idt_probeintr; -static int npx_intrno; -static volatile u_int npx_intrs_while_probing; -static volatile u_int npx_traps_while_probing; -#endif - static bool_t npx_ex16; static bool_t npx_exists; static bool_t npx_irq13; static int npx_irq; /* irq number */ -#ifndef SMP -/* - * Special interrupt handlers. Someday intr0-intr15 will be used to count - * interrupts. We'll still need a special exception 16 handler. The busy - * latch stuff in probeintr() can be moved to npxprobe(). 
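The clock interrupt now always fans out across every CPU's systimer queue
rather than special-casing a single queue. In outline (the gscan != gd split
is reconstructed from the surrounding logic and the comment above the loop,
since the excerpt elides the middle of the hunk; it may not be verbatim):

	sysclock_count = sys_cputimer->count();
	for (n = 0; n < ncpus; ++n) {
		gscan = globaldata_find(n);
		if (TAILQ_FIRST(&gscan->gd_systimerq) == NULL)
			continue;
		if (gscan != gd) {
			/* remote CPU: forwarded via IPI (elided above) */
		} else {
			systimer_intr(&sysclock_count, 0, frame_arg);
		}
	}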
- */ -inthand_t probeintr; -__asm(" \n\ - .text \n\ - .p2align 2,0x90 \n\ - .type " __XSTRING(CNAME(probeintr)) ",@function \n\ -" __XSTRING(CNAME(probeintr)) ": \n\ - ss \n\ - incl " __XSTRING(CNAME(npx_intrs_while_probing)) " \n\ - pushl %eax \n\ - movb $0x20,%al # EOI (asm in strings loses cpp features) \n\ - outb %al,$0xa0 # IO_ICU2 \n\ - outb %al,$0x20 # IO_ICU1 \n\ - movb $0,%al \n\ - outb %al,$0xf0 # clear BUSY# latch \n\ - popl %eax \n\ - iret \n\ -"); - -inthand_t probetrap; -__asm(" \n\ - .text \n\ - .p2align 2,0x90 \n\ - .type " __XSTRING(CNAME(probetrap)) ",@function \n\ -" __XSTRING(CNAME(probetrap)) ": \n\ - ss \n\ - incl " __XSTRING(CNAME(npx_traps_while_probing)) " \n\ - fnclex \n\ - iret \n\ -"); -#endif /* SMP */ - static struct krate badfprate = { 1 }; /* @@ -212,62 +155,14 @@ static struct krate badfprate = { 1 }; static int npx_probe(device_t dev) { -#ifdef SMP - if (resource_int_value("npx", 0, "irq", &npx_irq) != 0) npx_irq = 13; return npx_probe1(dev); - -#else /* SMP */ - - int result; - u_long save_eflags; - u_char save_icu1_mask; - u_char save_icu2_mask; - struct gate_descriptor save_idt_npxintr; - struct gate_descriptor save_idt_npxtrap; - /* - * This routine is now just a wrapper for npxprobe1(), to install - * special npx interrupt and trap handlers, to enable npx interrupts - * and to disable other interrupts. Someday isa_configure() will - * install suitable handlers and run with interrupts enabled so we - * won't need to do so much here. - */ - if (resource_int_value("npx", 0, "irq", &npx_irq) != 0) - npx_irq = 13; - npx_intrno = IDT_OFFSET + npx_irq; - save_eflags = read_eflags(); - cpu_disable_intr(); - save_icu1_mask = inb(IO_ICU1 + 1); - save_icu2_mask = inb(IO_ICU2 + 1); - save_idt_npxintr = idt[npx_intrno]; - save_idt_npxtrap = idt[16]; - outb(IO_ICU1 + 1, ~(1 << ICU_IRQ_SLAVE)); - outb(IO_ICU2 + 1, ~(1 << (npx_irq - 8))); - setidt(16, probetrap, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); - setidt(npx_intrno, probeintr, SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); - npx_idt_probeintr = idt[npx_intrno]; - cpu_enable_intr(); - result = npx_probe1(dev); - cpu_disable_intr(); - outb(IO_ICU1 + 1, save_icu1_mask); - outb(IO_ICU2 + 1, save_icu2_mask); - idt[npx_intrno] = save_idt_npxintr; - idt[16] = save_idt_npxtrap; - write_eflags(save_eflags); - return (result); - -#endif /* SMP */ } static int npx_probe1(device_t dev) { -#ifndef SMP - u_short control; - u_short status; -#endif - /* * Partially reset the coprocessor, if any. Some BIOS's don't reset * it after a warm boot. @@ -313,90 +208,6 @@ npx_probe1(device_t dev) return (0); } -#ifndef SMP - /* - * Don't use fwait here because it might hang. - * Don't use fnop here because it usually hangs if there is no FPU. - */ - DELAY(1000); /* wait for any IRQ13 */ -#ifdef DIAGNOSTIC - if (npx_intrs_while_probing != 0) - kprintf("fninit caused %u bogus npx interrupt(s)\n", - npx_intrs_while_probing); - if (npx_traps_while_probing != 0) - kprintf("fninit caused %u bogus npx trap(s)\n", - npx_traps_while_probing); -#endif - /* - * Check for a status of mostly zero. - */ - status = 0x5a5a; - fnstsw(&status); - if ((status & 0xb8ff) == 0) { - /* - * Good, now check for a proper control word. - */ - control = 0x5a5a; - fnstcw(&control); - if ((control & 0x1f3f) == 0x033f) { - hw_float = npx_exists = 1; - /* - * We have an npx, now divide by 0 to see if exception - * 16 works. 
- */ - control &= ~(1 << 2); /* enable divide by 0 trap */ - fldcw(&control); - npx_traps_while_probing = npx_intrs_while_probing = 0; - fp_divide_by_0(); - if (npx_traps_while_probing != 0) { - /* - * Good, exception 16 works. - */ - npx_ex16 = 1; - return (0); - } - if (npx_intrs_while_probing != 0) { - int rid; - struct resource *r; - void *intr; - /* - * Bad, we are stuck with IRQ13. - */ - npx_irq13 = 1; - /* - * npxattach would be too late to set npx0_imask - */ - npx0_imask |= (1 << npx_irq); - - /* - * We allocate these resources permanently, - * so there is no need to keep track of them. - */ - rid = 0; - r = bus_alloc_resource(dev, SYS_RES_IOPORT, - &rid, IO_NPX, IO_NPX, - IO_NPXSIZE, RF_ACTIVE); - if (r == NULL) - panic("npx: can't get ports"); - rid = 0; - r = bus_alloc_legacy_irq_resource(dev, &rid, - npx_irq, RF_ACTIVE); - if (r == NULL) - panic("npx: can't get IRQ"); - BUS_SETUP_INTR(device_get_parent(dev), - dev, r, 0, - npx_intr, 0, &intr, NULL, NULL); - if (intr == NULL) - panic("npx: can't create intr"); - - return (0); - } - /* - * Worse, even IRQ13 is broken. Use emulator. - */ - } - } -#endif /* SMP */ /* * Probe failed, but we want to get to npxattach to initialize the * emulator and say that it has been installed. XXX handle devices @@ -916,8 +727,6 @@ npxdna(void) void npxsave(union savefpu *addr) { -#if defined(SMP) || !defined(CPU_DISABLE_SSE) - crit_enter(); stop_emulating(); fpusave(addr); @@ -925,42 +734,6 @@ npxsave(union savefpu *addr) fninit(); start_emulating(); crit_exit(); - -#else /* !SMP and CPU_DISABLE_SSE */ - - u_char icu1_mask; - u_char icu2_mask; - u_char old_icu1_mask; - u_char old_icu2_mask; - struct gate_descriptor save_idt_npxintr; - u_long save_eflags; - - save_eflags = read_eflags(); - cpu_disable_intr(); - old_icu1_mask = inb(IO_ICU1 + 1); - old_icu2_mask = inb(IO_ICU2 + 1); - save_idt_npxintr = idt[npx_intrno]; - outb(IO_ICU1 + 1, old_icu1_mask & ~((1 << ICU_IRQ_SLAVE) | npx0_imask)); - outb(IO_ICU2 + 1, old_icu2_mask & ~(npx0_imask >> 8)); - idt[npx_intrno] = npx_idt_probeintr; - cpu_enable_intr(); - stop_emulating(); - fnsave(addr); - fnop(); - cpu_disable_intr(); - mdcpu->gd_npxthread = NULL; - start_emulating(); - icu1_mask = inb(IO_ICU1 + 1); /* masks may have changed */ - icu2_mask = inb(IO_ICU2 + 1); - outb(IO_ICU1 + 1, - (icu1_mask & ~npx0_imask) | (old_icu1_mask & npx0_imask)); - outb(IO_ICU2 + 1, - (icu2_mask & ~(npx0_imask >> 8)) - | (old_icu2_mask & (npx0_imask >> 8))); - idt[npx_intrno] = save_idt_npxintr; - write_eflags(save_eflags); /* back to usual state */ - -#endif /* SMP */ } static void diff --git a/sys/platform/pc32/isa/prof_machdep.c b/sys/platform/pc32/isa/prof_machdep.c index de8d8f791a..c68b8a0979 100644 --- a/sys/platform/pc32/isa/prof_machdep.c +++ b/sys/platform/pc32/isa/prof_machdep.c @@ -183,37 +183,9 @@ cputime(void) { u_int count; int delta; -#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(SMP) && \ - defined(PERFMON) && defined(I586_PMC_GUPROF) - u_quad_t event_count; -#endif u_char high, low; static u_int prev_count; -#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(SMP) - if (cputime_clock == CPUTIME_CLOCK_TSC) { - count = (u_int)rdtsc(); - delta = (int)(count - prev_count); - prev_count = count; - return (delta); - } -#if defined(PERFMON) && defined(I586_PMC_GUPROF) - if (cputime_clock == CPUTIME_CLOCK_I586_PMC) { - /* - * XXX permon_read() should be inlined so that the - * perfmon module doesn't need to be compiled with - * profiling disabled and so that it is fast. 
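npxsave() now has a single body, the one that used to be the SMP/SSE path:
save the FPU state inside a critical section and leave the unit disabled for
the owner switch. Reconstructed from the hunks above (the gd_npxthread clear
falls in the gap between the two hunks and mirrors the deleted UP variant,
so treat that line as a best guess):

	void
	npxsave(union savefpu *addr)
	{
		crit_enter();
		stop_emulating();
		fpusave(addr);
		mdcpu->gd_npxthread = NULL;	/* from the hunk gap */
		fninit();
		start_emulating();
		crit_exit();
	}

The couple hundred deleted lines of ICU mask juggling were only ever needed
for the IRQ13-coupled UP probe path.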
- */ - perfmon_read(0, &event_count); - - count = (u_int)event_count; - delta = (int)(count - prev_count); - prev_count = count; - return (delta); - } -#endif /* PERFMON && I586_PMC_GUPROF */ -#endif /* (I586_CPU || I686_CPU) && !SMP */ - /* * Read the current value of the 8254 timer counter 0. */ @@ -291,44 +263,9 @@ SYSCTL_PROC(_machdep, OID_AUTO, cputime_clock, CTLTYPE_INT | CTLFLAG_RW, void startguprof(struct gmonparam *gp) { - if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED) { + if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED) cputime_clock = CPUTIME_CLOCK_I8254; -#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(SMP) - if (tsc_frequency != 0) - cputime_clock = CPUTIME_CLOCK_TSC; -#endif - } gp->profrate = timer_freq << CPUTIME_CLOCK_I8254_SHIFT; -#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(SMP) - if (cputime_clock == CPUTIME_CLOCK_TSC) - gp->profrate = (u_int)tsc_frequency; /* XXX */ -#if defined(PERFMON) && defined(I586_PMC_GUPROF) - else if (cputime_clock == CPUTIME_CLOCK_I586_PMC) { - if (perfmon_avail() && - perfmon_setup(0, cputime_clock_pmc_conf) == 0) { - if (perfmon_start(0) != 0) - perfmon_fini(0); - else { - /* XXX 1 event == 1 us. */ - gp->profrate = 1000000; - - saved_gmp = *gp; - - /* Zap overheads. They are invalid. */ - gp->cputime_overhead = 0; - gp->mcount_overhead = 0; - gp->mcount_post_overhead = 0; - gp->mcount_pre_overhead = 0; - gp->mexitcount_overhead = 0; - gp->mexitcount_post_overhead = 0; - gp->mexitcount_pre_overhead = 0; - - cputime_clock_pmc_init = TRUE; - } - } - } -#endif /* PERFMON && I586_PMC_GUPROF */ -#endif /* (I586_CPU || I686_CPU) && !SMP */ cputime_bias = 0; cputime(); } diff --git a/sys/platform/pc64/apic/apic_vector.s b/sys/platform/pc64/apic/apic_vector.s index b37e8c7806..1dc531ab28 100644 --- a/sys/platform/pc64/apic/apic_vector.s +++ b/sys/platform/pc64/apic/apic_vector.s @@ -32,11 +32,7 @@ /* convert an absolute IRQ# into gd_ipending index */ #define IRQ_LIDX(irq_num) ((irq_num) >> 6) -#ifdef SMP #define MPLOCKED lock ; -#else -#define MPLOCKED -#endif #define APIC_PUSH_FRAME \ PUSH_FRAME ; /* 15 regs + space for 5 extras */ \ @@ -182,8 +178,6 @@ Xspuriousint: APIC_POP_FRAME jmp doreti_iret -#ifdef SMP - /* * Handle TLB shootdowns. 
* @@ -305,8 +299,6 @@ Xipiq: APIC_POP_FRAME jmp doreti_iret -#endif /* SMP */ - .text SUPERALIGN_TEXT .globl Xtimer diff --git a/sys/platform/pc64/apic/lapic.c b/sys/platform/pc64/apic/lapic.c index af62d93a0b..218abb6bcd 100644 --- a/sys/platform/pc64/apic/lapic.c +++ b/sys/platform/pc64/apic/lapic.c @@ -154,7 +154,6 @@ lapic_init(boolean_t bsp) setidt_global(XTIMER_OFFSET, Xtimer, SDT_SYSIGT, SEL_KPL, 0); -#ifdef SMP /* Install an inter-CPU IPI for TLB invalidation */ setidt_global(XINVLTLB_OFFSET, Xinvltlb, SDT_SYSIGT, SEL_KPL, 0); @@ -166,7 +165,6 @@ lapic_init(boolean_t bsp) /* Install an inter-CPU IPI for CPU stop/restart */ setidt_global(XCPUSTOP_OFFSET, Xcpustop, SDT_SYSIGT, SEL_KPL, 0); -#endif } /* @@ -506,22 +504,14 @@ lapic_timer_restart_handler(void *dummy __unused) static void lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused) { -#ifdef SMP lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_fixup_handler, NULL); -#else - lapic_timer_fixup_handler(NULL); -#endif } static void lapic_timer_intr_restart(struct cputimer_intr *cti __unused) { -#ifdef SMP lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL); -#else - lapic_timer_restart_handler(NULL); -#endif } @@ -536,8 +526,6 @@ apic_dump(char* str) lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr); } -#ifdef SMP - /* * Inter Processor Interrupt functions. */ @@ -666,8 +654,6 @@ selected_apic_ipi(cpumask_t target, int vector, int delivery_mode) crit_exit(); } -#endif /* SMP */ - /* * Timer code, in development... * - suggested by rgrimes@gndrsh.aac.dev.com diff --git a/sys/platform/pc64/apic/lapic.h b/sys/platform/pc64/apic/lapic.h index 69e67b06b0..b308d958bc 100644 --- a/sys/platform/pc64/apic/lapic.h +++ b/sys/platform/pc64/apic/lapic.h @@ -24,7 +24,6 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/include/mpapic.h,v 1.14.2.2 2000/09/30 02:49:34 ps Exp $ - * $DragonFly: src/sys/platform/pc64/apic/mpapic.h,v 1.1 2008/08/29 17:07:12 dillon Exp $ */ #ifndef _ARCH_APIC_LAPIC_H_ @@ -71,8 +70,6 @@ void lapic_map(vm_paddr_t); int lapic_unused_apic_id(int); void lapic_fixup_noioapic(void); -#ifdef SMP - #ifndef _MACHINE_SMP_H_ #include #endif @@ -93,6 +90,4 @@ all_but_self_ipi(int vector) return apic_ipi(APIC_DEST_ALLESELF, vector, APIC_DELMODE_FIXED); } -#endif /* SMP */ - #endif /* _ARCH_APIC_LAPIC_H_ */ diff --git a/sys/platform/pc64/conf/files b/sys/platform/pc64/conf/files index dd14e4da65..1fd45cf336 100644 --- a/sys/platform/pc64/conf/files +++ b/sys/platform/pc64/conf/files @@ -153,7 +153,7 @@ vfs/smbfs/smbfs_vnops.c optional smbfs cpu/x86_64/misc/atomic.c standard \ compile-with "${CC} -c ${CFLAGS} ${WERROR} ${DEFINED_PROF:S/^$/-fomit-frame-pointer/} ${.IMPSRC}" platform/pc64/x86_64/autoconf.c standard -platform/pc64/x86_64/mpboot.S optional smp +platform/pc64/x86_64/mpboot.S standard # DDB XXX cpu/x86_64/misc/x86_64-gdbstub.c optional ddb @@ -232,7 +232,7 @@ platform/pc64/x86_64/busdma_machdep.c standard platform/pc64/x86_64/sysarch.c standard platform/pc64/x86_64/ipl_funcs.c standard kern/syscalls.c standard -platform/pc64/x86_64/mp_machdep.c optional smp +platform/pc64/x86_64/mp_machdep.c standard platform/pc64/x86_64/mptable.c standard platform/pc64/acpica5/acpi_sdt.c standard platform/pc64/acpica5/acpi_fadt.c standard diff --git a/sys/platform/pc64/include/intr_machdep.h b/sys/platform/pc64/include/intr_machdep.h index f74d78e3fd..2ef62d872d 100644 --- a/sys/platform/pc64/include/intr_machdep.h +++ b/sys/platform/pc64/include/intr_machdep.h @@ -32,7 +32,6 @@ * SUCH DAMAGE. 
* * $FreeBSD: src/sys/i386/isa/intr_machdep.h,v 1.19.2.2 2001/10/14 20:05:50 luigi Exp $ - * $DragonFly: src/sys/platform/pc64/isa/intr_machdep.h,v 1.1 2008/08/29 17:07:19 dillon Exp $ */ #ifndef _ARCH_INTR_MACHDEP_H_ @@ -148,12 +147,10 @@ inthand_t Xspuriousint, /* handle APIC "spurious INTs" */ Xtimer; /* handle LAPIC timer INT */ -#ifdef SMP inthand_t Xinvltlb, /* TLB shootdowns */ Xcpustop, /* CPU stops & waits for another CPU to restart it */ Xipiq; /* handle lwkt_send_ipiq() requests */ -#endif #endif /* LOCORE */ diff --git a/sys/platform/pc64/include/lock.h b/sys/platform/pc64/include/lock.h index 7b05e62d4e..94250c6239 100644 --- a/sys/platform/pc64/include/lock.h +++ b/sys/platform/pc64/include/lock.h @@ -32,7 +32,6 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/include/lock.h,v 1.11.2.2 2000/09/30 02:49:34 ps Exp $ - * $DragonFly: src/sys/platform/pc32/include/lock.h,v 1.17 2008/06/19 21:32:55 aggelos Exp $ */ #ifndef _MACHINE_LOCK_H_ @@ -53,9 +52,6 @@ * interrupts. */ - -#ifdef SMP - #define SPIN_INIT(mem) \ movq $0,mem ; \ @@ -102,29 +98,6 @@ #define SPIN_UNLOCK_NOREG(mem) \ SPIN_UNLOCK(mem) ; \ -#else /* !SMP */ - -#define SPIN_LOCK(mem) \ - pushfq ; \ - cli ; \ - orq $PSL_C,(%rsp) ; \ - popq mem ; \ - -#define SPIN_LOCK_PUSH_RESG -#define SPIN_LOCK_POP_REGS -#define SPIN_LOCK_FRAME_SIZE 0 - -#define SPIN_UNLOCK(mem) \ - pushq mem ; \ - movq $0,mem ; \ - popfq ; \ - -#define SPIN_UNLOCK_PUSH_REGS -#define SPIN_UNLOCK_POP_REGS -#define SPIN_UNLOCK_FRAME_SIZE 0 - -#endif /* SMP */ - #else /* !LOCORE */ #ifdef _KERNEL diff --git a/sys/platform/pc64/include/pmap.h b/sys/platform/pc64/include/pmap.h index 6c67d6c103..1e059ca7fd 100644 --- a/sys/platform/pc64/include/pmap.h +++ b/sys/platform/pc64/include/pmap.h @@ -45,7 +45,6 @@ * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 2001/10/03 07:15:37 peter Exp $ - * $DragonFly: src/sys/platform/pc64/include/pmap.h,v 1.1 2008/08/29 17:07:17 dillon Exp $ */ #ifndef _MACHINE_PMAP_H_ @@ -330,9 +329,7 @@ void *pmap_mapdev (vm_paddr_t, vm_size_t); void *pmap_mapdev_uncacheable(vm_paddr_t, vm_size_t); void pmap_unmapdev (vm_offset_t, vm_size_t); struct vm_page *pmap_use_pt (pmap_t, vm_offset_t); -#ifdef SMP void pmap_set_opt (void); -#endif vm_paddr_t pmap_kextract(vm_offset_t); #endif /* _KERNEL */ diff --git a/sys/platform/pc64/include/smp.h b/sys/platform/pc64/include/smp.h index 42f845105c..1fdcda50ee 100644 --- a/sys/platform/pc64/include/smp.h +++ b/sys/platform/pc64/include/smp.h @@ -7,8 +7,6 @@ * ---------------------------------------------------------------------------- * * $FreeBSD: src/sys/i386/include/smp.h,v 1.50.2.5 2001/02/13 22:32:45 tegge Exp $ - * $DragonFly: src/sys/platform/pc32/include/smp.h,v 1.20 2006/11/07 06:43:24 dillon Exp $ - * */ #ifndef _MACHINE_SMP_H_ @@ -23,8 +21,6 @@ void initializecpu(void); #endif /* LOCORE */ -#if defined(SMP) - #ifndef LOCORE /* @@ -102,11 +98,6 @@ int get_cpuid_from_apicid(int apicid) { } #endif /* !LOCORE */ -#else /* !SMP */ - -#define smp_active_mask 1 /* smp_active_mask always 1 on UP machines */ - -#endif #endif /* _KERNEL */ #endif /* _MACHINE_SMP_H_ */ diff --git a/sys/platform/pc64/include/thread.h b/sys/platform/pc64/include/thread.h index 8968dc4a0e..5cd5358297 100644 --- a/sys/platform/pc64/include/thread.h +++ b/sys/platform/pc64/include/thread.h @@ -71,12 +71,7 @@ _get_mycpu(void) } #define mycpu _get_mycpu() - -#ifdef SMP #define mycpuid (_get_mycpu()->gd_cpuid) -#else -#define 
mycpuid 0 -#endif /* * note: curthread is never NULL, but curproc can be. Also note that diff --git a/sys/platform/pc64/isa/clock.c b/sys/platform/pc64/isa/clock.c index 0376179ae2..27ff8729f3 100644 --- a/sys/platform/pc64/isa/clock.c +++ b/sys/platform/pc64/isa/clock.c @@ -60,9 +60,6 @@ #include #include #include -#ifndef SMP -#include -#endif #include #include #include @@ -186,10 +183,8 @@ clkintr(void *dummy, void *frame_arg) { static sysclock_t sysclock_count; /* NOTE! Must be static */ struct globaldata *gd = mycpu; -#ifdef SMP struct globaldata *gscan; int n; -#endif /* * SWSTROBE mode is a one-shot, the timer is no longer running @@ -202,7 +197,6 @@ clkintr(void *dummy, void *frame_arg) * usually *ALL* of them. We need to use the LAPIC timer for this. */ sysclock_count = sys_cputimer->count(); -#ifdef SMP for (n = 0; n < ncpus; ++n) { gscan = globaldata_find(n); if (TAILQ_FIRST(&gscan->gd_systimerq) == NULL) @@ -214,10 +208,6 @@ clkintr(void *dummy, void *frame_arg) systimer_intr(&sysclock_count, 0, frame_arg); } } -#else - if (TAILQ_FIRST(&gd->gd_systimerq) != NULL) - systimer_intr(&sysclock_count, 0, frame_arg); -#endif } @@ -833,30 +823,6 @@ startrtclock(void) } EVENTHANDLER_REGISTER(shutdown_post_sync, resettodr_on_shutdown, NULL, SHUTDOWN_PRI_LAST); - -#if !defined(SMP) - /* - * We can not use the TSC in SMP mode, until we figure out a - * cheap (impossible), reliable and precise (yeah right!) way - * to synchronize the TSCs of all the CPUs. - * Curse Intel for leaving the counter out of the I/O APIC. - */ - -#if NAPM > 0 - /* - * We can not use the TSC if we support APM. Precise timekeeping - * on an APM'ed machine is at best a fools pursuit, since - * any and all of the time spent in various SMM code can't - * be reliably accounted for. Reading the RTC is your only - * source of reliable time info. The i8254 looses too of course - * but we need to have some kind of time... - * We don't know at this point whether APM is going to be used - * or not, nor when it might be activated. Play it safe. - */ - return; -#endif /* NAPM > 0 */ - -#endif /* !defined(SMP) */ } /* diff --git a/sys/platform/pc64/isa/prof_machdep.c b/sys/platform/pc64/isa/prof_machdep.c index 728692c04d..f43905a264 100644 --- a/sys/platform/pc64/isa/prof_machdep.c +++ b/sys/platform/pc64/isa/prof_machdep.c @@ -184,37 +184,9 @@ cputime(void) { u_int count; int delta; -#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(SMP) && \ - defined(PERFMON) && defined(I586_PMC_GUPROF) - u_quad_t event_count; -#endif u_char high, low; static u_int prev_count; -#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(SMP) - if (cputime_clock == CPUTIME_CLOCK_TSC) { - count = (u_int)rdtsc(); - delta = (int)(count - prev_count); - prev_count = count; - return (delta); - } -#if defined(PERFMON) && defined(I586_PMC_GUPROF) - if (cputime_clock == CPUTIME_CLOCK_I586_PMC) { - /* - * XXX permon_read() should be inlined so that the - * perfmon module doesn't need to be compiled with - * profiling disabled and so that it is fast. - */ - perfmon_read(0, &event_count); - - count = (u_int)event_count; - delta = (int)(count - prev_count); - prev_count = count; - return (delta); - } -#endif /* PERFMON && I586_PMC_GUPROF */ -#endif /* (I586_CPU || I686_CPU) && !SMP */ - /* * Read the current value of the 8254 timer counter 0. 
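The deleted profiling-clock path above is a textbook free-running-counter
idiom: truncate the TSC to 32 bits and let unsigned subtraction produce a
correct delta across wrap. A standalone user-space sketch of that
computation (GCC/Clang on x86; this mirrors the removed code, not anything
the patch adds):

	#include <stdint.h>

	static uint32_t prev_count;

	/* Elapsed TSC ticks since the previous call. Truncation is
	 * safe as long as calls are less than 2^32 ticks apart:
	 * modular subtraction handles the wrap. */
	static int32_t
	cputime_tsc(void)
	{
		uint32_t count = (uint32_t)__builtin_ia32_rdtsc();
		int32_t delta = (int32_t)(count - prev_count);

		prev_count = count;
		return (delta);
	}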
*/ @@ -292,44 +264,9 @@ SYSCTL_PROC(_machdep, OID_AUTO, cputime_clock, CTLTYPE_INT | CTLFLAG_RW, void startguprof(struct gmonparam *gp) { - if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED) { + if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED) cputime_clock = CPUTIME_CLOCK_I8254; -#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(SMP) - if (tsc_frequency != 0) - cputime_clock = CPUTIME_CLOCK_TSC; -#endif - } gp->profrate = timer_freq << CPUTIME_CLOCK_I8254_SHIFT; -#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(SMP) - if (cputime_clock == CPUTIME_CLOCK_TSC) - gp->profrate = (u_int)tsc_frequency; /* XXX */ -#if defined(PERFMON) && defined(I586_PMC_GUPROF) - else if (cputime_clock == CPUTIME_CLOCK_I586_PMC) { - if (perfmon_avail() && - perfmon_setup(0, cputime_clock_pmc_conf) == 0) { - if (perfmon_start(0) != 0) - perfmon_fini(0); - else { - /* XXX 1 event == 1 us. */ - gp->profrate = 1000000; - - saved_gmp = *gp; - - /* Zap overheads. They are invalid. */ - gp->cputime_overhead = 0; - gp->mcount_overhead = 0; - gp->mcount_post_overhead = 0; - gp->mcount_pre_overhead = 0; - gp->mexitcount_overhead = 0; - gp->mexitcount_post_overhead = 0; - gp->mexitcount_pre_overhead = 0; - - cputime_clock_pmc_init = TRUE; - } - } - } -#endif /* PERFMON && I586_PMC_GUPROF */ -#endif /* (I586_CPU || I686_CPU) && !SMP */ cputime_bias = 0; cputime(); } diff --git a/sys/platform/pc64/x86_64/amd64_mem.c b/sys/platform/pc64/x86_64/amd64_mem.c index 4d3f2c7125..2fca20cfb9 100644 --- a/sys/platform/pc64/x86_64/amd64_mem.c +++ b/sys/platform/pc64/x86_64/amd64_mem.c @@ -280,7 +280,6 @@ amd64_mrt2mtrr(int flags, int oldval) static void amd64_mrstore(struct mem_range_softc *sc) { -#ifdef SMP /* * We should use ipi_all_but_self() to call other CPUs into a * locking gate, then call a target function to do this work. @@ -288,11 +287,6 @@ amd64_mrstore(struct mem_range_softc *sc) * implementation, not ready yet. */ lwkt_send_ipiq_mask(smp_active_mask, (void *)amd64_mrstoreone, sc); -#else - crit_enter(); - amd64_mrstoreone(sc); - crit_exit(); -#endif } /* @@ -723,7 +717,6 @@ amd64_mrAPinit(struct mem_range_softc *sc) static void amd64_mrreinit(struct mem_range_softc *sc) { -#ifdef SMP /* * We should use ipi_all_but_self() to call other CPUs into a * locking gate, then call a target function to do this work. @@ -731,11 +724,6 @@ amd64_mrreinit(struct mem_range_softc *sc) * implementation, not ready yet. 
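On x86_64 the same cleanup leaves amd64_mrstore() as a plain IPI broadcast;
note that it uses lwkt_send_ipiq_mask() where the i386 version above used
the synchronous lwkt_cpusync_simple(). Reconstructed from the hunk (XXX
comment elided):

	static void
	amd64_mrstore(struct mem_range_softc *sc)
	{
		/* XXX wants ipi_all_but_self() + a proper locking gate */
		lwkt_send_ipiq_mask(smp_active_mask,
		    (void *)amd64_mrstoreone, sc);
	}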
*/ lwkt_send_ipiq_mask(smp_active_mask, (void *)amd64_mrAPinit, sc); -#else - crit_enter(); - amd64_mrAPinit(sc); - crit_exit(); -#endif } static void diff --git a/sys/platform/pc64/x86_64/busdma_machdep.c b/sys/platform/pc64/x86_64/busdma_machdep.c index 00304e23c6..bf6d69d404 100644 --- a/sys/platform/pc64/x86_64/busdma_machdep.c +++ b/sys/platform/pc64/x86_64/busdma_machdep.c @@ -76,11 +76,7 @@ struct bus_dma_tag { int map_count; bus_dma_segment_t *segments; struct bounce_zone *bounce_zone; -#ifdef SMP struct spinlock spin; -#else - int unused0; -#endif }; /* @@ -109,11 +105,7 @@ struct bounce_zone { STAILQ_ENTRY(bounce_zone) links; STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; -#ifdef SMP struct spinlock spin; -#else - int unused0; -#endif int total_bpages; int free_bpages; int reserved_bpages; @@ -129,13 +121,8 @@ struct bounce_zone { struct sysctl_oid *sysctl_tree; }; -#ifdef SMP #define BZ_LOCK(bz) spin_lock(&(bz)->spin) #define BZ_UNLOCK(bz) spin_unlock(&(bz)->spin) -#else -#define BZ_LOCK(bz) crit_enter() -#define BZ_UNLOCK(bz) crit_exit() -#endif static struct lwkt_token bounce_zone_tok = LWKT_TOKEN_INITIALIZER(bounce_zone_token); @@ -220,9 +207,7 @@ bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache) if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS) return(cache); -#ifdef SMP spin_lock(&tag->spin); -#endif return(tag->segments); } @@ -230,13 +215,11 @@ static __inline void bus_dma_tag_unlock(bus_dma_tag_t tag) { -#ifdef SMP if (tag->flags & BUS_DMA_PROTECTED) return; if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS) spin_unlock(&tag->spin); -#endif } /* @@ -276,9 +259,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO); -#ifdef SMP spin_init(&newtag->spin); -#endif newtag->parent = parent; newtag->alignment = alignment; newtag->boundary = boundary; @@ -1123,9 +1104,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat) } bz = new_bz; -#ifdef SMP spin_init(&bz->spin); -#endif STAILQ_INIT(&bz->bounce_page_list); STAILQ_INIT(&bz->bounce_map_waitinglist); bz->free_bpages = 0; diff --git a/sys/platform/pc64/x86_64/db_interface.c b/sys/platform/pc64/x86_64/db_interface.c index e3b76a8bcd..9b396cefe0 100644 --- a/sys/platform/pc64/x86_64/db_interface.c +++ b/sys/platform/pc64/x86_64/db_interface.c @@ -157,7 +157,6 @@ kdb_trap(int type, int code, struct x86_64_saved_state *regs) ddb_regs = *regs; crit_enter(); -#ifdef SMP db_printf("\nCPU%d stopping CPUs: 0x%08jx\n", mycpu->gd_cpuid, (uintmax_t)mycpu->gd_other_cpus); @@ -165,7 +164,6 @@ kdb_trap(int type, int code, struct x86_64_saved_state *regs) stop_cpus(mycpu->gd_other_cpus); db_printf(" stopped\n"); -#endif /* SMP */ setjmp(db_global_jmpbuf); db_global_jmpbuf_valid = TRUE; @@ -181,7 +179,6 @@ kdb_trap(int type, int code, struct x86_64_saved_state *regs) /* vcons_set_mode(0); */ db_global_jmpbuf_valid = FALSE; -#ifdef SMP db_printf("\nCPU%d restarting CPUs: 0x%016jx\n", mycpu->gd_cpuid, (uintmax_t)stopped_cpus); @@ -196,7 +193,6 @@ kdb_trap(int type, int code, struct x86_64_saved_state *regs) restart_cpus(stopped_cpus); db_printf(" restarted\n"); -#endif /* SMP */ crit_exit(); regs->tf_rip = ddb_regs.tf_rip; diff --git a/sys/platform/pc64/x86_64/genassym.c b/sys/platform/pc64/x86_64/genassym.c index 13272f28d9..bc8e0f8ff3 100644 --- a/sys/platform/pc64/x86_64/genassym.c +++ b/sys/platform/pc64/x86_64/genassym.c @@ -237,10 +237,8 @@ ASSYM(MSR_FSBASE, MSR_FSBASE); ASSYM(MACHINTR_INTREN, offsetof(struct 
machintr_abi, intr_enable)); ASSYM(TDPRI_INT_SUPPORT, TDPRI_INT_SUPPORT); -#ifdef SMP ASSYM(CPUMASK_LOCK, CPUMASK_LOCK); ASSYM(CPUMASK_BIT, CPUMASK_BIT); -#endif ASSYM(IOAPIC_IRQI_ADDR, offsetof(struct ioapic_irqinfo, io_addr)); ASSYM(IOAPIC_IRQI_IDX, offsetof(struct ioapic_irqinfo, io_idx)); diff --git a/sys/platform/pc64/x86_64/ipl.s b/sys/platform/pc64/x86_64/ipl.s index 984fa924e5..900b91a3f6 100644 --- a/sys/platform/pc64/x86_64/ipl.s +++ b/sys/platform/pc64/x86_64/ipl.s @@ -150,10 +150,8 @@ doreti_next: cli /* re-assert cli on loop */ movq %rax,%rcx /* irq mask unavailable due to BGL */ notq %rcx -#ifdef SMP testl $RQF_IPIQ,PCPU(reqflags) jnz doreti_ipiq -#endif testl $RQF_TIMER,PCPU(reqflags) jnz doreti_timer /* @@ -307,7 +305,6 @@ doreti_ast: movl %r12d,%eax /* restore cpl for loop */ jmp doreti_next -#ifdef SMP /* * IPIQ message pending. We clear RQF_IPIQ automatically. */ @@ -323,7 +320,6 @@ doreti_ipiq: decl PCPU(intr_nesting_level) movl %r12d,%eax /* restore cpl for loop */ jmp doreti_next -#endif doreti_timer: movl %eax,%r12d /* save cpl (can't use stack) */ @@ -359,10 +355,8 @@ splz_next: cli movq %rax,%rcx /* rcx = ~CPL */ notq %rcx -#ifdef SMP testl $RQF_IPIQ,PCPU(reqflags) jnz splz_ipiq -#endif testl $RQF_TIMER,PCPU(reqflags) jnz splz_timer /* @@ -444,7 +438,6 @@ splz_soft: popq %rax jmp splz_next -#ifdef SMP splz_ipiq: andl $~RQF_IPIQ,PCPU(reqflags) sti @@ -452,7 +445,6 @@ splz_ipiq: call lwkt_process_ipiq popq %rax jmp splz_next -#endif splz_timer: andl $~RQF_TIMER,PCPU(reqflags) diff --git a/sys/platform/pc64/x86_64/machdep.c b/sys/platform/pc64/x86_64/machdep.c index 4fa35d8e67..227eef8ba0 100644 --- a/sys/platform/pc64/x86_64/machdep.c +++ b/sys/platform/pc64/x86_64/machdep.c @@ -163,11 +163,7 @@ struct privatespace CPU_prvspace[MAXCPU] __aligned(4096); /* XXX */ int _udatasel, _ucodesel, _ucode32sel; u_long atdevbase; -#ifdef SMP int64_t tsc_offsets[MAXCPU]; -#else -int64_t tsc_offsets[1]; -#endif #if defined(SWTCH_OPTIM_STATS) extern int swtch_optim_stats; @@ -1050,8 +1046,6 @@ cpu_idle(void) } } -#ifdef SMP - /* * This routine is called if a spinlock has been held through the * exponential backoff period and is seriously contested. On a real cpu @@ -1063,8 +1057,6 @@ cpu_spinlock_contested(void) cpu_pause(); } -#endif - /* * Clear registers on exec */ @@ -1492,10 +1484,8 @@ getmemsize(caddr_t kmdp, u_int64_t first) } base_memory = physmap[1] / 1024; -#ifdef SMP /* make hole for AP bootstrap code */ physmap[1] = mp_bootaddress(base_memory); -#endif /* Save EBDA address, if any */ ebda_addr = (u_long)(*(u_short *)(KERNBASE + 0x40e)); @@ -2457,13 +2447,11 @@ struct spinlock_deprecated clock_spinlock; static void init_locks(void) { -#ifdef SMP /* * Get the initial mplock with a count of 1 for the BSP. * This uses a LOGICAL cpu ID, ie BSP == 0. 
*/ cpu_get_initial_mplock(); -#endif /* DEPRECATED */ spin_lock_init(&mcount_spinlock); spin_lock_init(&intr_spinlock); diff --git a/sys/platform/pc64/x86_64/npx.c b/sys/platform/pc64/x86_64/npx.c index e69c08812a..20e4bafb07 100644 --- a/sys/platform/pc64/x86_64/npx.c +++ b/sys/platform/pc64/x86_64/npx.c @@ -55,17 +55,11 @@ #include #include -#ifndef SMP -#include -#endif #include #include #include #include #include -#ifndef SMP -#include -#endif #include #include #include diff --git a/sys/platform/pc64/x86_64/pmap.c b/sys/platform/pc64/x86_64/pmap.c index e0a2eb7c90..441b365233 100644 --- a/sys/platform/pc64/x86_64/pmap.c +++ b/sys/platform/pc64/x86_64/pmap.c @@ -838,13 +838,8 @@ pmap_bootstrap(vm_paddr_t *firstaddr) * cases rather then invl1pg. Actually, I don't even know why it * works under UP because self-referential page table mappings */ -#ifdef SMP pgeflag = 0; -#else - if (cpu_feature & CPUID_PGE) - pgeflag = PG_G; -#endif - + /* * Initialize the 4MB page size flag */ @@ -866,33 +861,11 @@ pmap_bootstrap(vm_paddr_t *firstaddr) ptditmp &= ~(NBPDR - 1); ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag; pdir4mb = ptditmp; - -#ifndef SMP - /* - * Enable the PSE mode. If we are SMP we can't do this - * now because the APs will not be able to use it when - * they boot up. - */ - load_cr4(rcr4() | CR4_PSE); - - /* - * We can do the mapping here for the single processor - * case. We simply ignore the old page table page from - * now on. - */ - /* - * For SMP, we still need 4K pages to bootstrap APs, - * PSE will be enabled as soon as all APs are up. - */ - PTD[KPTDI] = (pd_entry_t)ptditmp; - cpu_invltlb(); -#endif } #endif cpu_invltlb(); } -#ifdef SMP /* * Set 4mb pdir for mp startup */ @@ -906,7 +879,6 @@ pmap_set_opt(void) } } } -#endif /* * Initialize the pmap module. @@ -3903,17 +3875,10 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired, * the pmap_inval_*() API that is)... it's ok to do this for simple * wiring changes. 
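The pmap activation hunks here always run the SMP handshake: publish this
CPU's bit in pm_active, then, if CPUMASK_LOCK is set, sit in
pmap_interlock_wait() until the in-flight invalidation finishes. A
user-space C11 analogue of that publish-then-wait shape (the MASK_LOCK bit,
names, and 64-bit mask width are illustrative, not the kernel's cpumask_t):

	#include <stdatomic.h>
	#include <stdint.h>

	#define MASK_LOCK	(1ULL << 63)	/* stand-in for CPUMASK_LOCK */

	static void
	activate_cpu(_Atomic uint64_t *pm_active, uint64_t my_bit)
	{
		atomic_fetch_or(pm_active, my_bit);	/* publish ourselves */
		while (atomic_load_explicit(pm_active,
		    memory_order_acquire) & MASK_LOCK)
			__builtin_ia32_pause();		/* wait out the flush */
	}

The acquire load matters: once the lock bit reads clear, later MMU-related
loads must not be reordered ahead of that observation.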
*/ -#ifdef SMP if (wired) atomic_set_long(ptep, PG_W); else atomic_clear_long(ptep, PG_W); -#else - if (wired) - atomic_set_long_nonlocked(ptep, PG_W); - else - atomic_clear_long_nonlocked(ptep, PG_W); -#endif pv_put(pv); lwkt_reltoken(&pmap->pm_token); } @@ -4292,11 +4257,7 @@ pmap_ts_referenced(vm_page_t m) continue; pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT); if (pte && (*pte & PG_A)) { -#ifdef SMP atomic_clear_long(pte, PG_A); -#else - atomic_clear_long_nonlocked(pte, PG_A); -#endif rtval++; if (rtval > 4) break; @@ -4554,13 +4515,9 @@ pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm) lp->lwp_vmspace = newvm; if (curthread->td_lwp == lp) { pmap = vmspace_pmap(newvm); -#if defined(SMP) atomic_set_cpumask(&pmap->pm_active, mycpu->gd_cpumask); if (pmap->pm_active & CPUMASK_LOCK) pmap_interlock_wait(newvm); -#else - pmap->pm_active |= 1; -#endif #if defined(SWTCH_OPTIM_STATS) tlb_flush_count++; #endif @@ -4568,18 +4525,12 @@ pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm) curthread->td_pcb->pcb_cr3 |= PG_RW | PG_U | PG_V; load_cr3(curthread->td_pcb->pcb_cr3); pmap = vmspace_pmap(oldvm); -#if defined(SMP) atomic_clear_cpumask(&pmap->pm_active, mycpu->gd_cpumask); -#else - pmap->pm_active &= ~(cpumask_t)1; -#endif } crit_exit(); } } -#ifdef SMP - /* * Called when switching to a locked pmap, used to interlock against pmaps * undergoing modifications to prevent us from activating the MMU for the @@ -4609,8 +4560,6 @@ pmap_interlock_wait(struct vmspace *vm) } } -#endif - vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) { diff --git a/sys/platform/pc64/x86_64/pmap_inval.c b/sys/platform/pc64/x86_64/pmap_inval.c index 874acbd4b4..a7f743191f 100644 --- a/sys/platform/pc64/x86_64/pmap_inval.c +++ b/sys/platform/pc64/x86_64/pmap_inval.c @@ -90,7 +90,6 @@ void pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va) { cpumask_t oactive; -#ifdef SMP cpumask_t nactive; DEBUG_PUSH_INFO("pmap_inval_interlock"); @@ -106,9 +105,6 @@ pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va) cpu_pause(); } DEBUG_POP_INFO(); -#else - oactive = pmap->pm_active & ~CPUMASK_LOCK; -#endif KKASSERT((info->pir_flags & PIRF_CPUSYNC) == 0); info->pir_va = va; info->pir_flags = PIRF_CPUSYNC; @@ -126,9 +122,7 @@ void pmap_inval_deinterlock(pmap_inval_info_t info, pmap_t pmap) { KKASSERT(info->pir_flags & PIRF_CPUSYNC); -#ifdef SMP atomic_clear_cpumask(&pmap->pm_active, CPUMASK_LOCK); -#endif lwkt_cpusync_deinterlock(&info->pir_cpusync); info->pir_flags = 0; } diff --git a/sys/platform/pc64/x86_64/support.s b/sys/platform/pc64/x86_64/support.s index b15e81d2f8..b3fa072eda 100644 --- a/sys/platform/pc64/x86_64/support.s +++ b/sys/platform/pc64/x86_64/support.s @@ -337,9 +337,7 @@ ENTRY(casuword32) ja fusufault movl %esi,%eax /* old */ -#ifdef SMP lock -#endif cmpxchgl %edx,(%rdi) /* new = %edx */ /* @@ -368,9 +366,7 @@ ENTRY(casuword) ja fusufault movq %rsi,%rax /* old */ -#ifdef SMP lock -#endif cmpxchgq %rdx,(%rdi) /* new = %rdx */ /* diff --git a/sys/platform/pc64/x86_64/swtch.s b/sys/platform/pc64/x86_64/swtch.s index 7c0c244103..7e6956fe5a 100644 --- a/sys/platform/pc64/x86_64/swtch.s +++ b/sys/platform/pc64/x86_64/swtch.s @@ -84,11 +84,7 @@ #include "assym.s" -#if defined(SMP) #define MPLOCKED lock ; -#else -#define MPLOCKED -#endif .data @@ -327,7 +323,6 @@ ENTRY(cpu_heavy_restore) */ movq TD_LWP(%rax),%rcx movq LWP_VMSPACE(%rcx),%rcx /* RCX = vmspace */ -#ifdef SMP movq %rax,%r12 /* save newthread ptr */ 1: movq 
VM_PMAP+PM_ACTIVE(%rcx),%rax /* old contents */ @@ -360,10 +355,6 @@ ENTRY(cpu_heavy_restore) jmp 2f /* unconditional reload */ 1: movq %r12,%rax /* restore RAX = newthread */ -#else - movq PCPU(cpumask),%rsi - orq %rsi,VM_PMAP+PM_ACTIVE(%rcx) -#endif /* * Restore the MMU address space. If it is the same as the last * thread we don't have to invalidate the tlb (i.e. reload cr3). @@ -635,12 +626,10 @@ ENTRY(cpu_idle_restore) movq %rcx,%cr3 andl $~TDF_RUNNING,TD_FLAGS(%rbx) orl $TDF_RUNNING,TD_FLAGS(%rax) /* manual, no switch_return */ -#ifdef SMP cmpl $0,PCPU(cpuid) je 1f call ap_init 1: -#endif /* * ap_init can decide to enable interrupts early, but otherwise, or if * we are UP, do it here. diff --git a/sys/platform/pc64/x86_64/trap.c b/sys/platform/pc64/x86_64/trap.c index 0e373cfd9b..85fe0c2a06 100644 --- a/sys/platform/pc64/x86_64/trap.c +++ b/sys/platform/pc64/x86_64/trap.c @@ -90,20 +90,12 @@ #include #include -#ifdef SMP - #define MAKEMPSAFE(have_mplock) \ if (have_mplock == 0) { \ get_mplock(); \ have_mplock = 1; \ } -#else - -#define MAKEMPSAFE(have_mplock) - -#endif - extern void trap(struct trapframe *frame); static int trap_pfault(struct trapframe *, int); @@ -405,9 +397,7 @@ trap(struct trapframe *frame) struct proc *p; int sticks = 0; int i = 0, ucode = 0, type, code; -#ifdef SMP int have_mplock = 0; -#endif #ifdef INVARIANTS int crit_count = td->td_critcount; lwkt_tokref_t curstop = td->td_toks_stop; @@ -802,10 +792,8 @@ out: userret(lp, frame, sticks); userexit(lp); out2: ; -#ifdef SMP if (have_mplock) rel_mplock(); -#endif if (p != NULL && lp != NULL) KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid); #ifdef INVARIANTS @@ -985,11 +973,9 @@ trap_fatal(struct trapframe *frame, vm_offset_t eva) msg = "UNKNOWN"; kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg, ISPL(frame->tf_cs) == SEL_UPL ? 
"user" : "kernel"); -#ifdef SMP /* three separate prints in case of a trap on an unmapped page */ kprintf("cpuid = %d; ", mycpu->gd_cpuid); kprintf("lapic->id = %08x\n", lapic->id); -#endif if (type == T_PAGEFLT) { kprintf("fault virtual address = 0x%lx\n", eva); kprintf("fault code = %s %s %s, %s\n", @@ -1088,11 +1074,9 @@ dblfault_handler(struct trapframe *frame) kprintf("rip = 0x%lx\n", frame->tf_rip); kprintf("rsp = 0x%lx\n", frame->tf_rsp); kprintf("rbp = 0x%lx\n", frame->tf_rbp); -#ifdef SMP /* three separate prints in case of a trap on an unmapped page */ kprintf("cpuid = %d; ", mycpu->gd_cpuid); kprintf("lapic->id = %08x\n", lapic->id); -#endif panic("double fault"); } @@ -1121,9 +1105,7 @@ syscall2(struct trapframe *frame) #ifdef INVARIANTS int crit_count = td->td_critcount; #endif -#ifdef SMP int have_mplock = 0; -#endif register_t *argp; u_int code; int reg, regcnt; @@ -1343,13 +1325,11 @@ bad: STOPEVENT(p, S_SCX, code); userexit(lp); -#ifdef SMP /* * Release the MP lock if we had to get it */ if (have_mplock) rel_mplock(); -#endif KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error); #ifdef INVARIANTS KASSERT(crit_count == td->td_critcount, diff --git a/sys/platform/vkernel/conf/files b/sys/platform/vkernel/conf/files index 74ad914782..28f42b4a27 100644 --- a/sys/platform/vkernel/conf/files +++ b/sys/platform/vkernel/conf/files @@ -31,7 +31,7 @@ vfs/smbfs/smbfs_vnops.c optional smbfs cpu/i386/misc/atomic.c standard \ compile-with "${CC} -c ${CFLAGS} ${WERROR} ${DEFINED_PROF:S/^$/-fomit-frame-pointer/} ${.IMPSRC}" platform/vkernel/i386/autoconf.c standard -platform/vkernel/i386/mp.c optional smp \ +platform/vkernel/i386/mp.c standard \ compile-with "${CC} -c -pthread ${CFLAGS} ${WERROR} -I/usr/include ${.IMPSRC}" # # DDB XXX diff --git a/sys/platform/vkernel/i386/autoconf.c b/sys/platform/vkernel/i386/autoconf.c index beb24be98e..7ab7f99f80 100644 --- a/sys/platform/vkernel/i386/autoconf.c +++ b/sys/platform/vkernel/i386/autoconf.c @@ -181,10 +181,8 @@ cpu_startup(void *dummy) (uintmax_t)ptoa(vmstats.v_free_count) / 1024 / 1024); bufinit(); vm_pager_bufferinit(); -#ifdef SMP mp_start(); mp_announce(); -#endif cpu_setregs(); } diff --git a/sys/platform/vkernel/i386/cpu_regs.c b/sys/platform/vkernel/i386/cpu_regs.c index b700d40506..548abea28d 100644 --- a/sys/platform/vkernel/i386/cpu_regs.c +++ b/sys/platform/vkernel/i386/cpu_regs.c @@ -117,11 +117,7 @@ static void fill_fpregs_xmm (struct savexmm *, struct save87 *); extern void ffs_rawread_setup(void); #endif /* DIRECTIO */ -#ifdef SMP int64_t tsc_offsets[MAXCPU]; -#else -int64_t tsc_offsets[1]; -#endif #if defined(SWTCH_OPTIM_STATS) extern int swtch_optim_stats; @@ -704,9 +700,7 @@ cpu_idle(void) if (cpu_idle_hlt && (td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) { splz(); -#ifdef SMP KKASSERT(MP_LOCK_HELD() == 0); -#endif if ((td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) { #ifdef DEBUGIDLE struct timeval tv1, tv2; @@ -731,16 +725,12 @@ cpu_idle(void) ++cpu_idle_hltcnt; } else { splz(); -#ifdef SMP __asm __volatile("pause"); -#endif ++cpu_idle_spincnt; } } } -#ifdef SMP - /* * Called by the spinlock code with or without a critical section held * when a spinlock is found to be seriously constested. 
@@ -754,8 +744,6 @@ cpu_spinlock_contested(void) cpu_pause(); } -#endif - /* * Clear registers on exec */ diff --git a/sys/platform/vkernel/i386/db_interface.c b/sys/platform/vkernel/i386/db_interface.c index f7b9225c60..11c2d69342 100644 --- a/sys/platform/vkernel/i386/db_interface.c +++ b/sys/platform/vkernel/i386/db_interface.c @@ -135,7 +135,6 @@ kdb_trap(int type, int code, struct i386_saved_state *regs) } crit_enter(); -#ifdef SMP db_printf("\nCPU%d stopping CPUs: 0x%08x\n", mycpu->gd_cpuid, mycpu->gd_other_cpus); @@ -143,7 +142,6 @@ kdb_trap(int type, int code, struct i386_saved_state *regs) stop_cpus(mycpu->gd_other_cpus); db_printf(" stopped\n"); -#endif /* SMP */ setjmp(db_global_jmpbuf); db_global_jmpbuf_valid = TRUE; @@ -159,7 +157,6 @@ kdb_trap(int type, int code, struct i386_saved_state *regs) vcons_set_mode(0); db_global_jmpbuf_valid = FALSE; -#ifdef SMP db_printf("\nCPU%d restarting CPUs: 0x%016jx\n", mycpu->gd_cpuid, (uintmax_t)stopped_cpus); @@ -172,7 +169,6 @@ kdb_trap(int type, int code, struct i386_saved_state *regs) restart_cpus(stopped_cpus); db_printf(" restarted\n"); -#endif /* SMP */ crit_exit(); regs->tf_eip = ddb_regs.tf_eip; diff --git a/sys/platform/vkernel/i386/exception.c b/sys/platform/vkernel/i386/exception.c index e274e26238..faacd4f37f 100644 --- a/sys/platform/vkernel/i386/exception.c +++ b/sys/platform/vkernel/i386/exception.c @@ -31,8 +31,6 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $DragonFly: src/sys/platform/vkernel/i386/exception.c,v 1.11 2008/04/28 07:05:09 dillon Exp $ */ #include "opt_ddb.h" @@ -65,8 +63,6 @@ static void exc_segfault(int signo, siginfo_t *info, void *ctx); static void exc_debugger(int signo, siginfo_t *info, void *ctx); #endif -#ifdef SMP - /* * IPIs are 'fast' interrupts, so we deal with them directly from our * signal handler. 
@@ -125,8 +121,6 @@ stopsig(int nada, siginfo_t *info, void *ctxp) --curthread->td_critcount; } -#endif - #if 0 /* @@ -175,12 +169,10 @@ init_exceptions(void) sa.sa_sigaction = exc_debugger; sigaction(SIGQUIT, &sa, NULL); #endif -#ifdef SMP sa.sa_sigaction = ipisig; sigaction(SIGUSR1, &sa, NULL); sa.sa_sigaction = stopsig; sigaction(SIGXCPU, &sa, NULL); -#endif #if 0 sa.sa_sigaction = iosig; sigaction(SIGIO, &sa, NULL); diff --git a/sys/platform/vkernel/i386/genassym.c b/sys/platform/vkernel/i386/genassym.c index 31ca30de14..219ed5445c 100644 --- a/sys/platform/vkernel/i386/genassym.c +++ b/sys/platform/vkernel/i386/genassym.c @@ -35,7 +35,6 @@ * * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 * $FreeBSD: src/sys/i386/i386/genassym.c,v 1.86.2.3 2002/03/03 05:42:49 nyan Exp $ - * $DragonFly: src/sys/platform/vkernel/i386/genassym.c,v 1.58 2007/06/29 21:54:11 dillon Exp $ */ #include @@ -93,10 +92,8 @@ ASSYM(TDF_RUNNING, TDF_RUNNING); ASSYM(TD_SAVEFPU, offsetof(struct thread, td_mach) + offsetof(struct md_thread, mtd_savefpu)); ASSYM(TDPRI_INT_SUPPORT, TDPRI_INT_SUPPORT); -#ifdef SMP ASSYM(CPUMASK_LOCK, CPUMASK_LOCK); ASSYM(CPUMASK_BIT, CPUMASK_BIT); -#endif ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap)); ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall)); diff --git a/sys/platform/vkernel/i386/mp.c b/sys/platform/vkernel/i386/mp.c index ffafd8cf8e..306b0c1313 100644 --- a/sys/platform/vkernel/i386/mp.c +++ b/sys/platform/vkernel/i386/mp.c @@ -207,8 +207,6 @@ cpu_send_ipiq(int dcpu) void smp_invltlb(void) { -#ifdef SMP -#endif } void diff --git a/sys/platform/vkernel/i386/npx.c b/sys/platform/vkernel/i386/npx.c index 6d686d498c..faa96c0586 100644 --- a/sys/platform/vkernel/i386/npx.c +++ b/sys/platform/vkernel/i386/npx.c @@ -36,7 +36,6 @@ * * from: @(#)npx.c 7.2 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/isa/npx.c,v 1.80.2.3 2001/10/20 19:04:38 tegge Exp $ - * $DragonFly: src/sys/platform/vkernel/i386/npx.c,v 1.8 2008/01/29 19:54:56 dillon Exp $ */ #include "opt_debug_npx.h" @@ -58,17 +57,11 @@ #include #include -#ifndef SMP -#include -#endif #include #include #include #include #include -#ifndef SMP -#include -#endif #include #include #include diff --git a/sys/platform/vkernel/i386/swtch.s b/sys/platform/vkernel/i386/swtch.s index edb9b391c4..1201b7ad7f 100644 --- a/sys/platform/vkernel/i386/swtch.s +++ b/sys/platform/vkernel/i386/swtch.s @@ -66,7 +66,6 @@ * SUCH DAMAGE. 
* * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $ - * $DragonFly: src/sys/platform/vkernel/i386/swtch.s,v 1.8 2007/07/01 02:51:43 dillon Exp $ */ #include "use_npx.h" @@ -81,11 +80,7 @@ #include "assym.s" -#if defined(SMP) #define MPLOCKED lock ; -#else -#define MPLOCKED -#endif .data @@ -503,12 +498,10 @@ ENTRY(cpu_idle_restore) pushl $0 andl $~TDF_RUNNING,TD_FLAGS(%ebx) orl $TDF_RUNNING,TD_FLAGS(%eax) /* manual, no switch_return */ -#ifdef SMP cmpl $0,PCPU(cpuid) je 1f call ap_init 1: -#endif /* sti */ jmp cpu_idle diff --git a/sys/platform/vkernel/i386/trap.c b/sys/platform/vkernel/i386/trap.c index 40ac62d052..f22b91e9ff 100644 --- a/sys/platform/vkernel/i386/trap.c +++ b/sys/platform/vkernel/i386/trap.c @@ -96,20 +96,12 @@ #include #include -#ifdef SMP - #define MAKEMPSAFE(have_mplock) \ if (have_mplock == 0) { \ get_mplock(); \ have_mplock = 1; \ } -#else - -#define MAKEMPSAFE(have_mplock) - -#endif - int (*pmath_emulate) (struct trapframe *); static int trap_pfault (struct trapframe *, int, vm_offset_t); @@ -389,9 +381,7 @@ user_trap(struct trapframe *frame) struct proc *p; int sticks = 0; int i = 0, ucode = 0, type, code; -#ifdef SMP int have_mplock = 0; -#endif #ifdef INVARIANTS int crit_count = td->td_critcount; lwkt_tokref_t curstop = td->td_toks_stop; @@ -650,10 +640,8 @@ out: userret(lp, frame, sticks); userexit(lp); out2: ; -#ifdef SMP if (have_mplock) rel_mplock(); -#endif KTR_LOG(kernentry_trap_ret, lp->lwp_proc->p_pid, lp->lwp_tid); #ifdef INVARIANTS KASSERT(crit_count == td->td_critcount, @@ -674,9 +662,7 @@ kern_trap(struct trapframe *frame) struct lwp *lp; struct proc *p; int i = 0, ucode = 0, type, code; -#ifdef SMP int have_mplock = 0; -#endif #ifdef INVARIANTS int crit_count = td->td_critcount; lwkt_tokref_t curstop = td->td_toks_stop; @@ -858,10 +844,8 @@ kernel_trap: out2: ; -#ifdef SMP if (have_mplock) rel_mplock(); -#endif #ifdef INVARIANTS KASSERT(crit_count == td->td_critcount, ("trap: critical section count mismatch! %d/%d", @@ -964,10 +948,8 @@ trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva) type, trap_msg[type], (usermode ? "user" : "kernel")); } -#ifdef SMP /* two separate prints in case of a trap on an unmapped page */ kprintf("cpuid = %d\n", mycpu->gd_cpuid); -#endif if (type == T_PAGEFLT) { kprintf("fault virtual address = %p\n", (void *)eva); kprintf("fault code = %s %s, %s\n", @@ -1012,14 +994,12 @@ trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva) if (curthread->td_critcount) kprintf("(CRIT)"); kprintf("\n"); -#ifdef SMP /** * XXX FIXME: * we probably SHOULD have stopped the other CPUs before now! * another CPU COULD have been touching cpl at this moment... 
*/ kprintf(" <- SMP: XXX"); -#endif kprintf("\n"); #ifdef KDB @@ -1058,10 +1038,8 @@ dblfault_handler(void) kprintf("eip = 0x%x\n", gd->gd_common_tss.tss_eip); kprintf("esp = 0x%x\n", gd->gd_common_tss.tss_esp); kprintf("ebp = 0x%x\n", gd->gd_common_tss.tss_ebp); -#ifdef SMP /* two separate prints in case of a trap on an unmapped page */ kprintf("cpuid = %d\n", mycpu->gd_cpuid); -#endif panic("double fault"); } @@ -1090,9 +1068,7 @@ syscall2(struct trapframe *frame) #ifdef INVARIANTS int crit_count = td->td_critcount; #endif -#ifdef SMP int have_mplock = 0; -#endif u_int code; union sysunion args; @@ -1280,13 +1256,11 @@ bad: STOPEVENT(p, S_SCX, code); userexit(lp); -#ifdef SMP /* * Release the MP lock if we had to get it */ if (have_mplock) rel_mplock(); -#endif KTR_LOG(kernentry_syscall_ret, lp->lwp_proc->p_pid, lp->lwp_tid, error); #ifdef INVARIANTS KASSERT(crit_count == td->td_critcount, diff --git a/sys/platform/vkernel/include/clock.h b/sys/platform/vkernel/include/clock.h index 329fe5ed6a..4f461ac309 100644 --- a/sys/platform/vkernel/include/clock.h +++ b/sys/platform/vkernel/include/clock.h @@ -4,7 +4,6 @@ * This file is in the public domain. * * $FreeBSD: src/sys/i386/include/clock.h,v 1.38.2.1 2002/11/02 04:41:50 iwasaki Exp $ - * $DragonFly: src/sys/platform/vkernel/include/clock.h,v 1.2 2008/05/10 17:24:11 dillon Exp $ */ #ifndef _MACHINE_CLOCK_H_ @@ -28,9 +27,7 @@ extern int tsc_present; extern int64_t tsc_frequency; extern int tsc_is_broken; extern int wall_cmos_clock; -#ifdef SMP /* APIC-IO */ extern int apic_8254_intr; -#endif /* * Driver to clock driver interface. diff --git a/sys/platform/vkernel/include/pmap.h b/sys/platform/vkernel/include/pmap.h index 56ad7357e8..56eb1a2c9b 100644 --- a/sys/platform/vkernel/include/pmap.h +++ b/sys/platform/vkernel/include/pmap.h @@ -43,7 +43,6 @@ * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 2001/10/03 07:15:37 peter Exp $ - * $DragonFly: src/sys/platform/vkernel/include/pmap.h,v 1.4 2007/07/01 02:51:44 dillon Exp $ */ #ifndef _MACHINE_PMAP_H_ @@ -182,9 +181,7 @@ void *pmap_mapdev (vm_paddr_t, vm_size_t); void pmap_unmapdev (vm_offset_t, vm_size_t); void pmap_release(struct pmap *pmap); struct vm_page *pmap_use_pt (pmap_t, vm_offset_t); -#ifdef SMP void pmap_set_opt (void); -#endif #endif /* _KERNEL */ diff --git a/sys/platform/vkernel/include/smp.h b/sys/platform/vkernel/include/smp.h index 2df73dbcc0..cdbc91e701 100644 --- a/sys/platform/vkernel/include/smp.h +++ b/sys/platform/vkernel/include/smp.h @@ -7,8 +7,6 @@ * ---------------------------------------------------------------------------- * * $FreeBSD: src/sys/i386/include/smp.h,v 1.50.2.5 2001/02/13 22:32:45 tegge Exp $ - * $DragonFly: src/sys/platform/vkernel/include/smp.h,v 1.3 2007/07/02 02:37:04 dillon Exp $ - * */ #ifndef _MACHINE_SMP_H_ @@ -16,8 +14,6 @@ #ifdef _KERNEL -#if defined(SMP) - #ifndef LOCORE #if 0 @@ -180,11 +176,6 @@ int get_logical_CPU_number_within_core(int cpuid); #define get_apicid_from_cpuid(cpuid) cpuid #endif /* !LOCORE */ -#else /* !SMP */ - -#define smp_active_mask 1 /* smp_active_mask always 1 on UP machines */ - -#endif #endif /* _KERNEL */ #endif /* _MACHINE_SMP_H_ */ diff --git a/sys/platform/vkernel/include/thread.h b/sys/platform/vkernel/include/thread.h index 1c4637b484..aae75a5de3 100644 --- a/sys/platform/vkernel/include/thread.h +++ b/sys/platform/vkernel/include/thread.h @@ -79,12 +79,7 @@ _get_mycpu(void) } #define mycpu _get_mycpu() - -#ifdef 
SMP #define mycpuid (_get_mycpu()->gd_cpuid) -#else -#define mycpuid 0 -#endif /* * note: curthread is never NULL, but curproc can be. Also note that diff --git a/sys/platform/vkernel/platform/busdma_machdep.c b/sys/platform/vkernel/platform/busdma_machdep.c index b1d01af290..0c4b806958 100644 --- a/sys/platform/vkernel/platform/busdma_machdep.c +++ b/sys/platform/vkernel/platform/busdma_machdep.c @@ -99,11 +99,7 @@ struct bounce_zone { STAILQ_ENTRY(bounce_zone) links; STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; -#ifdef SMP struct spinlock spin; -#else - int unused0; -#endif int total_bpages; int free_bpages; int reserved_bpages; @@ -119,13 +115,8 @@ struct bounce_zone { struct sysctl_oid *sysctl_tree; }; -#ifdef SMP #define BZ_LOCK(bz) spin_lock(&(bz)->spin) #define BZ_UNLOCK(bz) spin_unlock(&(bz)->spin) -#else -#define BZ_LOCK(bz) crit_enter() -#define BZ_UNLOCK(bz) crit_exit() -#endif static struct lwkt_token bounce_zone_tok = LWKT_TOKEN_INITIALIZER(bounce_zone_token); @@ -1016,9 +1007,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat) } bz = new_bz; -#ifdef SMP spin_init(&bz->spin); -#endif STAILQ_INIT(&bz->bounce_page_list); STAILQ_INIT(&bz->bounce_map_waitinglist); bz->free_bpages = 0; diff --git a/sys/platform/vkernel/platform/init.c b/sys/platform/vkernel/platform/init.c index 2091c4a4d0..9b2dd01298 100644 --- a/sys/platform/vkernel/platform/init.c +++ b/sys/platform/vkernel/platform/init.c @@ -30,8 +30,6 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $DragonFly: src/sys/platform/vkernel/platform/init.c,v 1.56 2008/05/27 07:48:00 dillon Exp $ */ #include @@ -180,11 +178,9 @@ main(int ac, char **av) * Process options */ kernel_mem_readonly = 1; -#ifdef SMP optcpus = 2; vkernel_b_arg = 0; vkernel_B_arg = 0; -#endif lwp_cpu_lock = LCL_NONE; real_vkernel_enable = 0; @@ -324,7 +320,6 @@ main(int ac, char **av) * set ncpus here. */ tok = strtok(optarg, ":"); -#ifdef SMP optcpus = strtol(tok, NULL, 0); if (optcpus < 1 || optcpus > MAXCPU) usage_err("Bad ncpus, valid range is 1-%d", MAXCPU); @@ -341,25 +336,6 @@ main(int ac, char **av) } } - -#else - if (strtol(tok, NULL, 0) != 1) { - usage_err("You built a UP vkernel, only 1 cpu!"); - } - - /* :logical_CPU_bits argument */ - tok = strtok(NULL, ":"); - if (tok != NULL) { - usage_err("You built a UP vkernel. No CPU topology available"); - - /* :core_bits argument */ - tok = strtok(NULL, ":"); - if (tok != NULL) { - usage_err("You built a UP vkernel. No CPU topology available"); - } - } -#endif - break; case 'p': pid_file = optarg; @@ -509,13 +485,11 @@ static void init_locks(void) { -#ifdef SMP /* * Get the initial mplock with a count of 1 for the BSP. * This uses a LOGICAL cpu ID, ie BSP == 0. */ cpu_get_initial_mplock(); -#endif /* our token pool needs to work early */ lwkt_token_pool_init(); diff --git a/sys/platform/vkernel/platform/machintr.c b/sys/platform/vkernel/platform/machintr.c index b109857cea..bfcda1d787 100644 --- a/sys/platform/vkernel/platform/machintr.c +++ b/sys/platform/vkernel/platform/machintr.c @@ -30,8 +30,6 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
- * - * $DragonFly: src/sys/platform/vkernel/platform/machintr.c,v 1.17 2008/04/30 16:59:45 dillon Exp $ */ #include @@ -127,12 +125,10 @@ splz(void) while (gd->mi.gd_reqflags & (RQF_IPIQ|RQF_INTPEND)) { crit_enter_quick(td); -#ifdef SMP if (gd->mi.gd_reqflags & RQF_IPIQ) { atomic_clear_int(&gd->mi.gd_reqflags, RQF_IPIQ); lwkt_process_ipiq(); } -#endif if (gd->mi.gd_reqflags & RQF_INTPEND) { atomic_clear_int(&gd->mi.gd_reqflags, RQF_INTPEND); while ((irq = ffs(gd->gd_spending)) != 0) { diff --git a/sys/platform/vkernel/platform/pmap.c b/sys/platform/vkernel/platform/pmap.c index e455580b56..9ab978eeaf 100644 --- a/sys/platform/vkernel/platform/pmap.c +++ b/sys/platform/vkernel/platform/pmap.c @@ -2797,11 +2797,7 @@ pmap_ts_referenced(vm_page_t m) pte = pmap_pte(pv->pv_pmap, pv->pv_va); if (pte && (*pte & VPTE_A)) { -#ifdef SMP atomic_clear_long(pte, VPTE_A); -#else - atomic_clear_long_nonlocked(pte, VPTE_A); -#endif rtval++; if (rtval > 4) { break; @@ -3028,20 +3024,12 @@ pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm) lp->lwp_vmspace = newvm; if (curthread->td_lwp == lp) { pmap = vmspace_pmap(newvm); -#if defined(SMP) atomic_set_cpumask(&pmap->pm_active, mycpu->gd_cpumask); -#else - pmap->pm_active |= 1; -#endif #if defined(SWTCH_OPTIM_STATS) tlb_flush_count++; #endif pmap = vmspace_pmap(oldvm); -#if defined(SMP) atomic_clear_cpumask(&pmap->pm_active, mycpu->gd_cpumask); -#else - pmap->pm_active &= ~(cpumask_t)1; -#endif } } crit_exit(); diff --git a/sys/platform/vkernel/platform/systimer.c b/sys/platform/vkernel/platform/systimer.c index c7a0ff6efd..cb94c5871e 100644 --- a/sys/platform/vkernel/platform/systimer.c +++ b/sys/platform/vkernel/platform/systimer.c @@ -201,13 +201,10 @@ vktimer_intr(void *dummy, struct intrframe *frame) { static sysclock_t sysclock_count; struct globaldata *gd = mycpu; -#ifdef SMP struct globaldata *gscan; int n; -#endif sysclock_count = sys_cputimer->count(); -#ifdef SMP for (n = 0; n < ncpus; ++n) { gscan = globaldata_find(n); if (TAILQ_FIRST(&gscan->gd_systimerq) == NULL) @@ -219,10 +216,6 @@ vktimer_intr(void *dummy, struct intrframe *frame) systimer_intr(&sysclock_count, 0, frame); } } -#else - if (TAILQ_FIRST(&gd->gd_systimerq) != NULL) - systimer_intr(&sysclock_count, 0, frame); -#endif } /* diff --git a/sys/platform/vkernel64/conf/files b/sys/platform/vkernel64/conf/files index 55bbf4923c..e6b7e42b35 100644 --- a/sys/platform/vkernel64/conf/files +++ b/sys/platform/vkernel64/conf/files @@ -24,7 +24,7 @@ vfs/smbfs/smbfs_vnops.c optional smbfs cpu/x86_64/misc/atomic.c standard \ compile-with "${CC} -c ${CFLAGS} ${DEFINED_PROF:S/^$/-fomit-frame-pointer/} ${.IMPSRC}" platform/vkernel64/x86_64/autoconf.c standard -platform/vkernel64/x86_64/mp.c optional smp \ +platform/vkernel64/x86_64/mp.c standard \ compile-with "${CC} -c -pthread ${CFLAGS} -I/usr/include ${.IMPSRC}" # # DDB XXX diff --git a/sys/platform/vkernel64/include/clock.h b/sys/platform/vkernel64/include/clock.h index 329fe5ed6a..4f461ac309 100644 --- a/sys/platform/vkernel64/include/clock.h +++ b/sys/platform/vkernel64/include/clock.h @@ -4,7 +4,6 @@ * This file is in the public domain. 
* * $FreeBSD: src/sys/i386/include/clock.h,v 1.38.2.1 2002/11/02 04:41:50 iwasaki Exp $ - * $DragonFly: src/sys/platform/vkernel/include/clock.h,v 1.2 2008/05/10 17:24:11 dillon Exp $ */ #ifndef _MACHINE_CLOCK_H_ @@ -28,9 +27,7 @@ extern int tsc_present; extern int64_t tsc_frequency; extern int tsc_is_broken; extern int wall_cmos_clock; -#ifdef SMP /* APIC-IO */ extern int apic_8254_intr; -#endif /* * Driver to clock driver interface. diff --git a/sys/platform/vkernel64/include/smp.h b/sys/platform/vkernel64/include/smp.h index 1156c5b8f7..cd299eec14 100644 --- a/sys/platform/vkernel64/include/smp.h +++ b/sys/platform/vkernel64/include/smp.h @@ -7,8 +7,6 @@ * ---------------------------------------------------------------------------- * * $FreeBSD: src/sys/i386/include/smp.h,v 1.50.2.5 2001/02/13 22:32:45 tegge Exp $ - * $DragonFly: src/sys/platform/vkernel/include/smp.h,v 1.3 2007/07/02 02:37:04 dillon Exp $ - * */ #ifndef _MACHINE_SMP_H_ @@ -16,8 +14,6 @@ #ifdef _KERNEL -#if defined(SMP) - #ifndef LOCORE #if 0 @@ -180,11 +176,6 @@ int get_logical_CPU_number_within_core(int cpuid); #define get_apicid_from_cpuid(cpuid) cpuid #endif /* !LOCORE */ -#else /* !SMP */ - -#define smp_active_mask 1 /* smp_active_mask always 1 on UP machines */ - -#endif #endif /* _KERNEL */ #endif /* _MACHINE_SMP_H_ */ diff --git a/sys/platform/vkernel64/include/thread.h b/sys/platform/vkernel64/include/thread.h index 8d4ce066dd..ac7b995ddc 100644 --- a/sys/platform/vkernel64/include/thread.h +++ b/sys/platform/vkernel64/include/thread.h @@ -79,12 +79,7 @@ _get_mycpu(void) } #define mycpu _get_mycpu() - -#ifdef SMP #define mycpuid (_get_mycpu()->gd_cpuid) -#else -#define mycpuid 0 -#endif /* * note: curthread is never NULL, but curproc can be. Also note that diff --git a/sys/platform/vkernel64/platform/busdma_machdep.c b/sys/platform/vkernel64/platform/busdma_machdep.c index 74c698074b..4d6f4b0161 100644 --- a/sys/platform/vkernel64/platform/busdma_machdep.c +++ b/sys/platform/vkernel64/platform/busdma_machdep.c @@ -97,11 +97,7 @@ struct bounce_zone { STAILQ_ENTRY(bounce_zone) links; STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; -#ifdef SMP struct spinlock spin; -#else - int unused0; -#endif int total_bpages; int free_bpages; int reserved_bpages; @@ -117,13 +113,8 @@ struct bounce_zone { struct sysctl_oid *sysctl_tree; }; -#ifdef SMP #define BZ_LOCK(bz) spin_lock(&(bz)->spin) #define BZ_UNLOCK(bz) spin_unlock(&(bz)->spin) -#else -#define BZ_LOCK(bz) crit_enter() -#define BZ_UNLOCK(bz) crit_exit() -#endif static struct lwkt_token bounce_zone_tok = LWKT_TOKEN_INITIALIZER(bounce_zone_token); @@ -1012,9 +1003,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat) } bz = new_bz; -#ifdef SMP spin_init(&bz->spin); -#endif STAILQ_INIT(&bz->bounce_page_list); STAILQ_INIT(&bz->bounce_map_waitinglist); bz->free_bpages = 0; diff --git a/sys/platform/vkernel64/platform/init.c b/sys/platform/vkernel64/platform/init.c index d00de893da..2b0ca2d908 100644 --- a/sys/platform/vkernel64/platform/init.c +++ b/sys/platform/vkernel64/platform/init.c @@ -177,11 +177,9 @@ main(int ac, char **av) * Process options */ kernel_mem_readonly = 1; -#ifdef SMP optcpus = 2; vkernel_b_arg = 0; vkernel_B_arg = 0; -#endif lwp_cpu_lock = LCL_NONE; real_vkernel_enable = 0; @@ -321,7 +319,6 @@ main(int ac, char **av) * set ncpus here. 
*/ tok = strtok(optarg, ":"); -#ifdef SMP optcpus = strtol(tok, NULL, 0); if (optcpus < 1 || optcpus > MAXCPU) usage_err("Bad ncpus, valid range is 1-%d", MAXCPU); @@ -338,25 +335,6 @@ main(int ac, char **av) } } - -#else - if (strtol(tok, NULL, 0) != 1) { - usage_err("You built a UP vkernel, only 1 cpu!"); - } - - /* :lbits argument */ - tok = strtok(NULL, ":"); - if (tok != NULL) { - usage_err("You built a UP vkernel. No CPU topology available"); - - /* :cbits argument */ - tok = strtok(NULL, ":"); - if (tok != NULL) { - usage_err("You built a UP vkernel. No CPU topology available"); - } - } -#endif - break; case 'p': pid_file = optarg; @@ -684,13 +662,11 @@ static void init_locks(void) { -#ifdef SMP /* * Get the initial mplock with a count of 1 for the BSP. * This uses a LOGICAL cpu ID, ie BSP == 0. */ cpu_get_initial_mplock(); -#endif /* our token pool needs to work early */ lwkt_token_pool_init(); diff --git a/sys/platform/vkernel64/platform/machintr.c b/sys/platform/vkernel64/platform/machintr.c index 5e4302f420..30de5a4aa2 100644 --- a/sys/platform/vkernel64/platform/machintr.c +++ b/sys/platform/vkernel64/platform/machintr.c @@ -30,8 +30,6 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $DragonFly: src/sys/platform/vkernel/platform/machintr.c,v 1.17 2008/04/30 16:59:45 dillon Exp $ */ #include @@ -127,12 +125,10 @@ splz(void) while (gd->mi.gd_reqflags & (RQF_IPIQ|RQF_INTPEND)) { crit_enter_quick(td); -#ifdef SMP if (gd->mi.gd_reqflags & RQF_IPIQ) { atomic_clear_int(&gd->mi.gd_reqflags, RQF_IPIQ); lwkt_process_ipiq(); } -#endif if (gd->mi.gd_reqflags & RQF_INTPEND) { atomic_clear_int(&gd->mi.gd_reqflags, RQF_INTPEND); while ((irq = ffs(gd->gd_spending)) != 0) { diff --git a/sys/platform/vkernel64/platform/pmap.c b/sys/platform/vkernel64/platform/pmap.c index 593701aea6..9fd83671a7 100644 --- a/sys/platform/vkernel64/platform/pmap.c +++ b/sys/platform/vkernel64/platform/pmap.c @@ -3050,11 +3050,7 @@ pmap_ts_referenced(vm_page_t m) pte = pmap_pte(pv->pv_pmap, pv->pv_va); if (pte && (*pte & VPTE_A)) { -#ifdef SMP atomic_clear_long(pte, VPTE_A); -#else - atomic_clear_long_nonlocked(pte, VPTE_A); -#endif rtval++; if (rtval > 4) { break; @@ -3232,21 +3228,13 @@ pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm) lp->lwp_vmspace = newvm; if (curthread->td_lwp == lp) { pmap = vmspace_pmap(newvm); -#if defined(SMP) atomic_set_cpumask(&pmap->pm_active, CPUMASK(mycpu->gd_cpuid)); -#else - pmap->pm_active |= 1; -#endif #if defined(SWTCH_OPTIM_STATS) tlb_flush_count++; #endif pmap = vmspace_pmap(oldvm); -#if defined(SMP) atomic_clear_cpumask(&pmap->pm_active, CPUMASK(mycpu->gd_cpuid)); -#else - pmap->pm_active &= ~(cpumask_t)1; -#endif } } crit_exit(); diff --git a/sys/platform/vkernel64/platform/systimer.c b/sys/platform/vkernel64/platform/systimer.c index 1e94aca940..f559420c24 100644 --- a/sys/platform/vkernel64/platform/systimer.c +++ b/sys/platform/vkernel64/platform/systimer.c @@ -202,13 +202,10 @@ vktimer_intr(void *dummy, struct intrframe *frame) { static sysclock_t sysclock_count; struct globaldata *gd = mycpu; -#ifdef SMP struct globaldata *gscan; int n; -#endif sysclock_count = sys_cputimer->count(); -#ifdef SMP for (n = 0; n < ncpus; ++n) { gscan = globaldata_find(n); if (TAILQ_FIRST(&gscan->gd_systimerq) == NULL) @@ -220,10 +217,6 @@ vktimer_intr(void *dummy, struct intrframe *frame) systimer_intr(&sysclock_count, 0, frame); } } -#else - if 
(TAILQ_FIRST(&gd->gd_systimerq) != NULL) - systimer_intr(&sysclock_count, 0, frame); -#endif } /* diff --git a/sys/platform/vkernel64/x86_64/autoconf.c b/sys/platform/vkernel64/x86_64/autoconf.c index 70668e12da..1c9c30cf7a 100644 --- a/sys/platform/vkernel64/x86_64/autoconf.c +++ b/sys/platform/vkernel64/x86_64/autoconf.c @@ -178,10 +178,8 @@ cpu_startup(void *dummy) ptoa(vmstats.v_free_count) / 1024); bufinit(); vm_pager_bufferinit(); -#ifdef SMP mp_start(); mp_announce(); -#endif cpu_setregs(); } diff --git a/sys/platform/vkernel64/x86_64/cpu_regs.c b/sys/platform/vkernel64/x86_64/cpu_regs.c index 9a08a1c2ca..566582c881 100644 --- a/sys/platform/vkernel64/x86_64/cpu_regs.c +++ b/sys/platform/vkernel64/x86_64/cpu_regs.c @@ -115,11 +115,7 @@ static void fill_fpregs_xmm (struct savexmm *, struct save87 *); extern void ffs_rawread_setup(void); #endif /* DIRECTIO */ -#ifdef SMP int64_t tsc_offsets[MAXCPU]; -#else -int64_t tsc_offsets[1]; -#endif #if defined(SWTCH_OPTIM_STATS) extern int swtch_optim_stats; @@ -735,16 +731,12 @@ cpu_idle(void) ++cpu_idle_hltcnt; } else { splz(); -#ifdef SMP __asm __volatile("pause"); -#endif ++cpu_idle_spincnt; } } } -#ifdef SMP - /* * Called by the spinlock code with or without a critical section held * when a spinlock is found to be seriously contested. @@ -758,8 +750,6 @@ cpu_spinlock_contested(void) cpu_pause(); } -#endif - /* * Clear registers on exec */ diff --git a/sys/platform/vkernel64/x86_64/db_interface.c b/sys/platform/vkernel64/x86_64/db_interface.c index 4bed2ebf86..f6697ad0d3 100644 --- a/sys/platform/vkernel64/x86_64/db_interface.c +++ b/sys/platform/vkernel64/x86_64/db_interface.c @@ -157,7 +157,6 @@ kdb_trap(int type, int code, struct x86_64_saved_state *regs) ddb_regs = *regs; crit_enter(); -#ifdef SMP db_printf("\nCPU%d stopping CPUs: 0x%016jx\n", mycpu->gd_cpuid, (uintmax_t)mycpu->gd_other_cpus); @@ -165,7 +164,6 @@ stop_cpus(mycpu->gd_other_cpus); db_printf(" stopped\n"); -#endif /* SMP */ setjmp(db_global_jmpbuf); db_global_jmpbuf_valid = TRUE; @@ -181,7 +179,6 @@ vcons_set_mode(0); db_global_jmpbuf_valid = FALSE; -#ifdef SMP db_printf("\nCPU%d restarting CPUs: 0x%016jx\n", mycpu->gd_cpuid, (uintmax_t)stopped_cpus); @@ -196,7 +193,6 @@ restart_cpus(stopped_cpus); db_printf(" restarted\n"); -#endif /* SMP */ crit_exit(); regs->tf_rip = ddb_regs.tf_rip; diff --git a/sys/platform/vkernel64/x86_64/exception.c b/sys/platform/vkernel64/x86_64/exception.c index e7c240f6d8..b80ef27191 100644 --- a/sys/platform/vkernel64/x86_64/exception.c +++ b/sys/platform/vkernel64/x86_64/exception.c @@ -31,8 +31,6 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $DragonFly: src/sys/platform/vkernel/i386/exception.c,v 1.11 2008/04/28 07:05:09 dillon Exp $ */ #include "opt_ddb.h" @@ -65,8 +63,6 @@ static void exc_segfault(int signo, siginfo_t *info, void *ctx); static void exc_debugger(int signo, siginfo_t *info, void *ctx); #endif -#ifdef SMP - /* * IPIs are 'fast' interrupts, so we deal with them directly from our * signal handler.
@@ -130,8 +126,6 @@ stopsig(int nada, siginfo_t *info, void *ctxp) --td->td_critcount; } -#endif - #if 0 /* @@ -180,12 +174,10 @@ init_exceptions(void) sa.sa_sigaction = exc_debugger; sigaction(SIGQUIT, &sa, NULL); #endif -#ifdef SMP sa.sa_sigaction = ipisig; sigaction(SIGUSR1, &sa, NULL); sa.sa_sigaction = stopsig; sigaction(SIGXCPU, &sa, NULL); -#endif #if 0 sa.sa_sigaction = iosig; sigaction(SIGIO, &sa, NULL); diff --git a/sys/platform/vkernel64/x86_64/mp.c b/sys/platform/vkernel64/x86_64/mp.c index 3da0394b67..fbd4e6f8fd 100644 --- a/sys/platform/vkernel64/x86_64/mp.c +++ b/sys/platform/vkernel64/x86_64/mp.c @@ -208,8 +208,6 @@ cpu_send_ipiq(int dcpu) void smp_invltlb(void) { -#ifdef SMP -#endif } void diff --git a/sys/platform/vkernel64/x86_64/npx.c b/sys/platform/vkernel64/x86_64/npx.c index 730577e734..64872c24ac 100644 --- a/sys/platform/vkernel64/x86_64/npx.c +++ b/sys/platform/vkernel64/x86_64/npx.c @@ -55,17 +55,11 @@ #include #include -#ifndef SMP -#include -#endif #include #include #include #include #include -#ifndef SMP -#include -#endif #include #include #include diff --git a/sys/platform/vkernel64/x86_64/swtch.s b/sys/platform/vkernel64/x86_64/swtch.s index 2f32e21c0b..51761c383e 100644 --- a/sys/platform/vkernel64/x86_64/swtch.s +++ b/sys/platform/vkernel64/x86_64/swtch.s @@ -84,11 +84,7 @@ #include "assym.s" -#if defined(SMP) #define MPLOCKED lock ; -#else -#define MPLOCKED -#endif .data @@ -562,12 +558,10 @@ ENTRY(cpu_idle_restore) pushq $0 andl $~TDF_RUNNING,TD_FLAGS(%rbx) orl $TDF_RUNNING,TD_FLAGS(%rax) /* manual, no switch_return */ -#ifdef SMP cmpl $0,PCPU(cpuid) je 1f call ap_init 1: -#endif /* sti */ jmp cpu_idle diff --git a/sys/platform/vkernel64/x86_64/trap.c b/sys/platform/vkernel64/x86_64/trap.c index a0cd7188e1..ac35da8374 100644 --- a/sys/platform/vkernel64/x86_64/trap.c +++ b/sys/platform/vkernel64/x86_64/trap.c @@ -93,20 +93,12 @@ #include #include -#ifdef SMP - #define MAKEMPSAFE(have_mplock) \ if (have_mplock == 0) { \ get_mplock(); \ have_mplock = 1; \ } -#else - -#define MAKEMPSAFE(have_mplock) - -#endif - int (*pmath_emulate) (struct trapframe *); extern int trapwrite (unsigned addr); @@ -389,9 +381,7 @@ user_trap(struct trapframe *frame) struct proc *p; int sticks = 0; int i = 0, ucode = 0, type, code; -#ifdef SMP int have_mplock = 0; -#endif #ifdef INVARIANTS int crit_count = td->td_critcount; lwkt_tokref_t curstop = td->td_toks_stop; @@ -628,10 +618,8 @@ out: userret(lp, frame, sticks); userexit(lp); out2: ; -#ifdef SMP if (have_mplock) rel_mplock(); -#endif KTR_LOG(kernentry_trap_ret, lp->lwp_proc->p_pid, lp->lwp_tid); #ifdef INVARIANTS KASSERT(crit_count == td->td_critcount, @@ -652,9 +640,7 @@ kern_trap(struct trapframe *frame) struct lwp *lp; struct proc *p; int i = 0, ucode = 0, type, code; -#ifdef SMP int have_mplock = 0; -#endif #ifdef INVARIANTS int crit_count = td->td_critcount; lwkt_tokref_t curstop = td->td_toks_stop; @@ -836,10 +822,8 @@ kernel_trap: out2: ; -#ifdef SMP if (have_mplock) rel_mplock(); -#endif #ifdef INVARIANTS KASSERT(crit_count == td->td_critcount, ("trap: critical section count mismatch! %d/%d", @@ -967,10 +951,8 @@ trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva) type, trap_msg[type], (usermode ? 
"user" : "kernel")); } -#ifdef SMP /* two separate prints in case of a trap on an unmapped page */ kprintf("cpuid = %d\n", mycpu->gd_cpuid); -#endif if (type == T_PAGEFLT) { kprintf("fault virtual address = %p\n", (void *)eva); kprintf("fault code = %s %s, %s\n", @@ -1015,14 +997,12 @@ trap_fatal(struct trapframe *frame, int usermode, vm_offset_t eva) if (curthread->td_critcount) kprintf("(CRIT)"); kprintf("\n"); -#ifdef SMP /** * XXX FIXME: * we probably SHOULD have stopped the other CPUs before now! * another CPU COULD have been touching cpl at this moment... */ kprintf(" <- SMP: XXX"); -#endif kprintf("\n"); #ifdef KDB @@ -1065,10 +1045,8 @@ dblfault_handler(void) kprintf("rsp = 0x%lx\n", gd->gd_common_tss.tss_rsp); kprintf("rbp = 0x%lx\n", gd->gd_common_tss.tss_rbp); #endif -#ifdef SMP /* two separate prints in case of a trap on an unmapped page */ kprintf("cpuid = %d\n", mycpu->gd_cpuid); -#endif panic("double fault"); } @@ -1146,9 +1124,7 @@ syscall2(struct trapframe *frame) int crit_count = td->td_critcount; lwkt_tokref_t curstop = td->td_toks_stop; #endif -#ifdef SMP int have_mplock = 0; -#endif register_t *argp; u_int code; int reg, regcnt; @@ -1350,13 +1326,11 @@ bad: STOPEVENT(p, S_SCX, code); userexit(lp); -#ifdef SMP /* * Release the MP lock if we had to get it */ if (have_mplock) rel_mplock(); -#endif KTR_LOG(kernentry_syscall_ret, lp->lwp_proc->p_pid, lp->lwp_tid, error); #ifdef INVARIANTS KASSERT(&td->td_toks_base == td->td_toks_stop, diff --git a/sys/sys/callout.h b/sys/sys/callout.h index a2ff2ecfd6..072cfdda44 100644 --- a/sys/sys/callout.h +++ b/sys/sys/callout.h @@ -37,7 +37,6 @@ * * @(#)callout.h 8.2 (Berkeley) 1/21/94 * $FreeBSD: src/sys/sys/callout.h,v 1.15.2.1 2001/11/13 18:24:52 archie Exp $ - * $DragonFly: src/sys/sys/callout.h,v 1.5 2004/09/17 09:34:54 dillon Exp $ */ #ifndef _SYS_CALLOUT_H_ @@ -57,11 +56,7 @@ struct callout { void *c_arg; /* function argument */ void (*c_func) (void *); /* function to call */ int c_flags; /* state of this entry */ -#ifdef SMP struct globaldata *c_gd; -#else - void *c_gd_reserved; -#endif }; #define CALLOUT_LOCAL_ALLOC 0x0001 /* was allocated from callfree */ diff --git a/sys/sys/kinfo.h b/sys/sys/kinfo.h index 6fcd4dbf47..0f9fb43a5b 100644 --- a/sys/sys/kinfo.h +++ b/sys/sys/kinfo.h @@ -30,8 +30,6 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $DragonFly: src/sys/sys/kinfo.h,v 1.13 2007/02/18 16:17:09 corecode Exp $ */ #ifndef _SYS_KINFO_H_ @@ -226,11 +224,7 @@ void fill_kinfo_proc_kthread(struct thread *, struct kinfo_proc *); #define KINFO_END(kp) (kp->gen.type == KINFO_TYPE_END) #if defined(_KERNEL) -#ifdef SMP #define cpu_time cputime_percpu[mycpuid] -#else -#define cpu_time cputime_percpu[0] -#endif #endif #if defined(_KERNEL) diff --git a/sys/sys/mplock2.h b/sys/sys/mplock2.h index 1e13c15d61..414213abad 100644 --- a/sys/sys/mplock2.h +++ b/sys/sys/mplock2.h @@ -14,8 +14,6 @@ #include #endif -#ifdef SMP - /* * NOTE: try_mplock()/lwkt_trytoken() return non-zero on success. 
*/ @@ -29,18 +27,4 @@ void cpu_get_initial_mplock(void); #define MP_LOCK_HELD() LWKT_TOKEN_HELD_EXCL(&mp_token) #define ASSERT_MP_LOCK_HELD() ASSERT_LWKT_TOKEN_HELD_EXCL(&mp_token) -#else - -/* - * UNI-PROCESSOR BUILD - Degenerate case macros - */ -#define get_mplock() -#define rel_mplock() -#define try_mplock() 1 -#define owner_mplock() 0 -#define MP_LOCK_HELD(gd) 1 -#define ASSERT_MP_LOCK_HELD(td) - -#endif - #endif diff --git a/sys/sys/serialize.h b/sys/sys/serialize.h index 4b06d7aa0a..3106bf94b5 100644 --- a/sys/sys/serialize.h +++ b/sys/sys/serialize.h @@ -38,9 +38,7 @@ typedef struct lwkt_serialize *lwkt_serialize_t; void lwkt_serialize_init(lwkt_serialize_t); void lwkt_serialize_enter(lwkt_serialize_t); -#ifdef SMP void lwkt_serialize_adaptive_enter(lwkt_serialize_t); -#endif int lwkt_serialize_try(lwkt_serialize_t); void lwkt_serialize_exit(lwkt_serialize_t); void lwkt_serialize_handler_disable(lwkt_serialize_t); diff --git a/sys/sys/spinlock2.h b/sys/sys/spinlock2.h index e67bdd341d..0bee701dca 100644 --- a/sys/sys/spinlock2.h +++ b/sys/sys/spinlock2.h @@ -53,18 +53,12 @@ extern struct spinlock pmap_spin; -#ifdef SMP - int spin_trylock_contested(struct spinlock *spin); void spin_lock_contested(struct spinlock *spin); void spin_lock_shared_contested(struct spinlock *spin); void _spin_pool_lock(void *chan); void _spin_pool_unlock(void *chan); -#endif - -#ifdef SMP - /* * Attempt to obtain an exclusive spinlock. Returns FALSE on failure, * TRUE on success. @@ -94,21 +88,6 @@ spin_trylock(struct spinlock *spin) return (TRUE); } -#else - -static __inline boolean_t -spin_trylock(struct spinlock *spin) -{ - globaldata_t gd = mycpu; - - ++gd->gd_curthread->td_critcount; - cpu_ccfence(); - ++gd->gd_spinlocks; - return (TRUE); -} - -#endif - /* * Return TRUE if the spinlock is held (we can't tell by whom, though) */ @@ -127,7 +106,6 @@ spin_lock_quick(globaldata_t gd, struct spinlock *spin) ++gd->gd_curthread->td_critcount; cpu_ccfence(); ++gd->gd_spinlocks; -#ifdef SMP atomic_add_int(&spin->counta, 1); if (spin->counta != 1) spin_lock_contested(spin); @@ -143,7 +121,6 @@ spin_lock_quick(globaldata_t gd, struct spinlock *spin) } } #endif -#endif } static __inline void @@ -160,7 +137,6 @@ spin_lock(struct spinlock *spin) static __inline void spin_unlock_quick(globaldata_t gd, struct spinlock *spin) { -#ifdef SMP #ifdef DEBUG_LOCKS int i; for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) { @@ -183,7 +159,6 @@ spin_unlock_quick(globaldata_t gd, struct spinlock *spin) cpu_sfence(); atomic_add_int(&spin->counta, -1); cpu_sfence(); -#endif #ifdef DEBUG_LOCKS KKASSERT(gd->gd_spinlocks > 0); #endif @@ -207,7 +182,6 @@ spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin) ++gd->gd_curthread->td_critcount; cpu_ccfence(); ++gd->gd_spinlocks; -#ifdef SMP atomic_add_int(&spin->counta, 1); if (spin->counta == 1) atomic_set_int(&spin->counta, SPINLOCK_SHARED); @@ -225,13 +199,11 @@ spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin) } } #endif -#endif } static __inline void spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin) { -#ifdef SMP #ifdef DEBUG_LOCKS int i; for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) { @@ -260,7 +232,6 @@ spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin) break; } cpu_sfence(); -#endif #ifdef DEBUG_LOCKS KKASSERT(gd->gd_spinlocks > 0); #endif @@ -284,21 +255,13 @@ spin_unlock_shared(struct spinlock *spin) static __inline void spin_pool_lock(void *chan) { -#ifdef SMP _spin_pool_lock(chan); -#else - spin_lock(NULL); -#endif } static 
__inline void spin_pool_unlock(void *chan) { -#ifdef SMP _spin_pool_unlock(chan); -#else - spin_unlock(NULL); -#endif } static __inline void diff --git a/sys/sys/thread.h b/sys/sys/thread.h index 31ad0e77d2..c34f735ef5 100644 --- a/sys/sys/thread.h +++ b/sys/sys/thread.h @@ -278,11 +278,7 @@ struct thread { int td_nest_count; /* prevent splz nesting */ int td_contended; /* token contention count */ u_int td_mpflags; /* flags can be set by foreign cpus */ -#ifdef SMP int td_cscount; /* cpu synchronization master */ -#else - int td_cscount_unused; -#endif int td_wakefromcpu; /* who woke me up? */ int td_upri; /* user priority (sub-priority under td_pri) */ int td_unused02[2]; /* for future fields */ @@ -483,8 +479,6 @@ extern void lwkt_schedulerclock(thread_t td); extern void lwkt_setcpu_self(struct globaldata *); extern void lwkt_migratecpu(int); -#ifdef SMP - extern void lwkt_giveaway(struct thread *); extern void lwkt_acquire(struct thread *); extern int lwkt_send_ipiq3(struct globaldata *, ipifunc3_t, void *, int); @@ -501,8 +495,6 @@ extern void lwkt_process_ipiq_frame(struct intrframe *); extern void lwkt_smp_stopped(void); extern void lwkt_synchronize_ipiqs(const char *); -#endif /* SMP */ - /* lwkt_cpusync_init() - inline function in sys/thread2.h */ extern void lwkt_cpusync_simple(cpumask_t, cpusync_func_t, void *); extern void lwkt_cpusync_interlock(lwkt_cpusync_t); diff --git a/sys/sys/thread2.h b/sys/sys/thread2.h index 3e0a3f661d..510bcd4f53 100644 --- a/sys/sys/thread2.h +++ b/sys/sys/thread2.h @@ -281,8 +281,6 @@ lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask, cs->cs_data = data; } -#ifdef SMP - /* * IPIQ messaging wrappers. IPIQ remote functions are passed three arguments: * a void * pointer, an integer, and a pointer to the trap frame (or NULL if @@ -352,7 +350,6 @@ lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2) return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2)); } -#endif /* SMP */ #endif /* _KERNEL */ #endif /* _SYS_THREAD2_H_ */ diff --git a/sys/vfs/nfs/nfs_vfsops.c b/sys/vfs/nfs/nfs_vfsops.c index 2211ef397b..f96d77dbe3 100644 --- a/sys/vfs/nfs/nfs_vfsops.c +++ b/sys/vfs/nfs/nfs_vfsops.c @@ -1144,7 +1144,6 @@ mountnfs(struct nfs_args *argp, struct mount *mp, struct sockaddr *nam, TAILQ_INSERT_TAIL(&nfs_mountq, nmp, nm_entry); lwkt_reltoken(&nfs_token); -#ifdef SMP switch(ncpus) { case 0: case 1: @@ -1160,10 +1159,6 @@ mountnfs(struct nfs_args *argp, struct mount *mp, struct sockaddr *nam, txcpu = -1; break; } -#else - rxcpu = 0; - txcpu = 0; -#endif /* * Start the reader and writer threads. 
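
The mountnfs() hunk above is a good example of what the option removal buys: the UP fallback that hard-wired rxcpu = txcpu = 0 is gone, and the switch on the live ncpus value is now the only path. Below is a minimal userland sketch of that placement logic; the 1- and 2-cpu case bodies are hypothetical (the hunk elides them), and only the default case, which leaves both threads unbound (-1), is visible above.

    /*
     * Sketch only, not DragonFly code.  pick_nfs_cpus() stands in for the
     * inline switch in mountnfs(); rxcpu/txcpu select where the nfssvc
     * reader and writer threads are bound, -1 meaning "any cpu".
     */
    #include <stdio.h>

    static void
    pick_nfs_cpus(int ncpus, int *rxcpu, int *txcpu)
    {
        switch (ncpus) {
        case 0:
        case 1:
            *rxcpu = 0;     /* hypothetical: share the only cpu */
            *txcpu = 0;
            break;
        case 2:
            *rxcpu = 0;     /* hypothetical: split rx and tx */
            *txcpu = 1;
            break;
        default:
            *rxcpu = -1;    /* visible above: unbound on >2 cpus */
            *txcpu = -1;
            break;
        }
    }

    int
    main(void)
    {
        int rx, tx;

        pick_nfs_cpus(4, &rx, &tx);
        printf("rxcpu=%d txcpu=%d\n", rx, tx);
        return (0);
    }
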
diff --git a/sys/vfs/nwfs/nwfs_vfsops.c b/sys/vfs/nwfs/nwfs_vfsops.c index d6e8aaf150..16382c872f 100644 --- a/sys/vfs/nwfs/nwfs_vfsops.c +++ b/sys/vfs/nwfs/nwfs_vfsops.c @@ -355,10 +355,6 @@ nwfs_root(struct mount *mp, struct vnode **vpp) int nwfs_init(struct vfsconf *vfsp) { -#ifndef SMP - if (ncpus > 1) - kprintf("warning: nwfs module compiled without SMP support."); -#endif nwfs_hash_init(); nwfs_pbuf_freecnt = nswbuf / 2 + 1; NCPVODEBUG("always happy to load!\n"); diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index c822fb848f..5b007face2 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -411,12 +411,8 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object) object->resident_page_count = 0; object->agg_pv_list_count = 0; object->shadow_count = 0; -#ifdef SMP /* cpu localization twist */ object->pg_color = (int)(intptr_t)curthread; -#else - object->pg_color = next_index; -#endif if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1)) incr = PQ_L2_SIZE / 3 + PQ_PRIME1; else @@ -1663,13 +1659,8 @@ vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length, LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list); source->shadow_count++; source->generation++; -#ifdef SMP /* cpu localization twist */ result->pg_color = (int)(intptr_t)curthread; -#else - result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & - PQ_L2_MASK; -#endif } /* diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index e78888d51a..350fd26570 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -203,14 +203,12 @@ vm_add_new_page(vm_paddr_t pa) m->phys_addr = pa; m->flags = 0; m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK; -#ifdef SMP /* * Twist for cpu localization in addition to page coloring, so * different cpus selecting by m->queue get different page colors. */ m->pc ^= ((pa >> PAGE_SHIFT) / PQ_L2_SIZE) & PQ_L2_MASK; m->pc ^= ((pa >> PAGE_SHIFT) / (PQ_L2_SIZE * PQ_L2_SIZE)) & PQ_L2_MASK; -#endif /* * Reserve a certain number of contiguous low memory pages for * contigmalloc() to use. @@ -1493,9 +1491,7 @@ vm_page_pcpu_cache(void) vm_page_t vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req) { -#ifdef SMP globaldata_t gd = mycpu; -#endif vm_object_t obj; vm_page_t m; u_short pg_color; @@ -1517,7 +1513,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req) #endif m = NULL; -#ifdef SMP /* * Cpu twist - cpu localization algorithm */ @@ -1527,16 +1522,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req) } else { pg_color = gd->gd_cpuid + (pindex & ~ncpus_fit_mask); } -#else - /* - * Normal page coloring algorithm - */ - if (object) { - pg_color = object->pg_color + pindex; - } else { - pg_color = pindex; - } -#endif KKASSERT(page_req & (VM_ALLOC_NORMAL|VM_ALLOC_QUICK| VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM)); -- 2.41.0
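
A recurring shape in this patch is worth calling out: trap(), user_trap(), kern_trap() and syscall2() keep only the SMP definition of MAKEMPSAFE(), so the lazy MP-lock acquisition now always compiles in; the empty UP definition is gone. The sketch below is standalone and builds outside the kernel: get_mplock()/rel_mplock() are stand-ins for the real routines from sys/mplock2.h (whose UP degenerate macros this patch also removes), while the macro body itself is the one quoted in the trap.c hunks.

    #include <stdio.h>

    static int mplock_count;                              /* stand-in state */
    static void get_mplock(void) { ++mplock_count; }      /* stand-in */
    static void rel_mplock(void) { --mplock_count; }      /* stand-in */

    /* As kept in trap.c/syscall2(): take the MP lock once, on demand. */
    #define MAKEMPSAFE(have_mplock)     \
        if (have_mplock == 0) {         \
            get_mplock();               \
            have_mplock = 1;            \
        }

    int
    main(void)
    {
        int have_mplock = 0;

        MAKEMPSAFE(have_mplock);    /* first non-MPSAFE path locks */
        MAKEMPSAFE(have_mplock);    /* later hits are no-ops */
        if (have_mplock)            /* mirrors the out2:/bad: exit paths */
            rel_mplock();
        printf("mplock_count=%d\n", mplock_count);
        return (0);
    }

The exit-path release is the invariant the removed #ifdef SMP guards used to protect; with SMP unconditional, the guard is simply the have_mplock flag itself.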