From: Sascha Wildner
Date: Thu, 3 Jan 2019 22:25:39 +0000 (+0100)
Subject: i386 removal, part 69/x: Clean up sys/dev/netif.
X-Git-Tag: v5.7.0~612
X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/afbe4b803a21d676343e0a600f098a87122a13bb

i386 removal, part 69/x: Clean up sys/dev/netif.

According to comments from sephe.
---

diff --git a/sys/dev/netif/bge/if_bge.c b/sys/dev/netif/bge/if_bge.c
index d14f3cbee2..dcf0f10165 100644
--- a/sys/dev/netif/bge/if_bge.c
+++ b/sys/dev/netif/bge/if_bge.c
@@ -3129,7 +3129,7 @@ refresh_rx:
 		}
 
 		IFNET_STAT_INC(ifp, ipackets, 1);
-#if !defined(__i386__) && !defined(__x86_64__)
+#if !defined(__x86_64__)
 		/*
 		 * The x86 allows unaligned accesses, but for other
 		 * platforms we must make sure the payload is aligned.
diff --git a/sys/dev/netif/dc/if_dc.c b/sys/dev/netif/dc/if_dc.c
index 0d1ad26dbf..aef97b2afc 100644
--- a/sys/dev/netif/dc/if_dc.c
+++ b/sys/dev/netif/dc/if_dc.c
@@ -314,7 +314,7 @@ static driver_t dc_driver = {
 
 static devclass_t dc_devclass;
 
-#ifdef __i386__
+#ifdef __x86_64__
 static int dc_quick=1;
 SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW,
 	&dc_quick,0,"do not mdevget in dc driver");
@@ -2567,7 +2567,7 @@ dc_rxeof(struct dc_softc *sc)
 
 		/* No errors; receive the packet. */
 		total_len -= ETHER_CRC_LEN;
-#ifdef __i386__
+#ifdef __x86_64__
 		/*
 		 * On the x86 we do not have alignment problems, so try to
 		 * allocate a new buffer for the receive ring, and pass up
diff --git a/sys/dev/netif/lnc/am79900.c b/sys/dev/netif/lnc/am79900.c
index 0eed826ae8..50146eab86 100644
--- a/sys/dev/netif/lnc/am79900.c
+++ b/sys/dev/netif/lnc/am79900.c
@@ -251,7 +251,7 @@ am79900_rint(struct lance_softc *sc)
 	struct lermd rmd;
 	uint32_t rmd1;
 	int bix, rp;
-#if defined(__i386__)
+#if defined(__x86_64__)
 	struct ether_header *eh;
 #endif
 
@@ -312,7 +312,7 @@ am79900_rint(struct lance_softc *sc)
 
 		if (m != NULL) {
 			IFNET_STAT_INC(ifp, ipackets, 1);
-#ifdef __i386__
+#ifdef __x86_64__
 			/*
 			 * The VMware LANCE does not present IFF_SIMPLEX
 			 * behavior on multicast packets. Thus drop the
diff --git a/sys/dev/netif/nge/if_nge.c b/sys/dev/netif/nge/if_nge.c
index de86b61b6c..2a7b8543d3 100644
--- a/sys/dev/netif/nge/if_nge.c
+++ b/sys/dev/netif/nge/if_nge.c
@@ -1241,7 +1241,7 @@ nge_rxeof(struct nge_softc *sc)
 		 * only gigE chip I know of with alignment constraints
 		 * on receive buffers. RX buffers must be 64-bit aligned.
 		 */
-#ifdef __i386__
+#ifdef __x86_64__
 		/*
 		 * By popular demand, ignore the alignment problems
 		 * on the Intel x86 platform. The performance hit
@@ -1264,7 +1264,7 @@ nge_rxeof(struct nge_softc *sc)
 			}
 			m_adj(m0, ETHER_ALIGN);
 			m = m0;
-#ifdef __i386__
+#ifdef __x86_64__
 		} else {
 			m->m_pkthdr.rcvif = ifp;
 			m->m_pkthdr.len = m->m_len = total_len;
diff --git a/sys/dev/netif/sf/if_sfreg.h b/sys/dev/netif/sf/if_sfreg.h
index 6aa1e3a91e..35c41a2cf2 100644
--- a/sys/dev/netif/sf/if_sfreg.h
+++ b/sys/dev/netif/sf/if_sfreg.h
@@ -671,7 +671,7 @@ struct sf_rx_bufdesc_type0 {
 				sf_end:1,
 				sf_addrlo:30;
 	u_int32_t		sf_pad0;
-#ifdef __i386__
+#ifndef __LP64__
 	u_int32_t		sf_pad1;
 #endif
 	struct mbuf		*sf_mbuf;
@@ -685,7 +685,7 @@ struct sf_rx_bufdesc_type1 {
 				sf_end:1,
 				sf_addrlo:30;
 	u_int32_t		sf_addrhi;
-#ifdef __i386__
+#ifndef __LP64__
 	u_int32_t		sf_pad;
 #endif
 	struct mbuf		*sf_mbuf;
@@ -815,7 +815,7 @@ struct sf_frag_msdos {
  * 128 bytes per descriptor, we have room for 128 descriptors in the queue.
  */
 struct sf_tx_bufdesc_type0 {
-#ifdef __i386__
+#ifndef __LP64__
 	u_int32_t		sf_pad;
 #endif
 	struct mbuf		*sf_mbuf;
@@ -836,7 +836,7 @@ struct sf_tx_bufdesc_type0 {
  * maps a single fragment.
  */
 struct sf_tx_bufdesc_type1 {
-#ifdef __i386__
+#ifndef __LP64__
 	u_int32_t		sf_pad;
 #endif
 	struct mbuf		*sf_mbuf;
@@ -855,7 +855,7 @@ struct sf_tx_bufdesc_type1 {
  * maps a single fragment.
 */
 struct sf_tx_bufdesc_type2 {
-#ifdef __i386__
+#ifndef __LP64__
 	u_int32_t		sf_pad;
 #endif
 	struct mbuf		*sf_mbuf;
@@ -879,7 +879,7 @@ struct sf_tx_bufdesc_type2 {
  * to optimize copies in MS-DOS and OS/2 drivers.
 */
 struct sf_tx_bufdesc_type4 {
-#ifdef __i386__
+#ifndef __LP64__
 	u_int32_t		sf_pad;
 #endif
 	struct mbuf		*sf_mbuf;
diff --git a/sys/dev/netif/stge/if_stge.c b/sys/dev/netif/stge/if_stge.c
index 31dc548707..117be232fd 100644
--- a/sys/dev/netif/stge/if_stge.c
+++ b/sys/dev/netif/stge/if_stge.c
@@ -157,7 +157,7 @@ static void	stge_txeof(struct stge_softc *);
 static void	stge_rxeof(struct stge_softc *, int);
 static __inline void stge_discard_rxbuf(struct stge_softc *, int);
 static int	stge_newbuf(struct stge_softc *, int, int);
-#ifndef __i386__
+#ifndef __x86_64__
 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
 #endif
 
@@ -1505,7 +1505,7 @@ stge_discard_rxbuf(struct stge_softc *sc, int idx)
 	rfd->rfd_status = 0;
 }
 
-#ifndef __i386__
+#ifndef __x86_64__
/*
  * It seems that TC9021's DMA engine has alignment restrictions in
  * DMA scatter operations. The first DMA segment has no address
@@ -1653,7 +1653,7 @@ stge_rxeof(struct stge_softc *sc, int count)
 			}
 		}
 
-#ifndef __i386__
+#ifndef __x86_64__
 		if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
 			if ((m = stge_fixup_rx(sc, m)) == NULL) {
 				STGE_RXCHAIN_RESET(sc);
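
A note on the recurring pattern in the bge, dc, nge, and stge hunks above: on strict-alignment platforms the receive path copies or shifts each frame by ETHER_ALIGN (2 bytes) so that the IP header, which sits behind the 14-byte Ethernet header, lands on a 32-bit boundary; x86_64 tolerates the unaligned access, so that fixup code is compiled out there. The sketch below only illustrates the offset arithmetic; it uses plain buffers and a made-up align_rx_frame() helper rather than the drivers' actual mbuf, m_adj(), and m_devget() machinery.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ETHER_HDR_LEN	14	/* Ethernet header size */
#define ETHER_ALIGN	2	/* pad so the IP header ends up 4-byte aligned */

/*
 * Hypothetical helper: copy a received frame into a fresh buffer at an
 * offset of ETHER_ALIGN bytes.  Real drivers do the equivalent with
 * m_devget()/m_adj() on mbufs; plain arrays keep this sketch self-contained.
 */
static void
align_rx_frame(uint8_t *dst, const uint8_t *src, size_t len)
{
	memcpy(dst + ETHER_ALIGN, src, len);
}

int
main(void)
{
	static uint8_t raw[64];			/* frame as DMAed by the NIC */
	static uint8_t fixed[64 + ETHER_ALIGN];	/* realigned copy */

	align_rx_frame(fixed, raw, sizeof(raw));

	/*
	 * Without the fixup the IP header starts at offset 14 (unaligned);
	 * with it, at offset 16, which is 4-byte aligned.
	 */
	printf("ip header offset after fixup: %d\n",
	    ETHER_ALIGN + ETHER_HDR_LEN);
	return 0;
}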
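
The if_sfreg.h hunks follow a different pattern: the descriptor structs shadow fixed-size hardware slots, so the 32-bit pad that used to be i386-only is now applied on any non-LP64 build, presumably to keep the embedded struct mbuf pointer area at 8 bytes regardless of pointer width. The descriptor below is invented for this sketch (demo_tx_desc, d_pad, d_mbuf are not sf driver names) and assumes an ILP32 or LP64 ABI, as in the kernel.

#include <stdint.h>

/*
 * Hypothetical descriptor mirroring the if_sfreg.h pattern: on non-LP64
 * builds a 32-bit pad fills the upper half of the pointer slot, so the
 * overall layout matches what a 64-bit build gets for free.
 */
struct demo_tx_desc {
#ifndef __LP64__
	uint32_t	d_pad;		/* fill the upper half of the slot */
#endif
	void		*d_mbuf;	/* software bookkeeping, 8-byte slot */
	uint32_t	d_addrlo;	/* DMA address programmed into the chip */
	uint32_t	d_len;		/* fragment length */
};

/* The slot size must not depend on whether pointers are 4 or 8 bytes. */
_Static_assert(sizeof(struct demo_tx_desc) == 16,
    "descriptor slot size must be fixed");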