/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MLX4_EN_H_
#define _MLX4_EN_H_
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
#ifdef CONFIG_MLX4_EN_DCB
#include <linux/dcbnl.h>
#endif
#include <linux/cpu_rmap.h>
#include <linux/ptp_clock_kernel.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/srq.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/cmd.h>

#include "en_port.h"
#include "mlx4_stats.h"
#define DRV_NAME	"mlx4_en"
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
/*
 * Device constants
 */

#define MLX4_EN_PAGE_SHIFT	12
#define MLX4_EN_PAGE_SIZE	(1 << MLX4_EN_PAGE_SHIFT)
#define DEF_RX_RINGS		16
#define MAX_RX_RINGS		128
#define MIN_RX_RINGS		4
#define TXBB_SIZE		64
#define HEADROOM		(2048 / TXBB_SIZE + 1)
#define STAMP_STRIDE		64
#define STAMP_DWORDS		(STAMP_STRIDE / 4)
#define STAMP_SHIFT		31
#define STAMP_VAL		0x7fffffff
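/*
 * A rough sketch of how the STAMP_* constants are used: when TX
 * completions are processed, each freed 64-byte TXBB is "stamped" -
 * one dword every STAMP_STRIDE bytes is overwritten with the ring's
 * current ownership bit in bit STAMP_SHIFT and STAMP_VAL in the low
 * 31 bits, so a recycled descriptor is never mistaken for a valid
 * one. See mlx4_en_stamp_wqe() in en_tx.c.
 */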
#define STATS_DELAY		(HZ / 4)
#define SERVICE_TASK_DELAY	(HZ / 4)
#define MAX_NUM_OF_FS_RULES	256

#define MLX4_EN_FILTER_HASH_SHIFT 4
#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
#define MAX_DESC_SIZE		512
#define MAX_DESC_TXBBS		(MAX_DESC_SIZE / TXBB_SIZE)
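/*
 * For scale: with TXBB_SIZE = 64 this caps a single descriptor at
 * 512 / 64 = 8 TXBBs, comfortably above the ~352 bytes (6 TXBBs) of
 * the typical 16-entry TSO descriptor mentioned above.
 */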
/*
 * OS related constants and tunables
 */

#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1

#define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)
/* Use the maximum between 16384 and a single page */
#define MLX4_EN_ALLOC_SIZE	PAGE_ALIGN(16384)

#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER
/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
 * and 4K allocations) */
enum {
	FRAG_SZ0 = 1536 - NET_IP_ALIGN,
	FRAG_SZ1 = 4096,
	FRAG_SZ2 = 4096,
	FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
};
#define MLX4_EN_MAX_RX_FRAGS	4
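/*
 * A worked example of the comment above: with NET_IP_ALIGN = 2, a
 * 9600-byte MTU frame fits in three fragments, since
 * (1536 - 2) + 4096 + 4096 = 9726 bytes of buffer space; the fourth
 * fragment slot (capped at FRAG_SZ3) is only needed by larger layouts.
 */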
/* Maximum ring sizes */
#define MLX4_EN_MAX_TX_SIZE	8192
#define MLX4_EN_MAX_RX_SIZE	8192

/* Minimum ring size for our page-allocation scheme to work */
#define MLX4_EN_MIN_RX_SIZE	(MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
#define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
#define MLX4_EN_SMALL_PKT_SIZE		64
#define MLX4_EN_MIN_TX_RING_P_UP	1
#define MLX4_EN_MAX_TX_RING_P_UP	32
#define MLX4_EN_NUM_UP			8
#define MLX4_EN_DEF_TX_RING_SIZE	512
#define MLX4_EN_DEF_RX_RING_SIZE	1024
#define MAX_TX_RINGS			(MLX4_EN_MAX_TX_RING_P_UP * \
					 MLX4_EN_NUM_UP)

#define MLX4_EN_DEFAULT_TX_WORK		256
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET	44
#define MLX4_EN_RX_COAL_TIME	0x10

#define MLX4_EN_TX_COAL_PKTS	16
#define MLX4_EN_TX_COAL_TIME	0x10

#define MLX4_EN_RX_RATE_LOW		400000
#define MLX4_EN_RX_COAL_TIME_LOW	0
#define MLX4_EN_RX_RATE_HIGH		450000
#define MLX4_EN_RX_COAL_TIME_HIGH	128
#define MLX4_EN_RX_SIZE_THRESH		1024
#define MLX4_EN_RX_RATE_THRESH		(1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
#define MLX4_EN_SAMPLE_INTERVAL		0
#define MLX4_EN_AVG_PKT_SMALL		256
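/*
 * How these thresholds interact (a sketch of the adaptive moderation
 * in mlx4_en_auto_moderation()): below MLX4_EN_RX_RATE_LOW packets/sec
 * the RX coalescing time is driven toward MLX4_EN_RX_COAL_TIME_LOW
 * (0, i.e. an interrupt per packet); above MLX4_EN_RX_RATE_HIGH it is
 * driven toward MLX4_EN_RX_COAL_TIME_HIGH (128 usec).
 * MLX4_EN_RX_RATE_THRESH works out to 1000000 / 128 = 7812 pkts/sec,
 * the rate at which a 128 usec window still sees roughly one packet
 * per interrupt.
 */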
#define MLX4_EN_AUTO_CONF	0xffff

#define MLX4_EN_DEF_RX_PAUSE	1
#define MLX4_EN_DEF_TX_PAUSE	1
/* Interval between successive polls in the Tx routine when polling is used
   instead of interrupts (in per-core Tx rings) - should be power of 2 */
#define MLX4_EN_TX_POLL_MODER	16
#define MLX4_EN_TX_POLL_TIMEOUT	(HZ / 4)
#define SMALL_PACKET_SIZE	(256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE	(128 - NET_IP_ALIGN)
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)

#define MLX4_EN_MIN_MTU		46
#define ETH_BCAST		0xffffffffffffULL

#define MLX4_EN_LOOPBACK_RETRIES	5
#define MLX4_EN_LOOPBACK_TIMEOUT	100
#ifdef MLX4_EN_PERF_STAT
/* Number of samples to 'average' */
#define AVG_SIZE			128
#define AVG_FACTOR			1024

#define INC_PERF_COUNTER(cnt)		(++(cnt))
#define ADD_PERF_COUNTER(cnt, add)	((cnt) += (add))
#define AVG_PERF_COUNTER(cnt, sample) \
	((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
#define GET_PERF_COUNTER(cnt)		(cnt)
#define GET_AVG_PERF_COUNTER(cnt)	((cnt) / AVG_FACTOR)

#else

#define INC_PERF_COUNTER(cnt)		do {} while (0)
#define ADD_PERF_COUNTER(cnt, add)	do {} while (0)
#define AVG_PERF_COUNTER(cnt, sample)	do {} while (0)
#define GET_PERF_COUNTER(cnt)		(0)
#define GET_AVG_PERF_COUNTER(cnt)	(0)

#endif /* MLX4_EN_PERF_STAT */
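/*
 * AVG_PERF_COUNTER() keeps an exponentially weighted moving average in
 * fixed point: the stored counter is the average scaled by AVG_FACTOR,
 * and each sample contributes a 1/AVG_SIZE share. For example, with a
 * steady sample of 100 the counter converges on
 * 100 * AVG_FACTOR = 102400, at which point GET_AVG_PERF_COUNTER()
 * reads back 100.
 */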
/* Constants for TX flow */
enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
};

enum cq_type {
	RX = 0,
	TX = 1,
};

/*
 * Useful macros
 */
#define ROUNDUP_LOG2(x)		ilog2(roundup_pow_of_two(x))
#define XNOR(x, y)		(!(x) == !(y))
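/*
 * XNOR(x, y) normalizes both arguments with '!' before comparing, so it
 * is true exactly when x and y have the same truth value: XNOR(5, 1) is
 * true while XNOR(5, 0) is false. ROUNDUP_LOG2(x) is log2 of x rounded
 * up to the next power of two, e.g. ROUNDUP_LOG2(1000) = 10.
 */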
struct mlx4_en_tx_info {
	struct sk_buff *skb;
	dma_addr_t	map0_dma;
	u32		map0_byte_count;
	u32		nr_txbb;
	u32		nr_bytes;
	u8		linear;
	u8		data_offset;
	u8		inl;
	u8		ts_requested;
	u8		nr_maps;
} ____cacheline_aligned_in_smp;
#define MLX4_EN_BIT_DESC_OWN	0x80000000
#define CTRL_SIZE	sizeof(struct mlx4_wqe_ctrl_seg)
#define MLX4_EN_MEMTYPE_PAD	0x100
#define DS_SIZE		sizeof(struct mlx4_wqe_data_seg)
struct mlx4_en_tx_desc {
	struct mlx4_wqe_ctrl_seg ctrl;
	union {
		struct mlx4_wqe_data_seg data; /* at least one data segment */
		struct mlx4_wqe_lso_seg lso;
		struct mlx4_wqe_inline_seg inl;
	};
};
#define MLX4_EN_USE_SRQ		0x01000000

#define MLX4_EN_CX3_LOW_ID	0x1000
#define MLX4_EN_CX3_HIGH_ID	0x1005
struct mlx4_en_rx_alloc {
	struct page	*page;
	dma_addr_t	dma;
	u32		page_offset;
	u32		page_size;
};
struct mlx4_en_tx_ring {
	/* cache line used and dirtied in tx completion
	 * (mlx4_en_free_tx_buf())
	 */
	u32			last_nr_txbb;
	u32			cons;
	unsigned long		wake_queue;

	/* cache line used and dirtied in mlx4_en_xmit() */
	u32			prod ____cacheline_aligned_in_smp;
	unsigned long		bytes;
	unsigned long		packets;
	unsigned long		tx_csum;
	unsigned long		tso_packets;
	unsigned long		xmit_more;
	struct mlx4_bf		bf;
	unsigned long		queue_stopped;

	/* Following part should be mostly read */
	cpumask_t		affinity_mask;
	struct mlx4_qp		qp;
	struct mlx4_hwq_resources wqres;
	u32			size; /* number of TXBBs */
	u32			size_mask;
	u16			stride;
	u16			cqn; /* index of port CQ associated with this ring */
	u32			buf_size;
	__be32			doorbell_qpn;
	__be32			mr_key;
	void			*buf;
	struct mlx4_en_tx_info	*tx_info;
	u8			*bounce_buf;
	struct mlx4_qp_context	context;
	int			qpn;
	enum mlx4_qp_state	qp_state;
	u8			queue_index;
	bool			bf_enabled;
	bool			bf_alloced;
	struct netdev_queue	*tx_queue;
	int			hwtstamp_tx_type;
} ____cacheline_aligned_in_smp;
struct mlx4_en_rx_desc {
	/* actual number of entries depends on rx ring stride */
	struct mlx4_wqe_data_seg data[0];
};
struct mlx4_en_rx_ring {
	struct mlx4_hwq_resources wqres;
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	u32 size;	/* number of Rx descs */
	u32 actual_size;
	u32 size_mask;
	u16 stride;
	u16 log_stride;
	u16 cqn;	/* index of port CQ associated with this ring */
	u32 prod;
	u32 cons;
	u32 buf_size;
	u8  fcs_del;
	void *buf;
	void *rx_info;
	unsigned long bytes;
	unsigned long packets;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long yields;
	unsigned long misses;
	unsigned long cleaned;
#endif
	unsigned long csum_ok;
	unsigned long csum_none;
	unsigned long csum_complete;
	int hwtstamp_rx_filter;
	cpumask_var_t affinity_mask;
};
struct mlx4_en_cq {
	struct mlx4_cq		mcq;
	struct mlx4_hwq_resources wqres;
	int			ring;
	struct net_device	*dev;
	struct napi_struct	napi;
	int size;
	int buf_size;
	int vector;
	enum cq_type is_tx;
	u16 moder_time;
	u16 moder_cnt;
	struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR	0x1e

#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int state;
#define MLX4_EN_CQ_STATE_IDLE        0
#define MLX4_EN_CQ_STATE_NAPI        1    /* NAPI owns this CQ */
#define MLX4_EN_CQ_STATE_POLL        2    /* poll owns this CQ */
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL)
#define MLX4_EN_CQ_STATE_NAPI_YIELD  4    /* NAPI yielded this CQ */
#define MLX4_EN_CQ_STATE_POLL_YIELD  8    /* poll yielded this CQ */
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
	spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif	/* CONFIG_NET_RX_BUSY_POLL */
	struct irq_desc *irq_desc;
};
struct mlx4_en_port_profile {
	u32 flags;
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 tx_ring_size;
	u32 rx_ring_size;
	u8 rx_pause;
	u8 rx_ppp;
	u8 tx_pause;
	u8 tx_ppp;
	int rss_rings;
	int inline_thold;
};
struct mlx4_en_profile {
	int udp_rss;
	u8 rss_mask;
	u32 active_ports;
	u32 small_pkt_int;
	u8 no_reset;
	u8 num_tx_rings_p_up;
	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};
struct mlx4_en_dev {
	struct mlx4_dev		*dev;
	struct pci_dev		*pdev;
	struct mutex		state_lock;
	struct net_device	*pndev[MLX4_MAX_PORTS + 1];
	struct net_device	*upper[MLX4_MAX_PORTS + 1];
	u32			port_cnt;
	bool			device_up;
	struct mlx4_en_profile	profile;
	u32			LSO_support;
	struct workqueue_struct *workqueue;
	struct device		*dma_device;
	void __iomem		*uar_map;
	struct mlx4_uar		priv_uar;
	struct mlx4_mr		mr;
	u32			priv_pdn;
	spinlock_t		uar_lock;
	u8			mac_removed[MLX4_MAX_PORTS + 1];
	rwlock_t		clock_lock;
	u32			nominal_c_mult;
	struct cyclecounter	cycles;
	struct timecounter	clock;
	unsigned long		last_overflow_check;
	unsigned long		overflow_period;
	struct ptp_clock	*ptp_clock;
	struct ptp_clock_info	ptp_clock_info;
	struct notifier_block	nb;
};
struct mlx4_en_rss_map {
	int base_qpn;
	struct mlx4_qp qps[MAX_RX_RINGS];
	enum mlx4_qp_state state[MAX_RX_RINGS];
	struct mlx4_qp indir_qp;
	enum mlx4_qp_state indir_state;
};
enum mlx4_en_port_flag {
	MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */
	MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */
};
struct mlx4_en_port_state {
	int link_state;
	int link_speed;
	int transceiver;
	u32 flags;
};
enum mlx4_en_mclist_act {
	MCLIST_NONE,
	MCLIST_REM,
	MCLIST_ADD,
};
struct mlx4_en_mc_list {
	struct list_head	list;
	enum mlx4_en_mclist_act	action;
	u8			addr[ETH_ALEN];
	u64			reg_id;
	u64			tunnel_reg_id;
};
struct mlx4_en_frag_info {
	u16 frag_size;
	u16 frag_prefix_size;
	u16 frag_stride;
};
#ifdef CONFIG_MLX4_EN_DCB
/* Minimal TC BW - setting to 0 will block traffic */
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */

#define MLX4_EN_TC_ETS 7

#endif
struct ethtool_flow_id {
	struct list_head list;
	struct ethtool_rx_flow_spec flow_spec;
	u64 id;
};
enum {
	MLX4_EN_FLAG_PROMISC		= (1 << 0),
	MLX4_EN_FLAG_MC_PROMISC		= (1 << 1),
	/* whether we need to enable hardware loopback by putting dmac
	 * in Tx WQE
	 */
	MLX4_EN_FLAG_ENABLE_HW_LOOPBACK	= (1 << 2),
	/* whether we need to drop packets that hardware loopback-ed */
	MLX4_EN_FLAG_RX_FILTER_NEEDED	= (1 << 3),
	MLX4_EN_FLAG_FORCE_PROMISC	= (1 << 4),
	MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP	= (1 << 5),
};
#define PORT_BEACON_MAX_LIMIT (65535)
#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
#define MLX4_EN_MAC_HASH_IDX 5
struct mlx4_en_stats_bitmap {
	DECLARE_BITMAP(bitmap, NUM_ALL_STATS);
	struct mutex mutex; /* for mutual access to stats bitmap */
};
struct mlx4_en_priv {
	struct mlx4_en_dev *mdev;
	struct mlx4_en_port_profile *prof;
	struct net_device *dev;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device_stats stats;
	struct net_device_stats ret_stats;
	struct mlx4_en_port_state port_state;
	spinlock_t stats_lock;
	struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
	/* To allow rules removal while port is going down */
	struct list_head ethtool_list;

	unsigned long last_moder_packets[MAX_RX_RINGS];
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes[MAX_RX_RINGS];
	unsigned long last_moder_jiffies;
	int last_moder_time[MAX_RX_RINGS];
	u16 rx_usecs;
	u16 rx_frames;
	u16 tx_usecs;
	u16 tx_frames;
	u32 pkt_rate_low;
	u16 rx_usecs_low;
	u32 pkt_rate_high;
	u16 rx_usecs_high;
	u16 sample_interval;
	u16 adaptive_rx_coal;
	u32 msg_enable;
	u32 loopback_ok;
	u32 validate_loopback;

	struct mlx4_hwq_resources res;
	int link_state;
	int last_link_state;
	bool port_up;
	int port;
	int registered;
	int allocated;
	int stride;
	unsigned char current_mac[ETH_ALEN + 2];
	int mac_index;
	unsigned max_mtu;
	int base_qpn;
	int cqe_factor;
	int cqe_size;

	struct mlx4_en_rss_map rss_map;
	__be32 ctrl_flags;
	u32 flags;
	u8 num_tx_rings_p_up;
	u32 tx_work_limit;
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 rx_skb_size;
	struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
	u16 num_frags;
	u16 log_rx_info;

	struct mlx4_en_tx_ring **tx_ring;
	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
	struct mlx4_en_cq **tx_cq;
	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
	struct mlx4_qp drop_qp;
	struct work_struct rx_mode_task;
	struct work_struct watchdog_task;
	struct work_struct linkstate_task;
	struct delayed_work stats_task;
	struct delayed_work service_task;
#ifdef CONFIG_MLX4_EN_VXLAN
	struct work_struct vxlan_add_task;
	struct work_struct vxlan_del_task;
#endif
	struct mlx4_en_perf_stats pstats;
	struct mlx4_en_pkt_stats pkstats;
	struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
	struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES];
	struct mlx4_en_flow_stats_rx rx_flowstats;
	struct mlx4_en_flow_stats_tx tx_flowstats;
	struct mlx4_en_port_stats port_stats;
	struct mlx4_en_stats_bitmap stats_bitmap;
	struct list_head mc_list;
	struct list_head curr_list;
	u64 broadcast_id;
	struct mlx4_en_stat_out_mbox hw_stats;
	int vids[128];
	bool wol;
	struct device *ddev;
	int base_tx_qpn;
	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
	struct hwtstamp_config hwtstamp_config;
	u32 counter_index;

#ifdef CONFIG_MLX4_EN_DCB
	struct ieee_ets ets;
	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
	enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
#endif
#ifdef CONFIG_RFS_ACCEL
	spinlock_t filters_lock;
	int last_filter_id;
	struct list_head filters;
	struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
	u64 tunnel_reg_id;
	__be16 vxlan_port;

	u32 pflags;
	u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
	u8 rss_hash_fn;
};
enum mlx4_en_wol {
	MLX4_EN_WOL_MAGIC = (1ULL << 61),
	MLX4_EN_WOL_ENABLED = (1ULL << 62),
};
struct mlx4_mac_entry {
	struct hlist_node hlist;
	unsigned char mac[ETH_ALEN + 2];
	u64 reg_id;
	struct rcu_head rcu;
};
static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz)
{
	return buf + idx * cqe_sz;
}
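/*
 * cqe_sz is the device CQE stride (32 or 64 bytes, priv->cqe_size);
 * indexing by bytes rather than by struct mlx4_cqe keeps the helper
 * correct for both strides, e.g.:
 *
 *	struct mlx4_cqe *cqe =
 *		mlx4_en_get_cqe(cq->buf, index, priv->cqe_size);
 */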
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
	spin_lock_init(&cq->poll_lock);
	cq->state = MLX4_EN_CQ_STATE_IDLE;
}
/* called from the device poll routine to get ownership of a cq */
static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	bool rc = true;

	spin_lock(&cq->poll_lock);
	if (cq->state & MLX4_CQ_LOCKED) {
		WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
		cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD;
		rc = false;
	} else {
		/* we don't care if someone yielded */
		cq->state = MLX4_EN_CQ_STATE_NAPI;
	}
	spin_unlock(&cq->poll_lock);
	return rc;
}
/* returns true if someone tried to get the cq while napi had it */
static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	bool rc = false;

	spin_lock(&cq->poll_lock);
	WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL |
			     MLX4_EN_CQ_STATE_NAPI_YIELD));

	if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATE_IDLE;
	spin_unlock(&cq->poll_lock);
	return rc;
}
/* called from mlx4_en_low_latency_poll() */
static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	bool rc = true;

	spin_lock_bh(&cq->poll_lock);
	if (cq->state & MLX4_CQ_LOCKED) {
		struct net_device *dev = cq->dev;
		struct mlx4_en_priv *priv = netdev_priv(dev);
		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];

		cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
		rc = false;
		rx_ring->yields++;
	} else {
		/* preserve yield marks */
		cq->state |= MLX4_EN_CQ_STATE_POLL;
	}
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}
/* returns true if someone tried to get the cq while it was locked */
static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	bool rc = false;

	spin_lock_bh(&cq->poll_lock);
	WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);

	if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATE_IDLE;
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}
/* true if a socket is polling, even if it did not get the lock */
static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
	return cq->state & CQ_USER_PEND;
}

#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
}

static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	return true;
}

static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
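/*
 * A sketch of how these helpers pair up in the RX path (the real users
 * are mlx4_en_poll_rx_cq() and the low-latency socket poll in en_rx.c):
 *
 *	if (!mlx4_en_cq_lock_napi(cq))	// busy-poll owns the CQ
 *		return budget;		// NAPI backs off and retries
 *	done = mlx4_en_process_rx_cq(dev, cq, budget);
 *	mlx4_en_cq_unlock_napi(cq);
 *
 * The *_YIELD state bits record contention so the losing side can
 * account a yield (rx_ring->yields) instead of spinning on the CQ.
 */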
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)

void mlx4_en_update_loopback_state(struct net_device *dev,
				   netdev_features_t features);
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof);

int mlx4_en_start_port(struct net_device *dev);
void mlx4_en_stop_port(struct net_device *dev, int detach);

void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
			      struct mlx4_en_stats_bitmap *stats_bitmap,
			      u8 rx_ppp, u8 rx_pause,
			      u8 tx_ppp, u8 tx_pause);

void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode, int node);
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
			 void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring,
			   u32 size, u16 stride,
			   int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring);
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring);
int mlx4_en_process_rx_cq(struct net_device *dev,
			  struct mlx4_en_cq *cq,
			  int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn, int user_prio,
			     struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);

void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);

int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);

int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
#ifdef CONFIG_MLX4_EN_DCB
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
#endif

int mlx4_en_setup_tc(struct net_device *dev, u8 up);
#ifdef CONFIG_RFS_ACCEL
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#endif
#define MLX4_EN_NUM_SELF_TEST	5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);

#define DEV_FEATURE_CHANGED(dev, new_features, feature) \
	(((dev)->features & (feature)) ^ ((new_features) & (feature)))
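/*
 * Example: react only when a specific feature bit actually flips,
 * roughly what mlx4_en_set_features() does in en_netdev.c:
 *
 *	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_LOOPBACK))
 *		mlx4_en_update_loopback_state(dev, features);
 */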
int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t new_features);
void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
				     struct mlx4_en_stats_bitmap *stats_bitmap,
				     u8 rx_ppp, u8 rx_pause,
				     u8 tx_ppp, u8 tx_pause);
int mlx4_en_netdev_event(struct notifier_block *this,
			 unsigned long event, void *ptr);
/*
 * Functions for time stamping
 */
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
/* Globals
 */
extern const struct ethtool_ops mlx4_en_ethtool_ops;
/*
 * printk / logging functions
 */

__printf(3, 4)
void en_print(const char *level, const struct mlx4_en_priv *priv,
	      const char *format, ...);

#define en_dbg(mlevel, priv, format, ...)				\
do {									\
	if (NETIF_MSG_##mlevel & (priv)->msg_enable)			\
		en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__);	\
} while (0)
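/*
 * Usage: the first argument names a NETIF_MSG_* bit without the prefix,
 * so the message is emitted only when that bit is set in msg_enable:
 *
 *	en_dbg(DRV, priv, "Setting tx_usecs to %d\n", tx_usecs);
 *
 * (the token pasting above expands DRV to NETIF_MSG_DRV).
 */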
#define en_warn(priv, format, ...) \
	en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
#define en_err(priv, format, ...) \
	en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
#define en_info(priv, format, ...) \
	en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
#define mlx4_err(mdev, format, ...) \
	pr_err(DRV_NAME " %s: " format, \
	       dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#define mlx4_info(mdev, format, ...) \
	pr_info(DRV_NAME " %s: " format, \
		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#define mlx4_warn(mdev, format, ...) \
	pr_warn(DRV_NAME " %s: " format, \
		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)

#endif /* _MLX4_EN_H_ */