/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>
#include <linux/idr.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"
MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)
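
/* QEDR_WQ_MULTIPLIER_DFT is, as far as the surrounding code shows, a default
 * over-allocation factor for work-queue sizing: it is stored in
 * dev->wq_multiplier in qedr_add() below and presumably consumed by the QP
 * creation path in verbs.c, leaving headroom so a single posted ib_send_wr
 * can expand into multiple hardware WQEs without overflowing the ring.
 */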

static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
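
/* The firmware version is packed one byte per component, major in the top
 * byte: e.g. fw_ver == 0x08211010 formats as "8.33.16.16".
 */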

static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
{
	struct qedr_dev *qdev;

	qdev = get_qedr_dev(dev);
	dev_hold(qdev->ndev);

	/* The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device reaches
	 * NETDEV_UNREGISTER_FINAL state.
	 */
	return qdev->ndev;
}

static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
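
/* Setting both IBA_ROCE and IBA_ROCE_UDP_ENCAP advertises the port as
 * capable of RoCE v1 (Ethertype-encapsulated) and RoCE v2 (UDP-encapsulated)
 * traffic. The iWARP variant below instead reports a single-entry pkey/gid
 * table and no MAD support, since MADs are an IB/RoCE-only concept.
 */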

static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = 1;
	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}

static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	dev->ibdev.query_gid = qedr_iw_query_gid;

	dev->ibdev.get_port_immutable = qedr_iw_port_immutable;

	dev->ibdev.iwcm = kzalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = qedr_iw_connect;
	dev->ibdev.iwcm->accept = qedr_iw_accept;
	dev->ibdev.iwcm->reject = qedr_iw_reject;
	dev->ibdev.iwcm->create_listen = qedr_iw_create_listen;
	dev->ibdev.iwcm->destroy_listen = qedr_iw_destroy_listen;
	dev->ibdev.iwcm->add_ref = qedr_iw_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = qedr_iw_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = qedr_iw_get_qp;

	memcpy(dev->ibdev.iwcm->ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iwcm->ifname));

	return 0;
}

static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;
	dev->ibdev.query_gid = qedr_query_gid;

	dev->ibdev.add_gid = qedr_add_gid;
	dev->ibdev.del_gid = qedr_del_gid;

	dev->ibdev.get_port_immutable = qedr_roce_port_immutable;
}

static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;

	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
				     QEDR_UVERBS(QUERY_DEVICE) |
				     QEDR_UVERBS(QUERY_PORT) |
				     QEDR_UVERBS(ALLOC_PD) |
				     QEDR_UVERBS(DEALLOC_PD) |
				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
				     QEDR_UVERBS(CREATE_CQ) |
				     QEDR_UVERBS(RESIZE_CQ) |
				     QEDR_UVERBS(DESTROY_CQ) |
				     QEDR_UVERBS(REQ_NOTIFY_CQ) |
				     QEDR_UVERBS(CREATE_QP) |
				     QEDR_UVERBS(MODIFY_QP) |
				     QEDR_UVERBS(QUERY_QP) |
				     QEDR_UVERBS(DESTROY_QP) |
				     QEDR_UVERBS(REG_MR) |
				     QEDR_UVERBS(DEREG_MR) |
				     QEDR_UVERBS(POLL_CQ) |
				     QEDR_UVERBS(POST_SEND) |
				     QEDR_UVERBS(POST_RECV);
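
	/* QEDR_UVERBS(CMD) presumably expands (in qedr.h) to
	 * 1ull << IB_USER_VERBS_CMD_##CMD; the mask above declares which
	 * uverbs commands userspace may issue against this device.
	 */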

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;

	dev->ibdev.query_device = qedr_query_device;
	dev->ibdev.query_port = qedr_query_port;
	dev->ibdev.modify_port = qedr_modify_port;

	dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
	dev->ibdev.mmap = qedr_mmap;

	dev->ibdev.alloc_pd = qedr_alloc_pd;
	dev->ibdev.dealloc_pd = qedr_dealloc_pd;

	dev->ibdev.create_cq = qedr_create_cq;
	dev->ibdev.destroy_cq = qedr_destroy_cq;
	dev->ibdev.resize_cq = qedr_resize_cq;
	dev->ibdev.req_notify_cq = qedr_arm_cq;

	dev->ibdev.create_qp = qedr_create_qp;
	dev->ibdev.modify_qp = qedr_modify_qp;
	dev->ibdev.query_qp = qedr_query_qp;
	dev->ibdev.destroy_qp = qedr_destroy_qp;

	dev->ibdev.query_pkey = qedr_query_pkey;

	dev->ibdev.create_ah = qedr_create_ah;
	dev->ibdev.destroy_ah = qedr_destroy_ah;

	dev->ibdev.get_dma_mr = qedr_get_dma_mr;
	dev->ibdev.dereg_mr = qedr_dereg_mr;
	dev->ibdev.reg_user_mr = qedr_reg_user_mr;
	dev->ibdev.alloc_mr = qedr_alloc_mr;
	dev->ibdev.map_mr_sg = qedr_map_mr_sg;

	dev->ibdev.poll_cq = qedr_poll_cq;
	dev->ibdev.post_send = qedr_post_send;
	dev->ibdev.post_recv = qedr_post_recv;

	dev->ibdev.process_mad = qedr_process_mad;

	dev->ibdev.get_netdev = qedr_get_netdev;

	dev->ibdev.dev.parent = &dev->pdev->dev;

	dev->ibdev.get_link_layer = qedr_link_layer;
	dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;

	return ib_register_device(&dev->ibdev, NULL);
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}
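
/* A status block is a small DMA-coherent structure that the device updates
 * in place; its pi_array holds per-protocol producer indices. In
 * qedr_alloc_resources() below, each CNQ's hw_cons_ptr is pointed at the
 * RoCE entry of its status block so the IRQ handler can read how far the
 * hardware has produced.
 */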

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}
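
/* Resource layout: the driver runs one CNQ (completion notification queue)
 * per MSI-X vector it was granted. Each CNQ pairs a PBL-backed ring that the
 * firmware fills with CQ handles with a status block carrying the ring's
 * producer index. The error paths below unwind in strict reverse order of
 * allocation (err4..err1).
 */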

static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	u16 n_entries;
	int i, rc;

	dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
				QEDR_MAX_SGID, GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);

	if (IS_IWARP(dev)) {
		spin_lock_init(&dev->idr_lock);
		idr_init(&dev->qpidr);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
	}

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U16,
						   n_entries,
						   sizeof(struct regpair *),
						   &cnq->pbl, NULL);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

/* QEDR sysfs interface */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct qedr_dev *dev = dev_get_drvdata(device);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
}

static ssize_t show_hca_type(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);

static struct device_attribute *qedr_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type
};

static void qedr_remove_sysfiles(struct qedr_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
}
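
/* PCIe AtomicOps probe: a requester may only issue AtomicOp requests if
 * every switch port between it and the root complex routes them
 * (PCI_EXP_DEVCAP2_ATOMIC_ROUTE set, egress blocking clear) and the root
 * port itself can complete 64-bit atomics (PCI_EXP_DEVCAP2_ATOMIC_COMP64).
 * For a NIC <-> switch <-> root-port hierarchy, for example, both the switch
 * and the root port must pass the checks below before the requester-enable
 * bit is set on the NIC; otherwise atomics are reported as IB_ATOMIC_NONE.
 */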
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	u32 ctl2, cap2;
	u16 flags;
	int rc;

	bridge = pdev->bus->self;
	if (!bridge)
		goto disable;

	/* Check atomic routing support all the way to root complex */
	while (bridge->bus->parent) {
		rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
		if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
			goto disable;

		rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
		if (rc)
			goto disable;

		rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2);
		if (rc)
			goto disable;

		if (!(cap2 & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) ||
		    (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK))
			goto disable;
		bridge = bridge->bus->parent->self;
	}

	rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
	if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
		goto disable;

	rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
	if (rc || !(cap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64))
		goto disable;

	/* Set atomic operations */
	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
	dev->atomic_cap = IB_ATOMIC_GLOB;
	DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	return;

disable:
	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
				   PCI_EXP_DEVCTL2_ATOMIC_REQ);
	dev->atomic_cap = IB_ATOMIC_NONE;
	DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
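
/* HILO_U64 reassembles a 64-bit value from the two 32-bit halves of a
 * firmware regpair, e.g. HILO_U64(0x1, 0x2) == 0x100000002. The CNQ IRQ
 * handler below uses it to turn each consumed ring entry back into the
 * kernel pointer of the CQ that completed: the handler disables the
 * interrupt, walks the ring from the software consumer index up to the
 * hardware producer index taken from the status block, invokes each CQ's
 * completion handler, then reports the new consumer value to the device
 * and re-enables the interrupt.
 */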
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);
	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);
			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that
		 * the handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			vector = dev->int_info.msix[i * dev->num_hwfns].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}
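
/* The msix[] table is shared with the Ethernet side and, on multi-function
 * (two-hwfn) silicon, interleaves entries per hardware function; hence CNQ i
 * uses vector entry i * dev->num_hwfns rather than entry i.
 */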
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~qed_attr->page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}
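
	/* page_size_caps is a bitmask of supported page sizes. For a
	 * contiguous mask covering everything from the minimum size up,
	 * the two's-complement identity ~caps + 1 == -caps yields exactly
	 * that minimum: e.g. caps == 0xFFFFF000 (4K and larger) gives
	 * 0x1000.
	 */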

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_fmr = qed_attr->max_fmr;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}

static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;

	switch (e_code) {
	case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
		event.event = IB_EVENT_CQ_ERR;
		event_type = EVENT_TYPE_CQ;
		break;
	case ROCE_ASYNC_EVENT_SQ_DRAINED:
		event.event = IB_EVENT_SQ_DRAINED;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
		event.event = IB_EVENT_QP_FATAL;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
		event.event = IB_EVENT_QP_REQ_ERR;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		event_type = EVENT_TYPE_QP;
		break;
	default:
		DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
		       roce_handle64);
	}

	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	default:
		break;
	}
}

static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;
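
	/* The DPI (doorbell page index) returned by rdma_add_user describes
	 * the doorbell BAR window assigned to this RDMA user context; the
	 * verbs code presumably rings kernel CQ/SQ doorbells through db_addr
	 * and exposes db_phys_addr to userspace for mmap()ing.
	 */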

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0, i;

	dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;
	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "not enough CNQ resources.\n");
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
			goto sysfs_err;

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

sysfs_err:
	ib_unregister_device(&dev->ibdev);
reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	qedr_remove_sysfiles(dev);
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);
	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));
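
	/* This is the standard EUI-64 derivation for a link-local GID: flip
	 * the universal/local bit of the first MAC octet and splice 0xff,0xfe
	 * between the OUI and NIC halves. For example, MAC 00:0e:1e:12:34:56
	 * yields GID fe80::020e:1eff:fe12:3456.
	 */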

	/* Update LL2 */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the event
 * to the stack.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};
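
/* qedr does not bind to PCI devices itself; it registers this driver
 * structure with the qede Ethernet driver, which invokes .add/.remove for
 * each RDMA-capable port it probes and .notify for link and MAC events,
 * keeping the RoCE lifecycle subordinate to the NIC's.
 */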

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);