// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"
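
/*
 * The loop transport short-circuits the NVMe over Fabrics host and
 * target stacks within a single kernel: commands submitted by the host
 * side are handed directly to the co-located nvmet core, so no wire
 * protocol or network is involved.  A minimal usage sketch, assuming a
 * subsystem and port have already been configured through nvmet
 * configfs:
 *
 *	modprobe nvme-loop
 *	nvme connect -t loop -n <subsysnqn>
 */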

#define NVME_LOOP_MAX_SEGMENTS	256
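
/*
 * Per-command context, carried in the blk-mq PDU.  Because host and
 * target share one kernel, the iod holds both the host-side command and
 * completion (cmd, cqe) and the target-side nvmet_req for the same
 * command.
 */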
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}
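
/*
 * Queue 0 is the admin queue and uses its own tag set; I/O queues share
 * ctrl->tag_set, whose entries are indexed from 0, hence the "- 1".
 */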
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"got bad command_id %#x on queue %d\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
			nvme_loop_complete_rq(rq);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}
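
/*
 * blk-mq .queue_rq handler: initialize the target-side nvmet_req in
 * place, map the request's scatterlist into it, and defer execution to
 * nvmet_wq so the target code runs in process context.
 */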
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	queue_work(nvmet_wq, &iod->work);
	return BLK_STS_OK;
}
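
/*
 * AEN commands use the pre-allocated async_event_iod instead of a
 * struct request; see the matching special case in
 * nvme_loop_queue_response().
 */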
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	queue_work(nvmet_wq, &iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = &ctrl->ctrl;
	nvme_req(req)->cmd = &iod->cmd;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static struct lock_class_key loop_hctx_fq_lock_key;

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	/*
	 * flush_end_io() can be called recursively for us, so use our own
	 * lock class key for avoiding lockdep possible recursive locking,
	 * then we can remove the dynamically allocated lock class for each
	 * flush queue, that way may cause horrible boot delay.
	 */
	blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
	ctrl->ctrl.queue_count = 1;
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}
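
/*
 * Admin queue bring-up: allocate the admin tag set and queues, issue
 * the fabrics Connect, enable the controller, and read its identify
 * data.  The error labels unwind in reverse order of setup.
 */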
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
		error = PTR_ERR(ctrl->ctrl.fabrics_q);
		goto out_free_tagset;
	}

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_cleanup_fabrics_q;
	}
	/* reset stopped state for the fresh admin queue */
	clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	nvme_start_admin_queue(&ctrl->ctrl);

	error = nvme_init_ctrl_finish(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}
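
/* Quiesce and tear down all queues; shared by the reset and delete paths. */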
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		nvme_cancel_tagset(&ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	nvme_stop_admin_queue(&ctrl->ctrl);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	nvme_cancel_admin_tagset(&ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}
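
/*
 * Controller reset re-runs the same admin and I/O queue setup used at
 * create time.  Unlike transports with a real fabric, there is no
 * reconnect logic: a failed reset removes the controller.
 */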
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
			/* state change failure for non-deleted ctrl? */
			WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;
	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;
	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	nvme_start_ctrl(&ctrl->ctrl);
	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
	if (ret)
		goto out_free_tagset;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_mq_destroy_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}
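
/* Host-side entry point, reached from userspace via "nvme connect -t loop". */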
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret) {
		kfree(ctrl);
		goto out;
	}

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		WARN_ON_ONCE(1);

	ret = -ENOMEM;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
out:
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}
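
/*
 * nvmet port hooks: a loop port is just a list entry that makes the
 * port visible to nvme_loop_find_port() above.
 */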
static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port.  This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */