drivers/nvme/host/pci.c
1 /*
2  * NVM Express device driver
3  * Copyright (c) 2011-2014, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14
15 #include <linux/aer.h>
16 #include <linux/blkdev.h>
17 #include <linux/blk-mq.h>
18 #include <linux/blk-mq-pci.h>
19 #include <linux/dmi.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/io.h>
23 #include <linux/mm.h>
24 #include <linux/module.h>
25 #include <linux/mutex.h>
26 #include <linux/once.h>
27 #include <linux/pci.h>
28 #include <linux/t10-pi.h>
29 #include <linux/types.h>
30 #include <linux/io-64-nonatomic-lo-hi.h>
31 #include <linux/sed-opal.h>
32
33 #include "nvme.h"
34
35 #define SQ_SIZE(depth)          ((depth) * sizeof(struct nvme_command))
36 #define CQ_SIZE(depth)          ((depth) * sizeof(struct nvme_completion))
37
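/*
 * Number of SGL descriptors that fit in one page.  With 4 KiB pages and
 * 16-byte descriptors this works out to 256 entries per SGL segment.
 */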
38 #define SGES_PER_PAGE   (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
39
40 static int use_threaded_interrupts;
41 module_param(use_threaded_interrupts, int, 0);
42
43 static bool use_cmb_sqes = true;
44 module_param(use_cmb_sqes, bool, 0644);
45 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
46
47 static unsigned int max_host_mem_size_mb = 128;
48 module_param(max_host_mem_size_mb, uint, 0444);
49 MODULE_PARM_DESC(max_host_mem_size_mb,
50         "Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
51
52 static unsigned int sgl_threshold = SZ_32K;
53 module_param(sgl_threshold, uint, 0644);
54 MODULE_PARM_DESC(sgl_threshold,
55                 "Use SGLs when the average request segment size is greater than or equal to "
56                 "this size. Use 0 to disable SGLs.");
57
58 static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
59 static const struct kernel_param_ops io_queue_depth_ops = {
60         .set = io_queue_depth_set,
61         .get = param_get_int,
62 };
63
64 static int io_queue_depth = 1024;
65 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
66 MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");
67
68 struct nvme_dev;
69 struct nvme_queue;
70
71 static void nvme_process_cq(struct nvme_queue *nvmeq);
72 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
73
74 /*
75  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
76  */
77 struct nvme_dev {
78         struct nvme_queue *queues;
79         struct blk_mq_tag_set tagset;
80         struct blk_mq_tag_set admin_tagset;
81         u32 __iomem *dbs;
82         struct device *dev;
83         struct dma_pool *prp_page_pool;
84         struct dma_pool *prp_small_pool;
85         unsigned online_queues;
86         unsigned max_qid;
87         int q_depth;
88         u32 db_stride;
89         void __iomem *bar;
90         unsigned long bar_mapped_size;
91         struct work_struct remove_work;
92         struct mutex shutdown_lock;
93         bool subsystem;
94         void __iomem *cmb;
95         pci_bus_addr_t cmb_bus_addr;
96         u64 cmb_size;
97         u32 cmbsz;
98         u32 cmbloc;
99         struct nvme_ctrl ctrl;
100         struct completion ioq_wait;
101
102         /* shadow doorbell buffer support: */
103         u32 *dbbuf_dbs;
104         dma_addr_t dbbuf_dbs_dma_addr;
105         u32 *dbbuf_eis;
106         dma_addr_t dbbuf_eis_dma_addr;
107
108         /* host memory buffer support: */
109         u64 host_mem_size;
110         u32 nr_host_mem_descs;
111         dma_addr_t host_mem_descs_dma;
112         struct nvme_host_mem_buf_desc *host_mem_descs;
113         void **host_mem_desc_bufs;
114 };
115
116 static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
117 {
118         int n = 0, ret;
119
120         ret = kstrtoint(val, 10, &n);
121         if (ret != 0 || n < 2)
122                 return -EINVAL;
123
124         return param_set_int(val, kp);
125 }
126
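/*
 * Doorbells (and their shadow copies) are laid out as pairs per queue:
 * the submission queue tail at slot 2*qid and the completion queue head
 * at slot 2*qid + 1, each scaled by the controller's doorbell stride.
 */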
127 static inline unsigned int sq_idx(unsigned int qid, u32 stride)
128 {
129         return qid * 2 * stride;
130 }
131
132 static inline unsigned int cq_idx(unsigned int qid, u32 stride)
133 {
134         return (qid * 2 + 1) * stride;
135 }
136
137 static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
138 {
139         return container_of(ctrl, struct nvme_dev, ctrl);
140 }
141
142 /*
143  * An NVM Express queue.  Each device has at least two (one for admin
144  * commands and one for I/O commands).
145  */
146 struct nvme_queue {
147         struct device *q_dmadev;
148         struct nvme_dev *dev;
149         spinlock_t q_lock;
150         struct nvme_command *sq_cmds;
151         struct nvme_command __iomem *sq_cmds_io;
152         volatile struct nvme_completion *cqes;
153         struct blk_mq_tags **tags;
154         dma_addr_t sq_dma_addr;
155         dma_addr_t cq_dma_addr;
156         u32 __iomem *q_db;
157         u16 q_depth;
158         s16 cq_vector;
159         u16 sq_tail;
160         u16 cq_head;
161         u16 qid;
162         u8 cq_phase;
163         u8 cqe_seen;
164         u32 *dbbuf_sq_db;
165         u32 *dbbuf_cq_db;
166         u32 *dbbuf_sq_ei;
167         u32 *dbbuf_cq_ei;
168 };
169
170 /*
171  * The nvme_iod describes the data in an I/O, including the list of PRP
172  * entries.  You can't see it in this data structure because C doesn't let
173  * me express that.  Use nvme_init_iod to ensure there's enough space
174  * allocated to store the PRP list.
175  */
176 struct nvme_iod {
177         struct nvme_request req;
178         struct nvme_queue *nvmeq;
179         bool use_sgl;
180         int aborted;
181         int npages;             /* In the PRP list. 0 means small pool in use */
182         int nents;              /* Used in scatterlist */
183         int length;             /* Of data, in bytes */
184         dma_addr_t first_dma;
185         struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
186         struct scatterlist *sg;
187         struct scatterlist inline_sg[0];
188 };
189
190 /*
191  * Check we didn't inadvertently grow the command struct
192  */
193 static inline void _nvme_check_size(void)
194 {
195         BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
196         BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
197         BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
198         BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
199         BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
200         BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
201         BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
202         BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
203         BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
204         BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
205         BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
206         BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
207         BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
208 }
209
210 static inline unsigned int nvme_dbbuf_size(u32 stride)
211 {
212         return ((num_possible_cpus() + 1) * 8 * stride);
213 }
214
215 static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
216 {
217         unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
218
219         if (dev->dbbuf_dbs)
220                 return 0;
221
222         dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
223                                             &dev->dbbuf_dbs_dma_addr,
224                                             GFP_KERNEL);
225         if (!dev->dbbuf_dbs)
226                 return -ENOMEM;
227         dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
228                                             &dev->dbbuf_eis_dma_addr,
229                                             GFP_KERNEL);
230         if (!dev->dbbuf_eis) {
231                 dma_free_coherent(dev->dev, mem_size,
232                                   dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
233                 dev->dbbuf_dbs = NULL;
234                 return -ENOMEM;
235         }
236
237         return 0;
238 }
239
240 static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
241 {
242         unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
243
244         if (dev->dbbuf_dbs) {
245                 dma_free_coherent(dev->dev, mem_size,
246                                   dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
247                 dev->dbbuf_dbs = NULL;
248         }
249         if (dev->dbbuf_eis) {
250                 dma_free_coherent(dev->dev, mem_size,
251                                   dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
252                 dev->dbbuf_eis = NULL;
253         }
254 }
255
256 static void nvme_dbbuf_init(struct nvme_dev *dev,
257                             struct nvme_queue *nvmeq, int qid)
258 {
259         if (!dev->dbbuf_dbs || !qid)
260                 return;
261
262         nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
263         nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
264         nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
265         nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
266 }
267
268 static void nvme_dbbuf_set(struct nvme_dev *dev)
269 {
270         struct nvme_command c;
271
272         if (!dev->dbbuf_dbs)
273                 return;
274
275         memset(&c, 0, sizeof(c));
276         c.dbbuf.opcode = nvme_admin_dbbuf;
277         c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
278         c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
279
280         if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
281                 dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
282                 /* Free memory and continue on */
283                 nvme_dbbuf_dma_free(dev);
284         }
285 }
286
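/*
 * Returns true when the doorbell update has passed the event index the
 * controller asked to be notified at, i.e. event_idx lies in the
 * half-open range [old, new_idx) in 16-bit modular arithmetic (the same
 * wrap-safe check virtio uses for its event index).
 */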
287 static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
288 {
289         return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
290 }
291
292 /* Update dbbuf and return true if an MMIO is required */
293 static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
294                                               volatile u32 *dbbuf_ei)
295 {
296         if (dbbuf_db) {
297                 u16 old_value;
298
299                 /*
300                  * Ensure that the queue is written before updating
301                  * the doorbell in memory
302                  */
303                 wmb();
304
305                 old_value = *dbbuf_db;
306                 *dbbuf_db = value;
307
308                 if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
309                         return false;
310         }
311
312         return true;
313 }
314
315 /*
316  * Max size of iod being embedded in the request payload
317  */
318 #define NVME_INT_PAGES          2
319 #define NVME_INT_BYTES(dev)     (NVME_INT_PAGES * (dev)->ctrl.page_size)
320
321 /*
322  * Will slightly overestimate the number of pages needed.  This is OK
323  * as it only leads to a small amount of wasted memory for the lifetime of
324  * the I/O.
325  */
326 static int nvme_npages(unsigned size, struct nvme_dev *dev)
327 {
328         unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
329                                       dev->ctrl.page_size);
330         return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
331 }
332
333 /*
334  * Calculates the number of pages needed for the SGL segments. For example a 4k
335  * page can accommodate 256 SGL descriptors.
336  */
337 static int nvme_pci_npages_sgl(unsigned int num_seg)
338 {
339         return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
340 }
341
342 static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
343                 unsigned int size, unsigned int nseg, bool use_sgl)
344 {
345         size_t alloc_size;
346
347         if (use_sgl)
348                 alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
349         else
350                 alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);
351
352         return alloc_size + sizeof(struct scatterlist) * nseg;
353 }
354
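/*
 * Size of the per-request driver data (struct nvme_iod plus the inline
 * scatterlist and PRP/SGL pointer space) that blk-mq allocates up front,
 * so that requests within NVME_INT_PAGES/NVME_INT_BYTES need no extra
 * scatterlist allocation in the I/O path.
 */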
355 static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
356 {
357         unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
358                                     NVME_INT_BYTES(dev), NVME_INT_PAGES,
359                                     use_sgl);
360
361         return sizeof(struct nvme_iod) + alloc_size;
362 }
363
364 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
365                                 unsigned int hctx_idx)
366 {
367         struct nvme_dev *dev = data;
368         struct nvme_queue *nvmeq = &dev->queues[0];
369
370         WARN_ON(hctx_idx != 0);
371         WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
372         WARN_ON(nvmeq->tags);
373
374         hctx->driver_data = nvmeq;
375         nvmeq->tags = &dev->admin_tagset.tags[0];
376         return 0;
377 }
378
379 static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
380 {
381         struct nvme_queue *nvmeq = hctx->driver_data;
382
383         nvmeq->tags = NULL;
384 }
385
386 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
387                           unsigned int hctx_idx)
388 {
389         struct nvme_dev *dev = data;
390         struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
391
392         if (!nvmeq->tags)
393                 nvmeq->tags = &dev->tagset.tags[hctx_idx];
394
395         WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
396         hctx->driver_data = nvmeq;
397         return 0;
398 }
399
400 static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
401                 unsigned int hctx_idx, unsigned int numa_node)
402 {
403         struct nvme_dev *dev = set->driver_data;
404         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
405         int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
406         struct nvme_queue *nvmeq = &dev->queues[queue_idx];
407
408         BUG_ON(!nvmeq);
409         iod->nvmeq = nvmeq;
410         return 0;
411 }
412
413 static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
414 {
415         struct nvme_dev *dev = set->driver_data;
416
417         return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
418 }
419
420 /**
421  * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
422  * @nvmeq: The queue to use
423  * @cmd: The command to send
424  *
425  * Safe to use from interrupt context
426  */
427 static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
428                                                 struct nvme_command *cmd)
429 {
430         u16 tail = nvmeq->sq_tail;
431
432         if (nvmeq->sq_cmds_io)
433                 memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
434         else
435                 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
436
437         if (++tail == nvmeq->q_depth)
438                 tail = 0;
439         if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db,
440                                               nvmeq->dbbuf_sq_ei))
441                 writel(tail, nvmeq->q_db);
442         nvmeq->sq_tail = tail;
443 }
444
445 static void **nvme_pci_iod_list(struct request *req)
446 {
447         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
448         return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
449 }
450
451 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
452 {
453         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
454         int nseg = blk_rq_nr_phys_segments(req);
455         unsigned int avg_seg_size;
456
457         if (nseg == 0)
458                 return false;
459
460         avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
461
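        /*
         * Use SGLs only when the controller advertises SGL support (low
         * bits of the Identify Controller SGLS field), only on I/O queues,
         * and only when the average segment size reaches sgl_threshold.
         */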
462         if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
463                 return false;
464         if (!iod->nvmeq->qid)
465                 return false;
466         if (!sgl_threshold || avg_seg_size < sgl_threshold)
467                 return false;
468         return true;
469 }
470
471 static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
472 {
473         struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
474         int nseg = blk_rq_nr_phys_segments(rq);
475         unsigned int size = blk_rq_payload_bytes(rq);
476
477         iod->use_sgl = nvme_pci_use_sgls(dev, rq);
478
479         if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
480                 size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
481                                 iod->use_sgl);
482
483                 iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
484                 if (!iod->sg)
485                         return BLK_STS_RESOURCE;
486         } else {
487                 iod->sg = iod->inline_sg;
488         }
489
490         iod->aborted = 0;
491         iod->npages = -1;
492         iod->nents = 0;
493         iod->length = size;
494
495         return BLK_STS_OK;
496 }
497
498 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
499 {
500         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
501         const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
502         dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
503
504         int i;
505
506         if (iod->npages == 0)
507                 dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
508                         dma_addr);
509
510         for (i = 0; i < iod->npages; i++) {
511                 void *addr = nvme_pci_iod_list(req)[i];
512
513                 if (iod->use_sgl) {
514                         struct nvme_sgl_desc *sg_list = addr;
515
516                         next_dma_addr =
517                             le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
518                 } else {
519                         __le64 *prp_list = addr;
520
521                         next_dma_addr = le64_to_cpu(prp_list[last_prp]);
522                 }
523
524                 dma_pool_free(dev->prp_page_pool, addr, dma_addr);
525                 dma_addr = next_dma_addr;
526         }
527
528         if (iod->sg != iod->inline_sg)
529                 kfree(iod->sg);
530 }
531
532 #ifdef CONFIG_BLK_DEV_INTEGRITY
533 static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
534 {
535         if (be32_to_cpu(pi->ref_tag) == v)
536                 pi->ref_tag = cpu_to_be32(p);
537 }
538
539 static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
540 {
541         if (be32_to_cpu(pi->ref_tag) == p)
542                 pi->ref_tag = cpu_to_be32(v);
543 }
544
545 /**
546  * nvme_dif_remap - remaps ref tags to bip seed and physical lba
547  *
548  * The virtual start sector is the one that was originally submitted by the
549  * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical
550  * start sector may be different. Remap protection information to match the
551  * physical LBA on writes, and back to the original seed on reads.
552  *
553  * Type 0 and 3 do not have a ref tag, so no remapping required.
554  */
555 static void nvme_dif_remap(struct request *req,
556                         void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
557 {
558         struct nvme_ns *ns = req->rq_disk->private_data;
559         struct bio_integrity_payload *bip;
560         struct t10_pi_tuple *pi;
561         void *p, *pmap;
562         u32 i, nlb, ts, phys, virt;
563
564         if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
565                 return;
566
567         bip = bio_integrity(req->bio);
568         if (!bip)
569                 return;
570
571         pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
572
573         p = pmap;
574         virt = bip_get_seed(bip);
575         phys = nvme_block_nr(ns, blk_rq_pos(req));
576         nlb = (blk_rq_bytes(req) >> ns->lba_shift);
577         ts = ns->disk->queue->integrity.tuple_size;
578
579         for (i = 0; i < nlb; i++, virt++, phys++) {
580                 pi = (struct t10_pi_tuple *)p;
581                 dif_swap(phys, virt, pi);
582                 p += ts;
583         }
584         kunmap_atomic(pmap);
585 }
586 #else /* CONFIG_BLK_DEV_INTEGRITY */
587 static void nvme_dif_remap(struct request *req,
588                         void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
589 {
590 }
591 static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
592 {
593 }
594 static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
595 {
596 }
597 #endif
598
599 static void nvme_print_sgl(struct scatterlist *sgl, int nents)
600 {
601         int i;
602         struct scatterlist *sg;
603
604         for_each_sg(sgl, sg, nents, i) {
605                 dma_addr_t phys = sg_phys(sg);
606                 pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
607                         "dma_address:%pad dma_length:%d\n",
608                         i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
609                         sg_dma_len(sg));
610         }
611 }
612
613 static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
614                 struct request *req, struct nvme_rw_command *cmnd)
615 {
616         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
617         struct dma_pool *pool;
618         int length = blk_rq_payload_bytes(req);
619         struct scatterlist *sg = iod->sg;
620         int dma_len = sg_dma_len(sg);
621         u64 dma_addr = sg_dma_address(sg);
622         u32 page_size = dev->ctrl.page_size;
623         int offset = dma_addr & (page_size - 1);
624         __le64 *prp_list;
625         void **list = nvme_pci_iod_list(req);
626         dma_addr_t prp_dma;
627         int nprps, i;
628
629         length -= (page_size - offset);
630         if (length <= 0) {
631                 iod->first_dma = 0;
632                 goto done;
633         }
634
635         dma_len -= (page_size - offset);
636         if (dma_len) {
637                 dma_addr += (page_size - offset);
638         } else {
639                 sg = sg_next(sg);
640                 dma_addr = sg_dma_address(sg);
641                 dma_len = sg_dma_len(sg);
642         }
643
644         if (length <= page_size) {
645                 iod->first_dma = dma_addr;
646                 goto done;
647         }
648
649         nprps = DIV_ROUND_UP(length, page_size);
650         if (nprps <= (256 / 8)) {
651                 pool = dev->prp_small_pool;
652                 iod->npages = 0;
653         } else {
654                 pool = dev->prp_page_pool;
655                 iod->npages = 1;
656         }
657
658         prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
659         if (!prp_list) {
660                 iod->first_dma = dma_addr;
661                 iod->npages = -1;
662                 return BLK_STS_RESOURCE;
663         }
664         list[0] = prp_list;
665         iod->first_dma = prp_dma;
666         i = 0;
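        /*
         * Fill in one PRP entry per controller page.  When a PRP list page
         * fills up, its last slot is turned into a pointer to a freshly
         * allocated next page (PRP list chaining), and the displaced entry
         * is carried over into slot 0 of the new page.
         */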
667         for (;;) {
668                 if (i == page_size >> 3) {
669                         __le64 *old_prp_list = prp_list;
670                         prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
671                         if (!prp_list)
672                                 return BLK_STS_RESOURCE;
673                         list[iod->npages++] = prp_list;
674                         prp_list[0] = old_prp_list[i - 1];
675                         old_prp_list[i - 1] = cpu_to_le64(prp_dma);
676                         i = 1;
677                 }
678                 prp_list[i++] = cpu_to_le64(dma_addr);
679                 dma_len -= page_size;
680                 dma_addr += page_size;
681                 length -= page_size;
682                 if (length <= 0)
683                         break;
684                 if (dma_len > 0)
685                         continue;
686                 if (unlikely(dma_len < 0))
687                         goto bad_sgl;
688                 sg = sg_next(sg);
689                 dma_addr = sg_dma_address(sg);
690                 dma_len = sg_dma_len(sg);
691         }
692
693 done:
694         cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
695         cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
696
697         return BLK_STS_OK;
698
699  bad_sgl:
700         WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
701                         "Invalid SGL for payload:%d nents:%d\n",
702                         blk_rq_payload_bytes(req), iod->nents);
703         return BLK_STS_IOERR;
704 }
705
706 static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
707                 struct scatterlist *sg)
708 {
709         sge->addr = cpu_to_le64(sg_dma_address(sg));
710         sge->length = cpu_to_le32(sg_dma_len(sg));
711         sge->type = NVME_SGL_FMT_DATA_DESC << 4;
712 }
713
714 static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
715                 dma_addr_t dma_addr, int entries)
716 {
717         sge->addr = cpu_to_le64(dma_addr);
718         if (entries < SGES_PER_PAGE) {
719                 sge->length = cpu_to_le32(entries * sizeof(*sge));
720                 sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
721         } else {
722                 sge->length = cpu_to_le32(PAGE_SIZE);
723                 sge->type = NVME_SGL_FMT_SEG_DESC << 4;
724         }
725 }
726
727 static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
728                 struct request *req, struct nvme_rw_command *cmd, int entries)
729 {
730         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
731         struct dma_pool *pool;
732         struct nvme_sgl_desc *sg_list;
733         struct scatterlist *sg = iod->sg;
734         dma_addr_t sgl_dma;
735         int i = 0;
736
737         /* set the transfer type to SGL */
738         cmd->flags = NVME_CMD_SGL_METABUF;
739
740         if (entries == 1) {
741                 nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
742                 return BLK_STS_OK;
743         }
744
745         if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
746                 pool = dev->prp_small_pool;
747                 iod->npages = 0;
748         } else {
749                 pool = dev->prp_page_pool;
750                 iod->npages = 1;
751         }
752
753         sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
754         if (!sg_list) {
755                 iod->npages = -1;
756                 return BLK_STS_RESOURCE;
757         }
758
759         nvme_pci_iod_list(req)[0] = sg_list;
760         iod->first_dma = sgl_dma;
761
762         nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
763
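        /*
         * Fill in one data descriptor per mapped segment.  When a descriptor
         * page fills up, the last data descriptor is moved into a newly
         * allocated page and its old slot is rewritten as a segment
         * descriptor pointing at that new page.
         */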
764         do {
765                 if (i == SGES_PER_PAGE) {
766                         struct nvme_sgl_desc *old_sg_desc = sg_list;
767                         struct nvme_sgl_desc *link = &old_sg_desc[i - 1];
768
769                         sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
770                         if (!sg_list)
771                                 return BLK_STS_RESOURCE;
772
773                         i = 0;
774                         nvme_pci_iod_list(req)[iod->npages++] = sg_list;
775                         sg_list[i++] = *link;
776                         nvme_pci_sgl_set_seg(link, sgl_dma, entries);
777                 }
778
779                 nvme_pci_sgl_set_data(&sg_list[i++], sg);
780                 sg = sg_next(sg);
781         } while (--entries > 0);
782
783         return BLK_STS_OK;
784 }
785
786 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
787                 struct nvme_command *cmnd)
788 {
789         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
790         struct request_queue *q = req->q;
791         enum dma_data_direction dma_dir = rq_data_dir(req) ?
792                         DMA_TO_DEVICE : DMA_FROM_DEVICE;
793         blk_status_t ret = BLK_STS_IOERR;
794         int nr_mapped;
795
796         sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
797         iod->nents = blk_rq_map_sg(q, req, iod->sg);
798         if (!iod->nents)
799                 goto out;
800
801         ret = BLK_STS_RESOURCE;
802         nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
803                         DMA_ATTR_NO_WARN);
804         if (!nr_mapped)
805                 goto out;
806
807         if (iod->use_sgl)
808                 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
809         else
810                 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
811
812         if (ret != BLK_STS_OK)
813                 goto out_unmap;
814
815         ret = BLK_STS_IOERR;
816         if (blk_integrity_rq(req)) {
817                 if (blk_rq_count_integrity_sg(q, req->bio) != 1)
818                         goto out_unmap;
819
820                 sg_init_table(&iod->meta_sg, 1);
821                 if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
822                         goto out_unmap;
823
824                 if (req_op(req) == REQ_OP_WRITE)
825                         nvme_dif_remap(req, nvme_dif_prep);
826
827                 if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
828                         goto out_unmap;
829         }
830
831         if (blk_integrity_rq(req))
832                 cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
833         return BLK_STS_OK;
834
835 out_unmap:
836         dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
837 out:
838         return ret;
839 }
840
841 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
842 {
843         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
844         enum dma_data_direction dma_dir = rq_data_dir(req) ?
845                         DMA_TO_DEVICE : DMA_FROM_DEVICE;
846
847         if (iod->nents) {
848                 dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
849                 if (blk_integrity_rq(req)) {
850                         if (req_op(req) == REQ_OP_READ)
851                                 nvme_dif_remap(req, nvme_dif_complete);
852                         dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
853                 }
854         }
855
856         nvme_cleanup_cmd(req);
857         nvme_free_iod(dev, req);
858 }
859
860 /*
861  * NOTE: ns is NULL when called on the admin queue.
862  */
863 static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
864                          const struct blk_mq_queue_data *bd)
865 {
866         struct nvme_ns *ns = hctx->queue->queuedata;
867         struct nvme_queue *nvmeq = hctx->driver_data;
868         struct nvme_dev *dev = nvmeq->dev;
869         struct request *req = bd->rq;
870         struct nvme_command cmnd;
871         blk_status_t ret;
872
873         ret = nvme_setup_cmd(ns, req, &cmnd);
874         if (ret)
875                 return ret;
876
877         ret = nvme_init_iod(req, dev);
878         if (ret)
879                 goto out_free_cmd;
880
881         if (blk_rq_nr_phys_segments(req)) {
882                 ret = nvme_map_data(dev, req, &cmnd);
883                 if (ret)
884                         goto out_cleanup_iod;
885         }
886
887         blk_mq_start_request(req);
888
889         spin_lock_irq(&nvmeq->q_lock);
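        /* A negative cq_vector means the queue has been suspended. */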
890         if (unlikely(nvmeq->cq_vector < 0)) {
891                 ret = BLK_STS_IOERR;
892                 spin_unlock_irq(&nvmeq->q_lock);
893                 goto out_cleanup_iod;
894         }
895         __nvme_submit_cmd(nvmeq, &cmnd);
896         nvme_process_cq(nvmeq);
897         spin_unlock_irq(&nvmeq->q_lock);
898         return BLK_STS_OK;
899 out_cleanup_iod:
900         nvme_free_iod(dev, req);
901 out_free_cmd:
902         nvme_cleanup_cmd(req);
903         return ret;
904 }
905
906 static void nvme_pci_complete_rq(struct request *req)
907 {
908         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
909
910         nvme_unmap_data(iod->nvmeq->dev, req);
911         nvme_complete_rq(req);
912 }
913
914 /* We read the CQE phase first to check if the rest of the entry is valid */
915 static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
916                 u16 phase)
917 {
918         return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
919 }
920
921 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
922 {
923         u16 head = nvmeq->cq_head;
924
925         if (likely(nvmeq->cq_vector >= 0)) {
926                 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
927                                                       nvmeq->dbbuf_cq_ei))
928                         writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
929         }
930 }
931
932 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
933                 struct nvme_completion *cqe)
934 {
935         struct request *req;
936
937         if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
938                 dev_warn(nvmeq->dev->ctrl.device,
939                         "invalid id %d completed on queue %d\n",
940                         cqe->command_id, le16_to_cpu(cqe->sq_id));
941                 return;
942         }
943
944         /*
945          * AEN requests are special as they don't time out and can
946          * survive any kind of queue freeze and often don't respond to
947          * aborts.  We don't even bother to allocate a struct request
948          * for them but rather special case them here.
949          */
950         if (unlikely(nvmeq->qid == 0 &&
951                         cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
952                 nvme_complete_async_event(&nvmeq->dev->ctrl,
953                                 cqe->status, &cqe->result);
954                 return;
955         }
956
957         nvmeq->cqe_seen = 1;
958         req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
959         nvme_end_request(req, cqe->status, cqe->result);
960 }
961
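/*
 * Pop the entry at the current CQ head if the controller has posted it.
 * The expected phase bit flips each time the head wraps around, which is
 * how fresh completions are told apart from stale entries in the ring.
 */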
962 static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
963                 struct nvme_completion *cqe)
964 {
965         if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
966                 *cqe = nvmeq->cqes[nvmeq->cq_head];
967
968                 if (++nvmeq->cq_head == nvmeq->q_depth) {
969                         nvmeq->cq_head = 0;
970                         nvmeq->cq_phase = !nvmeq->cq_phase;
971                 }
972                 return true;
973         }
974         return false;
975 }
976
977 static void nvme_process_cq(struct nvme_queue *nvmeq)
978 {
979         struct nvme_completion cqe;
980         int consumed = 0;
981
982         while (nvme_read_cqe(nvmeq, &cqe)) {
983                 nvme_handle_cqe(nvmeq, &cqe);
984                 consumed++;
985         }
986
987         if (consumed)
988                 nvme_ring_cq_doorbell(nvmeq);
989 }
990
991 static irqreturn_t nvme_irq(int irq, void *data)
992 {
993         irqreturn_t result;
994         struct nvme_queue *nvmeq = data;
995         spin_lock(&nvmeq->q_lock);
996         nvme_process_cq(nvmeq);
997         result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
998         nvmeq->cqe_seen = 0;
999         spin_unlock(&nvmeq->q_lock);
1000         return result;
1001 }
1002
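/*
 * With threaded interrupts, this hard-IRQ handler only peeks at the CQ;
 * the actual completion processing runs in the threaded handler
 * (nvme_irq) woken via IRQ_WAKE_THREAD.
 */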
1003 static irqreturn_t nvme_irq_check(int irq, void *data)
1004 {
1005         struct nvme_queue *nvmeq = data;
1006         if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
1007                 return IRQ_WAKE_THREAD;
1008         return IRQ_NONE;
1009 }
1010
1011 static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
1012 {
1013         struct nvme_completion cqe;
1014         int found = 0, consumed = 0;
1015
1016         if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
1017                 return 0;
1018
1019         spin_lock_irq(&nvmeq->q_lock);
1020         while (nvme_read_cqe(nvmeq, &cqe)) {
1021                 nvme_handle_cqe(nvmeq, &cqe);
1022                 consumed++;
1023
1024                 if (tag == cqe.command_id) {
1025                         found = 1;
1026                         break;
1027                 }
1028         }
1029
1030         if (consumed)
1031                 nvme_ring_cq_doorbell(nvmeq);
1032         spin_unlock_irq(&nvmeq->q_lock);
1033
1034         return found;
1035 }
1036
1037 static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
1038 {
1039         struct nvme_queue *nvmeq = hctx->driver_data;
1040
1041         return __nvme_poll(nvmeq, tag);
1042 }
1043
1044 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
1045 {
1046         struct nvme_dev *dev = to_nvme_dev(ctrl);
1047         struct nvme_queue *nvmeq = &dev->queues[0];
1048         struct nvme_command c;
1049
1050         memset(&c, 0, sizeof(c));
1051         c.common.opcode = nvme_admin_async_event;
1052         c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1053
1054         spin_lock_irq(&nvmeq->q_lock);
1055         __nvme_submit_cmd(nvmeq, &c);
1056         spin_unlock_irq(&nvmeq->q_lock);
1057 }
1058
1059 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
1060 {
1061         struct nvme_command c;
1062
1063         memset(&c, 0, sizeof(c));
1064         c.delete_queue.opcode = opcode;
1065         c.delete_queue.qid = cpu_to_le16(id);
1066
1067         return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1068 }
1069
1070 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
1071                                                 struct nvme_queue *nvmeq)
1072 {
1073         struct nvme_command c;
1074         int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
1075
1076         /*
1077          * Note: we (ab)use the fact that the prp fields survive if no data
1078          * is attached to the request.
1079          */
1080         memset(&c, 0, sizeof(c));
1081         c.create_cq.opcode = nvme_admin_create_cq;
1082         c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
1083         c.create_cq.cqid = cpu_to_le16(qid);
1084         c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1085         c.create_cq.cq_flags = cpu_to_le16(flags);
1086         c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
1087
1088         return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1089 }
1090
1091 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
1092                                                 struct nvme_queue *nvmeq)
1093 {
1094         struct nvme_command c;
1095         int flags = NVME_QUEUE_PHYS_CONTIG;
1096
1097         /*
1098          * Note: we (ab)use the fact that the prp fields survive if no data
1099          * is attached to the request.
1100          */
1101         memset(&c, 0, sizeof(c));
1102         c.create_sq.opcode = nvme_admin_create_sq;
1103         c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
1104         c.create_sq.sqid = cpu_to_le16(qid);
1105         c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1106         c.create_sq.sq_flags = cpu_to_le16(flags);
1107         c.create_sq.cqid = cpu_to_le16(qid);
1108
1109         return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1110 }
1111
1112 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
1113 {
1114         return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
1115 }
1116
1117 static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
1118 {
1119         return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
1120 }
1121
1122 static void abort_endio(struct request *req, blk_status_t error)
1123 {
1124         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1125         struct nvme_queue *nvmeq = iod->nvmeq;
1126
1127         dev_warn(nvmeq->dev->ctrl.device,
1128                  "Abort status: 0x%x", nvme_req(req)->status);
1129         atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1130         blk_mq_free_request(req);
1131 }
1132
1133 static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1134 {
1135
1136         /* If true, indicates loss of adapter communication, possibly by an
1137          * NVMe Subsystem reset.
1138          */
1139         bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1140
1141         /* If there is a reset/reinit ongoing, we shouldn't reset again. */
1142         switch (dev->ctrl.state) {
1143         case NVME_CTRL_RESETTING:
1144         case NVME_CTRL_CONNECTING:
1145                 return false;
1146         default:
1147                 break;
1148         }
1149
1150         /* We shouldn't reset unless the controller is in a fatal error state
1151          * _or_ if we lost communication with it.
1152          */
1153         if (!(csts & NVME_CSTS_CFS) && !nssro)
1154                 return false;
1155
1156         /* If PCI error recovery process is happening, we cannot reset or
1157          * the recovery mechanism will surely fail.
1158          */
1159         if (pci_channel_offline(to_pci_dev(dev->dev)))
1160                 return false;
1161
1162         return true;
1163 }
1164
1165 static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
1166 {
1167         /* Read a config register to help see what died. */
1168         u16 pci_status;
1169         int result;
1170
1171         result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
1172                                       &pci_status);
1173         if (result == PCIBIOS_SUCCESSFUL)
1174                 dev_warn(dev->ctrl.device,
1175                          "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
1176                          csts, pci_status);
1177         else
1178                 dev_warn(dev->ctrl.device,
1179                          "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
1180                          csts, result);
1181 }
1182
1183 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1184 {
1185         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1186         struct nvme_queue *nvmeq = iod->nvmeq;
1187         struct nvme_dev *dev = nvmeq->dev;
1188         struct request *abort_req;
1189         struct nvme_command cmd;
1190         u32 csts = readl(dev->bar + NVME_REG_CSTS);
1191
1192         /*
1193          * Reset immediately if the controller has failed
1194          */
1195         if (nvme_should_reset(dev, csts)) {
1196                 nvme_warn_reset(dev, csts);
1197                 nvme_dev_disable(dev, false);
1198                 nvme_reset_ctrl(&dev->ctrl);
1199                 return BLK_EH_HANDLED;
1200         }
1201
1202         /*
1203          * Did we miss an interrupt?
1204          */
1205         if (__nvme_poll(nvmeq, req->tag)) {
1206                 dev_warn(dev->ctrl.device,
1207                          "I/O %d QID %d timeout, completion polled\n",
1208                          req->tag, nvmeq->qid);
1209                 return BLK_EH_HANDLED;
1210         }
1211
1212         /*
1213          * Shut down immediately if the controller times out while starting. The
1214          * reset work will see the pci device disabled when it gets the forced
1215          * cancellation error. All outstanding requests are completed on
1216          * shutdown, so we return BLK_EH_HANDLED.
1217          */
1218         switch (dev->ctrl.state) {
1219         case NVME_CTRL_CONNECTING:
1220         case NVME_CTRL_RESETTING:
1221                 dev_warn(dev->ctrl.device,
1222                          "I/O %d QID %d timeout, disable controller\n",
1223                          req->tag, nvmeq->qid);
1224                 nvme_dev_disable(dev, false);
1225                 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1226                 return BLK_EH_HANDLED;
1227         default:
1228                 break;
1229         }
1230
1231         /*
1232          * Shut down the controller immediately and schedule a reset if the
1233          * command was already aborted once before and still hasn't been
1234          * returned to the driver, or if this is the admin queue.
1235          */
1236         if (!nvmeq->qid || iod->aborted) {
1237                 dev_warn(dev->ctrl.device,
1238                          "I/O %d QID %d timeout, reset controller\n",
1239                          req->tag, nvmeq->qid);
1240                 nvme_dev_disable(dev, false);
1241                 nvme_reset_ctrl(&dev->ctrl);
1242
1243                 /*
1244                  * Mark the request as handled, since the inline shutdown
1245                  * forces all outstanding requests to complete.
1246                  */
1247                 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1248                 return BLK_EH_HANDLED;
1249         }
1250
1251         if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
1252                 atomic_inc(&dev->ctrl.abort_limit);
1253                 return BLK_EH_RESET_TIMER;
1254         }
1255         iod->aborted = 1;
1256
1257         memset(&cmd, 0, sizeof(cmd));
1258         cmd.abort.opcode = nvme_admin_abort_cmd;
1259         cmd.abort.cid = req->tag;
1260         cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1261
1262         dev_warn(nvmeq->dev->ctrl.device,
1263                 "I/O %d QID %d timeout, aborting\n",
1264                  req->tag, nvmeq->qid);
1265
1266         abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
1267                         BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
1268         if (IS_ERR(abort_req)) {
1269                 atomic_inc(&dev->ctrl.abort_limit);
1270                 return BLK_EH_RESET_TIMER;
1271         }
1272
1273         abort_req->timeout = ADMIN_TIMEOUT;
1274         abort_req->end_io_data = NULL;
1275         blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
1276
1277         /*
1278          * The aborted request will be completed when the abort command completes.
1279          * Re-arm the timer: if the request times out a second time, the controller
1280          * is reset, as the device is then in a faulty state.
1281          */
1282         return BLK_EH_RESET_TIMER;
1283 }
1284
1285 static void nvme_free_queue(struct nvme_queue *nvmeq)
1286 {
1287         dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
1288                                 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1289         if (nvmeq->sq_cmds)
1290                 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
1291                                         nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1292 }
1293
1294 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1295 {
1296         int i;
1297
1298         for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
1299                 dev->ctrl.queue_count--;
1300                 nvme_free_queue(&dev->queues[i]);
1301         }
1302 }
1303
1304 /**
1305  * nvme_suspend_queue - put queue into suspended state
1306  * @nvmeq: queue to suspend
1307  */
1308 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1309 {
1310         int vector;
1311
1312         spin_lock_irq(&nvmeq->q_lock);
1313         if (nvmeq->cq_vector == -1) {
1314                 spin_unlock_irq(&nvmeq->q_lock);
1315                 return 1;
1316         }
1317         vector = nvmeq->cq_vector;
1318         nvmeq->dev->online_queues--;
1319         nvmeq->cq_vector = -1;
1320         spin_unlock_irq(&nvmeq->q_lock);
1321
1322         if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1323                 blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
1324
1325         pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
1326
1327         return 0;
1328 }
1329
1330 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
1331 {
1332         struct nvme_queue *nvmeq = &dev->queues[0];
1333
1334         if (shutdown)
1335                 nvme_shutdown_ctrl(&dev->ctrl);
1336         else
1337                 nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
1338
1339         spin_lock_irq(&nvmeq->q_lock);
1340         nvme_process_cq(nvmeq);
1341         spin_unlock_irq(&nvmeq->q_lock);
1342 }
1343
1344 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
1345                                 int entry_size)
1346 {
1347         int q_depth = dev->q_depth;
1348         unsigned q_size_aligned = roundup(q_depth * entry_size,
1349                                           dev->ctrl.page_size);
1350
1351         if (q_size_aligned * nr_io_queues > dev->cmb_size) {
1352                 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
1353                 mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
1354                 q_depth = div_u64(mem_per_q, entry_size);
1355
1356                 /*
1357                  * Ensure the reduced q_depth is above some threshold where it
1358                  * would be better to map queues in system memory with the
1359                  * original depth
1360                  */
1361                 if (q_depth < 64)
1362                         return -ENOMEM;
1363         }
1364
1365         return q_depth;
1366 }
1367
1368 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1369                                 int qid, int depth)
1370 {
1371         /* CMB SQEs will be mapped before creation */
1372         if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
1373                 return 0;
1374
1375         nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
1376                                             &nvmeq->sq_dma_addr, GFP_KERNEL);
1377         if (!nvmeq->sq_cmds)
1378                 return -ENOMEM;
1379         return 0;
1380 }
1381
1382 static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
1383                 int depth, int node)
1384 {
1385         struct nvme_queue *nvmeq = &dev->queues[qid];
1386
1387         if (dev->ctrl.queue_count > qid)
1388                 return 0;
1389
1390         nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
1391                                           &nvmeq->cq_dma_addr, GFP_KERNEL);
1392         if (!nvmeq->cqes)
1393                 goto free_nvmeq;
1394
1395         if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
1396                 goto free_cqdma;
1397
1398         nvmeq->q_dmadev = dev->dev;
1399         nvmeq->dev = dev;
1400         spin_lock_init(&nvmeq->q_lock);
1401         nvmeq->cq_head = 0;
1402         nvmeq->cq_phase = 1;
1403         nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1404         nvmeq->q_depth = depth;
1405         nvmeq->qid = qid;
1406         nvmeq->cq_vector = -1;
1407         dev->ctrl.queue_count++;
1408
1409         return 0;
1410
1411  free_cqdma:
1412         dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
1413                                                         nvmeq->cq_dma_addr);
1414  free_nvmeq:
1415         return -ENOMEM;
1416 }
1417
1418 static int queue_request_irq(struct nvme_queue *nvmeq)
1419 {
1420         struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1421         int nr = nvmeq->dev->ctrl.instance;
1422
1423         if (use_threaded_interrupts) {
1424                 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
1425                                 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
1426         } else {
1427                 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
1428                                 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
1429         }
1430 }
1431
1432 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1433 {
1434         struct nvme_dev *dev = nvmeq->dev;
1435
1436         spin_lock_irq(&nvmeq->q_lock);
1437         nvmeq->sq_tail = 0;
1438         nvmeq->cq_head = 0;
1439         nvmeq->cq_phase = 1;
1440         nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1441         memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1442         nvme_dbbuf_init(dev, nvmeq, qid);
1443         dev->online_queues++;
1444         spin_unlock_irq(&nvmeq->q_lock);
1445 }
1446
1447 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1448 {
1449         struct nvme_dev *dev = nvmeq->dev;
1450         int result;
1451
1452         if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
1453                 unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
1454                                                       dev->ctrl.page_size);
1455                 nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
1456                 nvmeq->sq_cmds_io = dev->cmb + offset;
1457         }
1458
1459         nvmeq->cq_vector = qid - 1;
1460         result = adapter_alloc_cq(dev, qid, nvmeq);
1461         if (result < 0)
1462                 goto release_vector;
1463
1464         result = adapter_alloc_sq(dev, qid, nvmeq);
1465         if (result < 0)
1466                 goto release_cq;
1467
1468         nvme_init_queue(nvmeq, qid);
1469         result = queue_request_irq(nvmeq);
1470         if (result < 0)
1471                 goto release_sq;
1472
1473         return result;
1474
1475  release_sq:
1476         dev->online_queues--;
1477         adapter_delete_sq(dev, qid);
1478  release_cq:
1479         adapter_delete_cq(dev, qid);
1480  release_vector:
1481         nvmeq->cq_vector = -1;
1482         return result;
1483 }
1484
1485 static const struct blk_mq_ops nvme_mq_admin_ops = {
1486         .queue_rq       = nvme_queue_rq,
1487         .complete       = nvme_pci_complete_rq,
1488         .init_hctx      = nvme_admin_init_hctx,
1489         .exit_hctx      = nvme_admin_exit_hctx,
1490         .init_request   = nvme_init_request,
1491         .timeout        = nvme_timeout,
1492 };
1493
1494 static const struct blk_mq_ops nvme_mq_ops = {
1495         .queue_rq       = nvme_queue_rq,
1496         .complete       = nvme_pci_complete_rq,
1497         .init_hctx      = nvme_init_hctx,
1498         .init_request   = nvme_init_request,
1499         .map_queues     = nvme_pci_map_queues,
1500         .timeout        = nvme_timeout,
1501         .poll           = nvme_poll,
1502 };
1503
1504 static void nvme_dev_remove_admin(struct nvme_dev *dev)
1505 {
1506         if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1507                 /*
1508                  * If the controller was reset during removal, it's possible
1509                  * user requests may be waiting on a stopped queue. Start the
1510                  * queue to flush these to completion.
1511                  */
1512                 blk_mq_unquiesce_queue(dev->ctrl.admin_q);
1513                 blk_cleanup_queue(dev->ctrl.admin_q);
1514                 blk_mq_free_tag_set(&dev->admin_tagset);
1515         }
1516 }
1517
1518 static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1519 {
1520         if (!dev->ctrl.admin_q) {
1521                 dev->admin_tagset.ops = &nvme_mq_admin_ops;
1522                 dev->admin_tagset.nr_hw_queues = 1;
1523
1524                 dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1525                 dev->admin_tagset.timeout = ADMIN_TIMEOUT;
1526                 dev->admin_tagset.numa_node = dev_to_node(dev->dev);
1527                 dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
1528                 dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
1529                 dev->admin_tagset.driver_data = dev;
1530
1531                 if (blk_mq_alloc_tag_set(&dev->admin_tagset))
1532                         return -ENOMEM;
1533                 dev->ctrl.admin_tagset = &dev->admin_tagset;
1534
1535                 dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
1536                 if (IS_ERR(dev->ctrl.admin_q)) {
1537                         blk_mq_free_tag_set(&dev->admin_tagset);
1538                         return -ENOMEM;
1539                 }
1540                 if (!blk_get_queue(dev->ctrl.admin_q)) {
1541                         nvme_dev_remove_admin(dev);
1542                         dev->ctrl.admin_q = NULL;
1543                         return -ENODEV;
1544                 }
1545         } else
1546                 blk_mq_unquiesce_queue(dev->ctrl.admin_q);
1547
1548         return 0;
1549 }
1550
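/*
 * Size of the BAR mapping needed to cover the doorbells for the admin queue
 * plus nr_io_queues I/O queues: each queue has a submission/completion
 * doorbell pair of 8 bytes, scaled by the controller's doorbell stride.
 */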
1551 static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
1552 {
1553         return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
1554 }
1555
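/*
 * (Re)map BAR 0 so that at least @size bytes are accessible.  The existing
 * mapping is kept if it is already large enough; otherwise it is replaced
 * with a larger one and the doorbell pointer is updated.
 */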
1556 static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
1557 {
1558         struct pci_dev *pdev = to_pci_dev(dev->dev);
1559
1560         if (size <= dev->bar_mapped_size)
1561                 return 0;
1562         if (size > pci_resource_len(pdev, 0))
1563                 return -ENOMEM;
1564         if (dev->bar)
1565                 iounmap(dev->bar);
1566         dev->bar = ioremap(pci_resource_start(pdev, 0), size);
1567         if (!dev->bar) {
1568                 dev->bar_mapped_size = 0;
1569                 return -ENOMEM;
1570         }
1571         dev->bar_mapped_size = size;
1572         dev->dbs = dev->bar + NVME_REG_DBS;
1573
1574         return 0;
1575 }
1576
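/*
 * Bring up the admin queue: disable the controller, program AQA/ASQ/ACQ with
 * the admin queue's depth and DMA addresses, re-enable the controller and
 * request the admin queue's interrupt.
 */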
1577 static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
1578 {
1579         int result;
1580         u32 aqa;
1581         struct nvme_queue *nvmeq;
1582
1583         result = nvme_remap_bar(dev, db_bar_size(dev, 0));
1584         if (result < 0)
1585                 return result;
1586
1587         dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
1588                                 NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
1589
1590         if (dev->subsystem &&
1591             (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
1592                 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
1593
1594         result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
1595         if (result < 0)
1596                 return result;
1597
1598         result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
1599                         dev_to_node(dev->dev));
1600         if (result)
1601                 return result;
1602
1603         nvmeq = &dev->queues[0];
1604         aqa = nvmeq->q_depth - 1;
1605         aqa |= aqa << 16;
1606
1607         writel(aqa, dev->bar + NVME_REG_AQA);
1608         lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
1609         lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
1610
1611         result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap);
1612         if (result)
1613                 return result;
1614
1615         nvmeq->cq_vector = 0;
1616         nvme_init_queue(nvmeq, 0);
1617         result = queue_request_irq(nvmeq);
1618         if (result) {
1619                 nvmeq->cq_vector = -1;
1620                 return result;
1621         }
1622
1623         return result;
1624 }
1625
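/*
 * Allocate and create the I/O queues negotiated with the controller.
 * Failing Create SQ/CQ commands are tolerated; only allocation errors
 * (negative values) are propagated to the caller.
 */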
1626 static int nvme_create_io_queues(struct nvme_dev *dev)
1627 {
1628         unsigned i, max;
1629         int ret = 0;
1630
1631         for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
1632                 /* vector == qid - 1, match nvme_create_queue */
1633                 if (nvme_alloc_queue(dev, i, dev->q_depth,
1634                      pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
1635                         ret = -ENOMEM;
1636                         break;
1637                 }
1638         }
1639
1640         max = min(dev->max_qid, dev->ctrl.queue_count - 1);
1641         for (i = dev->online_queues; i <= max; i++) {
1642                 ret = nvme_create_queue(&dev->queues[i], i);
1643                 if (ret)
1644                         break;
1645         }
1646
1647         /*
1648          * Ignore failing Create SQ/CQ commands: we can continue with fewer
1649          * than the desired number of queues, and even a controller without
1650          * I/O queues can still be used to issue admin commands.  This might
1651          * be useful to upgrade buggy firmware, for example.
1652          */
1653         return ret >= 0 ? 0 : ret;
1654 }
1655
1656 static ssize_t nvme_cmb_show(struct device *dev,
1657                              struct device_attribute *attr,
1658                              char *buf)
1659 {
1660         struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
1661
1662         return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
1663                        ndev->cmbloc, ndev->cmbsz);
1664 }
1665 static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
1666
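/* CMBSZ.SZU encodes the CMB size unit as 4 KiB shifted left by 4 * SZU. */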
1667 static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
1668 {
1669         u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
1670
1671         return 1ULL << (12 + 4 * szu);
1672 }
1673
1674 static u32 nvme_cmb_size(struct nvme_dev *dev)
1675 {
1676         return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
1677 }
1678
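/*
 * Map the Controller Memory Buffer described by CMBLOC/CMBSZ with
 * write-combining, clamped to the size of the containing BAR, and expose
 * the raw registers through the "cmb" sysfs attribute.
 */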
1679 static void nvme_map_cmb(struct nvme_dev *dev)
1680 {
1681         u64 size, offset;
1682         resource_size_t bar_size;
1683         struct pci_dev *pdev = to_pci_dev(dev->dev);
1684         int bar;
1685
1686         dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
1687         if (!dev->cmbsz)
1688                 return;
1689         dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
1690
1691         if (!use_cmb_sqes)
1692                 return;
1693
1694         size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
1695         offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
1696         bar = NVME_CMB_BIR(dev->cmbloc);
1697         bar_size = pci_resource_len(pdev, bar);
1698
1699         if (offset > bar_size)
1700                 return;
1701
1702         /*
1703          * Controllers may support a CMB size larger than their BAR,
1704          * for example, due to being behind a bridge. Reduce the CMB to
1705          * the reported size of the BAR.
1706          */
1707         if (size > bar_size - offset)
1708                 size = bar_size - offset;
1709
1710         dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
1711         if (!dev->cmb)
1712                 return;
1713         dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
1714         dev->cmb_size = size;
1715
1716         if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
1717                                     &dev_attr_cmb.attr, NULL))
1718                 dev_warn(dev->ctrl.device,
1719                          "failed to add sysfs attribute for CMB\n");
1720 }
1721
1722 static inline void nvme_release_cmb(struct nvme_dev *dev)
1723 {
1724         if (dev->cmb) {
1725                 iounmap(dev->cmb);
1726                 dev->cmb = NULL;
1727                 sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
1728                                              &dev_attr_cmb.attr, NULL);
1729                 dev->cmbsz = 0;
1730         }
1731 }
1732
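/*
 * Issue a Set Features (Host Memory Buffer) command: @bits selects the
 * enable/return flags, and the remaining dwords describe the buffer size
 * and the DMA address and count of the descriptor list.
 */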
1733 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
1734 {
1735         u64 dma_addr = dev->host_mem_descs_dma;
1736         struct nvme_command c;
1737         int ret;
1738
1739         memset(&c, 0, sizeof(c));
1740         c.features.opcode       = nvme_admin_set_features;
1741         c.features.fid          = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
1742         c.features.dword11      = cpu_to_le32(bits);
1743         c.features.dword12      = cpu_to_le32(dev->host_mem_size >>
1744                                               ilog2(dev->ctrl.page_size));
1745         c.features.dword13      = cpu_to_le32(lower_32_bits(dma_addr));
1746         c.features.dword14      = cpu_to_le32(upper_32_bits(dma_addr));
1747         c.features.dword15      = cpu_to_le32(dev->nr_host_mem_descs);
1748
1749         ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1750         if (ret) {
1751                 dev_warn(dev->ctrl.device,
1752                          "failed to set host mem (err %d, flags %#x).\n",
1753                          ret, bits);
1754         }
1755         return ret;
1756 }
1757
1758 static void nvme_free_host_mem(struct nvme_dev *dev)
1759 {
1760         int i;
1761
1762         for (i = 0; i < dev->nr_host_mem_descs; i++) {
1763                 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
1764                 size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
1765
1766                 dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
1767                                 le64_to_cpu(desc->addr));
1768         }
1769
1770         kfree(dev->host_mem_desc_bufs);
1771         dev->host_mem_desc_bufs = NULL;
1772         dma_free_coherent(dev->dev,
1773                         dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
1774                         dev->host_mem_descs, dev->host_mem_descs_dma);
1775         dev->host_mem_descs = NULL;
1776         dev->nr_host_mem_descs = 0;
1777 }
1778
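/*
 * Try to build a host memory buffer of up to @preferred bytes out of DMA
 * chunks of @chunk_size each, bounded by the controller's HMMAXD descriptor
 * limit.  Whatever could be allocated before memory ran out is kept.
 */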
1779 static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
1780                 u32 chunk_size)
1781 {
1782         struct nvme_host_mem_buf_desc *descs;
1783         u32 max_entries, len;
1784         dma_addr_t descs_dma;
1785         int i = 0;
1786         void **bufs;
1787         u64 size, tmp;
1788
1789         tmp = (preferred + chunk_size - 1);
1790         do_div(tmp, chunk_size);
1791         max_entries = tmp;
1792
1793         if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
1794                 max_entries = dev->ctrl.hmmaxd;
1795
1796         descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
1797                         &descs_dma, GFP_KERNEL);
1798         if (!descs)
1799                 goto out;
1800
1801         bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
1802         if (!bufs)
1803                 goto out_free_descs;
1804
1805         for (size = 0; size < preferred && i < max_entries; size += len) {
1806                 dma_addr_t dma_addr;
1807
1808                 len = min_t(u64, chunk_size, preferred - size);
1809                 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
1810                                 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1811                 if (!bufs[i])
1812                         break;
1813
1814                 descs[i].addr = cpu_to_le64(dma_addr);
1815                 descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
1816                 i++;
1817         }
1818
1819         if (!size)
1820                 goto out_free_bufs;
1821
1822         dev->nr_host_mem_descs = i;
1823         dev->host_mem_size = size;
1824         dev->host_mem_descs = descs;
1825         dev->host_mem_descs_dma = descs_dma;
1826         dev->host_mem_desc_bufs = bufs;
1827         return 0;
1828
1829 out_free_bufs:
1830         while (--i >= 0) {
1831                 size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
1832
1833                 dma_free_coherent(dev->dev, size, bufs[i],
1834                                 le64_to_cpu(descs[i].addr));
1835         }
1836
1837         kfree(bufs);
1838 out_free_descs:
1839         dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
1840                         descs_dma);
1841 out:
1842         dev->host_mem_descs = NULL;
1843         return -ENOMEM;
1844 }
1845
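/*
 * Start with the largest chunk size we can allocate and keep halving it
 * until an HMB of at least @min bytes could be built, or until the chunk
 * size drops below the controller's minimum descriptor size.
 */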
1846 static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
1847 {
1848         u32 chunk_size;
1849
1850         /* start big and work our way down */
1851         for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
1852              chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
1853              chunk_size /= 2) {
1854                 if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
1855                         if (!min || dev->host_mem_size >= min)
1856                                 return 0;
1857                         nvme_free_host_mem(dev);
1858                 }
1859         }
1860
1861         return -ENOMEM;
1862 }
1863
1864 static int nvme_setup_host_mem(struct nvme_dev *dev)
1865 {
1866         u64 max = (u64)max_host_mem_size_mb * SZ_1M;
1867         u64 preferred = (u64)dev->ctrl.hmpre * 4096;
1868         u64 min = (u64)dev->ctrl.hmmin * 4096;
1869         u32 enable_bits = NVME_HOST_MEM_ENABLE;
1870         int ret;
1871
1872         preferred = min(preferred, max);
1873         if (min > max) {
1874                 dev_warn(dev->ctrl.device,
1875                         "min host memory (%lld MiB) above limit (%d MiB).\n",
1876                         min >> ilog2(SZ_1M), max_host_mem_size_mb);
1877                 nvme_free_host_mem(dev);
1878                 return 0;
1879         }
1880
1881         /*
1882          * If we already have a buffer allocated, check if we can reuse it.
1883          */
1884         if (dev->host_mem_descs) {
1885                 if (dev->host_mem_size >= min)
1886                         enable_bits |= NVME_HOST_MEM_RETURN;
1887                 else
1888                         nvme_free_host_mem(dev);
1889         }
1890
1891         if (!dev->host_mem_descs) {
1892                 if (nvme_alloc_host_mem(dev, min, preferred)) {
1893                         dev_warn(dev->ctrl.device,
1894                                 "failed to allocate host memory buffer.\n");
1895                         return 0; /* controller must work without HMB */
1896                 }
1897
1898                 dev_info(dev->ctrl.device,
1899                         "allocated %lld MiB host memory buffer.\n",
1900                         dev->host_mem_size >> ilog2(SZ_1M));
1901         }
1902
1903         ret = nvme_set_host_mem(dev, enable_bits);
1904         if (ret)
1905                 nvme_free_host_mem(dev);
1906         return ret;
1907 }
1908
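/*
 * Negotiate the number of I/O queues with the controller, remap the doorbell
 * BAR to cover them, allocate one interrupt vector per queue and finally
 * create the queues themselves.
 */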
1909 static int nvme_setup_io_queues(struct nvme_dev *dev)
1910 {
1911         struct nvme_queue *adminq = &dev->queues[0];
1912         struct pci_dev *pdev = to_pci_dev(dev->dev);
1913         int result, nr_io_queues;
1914         unsigned long size;
1915
1916         nr_io_queues = num_present_cpus();
1917         result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
1918         if (result < 0)
1919                 return result;
1920
1921         if (nr_io_queues == 0)
1922                 return 0;
1923
1924         if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
1925                 result = nvme_cmb_qdepth(dev, nr_io_queues,
1926                                 sizeof(struct nvme_command));
1927                 if (result > 0)
1928                         dev->q_depth = result;
1929                 else
1930                         nvme_release_cmb(dev);
1931         }
1932
1933         do {
1934                 size = db_bar_size(dev, nr_io_queues);
1935                 result = nvme_remap_bar(dev, size);
1936                 if (!result)
1937                         break;
1938                 if (!--nr_io_queues)
1939                         return -ENOMEM;
1940         } while (1);
1941         adminq->q_db = dev->dbs;
1942
1943         /* Deregister the admin queue's interrupt */
1944         pci_free_irq(pdev, 0, adminq);
1945
1946         /*
1947          * If we enabled MSI-X early because INTx is unsupported, disable it
1948          * again before setting up the full range of vectors we need.
1949          */
1950         pci_free_irq_vectors(pdev);
1951         nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
1952                         PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
1953         if (nr_io_queues <= 0)
1954                 return -EIO;
1955         dev->max_qid = nr_io_queues;
1956
1957         /*
1958          * Should investigate if there's a performance win from allocating
1959          * more queues than interrupt vectors; it might allow the submission
1960          * path to scale better, even if the receive path is limited by the
1961          * number of interrupts.
1962          */
1963
1964         result = queue_request_irq(adminq);
1965         if (result) {
1966                 adminq->cq_vector = -1;
1967                 return result;
1968         }
1969         return nvme_create_io_queues(dev);
1970 }
1971
1972 static void nvme_del_queue_end(struct request *req, blk_status_t error)
1973 {
1974         struct nvme_queue *nvmeq = req->end_io_data;
1975
1976         blk_mq_free_request(req);
1977         complete(&nvmeq->dev->ioq_wait);
1978 }
1979
1980 static void nvme_del_cq_end(struct request *req, blk_status_t error)
1981 {
1982         struct nvme_queue *nvmeq = req->end_io_data;
1983
1984         if (!error) {
1985                 unsigned long flags;
1986
1987                 /*
1988                  * We might be called with the AQ q_lock held,
1989                  * and the I/O queue q_lock should always
1990                  * nest inside the AQ one.
1991                  */
1992                 spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
1993                                         SINGLE_DEPTH_NESTING);
1994                 nvme_process_cq(nvmeq);
1995                 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
1996         }
1997
1998         nvme_del_queue_end(req, error);
1999 }
2000
2001 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
2002 {
2003         struct request_queue *q = nvmeq->dev->ctrl.admin_q;
2004         struct request *req;
2005         struct nvme_command cmd;
2006
2007         memset(&cmd, 0, sizeof(cmd));
2008         cmd.delete_queue.opcode = opcode;
2009         cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
2010
2011         req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
2012         if (IS_ERR(req))
2013                 return PTR_ERR(req);
2014
2015         req->timeout = ADMIN_TIMEOUT;
2016         req->end_io_data = nvmeq;
2017
2018         blk_execute_rq_nowait(q, NULL, req, false,
2019                         opcode == nvme_admin_delete_cq ?
2020                                 nvme_del_cq_end : nvme_del_queue_end);
2021         return 0;
2022 }
2023
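/*
 * Tear down all online I/O queues asynchronously: the first pass sends
 * Delete SQ commands, the second pass Delete CQ commands, waiting for the
 * completions of each pass under ADMIN_TIMEOUT.
 */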
2024 static void nvme_disable_io_queues(struct nvme_dev *dev)
2025 {
2026         int pass, queues = dev->online_queues - 1;
2027         unsigned long timeout;
2028         u8 opcode = nvme_admin_delete_sq;
2029
2030         for (pass = 0; pass < 2; pass++) {
2031                 int sent = 0, i = queues;
2032
2033                 reinit_completion(&dev->ioq_wait);
2034  retry:
2035                 timeout = ADMIN_TIMEOUT;
2036                 for (; i > 0; i--, sent++)
2037                         if (nvme_delete_queue(&dev->queues[i], opcode))
2038                                 break;
2039
2040                 while (sent--) {
2041                         timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
2042                         if (timeout == 0)
2043                                 return;
2044                         if (i)
2045                                 goto retry;
2046                 }
2047                 opcode = nvme_admin_delete_cq;
2048         }
2049 }
2050
2051 /*
2052  * Return an error value only when tagset allocation fails.
2053  */
2054 static int nvme_dev_add(struct nvme_dev *dev)
2055 {
2056         int ret;
2057
2058         if (!dev->ctrl.tagset) {
2059                 dev->tagset.ops = &nvme_mq_ops;
2060                 dev->tagset.nr_hw_queues = dev->online_queues - 1;
2061                 dev->tagset.timeout = NVME_IO_TIMEOUT;
2062                 dev->tagset.numa_node = dev_to_node(dev->dev);
2063                 dev->tagset.queue_depth =
2064                                 min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
2065                 dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
2066                 if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
2067                         dev->tagset.cmd_size = max(dev->tagset.cmd_size,
2068                                         nvme_pci_cmd_size(dev, true));
2069                 }
2070                 dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
2071                 dev->tagset.driver_data = dev;
2072
2073                 ret = blk_mq_alloc_tag_set(&dev->tagset);
2074                 if (ret) {
2075                         dev_warn(dev->ctrl.device,
2076                                 "IO queues tagset allocation failed %d\n", ret);
2077                         return ret;
2078                 }
2079                 dev->ctrl.tagset = &dev->tagset;
2080
2081                 nvme_dbbuf_set(dev);
2082         } else {
2083                 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
2084
2085                 /* Free previously allocated queues that are no longer usable */
2086                 nvme_free_queues(dev, dev->online_queues);
2087         }
2088
2089         return 0;
2090 }
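/*
 * Enable the PCI device and bus mastering, set the DMA mask, read CAP to
 * derive the queue depth and doorbell stride, apply per-device depth quirks
 * and map the controller memory buffer if one is present.
 */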
2091
2092 static int nvme_pci_enable(struct nvme_dev *dev)
2093 {
2094         int result = -ENOMEM;
2095         struct pci_dev *pdev = to_pci_dev(dev->dev);
2096
2097         if (pci_enable_device_mem(pdev))
2098                 return result;
2099
2100         pci_set_master(pdev);
2101
2102         if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
2103             dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
2104                 goto disable;
2105
2106         if (readl(dev->bar + NVME_REG_CSTS) == -1) {
2107                 result = -ENODEV;
2108                 goto disable;
2109         }
2110
2111         /*
2112          * Some devices and/or platforms don't advertise or work with INTx
2113          * interrupts. Pre-enable a single MSI-X or MSI vector for setup. We'll
2114          * adjust this later.
2115          */
2116         result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
2117         if (result < 0)
2118                 return result;
2119
2120         dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
2121
2122         dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
2123                                 io_queue_depth);
2124         dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
2125         dev->dbs = dev->bar + 4096;
2126
2127         /*
2128          * Temporary fix for the Apple controller found in the MacBook8,1 and
2129          * some MacBook7,1 to avoid controller resets and data loss.
2130          */
2131         if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
2132                 dev->q_depth = 2;
2133                 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
2134                         "set queue depth=%u to work around controller resets\n",
2135                         dev->q_depth);
2136         } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
2137                    (pdev->device == 0xa821 || pdev->device == 0xa822) &&
2138                    NVME_CAP_MQES(dev->ctrl.cap) == 0) {
2139                 dev->q_depth = 64;
2140                 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
2141                         "set queue depth=%u\n", dev->q_depth);
2142         }
2143
2144         nvme_map_cmb(dev);
2145
2146         pci_enable_pcie_error_reporting(pdev);
2147         pci_save_state(pdev);
2148         return 0;
2149
2150  disable:
2151         pci_disable_device(pdev);
2152         return result;
2153 }
2154
2155 static void nvme_dev_unmap(struct nvme_dev *dev)
2156 {
2157         if (dev->bar)
2158                 iounmap(dev->bar);
2159         pci_release_mem_regions(to_pci_dev(dev->dev));
2160 }
2161
2162 static void nvme_pci_disable(struct nvme_dev *dev)
2163 {
2164         struct pci_dev *pdev = to_pci_dev(dev->dev);
2165
2166         nvme_release_cmb(dev);
2167         pci_free_irq_vectors(pdev);
2168
2169         if (pci_is_enabled(pdev)) {
2170                 pci_disable_pcie_error_reporting(pdev);
2171                 pci_disable_device(pdev);
2172         }
2173 }
2174
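/*
 * Quiesce and tear down the controller.  If it still responds, the I/O
 * queues are deleted and an orderly shutdown (or reset) is performed;
 * otherwise the queues are simply suspended and all outstanding requests
 * are cancelled.
 */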
2175 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
2176 {
2177         int i;
2178         bool dead = true;
2179         struct pci_dev *pdev = to_pci_dev(dev->dev);
2180
2181         mutex_lock(&dev->shutdown_lock);
2182         if (pci_is_enabled(pdev)) {
2183                 u32 csts = readl(dev->bar + NVME_REG_CSTS);
2184
2185                 if (dev->ctrl.state == NVME_CTRL_LIVE ||
2186                     dev->ctrl.state == NVME_CTRL_RESETTING)
2187                         nvme_start_freeze(&dev->ctrl);
2188                 dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
2189                         pdev->error_state  != pci_channel_io_normal);
2190         }
2191
2192         /*
2193          * Give the controller a chance to complete all entered requests if
2194          * doing a safe shutdown.
2195          */
2196         if (!dead) {
2197                 if (shutdown)
2198                         nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
2199
2200                 /*
2201                  * If the controller is still alive, tell it to stop using the
2202                  * host memory buffer.  In theory the shutdown / reset should
2203                  * make sure that it doesn't access the host memory anymore,
2204                  * but I'd rather be safe than sorry.
2205                  */
2206                 if (dev->host_mem_descs)
2207                         nvme_set_host_mem(dev, 0);
2208
2209         }
2210         nvme_stop_queues(&dev->ctrl);
2211
2212         if (!dead) {
2213                 nvme_disable_io_queues(dev);
2214                 nvme_disable_admin_queue(dev, shutdown);
2215         }
2216         for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
2217                 nvme_suspend_queue(&dev->queues[i]);
2218
2219         nvme_pci_disable(dev);
2220
2221         blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
2222         blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
2223
2224         /*
2225          * The driver will not be starting up queues again if shutting down,
2226          * so we must flush all entered requests to their failed completion
2227          * to avoid deadlocking the blk-mq hot-cpu notifier.
2228          */
2229         if (shutdown)
2230                 nvme_start_queues(&dev->ctrl);
2231         mutex_unlock(&dev->shutdown_lock);
2232 }
2233
2234 static int nvme_setup_prp_pools(struct nvme_dev *dev)
2235 {
2236         dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
2237                                                 PAGE_SIZE, PAGE_SIZE, 0);
2238         if (!dev->prp_page_pool)
2239                 return -ENOMEM;
2240
2241         /* Optimisation for I/Os between 4k and 128k */
2242         dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
2243                                                 256, 256, 0);
2244         if (!dev->prp_small_pool) {
2245                 dma_pool_destroy(dev->prp_page_pool);
2246                 return -ENOMEM;
2247         }
2248         return 0;
2249 }
2250
2251 static void nvme_release_prp_pools(struct nvme_dev *dev)
2252 {
2253         dma_pool_destroy(dev->prp_page_pool);
2254         dma_pool_destroy(dev->prp_small_pool);
2255 }
2256
2257 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
2258 {
2259         struct nvme_dev *dev = to_nvme_dev(ctrl);
2260
2261         nvme_dbbuf_dma_free(dev);
2262         put_device(dev->dev);
2263         if (dev->tagset.tags)
2264                 blk_mq_free_tag_set(&dev->tagset);
2265         if (dev->ctrl.admin_q)
2266                 blk_put_queue(dev->ctrl.admin_q);
2267         kfree(dev->queues);
2268         free_opal_dev(dev->ctrl.opal_dev);
2269         kfree(dev);
2270 }
2271
2272 static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
2273 {
2274         dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
2275
2276         nvme_get_ctrl(&dev->ctrl);
2277         nvme_dev_disable(dev, false);
2278         if (!queue_work(nvme_wq, &dev->remove_work))
2279                 nvme_put_ctrl(&dev->ctrl);
2280 }
2281
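/*
 * Controller (re)initialization: shut down a live controller, re-enable the
 * PCI device, set up the admin queue, identify the controller and bring up
 * the host memory buffer and I/O queues before marking the controller LIVE
 * (or ADMIN_ONLY when no I/O queues could be created).
 */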
2282 static void nvme_reset_work(struct work_struct *work)
2283 {
2284         struct nvme_dev *dev =
2285                 container_of(work, struct nvme_dev, ctrl.reset_work);
2286         bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
2287         int result = -ENODEV;
2288         enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
2289
2290         if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
2291                 goto out;
2292
2293         /*
2294          * If we're called to reset a live controller first shut it down before
2295          * moving on.
2296          */
2297         if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
2298                 nvme_dev_disable(dev, false);
2299
2300         /*
2301          * Mark the controller CONNECTING (a state introduced by the nvme-fc/rdma
2302          * transports) for the duration of the initialization procedure here.
2303          */
2304         if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2305                 dev_warn(dev->ctrl.device,
2306                         "failed to mark controller CONNECTING\n");
2307                 goto out;
2308         }
2309
2310         result = nvme_pci_enable(dev);
2311         if (result)
2312                 goto out;
2313
2314         result = nvme_pci_configure_admin_queue(dev);
2315         if (result)
2316                 goto out;
2317
2318         result = nvme_alloc_admin_tags(dev);
2319         if (result)
2320                 goto out;
2321
2322         result = nvme_init_identify(&dev->ctrl);
2323         if (result)
2324                 goto out;
2325
2326         if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
2327                 if (!dev->ctrl.opal_dev)
2328                         dev->ctrl.opal_dev =
2329                                 init_opal_dev(&dev->ctrl, &nvme_sec_submit);
2330                 else if (was_suspend)
2331                         opal_unlock_from_suspend(dev->ctrl.opal_dev);
2332         } else {
2333                 free_opal_dev(dev->ctrl.opal_dev);
2334                 dev->ctrl.opal_dev = NULL;
2335         }
2336
2337         if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
2338                 result = nvme_dbbuf_dma_alloc(dev);
2339                 if (result)
2340                         dev_warn(dev->dev,
2341                                  "unable to allocate dma for dbbuf\n");
2342         }
2343
2344         if (dev->ctrl.hmpre) {
2345                 result = nvme_setup_host_mem(dev);
2346                 if (result < 0)
2347                         goto out;
2348         }
2349
2350         result = nvme_setup_io_queues(dev);
2351         if (result)
2352                 goto out;
2353
2354         /*
2355          * Keep the controller around but remove all namespaces if we don't have
2356          * any working I/O queue.
2357          */
2358         if (dev->online_queues < 2) {
2359                 dev_warn(dev->ctrl.device, "IO queues not created\n");
2360                 nvme_kill_queues(&dev->ctrl);
2361                 nvme_remove_namespaces(&dev->ctrl);
2362                 new_state = NVME_CTRL_ADMIN_ONLY;
2363         } else {
2364                 nvme_start_queues(&dev->ctrl);
2365                 nvme_wait_freeze(&dev->ctrl);
2366                 /* hit this only when tagset allocation fails */
2367                 if (nvme_dev_add(dev))
2368                         new_state = NVME_CTRL_ADMIN_ONLY;
2369                 nvme_unfreeze(&dev->ctrl);
2370         }
2371
2372         /*
2373          * If only the admin queue is live, keep it around to allow further
2374          * investigation or recovery.
2375          */
2376         if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
2377                 dev_warn(dev->ctrl.device,
2378                         "failed to mark controller state %d\n", new_state);
2379                 goto out;
2380         }
2381
2382         nvme_start_ctrl(&dev->ctrl);
2383         return;
2384
2385  out:
2386         nvme_remove_dead_ctrl(dev, result);
2387 }
2388
2389 static void nvme_remove_dead_ctrl_work(struct work_struct *work)
2390 {
2391         struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
2392         struct pci_dev *pdev = to_pci_dev(dev->dev);
2393
2394         nvme_kill_queues(&dev->ctrl);
2395         if (pci_get_drvdata(pdev))
2396                 device_release_driver(&pdev->dev);
2397         nvme_put_ctrl(&dev->ctrl);
2398 }
2399
2400 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
2401 {
2402         *val = readl(to_nvme_dev(ctrl)->bar + off);
2403         return 0;
2404 }
2405
2406 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
2407 {
2408         writel(val, to_nvme_dev(ctrl)->bar + off);
2409         return 0;
2410 }
2411
2412 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
2413 {
2414         *val = readq(to_nvme_dev(ctrl)->bar + off);
2415         return 0;
2416 }
2417
2418 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
2419         .name                   = "pcie",
2420         .module                 = THIS_MODULE,
2421         .flags                  = NVME_F_METADATA_SUPPORTED,
2422         .reg_read32             = nvme_pci_reg_read32,
2423         .reg_write32            = nvme_pci_reg_write32,
2424         .reg_read64             = nvme_pci_reg_read64,
2425         .free_ctrl              = nvme_pci_free_ctrl,
2426         .submit_async_event     = nvme_pci_submit_async_event,
2427 };
2428
2429 static int nvme_dev_map(struct nvme_dev *dev)
2430 {
2431         struct pci_dev *pdev = to_pci_dev(dev->dev);
2432
2433         if (pci_request_mem_regions(pdev, "nvme"))
2434                 return -ENODEV;
2435
2436         if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
2437                 goto release;
2438
2439         return 0;
2440   release:
2441         pci_release_mem_regions(pdev);
2442         return -ENODEV;
2443 }
2444
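/*
 * Some device/platform combinations misbehave only on specific systems;
 * match them via DMI and return the extra quirks to apply.
 */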
2445 static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
2446 {
2447         if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
2448                 /*
2449                  * Several Samsung devices seem to drop off the PCIe bus
2450                  * randomly when APST is on and uses the deepest sleep state.
2451                  * This has been observed on a Samsung "SM951 NVMe SAMSUNG
2452                  * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
2453                  * 950 PRO 256GB", but it seems to be restricted to two Dell
2454                  * laptops.
2455                  */
2456                 if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
2457                     (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
2458                      dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
2459                         return NVME_QUIRK_NO_DEEPEST_PS;
2460         } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
2461                 /*
2462                  * Samsung SSD 960 EVO drops off the PCIe bus after system
2463                  * suspend on a Ryzen board, ASUS PRIME B350M-A.
2464                  */
2465                 if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
2466                     dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
2467                         return NVME_QUIRK_NO_APST;
2468         }
2469
2470         return 0;
2471 }
2472
2473 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2474 {
2475         int node, result = -ENOMEM;
2476         struct nvme_dev *dev;
2477         unsigned long quirks = id->driver_data;
2478
2479         node = dev_to_node(&pdev->dev);
2480         if (node == NUMA_NO_NODE)
2481                 set_dev_node(&pdev->dev, first_memory_node);
2482
2483         dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
2484         if (!dev)
2485                 return -ENOMEM;
2486
2487         dev->queues = kcalloc_node(num_possible_cpus() + 1,
2488                         sizeof(struct nvme_queue), GFP_KERNEL, node);
2489         if (!dev->queues)
2490                 goto free;
2491
2492         dev->dev = get_device(&pdev->dev);
2493         pci_set_drvdata(pdev, dev);
2494
2495         result = nvme_dev_map(dev);
2496         if (result)
2497                 goto put_pci;
2498
2499         INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
2500         INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
2501         mutex_init(&dev->shutdown_lock);
2502         init_completion(&dev->ioq_wait);
2503
2504         result = nvme_setup_prp_pools(dev);
2505         if (result)
2506                 goto unmap;
2507
2508         quirks |= check_vendor_combination_bug(pdev);
2509
2510         result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
2511                         quirks);
2512         if (result)
2513                 goto release_pools;
2514
2515         dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
2516
2517         nvme_reset_ctrl(&dev->ctrl);
2518
2519         return 0;
2520
2521  release_pools:
2522         nvme_release_prp_pools(dev);
2523  unmap:
2524         nvme_dev_unmap(dev);
2525  put_pci:
2526         put_device(dev->dev);
2527  free:
2528         kfree(dev->queues);
2529         kfree(dev);
2530         return result;
2531 }
2532
2533 static void nvme_reset_prepare(struct pci_dev *pdev)
2534 {
2535         struct nvme_dev *dev = pci_get_drvdata(pdev);
2536         nvme_dev_disable(dev, false);
2537 }
2538
2539 static void nvme_reset_done(struct pci_dev *pdev)
2540 {
2541         struct nvme_dev *dev = pci_get_drvdata(pdev);
2542         nvme_reset_ctrl_sync(&dev->ctrl);
2543 }
2544
2545 static void nvme_shutdown(struct pci_dev *pdev)
2546 {
2547         struct nvme_dev *dev = pci_get_drvdata(pdev);
2548         nvme_dev_disable(dev, true);
2549 }
2550
2551 /*
2552  * The driver's remove may be called on a device in a partially initialized
2553  * state. This function must not have any dependencies on the device state in
2554  * order to proceed.
2555  */
2556 static void nvme_remove(struct pci_dev *pdev)
2557 {
2558         struct nvme_dev *dev = pci_get_drvdata(pdev);
2559
2560         nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2561
2562         cancel_work_sync(&dev->ctrl.reset_work);
2563         pci_set_drvdata(pdev, NULL);
2564
2565         if (!pci_device_is_present(pdev)) {
2566                 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
2567                 nvme_dev_disable(dev, false);
2568         }
2569
2570         flush_work(&dev->ctrl.reset_work);
2571         nvme_stop_ctrl(&dev->ctrl);
2572         nvme_remove_namespaces(&dev->ctrl);
2573         nvme_dev_disable(dev, true);
2574         nvme_free_host_mem(dev);
2575         nvme_dev_remove_admin(dev);
2576         nvme_free_queues(dev, 0);
2577         nvme_uninit_ctrl(&dev->ctrl);
2578         nvme_release_prp_pools(dev);
2579         nvme_dev_unmap(dev);
2580         nvme_put_ctrl(&dev->ctrl);
2581 }
2582
2583 static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
2584 {
2585         int ret = 0;
2586
2587         if (numvfs == 0) {
2588                 if (pci_vfs_assigned(pdev)) {
2589                         dev_warn(&pdev->dev,
2590                                 "Cannot disable SR-IOV VFs while assigned\n");
2591                         return -EPERM;
2592                 }
2593                 pci_disable_sriov(pdev);
2594                 return 0;
2595         }
2596
2597         ret = pci_enable_sriov(pdev, numvfs);
2598         return ret ? ret : numvfs;
2599 }
2600
2601 #ifdef CONFIG_PM_SLEEP
2602 static int nvme_suspend(struct device *dev)
2603 {
2604         struct pci_dev *pdev = to_pci_dev(dev);
2605         struct nvme_dev *ndev = pci_get_drvdata(pdev);
2606
2607         nvme_dev_disable(ndev, true);
2608         return 0;
2609 }
2610
2611 static int nvme_resume(struct device *dev)
2612 {
2613         struct pci_dev *pdev = to_pci_dev(dev);
2614         struct nvme_dev *ndev = pci_get_drvdata(pdev);
2615
2616         nvme_reset_ctrl(&ndev->ctrl);
2617         return 0;
2618 }
2619 #endif
2620
2621 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
2622
2623 static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
2624                                                 pci_channel_state_t state)
2625 {
2626         struct nvme_dev *dev = pci_get_drvdata(pdev);
2627
2628         /*
2629          * A frozen channel requires a reset. When detected, this method will
2630          * shut down the controller to quiesce. The controller will be restarted
2631          * after the slot reset through the driver's slot_reset callback.
2632          */
2633         switch (state) {
2634         case pci_channel_io_normal:
2635                 return PCI_ERS_RESULT_CAN_RECOVER;
2636         case pci_channel_io_frozen:
2637                 dev_warn(dev->ctrl.device,
2638                         "frozen state error detected, reset controller\n");
2639                 nvme_dev_disable(dev, false);
2640                 return PCI_ERS_RESULT_NEED_RESET;
2641         case pci_channel_io_perm_failure:
2642                 dev_warn(dev->ctrl.device,
2643                         "failure state error detected, request disconnect\n");
2644                 return PCI_ERS_RESULT_DISCONNECT;
2645         }
2646         return PCI_ERS_RESULT_NEED_RESET;
2647 }
2648
2649 static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
2650 {
2651         struct nvme_dev *dev = pci_get_drvdata(pdev);
2652
2653         dev_info(dev->ctrl.device, "restart after slot reset\n");
2654         pci_restore_state(pdev);
2655         nvme_reset_ctrl(&dev->ctrl);
2656         return PCI_ERS_RESULT_RECOVERED;
2657 }
2658
2659 static void nvme_error_resume(struct pci_dev *pdev)
2660 {
2661         pci_cleanup_aer_uncorrect_error_status(pdev);
2662 }
2663
2664 static const struct pci_error_handlers nvme_err_handler = {
2665         .error_detected = nvme_error_detected,
2666         .slot_reset     = nvme_slot_reset,
2667         .resume         = nvme_error_resume,
2668         .reset_prepare  = nvme_reset_prepare,
2669         .reset_done     = nvme_reset_done,
2670 };
2671
2672 static const struct pci_device_id nvme_id_table[] = {
2673         { PCI_VDEVICE(INTEL, 0x0953),
2674                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2675                                 NVME_QUIRK_DEALLOCATE_ZEROES, },
2676         { PCI_VDEVICE(INTEL, 0x0a53),
2677                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2678                                 NVME_QUIRK_DEALLOCATE_ZEROES, },
2679         { PCI_VDEVICE(INTEL, 0x0a54),
2680                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2681                                 NVME_QUIRK_DEALLOCATE_ZEROES, },
2682         { PCI_VDEVICE(INTEL, 0x0a55),
2683                 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2684                                 NVME_QUIRK_DEALLOCATE_ZEROES, },
2685         { PCI_VDEVICE(INTEL, 0xf1a5),   /* Intel 600P/P3100 */
2686                 .driver_data = NVME_QUIRK_NO_DEEPEST_PS },
2687         { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
2688                 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
2689         { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
2690                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2691         { PCI_DEVICE(0x1c58, 0x0023),   /* WDC SN200 adapter */
2692                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2693         { PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
2694                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2695         { PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
2696                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2697         { PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
2698                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2699         { PCI_DEVICE(0x1d1d, 0x1f1f),   /* LightNVM qemu device */
2700                 .driver_data = NVME_QUIRK_LIGHTNVM, },
2701         { PCI_DEVICE(0x1d1d, 0x2807),   /* CNEX WL */
2702                 .driver_data = NVME_QUIRK_LIGHTNVM, },
2703         { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2704         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
2705         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
2706         { 0, }
2707 };
2708 MODULE_DEVICE_TABLE(pci, nvme_id_table);
2709
2710 static struct pci_driver nvme_driver = {
2711         .name           = "nvme",
2712         .id_table       = nvme_id_table,
2713         .probe          = nvme_probe,
2714         .remove         = nvme_remove,
2715         .shutdown       = nvme_shutdown,
2716         .driver         = {
2717                 .pm     = &nvme_dev_pm_ops,
2718         },
2719         .sriov_configure = nvme_pci_sriov_configure,
2720         .err_handler    = &nvme_err_handler,
2721 };
2722
2723 static int __init nvme_init(void)
2724 {
2725         return pci_register_driver(&nvme_driver);
2726 }
2727
2728 static void __exit nvme_exit(void)
2729 {
2730         pci_unregister_driver(&nvme_driver);
2731         flush_workqueue(nvme_wq);
2732         _nvme_check_size();
2733 }
2734
2735 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
2736 MODULE_LICENSE("GPL");
2737 MODULE_VERSION("1.0");
2738 module_init(nvme_init);
2739 module_exit(nvme_exit);