// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>
#include <asm/ppc-pci.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_DEBUGFS
static int iommu_debugfs_weight_get(void *data, u64 *val)
{
	struct iommu_table *tbl = data;
	*val = bitmap_weight(tbl->it_map, tbl->it_size);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");

static void iommu_debugfs_add(struct iommu_table *tbl)
{
	char name[10];
	struct dentry *liobn_entry;

	sprintf(name, "%08lx", tbl->it_index);
	liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);

	debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
	debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
	debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
	debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
	debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
	debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
	debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
}

static void iommu_debugfs_del(struct iommu_table *tbl)
{
	char name[10];

	sprintf(name, "%08lx", tbl->it_index);
	debugfs_lookup_and_remove(name, iommu_debugfs_dir);
}
#else
static void iommu_debugfs_add(struct iommu_table *tbl){}
static void iommu_debugfs_del(struct iommu_table *tbl){}
#endif
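
/*
 * Note: with CONFIG_IOMMU_DEBUGFS each table shows up as a directory named
 * after its LIOBN (it_index, printed as %08lx above) under iommu_debugfs_dir,
 * containing read-only files for the bitmap weight and the it_* fields
 * registered above.
 */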

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4-way SMT we want interrupts on the primary threads;
 * without the hash, with 4 pools, all primary threads would map to the
 * same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
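
/*
 * The allocator below consumes this as:
 *	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 * so with IOMMU_NR_POOLS pools, neighbouring CPU ids (e.g. the SMT threads
 * of one core) are typically scattered over different pools instead of all
 * hammering pool 0.
 */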

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR_RW(fail_iommu);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

/*
 * PCI and VIO buses need separate notifier_block structs, since they're linked
 * list nodes.  Sharing a notifier_block would mean that any notifiers later
 * registered for PCI buses would also get called by VIO buses and vice versa.
 */
static struct notifier_block fail_iommu_pci_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

#ifdef CONFIG_IBMVIO
static struct notifier_block fail_iommu_vio_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};
#endif

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after PCI and VIO subsystems have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif
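
/*
 * Find a run of 'npages' free TCE slots in one of the table's pools.
 * The search starts in the pool selected by the per-CPU hash (or the
 * large pool for allocations of more than 15 pages) and, on failure,
 * retries the same pool from its start, then every other pool, and
 * finally the large pool before giving up.  'handle' lets scatterlist
 * mapping resume where the previous segment ended.
 */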

static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_MAPPING_ERROR;
	}

	if (should_fail_iommu(dev))
		return DMA_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
			align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass == tbl->nr_pools + 1) {
			/* Last resort: try largepool */
			spin_unlock(&pool->lock);
			pool = &tbl->large_pool;
			spin_lock(&pool->lock);
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
				~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
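
/*
 * iommu_alloc() below turns the bitmap slot returned by iommu_range_alloc()
 * into a bus address: the slot plus it_offset is the TCE index, and
 * (index << it_page_shift) is the DMA address handed back to the caller.
 * The low page-offset bits of the CPU address are OR-ed back in by the
 * callers (iommu_map_page()/ppc_iommu_map_sg()).
 */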

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_MAPPING_ERROR))
		return DMA_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
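
/*
 * ppc_iommu_map_sg() below maps each scatterlist element and, unless
 * "iommu=novmerge" was given, merges elements whose DMA addresses come out
 * back-to-back into a single segment (bounded by dma_get_max_seg_size()),
 * so the device sees fewer, larger DMA segments than the CPU-side list.
 */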

int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return -EINVAL;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return -EIO;
}

void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump, the system goes through a clean
	 * reboot at the time of the crash, so it is safe to clear the TCE
	 * entries when firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}
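
/*
 * Mark [res_start, res_end) (clamped to the table) as permanently allocated
 * in the bitmap.  This is typically used to keep a 32-bit MMIO window out of
 * the DMA range; page 0 is reserved unconditionally because some buggy
 * drivers treat bus address 0 as invalid.
 */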

static void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end)
{
	int i;

	WARN_ON_ONCE(res_end < res_start);
	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This stops buggy drivers that consider page 0 to be invalid
	 * from crashing the machine or even losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	if (res_start < tbl->it_offset)
		res_start = tbl->it_offset;

	if (res_end > (tbl->it_offset + tbl->it_size))
		res_end = tbl->it_offset + tbl->it_size;

	/* Check if res_start..res_end is a valid range in the table */
	if (res_start >= res_end) {
		tbl->it_reserved_start = tbl->it_offset;
		tbl->it_reserved_end = tbl->it_offset;
		return;
	}

	tbl->it_reserved_start = res_start;
	tbl->it_reserved_end = res_end;

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		set_bit(i - tbl->it_offset, tbl->it_map);
}
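
/*
 * Pool layout set up by iommu_init_table() (when the window is >= 1GB and
 * IOMMU_NR_POOLS pools are used):
 *
 *	|<- poolsize ->|<- poolsize ->| ... |<- poolsize ->|<- top 1/4 ->|
 *	   pools[0]       pools[1]           pools[N-1]      large_pool
 *
 * poolsize = (it_size * 3 / 4) / nr_pools, so the last quarter of the table
 * is kept for allocations of more than 15 pages.
 */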

/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
		unsigned long res_start, unsigned long res_end)
{
	unsigned long sz;
	static int welcomed = 0;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	tbl->it_map = vzalloc_node(sz, nid);
	if (!tbl->it_map) {
		pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
		return NULL;
	}

	iommu_table_reserve_pages(tbl, res_start, res_end);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	iommu_debugfs_add(tbl);

	return tbl;
}
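
/*
 * iommu_table_in_use() reports whether any TCEs are currently allocated,
 * ignoring the bits that are always set: the reserved page 0 and the
 * it_reserved_start..it_reserved_end range set up above.
 */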

bool iommu_table_in_use(struct iommu_table *tbl)
{
	unsigned long start = 0, end;

	/* ignore reserved bit0 */
	if (tbl->it_offset == 0)
		start = 1;

	/* Simple case with no reserved MMIO32 region */
	if (!tbl->it_reserved_start && !tbl->it_reserved_end)
		return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;

	end = tbl->it_reserved_start - tbl->it_offset;
	if (find_next_bit(tbl->it_map, end, start) != end)
		return true;

	start = tbl->it_reserved_end - tbl->it_offset;
	end = tbl->it_size;
	return find_next_bit(tbl->it_map, end, start) != end;
}

static void iommu_table_free(struct kref *kref)
{
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	iommu_debugfs_del(tbl);

	/* verify that table contains no entries */
	if (iommu_table_in_use(tbl))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* free bitmap */
	vfree(tbl->it_map);

	/* free table */
	kfree(tbl);
}

struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (kref_get_unless_zero(&tbl->it_kref))
		return tbl;

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;

	if (tbl) {
		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      unsigned long attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;
	int tcesize = (1 << tbl->it_page_shift);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;

	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, 0);
	if (mapping == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	*dma_handle = mapping | ((u64)ret & (tcesize - 1));
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
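
/*
 * TCE permission bits are named from the device's point of view:
 * TCE_PCI_READ lets the device read memory (DMA_TO_DEVICE) and
 * TCE_PCI_WRITE lets it write memory (DMA_FROM_DEVICE); bidirectional
 * mappings get both (see also iommu_tce_direction() below).
 */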

unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	if ((ioba + 1) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
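
/*
 * Replace the TCE at 'entry' and return the old host physical address and
 * direction through *hpa/*direction.  If the device could have written
 * through the old mapping (DMA_FROM_DEVICE or DMA_BIDIRECTIONAL) and the
 * page is regular memory, it is marked dirty.  The "no_kill" in the name
 * means the caller is expected to invalidate the TCE cache afterwards via
 * iommu_tce_kill().
 */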

long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;
	unsigned long size = 0;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)) &&
			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
					&size))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);

void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
static int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires exchange() callback defined so if it is not
	 * implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->xchg_no_kill)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

	if (iommu_table_in_use(tbl)) {
		pr_err("iommu_tce: it_map is not empty");
		ret = -EBUSY;
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}

static void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

	memset(tbl->it_map, 0, sz);

	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
			tbl->it_reserved_end);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
#endif

int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
	/*
	 * The sysfs entries should be populated before
	 * binding the IOMMU group. If the sysfs entries aren't
	 * ready, we simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (device_iommu_mapped(dev)) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return 0;
	}

	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev), iommu_group_id(table_group->group));
	/*
	 * This is still not adding devices via the IOMMU bus notifier because
	 * of pcibios_init() from arch/powerpc/kernel/pci_64.c which calls
	 * pcibios_scan_phb() first (and this guy adds devices and triggers
	 * the notifier) and only then it calls pci_bus_add_devices() which
	 * configures DMA for buses which also creates PEs and IOMMU groups.
	 */
	return iommu_probe_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * A simple iommu_table_group_ops which only allows reusing the existing
 * iommu_table. This handles VFIO for POWER7 or the nested KVM.
 * The ops does not allow creating windows and only allows reusing the existing
 * one if it matches table_group->tce32_start/tce32_size/page_shift.
 */
static unsigned long spapr_tce_get_table_size(__u32 page_shift,
					      __u64 window_size, __u32 levels)
{
	unsigned long size;

	if (levels > 1)
		return ~0U;
	size = window_size >> (page_shift - 3);
	return size;
}

static long spapr_tce_create_table(struct iommu_table_group *table_group, int num,
				   __u32 page_shift, __u64 window_size, __u32 levels,
				   struct iommu_table **ptbl)
{
	struct iommu_table *tbl = table_group->tables[0];

	if (num > 0)
		return -EPERM;

	if (tbl->it_page_shift != page_shift ||
	    tbl->it_size != (window_size >> page_shift) ||
	    tbl->it_indirect_levels != levels - 1)
		return -EINVAL;

	*ptbl = iommu_tce_table_get(tbl);
	return 0;
}

static long spapr_tce_set_window(struct iommu_table_group *table_group,
				 int num, struct iommu_table *tbl)
{
	return tbl == table_group->tables[num] ? 0 : -EPERM;
}

static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num)
{
	return 0;
}

static long spapr_tce_take_ownership(struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (!rc)
			continue;

		for (j = 0; j < i; ++j)
			iommu_release_ownership(table_group->tables[j]);
		return rc;
	}
	return 0;
}

static void spapr_tce_release_ownership(struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl)
			continue;

		iommu_table_clear(tbl);
		if (tbl->it_map)
			iommu_release_ownership(tbl);
	}
}

struct iommu_table_group_ops spapr_tce_table_group_ops = {
	.get_table_size = spapr_tce_get_table_size,
	.create_table = spapr_tce_create_table,
	.set_window = spapr_tce_set_window,
	.unset_window = spapr_tce_unset_window,
	.take_ownership = spapr_tce_take_ownership,
	.release_ownership = spapr_tce_release_ownership,
};

/*
 * A simple iommu_ops to allow less cruft in generic VFIO code.
 */
static int
spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
				    struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_table_group *table_group;
	struct iommu_group *grp;

	/* At first attach the ownership is already set */
	if (!domain)
		return 0;

	grp = iommu_group_get(dev);
	table_group = iommu_group_get_iommudata(grp);
	/*
	 * The domain is being switched to PLATFORM from an earlier
	 * BLOCKED one, so the table_group ownership has to be released.
	 */
	table_group->ops->release_ownership(table_group);
	iommu_group_put(grp);

	return 0;
}

static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
	.attach_dev = spapr_tce_platform_iommu_attach_dev,
};

static struct iommu_domain spapr_tce_platform_domain = {
	.type = IOMMU_DOMAIN_PLATFORM,
	.ops = &spapr_tce_platform_domain_ops,
};

static int
spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
				   struct device *dev)
{
	struct iommu_group *grp = iommu_group_get(dev);
	struct iommu_table_group *table_group;
	int ret = -EINVAL;

	/*
	 * FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
	 * also sets the dma_api ops
	 */
	table_group = iommu_group_get_iommudata(grp);
	ret = table_group->ops->take_ownership(table_group);
	iommu_group_put(grp);

	return ret;
}

static const struct iommu_domain_ops spapr_tce_blocked_domain_ops = {
	.attach_dev = spapr_tce_blocked_iommu_attach_dev,
};

static struct iommu_domain spapr_tce_blocked_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &spapr_tce_blocked_domain_ops,
};
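
/*
 * Attaching to the blocked domain (e.g. when VFIO claims a group) takes
 * ownership of the group's tables away from the kernel DMA API; attaching
 * back to the platform (default) domain releases it again.  This mirrors
 * the take_ownership/release_ownership pair above.
 */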

static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	default:
		break;
	}

	return false;
}

static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
{
	struct pci_dev *pdev;
	struct pci_controller *hose;

	if (!dev_is_pci(dev))
		return ERR_PTR(-ENODEV);

	pdev = to_pci_dev(dev);
	hose = pdev->bus->sysdata;

	return &hose->iommu;
}

static void spapr_tce_iommu_release_device(struct device *dev)
{
}

static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
{
	struct pci_controller *hose;
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	hose = pdev->bus->sysdata;

	if (!hose->controller_ops.device_group)
		return ERR_PTR(-ENOENT);

	return hose->controller_ops.device_group(hose, pdev);
}

static const struct iommu_ops spapr_tce_iommu_ops = {
	.default_domain = &spapr_tce_platform_domain,
	.blocked_domain = &spapr_tce_blocked_domain,
	.capable = spapr_tce_iommu_capable,
	.probe_device = spapr_tce_iommu_probe_device,
	.release_device = spapr_tce_iommu_release_device,
	.device_group = spapr_tce_iommu_device_group,
};

static struct attribute *spapr_tce_iommu_attrs[] = {
	NULL,
};

static struct attribute_group spapr_tce_iommu_group = {
	.name = "spapr-tce-iommu",
	.attrs = spapr_tce_iommu_attrs,
};

static const struct attribute_group *spapr_tce_iommu_groups[] = {
	&spapr_tce_iommu_group,
	NULL,
};
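
/*
 * One iommu_device is registered per PHB (pci_controller), so every PCI
 * device behind that PHB reports the same IOMMU in sysfs (see
 * spapr_tce_iommu_probe_device() returning &hose->iommu above).
 */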

void ppc_iommu_register_device(struct pci_controller *phb)
{
	iommu_device_sysfs_add(&phb->iommu, phb->parent,
				spapr_tce_iommu_groups, "iommu-phb%04x",
				phb->global_number);
	iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
				phb->parent);
}

void ppc_iommu_unregister_device(struct pci_controller *phb)
{
	iommu_device_unregister(&phb->iommu);
	iommu_device_sysfs_remove(&phb->iommu);
}

/*
 * This registers IOMMU devices of PHBs. This needs to happen
 * after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
 * before subsys_initcall(iommu_subsys_init).
 */
static int __init spapr_tce_setup_phb_iommus_initcall(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node) {
		ppc_iommu_register_device(hose);
	}
	return 0;
}
postcore_initcall_sync(spapr_tce_setup_phb_iommus_initcall);
#endif

#endif /* CONFIG_IOMMU_API */