/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/sched.h>	/* for show_stack */
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>

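/*
 * Note: some PHBs require TCE updates to be followed by a store to an
 * MMIO "invalidate" register.  For those, iommu_table_setparms() below
 * overloads it_index with the ioremapped register address and it_busno
 * with routing magic from the device tree.
 */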
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
				      __be64 *startp, __be64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);
	inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */

	/* If this is non-zero, change the format.  We shift the
	 * address and or in the magic from the device tree. */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc <<= 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	}

	end |= inc - 1; /* round up end to be different than start */

	mb(); /* Make sure TCEs in memory are written */
	while (start <= end) {
		out_be64(invalidate, start);
		start += inc;
	}
}

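/*
 * Bare-metal (non-LPAR) path: the TCE table is in system memory and is
 * written directly, one 64-bit entry per 4K DMA page.
 */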
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	u64 proto_tce;
	__be64 *tcep, *tces;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}

	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
	return 0;
}

static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	__be64 *tcep, *tces;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
}

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*tcep);
}

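/*
 * The *_pSeriesLP variants below run under a hypervisor that owns the
 * TCE table, so entries are updated through the H_PUT_TCE family of
 * hcalls (via the plpar_tce_* wrappers) rather than by direct stores.
 */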
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);

static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(tbl, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
			show_stack(current, (unsigned long *)__get_SP());
		}

		tcenum++;
		rpn++;
	}
	return ret;
}

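/*
 * Per-CPU scratch page used to batch TCEs for plpar_tce_put_indirect().
 * It is only touched with interrupts disabled, so no further locking is
 * needed (see the local_irq_save() in tce_buildmulti_pSeriesLP()).
 */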
static DEFINE_PER_CPU(__be64 *, tce_page);

static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if (npages == 1) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
						   direction, attrs);
		}
		__get_cpu_var(tce_page) = tcep;
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());
	}
	return ret;
}

static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			show_stack(current, (unsigned long *)__get_SP());
		}

		tcenum++;
	}
}

static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc      = %lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		show_stack(current, (unsigned long *)__get_SP());
	}
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
		show_stack(current, (unsigned long *)__get_SP());
	}

	return tce_ret;
}

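/*
 * Dynamic DMA window (DDW) support: on capable firmware, a second TCE
 * window can be created that linearly maps all of partition memory,
 * allowing 64-bit capable devices to use direct DMA instead of the
 * iommu path.
 */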
/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	__be32 windows_available;
	__be32 largest_available_block;
	__be32 page_size;
	__be32 migration_capable;
};

struct ddw_create_response {
	__be32 liobn;
	__be32 addr_hi;
	__be32 addr_lo;
};

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"

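/*
 * Clear or populate the TCEs backing a PFN range of a dynamic window.
 * These helpers serve both window teardown/creation and the memory
 * hotplug notifier further down.
 */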
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset,
				     0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}

static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __get_cpu_var(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__get_cpu_var(tce_page) = tcep;
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}

#ifdef CONFIG_PCI

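/*
 * Fill in an iommu_table from the linux,tce-* properties of a
 * bare-metal PHB node; the table lives in system memory and is zeroed
 * here unless we are a kdump kernel inheriting it.
 */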
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep, *sw_inval;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
				"missing tce entries !\n", dn->full_name);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;

	sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
	if (sw_inval) {
		/*
		 * This property contains information on how to
		 * invalidate the TCE entry.  The first property is
		 * the base MMIO address used to invalidate entries.
		 * The second property tells us the format of the TCE
		 * invalidate (whether it needs to be shifted) and
		 * some magic routing info to add to our invalidate
		 * command.
		 */
		tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
		tbl->it_busno = sw_inval[1]; /* overload this with magic */
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
}

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
}

static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
			   pci->phb->node);

	iommu_table_setparms(pci->phb, dn, tbl);
	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(tbl, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}

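/*
 * LPAR bus setup: the TCE table geometry comes from the nearest
 * ibm,dma-window property in the device tree rather than from
 * linux,tce-* properties.
 */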
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",
		 dn->full_name);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug("  no ibm,dma-window property !\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug("  parent is %s, iommu_table: 0x%p\n",
		 pdn->full_name, ppci->iommu_table);

	if (!ppci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   ppci->phb->node);
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(bus), 0);
		pr_debug("  created table: %p\n", ppci->iommu_table);
	}
}

static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   phb->node);
		iommu_table_setparms(phb, dn, tbl);
		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
		iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
		set_iommu_table_base_and_group(&dev->dev,
					       PCI_DN(dn)->iommu_table);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */
	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn))
		set_iommu_table_base_and_group(&dev->dev,
					       PCI_DN(dn)->iommu_table);
	else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}

static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);

static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn)
{
	int ret;

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warning("%s: failed to remove DMA window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);
	else
		pr_debug("%s: successfully removed DMA window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);
}

static void remove_ddw(struct device_node *np)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	const u32 *ddw_avail;
	u64 liobn;
	int ret;
	int len;

	ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;

	if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warning("%s failed to clear tces in window.\n",
			   np->full_name);
	else
		pr_debug("%s successfully cleared tces in window.\n",
			 np->full_name);

	__remove_ddw(np, ddw_avail, liobn);

delprop:
	ret = of_remove_property(np, win64);
	if (ret)
		pr_warning("%s: failed to remove direct window property: %d\n",
			   np->full_name, ret);
}

static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = be64_to_cpu(direct64->dma_base);
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}

static void __restore_default_window(struct eeh_dev *edev,
				     u32 ddw_restore_token)
{
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	do {
		ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
				BUID_HI(buid), BUID_LO(buid));
	} while (rtas_busy_delay(ret));
	pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
		ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
}

static int find_existing_ddw_windows(void)
{
	struct device_node *pdn;
	const struct dynamic_dma_window_prop *direct64;
	const u32 *ddw_extensions;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL);
		if (!direct64)
			continue;

		/*
		 * We need to ensure the IOMMU table is active when we
		 * return from the IOMMU setup so that the common code
		 * can clear the table or find the holes. To that end,
		 * first, remove any existing DDW configuration.
		 */
		remove_ddw(pdn);

		/*
		 * Second, if we are running on a new enough level of
		 * firmware where the restore API is present, use it to
		 * restore the 32-bit window, which was removed in
		 * create_ddw.
		 * If the API is not present, then create_ddw couldn't
		 * have removed the 32-bit window in the first place, so
		 * removing the DDW configuration should be sufficient.
		 */
		ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions",
						 NULL);
		if (ddw_extensions && ddw_extensions[0] > 0)
			__restore_default_window(of_node_to_eeh_dev(pdn),
						 ddw_extensions[1]);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);

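/*
 * query_ddw()/create_ddw() wrap the ibm,query-pe-dma-windows and
 * ibm,create-pe-dma-window RTAS calls; their tokens come from the
 * ibm,ddw-applicable property (see enable_ddw() below).
 */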
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_query_response *query)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		" returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		BUID_LO(buid), ret);

	return ret;
}

static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_create_response *create, int page_shift,
			int window_shift)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
				BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

	return ret;
}

static void restore_default_window(struct pci_dev *dev,
				   u32 ddw_restore_token)
{
	__restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);
}

struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);

/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma_window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len, ret;
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	const u32 *uninitialized_var(ddw_avail);
	const u32 *uninitialized_var(ddw_extensions);
	u32 ddw_restore_token = 0;
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	const void *dma_window = NULL;
	unsigned long liobn, offset, size;
	struct failed_ddw_pdn *fpdn;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);
	if (dma_addr != 0)
		goto out_unlock;

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
			goto out_unlock;
	}

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
	if (!ddw_avail || len < 3 * sizeof(u32))
		goto out_unlock;

	/*
	 * the extensions property is only required to exist in certain
	 * levels of firmware and later
	 * the ibm,ddw-extensions property is a list with the first
	 * element containing the number of extensions and each
	 * subsequent entry is a value corresponding to that extension
	 */
	ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len);
	if (ddw_extensions) {
		/*
		 * each new defined extension length should be added to
		 * the top of the switch so the "earlier" entries also
		 * get picked up
		 */
		switch (ddw_extensions[0]) {
		/* ibm,reset-pe-dma-windows */
		case 1:
			ddw_restore_token = ddw_extensions[1];
			break;
		}
	}

	/*
	 * Only remove the existing DMA window if we can restore back to
	 * the default state. Removing the existing window maximizes the
	 * resources available to firmware for dynamic window creation.
	 */
	if (ddw_restore_token) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size);
		__remove_ddw(pdn, ddw_avail, liobn);
	}

	/*
	 * Query if there is a second window of size to map the
	 * whole partition.  Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
	if (ret != 0)
		goto out_restore_window;

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_restore_window;
	}
	if (be32_to_cpu(query.page_size) & 4) {
		page_shift = 24; /* 16MB */
	} else if (be32_to_cpu(query.page_size) & 2) {
		page_shift = 16; /* 64kB */
	} else if (be32_to_cpu(query.page_size) & 1) {
		page_shift = 12; /* 4kB */
	} else {
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
			query.page_size);
		goto out_restore_window;
	}
	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr, query.largest_available_block,
			1ULL << page_shift);
		goto out_restore_window;
	}
	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
	if (!win64) {
		dev_info(&dev->dev,
			"couldn't allocate property for 64bit dma window\n");
		goto out_restore_window;
	}
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		dev_info(&dev->dev,
			"couldn't allocate property name and value\n");
		goto out_free_prop;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_free_prop;

	ddwprop->liobn = create.liobn;
	ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
		create.liobn, dn->full_name);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
	if (ret) {
		dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
			dn->full_name, ret);
		goto out_free_window;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add dma window property for %s: %d",
			pdn->full_name, ret);
		goto out_free_window;
	}

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = of_read_number(&create.addr_hi, 2);
	goto out_unlock;

out_free_window:
	kfree(window);

out_clear_window:
	remove_ddw(pdn);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_restore_window:
	if (ddw_restore_token)
		restore_default_window(dev, ddw_restore_token);

	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	if (!fpdn)
		goto out_unlock;
	fpdn->pdn = pdn;
	list_add(&fpdn->list, &failed_ddw_pdn_list);

out_unlock:
	mutex_unlock(&direct_window_init_mutex);
	return dma_addr;
}

static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug("  node is %s\n", dn->full_name);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%s\n",
		       pci_name(dev), of_node_full_name(dn));
		return;
	}
	pr_debug("  parent is %s\n", pdn->full_name);

	pci = PCI_DN(pdn);
	if (!pci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   pci->phb->node);
		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0);
		pr_debug("  created table: %p\n", pci->iommu_table);
	} else {
		pr_debug("  found DMA window, table: %p\n", pci->iommu_table);
	}

	set_iommu_table_base_and_group(&dev->dev, pci->iommu_table);
}

static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const __be32 *dma_window = NULL;
	u64 dma_offset;

	if (!dev->dma_mask)
		return -EIO;

	if (!dev_is_pci(dev))
		goto check_mask;

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %s\n", dn->full_name);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
				pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
			if (dma_window)
				break;
		}
		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);
				ddw_enabled = true;
			}
		}
	}

	/* fall back on iommu ops, restore table pointer with ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
		pci_dma_dev_setup_pSeriesLP(pdev);
	}

check_mask:
	if (!dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}

static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
{
	if (!dev->dma_mask)
		return 0;

	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
				dn = dn->parent)
			if (of_get_property(dn, "ibm,dma-window", NULL))
				break;
		/* if there is an ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
				of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);
	}

	return dma_iommu_ops.get_required_mask(dev);
}

#else /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries	NULL
#define pci_dma_dev_setup_pSeries	NULL
#define pci_dma_bus_setup_pSeriesLP	NULL
#define pci_dma_dev_setup_pSeriesLP	NULL
#define dma_set_mask_pSeriesLP		NULL
#define dma_get_required_mask_pSeriesLP	NULL
#endif /* !CONFIG_PCI */

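/*
 * Memory hotplug: memory going online must be added to every active
 * direct window so the linear mapping stays complete; cancelled or
 * offlined ranges are cleared again.
 */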
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};

static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
{
	int err = NOTIFY_OK;
	struct device_node *np = node;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		remove_ddw(np);
		if (pci && pci->iommu_table)
			iommu_free_table(pci->iommu_table, np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};

/* These are called very early. */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
			ppc_md.tce_build = tce_buildmulti_pSeriesLP;
			ppc_md.tce_free	 = tce_freemulti_pSeriesLP;
		} else {
			ppc_md.tce_build = tce_build_pSeriesLP;
			ppc_md.tce_free	 = tce_free_pSeriesLP;
		}
		ppc_md.tce_get = tce_get_pSeriesLP;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
	} else {
		ppc_md.tce_build = tce_build_pSeries;
		ppc_md.tce_free = tce_free_pSeries;
		ppc_md.tce_get = tce_get_pseries;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}

static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		ppc_md.tce_build = tce_build_pSeriesLP;
		ppc_md.tce_free = tce_free_pSeriesLP;
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
	}
	return 1;
}

__setup("multitce=", disable_multitce);