/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)
/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
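/*
 * Stolen space is managed below with a plain drm_mm range allocator.
 * Every insertion into and removal from that allocator must be done
 * under mm.stolen_lock, which the helper functions that follow take
 * care of.
 */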
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}
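/*
 * DragonFly does not provide the Linux devm_request_mem_region() API
 * natively, so a minimal local substitute built on rman is supplied
 * here; it only needs to be good enough for the stolen-range conflict
 * check performed in i915_stolen_to_dma() below.
 */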
#ifdef __DragonFly__
struct resource * devm_request_mem_region(struct device *dev,
    resource_size_t start, resource_size_t n, const char *name)
{
	static struct rman stolen_rman;
	struct resource *res;

	stolen_rman.rm_start = start;
	stolen_rman.rm_end = start + n;
	stolen_rman.rm_type = RMAN_ARRAY;
	stolen_rman.rm_descr = name;
	if (rman_init(&stolen_rman, -1))
		return NULL;

	if (rman_manage_region(&stolen_rman, stolen_rman.rm_start, stolen_rman.rm_end))
		return NULL;

	res = kmalloc(sizeof(*res), M_DRM, GFP_KERNEL);
	if (res == NULL)
		return NULL;

	return res;
}
#endif /* __DragonFly__ */
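/*
 * Locate the physical base address of stolen memory for this device.
 * A return value of 0 signals failure; stolen memory is assumed never
 * to live at physical address zero.
 */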
static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;
	dma_addr_t base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at register BSM (0x5c) in the igfx configuration space. On a few
	 * (desktop) machines this is also mirrored in the bridge device at
	 * different locations, or in the MCHBAR.
	 *
	 * On 865 we just check the TOUD register.
	 *
	 * On 830/845/85x the stolen memory base isn't available in any
	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
	 */
	base = 0;
	if (INTEL_GEN(dev_priv) >= 3) {
		u32 bsm;

		pci_read_config_dword(pdev, INTEL_BSM, &bsm);

		base = bsm & INTEL_BSM_MASK;
	} else if (IS_I865G(dev_priv)) {
		u32 tseg_size = 0;
		u16 toud = 0;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);
		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
					 I865_TOUD, &toud);
		base = (toud << 16) + tseg_size;
	} else if (IS_I85X(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I85X_ESMRAMC, &tmp);
		if (tmp & TSEG_ENABLE)
			tseg_size = MB(1);

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
					 I85X_DRB3, &tmp);
		tom = tmp * MB(32);
		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I845G(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);
		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);
		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I830(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_ESMRAMC, &tmp);
		if (tmp & TSEG_ENABLE) {
			if (tmp & I830_TSEG_SIZE_1M)
				tseg_size = MB(1);
			else
				tseg_size = KB(512);
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);
		base = tom - tseg_size - ggtt->stolen_size;
	}

	if (base == 0 || add_overflows(base, ggtt->stolen_size))
		return 0;
	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct {
			dma_addr_t start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev_priv))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			dma_addr_t end = base + ggtt->stolen_size - 1;

			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
				      &base, &end);
		}
	}
	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
					    ggtt->stolen_size - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev_priv)) {
			dma_addr_t end = base + ggtt->stolen_size;

			DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
				  &base, &end);
			base = 0;
		}
	}

	return base;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}
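/*
 * The *_get_stolen_reserved() helpers below decode the per-generation
 * STOLEN_RESERVED register into a (base, size) pair describing the
 * portion of stolen memory that the hardware/firmware keeps for its
 * own use and which must therefore stay out of our allocator.
 */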
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}
static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	dma_addr_t stolen_top;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}
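/*
 * i915_gem_init_stolen() probes the stolen region, validates the
 * reserved portion at its top, and initialises the drm_mm allocator
 * over whatever remains usable.
 */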
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	dma_addr_t reserved_base, stolen_top;
	u32 reserved_total, reserved_size;
	u32 stolen_usable_start;

	lockinit(&dev_priv->mm.stolen_lock, "i915msl", 0, LK_CANRECURSE);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
	reserved_base = 0;
	reserved_size = 0;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev_priv))
			g4x_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	default:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		dma_addr_t reserved_top = reserved_base + reserved_size;
		DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
			      &reserved_base, &reserved_top,
			      &dev_priv->mm.stolen_base, &stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	stolen_usable_start = 0;
	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8)
		stolen_usable_start = 4096;

	ggtt->stolen_usable_size =
		ggtt->stolen_size - reserved_total - stolen_usable_start;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
		    ggtt->stolen_usable_size);

	return 0;
}
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */
	st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	__i915_gem_object_unpin_pages(obj);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
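/*
 * Wrap an already-allocated stolen drm_mm_node in a GEM object. The
 * object has no struct page backing, so its pages are pinned for the
 * object's entire lifetime and only unpinned again on release.
 */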
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}
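/*
 * Public constructor: carve a fresh, 4096-byte aligned range out of
 * stolen space and wrap it in a GEM object.
 */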
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}
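/*
 * Variant for ranges that firmware has already populated (e.g. the
 * BIOS framebuffer we want to inherit for fbcon): the caller dictates
 * the stolen offset and, optionally, a fixed GTT offset to bind to.
 */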
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);
	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);

	lockmgr(&dev_priv->mm.obj_lock, LK_EXCLUSIVE);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	lockmgr(&dev_priv->mm.obj_lock, LK_RELEASE);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}