/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

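/*
 * Return the physical base address of the stolen region, or 0 if it cannot
 * be determined (in which case stolen memory is left unused).
 */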
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
        u32 base;

        /* Almost universally we can find the Graphics Base of Stolen Memory
         * at offset 0x5c in the igfx configuration space. On a few (desktop)
         * machines this is also mirrored in the bridge device at different
         * locations, or in the MCHBAR. On gen2, the layout is again slightly
         * different with the Graphics Segment immediately following Top of
         * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
         * reported by 865g, so we just use the top of memory as determined
         * by the e820 probe.
         *
         * XXX However gen2 requires an unavailable symbol.
         */
        base = 0;
        if (INTEL_INFO(dev)->gen >= 3) {
                /* Read Graphics Base of Stolen Memory directly */
                pci_read_config_dword(dev->pdev, 0x5c, &base);
                base &= ~((1<<20) - 1);
        } else { /* GEN2 */
#if 0
                /* Stolen is immediately above Top of Memory */
                base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
        }

        if (base == 0)
                return 0;

        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
#if 0
        r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
                 * One more attempt but this time requesting region from
                 * base + 1, as we have seen that this resolves the region
                 * conflict with the PCI Bus.
                 * This is a BIOS w/a: Some BIOS wrap stolen in the root
                 * PCI bus, but have an off-by-one error. Hence retry the
                 * reservation starting from 1 instead of 0.
                 */
                r = devm_request_mem_region(dev->dev, base + 1,
                                            dev_priv->gtt.stolen_size - 1,
                                            "Graphics Stolen Memory");
                if (r == NULL) {
                        DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
                                  base, base + (uint32_t)dev_priv->gtt.stolen_size);
                        base = 0;
                }
        }
#endif

        return base;
}

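/*
 * Carve space for FBC out of stolen memory: the compressed framebuffer
 * (CFB) itself and, on hardware without a PCH split or GM45, a separate
 * compressed line-length buffer (LLB). The base addresses are programmed
 * into the hardware registers below.
 */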
static int i915_setup_compression(struct drm_device *dev, int size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mm_node *compressed_fb, *compressed_llb = NULL;
        int ret;

        compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
        if (!compressed_fb)
                goto err_llb;

        /* Try to over-allocate to reduce reallocations and fragmentation */
        ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
                                 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
        if (ret)
                ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
                                         size >>= 1, 4096,
                                         DRM_MM_SEARCH_DEFAULT);
        if (ret)
                goto err_llb;

        if (HAS_PCH_SPLIT(dev))
                I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
        else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
                compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
                if (!compressed_llb)
                        goto err_fb;

                ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
                                         4096, 4096, DRM_MM_SEARCH_DEFAULT);
                if (ret)
                        goto err_fb;

                dev_priv->fbc.compressed_llb = compressed_llb;

                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + compressed_fb->start);
                I915_WRITE(FBC_LL_BASE,
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }

        dev_priv->fbc.compressed_fb = compressed_fb;
        dev_priv->fbc.size = size;

        DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
                      size);

        return 0;

err_fb:
        kfree(compressed_llb);
        drm_mm_remove_node(compressed_fb);
err_llb:
        kfree(compressed_fb);
        pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
}

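/*
 * (Re)allocate the compressed framebuffer. If the existing reservation is
 * already larger than the requested size it is kept; otherwise it is
 * released and a new block is carved out of stolen memory.
 */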
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;

        if (size < dev_priv->fbc.size)
                return 0;

        /* Release any current block */
        i915_gem_stolen_cleanup_compression(dev);

        return i915_setup_compression(dev, size);
}

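/* Release the compressed framebuffer and line-length buffer, if any. */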
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->fbc.size == 0)
                return;

        if (dev_priv->fbc.compressed_fb) {
                drm_mm_remove_node(dev_priv->fbc.compressed_fb);
                kfree(dev_priv->fbc.compressed_fb);
        }

        if (dev_priv->fbc.compressed_llb) {
                drm_mm_remove_node(dev_priv->fbc.compressed_llb);
                kfree(dev_priv->fbc.compressed_llb);
        }

        dev_priv->fbc.size = 0;
}

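/* Tear down the stolen-memory allocator on driver unload. */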
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;

        i915_gem_stolen_cleanup_compression(dev);
        drm_mm_takedown(&dev_priv->mm.stolen);
}

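/*
 * Locate the stolen region and set up a drm_mm range allocator over it,
 * excluding any portion reserved by the BIOS. Returns 0 even when stolen
 * memory is unusable, since the driver can operate without it.
 */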
int i915_gem_init_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int bios_reserved = 0;

#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_gfx_mapped) {
                DRM_INFO("DMAR active, disabling use of stolen memory\n");
                return 0;
        }
#endif

        if (dev_priv->gtt.stolen_size == 0)
                return 0;

        dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
        if (dev_priv->mm.stolen_base == 0)
                return 0;

        DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
                      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

        if (IS_VALLEYVIEW(dev))
                bios_reserved = 1024*1024; /* top 1M on VLV/BYT */

        if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
                return 0;

        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
                    bios_reserved);

        return 0;
}

#if 0
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sg_table *st;
        struct scatterlist *sg;

        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
        BUG_ON(offset > dev_priv->gtt.stolen_size - size);

        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return NULL;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return NULL;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
        sg_dma_len(sg) = size;

        return st;
}
#endif

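/*
 * Stolen objects cannot currently be instantiated in this port (see
 * _i915_gem_object_create_stolen() below), so these page hooks should
 * never run; they BUG() if reached.
 */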
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        BUG();
        return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
#if 0
        /* Should only be called during free */
        sg_free_table(obj->pages);
        kfree(obj->pages);
#else
        BUG();
#endif
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
};

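/*
 * Wrap a stolen-memory range in a GEM object. Object allocation and the
 * fake scatterlist backing are still stubbed out (#if 0) in this port, so
 * the function always returns NULL until those pieces are brought over.
 */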
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;

#if 0
        obj = i915_gem_object_alloc(dev);
#else
        obj = NULL;
#endif
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

#if 0
        obj->pages = i915_pages_create_for_stolen(dev,
                                                  stolen->start, stolen->size);
#else
        obj->pages = NULL;
#endif
        if (obj->pages == NULL)
                goto cleanup;

        obj->has_dma_mapping = true;
        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;

        obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}

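/*
 * Allocate a new object backed by stolen memory. Space is carved out of
 * the stolen drm_mm allocator at 4k alignment and handed to
 * _i915_gem_object_create_stolen().
 */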
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
        if (size == 0)
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
                                 4096, DRM_MM_SEARCH_DEFAULT);
        if (ret) {
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;

        drm_mm_remove_node(stolen);
        kfree(stolen);
        return NULL;
}

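/*
 * Create an object for a range of stolen memory that has already been
 * populated (typically the BIOS framebuffer), optionally reserving it at a
 * fixed global GTT offset so the existing scanout is left undisturbed.
 */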
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *ggtt = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                        stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        BUG_ON(stolen_offset & 4095);
        BUG_ON(size & 4095);

        if (WARN_ON(size == 0))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        stolen->start = stolen_offset;
        stolen->size = size;
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                drm_mm_remove_node(stolen);
                kfree(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_out;
        }

        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;
        if (drm_mm_initialized(&ggtt->mm)) {
                ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
                if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                        goto err_vma;
                }
        }

        obj->has_global_gtt_mapping = 1;

        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &ggtt->inactive_list);
        i915_gem_object_pin_pages(obj);

        return obj;

err_vma:
        i915_gem_vma_destroy(vma);
err_out:
        drm_mm_remove_node(stolen);
        kfree(stolen);
        drm_gem_object_unreference(&obj->base);
        return NULL;
}

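/* Return an object's range to the stolen allocator when the object is freed. */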
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        if (obj->stolen) {
                drm_mm_remove_node(obj->stolen);
                kfree(obj->stolen);
                obj->stolen = NULL;
        }
}