/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include <linux/string.h>
#include <linux/bitops.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
/** @file i915_gem_tiling.c
 *
 * Support for managing the tiling state of buffer objects.
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%-50%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled.  However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y.  So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics.  This
 * is called "Channel XOR Randomization" in the MCH documentation.  The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
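
/*
 * Illustrative sketch only (not part of the driver): for the common
 * I915_BIT_6_SWIZZLE_9_10 mode described above, the bit 6 one agent sees
 * is the other agent's bit 6 XORed with bits 9 and 10 of the address.
 * The helper below (name and use hypothetical) shows the arithmetic a
 * software swizzling path would apply per byte offset.
 */
static inline unsigned long
__example_swizzle_addr_9_10(unsigned long addr)
{
	unsigned long bit6 = (addr >> 6) & 1;

	/* Fold the channel-select bits back into bit 6. */
	bit6 ^= (addr >> 9) & 1;
	bit6 ^= (addr >> 10) & 1;

	/* Rebuild the address with the swizzled bit 6. */
	return (addr & ~(1UL << 6)) | (bit6 << 6);
}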

/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
		/*
		 * On BDW+, swizzling is not used.  We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		if (dev_priv->preserve_bios_swizzle) {
			if (I915_READ(DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			uint32_t dimm_c0, dimm_c1;
			dimm_c0 = I915_READ(MAD_DIMM_C0);
			dimm_c1 = I915_READ(MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/* Enable swizzling when the channels are populated
			 * with identically sized dimms.  We don't need to check
			 * the 3rd channel because no CPU with a GPU attached
			 * ships in that configuration.  Also, swizzling only
			 * makes sense for 2 channels anyway. */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (IS_GEN5(dev)) {
		/* On Ironlake, whatever the DRAM config, the GPU always does
		 * the same swizzling setup.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
		uint32_t dcc;

		/* On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC.  For single-channel, neither the CPU
		 * nor the GPU do swizzling.  For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled.  The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}

		/* check for L-shaped memory aka modified enhanced addressing */
		if (IS_GEN4(dev)) {
			uint32_t ddc2 = I915_READ(DCC2);

			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
		}

		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR.  "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration.  It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B   1-ch   2-ch
		 * 512  0    0    0    512    0     O
		 * 512  0    512  0    16     1008  X
		 * 512  0    0    512  16     1008  X
		 * 0    512  0    512  16     1008  X
		 * 1024 1024 1024 0    2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 */
		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}

/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
	int tile_width;

	/* Linear is always fine */
	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (IS_GEN2(dev) ||
	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
		tile_width = 128;
	else
		tile_width = 512;

	/* check maximum stride & object size */
	/* i965+ stores the end address of the gtt mapping in the fence
	 * reg, so don't bother to check the size */
	if (INTEL_INFO(dev)->gen >= 7) {
		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
			return false;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
			return false;
	} else {
		if (stride > 8192)
			return false;

		if (IS_GEN3(dev)) {
			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
				return false;
		} else {
			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
				return false;
		}
	}

	if (stride < tile_width)
		return false;

	/* 965+ just needs multiples of tile width */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (stride & (tile_width - 1))
			return false;
		return true;
	}

	/* Pre-965 needs power of two tile widths */
	if (stride & (stride - 1))
		return false;

	return true;
}
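
/*
 * Worked example (illustrative, values assumed): a 32bpp, 1152-pixel-wide
 * X-tiled surface needs a 4608 byte stride.  4608 is a multiple of the
 * 512 byte X tile width, so gen4+ parts accept it, but it is not a power
 * of two, so pre-965 parts would have to round the stride up to 8192 or
 * fall back to linear.
 */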

/* Is the current GTT allocation valid for the change in tiling? */
static bool
i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
	u32 size;

	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (INTEL_INFO(obj->base.dev)->gen >= 4)
		return true;

	if (INTEL_INFO(obj->base.dev)->gen == 3) {
		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
			return false;
	} else {
		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
			return false;
	}

	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
	if (i915_gem_obj_ggtt_size(obj) != size)
		return false;

	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
		return false;

	return true;
}

/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
		ret = -EBUSY;
		goto err;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode.  Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is updated before
		 * the next fenced (either through the GTT or by the BLT unit
		 * on older GPUs) access.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register.  Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */
		if (obj->map_and_fenceable &&
		    !i915_gem_object_fence_ok(obj, args->tiling_mode))
			ret = i915_gem_object_ggtt_unbind(obj);

		if (ret == 0) {
			if (obj->pages &&
			    obj->madv == I915_MADV_WILLNEED &&
			    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
				if (args->tiling_mode == I915_TILING_NONE)
					i915_gem_object_unpin_pages(obj);
				if (obj->tiling_mode == I915_TILING_NONE)
					i915_gem_object_pin_pages(obj);
			}

			obj->fence_dirty =
				obj->last_fenced_req ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;

			/* Force the fence to be reacquired for GTT access */
			i915_gem_release_mmap(obj);
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		if (obj->bit_17 == NULL) {
			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
					      sizeof(long), GFP_KERNEL);
		}
	} else {
		kfree(obj->bit_17);
		obj->bit_17 = NULL;
	}

err:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
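
/*
 * Userspace view of the ioctl above (illustrative sketch only; error
 * handling omitted, and the 4096 byte stride is just an assumed value):
 *
 *	struct drm_i915_gem_set_tiling set = {
 *		.handle      = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride      = 4096,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set);
 *
 * On return, set.swizzle_mode reports the bit 6 swizzle (e.g.
 * I915_BIT_6_SWIZZLE_9_10) that the CPU must apply when it writes the
 * object's pages directly.
 */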

/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	mutex_lock(&dev->struct_mutex);

	args->tiling_mode = obj->tiling_mode;
	switch (obj->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		break;
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	default:
		DRM_ERROR("unknown tiling mode\n");
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	args->phys_swizzle_mode = args->swizzle_mode;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void
i915_gem_swizzle_page(struct vm_page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}
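
/*
 * Worked example (illustrative): with bit 17 participating in the channel
 * XOR, flipping bit 17 of a page's physical address inverts the decoded
 * bit 6 for every byte in the page.  Data the GPU expects at offset 0x00
 * of a 128 byte pair is then fetched from offset 0x40 and vice versa,
 * which is exactly the 64 byte swap performed above.
 */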

void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (obj->bit_17 == NULL)
		return;

	for (i = 0; i < page_count; i++) {
		struct vm_page *page = obj->pages[i];
		char new_bit_17 = VM_PAGE_TO_PHYS(obj->pages[i]) >> 17;
		if ((new_bit_17 & 0x1) !=
		    (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
	}
}

void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
				      sizeof(long), GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

	for (i = 0; i < page_count; i++) {
		if (VM_PAGE_TO_PHYS(obj->pages[i]) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
	}
}