/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/bitops.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
/** @file i915_gem_tiling.c
 *
 * Support for managing tiling state of buffer objects.
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled.  However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y.  So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics.  This
 * is called "Channel XOR Randomization" in the MCH documentation.  The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
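
/*
 * For illustration only (the kernel never remaps CPU addresses itself):
 * with I915_BIT_6_SWIZZLE_9_10_11 in effect, a userspace software fallback
 * flips bit 6 of an offset based on the XOR of bits 9, 10 and 11, roughly:
 *
 *	addr ^= (((addr >> 9) ^ (addr >> 10) ^ (addr >> 11)) & 1) << 6;
 *
 * The other I915_BIT_6_SWIZZLE_* modes drop or add XOR terms accordingly.
 */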
/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (IS_VALLEYVIEW(dev)) {
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t dimm_c0, dimm_c1;
		dimm_c0 = I915_READ(MAD_DIMM_C0);
		dimm_c1 = I915_READ(MAD_DIMM_C1);
		dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
		dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
		/* Enable swizzling when the channels are populated with
		 * identically sized dimms.  We don't need to check the 3rd
		 * channel because no cpu with gpu attached ships in that
		 * configuration.  Also, swizzling only makes sense for 2
		 * channels anyway. */
		if (dimm_c0 == dimm_c1) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		}
	} else if (IS_GEN5(dev)) {
		/* On Ironlake, the GPU always uses the same swizzling
		 * setup, whatever the DRAM configuration.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
		uint32_t dcc;

		/* On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC.  For single-channel, neither the CPU
		 * nor the GPU do swizzling.  For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled.  The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}
		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR. "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration.  It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B    1-ch   2-ch
		 * 512  0    0    0     512    0     O
		 * 512  0    512  0     16     1008  X
		 * 512  0    0    512   16     1008  X
		 * 0    512  0    512   16     1008  X
		 * 1024 1024 1024 0     2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 */
		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
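
/*
 * Note: the swizzle modes detected above are what the set/get_tiling
 * ioctls below report to userspace, so that CPU access through mmaps can
 * match the GPU's view of tiled memory.
 */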
/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
	int tile_width;

	/* Linear is always fine */
	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (IS_GEN2(dev) ||
	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
		tile_width = 128;
	else
		tile_width = 512;

	/* check maximum stride & object size */
	/* i965+ stores the end address of the gtt mapping in the fence
	 * reg, so don't bother to check the size */
	if (INTEL_INFO(dev)->gen >= 7) {
		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
			return false;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
			return false;
	} else {
		if (stride > 8192)
			return false;

		if (IS_GEN3(dev)) {
			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
				return false;
		} else {
			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
				return false;
		}
	}

	if (stride < tile_width)
		return false;

	/* 965+ just needs multiples of tile width */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (stride & (tile_width - 1))
			return false;
		return true;
	}

	/* Pre-965 needs power of two tile widths */
	if (stride & (stride - 1))
		return false;

	return true;
}
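
/*
 * Illustrative example: with an X-tile width of 512 bytes, a stride of
 * 5120 (10 * 512) passes the gen4+ multiple-of-tile-width check above but
 * fails the pre-965 power-of-two check; 4096 satisfies both.
 */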
/* Is the current GTT allocation valid for the change in tiling? */
static bool
i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
	u32 size;

	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (INTEL_INFO(obj->base.dev)->gen >= 4)
		return true;

	if (INTEL_INFO(obj->base.dev)->gen == 3) {
		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
			return false;
	} else {
		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
			return false;
	}

	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
	if (i915_gem_obj_ggtt_size(obj) != size)
		return false;

	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
		return false;

	return true;
}
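
/*
 * Illustrative: pre-965 fence registers cover a power-of-two region that
 * must be naturally aligned, so e.g. a 1MB tiled object only passes the
 * checks above when it occupies exactly 1MB of GTT space bound at a
 * 1MB-aligned offset.
 */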
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}
	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode.  Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is updated before
		 * the next fenced (either through the GTT or by the BLT unit
		 * on older GPUs) access.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register.  Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */

		obj->map_and_fenceable =
			!i915_gem_obj_ggtt_bound(obj) ||
			(i915_gem_obj_ggtt_offset(obj) +
			 obj->base.size <= dev_priv->gtt.mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */
		if (!obj->map_and_fenceable) {
			u32 unfenced_align =
				i915_gem_get_gtt_alignment(dev, obj->base.size,
							   args->tiling_mode,
							   false);
			if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
				ret = i915_gem_object_ggtt_unbind(obj);
		}

		if (ret == 0) {
			obj->fence_dirty =
				obj->fenced_gpu_access ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;

			/* Force the fence to be reacquired for GTT access */
			i915_gem_release_mmap(obj);
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		if (obj->bit_17 == NULL) {
			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
					      sizeof(long), GFP_KERNEL);
		}
	} else {
		kfree(obj->bit_17);
		obj->bit_17 = NULL;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	mutex_lock(&dev->struct_mutex);

	args->tiling_mode = obj->tiling_mode;
	switch (obj->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		break;
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	default:
		DRM_ERROR("unknown tiling mode\n");
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void
i915_gem_swizzle_page(struct vm_page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

	/* Exchange the two 64-byte halves of each 128-byte block. */
	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}
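
/*
 * Fix up an object's pages after they have been reacquired (e.g. on the
 * pread/pwrite and get-pages paths): any page whose physical bit 17 no
 * longer matches the value recorded in obj->bit_17 has its 64-byte halves
 * swapped back into the order the GPU expects.
 */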
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (obj->bit_17 == NULL)
		return;

	for (i = 0; i < page_count; i++) {
		struct vm_page *page = obj->pages[i];
		char new_bit_17 = VM_PAGE_TO_PHYS(page) >> 17;
		if ((new_bit_17 & 0x1) !=
		    (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
	}
}
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
				      sizeof(long), GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

	/* Record the current bit 17 of each page's physical address. */
	for (i = 0; i < page_count; i++) {
		if (VM_PAGE_TO_PHYS(obj->pages[i]) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
	}
}