/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}
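
/*
 * The DMA variant below allocates one block holding both the page pointer
 * array and the matching dma_address array; dma_address simply points just
 * past the last page pointer, so freeing ttm->pages releases both at once.
 */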
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
                                          sizeof(*ttm->ttm.pages) +
                                          sizeof(*ttm->dma_address));
        ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
}
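
/*
 * Switch the kernel mapping of a single page between cached, write-combined
 * and uncached. Only implemented on X86; elsewhere it is a no-op.
 */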
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        int ret = 0;

        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */
                ret = set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                pmap_page_set_memattr((struct vm_page *)p, VM_MEMATTR_WRITE_COMBINING);
        else if (c_new == tt_uncached)
                ret = set_pages_uc(p, 1);

        return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        return 0;
}
#endif /* CONFIG_X86 */
/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        /* Roll back the pages already switched to the new state. */
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}
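
/*
 * Map a TTM_PL_FLAG_* placement mask to the matching ttm_caching_state
 * (write-combined, uncached, or the cached default) and apply it.
 */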
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
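
/*
 * Unbind and unpopulate the ttm, drop any non-persistent swap storage,
 * then hand final destruction to the backend's destroy() hook.
 */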
void ttm_tt_destroy(struct ttm_tt *ttm)
{
        if (ttm == NULL)
                return;

        ttm_tt_unbind(ttm);

        if (ttm->state == tt_unbound)
                ttm_tt_unpopulate(ttm);

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                vm_object_deallocate(ttm->swap_storage);

        ttm->swap_storage = NULL;
        ttm->func->destroy(ttm);
}
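
/*
 * Initialize a ttm_tt for a buffer of @size bytes. Only the page directory
 * is allocated here; the ttm starts out unpopulated and cached.
 */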
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                unsigned long size, uint32_t page_flags,
                struct page *dummy_read_page)
{
        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;

        ttm_tt_alloc_page_directory(ttm);
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
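
/*
 * Counterpart of ttm_tt_init: frees the page directory.
 */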
void ttm_tt_fini(struct ttm_tt *ttm)
{
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
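
/*
 * Like ttm_tt_init(), but additionally sets up the dma_address array and
 * the pages_list consumed by the DMA-aware page allocator.
 */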
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                    unsigned long size, uint32_t page_flags,
                    struct page *dummy_read_page)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        ttm_dma_tt_alloc_page_directory(ttm_dma);
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);
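
/*
 * Counterpart of ttm_dma_tt_init(). dma_address lives inside the same
 * allocation as the page directory, so freeing ttm->pages releases both.
 */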
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        drm_free_large(ttm->pages);
        ttm->pages = NULL;
        ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
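
/*
 * Tear down the translation-table binding, if any. The backend unbind()
 * hook is not allowed to fail here.
 */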
void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;

        if (ttm->state == tt_bound) {
                ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}
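
/*
 * Populate the ttm if necessary, then bind it to the location described by
 * @bo_mem through the backend bind() hook.
 */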
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
                return ret;

        ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
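
/*
 * Copy the contents of the swap object back into the (already populated)
 * ttm pages, then drop the swap object unless it is persistent.
 */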
int ttm_tt_swapin(struct ttm_tt *ttm)
{
        vm_object_t swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        VM_OBJECT_LOCK(swap_storage);
        vm_object_pip_add(swap_storage, 1);
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = (struct page *)vm_page_grab(swap_storage, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (((struct vm_page *)from_page)->valid != VM_PAGE_BITS_ALL) {
                        if (vm_pager_has_page(swap_storage, i)) {
                                if (vm_pager_get_page(swap_storage, i,
                                    (struct vm_page **)&from_page, 1) != VM_PAGER_OK) {
                                        vm_page_free((struct vm_page *)from_page);
                                        ret = -EIO;
                                        goto err_ret;
                                }
                        } else {
                                vm_page_zero_invalid((struct vm_page *)from_page, TRUE);
                        }
                }
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL)) {
                        vm_page_wakeup((struct vm_page *)from_page);
                        goto err_ret;
                }
                pmap_copy_page(VM_PAGE_TO_PHYS((struct vm_page *)from_page),
                               VM_PAGE_TO_PHYS((struct vm_page *)to_page));
                vm_page_wakeup((struct vm_page *)from_page);
        }
        vm_object_pip_wakeup(swap_storage);
        VM_OBJECT_UNLOCK(swap_storage);

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                vm_object_deallocate(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;

err_ret:
        vm_object_pip_wakeup(swap_storage);
        VM_OBJECT_UNLOCK(swap_storage);
        return ret;
}
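
/*
 * Copy the ttm pages out to a swap-backed VM object (freshly allocated
 * unless a persistent one is supplied) and unpopulate the ttm. The ttm
 * must be unbound and in the default cached state on entry.
 */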
int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
{
        vm_object_t obj;
        vm_page_t from_page, to_page;
        int i;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        if (!persistent_swap_storage) {
                obj = swap_pager_alloc(NULL,
                    IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0);
                if (obj == NULL) {
                        pr_err("Failed allocating swap storage\n");
                        return (-ENOMEM);
                }
        } else {
                obj = persistent_swap_storage;
        }

        VM_OBJECT_LOCK(obj);
        vm_object_pip_add(obj, 1);
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = (struct vm_page *)ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
                to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL |
                                               VM_ALLOC_RETRY);
                pmap_copy_page(VM_PAGE_TO_PHYS(from_page),
                               VM_PAGE_TO_PHYS(to_page));
                to_page->valid = VM_PAGE_BITS_ALL;
                vm_page_dirty(to_page);
                vm_page_wakeup(to_page);
        }
        vm_object_pip_wakeup(obj);
        VM_OBJECT_UNLOCK(obj);

        ttm_tt_unpopulate(ttm);
        ttm->swap_storage = obj;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;
}
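
/*
 * Drop the mapping/index back pointers on each page before the pages are
 * returned to the allocator. SG ttms do not own their pages, so they are
 * skipped.
 */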
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
        pgoff_t i;
        struct page **page = ttm->pages;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i) {
                (*page)->mapping = NULL;
                (*page++)->index = 0;
        }
}
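
/*
 * Release the backing pages through the driver's ttm_tt_unpopulate hook,
 * clearing per-page mapping state first.
 */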
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        if (ttm->state == tt_unpopulated)
                return;

        ttm_tt_clear_mapping(ttm);
        ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}