drm: Implement order_base_2(n)
[dragonfly.git] sys/dev/drm/include/drm/ttm/ttm_bo_driver.h
/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_driver.h 247835 2013-03-05 09:49:34Z kib $ */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <sys/tree.h>
#include <linux/workqueue.h>

/* XXX nasty hack, but does the job */
#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy,
	 * so don't call ttm_tt_destroy from the callback or you get an
	 * infinite loop.
	 */
	void (*destroy) (struct ttm_tt *ttm);
};
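
/*
 * Illustrative sketch (compiled out): the shape of a minimal
 * ttm_backend_func implementation for a hypothetical "mydrv" driver.
 * The mydrv_gart_*() helpers are assumed names for whatever mechanism
 * the driver uses to program its GART / aperture; the free is shown
 * DragonFly-style with an assumed malloc type.
 */
#if 0
static int mydrv_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	/* Map ttm->num_pages pages at the aperture page offset bo_mem->start */
	return mydrv_gart_bind(ttm->pages, ttm->num_pages, bo_mem->start);
}

static int mydrv_ttm_unbind(struct ttm_tt *ttm)
{
	mydrv_gart_unbind(ttm->num_pages);
	return 0;
}

static void mydrv_ttm_destroy(struct ttm_tt *ttm)
{
	/* Called back from ttm_tt_destroy(): free the wrapper only. */
	kfree(ttm, M_DRM);
}

static struct ttm_backend_func mydrv_backend_func = {
	.bind = mydrv_ttm_bind,
	.unbind = mydrv_ttm_unbind,
	.destroy = mydrv_ttm_destroy,
};
#endif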

#define TTM_PAGE_FLAG_WRITE		(1 << 3)
#define TTM_PAGE_FLAG_SWAPPED		(1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP	(1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC	(1 << 6)
#define TTM_PAGE_FLAG_DMA32		(1 << 7)
#define TTM_PAGE_FLAG_SG		(1 << 8)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @num_pages: Number of pages in the page array.
 * @page_flags: TTM_PAGE_FLAG_XX flags.
 * @sg: Optional scatter/gather table backing an SG object (dma-buf).
 * @glob: Pointer to the struct ttm_bo_global used for accounting.
 * @swap_storage: Pointer to the vm_object used for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct ttm_bo_device *bdev;
	struct ttm_backend_func *func;
	struct vm_page *dummy_read_page;
	struct vm_page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg;		/* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
	struct vm_object *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages
 * @pages_list: used by some page allocation backend
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
	dma_addr_t *dma_address;
	struct list_head pages_list;
};

#define TTM_MEMTYPE_FLAG_FIXED		(1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE	(1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA		(1 << 3) /* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @placement: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by
	 * @placement. If successful, @mem::mm_node should be set to a
	 * non-null value, @mem::start should be set to a value identifying
	 * the beginning of the range allocated, and the function should
	 * return zero.
	 * If the memory region cannot accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int (*get_node)(struct ttm_mem_type_manager *man,
			struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg describing the memory
	 * to be freed.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct lock io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru;
};

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt backing an object.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: pointer to a struct ttm_bo_device.
	 * @size: Size of the data needing backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct vm_page *dummy_read_page);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	void (*evict_flags) (struct ttm_buffer_object *bo,
			     struct ttm_placement *placement);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * if waiting for the GPU would be required
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 * FreeBSD: use devfs_get_cdevpriv etc.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj);
	int (*sync_obj_wait) (void *sync_obj,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);

	/* hook to notify driver about a driver move so it
	 * can do tiling things */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is not used anymore. io_mem_reserve and io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
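
/*
 * Illustrative sketch (compiled out): how a driver typically wires up
 * its struct ttm_bo_driver. All mydrv_*() callbacks are hypothetical;
 * a driver without an accelerated copy engine can leave .move NULL and
 * get the memcpy fallback mentioned above.
 */
#if 0
static struct ttm_bo_driver mydrv_bo_driver = {
	.ttm_tt_create = mydrv_ttm_tt_create,
	.ttm_tt_populate = mydrv_ttm_tt_populate,
	.ttm_tt_unpopulate = mydrv_ttm_tt_unpopulate,
	.invalidate_caches = mydrv_invalidate_caches,
	.init_mem_type = mydrv_init_mem_type,
	.evict_flags = mydrv_evict_flags,
	.move = NULL,		/* fall back to a memcpy move */
	.verify_access = mydrv_verify_access,
	.sync_obj_signaled = mydrv_fence_signaled,
	.sync_obj_wait = mydrv_fence_wait,
	.sync_obj_flush = mydrv_fence_flush,
	.sync_obj_unref = mydrv_fence_unref,
	.sync_obj_ref = mydrv_fence_ref,
	.io_mem_reserve = mydrv_io_mem_reserve,
	.io_mem_free = mydrv_io_mem_free,
};
#endif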

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {
	u_int kobj_ref;

	/**
	 * Constant after init.
	 */

	struct ttm_mem_global *mem_glob;
	struct vm_page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct lock device_list_mutex;
	struct lock lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};

#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING	0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX	1
#define TTM_BO_PRIV_FLAG_ACTIVE	2	/* Used for release sequencing */
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @fence_lock: Protects the synchronizing members on *all* bos belonging
 * to this device.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @val_seq: Current validation sequence.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	struct lock vm_lock;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	struct lock fence_lock;
	/*
	 * Protected by the vm lock.
	 */

	RB_HEAD(ttm_bo_device_buffer_objects, ttm_buffer_object) addr_space_rb;
	struct drm_mm addr_space_mm;

	/*
	 * Protected by the global:lru lock.
	 */
	struct list_head ddestroy;
	uint32_t val_seq;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
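
/*
 * Example: replace only the caching bits of a placement flag word,
 * leaving everything else untouched (TTM_PL_* come from ttm_placement.h):
 *
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
 *
 *	ttm_flag_masked(&flags, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *
 * flags now holds TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC.
 */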

/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data needing backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Initialize a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * -ENOMEM: Out of memory.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
			unsigned long size, uint32_t page_flags,
			struct vm_page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
			   unsigned long size, uint32_t page_flags,
			   struct vm_page *dummy_read_page);
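
/*
 * Illustrative sketch (compiled out): a typical ttm_tt_create backend
 * callback built on ttm_tt_init(). The "mydrv" names are hypothetical
 * and the allocation is shown DragonFly-style with an assumed malloc
 * type; M_WAITOK allocations do not fail.
 */
#if 0
static struct ttm_tt *
mydrv_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		    uint32_t page_flags, struct vm_page *dummy_read_page)
{
	struct ttm_tt *ttm;

	ttm = kmalloc(sizeof(*ttm), M_DRM, M_WAITOK | M_ZERO);
	ttm->func = &mydrv_backend_func;
	if (ttm_tt_init(ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(ttm, M_DRM);
		return NULL;
	}
	return ttm;
}
#endif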

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free the memory of the ttm_tt structure.
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct vm_page to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct vm_page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change caching policy of any default kernel mappings of
 * the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct vm_object *persistent_swap_storage);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem,
			    bool interruptible,
			    bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      uint64_t file_page_offset, bool need_dma32);
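
/*
 * Illustrative sketch (compiled out): device bring-up. glob is assumed
 * to have been obtained through a struct ttm_bo_global_ref via
 * drm_global_item_ref() beforehand; DRM_FILE_PAGE_OFFSET is a
 * driver-chosen constant and ttm_bo_init_mm() is declared in
 * ttm_bo_api.h.
 */
#if 0
	ret = ttm_bo_device_init(&mydrv->bdev, glob, &mydrv_bo_driver,
				 DRM_FILE_PAGE_OFFSET, mydrv->need_dma32);
	if (ret == 0)
		/* Add a VRAM range manager, sized in pages. */
		ret = ttm_bo_init_mm(&mydrv->bdev, TTM_PL_VRAM,
				     vram_size >> PAGE_SHIFT);
#endif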

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit validation
 * sequence number, and call this function with @use_sequence == 1 and
 * @sequence == the unique sequence number. If, upon call of this function,
 * the buffer object is already reserved, the validation sequence is checked
 * against the validation sequence of the process currently reserving the
 * buffer, and if the current validation sequence is greater than that of
 * the process holding the reservation, the function returns -EAGAIN.
 * Otherwise it sleeps waiting for the buffer to become unreserved, after
 * which it retries reserving.
 * The caller should, when receiving an -EAGAIN error, release all its
 * buffer reservations, wait for @bo to become unreserved, and then rerun
 * the validation with the same validation sequence. This procedure will
 * always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EDEADLK: Bo already reserved using @sequence. This error code will only
 * be returned if @use_sequence is set to true.
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence);
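
/*
 * Illustrative sketch (compiled out) of the backoff protocol described
 * above. Real code uses the helpers in ttm_execbuf_util.h; the list
 * walk, the unreserve_all() helper and the seq update are schematic.
 */
#if 0
	seq = ++bdev->val_seq;	/* unique validation sequence number */
retry:
	list_for_each_entry(entry, &exec_list, head) {
		ret = ttm_bo_reserve(entry->bo, true, false, true, seq);
		if (ret == -EAGAIN) {
			/* Possible deadlock: back off and try again. */
			unreserve_all(&exec_list);
			ret = ttm_bo_reserve_slowpath(entry->bo, true, seq);
			if (ret != 0)
				break;
			ttm_bo_unreserve(entry->bo);
			goto retry;
		}
		if (ret != 0)
			break;
	}
#endif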

/**
 * ttm_bo_reserve_slowpath_nolru:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @sequence: Set (@bo)->sequence to this value after lock
 *
 * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve_slowpath.
 */
extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
					 bool interruptible,
					 uint32_t sequence);

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @sequence: Set (@bo)->sequence to this value after lock
 *
 * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
				   bool interruptible, uint32_t sequence);

/**
 * ttm_bo_reserve_nolru:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EDEADLK: Bo already reserved using @sequence. This error code will only
 * be returned if @use_sequence is set to true.
 */
extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
				bool interruptible,
				bool no_wait, bool use_sequence,
				uint32_t sequence);

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unreserve_locked
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 * Needs to be called with struct ttm_bo_global::lru_lock held.
 */
extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);

/*
 * ttm_bo_util.c
 */

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool evict, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem);
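
/*
 * Illustrative sketch (compiled out): the common shape of a driver
 * move callback, trying an accelerated blit first and falling back to
 * ttm_bo_move_memcpy(). mydrv_copy() is a hypothetical helper.
 */
#if 0
static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
			 bool interruptible, bool no_wait_gpu,
			 struct ttm_mem_reg *new_mem)
{
	if (mydrv_copy(bo, evict, no_wait_gpu, new_mem) == 0)
		return 0;
	/* No accelerated path, or it failed: plain CPU copy. */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
#endif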

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     bool evict, bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map.
 *
 * Utility function that returns the memory attribute that should be used
 * for setting up a PTE with the caching model indicated by @caching_flags.
 */
extern vm_memattr_t ttm_io_prot(uint32_t caching_flags);

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
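
/*
 * Illustrative sketch (compiled out): an init_mem_type callback that
 * plugs the generic range manager above into the TT memory type.
 * Placement constants come from ttm_placement.h.
 */
#if 0
static int mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			       struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
#endif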

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needing backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
					struct agp_bridge_data *bridge,
					unsigned long size, uint32_t page_flags,
					struct vm_page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

int ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
			     struct ttm_buffer_object *b);
RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
	     ttm_bo_cmp_rb_tree_items);

#endif