/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_driver.h 247835 2013-03-05 09:49:34Z kib $ */

#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>

#include <linux/workqueue.h>
#include <linux/reservation.h>

/* XXX nasty hack, but does the job */
#define RB_ROOT(head)	(head)->rbh_root

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy(),
	 * so don't call ttm_tt_destroy() from within the callback or an
	 * infinite loop will result.
	 */
	void (*destroy) (struct ttm_tt *ttm);
};
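
/*
 * Illustrative sketch (not part of this API): a driver typically supplies
 * a struct ttm_backend_func like the one below. foo_gart_insert() and
 * foo_gart_remove() are hypothetical helpers that program and tear down
 * the device's GART/aperture entries.
 *
 *	static int foo_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 *	{
 *		// Program one aperture entry per system page, starting at
 *		// the page offset given by bo_mem->start.
 *		return foo_gart_insert(ttm->pages, ttm->num_pages,
 *				       bo_mem->start);
 *	}
 *
 *	static int foo_ttm_unbind(struct ttm_tt *ttm)
 *	{
 *		foo_gart_remove(ttm->num_pages);
 *		return 0;
 *	}
 *
 *	static void foo_ttm_destroy(struct ttm_tt *ttm)
 *	{
 *		// Free the driver's ttm_tt wrapper; do NOT call
 *		// ttm_tt_destroy() from here.
 *		ttm_tt_fini(ttm);
 *		kfree(ttm);
 *	}
 *
 *	static struct ttm_backend_func foo_backend_func = {
 *		.bind = foo_ttm_bind,
 *		.unbind = foo_ttm_unbind,
 *		.destroy = foo_ttm_destroy,
 *	};
 */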

#define TTM_PAGE_FLAG_WRITE		(1 << 3)
#define TTM_PAGE_FLAG_SWAPPED		(1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP	(1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC	(1 << 6)
#define TTM_PAGE_FLAG_DMA32		(1 << 7)
#define TTM_PAGE_FLAG_SG		(1 << 8)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @num_pages: Number of pages in the page array.
 * @swap_storage: Pointer to the vm_object used for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_tt {
	struct ttm_bo_device *bdev;
	struct ttm_backend_func *func;
	struct vm_page *dummy_read_page;
	struct vm_page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg;	/* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
	struct vm_object *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages.
 * @pages_list: Used by some page allocation backends.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
	dma_addr_t *dma_address;
	struct list_head pages_list;
};

#define TTM_MEMTYPE_FLAG_FIXED		(1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE	(1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA		(1 << 3)	/* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @placement: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by
	 * @placement. If successful, @mem::mm_node should be set to a
	 * non-null value, @mem::start should be set to a value identifying
	 * the beginning of the range allocated, and the function should
	 * return zero.
	 * If the memory type manager cannot accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int (*get_node)(struct ttm_mem_type_manager *man,
			struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg whose resources to free.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
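
/*
 * Illustrative sketch: the common case is covered by ttm_bo_manager_func
 * (declared near the end of this file), which wraps a drm_mm range
 * allocator. A driver with a trivial, fixed-offset memory type could
 * instead supply something like the following hypothetical implementation;
 * only the non-NULL value of mem->mm_node matters to the TTM core:
 *
 *	static int foo_man_init(struct ttm_mem_type_manager *man,
 *				unsigned long p_size)
 *	{
 *		man->priv = NULL;	// no private bookkeeping needed
 *		return 0;
 *	}
 *
 *	static int foo_man_takedown(struct ttm_mem_type_manager *man)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_man_get_node(struct ttm_mem_type_manager *man,
 *				    struct ttm_buffer_object *bo,
 *				    struct ttm_placement *placement,
 *				    struct ttm_mem_reg *mem)
 *	{
 *		// Everything lives at a fixed offset; mm_node only needs
 *		// to be non-NULL so TTM considers the allocation valid.
 *		mem->mm_node = (void *)1;
 *		mem->start = 0;
 *		return 0;
 *	}
 *
 *	static void foo_man_put_node(struct ttm_mem_type_manager *man,
 *				     struct ttm_mem_reg *mem)
 *	{
 *		mem->mm_node = NULL;
 *	}
 *
 *	static void foo_man_debug(struct ttm_mem_type_manager *man,
 *				  const char *prefix)
 *	{
 *	}
 *
 *	const struct ttm_mem_type_manager_func foo_manager_func = {
 *		.init = foo_man_init,
 *		.takedown = foo_man_takedown,
 *		.get_node = foo_man_get_node,
 *		.put_node = foo_man_put_node,
 *		.debug = foo_man_debug,
 *	};
 */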

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */
struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct lock io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru;
};

/**
 * struct ttm_bo_driver
 *
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer to a new location.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */
struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: pointer to a struct ttm_bo_device:
	 * @size: Size of the data needed backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct vm_page *dummy_read_page);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */
	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags.
	 */
	void (*evict_flags) (struct ttm_buffer_object *bo,
			     struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * (On Linux this hook also receives the struct file trying to
	 * access the object; on FreeBSD, use devfs_get_cdevpriv() etc.
	 * instead.)
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo);

	/*
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj);
	int (*sync_obj_wait) (void *sync_obj,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);

	/* Hook to notify the driver about a bo move, so it
	 * can do tiling things. */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);

	/* Notify the driver that we're about to take a fault on this bo
	 * and have reserved it. */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to swap out this bo.
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer used. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
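
/*
 * Illustrative sketch of an io_mem_reserve() implementation for a
 * hypothetical device whose VRAM aperture starts at foo_vram_base
 * (an assumed driver-private PCI base address); the pattern follows
 * what typical TTM drivers do:
 *
 *	static int foo_io_mem_reserve(struct ttm_bo_device *bdev,
 *				      struct ttm_mem_reg *mem)
 *	{
 *		struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 *
 *		mem->bus.addr = NULL;
 *		mem->bus.offset = 0;
 *		mem->bus.size = mem->num_pages << PAGE_SHIFT;
 *		mem->bus.base = 0;
 *		mem->bus.is_iomem = false;
 *		if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
 *			return -EINVAL;
 *		switch (mem->mem_type) {
 *		case TTM_PL_SYSTEM:
 *			return 0;	// system memory: nothing to reserve
 *		case TTM_PL_VRAM:
 *			mem->bus.offset = mem->start << PAGE_SHIFT;
 *			mem->bus.base = foo_vram_base;
 *			mem->bus.is_iomem = true;
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */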

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */
struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

	struct ttm_mem_global *mem_glob;
	struct vm_page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct lock device_list_mutex;
	struct lock lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};

#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING	 0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX	 1
#define TTM_BO_PRIV_FLAG_ACTIVE	 2	/* Used for release sequencing */

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @fence_lock: Protects the synchronizing members on *all* bos belonging
 * to this device.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @val_seq: Current validation sequence.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 */
struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	struct lock fence_lock;

	/*
	 * Protected by the vm lock.
	 */
	RB_HEAD(ttm_bo_device_buffer_objects, ttm_buffer_object) addr_space_rb;
	struct drm_mm addr_space_mm;

	/*
	 * Protected by the global:lru lock.
	 */
	struct list_head ddestroy;
	uint32_t val_seq;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */
	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */
	struct delayed_work wq;

	bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
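
/*
 * Example: update only the caching bits of a placement flag word,
 * leaving all other bits intact:
 *
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
 *
 *	ttm_flag_masked(&flags, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *	// flags is now TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC
 */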

/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: pointer to a struct ttm_bo_device:
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * -ENOMEM: Out of memory.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		       unsigned long size, uint32_t page_flags,
		       struct vm_page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
			   unsigned long size, uint32_t page_flags,
			   struct vm_page *dummy_read_page);
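
/*
 * Illustrative sketch: a driver's ttm_tt_create() hook typically wraps
 * ttm_tt_init() like this. foo_ttm_tt, foo_backend_func (see the backend
 * sketch above) and the kzalloc()/kfree() usage are assumptions for the
 * example, not part of this API:
 *
 *	struct foo_ttm_tt {
 *		struct ttm_tt ttm;
 *		// driver-private per-tt state here
 *	};
 *
 *	static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
 *						unsigned long size,
 *						uint32_t page_flags,
 *						struct vm_page *dummy_read_page)
 *	{
 *		struct foo_ttm_tt *tt;
 *
 *		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *		if (tt == NULL)
 *			return NULL;
 *		tt->ttm.func = &foo_backend_func;
 *		if (ttm_tt_init(&tt->ttm, bdev, size, page_flags,
 *				dummy_read_page)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */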

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free the memory of the ttm_tt structure.
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

/**
 * ttm_tt_bind
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_swapin
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct vm_page:s to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cached to uncached or write-combined.
 */
extern void ttm_tt_cache_flush(struct vm_page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change caching policy of any default kernel mappings of
 * the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct vm_object *persistent_swap_storage);

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem,
			    bool interruptible,
			    bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
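
/*
 * Typical pairing (illustrative): space obtained with ttm_bo_mem_space()
 * is handed back with ttm_bo_mem_put() if the subsequent move fails.
 * foo_do_move() is a hypothetical stand-in for the driver's move step:
 *
 *	struct ttm_mem_reg mem;
 *	int ret;
 *
 *	mem.num_pages = bo->num_pages;
 *	mem.size = mem.num_pages << PAGE_SHIFT;
 *	mem.page_alignment = bo->mem.page_alignment;
 *	ret = ttm_bo_mem_space(bo, placement, &mem, true, false);
 *	if (ret)
 *		return ret;
 *	ret = foo_do_move(bo, &mem);
 *	if (ret)
 *		ttm_bo_mem_put(bo, &mem);
 *	return ret;
 */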

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      uint64_t file_page_offset, bool need_dma32);

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_ticket: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Locks a buffer object for validation. (Or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction,
 * (typically execbuf), should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_ticket == 1 and @sequence == the unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * the operation.
 * The caller should, when receiving an -EAGAIN error
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_ticket == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EDEADLK: Bo already reserved using @sequence. This error code will only
 * be returned if @use_ticket is set to true.
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_ticket,
			  struct ww_acquire_ctx *ticket);
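
/*
 * Minimal usage sketch: reserve a single buffer, work on it under
 * reservation, then unreserve. With use_ticket == false and a NULL
 * ticket, the deadlock-avoidance backoff described above does not apply:
 *
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	// bo is now off the lru lists and safe to validate / map
 *	ttm_bo_unreserve(bo);
 */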

/**
 * ttm_bo_reserve_slowpath_nolru:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve_slowpath.
 */
extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
					 bool interruptible,
					 struct ww_acquire_ctx *ticket);

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
				   bool interruptible,
				   struct ww_acquire_ctx *ticket);

/**
 * ttm_bo_reserve_nolru:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_ticket: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if use_ticket == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EDEADLK: Bo already reserved using @sequence. This error code will only
 * be returned if @use_ticket is set to true.
 */
extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
				bool interruptible,
				bool no_wait, bool use_ticket,
				struct ww_acquire_ctx *ticket);

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unreserve_ticket
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ticket: ww_acquire_ctx used for reserving
 *
 * Unreserve a previous reservation of @bo made with @ticket.
 */
extern void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
				    struct ww_acquire_ctx *ticket);

/**
 * ttm_bo_unreserve_ticket_locked
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ticket: ww_acquire_ctx used for reserving, or NULL
 *
 * Unreserve a previous reservation of @bo made with @ticket.
 * Needs to be called with struct ttm_bo_global::lru_lock held.
 */
extern void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo,
					   struct ww_acquire_ctx *ticket);

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem);

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 *
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 *
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool evict, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     bool evict, bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
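
/*
 * Illustrative sketch of a driver move() hook dispatching between the
 * helpers above. foo_copy() is a hypothetical stand-in for a DMA-engine
 * blit that hands back a driver sync object on success:
 *
 *	static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
 *			       bool interruptible, bool no_wait_gpu,
 *			       struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *		void *sync_obj;
 *
 *		// TT -> system: just unbind the aperture pages.
 *		if (old_mem->mem_type == TTM_PL_TT &&
 *		    new_mem->mem_type == TTM_PL_SYSTEM)
 *			return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);
 *
 *		if (foo_copy(bo, old_mem, new_mem, &sync_obj) == 0)
 *			return ttm_bo_move_accel_cleanup(bo, sync_obj, evict,
 *							 no_wait_gpu, new_mem);
 *
 *		// No accelerated path: fall back to a CPU copy.
 *		return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	}
 */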

/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags, TTM_PL_FLAG_XX, of the map.
 *
 * Utility function that returns the memory attribute (vm_memattr_t) that
 * should be used for setting up a PTE with the caching model indicated
 * by @caching_flags.
 */
extern vm_memattr_t ttm_io_prot(uint32_t caching_flags);

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
					struct agp_bridge_data *bridge,
					unsigned long size, uint32_t page_flags,
					struct vm_page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

int ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
			     struct ttm_buffer_object *b);
RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
	     ttm_bo_cmp_rb_tree_items);

#endif /* _TTM_BO_DRIVER_H_ */