From 6f486c69759966097af0f0641bb6a06e08f9c752 Mon Sep 17 00:00:00 2001 From: =?utf8?q?Fran=C3=A7ois=20Tigeot?= Date: Sat, 14 Sep 2013 19:47:40 +0200 Subject: [PATCH] drm: Sync with FreeBSD * As of commit r255045 (2013-08-30): 'u_long' is consistently spelled 'unsigned long' in this file. Fix it. * Some of the changes are bug fixes, including a few memory leaks * Others are necessary groundwork for the Radeon KMS driver * Some new files were imported from Linux 3.8-rc3 * Support for E-DDC has been added * Many atomic routines were revisited to make them more compatible with the Linux APIs and run-time behavior --- sys/bus/pci/pcireg.h | 1 + sys/conf/files | 1 + sys/dev/drm/drm/Makefile | 2 + sys/dev/drm/drmP.h | 40 +++++- sys/dev/drm/drm_atomic.h | 90 ++++++------- sys/dev/drm/drm_buffer.c | 180 +++++++++++++++++++++++++ sys/dev/drm/drm_buffer.h | 148 ++++++++++++++++++++ sys/dev/drm/drm_context.c | 6 +- sys/dev/drm/drm_core.h | 37 +++++ sys/dev/drm/drm_crtc.c | 5 + sys/dev/drm/drm_crtc.h | 2 +- sys/dev/drm/drm_crtc_helper.h | 4 +- sys/dev/drm/drm_dp_helper.c | 146 ++++++++++++++++++++ sys/dev/drm/drm_dp_helper.h | 128 ++++++++++++++++-- sys/dev/drm/drm_dp_iic_helper.c | 18 +-- sys/dev/drm/drm_drv.c | 128 ++++++++++++++---- sys/dev/drm/drm_edid.c | 15 ++- sys/dev/drm/drm_fb_helper.c | 5 +- sys/dev/drm/drm_fixed.h | 71 ++++++++++ sys/dev/drm/drm_gem.c | 38 +++++- sys/dev/drm/drm_gem_names.c | 6 +- sys/dev/drm/drm_ioctl.c | 5 +- sys/dev/drm/drm_irq.c | 6 +- sys/dev/drm/drm_linux_list.h | 5 + sys/dev/drm/drm_pci.c | 50 +++++++ sys/dev/drm/drm_sysctl.c | 24 +++- sys/dev/drm/foo.txt | 1 + sys/dev/drm/i915/i915_gem.c | 17 ++- sys/dev/drm/i915/i915_gem_execbuffer.c | 2 +- sys/dev/drm/i915/intel_crt.c | 2 +- sys/dev/drm/i915/intel_display.c | 12 +- sys/dev/drm/i915/intel_dp.c | 16 +-- sys/dev/drm/i915/intel_drv.h | 2 +- sys/dev/drm/i915/intel_hdmi.c | 2 +- sys/dev/drm/i915/intel_lvds.c | 2 +- sys/dev/drm/i915/intel_panel.c | 2 +- sys/dev/drm/i915/intel_sdvo.c | 6 +- sys/dev/drm/i915/intel_tv.c | 2 +- sys/dev/drm/mach64/mach64_irq.c | 2 +- sys/dev/drm/mga/mga_irq.c | 2 +- sys/dev/drm/r128/r128_irq.c | 2 +- sys/dev/drm/ttm/ttm_bo.c | 112 +++++++++++---- sys/dev/drm/ttm/ttm_bo_driver.h | 61 +++++---- sys/dev/drm/ttm/ttm_bo_util.c | 30 +++-- sys/dev/drm/ttm/ttm_bo_vm.c | 53 +++++++- sys/dev/drm/ttm/ttm_execbuf_util.c | 72 +++++----- sys/dev/drm/ttm/ttm_page_alloc.c | 9 +- sys/dev/drm/ttm/ttm_tt.c | 4 +- 48 files changed, 1298 insertions(+), 276 deletions(-) create mode 100644 sys/dev/drm/drm_buffer.c create mode 100644 sys/dev/drm/drm_buffer.h create mode 100644 sys/dev/drm/drm_core.h create mode 100644 sys/dev/drm/drm_dp_helper.c create mode 100644 sys/dev/drm/drm_fixed.h create mode 100644 sys/dev/drm/foo.txt diff --git a/sys/bus/pci/pcireg.h b/sys/bus/pci/pcireg.h index a0f42bcfc6..f3ad047c59 100644 --- a/sys/bus/pci/pcireg.h +++ b/sys/bus/pci/pcireg.h @@ -706,6 +706,7 @@ typedef u_int32_t pcireg_t; /* ~typical configuration space */ /* PCI Express link capabilities, 32bits */ #define PCIER_LINKCAP 0x0c +#define PCIER_LINK_CAP2 0x2c #define PCIEM_LNKCAP_SPEED_MASK 0x000f /* Supported link speeds */ #define PCIEM_LNKCAP_SPEED_2_5 0x1 /* 2.5GT/s */ #define PCIEM_LNKCAP_SPEED_5 0x2 /* 5.0GT/s and 2.5GT/s */ diff --git a/sys/conf/files b/sys/conf/files index e6dbd8d40b..74c767ec28 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -1902,6 +1902,7 @@ dev/drm/drm_context.c optional drm dev/drm/drm_crtc.c optional drm dev/drm/drm_crtc_helper.c optional drm dev/drm/drm_dma.c optional drm 
+dev/drm/drm_dp_helper.c optional drm dev/drm/drm_dp_iic_helper.c optional drm dev/drm/drm_drawable.c optional drm dev/drm/drm_drv.c optional drm diff --git a/sys/dev/drm/drm/Makefile b/sys/dev/drm/drm/Makefile index f55779679f..6f18393030 100644 --- a/sys/dev/drm/drm/Makefile +++ b/sys/dev/drm/drm/Makefile @@ -5,10 +5,12 @@ SRCS = \ drm_agpsupport.c \ drm_auth.c \ drm_bufs.c \ + drm_buffer.c \ drm_context.c \ drm_crtc.c \ drm_crtc_helper.c \ drm_dma.c \ + drm_dp_helper.c \ drm_dp_iic_helper.c \ drm_drawable.c \ drm_drv.c \ diff --git a/sys/dev/drm/drmP.h b/sys/dev/drm/drmP.h index 6e46b7ae2a..64c4998097 100644 --- a/sys/dev/drm/drmP.h +++ b/sys/dev/drm/drmP.h @@ -302,6 +302,9 @@ typedef int8_t s8; #define DRM_HZ hz #define DRM_UDELAY(udelay) DELAY(udelay) +#define DRM_MDELAY(msecs) do { int loops = (msecs); \ + while (loops--) DELAY(1000); \ + } while (0) #define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */ #define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \ @@ -699,6 +702,7 @@ struct drm_gem_object { struct drm_driver_info { int (*load)(struct drm_device *, unsigned long flags); + int (*use_msi)(struct drm_device *, unsigned long flags); int (*firstopen)(struct drm_device *); int (*open)(struct drm_device *, struct drm_file *); void (*preclose)(struct drm_device *, struct drm_file *file_priv); @@ -737,6 +741,8 @@ struct drm_driver_info { int (*gem_init_object)(struct drm_gem_object *obj); void (*gem_free_object)(struct drm_gem_object *obj); + int (*gem_open_object)(struct drm_gem_object *, struct drm_file *); + void (*gem_close_object)(struct drm_gem_object *, struct drm_file *); struct cdev_pager_ops *gem_pager_ops; @@ -826,8 +832,10 @@ struct drm_device { struct drm_driver_info *driver; drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */ - u_int16_t pci_device; /* PCI device id */ - u_int16_t pci_vendor; /* PCI vendor id */ + uint16_t pci_device; /* PCI device id */ + uint16_t pci_vendor; /* PCI vendor id */ + uint16_t pci_subdevice; /* PCI subsystem device id */ + uint16_t pci_subvendor; /* PCI subsystem vendor id */ char *unique; /* Unique identifier: e.g., busid */ int unique_len; /* Length of unique field */ @@ -899,7 +907,7 @@ struct drm_device { drm_agp_head_t *agp; drm_sg_mem_t *sg; /* Scatter gather memory */ - atomic_t *ctx_bitmap; + unsigned long *ctx_bitmap; void *dev_private; unsigned int agp_buffer_token; drm_local_map_t *agp_buffer_map; @@ -907,7 +915,7 @@ struct drm_device { struct drm_minor *control; /**< Control node for card */ struct drm_minor *primary; /**< render type primary screen head */ - void *drm_ttm_bo; + void *drm_ttm_bdev; struct unrhdr *drw_unrhdr; /* RB tree of drawable infos */ RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head; @@ -1314,6 +1322,8 @@ void drm_gem_pager_dtr(void *obj); struct ttm_bo_device; int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **obj_res, int nprot); +struct ttm_buffer_object; +void ttm_bo_release_mmap(struct ttm_buffer_object *bo); void drm_device_lock_mtx(struct drm_device *dev); void drm_device_unlock_mtx(struct drm_device *dev); @@ -1426,5 +1436,27 @@ do { \ #define VM_OBJECT_WLOCK(object) VM_OBJECT_LOCK(object) #define VM_OBJECT_WUNLOCK(object) VM_OBJECT_UNLOCK(object) +/* Error codes conversion from Linux to FreeBSD. */ +/* XXXKIB what is the right code for EREMOTEIO on FreeBSD? 
*/ +#define EREMOTEIO ENXIO +#define ERESTARTSYS ERESTART + +#define PCI_VENDOR_ID_APPLE 0x106b +#define PCI_VENDOR_ID_ASUSTEK 0x1043 +#define PCI_VENDOR_ID_ATI 0x1002 +#define PCI_VENDOR_ID_DELL 0x1028 +#define PCI_VENDOR_ID_HP 0x103c +#define PCI_VENDOR_ID_IBM 0x1014 +#define PCI_VENDOR_ID_INTEL 0x8086 +#define PCI_VENDOR_ID_SERVERWORKS 0x1166 +#define PCI_VENDOR_ID_SONY 0x104d +#define PCI_VENDOR_ID_VIA 0x1106 + +#define DRM_PCIE_SPEED_25 1 +#define DRM_PCIE_SPEED_50 2 +#define DRM_PCIE_SPEED_80 4 + +extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); + #endif /* __KERNEL__ */ #endif /* _DRM_P_H_ */ diff --git a/sys/dev/drm/drm_atomic.h b/sys/dev/drm/drm_atomic.h index bcd595830f..93619d84ee 100644 --- a/sys/dev/drm/drm_atomic.h +++ b/sys/dev/drm/drm_atomic.h @@ -7,6 +7,7 @@ /*- * Copyright 2004 Eric Anholt + * Copyright 2013 Jung-uk Kim * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -28,68 +29,57 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * - * $FreeBSD: src/sys/dev/drm2/drm_atomic.h,v 1.1 2012/05/22 11:07:44 kib Exp $ + * $FreeBSD: head/sys/dev/drm2/drm_atomic.h 255042 2013-08-29 20:51:12Z jkim $ */ -/* Many of these implementations are rather fake, but good enough. */ +typedef u_int atomic_t; +typedef uint64_t atomic64_t; -typedef u_int32_t atomic_t; +#define BITS_PER_LONG (sizeof(long) * NBBY) +#define BITS_TO_LONGS(x) howmany(x, BITS_PER_LONG) -#define atomic_set(p, v) (*(p) = (v)) -#define atomic_read(p) (*(p)) -#define atomic_inc(p) atomic_add_int(p, 1) -#define atomic_dec(p) atomic_subtract_int(p, 1) -#define atomic_add(n, p) atomic_add_int(p, n) -#define atomic_sub(n, p) atomic_subtract_int(p, n) +#define atomic_read(p) (*(volatile u_int *)(p)) +#define atomic_set(p, v) do { *(u_int *)(p) = (v); } while (0) -static __inline atomic_t -test_and_set_bit(int b, volatile void *p) -{ - unsigned int m = 1<> 5), 1 << (b & 0x1f)); -} +#define atomic_xchg(p, v) atomic_swap_int(p, v) +#define atomic64_xchg(p, v) atomic_swap_64(p, v) -static __inline void -set_bit(int b, volatile void *p) -{ - atomic_set_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f)); -} +#define __bit_word(b) ((b) / BITS_PER_LONG) +#define __bit_mask(b) (1UL << (b) % BITS_PER_LONG) +#define __bit_addr(p, b) ((volatile u_long *)(p) + __bit_word(b)) -static __inline int -test_bit(int b, volatile void *p) -{ - return ((volatile int *)p)[b >> 5] & (1 << (b & 0x1f)); -} +#define clear_bit(b, p) \ + atomic_clear_long(__bit_addr(p, b), __bit_mask(b)) +#define set_bit(b, p) \ + atomic_set_long(__bit_addr(p, b), __bit_mask(b)) +#define test_bit(b, p) \ + ((*__bit_addr(p, b) & __bit_mask(b)) != 0) -static __inline int -find_first_zero_bit(volatile void *p, int max) +static __inline u_long +find_first_zero_bit(const u_long *p, u_long max) { - int b; - volatile int *ptr = (volatile int *)p; + u_long i, n; - for (b = 0; b < max; b += 32) { - if (ptr[b >> 5] != ~0) { - for (;;) { - if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0) - return b; - b++; - } - } + KASSERT(max % BITS_PER_LONG == 0, ("invalid bitmap size %lu", max)); + for (i = 0; i < max / BITS_PER_LONG; i++) { + n = ~p[i]; + if (n != 0) + return (i * BITS_PER_LONG + ffsl(n) - 1); } - return max; + return (max); } - -#define BITS_TO_LONGS(x) (howmany((x), NBBY * sizeof(long))) diff --git a/sys/dev/drm/drm_buffer.c b/sys/dev/drm/drm_buffer.c new file mode 100644 index 0000000000..9fd9eb599d --- /dev/null +++ 
b/sys/dev/drm/drm_buffer.c @@ -0,0 +1,180 @@ +/************************************************************************** + * + * Copyright 2010 Pauli Nieminen. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * $FreeBSD: head/sys/dev/drm2/drm_buffer.c 254794 2013-08-24 16:14:20Z dumbbell $ + **************************************************************************/ +/* + * Multipart buffer for coping data which is larger than the page size. + * + * Authors: + * Pauli Nieminen + */ + +#include + +/** + * Allocate the drm buffer object. + * + * buf: Pointer to a pointer where the object is stored. + * size: The number of bytes to allocate. + */ +int drm_buffer_alloc(struct drm_buffer **buf, int size) +{ + int nr_pages = size / PAGE_SIZE + 1; + int idx; + + /* Allocating pointer table to end of structure makes drm_buffer + * variable sized */ + *buf = kmalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *), + DRM_MEM_DRIVER, M_ZERO | M_WAITOK); + + if (*buf == NULL) { + DRM_ERROR("Failed to allocate drm buffer object to hold" + " %d bytes in %d pages.\n", + size, nr_pages); + return -ENOMEM; + } + + (*buf)->size = size; + + for (idx = 0; idx < nr_pages; ++idx) { + + (*buf)->data[idx] = + kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE), + DRM_MEM_DRIVER, M_WAITOK); + + + if ((*buf)->data[idx] == NULL) { + DRM_ERROR("Failed to allocate %dth page for drm" + " buffer with %d bytes and %d pages.\n", + idx + 1, size, nr_pages); + goto error_out; + } + + } + + return 0; + +error_out: + + /* Only last element can be null pointer so check for it first. */ + if ((*buf)->data[idx]) + drm_free((*buf)->data[idx], DRM_MEM_DRIVER); + + for (--idx; idx >= 0; --idx) + drm_free((*buf)->data[idx], DRM_MEM_DRIVER); + + drm_free(*buf, DRM_MEM_DRIVER); + return -ENOMEM; +} + +/** + * Copy the user data to the begin of the buffer and reset the processing + * iterator. + * + * user_data: A pointer the data that is copied to the buffer. + * size: The Number of bytes to copy. 
+ */ +int drm_buffer_copy_from_user(struct drm_buffer *buf, + void __user *user_data, int size) +{ + int nr_pages = size / PAGE_SIZE + 1; + int idx; + + if (size > buf->size) { + DRM_ERROR("Requesting to copy %d bytes to a drm buffer with" + " %d bytes space\n", + size, buf->size); + return -EFAULT; + } + + for (idx = 0; idx < nr_pages; ++idx) { + + if (DRM_COPY_FROM_USER(buf->data[idx], + (char *)user_data + idx * PAGE_SIZE, + min(PAGE_SIZE, size - idx * PAGE_SIZE))) { + DRM_ERROR("Failed to copy user data (%p) to drm buffer" + " (%p) %dth page.\n", + user_data, buf, idx); + return -EFAULT; + + } + } + buf->iterator = 0; + return 0; +} + +/** + * Free the drm buffer object + */ +void drm_buffer_free(struct drm_buffer *buf) +{ + + if (buf != NULL) { + + int nr_pages = buf->size / PAGE_SIZE + 1; + int idx; + for (idx = 0; idx < nr_pages; ++idx) + kfree(buf->data[idx], DRM_MEM_DRIVER); + + kfree(buf, DRM_MEM_DRIVER); + } +} + +/** + * Read an object from buffer that may be split to multiple parts. If object + * is not split function just returns the pointer to object in buffer. But in + * case of split object data is copied to given stack object that is suplied + * by caller. + * + * The processing location of the buffer is also advanced to the next byte + * after the object. + * + * objsize: The size of the objet in bytes. + * stack_obj: A pointer to a memory location where object can be copied. + */ +void *drm_buffer_read_object(struct drm_buffer *buf, + int objsize, void *stack_obj) +{ + int idx = drm_buffer_index(buf); + int page = drm_buffer_page(buf); + void *obj = NULL; + + if (idx + objsize <= PAGE_SIZE) { + obj = &buf->data[page][idx]; + } else { + /* The object is split which forces copy to temporary object.*/ + int beginsz = PAGE_SIZE - idx; + memcpy(stack_obj, &buf->data[page][idx], beginsz); + + memcpy((char *)stack_obj + beginsz, &buf->data[page + 1][0], + objsize - beginsz); + + obj = stack_obj; + } + + drm_buffer_advance(buf, objsize); + return obj; +} diff --git a/sys/dev/drm/drm_buffer.h b/sys/dev/drm/drm_buffer.h new file mode 100644 index 0000000000..6625789b49 --- /dev/null +++ b/sys/dev/drm/drm_buffer.h @@ -0,0 +1,148 @@ +/************************************************************************** + * + * Copyright 2010 Pauli Nieminen. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * $FreeBSD: head/sys/dev/drm2/drm_buffer.h 254794 2013-08-24 16:14:20Z dumbbell $ + **************************************************************************/ +/* + * Multipart buffer for coping data which is larger than the page size. + * + * Authors: + * Pauli Nieminen + */ + +#ifndef _DRM_BUFFER_H_ +#define _DRM_BUFFER_H_ + +#include + +struct drm_buffer { + int iterator; + int size; + char *data[]; +}; + + +/** + * Return the index of page that buffer is currently pointing at. + */ +static inline int drm_buffer_page(struct drm_buffer *buf) +{ + return buf->iterator / PAGE_SIZE; +} +/** + * Return the index of the current byte in the page + */ +static inline int drm_buffer_index(struct drm_buffer *buf) +{ + return buf->iterator & (PAGE_SIZE - 1); +} +/** + * Return number of bytes that is left to process + */ +static inline int drm_buffer_unprocessed(struct drm_buffer *buf) +{ + return buf->size - buf->iterator; +} + +/** + * Advance the buffer iterator number of bytes that is given. + */ +static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes) +{ + buf->iterator += bytes; +} + +/** + * Allocate the drm buffer object. + * + * buf: A pointer to a pointer where the object is stored. + * size: The number of bytes to allocate. + */ +extern int drm_buffer_alloc(struct drm_buffer **buf, int size); + +/** + * Copy the user data to the begin of the buffer and reset the processing + * iterator. + * + * user_data: A pointer the data that is copied to the buffer. + * size: The Number of bytes to copy. + */ +extern int drm_buffer_copy_from_user(struct drm_buffer *buf, + void __user *user_data, int size); + +/** + * Free the drm buffer object + */ +extern void drm_buffer_free(struct drm_buffer *buf); + +/** + * Read an object from buffer that may be split to multiple parts. If object + * is not split function just returns the pointer to object in buffer. But in + * case of split object data is copied to given stack object that is suplied + * by caller. + * + * The processing location of the buffer is also advanced to the next byte + * after the object. + * + * objsize: The size of the objet in bytes. + * stack_obj: A pointer to a memory location where object can be copied. + */ +extern void *drm_buffer_read_object(struct drm_buffer *buf, + int objsize, void *stack_obj); + +/** + * Returns the pointer to the dword which is offset number of elements from the + * current processing location. + * + * Caller must make sure that dword is not split in the buffer. This + * requirement is easily met if all the sizes of objects in buffer are + * multiples of dword and PAGE_SIZE is multiple dword. + * + * Call to this function doesn't change the processing location. + * + * offset: The index of the dword relative to the internat iterator. + */ +static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer, + int offset) +{ + int iter = buffer->iterator + offset * 4; + return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)]; +} +/** + * Returns the pointer to the dword which is offset number of elements from + * the current processing location. + * + * Call to this function doesn't change the processing location. + * + * offset: The index of the byte relative to the internat iterator. 
+ */ +static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer, + int offset) +{ + int iter = buffer->iterator + offset; + return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)]; +} + +#endif diff --git a/sys/dev/drm/drm_context.c b/sys/dev/drm/drm_context.c index 5df3c76750..88fa1e698a 100644 --- a/sys/dev/drm/drm_context.c +++ b/sys/dev/drm/drm_context.c @@ -180,7 +180,7 @@ bad: int drm_context_switch(struct drm_device *dev, int old, int new) { - if (test_and_set_bit(0, &dev->context_flag)) { + if (atomic_xchg(&dev->context_flag, 1) != 0) { DRM_ERROR("Reentering -- FIXME\n"); return EBUSY; } @@ -188,7 +188,7 @@ int drm_context_switch(struct drm_device *dev, int old, int new) DRM_DEBUG("Context switch from %d to %d\n", old, new); if (new == dev->last_context) { - clear_bit(0, &dev->context_flag); + atomic_xchg(&dev->context_flag, 0); return 0; } @@ -206,7 +206,7 @@ int drm_context_switch_complete(struct drm_device *dev, int new) /* If a context switch is ever initiated when the kernel holds the lock, release that lock here. */ - clear_bit(0, &dev->context_flag); + atomic_xchg(&dev->context_flag, 0); return 0; } diff --git a/sys/dev/drm/drm_core.h b/sys/dev/drm/drm_core.h new file mode 100644 index 0000000000..5aa5010c23 --- /dev/null +++ b/sys/dev/drm/drm_core.h @@ -0,0 +1,37 @@ +/* + * Copyright 2004 Jon Smirl + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * $FreeBSD: head/sys/dev/drm2/drm_core.h 254792 2013-08-24 15:47:15Z dumbbell $ + */ + +#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl" + +#define CORE_NAME "drm" +#define CORE_DESC "DRM shared core routines" +#define CORE_DATE "20060810" + +#define DRM_IF_MAJOR 1 +#define DRM_IF_MINOR 4 + +#define CORE_MAJOR 1 +#define CORE_MINOR 1 +#define CORE_PATCHLEVEL 0 diff --git a/sys/dev/drm/drm_crtc.c b/sys/dev/drm/drm_crtc.c index 39d281834b..3338d071b3 100644 --- a/sys/dev/drm/drm_crtc.c +++ b/sys/dev/drm/drm_crtc.c @@ -170,6 +170,9 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] = { DRM_MODE_ENCODER_TVDAC, "TV" }, }; +static void drm_property_destroy_blob(struct drm_device *dev, + struct drm_property_blob *blob); + char *drm_get_encoder_name(struct drm_encoder *encoder) { static char buf[32]; @@ -520,6 +523,8 @@ void drm_connector_cleanup(struct drm_connector *connector) drm_mode_remove(connector, mode); lockmgr(&dev->mode_config.lock, LK_EXCLUSIVE); + if (connector->edid_blob_ptr) + drm_property_destroy_blob(dev, connector->edid_blob_ptr); drm_mode_object_put(dev, &connector->base); list_del(&connector->head); dev->mode_config.num_connector--; diff --git a/sys/dev/drm/drm_crtc.h b/sys/dev/drm/drm_crtc.h index a7f0fd9971..4fb8f70055 100644 --- a/sys/dev/drm/drm_crtc.h +++ b/sys/dev/drm/drm_crtc.h @@ -659,7 +659,7 @@ struct drm_mode_config { int min_width, min_height; int max_width, max_height; - struct drm_mode_config_funcs *funcs; + const struct drm_mode_config_funcs *funcs; resource_size_t fb_base; /* output poll support */ diff --git a/sys/dev/drm/drm_crtc_helper.h b/sys/dev/drm/drm_crtc_helper.h index 51775b19fd..b752e1055c 100644 --- a/sys/dev/drm/drm_crtc_helper.h +++ b/sys/dev/drm/drm_crtc_helper.h @@ -51,7 +51,7 @@ struct drm_crtc_helper_funcs { /* Provider can fixup or change mode timings before modeset occurs */ bool (*mode_fixup)(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); /* Actually set the mode */ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, @@ -78,7 +78,7 @@ struct drm_encoder_helper_funcs { void (*restore)(struct drm_encoder *encoder); bool (*mode_fixup)(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); void (*prepare)(struct drm_encoder *encoder); void (*commit)(struct drm_encoder *encoder); diff --git a/sys/dev/drm/drm_dp_helper.c b/sys/dev/drm/drm_dp_helper.c new file mode 100644 index 0000000000..ddbfaaf0cc --- /dev/null +++ b/sys/dev/drm/drm_dp_helper.c @@ -0,0 +1,146 @@ +/* + * Copyright © 2009 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + * + * $FreeBSD: head/sys/dev/drm2/drm_dp_helper.c 254817 2013-08-24 23:38:57Z dumbbell $ + */ + +#include +#include + +/** + * DOC: dp helpers + * + * These functions contain some common logic and helpers at various abstraction + * levels to deal with Display Port sink devices and related things like DP aux + * channel transfers, EDID reading over DP aux channels, decoding certain DPCD + * blocks, ... + */ + +static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_LANE0_1_STATUS + (lane >> 1); + int s = (lane & 1) * 4; + u8 l = dp_link_status(link_status, i); + return (l >> s) & 0xf; +} + +bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + u8 lane_align; + u8 lane_status; + int lane; + + lane_align = dp_link_status(link_status, + DP_LANE_ALIGN_STATUS_UPDATED); + if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) + return false; + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) + return false; + } + return true; +} + +bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + int lane; + u8 lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return false; + } + return true; +} + +u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? + DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} + +u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? 
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + +void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { + if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) + DRM_UDELAY(100); + else + DRM_MDELAY(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); +} + +void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { + if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) + DRM_UDELAY(400); + else + DRM_MDELAY(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); +} + +u8 drm_dp_link_rate_to_bw_code(int link_rate) +{ + switch (link_rate) { + case 162000: + default: + return DP_LINK_BW_1_62; + case 270000: + return DP_LINK_BW_2_7; + case 540000: + return DP_LINK_BW_5_4; + } +} + +int drm_dp_bw_code_to_link_rate(u8 link_bw) +{ + switch (link_bw) { + case DP_LINK_BW_1_62: + default: + return 162000; + case DP_LINK_BW_2_7: + return 270000; + case DP_LINK_BW_5_4: + return 540000; + } +} diff --git a/sys/dev/drm/drm_dp_helper.h b/sys/dev/drm/drm_dp_helper.h index 12e58903ec..6790ed9618 100644 --- a/sys/dev/drm/drm_dp_helper.h +++ b/sys/dev/drm/drm_dp_helper.h @@ -25,7 +25,19 @@ #ifndef _DRM_DP_HELPER_H_ #define _DRM_DP_HELPER_H_ -/* From the VESA DisplayPort spec */ +/* + * Unless otherwise noted, all values are from the DP 1.1a spec. Note that + * DP and DPCD versions are independent. Differences from 1.0 are not noted, + * 1.0 devices basically don't exist in the wild. + * + * Abbreviations, in chronological order: + * + * eDP: Embedded DisplayPort version 1 + * DPI: DisplayPort Interoperability Guideline v1.1a + * 1.2: DisplayPort 1.2 + * + * 1.2 formally includes both eDP and DPI definitions. + */ #define AUX_NATIVE_WRITE 0x8 #define AUX_NATIVE_READ 0x9 @@ -52,7 +64,7 @@ #define DP_MAX_LANE_COUNT 0x002 # define DP_MAX_LANE_COUNT_MASK 0x1f -# define DP_TPS3_SUPPORTED (1 << 6) +# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ # define DP_ENHANCED_FRAME_CAP (1 << 7) #define DP_MAX_DOWNSPREAD 0x003 @@ -68,14 +80,33 @@ /* 10b = TMDS or HDMI */ /* 11b = Other */ # define DP_FORMAT_CONVERSION (1 << 3) +# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ #define DP_MAIN_LINK_CHANNEL_CODING 0x006 -#define DP_TRAINING_AUX_RD_INTERVAL 0x00e +#define DP_DOWN_STREAM_PORT_COUNT 0x007 +# define DP_PORT_COUNT_MASK 0x0f +# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */ +# define DP_OUI_SUPPORT (1 << 7) -#define DP_PSR_SUPPORT 0x070 +#define DP_I2C_SPEED_CAP 0x00c /* DPI */ +# define DP_I2C_SPEED_1K 0x01 +# define DP_I2C_SPEED_5K 0x02 +# define DP_I2C_SPEED_10K 0x04 +# define DP_I2C_SPEED_100K 0x08 +# define DP_I2C_SPEED_400K 0x10 +# define DP_I2C_SPEED_1M 0x20 + +#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */ +#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ + +/* Multiple stream transport */ +#define DP_MSTM_CAP 0x021 /* 1.2 */ +# define DP_MST_CAP (1 << 0) + +#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ # define DP_PSR_IS_SUPPORTED 1 -#define DP_PSR_CAPS 0x071 +#define DP_PSR_CAPS 0x071 /* XXX 1.2? */ # define DP_PSR_NO_TRAIN_ON_EXIT 1 # define DP_PSR_SETUP_TIME_330 (0 << 1) # define DP_PSR_SETUP_TIME_275 (1 << 1) @@ -87,11 +118,36 @@ # define DP_PSR_SETUP_TIME_MASK (7 << 1) # define DP_PSR_SETUP_TIME_SHIFT 1 +/* + * 0x80-0x8f describe downstream port capabilities, but there are two layouts + * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not, + * each port's descriptor is one byte wide. 
If it was set, each port's is + * four bytes wide, starting with the one byte from the base info. As of + * DP interop v1.1a only VGA defines additional detail. + */ + +/* offset 0 */ +#define DP_DOWNSTREAM_PORT_0 0x80 +# define DP_DS_PORT_TYPE_MASK (7 << 0) +# define DP_DS_PORT_TYPE_DP 0 +# define DP_DS_PORT_TYPE_VGA 1 +# define DP_DS_PORT_TYPE_DVI 2 +# define DP_DS_PORT_TYPE_HDMI 3 +# define DP_DS_PORT_TYPE_NON_EDID 4 +# define DP_DS_PORT_HPD (1 << 3) +/* offset 1 for VGA is maximum megapixels per second / 8 */ +/* offset 2 */ +# define DP_DS_VGA_MAX_BPC_MASK (3 << 0) +# define DP_DS_VGA_8BPC 0 +# define DP_DS_VGA_10BPC 1 +# define DP_DS_VGA_12BPC 2 +# define DP_DS_VGA_16BPC 3 + /* link configuration */ #define DP_LINK_BW_SET 0x100 # define DP_LINK_BW_1_62 0x06 # define DP_LINK_BW_2_7 0x0a -# define DP_LINK_BW_5_4 0x14 +# define DP_LINK_BW_5_4 0x14 /* 1.2 */ #define DP_LANE_COUNT_SET 0x101 # define DP_LANE_COUNT_MASK 0x0f @@ -101,7 +157,7 @@ # define DP_TRAINING_PATTERN_DISABLE 0 # define DP_TRAINING_PATTERN_1 1 # define DP_TRAINING_PATTERN_2 2 -# define DP_TRAINING_PATTERN_3 3 +# define DP_TRAINING_PATTERN_3 3 /* 1.2 */ # define DP_TRAINING_PATTERN_MASK 0x3 # define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) @@ -142,16 +198,32 @@ #define DP_DOWNSPREAD_CTRL 0x107 # define DP_SPREAD_AMP_0_5 (1 << 4) +# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */ #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 # define DP_SET_ANSI_8B10B (1 << 0) -#define DP_PSR_EN_CFG 0x170 +#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */ +/* bitmask as for DP_I2C_SPEED_CAP */ + +#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */ + +#define DP_MSTM_CTRL 0x111 /* 1.2 */ +# define DP_MST_EN (1 << 0) +# define DP_UP_REQ_EN (1 << 1) +# define DP_UPSTREAM_IS_SRC (1 << 2) + +#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ # define DP_PSR_ENABLE (1 << 0) # define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) # define DP_PSR_CRC_VERIFICATION (1 << 2) # define DP_PSR_FRAME_CAPTURE (1 << 3) +#define DP_SINK_COUNT 0x200 +/* prior to 1.2 bit 7 was reserved mbz */ +# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) +# define DP_SINK_CP_READY (1 << 6) + #define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201 # define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) # define DP_AUTOMATED_TEST_REQUEST (1 << 1) @@ -209,18 +281,22 @@ # define DP_TEST_NAK (1 << 1) # define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) +#define DP_SOURCE_OUI 0x300 +#define DP_SINK_OUI 0x400 +#define DP_BRANCH_OUI 0x500 + #define DP_SET_POWER 0x600 # define DP_SET_POWER_D0 0x1 # define DP_SET_POWER_D3 0x2 -#define DP_PSR_ERROR_STATUS 0x2006 +#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ # define DP_PSR_LINK_CRC_ERROR (1 << 0) # define DP_PSR_RFB_STORAGE_ERROR (1 << 1) -#define DP_PSR_ESI 0x2007 +#define DP_PSR_ESI 0x2007 /* XXX 1.2? */ # define DP_PSR_CAPS_CHANGE (1 << 0) -#define DP_PSR_STATUS 0x2008 +#define DP_PSR_STATUS 0x2008 /* XXX 1.2? 
*/ # define DP_PSR_SINK_INACTIVE 0 # define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1 # define DP_PSR_SINK_ACTIVE_RFB 2 @@ -247,4 +323,34 @@ int iic_dp_aux_add_bus(device_t dev, const char *name, int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte), void *priv, device_t *bus, device_t *adapter); + +#define DP_LINK_STATUS_SIZE 6 +bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], + int lane); +u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], + int lane); + +#define DP_RECEIVER_CAP_SIZE 0xf +void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); + +u8 drm_dp_link_rate_to_bw_code(int link_rate); +int drm_dp_bw_code_to_link_rate(u8 link_bw); + +static inline int +drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); +} + +static inline u8 +drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; +} + #endif /* _DRM_DP_HELPER_H_ */ diff --git a/sys/dev/drm/drm_dp_iic_helper.c b/sys/dev/drm/drm_dp_iic_helper.c index 39485dfa2e..1c83dc42c0 100644 --- a/sys/dev/drm/drm_dp_iic_helper.c +++ b/sys/dev/drm/drm_dp_iic_helper.c @@ -215,22 +215,6 @@ iic_dp_aux_attach(device_t idev) return (0); } -static int -iic_dp_aux_detach(device_t idev) -{ - struct iic_dp_aux_data *aux_data; - device_t port; - - aux_data = device_get_softc(idev); - - port = aux_data->port; - bus_generic_detach(idev); - if (port != NULL) - device_delete_child(idev, port); - - return (0); -} - int iic_dp_aux_add_bus(device_t dev, const char *name, int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte), @@ -271,7 +255,7 @@ iic_dp_aux_add_bus(device_t dev, const char *name, static device_method_t drm_iic_dp_aux_methods[] = { DEVMETHOD(device_probe, iic_dp_aux_probe), DEVMETHOD(device_attach, iic_dp_aux_attach), - DEVMETHOD(device_detach, iic_dp_aux_detach), + DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(iicbus_reset, iic_dp_aux_reset), DEVMETHOD(iicbus_transfer, iic_dp_aux_xfer), DEVMETHOD_END diff --git a/sys/dev/drm/drm_drv.c b/sys/dev/drm/drm_drv.c index 3b91cc8e82..575c3db1b5 100644 --- a/sys/dev/drm/drm_drv.c +++ b/sys/dev/drm/drm_drv.c @@ -39,6 +39,8 @@ #include "dev/drm/drmP.h" #include "dev/drm/drm.h" +#include "dev/drm/drm_core.h" +#include "dev/drm/drm_global.h" #include "dev/drm/drm_sarea.h" #ifdef DRM_DEBUG_DEFAULT_ON @@ -77,7 +79,7 @@ static moduledata_t drm_mod = { "drm", drm_modevent, 0 -}; +}; DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); MODULE_VERSION(drm, 1); MODULE_DEPEND(drm, agp, 1, 1, 1); @@ -204,13 +206,22 @@ static struct drm_msi_blacklist_entry drm_msi_blacklist[] = { {0, 0} }; -static int drm_msi_is_blacklisted(int vendor, int device) +static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags) { int i = 0; - + + if (dev->driver->use_msi != NULL) { + int use_msi; + + use_msi = dev->driver->use_msi(dev, flags); + + return (!use_msi); + } + + /* TODO: Maybe move this to a callback in i915? 
*/ for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) { - if ((drm_msi_blacklist[i].vendor == vendor) && - (drm_msi_blacklist[i].device == device)) { + if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) && + (drm_msi_blacklist[i].device == dev->pci_device)) { return 1; } } @@ -245,7 +256,7 @@ int drm_attach(device_t kdev, drm_pci_id_list_t *idlist) { struct drm_device *dev; drm_pci_id_list_t *id_entry; - int unit, msicount; + int unit, error, msicount; int rid = 0; unit = device_get_unit(kdev); @@ -256,20 +267,23 @@ int drm_attach(device_t kdev, drm_pci_id_list_t *idlist) else dev->device = kdev; - dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID, - DRM_DEV_MODE, "dri/card%d", unit); - - dev->pci_domain = 0; + dev->pci_domain = pci_get_domain(dev->device); dev->pci_bus = pci_get_bus(dev->device); dev->pci_slot = pci_get_slot(dev->device); dev->pci_func = pci_get_function(dev->device); dev->pci_vendor = pci_get_vendor(dev->device); dev->pci_device = pci_get_device(dev->device); + dev->pci_subvendor = pci_get_subvendor(dev->device); + dev->pci_subdevice = pci_get_subdevice(dev->device); + + id_entry = drm_find_description(dev->pci_vendor, + dev->pci_device, idlist); + dev->id_entry = id_entry; if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) { if (drm_msi && - !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) { + !drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) { msicount = pci_msi_count(dev->device); DRM_DEBUG("MSI count = %d\n", msicount); if (msicount > 1) @@ -299,11 +313,41 @@ int drm_attach(device_t kdev, drm_pci_id_list_t *idlist) lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE); lockinit(&dev->dev_struct_lock, "drmslk", 0, LK_CANRECURSE); - id_entry = drm_find_description(dev->pci_vendor, - dev->pci_device, idlist); - dev->id_entry = id_entry; + error = drm_load(dev); + if (error) + goto error; + + error = drm_create_cdevs(kdev); + if (error) + goto error; - return drm_load(dev); + return (error); +error: + if (dev->irqr) { + bus_release_resource(dev->device, SYS_RES_IRQ, + dev->irqrid, dev->irqr); + } + if (dev->msi_enabled) { + pci_release_msi(dev->device); + } + return (error); +} + +int +drm_create_cdevs(device_t kdev) +{ + struct drm_device *dev; + int error, unit; + + unit = device_get_unit(kdev); + dev = device_get_softc(kdev); + + dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID, + DRM_DEV_MODE, "dri/card%d", unit); + error = 0; + if (error == 0) + dev->devnode->si_drv1 = dev; + return (error); } int drm_detach(device_t kdev) @@ -333,7 +377,7 @@ drm_pci_id_list_t *drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist) { int i = 0; - + for (i = 0; idlist[i].vendor != 0; i++) { if ((idlist[i].vendor == vendor) && ((idlist[i].device == device) || @@ -543,7 +587,7 @@ static int drm_load(struct drm_device *dev) DRM_ERROR("Request to enable bus-master failed.\n"); DRM_UNLOCK(dev); if (retcode != 0) - goto error; + goto error1; } DRM_INFO("Initialized %s %d.%d.%d %s\n", @@ -557,7 +601,9 @@ static int drm_load(struct drm_device *dev) error1: delete_unrhdr(dev->drw_unrhdr); + drm_gem_destroy(dev); error: + drm_ctxbitmap_cleanup(dev); drm_sysctl_cleanup(dev); DRM_LOCK(dev); drm_lastclose(dev); @@ -737,7 +783,7 @@ int drm_close(struct dev_close_args *ap) drm_lock_free(&dev->lock, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); - + /* FIXME: may require heavy-handed reset of hardware at this point, possibly processed via a callback to the X @@ -933,11 +979,11 @@ drm_mmap_single(struct dev_mmap_single_args 
*ap) int nprot = ap->a_nprot; dev = drm_get_device_from_kdev(kdev); - if ((dev->driver->driver_features & DRIVER_GEM) != 0) { - return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot)); - } else if (dev->drm_ttm_bo != NULL) { - return (ttm_bo_mmap_single(dev->drm_ttm_bo, offset, size, + if (dev->drm_ttm_bdev != NULL) { + return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size, obj_res, nprot)); + } else if ((dev->driver->driver_features & DRIVER_GEM) != 0) { + return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot)); } else { return (ENODEV); } @@ -953,14 +999,9 @@ MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1); #define LINUX_IOCTL_DRM_MAX 0x64ff static linux_ioctl_function_t drm_linux_ioctl; -static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl, +static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl, LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX}; -SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE, - linux_ioctl_register_handler, &drm_handler); -SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE, - linux_ioctl_unregister_handler, &drm_handler); - /* The bits for in/out are switched on Linux */ #define LINUX_IOC_IN IOC_OUT #define LINUX_IOC_OUT IOC_IN @@ -983,6 +1024,37 @@ drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args) } #endif /* DRM_LINUX */ +static int +drm_core_init(void *arg) +{ + + drm_global_init(); + +#if DRM_LINUX + linux_ioctl_register_handler(&drm_handler); +#endif /* DRM_LINUX */ + + DRM_INFO("Initialized %s %d.%d.%d %s\n", + CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); + return 0; +} + +static void +drm_core_exit(void *arg) +{ + +#if DRM_LINUX + linux_ioctl_unregister_handler(&drm_handler); +#endif /* DRM_LINUX */ + + drm_global_release(); +} + +SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, + drm_core_init, NULL); +SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, + drm_core_exit, NULL); + /* * Check if dmi_system_id structure matches system DMI data */ diff --git a/sys/dev/drm/drm_edid.c b/sys/dev/drm/drm_edid.c index b202cf54df..83dde11dc8 100644 --- a/sys/dev/drm/drm_edid.c +++ b/sys/dev/drm/drm_edid.c @@ -252,6 +252,8 @@ drm_do_probe_ddc_edid(device_t adapter, unsigned char *buf, int block, int len) { unsigned char start = block * EDID_LENGTH; + unsigned char segment = block >> 1; + unsigned char xfers = segment ? 3 : 2; int ret, retries = 5; /* The core i2c driver will automatically retry the transfer if the @@ -263,6 +265,11 @@ drm_do_probe_ddc_edid(device_t adapter, unsigned char *buf, do { struct iic_msg msgs[] = { { + .slave = DDC_SEGMENT_ADDR << 1, + .flags = 0, + .len = 1, + .buf = &segment, + }, { .slave = DDC_ADDR << 1, .flags = IIC_M_WR, .len = 1, @@ -274,7 +281,13 @@ drm_do_probe_ddc_edid(device_t adapter, unsigned char *buf, .buf = buf, } }; - ret = iicbus_transfer(adapter, msgs, 2); + + /* + * Avoid sending the segment addr to not upset non-compliant ddc + * monitors. 
+ */ + ret = iicbus_transfer(adapter, &msgs[3 - xfers], xfers); + if (ret != 0) DRM_DEBUG_KMS("iicbus_transfer countdown %d error %d\n", retries, ret); diff --git a/sys/dev/drm/drm_fb_helper.c b/sys/dev/drm/drm_fb_helper.c index 897e085022..cf1b7b625f 100644 --- a/sys/dev/drm/drm_fb_helper.c +++ b/sys/dev/drm/drm_fb_helper.c @@ -554,8 +554,11 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) for (i = 0; i < helper->connector_count; i++) drm_free(helper->connector_info[i], DRM_MEM_KMS); drm_free(helper->connector_info, DRM_MEM_KMS); - for (i = 0; i < helper->crtc_count; i++) + for (i = 0; i < helper->crtc_count; i++) { drm_free(helper->crtc_info[i].mode_set.connectors, DRM_MEM_KMS); + if (helper->crtc_info[i].mode_set.mode) + drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode); + } drm_free(helper->crtc_info, DRM_MEM_KMS); } diff --git a/sys/dev/drm/drm_fixed.h b/sys/dev/drm/drm_fixed.h new file mode 100644 index 0000000000..96bbec3018 --- /dev/null +++ b/sys/dev/drm/drm_fixed.h @@ -0,0 +1,71 @@ +/* + * Copyright 2009 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * + * $FreeBSD: head/sys/dev/drm2/drm_fixed.h 254853 2013-08-25 12:27:15Z dumbbell $ + */ + +#ifndef DRM_FIXED_H +#define DRM_FIXED_H + +typedef union dfixed { + u32 full; +} fixed20_12; + + +#define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ +#define dfixed_const_half(A) (u32)(((A) << 12) + 2048) +#define dfixed_const_666(A) (u32)(((A) << 12) + 2731) +#define dfixed_const_8(A) (u32)(((A) << 12) + 3277) +#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) +#define dfixed_init(A) { .full = dfixed_const((A)) } +#define dfixed_init_half(A) { .full = dfixed_const_half((A)) } +#define dfixed_trunc(A) ((A).full >> 12) +#define dfixed_frac(A) ((A).full & ((1 << 12) - 1)) + +static inline u32 dfixed_floor(fixed20_12 A) +{ + u32 non_frac = dfixed_trunc(A); + + return dfixed_const(non_frac); +} + +static inline u32 dfixed_ceil(fixed20_12 A) +{ + u32 non_frac = dfixed_trunc(A); + + if (A.full > dfixed_const(non_frac)) + return dfixed_const(non_frac + 1); + else + return dfixed_const(non_frac); +} + +static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) +{ + u64 tmp = ((u64)A.full << 13); + + do_div(tmp, B.full); + tmp += 1; + tmp /= 2; + return lower_32_bits(tmp); +} +#endif diff --git a/sys/dev/drm/drm_gem.c b/sys/dev/drm/drm_gem.c index 9108532428..d1e1a0a4ac 100644 --- a/sys/dev/drm/drm_gem.c +++ b/sys/dev/drm/drm_gem.c @@ -122,7 +122,7 @@ drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj, obj->vm_obj = NULL; obj->refcount = 1; - atomic_set(&obj->handle_count, 0); + atomic_store_rel_int(&obj->handle_count, 0); obj->size = size; return (0); @@ -164,7 +164,7 @@ void drm_gem_object_reference(struct drm_gem_object *obj) { - KASSERT(obj->refcount > 0, ("Dandling obj %p", obj)); + KASSERT(obj->refcount > 0, ("Dangling obj %p", obj)); refcount_acquire(&obj->refcount); } @@ -243,24 +243,40 @@ int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, uint32_t *handle) { - int error; + struct drm_device *dev = obj->dev; + int ret; - error = drm_gem_name_create(&file_priv->object_names, obj, handle); - if (error != 0) - return (error); + ret = drm_gem_name_create(&file_priv->object_names, obj, handle); + if (ret != 0) + return (ret); drm_gem_object_handle_reference(obj); + + if (dev->driver->gem_open_object) { + ret = dev->driver->gem_open_object(obj, file_priv); + if (ret) { + drm_gem_handle_delete(file_priv, *handle); + return ret; + } + } + return (0); } int drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle) { + struct drm_device *dev; struct drm_gem_object *obj; obj = drm_gem_names_remove(&file_priv->object_names, handle); if (obj == NULL) return (EINVAL); + + dev = obj->dev; + if (dev->driver->gem_close_object) + dev->driver->gem_close_object(obj, file_priv); drm_gem_object_handle_unreference_unlocked(obj); + return (0); } @@ -313,9 +329,17 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_priv) static int drm_gem_object_release_handle(uint32_t name, void *ptr, void *arg) { + struct drm_file *file_priv; struct drm_gem_object *obj; + struct drm_device *dev; + file_priv = arg; obj = ptr; + dev = obj->dev; + + if (dev->driver->gem_close_object) + dev->driver->gem_close_object(obj, file_priv); + drm_gem_object_handle_unreference(obj); return (0); } @@ -325,7 +349,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_priv) { drm_gem_names_foreach(&file_priv->object_names, - drm_gem_object_release_handle, NULL); + 
drm_gem_object_release_handle, file_priv); drm_gem_names_fini(&file_priv->object_names); } diff --git a/sys/dev/drm/drm_gem_names.c b/sys/dev/drm/drm_gem_names.c index 1f7f2291bf..c6aa0e6a8a 100644 --- a/sys/dev/drm/drm_gem_names.c +++ b/sys/dev/drm/drm_gem_names.c @@ -133,12 +133,12 @@ drm_gem_name_create(struct drm_gem_names *names, void *p, uint32_t *name) { struct drm_gem_name *np; - np = kmalloc(sizeof(struct drm_gem_name), M_GEM_NAMES, M_WAITOK); - lockmgr(&names->lock, LK_EXCLUSIVE); if (*name != 0) { - lockmgr(&names->lock, LK_RELEASE); return (EALREADY); } + + np = kmalloc(sizeof(struct drm_gem_name), M_GEM_NAMES, M_WAITOK); + lockmgr(&names->lock, LK_EXCLUSIVE); np->name = alloc_unr(names->unr); if (np->name == -1) { lockmgr(&names->lock, LK_RELEASE); diff --git a/sys/dev/drm/drm_ioctl.c b/sys/dev/drm/drm_ioctl.c index e1db9ab6ec..45f76c84cf 100644 --- a/sys/dev/drm/drm_ioctl.c +++ b/sys/dev/drm/drm_ioctl.c @@ -35,6 +35,7 @@ */ #include "dev/drm/drmP.h" +#include /* * Beginning in revision 1.1 of the DRM interface, getunique will return @@ -253,10 +254,6 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) return 0; } - -#define DRM_IF_MAJOR 1 -#define DRM_IF_MINOR 2 - int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) { diff --git a/sys/dev/drm/drm_irq.c b/sys/dev/drm/drm_irq.c index 75d0f34000..05118e7c27 100644 --- a/sys/dev/drm/drm_irq.c +++ b/sys/dev/drm/drm_irq.c @@ -195,7 +195,7 @@ struct timeval ns_to_timeval(const int64_t nsec) { struct timeval tv; - uint32_t rem; + long rem; if (nsec == 0) { tv.tv_sec = 0; @@ -771,7 +771,7 @@ int drm_vblank_get(struct drm_device *dev, int crtc) lockmgr(&dev->vbl_lock, LK_EXCLUSIVE); /* Going from 0->1 means we have to enable interrupts again */ - if (atomic_fetchadd_int(&dev->vblank_refcount[crtc], 1) == 0) { + if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { lockmgr(&dev->vblank_time_lock, LK_EXCLUSIVE); if (!dev->vblank_enabled[crtc]) { /* Enable vblank irqs under vblank_time_lock protection. 
@@ -816,7 +816,7 @@ void drm_vblank_put(struct drm_device *dev, int crtc) ("Too many drm_vblank_put for crtc %d", crtc)); /* Last user schedules interrupt disable */ - if (atomic_fetchadd_int(&dev->vblank_refcount[crtc], -1) == 1 && + if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) && (drm_vblank_offdelay > 0)) callout_reset(&dev->vblank_disable_callout, (drm_vblank_offdelay * DRM_HZ) / 1000, diff --git a/sys/dev/drm/drm_linux_list.h b/sys/dev/drm/drm_linux_list.h index 86fdfe7663..34940cd249 100644 --- a/sys/dev/drm/drm_linux_list.h +++ b/sys/dev/drm/drm_linux_list.h @@ -142,6 +142,11 @@ list_del_init(struct list_head *entry) { &pos->member != (head); \ pos = n, n = list_entry(n->member.next, __typeof(*n), member)) +#define list_for_each_entry_safe_from(pos, n, head, member) \ + for (n = list_entry(pos->member.next, __typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, __typeof(*n), member)) + #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) diff --git a/sys/dev/drm/drm_pci.c b/sys/dev/drm/drm_pci.c index 0daf8db5b4..20218addc5 100644 --- a/sys/dev/drm/drm_pci.c +++ b/sys/dev/drm/drm_pci.c @@ -127,3 +127,53 @@ drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah) } /*@}*/ + +int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask) +{ + device_t root; + int pos; + u32 lnkcap = 0, lnkcap2 = 0; + + *mask = 0; + if (!drm_device_is_pcie(dev)) + return -EINVAL; + + root = device_get_parent(dev->device); + + pos = 0; + pci_find_extcap(root, PCIY_EXPRESS, &pos); + if (!pos) + return -EINVAL; + + /* we've been informed via and serverworks don't make the cut */ + if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA || + pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS) + return -EINVAL; + + lnkcap = pci_read_config(root, pos + PCIER_LINKCAP, 4); + lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4); + + lnkcap &= PCIEM_LNKCAP_SPEED_MASK; + lnkcap2 &= 0xfe; + +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02 /* Supported Link Speed 2.5GT/s */ +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04 /* Supported Link Speed 5.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08 /* Supported Link Speed 8.0GT/s */ + + if (lnkcap2) { /* PCIE GEN 3.0 */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + *mask |= DRM_PCIE_SPEED_25; + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + *mask |= DRM_PCIE_SPEED_50; + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + *mask |= DRM_PCIE_SPEED_80; + } else { + if (lnkcap & 1) + *mask |= DRM_PCIE_SPEED_25; + if (lnkcap & 2) + *mask |= DRM_PCIE_SPEED_50; + } + + DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", pci_get_vendor(root), pci_get_device(root), lnkcap, lnkcap2); + return 0; +} diff --git a/sys/dev/drm/drm_sysctl.c b/sys/dev/drm/drm_sysctl.c index bea36e22b0..a6dba46b25 100644 --- a/sys/dev/drm/drm_sysctl.c +++ b/sys/dev/drm/drm_sysctl.c @@ -180,7 +180,15 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS { struct drm_device *dev = arg1; drm_local_map_t *map, *tempmaps; - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" }; + const char *types[] = { + [_DRM_FRAME_BUFFER] = "FB", + [_DRM_REGISTERS] = "REG", + [_DRM_SHM] = "SHM", + [_DRM_AGP] = "AGP", + [_DRM_SCATTER_GATHER] = "SG", + [_DRM_CONSISTENT] = "CONS", + [_DRM_GEM] = "GEM" + }; const char *type, *yesno; int i, mapcount; char buf[128]; @@ -214,10 +222,20 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS for (i = 0; i < mapcount; i++) { map = &tempmaps[i]; - if (map->type < 0 || map->type > 4) + switch(map->type) { + default: type = "??"; - 
else + break; + case _DRM_FRAME_BUFFER: + case _DRM_REGISTERS: + case _DRM_SHM: + case _DRM_AGP: + case _DRM_SCATTER_GATHER: + case _DRM_CONSISTENT: + case _DRM_GEM: type = types[map->type]; + break; + } if (!map->mtrr) yesno = "no"; diff --git a/sys/dev/drm/foo.txt b/sys/dev/drm/foo.txt new file mode 100644 index 0000000000..257cc5642c --- /dev/null +++ b/sys/dev/drm/foo.txt @@ -0,0 +1 @@ +foo diff --git a/sys/dev/drm/i915/i915_gem.c b/sys/dev/drm/i915/i915_gem.c index 867d6a2c36..aa2202b36c 100644 --- a/sys/dev/drm/i915/i915_gem.c +++ b/sys/dev/drm/i915/i915_gem.c @@ -137,7 +137,7 @@ i915_gem_wait_for_error(struct drm_device *dev) } lockmgr(&dev_priv->error_completion_lock, LK_RELEASE); - if (atomic_read(&dev_priv->mm.wedged)) { + if (atomic_load_acq_int(&dev_priv->mm.wedged)) { lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE); dev_priv->error_completion++; lockmgr(&dev_priv->error_completion_lock, LK_RELEASE); @@ -748,8 +748,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) u32 seqno = 0; int ret; - if (atomic_read(&dev_priv->mm.wedged)) - return -EIO; + dev_priv = dev->dev_private; + if (atomic_load_acq_int(&dev_priv->mm.wedged)) + return (-EIO); + + recent_enough = ticks - (20 * hz / 1000); + ring = NULL; + seqno = 0; spin_lock(&file_priv->mm.lock); list_for_each_entry(request, &file_priv->mm.request_list, client_list) { @@ -770,15 +775,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) if (ring->irq_get(ring)) { while (ret == 0 && !(i915_seqno_passed(ring->get_seqno(ring), seqno) || - atomic_read(&dev_priv->mm.wedged))) + atomic_load_acq_int(&dev_priv->mm.wedged))) ret = -lksleep(ring, &ring->irq_lock, PCATCH, "915thr", 0); ring->irq_put(ring); - if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) + if (ret == 0 && atomic_load_acq_int(&dev_priv->mm.wedged)) ret = -EIO; } else if (_intel_wait_for(dev, i915_seqno_passed(ring->get_seqno(ring), seqno) || - atomic_read(&dev_priv->mm.wedged), 3000, 0, "915rtr")) { + atomic_load_acq_int(&dev_priv->mm.wedged), 3000, 0, "915rtr")) { ret = -EBUSY; } } diff --git a/sys/dev/drm/i915/i915_gem_execbuffer.c b/sys/dev/drm/i915/i915_gem_execbuffer.c index e07c860ea3..37ca117b32 100644 --- a/sys/dev/drm/i915/i915_gem_execbuffer.c +++ b/sys/dev/drm/i915/i915_gem_execbuffer.c @@ -191,7 +191,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, i915_gem_clflush_object(obj); if (obj->base.pending_write_domain) - cd->flips |= atomic_read(&obj->pending_flip); + cd->flips |= atomic_load_acq_int(&obj->pending_flip); /* The actual obj->write_domain will be updated with * pending_write_domain after we emit the accumulated flush for all diff --git a/sys/dev/drm/i915/intel_crt.c b/sys/dev/drm/i915/intel_crt.c index 9f9f63e79b..603749bbfb 100644 --- a/sys/dev/drm/i915/intel_crt.c +++ b/sys/dev/drm/i915/intel_crt.c @@ -110,7 +110,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector, } static bool intel_crt_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/sys/dev/drm/i915/intel_display.c b/sys/dev/drm/i915/intel_display.c index deb7e7fe2c..a039f28f64 100644 --- a/sys/dev/drm/i915/intel_display.c +++ b/sys/dev/drm/i915/intel_display.c @@ -2263,8 +2263,8 @@ intel_finish_fb(struct drm_framebuffer *old_fb) int ret; lockmgr(&dev->event_lock, LK_EXCLUSIVE); - while (!atomic_read(&dev_priv->mm.wedged) && - atomic_read(&obj->pending_flip) != 0) { + while 
(!atomic_load_acq_int(&dev_priv->mm.wedged) && + atomic_load_acq_int(&obj->pending_flip) != 0) { lksleep(&obj->pending_flip, &dev->event_lock, 0, "915flp", 0); } @@ -2950,7 +2950,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) dev = crtc->dev; dev_priv = dev->dev_private; lockmgr(&dev->event_lock, LK_EXCLUSIVE); - while (atomic_read(&obj->pending_flip) != 0) + while (atomic_load_acq_int(&obj->pending_flip) != 0) lksleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0); lockmgr(&dev->event_lock, LK_RELEASE); } @@ -3514,7 +3514,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder) } static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; @@ -7337,7 +7337,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, obj = work->old_fb_obj; atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane); - if (atomic_read(&obj->pending_flip) == 0) + if (atomic_load_acq_int(&obj->pending_flip) == 0) wakeup(&obj->pending_flip); lockmgr(&dev->event_lock, LK_RELEASE); @@ -7639,7 +7639,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return 0; cleanup_pending: - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); + atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane); drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&obj->base); DRM_UNLOCK(dev); diff --git a/sys/dev/drm/i915/intel_dp.c b/sys/dev/drm/i915/intel_dp.c index bc19f81ba5..e16cbb5c1a 100644 --- a/sys/dev/drm/i915/intel_dp.c +++ b/sys/dev/drm/i915/intel_dp.c @@ -41,9 +41,6 @@ #define DP_LINK_CONFIGURATION_SIZE 9 -/* XXXKIB what is the right code for the FreeBSD ? */ -#define EREMOTEIO ENXIO - struct intel_dp { struct intel_encoder base; uint32_t output_reg; @@ -222,7 +219,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes) static bool intel_dp_adjust_dithering(struct intel_dp *intel_dp, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); @@ -674,7 +671,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp, } static bool -intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, +intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; @@ -689,14 +686,9 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, mode, adjusted_mode); - /* - * the mode->clock is used to calculate the Data&Link M/N - * of the pipe. For the eDP the fixed clock should be used. - */ - mode->clock = intel_dp->panel_fixed_mode->clock; } - if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode)) + if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, adjusted_mode)) return false; bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 
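/*
 * Sketch of the pending-flip protocol visible in intel_finish_fb(),
 * intel_crtc_wait_for_pending_flips() and do_intel_finish_page_flip() above:
 * every plane owns one bit in obj->pending_flip, the flip-done path clears
 * its bit and wakes waiters once the mask drains to zero, and waiters sleep
 * on the mask under the event lock.  A pthread mutex and condition variable
 * replace lockmgr()/lksleep()/wakeup() here.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  flip_cv    = PTHREAD_COND_INITIALIZER;
static unsigned int    pending_flip;	/* one bit per plane */

static void
queue_flip(int plane)
{
	pthread_mutex_lock(&event_lock);
	pending_flip |= 1u << plane;
	pthread_mutex_unlock(&event_lock);
}

static void
finish_flip(int plane)
{
	pthread_mutex_lock(&event_lock);
	pending_flip &= ~(1u << plane);
	if (pending_flip == 0)		/* last outstanding flip: wake waiters */
		pthread_cond_broadcast(&flip_cv);
	pthread_mutex_unlock(&event_lock);
}

static void
wait_for_pending_flips(void)
{
	pthread_mutex_lock(&event_lock);
	while (pending_flip != 0)	/* mirrors the lksleep() loop */
		pthread_cond_wait(&flip_cv, &event_lock);
	pthread_mutex_unlock(&event_lock);
}

int
main(void)
{
	queue_flip(0);
	finish_flip(0);
	wait_for_pending_flips();
	printf("all flips completed\n");
	return 0;
}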
18 : 24; @@ -705,7 +697,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); - if (intel_dp_link_required(mode->clock, bpp) + if (intel_dp_link_required(adjusted_mode->clock, bpp) <= link_avail) { intel_dp->link_bw = bws[clock]; intel_dp->lane_count = lane_count; diff --git a/sys/dev/drm/i915/intel_drv.h b/sys/dev/drm/i915/intel_drv.h index fea00664bc..a12bbaffe1 100644 --- a/sys/dev/drm/i915/intel_drv.h +++ b/sys/dev/drm/i915/intel_drv.h @@ -317,7 +317,7 @@ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); extern void intel_pch_panel_fitting(struct drm_device *dev, int fitting_mode, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern u32 intel_panel_get_max_backlight(struct drm_device *dev); extern u32 intel_panel_get_backlight(struct drm_device *dev); diff --git a/sys/dev/drm/i915/intel_hdmi.c b/sys/dev/drm/i915/intel_hdmi.c index 57ba47a86d..01c45501ce 100644 --- a/sys/dev/drm/i915/intel_hdmi.c +++ b/sys/dev/drm/i915/intel_hdmi.c @@ -316,7 +316,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector, } static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/sys/dev/drm/i915/intel_lvds.c b/sys/dev/drm/i915/intel_lvds.c index b928e7a419..f421c5feea 100644 --- a/sys/dev/drm/i915/intel_lvds.c +++ b/sys/dev/drm/i915/intel_lvds.c @@ -228,7 +228,7 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target) } static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; diff --git a/sys/dev/drm/i915/intel_panel.c b/sys/dev/drm/i915/intel_panel.c index 7dae5f5669..956fd23d64 100644 --- a/sys/dev/drm/i915/intel_panel.c +++ b/sys/dev/drm/i915/intel_panel.c @@ -58,7 +58,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, void intel_pch_panel_fitting(struct drm_device *dev, int fitting_mode, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/sys/dev/drm/i915/intel_sdvo.c b/sys/dev/drm/i915/intel_sdvo.c index f894ff0ac6..78d144a88b 100644 --- a/sys/dev/drm/i915/intel_sdvo.c +++ b/sys/dev/drm/i915/intel_sdvo.c @@ -923,7 +923,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) static bool intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct intel_sdvo_dtd output_dtd; @@ -940,7 +940,7 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, static bool intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* Reset the input timing to the screen. Assume always input 0. 
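/*
 * Sketch of the link configuration search in intel_dp_mode_fixup() above: the
 * driver walks lane counts and link rates and settles on the first pair whose
 * bandwidth covers the adjusted mode.  The two rate helpers below are
 * simplified stand-ins for intel_dp_link_required()/intel_dp_max_data_rate(),
 * not the driver's exact arithmetic; DP 1.x 8b/10b coding is the only
 * overhead modelled.
 */
#include <stdio.h>

/* Payload the mode needs, in kbit/s: pixel clock (kHz) times bits per pixel. */
static long
link_required_kbps(long pixel_clock_khz, int bpp)
{
	return pixel_clock_khz * bpp;
}

/* Usable payload of one configuration: 80% of the raw rate (8b/10b coding). */
static long
max_data_rate_kbps(long per_lane_kbps, int lanes)
{
	return per_lane_kbps * lanes * 8 / 10;
}

int
main(void)
{
	static const long link_rates[] = { 1620000, 2700000 };	/* raw kbit/s per lane */
	const int nrates = sizeof(link_rates) / sizeof(link_rates[0]);
	long required = link_required_kbps(148500, 24);		/* 1080p60 at 24 bpp */
	int lanes, i;

	for (lanes = 1; lanes <= 4; lanes <<= 1) {
		for (i = 0; i < nrates; i++) {
			if (required <= max_data_rate_kbps(link_rates[i], lanes)) {
				printf("use %ld kbit/s x %d lanes\n",
				    link_rates[i], lanes);
				return 0;
			}
		}
	}
	printf("mode does not fit on this link\n");
	return 1;
}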
*/ @@ -963,7 +963,7 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, } static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); diff --git a/sys/dev/drm/i915/intel_tv.c b/sys/dev/drm/i915/intel_tv.c index bc2ae5f9e2..2e45d37aed 100644 --- a/sys/dev/drm/i915/intel_tv.c +++ b/sys/dev/drm/i915/intel_tv.c @@ -845,7 +845,7 @@ intel_tv_mode_valid(struct drm_connector *connector, static bool -intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, +intel_tv_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; diff --git a/sys/dev/drm/mach64/mach64_irq.c b/sys/dev/drm/mach64/mach64_irq.c index 76abe72c5d..2428415405 100644 --- a/sys/dev/drm/mach64/mach64_irq.c +++ b/sys/dev/drm/mach64/mach64_irq.c @@ -70,7 +70,7 @@ irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS) u32 mach64_get_vblank_counter(struct drm_device * dev, int crtc) { - const drm_mach64_private_t *const dev_priv = dev->dev_private; + drm_mach64_private_t *const dev_priv = dev->dev_private; if (crtc != 0) return 0; diff --git a/sys/dev/drm/mga/mga_irq.c b/sys/dev/drm/mga/mga_irq.c index b8265ea224..43dec7eedd 100644 --- a/sys/dev/drm/mga/mga_irq.c +++ b/sys/dev/drm/mga/mga_irq.c @@ -38,7 +38,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) { - const drm_mga_private_t *const dev_priv = + drm_mga_private_t *const dev_priv = (drm_mga_private_t *) dev->dev_private; if (crtc != 0) { diff --git a/sys/dev/drm/r128/r128_irq.c b/sys/dev/drm/r128/r128_irq.c index fdbfd5aa03..1ab1050a90 100644 --- a/sys/dev/drm/r128/r128_irq.c +++ b/sys/dev/drm/r128/r128_irq.c @@ -37,7 +37,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) { - const drm_r128_private_t *dev_priv = dev->dev_private; + drm_r128_private_t *dev_priv = dev->dev_private; if (crtc != 0) return 0; diff --git a/sys/dev/drm/ttm/ttm_bo.c b/sys/dev/drm/ttm/ttm_bo.c index 67309040c7..72f5ba1fd6 100644 --- a/sys/dev/drm/ttm/ttm_bo.c +++ b/sys/dev/drm/ttm/ttm_bo.c @@ -130,7 +130,7 @@ static void ttm_bo_release_list(struct ttm_buffer_object *bo) ttm_mem_global_free(bdev->glob->mem_glob, acc_size); } -int +static int ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible) { const char *wmsg; @@ -144,7 +144,7 @@ ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible) flags = 0; wmsg = "ttbowu"; } - while (!ttm_bo_is_reserved(bo)) { + while (ttm_bo_is_reserved(bo)) { ret = -lksleep(bo, &bo->glob->lru_lock, 0, wmsg, 0); if (ret != 0) break; @@ -195,13 +195,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) return put_count; } -int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, +int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) { int ret; - while (unlikely(atomic_read(&bo->reserved) != 0)) { + while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { /** * Deadlock avoidance for multi-bo reserving. 
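/*
 * Sketch of the reservation change in ttm_bo_reserve_nolru() above: the old
 * code looped on atomic_read(&bo->reserved) and set the flag separately, the
 * new code claims the flag with a single atomic exchange, so testing and
 * taking the reservation can no longer race.  C11 atomics stand in for the
 * kernel's atomic_xchg()/atomic_set().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buffer_object {
	atomic_int reserved;		/* 0 = free, 1 = reserved */
};

/* One attempt: returns true if this caller now owns the reservation. */
static bool
bo_try_reserve(struct buffer_object *bo)
{
	return atomic_exchange(&bo->reserved, 1) == 0;
}

static void
bo_unreserve(struct buffer_object *bo)
{
	atomic_store(&bo->reserved, 0);
}

int
main(void)
{
	struct buffer_object bo = { .reserved = 0 };

	printf("first reserve:   %s\n", bo_try_reserve(&bo) ? "ok" : "busy");
	printf("second reserve:  %s\n", bo_try_reserve(&bo) ? "ok" : "busy");
	bo_unreserve(&bo);
	printf("after unreserve: %s\n", bo_try_reserve(&bo) ? "ok" : "busy");
	return 0;
}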
*/ @@ -223,22 +223,35 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, return -EBUSY; ret = ttm_bo_wait_unreserved_locked(bo, interruptible); + if (unlikely(ret)) return ret; } - atomic_set(&bo->reserved, 1); if (use_sequence) { + bool wake_up = false; /** * Wake up waiters that may need to recheck for deadlock, * if we decreased the sequence number. */ if (unlikely((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)) - wakeup(bo); + wake_up = true; + /* + * In the worst case with memory ordering these values can be + * seen in the wrong order. However since we call wake_up_all + * in that case, this will hopefully not pose a problem, + * and the worst case would only cause someone to accidentally + * hit -EAGAIN in ttm_bo_reserve when they see old value of + * val_seq. However this would only happen if seq_valid was + * written before val_seq was, and just means some slightly + * increased cpu usage + */ bo->val_seq = sequence; bo->seq_valid = true; + if (wake_up) + wakeup(bo); } else { bo->seq_valid = false; } @@ -267,15 +280,67 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, int put_count = 0; int ret; - lockmgr(&glob->lru_lock, LK_EXCLUSIVE); - ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence, - sequence); - if (likely(ret == 0)) + lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE); + ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence, + sequence); + if (likely(ret == 0)) { put_count = ttm_bo_del_from_lru(bo); - lockmgr(&glob->lru_lock, LK_RELEASE); + lockmgr(&glob->lru_lock, LK_RELEASE); + ttm_bo_list_ref_sub(bo, put_count, true); + } else + lockmgr(&bo->glob->lru_lock, LK_RELEASE); - ttm_bo_list_ref_sub(bo, put_count, true); + return ret; +} + +int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, + bool interruptible, uint32_t sequence) +{ + bool wake_up = false; + int ret; + while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { + if (bo->seq_valid && sequence == bo->val_seq) { + DRM_ERROR( + "%s: bo->seq_valid && sequence == bo->val_seq", + __func__); + } + + ret = ttm_bo_wait_unreserved_locked(bo, interruptible); + + if (unlikely(ret)) + return ret; + } + + if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid) + wake_up = true; + + /** + * Wake up waiters that may need to recheck for deadlock, + * if we decreased the sequence number. 
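/*
 * Sketch of the sequence test used above when re-reserving with a new
 * validation sequence: "bo->val_seq - sequence < (1 << 31)" is an unsigned,
 * wrap-around-safe way of asking whether the stored sequence is not newer
 * than the one being installed, i.e. whether the number is being decreased
 * and sleeping reservers should be woken to recheck their deadlock decision.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true when installing new_seq does not advance old_seq (modulo 2^32) */
static bool
seq_decreased(uint32_t old_seq, uint32_t new_seq)
{
	return old_seq - new_seq < (1u << 31);
}

int
main(void)
{
	printf("%d\n", seq_decreased(100, 90));		/* 1: lowered, wake waiters */
	printf("%d\n", seq_decreased(90, 100));		/* 0: normal monotonic bump */
	printf("%d\n", seq_decreased(5, 0xfffffff0u));	/* 1: wrapped, still "older" */
	return 0;
}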
+ */ + bo->val_seq = sequence; + bo->seq_valid = true; + if (wake_up) + wakeup(bo); + + return 0; +} + +int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, + bool interruptible, uint32_t sequence) +{ + struct ttm_bo_global *glob = bo->glob; + int put_count, ret; + + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); + ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence); + if (likely(!ret)) { + put_count = ttm_bo_del_from_lru(bo); + lockmgr(&glob->lru_lock, LK_RELEASE); + ttm_bo_list_ref_sub(bo, put_count, true); + } else + lockmgr(&glob->lru_lock, LK_RELEASE); return ret; } @@ -411,6 +476,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, bo->mem = tmp_mem; bdev->driver->move_notify(bo, mem); bo->mem = *mem; + *mem = tmp_mem; } goto out_err; @@ -487,7 +553,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) int ret; lockmgr(&glob->lru_lock, LK_EXCLUSIVE); - ret = ttm_bo_reserve_locked(bo, false, true, false, 0); + ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); (void) ttm_bo_wait(bo, false, false, true); @@ -579,7 +645,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, return ret; lockmgr(&glob->lru_lock, LK_EXCLUSIVE); - ret = ttm_bo_reserve_locked(bo, false, true, false, 0); + ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); /* * We raced, and lost, someone else holds the reservation now, @@ -643,7 +709,12 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) refcount_acquire(&nentry->list_kref); } - ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0); + ret = ttm_bo_reserve_nolru(entry, false, true, false, 0); + if (remove_all && ret) { + ret = ttm_bo_reserve_nolru(entry, false, false, + false, 0); + } + if (!ret) ret = ttm_bo_cleanup_refs_and_unlock(entry, false, !remove_all); @@ -795,7 +866,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, lockmgr(&glob->lru_lock, LK_EXCLUSIVE); list_for_each_entry(bo, &man->lru, lru) { - ret = ttm_bo_reserve_locked(bo, false, true, false, 0); + ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); if (!ret) break; } @@ -1563,13 +1634,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) { - struct ttm_bo_device *bdev = bo->bdev; - /* off_t offset = (off_t)bo->addr_space_offset;XXXKIB */ - /* off_t holelen = ((off_t)bo->mem.num_pages) << PAGE_SHIFT;XXXKIB */ - if (!bdev->dev_mapping) - return; - /* unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); XXXKIB */ + ttm_bo_release_mmap(bo); ttm_mem_io_free_vm(bo); } @@ -1735,7 +1801,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) lockmgr(&glob->lru_lock, LK_EXCLUSIVE); list_for_each_entry(bo, &glob->swap_lru, swap) { - ret = ttm_bo_reserve_locked(bo, false, true, false, 0); + ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); if (!ret) break; } diff --git a/sys/dev/drm/ttm/ttm_bo_driver.h b/sys/dev/drm/ttm/ttm_bo_driver.h index fe4873848d..5ce1d62c91 100644 --- a/sys/dev/drm/ttm/ttm_bo_driver.h +++ b/sys/dev/drm/ttm/ttm_bo_driver.h @@ -790,16 +790,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); * to make room for a buffer already reserved. (Buffers are reserved before * they are evicted). The following algorithm prevents such deadlocks from * occurring: - * 1) Buffers are reserved with the lru spinlock held. Upon successful - * reservation they are removed from the lru list. 
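/*
 * Sketch of the retry added to ttm_bo_delayed_delete() above: delayed
 * destruction first tries a non-blocking reservation and only falls back to a
 * blocking one when the caller asked for remove_all, so the routine cleanup
 * pass never stalls behind a busy buffer.  try_reserve()/reserve_wait() are
 * hypothetical stand-ins for ttm_bo_reserve_nolru() with and without no_wait.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct bo {
	bool busy;
};

static int
try_reserve(struct bo *bo)		/* the no_wait variant */
{
	return bo->busy ? -EBUSY : 0;
}

static int
reserve_wait(struct bo *bo)		/* the blocking variant; always wins here */
{
	bo->busy = false;
	return 0;
}

static int
delayed_delete_one(struct bo *bo, bool remove_all)
{
	int ret;

	ret = try_reserve(bo);
	if (remove_all && ret)		/* forced teardown: wait for the buffer */
		ret = reserve_wait(bo);
	if (ret)
		return ret;		/* still busy: leave it queued for later */

	printf("cleaning up buffer\n");
	return 0;
}

int
main(void)
{
	struct bo busy_bo = { .busy = true };

	printf("lazy pass returned  %d\n", delayed_delete_one(&busy_bo, false));
	printf("final pass returned %d\n", delayed_delete_one(&busy_bo, true));
	return 0;
}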
This stops a reserved buffer - * from being evicted. However the lru spinlock is released between the time - * a buffer is selected for eviction and the time it is reserved. - * Therefore a check is made when a buffer is reserved for eviction, that it - * is still the first buffer in the lru list, before it is removed from the - * list. @check_lru == 1 forces this check. If it fails, the function returns - * -EINVAL, and the caller should then choose a new buffer to evict and repeat - * the procedure. - * 2) Processes attempting to reserve multiple buffers other than for eviction, + * Processes attempting to reserve multiple buffers other than for eviction, * (typically execbuf), should first obtain a unique 32-bit * validation sequence number, * and call this function with @use_sequence == 1 and @sequence == the unique @@ -830,9 +821,39 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence); +/** + * ttm_bo_reserve_slowpath_nolru: + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @sequence: Set (@bo)->sequence to this value after lock + * + * This is called after ttm_bo_reserve returns -EAGAIN and we backed off + * from all our other reservations. Because there are no other reservations + * held by us, this function cannot deadlock any more. + * + * Will not remove reserved buffers from the lru lists. + * Otherwise identical to ttm_bo_reserve_slowpath. + */ +extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, + bool interruptible, + uint32_t sequence); + /** - * ttm_bo_reserve_locked: + * ttm_bo_reserve_slowpath: + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @sequence: Set (@bo)->sequence to this value after lock + * + * This is called after ttm_bo_reserve returns -EAGAIN and we backed off + * from all our other reservations. Because there are no other reservations + * held by us, this function cannot deadlock any more. + */ +extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, + bool interruptible, uint32_t sequence); + +/** + * ttm_bo_reserve_nolru: * * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. @@ -840,9 +861,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, * @use_sequence: If @bo is already reserved, Only sleep waiting for * it to become unreserved if @sequence < (@bo)->sequence. * - * Must be called with struct ttm_bo_global::lru_lock held, - * and will not remove reserved buffers from the lru lists. - * The function may release the LRU spinlock if it needs to sleep. + * Will not remove reserved buffers from the lru lists. * Otherwise identical to ttm_bo_reserve. * * Returns: @@ -855,7 +874,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, * -EDEADLK: Bo already reserved using @sequence. This error code will only * be returned if @use_sequence is set to true. */ -extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, +extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence); @@ -879,18 +898,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); */ extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo); -/** - * ttm_bo_wait_unreserved - * - * @bo: A pointer to a struct ttm_buffer_object. - * - * Wait for a struct ttm_buffer_object to become unreserved. 
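/*
 * Sketch of the LRU bookkeeping that separates ttm_bo_reserve() from the
 * _nolru variants documented above: a successful reserve takes the buffer off
 * the global LRU list so the eviction and swap paths cannot pick it, and
 * unreserve puts it back at the tail.  A sys/queue.h TAILQ and a bare struct
 * stand in for the ttm_bo_global LRU list and struct ttm_buffer_object.
 */
#include <sys/queue.h>
#include <stdbool.h>
#include <stdio.h>

struct bo {
	int id;
	bool reserved;
	TAILQ_ENTRY(bo) lru;
};

static TAILQ_HEAD(, bo) lru_list = TAILQ_HEAD_INITIALIZER(lru_list);

static void
bo_reserve(struct bo *bo)
{
	bo->reserved = true;
	TAILQ_REMOVE(&lru_list, bo, lru);	/* reserved buffers are not evictable */
}

static void
bo_unreserve(struct bo *bo)
{
	bo->reserved = false;
	TAILQ_INSERT_TAIL(&lru_list, bo, lru);	/* back on the LRU as most recent */
}

/* The eviction path only ever sees buffers that are still on the list. */
static struct bo *
pick_eviction_victim(void)
{
	return TAILQ_FIRST(&lru_list);
}

int
main(void)
{
	struct bo a = { .id = 0 }, b = { .id = 1 };

	TAILQ_INSERT_TAIL(&lru_list, &a, lru);
	TAILQ_INSERT_TAIL(&lru_list, &b, lru);

	bo_reserve(&a);				/* a is in use: off the list */
	printf("evict bo %d\n", pick_eviction_victim()->id);	/* b, never a */
	bo_unreserve(&a);
	return 0;
}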
- * This is typically used in the execbuf code to relax cpu-usage when - * a potential deadlock condition backoff. - */ -extern int ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, - bool interruptible); - /* * ttm_bo_util.c */ diff --git a/sys/dev/drm/ttm/ttm_bo_util.c b/sys/dev/drm/ttm/ttm_bo_util.c index ab560f229a..735acb408e 100644 --- a/sys/dev/drm/ttm/ttm_bo_util.c +++ b/sys/dev/drm/ttm/ttm_bo_util.c @@ -321,8 +321,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, if (ttm->state == tt_unpopulated) { ret = ttm->bdev->driver->ttm_tt_populate(ttm); - if (ret) + if (ret) { + /* if we fail here don't nuke the mm node + * as the bo still owns it */ + old_copy.mm_node = NULL; goto out1; + } } add = 0; @@ -346,8 +350,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, prot); } else ret = ttm_copy_io_page(new_iomap, old_iomap, page); - if (ret) + if (ret) { + /* failing here, means keep old copy as-is */ + old_copy.mm_node = NULL; goto out1; + } } cpu_mfence(); out2: @@ -393,11 +400,13 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo) static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, - void *sync_obj, struct ttm_buffer_object **new_obj) + struct ttm_buffer_object **new_obj) { struct ttm_buffer_object *fbo; + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_driver *driver = bdev->driver; - fbo = kmalloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_ZERO | M_WAITOK); + fbo = kmalloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_WAITOK); *fbo = *bo; /** @@ -412,7 +421,12 @@ ttm_buffer_object_transfer(struct ttm_buffer_object *bo, fbo->vm_node = NULL; atomic_set(&fbo->cpu_writers, 0); - fbo->sync_obj = sync_obj; + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); + if (bo->sync_obj) + fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); + else + fbo->sync_obj = NULL; + lockmgr(&bdev->fence_lock, LK_RELEASE); refcount_init(&fbo->list_kref, 1); refcount_init(&fbo->kref, 1); fbo->destroy = &ttm_transfered_destroy; @@ -593,7 +607,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, int ret; struct ttm_buffer_object *ghost_obj; void *tmp_obj = NULL; - void *sync_obj_ref; lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); if (bo->sync_obj) { @@ -626,14 +639,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, */ set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - - sync_obj_ref = bo->bdev->driver->sync_obj_ref(bo->sync_obj); lockmgr(&bdev->fence_lock, LK_RELEASE); - /* ttm_buffer_object_transfer accesses bo->sync_obj */ - ret = ttm_buffer_object_transfer(bo, sync_obj_ref, &ghost_obj); if (tmp_obj) driver->sync_obj_unref(&tmp_obj); + ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) return ret; diff --git a/sys/dev/drm/ttm/ttm_bo_vm.c b/sys/dev/drm/ttm/ttm_bo_vm.c index 8f8cedb351..c5c4eb63f2 100644 --- a/sys/dev/drm/ttm/ttm_bo_vm.c +++ b/sys/dev/drm/ttm/ttm_bo_vm.c @@ -150,7 +150,23 @@ reserve: lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { - ret = ttm_bo_wait(bo, false, true, false); + /* + * Here, the behavior differs between Linux and FreeBSD. + * + * On Linux, the wait is interruptible (3rd argument to + * ttm_bo_wait). There must be some mechanism to resume + * page fault handling, once the signal is processed. + * + * On FreeBSD, the wait is uninteruptible. This is not a + * problem as we can't end up with an unkillable process + * here, because the wait will eventually time out. + * + * An example of this situation is the Xorg process + * which uses SIGALRM internally. 
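/*
 * Sketch of the refcounting change in ttm_buffer_object_transfer() above: the
 * ghost object created for a pipelined move now takes its own reference on
 * the buffer's fence (sync_obj) while the fence lock is held, instead of the
 * caller passing a pre-taken reference in.  A counted "fence" and a pthread
 * mutex stand in for the driver's sync_obj_ref()/sync_obj_unref() hooks and
 * bdev->fence_lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refs;
};

static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;

static struct fence *
fence_ref(struct fence *f)
{
	f->refs++;
	return f;
}

static void
fence_unref(struct fence **fp)
{
	struct fence *f = *fp;

	*fp = NULL;
	if (--f->refs == 0) {
		printf("fence freed\n");
		free(f);
	}
}

struct bo {
	struct fence *sync_obj;
};

/* Give the ghost its own fence reference, taken under the fence lock. */
static void
transfer_to_ghost(struct bo *bo, struct bo *ghost)
{
	pthread_mutex_lock(&fence_lock);
	ghost->sync_obj = bo->sync_obj ? fence_ref(bo->sync_obj) : NULL;
	pthread_mutex_unlock(&fence_lock);
}

int
main(void)
{
	struct fence *f = calloc(1, sizeof(*f));
	struct bo bo = { .sync_obj = fence_ref(f) };	/* refs = 1 */
	struct bo ghost = { .sync_obj = NULL };

	transfer_to_ghost(&bo, &ghost);			/* refs = 2 */
	fence_unref(&bo.sync_obj);			/* original drops: fence lives on */
	fence_unref(&ghost.sync_obj);			/* ghost drops: fence is freed */
	return 0;
}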
The signal could + * interrupt the wait, causing the page fault to fail + * and the process to receive SIGSEGV. + */ + ret = ttm_bo_wait(bo, false, false, false); lockmgr(&bdev->fence_lock, LK_RELEASE); if (unlikely(ret != 0)) { retval = VM_PAGER_ERROR; @@ -255,8 +271,16 @@ ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, { /* - * We don't acquire a reference on bo->kref here, because it was - * already done in ttm_bo_mmap_single(). + * On Linux, a reference to the buffer object is acquired here. + * The reason is that this function is not called when the + * mmap() is initialized, but only when a process forks for + * instance. Therefore on Linux, the reference on the bo is + * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's + * then released in ttm_bo_vm_close(). + * + * Here, this function is called during mmap() intialization. + * Thus, the reference acquired in ttm_bo_mmap_single() is + * sufficient. */ *color = 0; @@ -323,6 +347,29 @@ out_unref: return ret; } +void +ttm_bo_release_mmap(struct ttm_buffer_object *bo) +{ + vm_object_t vm_obj; + vm_page_t m; + int i; + + vm_obj = cdev_pager_lookup(bo); + if (vm_obj == NULL) + return; + + VM_OBJECT_WLOCK(vm_obj); + for (i = 0; i < bo->num_pages; i++) { + m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm"); + if (m == NULL) + continue; + cdev_pager_free_page(vm_obj, m); + } + VM_OBJECT_WUNLOCK(vm_obj); + + vm_object_deallocate(vm_obj); +} + #if 0 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) { diff --git a/sys/dev/drm/ttm/ttm_execbuf_util.c b/sys/dev/drm/ttm/ttm_execbuf_util.c index e231e0dfaa..825472e658 100644 --- a/sys/dev/drm/ttm/ttm_execbuf_util.c +++ b/sys/dev/drm/ttm/ttm_execbuf_util.c @@ -81,19 +81,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list) } } -static int ttm_eu_wait_unreserved_locked(struct list_head *list, - struct ttm_buffer_object *bo) -{ - int ret; - - ttm_eu_del_from_lru_locked(list); - ret = ttm_bo_wait_unreserved_locked(bo, true); - if (unlikely(ret != 0)) - ttm_eu_backoff_reservation_locked(list); - return ret; -} - - void ttm_eu_backoff_reservation(struct list_head *list) { struct ttm_validate_buffer *entry; @@ -141,47 +128,62 @@ int ttm_eu_reserve_buffers(struct list_head *list) glob = entry->bo->glob; lockmgr(&glob->lru_lock, LK_EXCLUSIVE); -retry_locked: val_seq = entry->bo->bdev->val_seq++; +retry_locked: list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; -retry_this_bo: - ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq); + /* already slowpath reserved? */ + if (entry->reserved) + continue; + + ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq); switch (ret) { case 0: break; case -EBUSY: - ret = ttm_eu_wait_unreserved_locked(list, bo); - if (unlikely(ret != 0)) { - lockmgr(&glob->lru_lock, LK_RELEASE); - ttm_eu_list_ref_sub(list); - return ret; - } - goto retry_this_bo; + ttm_eu_del_from_lru_locked(list); + ret = ttm_bo_reserve_nolru(bo, true, false, + true, val_seq); + if (!ret) + break; + + if (unlikely(ret != -EAGAIN)) + goto err; + + /* fallthrough */ case -EAGAIN: ttm_eu_backoff_reservation_locked(list); + + /* + * temporarily increase sequence number every retry, + * to prevent us from seeing our old reservation + * sequence when someone else reserved the buffer, + * but hasn't updated the seq_valid/seqno members yet. 
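/*
 * Sketch of the control flow ttm_eu_reserve_buffers() follows after the
 * rework above: reserve every buffer on the list under one validation
 * sequence; when ttm_bo_reserve_nolru() reports -EAGAIN (an older reserver
 * won the tie-break), back off everything held, take the contended buffer
 * through the slowpath so we wait our turn, keep it reserved, bump the
 * sequence and retry the rest of the list.  The ttm_*_stub() functions are
 * local stand-ins, not the kernel implementations, so the flow can be
 * compiled and traced on its own.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bo {
	int  id;
	bool reserved;
};

static int
ttm_bo_reserve_stub(struct bo *bo, uint32_t seq)
{
	static bool lose_once = true;

	if (bo->id == 1 && lose_once) {		/* simulate losing one tie-break */
		lose_once = false;
		return -EAGAIN;
	}
	bo->reserved = true;
	printf("  reserved bo %d (seq %u)\n", bo->id, seq);
	return 0;
}

static void
ttm_bo_unreserve_stub(struct bo *bo)
{
	bo->reserved = false;
	printf("  released bo %d\n", bo->id);
}

static int
ttm_bo_reserve_slowpath_stub(struct bo *bo, uint32_t seq)
{
	bo->reserved = true;			/* blocks until it is our turn */
	printf("  slowpath reserved bo %d (seq %u)\n", bo->id, seq);
	return 0;
}

static int
reserve_list(struct bo *bos, int n, uint32_t *val_seq)
{
	int i, j, ret;

retry:
	(*val_seq)++;				/* fresh sequence for every pass */
	for (i = 0; i < n; i++) {
		if (bos[i].reserved)		/* kept from an earlier slowpath */
			continue;
		ret = ttm_bo_reserve_stub(&bos[i], *val_seq);
		if (ret == -EAGAIN) {
			for (j = 0; j < n; j++)	/* back off everything we hold */
				if (bos[j].reserved)
					ttm_bo_unreserve_stub(&bos[j]);
			ret = ttm_bo_reserve_slowpath_stub(&bos[i], ++(*val_seq));
			if (ret)
				return ret;
			goto retry;
		} else if (ret) {
			return ret;
		}
	}
	return 0;
}

int
main(void)
{
	struct bo bos[] = { { 0, false }, { 1, false }, { 2, false } };
	uint32_t seq = 0;

	return reserve_list(bos, 3, &seq);
}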
+ */ + val_seq = entry->bo->bdev->val_seq++; + ttm_eu_list_ref_sub(list); - ret = ttm_bo_wait_unreserved_locked(bo, true); + ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq); if (unlikely(ret != 0)) { lockmgr(&glob->lru_lock, LK_RELEASE); return ret; } + entry->reserved = true; + if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { + ret = -EBUSY; + goto err; + } goto retry_locked; default: - ttm_eu_backoff_reservation_locked(list); - lockmgr(&glob->lru_lock, LK_RELEASE); - ttm_eu_list_ref_sub(list); - return ret; + goto err; } entry->reserved = true; if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { - ttm_eu_backoff_reservation_locked(list); - lockmgr(&glob->lru_lock, LK_RELEASE); - ttm_eu_list_ref_sub(list); - return -EBUSY; + ret = -EBUSY; + goto err; } } @@ -190,6 +192,12 @@ retry_this_bo: ttm_eu_list_ref_sub(list); return 0; + +err: + ttm_eu_backoff_reservation_locked(list); + lockmgr(&glob->lru_lock, LK_RELEASE); + ttm_eu_list_ref_sub(list); + return ret; } void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) diff --git a/sys/dev/drm/ttm/ttm_page_alloc.c b/sys/dev/drm/ttm/ttm_page_alloc.c index 8cac8de8da..61bace5659 100644 --- a/sys/dev/drm/ttm/ttm_page_alloc.c +++ b/sys/dev/drm/ttm/ttm_page_alloc.c @@ -50,8 +50,6 @@ #include #endif -#define VM_ALLOC_DMA32 VM_ALLOC_RESERVED1 - #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(vm_page_t)) #define SMALL_ALLOCATION 16 #define FREE_ALL_PAGES (~0U) @@ -323,6 +321,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) vm_page_t *pages_to_free; unsigned freed_pages = 0, npages_to_free = nr_free; + unsigned i; if (NUM_PAGES_TO_ALLOC < nr_free) npages_to_free = NUM_PAGES_TO_ALLOC; @@ -341,7 +340,8 @@ restart: /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ if (freed_pages >= NUM_PAGES_TO_ALLOC) { /* remove range of pages from the pool */ - TAILQ_REMOVE(&pool->list, p, pageq); + for (i = 0; i < freed_pages; i++) + TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq); ttm_pool_update_free_locked(pool, freed_pages); /** @@ -376,7 +376,8 @@ restart: /* remove range of pages from the pool */ if (freed_pages) { - TAILQ_REMOVE(&pool->list, p, pageq); + for (i = 0; i < freed_pages; i++) + TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq); ttm_pool_update_free_locked(pool, freed_pages); nr_free -= freed_pages; diff --git a/sys/dev/drm/ttm/ttm_tt.c b/sys/dev/drm/ttm/ttm_tt.c index 687744be9d..e54fad16a7 100644 --- a/sys/dev/drm/ttm/ttm_tt.c +++ b/sys/dev/drm/ttm/ttm_tt.c @@ -355,11 +355,11 @@ int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage) from_page = ttm->pages[i]; if (unlikely(from_page == NULL)) continue; - to_page = vm_page_grab(obj, i, VM_ALLOC_RETRY); + to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL); pmap_copy_page(VM_PAGE_TO_PHYS(from_page), VM_PAGE_TO_PHYS(to_page)); - vm_page_dirty(to_page); to_page->valid = VM_PAGE_BITS_ALL; + vm_page_dirty(to_page); vm_page_wakeup(to_page); } vm_object_pip_wakeup(obj); -- 2.41.0