author	kib <kib@FreeBSD.org>	2012-05-22 11:07:44 +0000
committer	kib <kib@FreeBSD.org>	2012-05-22 11:07:44 +0000
commit	d1d8560148b9e67eb619ed84268d3fd537ef93e0 (patch)
tree	56fbf2ef5c084cabbc681783ee59c062a6c18b92
parent	a86ecb8bca0f478cb6325ffbbf205725977ce44f (diff)
Add the code for new Intel GPU driver, which supports GEM, KMS and
works with new generations of GPUs (IronLake, SandyBridge and
supposedly IvyBridge). The driver is not connected to the build yet.

Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
-rw-r--r--  sys/dev/drm2/drm.h  1214
-rw-r--r--  sys/dev/drm2/drmP.h  1400
-rw-r--r--  sys/dev/drm2/drm_agpsupport.c  434
-rw-r--r--  sys/dev/drm2/drm_atomic.h  93
-rw-r--r--  sys/dev/drm2/drm_auth.c  190
-rw-r--r--  sys/dev/drm2/drm_bufs.c  1130
-rw-r--r--  sys/dev/drm2/drm_context.c  312
-rw-r--r--  sys/dev/drm2/drm_crtc.c  3413
-rw-r--r--  sys/dev/drm2/drm_crtc.h  935
-rw-r--r--  sys/dev/drm2/drm_crtc_helper.c  1043
-rw-r--r--  sys/dev/drm2/drm_crtc_helper.h  146
-rw-r--r--  sys/dev/drm2/drm_dma.c  139
-rw-r--r--  sys/dev/drm2/drm_dp_helper.h  250
-rw-r--r--  sys/dev/drm2/drm_dp_iic_helper.c  292
-rw-r--r--  sys/dev/drm2/drm_drawable.c  173
-rw-r--r--  sys/dev/drm2/drm_drv.c  980
-rw-r--r--  sys/dev/drm2/drm_edid.c  1781
-rw-r--r--  sys/dev/drm2/drm_edid.h  244
-rw-r--r--  sys/dev/drm2/drm_edid_modes.h  381
-rw-r--r--  sys/dev/drm2/drm_fb_helper.c  1568
-rw-r--r--  sys/dev/drm2/drm_fb_helper.h  141
-rw-r--r--  sys/dev/drm2/drm_fops.c  202
-rw-r--r--  sys/dev/drm2/drm_fourcc.h  139
-rw-r--r--  sys/dev/drm2/drm_gem.c  487
-rw-r--r--  sys/dev/drm2/drm_gem_names.c  211
-rw-r--r--  sys/dev/drm2/drm_gem_names.h  64
-rw-r--r--  sys/dev/drm2/drm_hashtab.c  181
-rw-r--r--  sys/dev/drm2/drm_hashtab.h  68
-rw-r--r--  sys/dev/drm2/drm_internal.h  43
-rw-r--r--  sys/dev/drm2/drm_ioctl.c  320
-rw-r--r--  sys/dev/drm2/drm_irq.c  1253
-rw-r--r--  sys/dev/drm2/drm_linux_list.h  177
-rw-r--r--  sys/dev/drm2/drm_linux_list_sort.c  75
-rw-r--r--  sys/dev/drm2/drm_lock.c  199
-rw-r--r--  sys/dev/drm2/drm_memory.c  127
-rw-r--r--  sys/dev/drm2/drm_mm.c  563
-rw-r--r--  sys/dev/drm2/drm_mm.h  185
-rw-r--r--  sys/dev/drm2/drm_mode.h  444
-rw-r--r--  sys/dev/drm2/drm_modes.c  1147
-rw-r--r--  sys/dev/drm2/drm_pci.c  125
-rw-r--r--  sys/dev/drm2/drm_pciids.h  764
-rw-r--r--  sys/dev/drm2/drm_sarea.h  87
-rw-r--r--  sys/dev/drm2/drm_scatter.c  129
-rw-r--r--  sys/dev/drm2/drm_sman.c  352
-rw-r--r--  sys/dev/drm2/drm_sman.h  181
-rw-r--r--  sys/dev/drm2/drm_stub.c  60
-rw-r--r--  sys/dev/drm2/drm_sysctl.c  364
-rw-r--r--  sys/dev/drm2/drm_vm.c  134
-rw-r--r--  sys/dev/drm2/i915/i915_debug.c  1683
-rw-r--r--  sys/dev/drm2/i915/i915_dma.c  2075
-rw-r--r--  sys/dev/drm2/i915/i915_drm.h  971
-rw-r--r--  sys/dev/drm2/i915/i915_drv.c  821
-rw-r--r--  sys/dev/drm2/i915/i915_drv.h  1481
-rw-r--r--  sys/dev/drm2/i915/i915_gem.c  3760
-rw-r--r--  sys/dev/drm2/i915/i915_gem_evict.c  213
-rw-r--r--  sys/dev/drm2/i915/i915_gem_execbuffer.c  1528
-rw-r--r--  sys/dev/drm2/i915/i915_gem_gtt.c  329
-rw-r--r--  sys/dev/drm2/i915/i915_gem_tiling.c  495
-rw-r--r--  sys/dev/drm2/i915/i915_irq.c  2278
-rw-r--r--  sys/dev/drm2/i915/i915_reg.h  3876
-rw-r--r--  sys/dev/drm2/i915/i915_suspend.c  909
-rw-r--r--  sys/dev/drm2/i915/intel_bios.c  737
-rw-r--r--  sys/dev/drm2/i915/intel_bios.h  620
-rw-r--r--  sys/dev/drm2/i915/intel_crt.c  624
-rw-r--r--  sys/dev/drm2/i915/intel_display.c  9532
-rw-r--r--  sys/dev/drm2/i915/intel_dp.c  2562
-rw-r--r--  sys/dev/drm2/i915/intel_drv.h  428
-rw-r--r--  sys/dev/drm2/i915/intel_fb.c  270
-rw-r--r--  sys/dev/drm2/i915/intel_hdmi.c  576
-rw-r--r--  sys/dev/drm2/i915/intel_iic.c  716
-rw-r--r--  sys/dev/drm2/i915/intel_lvds.c  1125
-rw-r--r--  sys/dev/drm2/i915/intel_modes.c  143
-rw-r--r--  sys/dev/drm2/i915/intel_opregion.c  550
-rw-r--r--  sys/dev/drm2/i915/intel_overlay.c  1582
-rw-r--r--  sys/dev/drm2/i915/intel_panel.c  327
-rw-r--r--  sys/dev/drm2/i915/intel_ringbuffer.c  1623
-rw-r--r--  sys/dev/drm2/i915/intel_ringbuffer.h  203
-rw-r--r--  sys/dev/drm2/i915/intel_sdvo.c  2680
-rw-r--r--  sys/dev/drm2/i915/intel_sdvo_regs.h  725
-rw-r--r--  sys/dev/drm2/i915/intel_sprite.c  669
-rw-r--r--  sys/dev/drm2/i915/intel_tv.c  1609
81 files changed, 72060 insertions, 0 deletions
diff --git a/sys/dev/drm2/drm.h b/sys/dev/drm2/drm.h
new file mode 100644
index 0000000..d7e57ad
--- /dev/null
+++ b/sys/dev/drm2/drm.h
@@ -0,0 +1,1214 @@
+/**
+ * \file drm.h
+ * Header for the Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * \par Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ */
+
+/*-
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/**
+ * \mainpage
+ *
+ * The Direct Rendering Manager (DRM) is a device-independent kernel-level
+ * device driver that provides support for the XFree86 Direct Rendering
+ * Infrastructure (DRI).
+ *
+ * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
+ * ways:
+ * -# The DRM provides synchronized access to the graphics hardware via
+ * the use of an optimized two-tiered lock.
+ * -# The DRM enforces the DRI security policy for access to the graphics
+ * hardware by only allowing authenticated X11 clients access to
+ * restricted regions of memory.
+ * -# The DRM provides a generic DMA engine, complete with multiple
+ * queues and the ability to detect the need for an OpenGL context
+ * switch.
+ * -# The DRM is extensible via the use of small device-specific modules
+ * that rely extensively on the API exported by the DRM module.
+ *
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#ifndef __user
+#define __user
+#endif
+#ifndef __iomem
+#define __iomem
+#endif
+
+#ifdef __GNUC__
+# define DEPRECATED __attribute__ ((deprecated))
+#else
+# define DEPRECATED
+#endif
+
+#if defined(__linux__)
+#include <asm/ioctl.h> /* For _IO* macros */
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOC_VOID _IOC_NONE
+#define DRM_IOC_READ _IOC_READ
+#define DRM_IOC_WRITE _IOC_WRITE
+#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
+#include <sys/ioccom.h>
+#define DRM_IOCTL_NR(n) ((n) & 0xff)
+#define DRM_IOC_VOID IOC_VOID
+#define DRM_IOC_READ IOC_OUT
+#define DRM_IOC_WRITE IOC_IN
+#define DRM_IOC_READWRITE IOC_INOUT
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#endif
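+
+/*
+ * Usage sketch (illustrative): DRM_IOCTL_NR() strips the direction and
+ * size bits from an encoded ioctl command, so the low byte can be used
+ * as an index into an ioctl table.  For example, DRM_IOCTL_VERSION
+ * (defined below) is encoded with nr 0x00:
+ *
+ *	DRM_IOCTL_NR(DRM_IOCTL_VERSION) == 0x00
+ */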
+
+#ifdef __OpenBSD__
+#define DRM_MAJOR 81
+#endif
+#if defined(__linux__) || defined(__NetBSD__)
+#define DRM_MAJOR 226
+#endif
+#define DRM_MAX_MINOR 15
+
+#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
+#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
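+
+/*
+ * Example (sketch): a lock word of 0x80000005 encodes "held, not
+ * contended, owned by context 5":
+ *
+ *	_DRM_LOCK_IS_HELD(0x80000005U)    -> nonzero
+ *	_DRM_LOCK_IS_CONT(0x80000005U)    -> 0
+ *	_DRM_LOCKING_CONTEXT(0x80000005U) -> 5
+ */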
+
+#if defined(__linux__)
+typedef unsigned int drm_handle_t;
+#else
+#include <sys/types.h>
+typedef unsigned long drm_handle_t; /**< To mapped regions */
+#endif
+typedef unsigned int drm_context_t; /**< GLXContext handle */
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t; /**< Magic for authentication */
+
+/**
+ * Cliprect.
+ *
+ * \warning If you change this structure, make sure you change
+ * XF86DRIClipRectRec in the server as well
+ *
+ * \note KW: Actually it's illegal to change either for
+ * backwards-compatibility reasons.
+ */
+struct drm_clip_rect {
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
+};
+
+/**
+ * Texture region.
+ */
+struct drm_tex_region {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding;
+ unsigned int age;
+};
+
+/**
+ * Hardware lock.
+ *
+ * The lock structure is a simple cache-line aligned integer. To avoid
+ * processor bus contention on a multiprocessor system, there should not be any
+ * other data stored in the same cache line.
+ */
+struct drm_hw_lock {
+ __volatile__ unsigned int lock; /**< lock variable */
+ char padding[60]; /**< Pad to cache line */
+};
+
+/* This is beyond ugly, and only works on GCC. However, it allows me to use
+ * drm.h in places (i.e., in the X-server) where I can't use size_t. The real
+ * fix is to use uint32_t instead of size_t, but that fix will break existing
+ * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems. That *will*
+ * eventually happen, though. I chose 'unsigned long' to be the fallback type
+ * because that works on all the platforms I know about. Hopefully, the
+ * real fix will happen before that bites us.
+ */
+
+#ifdef __SIZE_TYPE__
+# define DRM_SIZE_T __SIZE_TYPE__
+#else
+# warning "__SIZE_TYPE__ not defined. Assuming sizeof(size_t) == sizeof(unsigned long)!"
+# define DRM_SIZE_T unsigned long
+#endif
+
+/**
+ * DRM_IOCTL_VERSION ioctl argument type.
+ *
+ * \sa drmGetVersion().
+ */
+struct drm_version {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ DRM_SIZE_T name_len; /**< Length of name buffer */
+ char __user *name; /**< Name of driver */
+ DRM_SIZE_T date_len; /**< Length of date buffer */
+ char __user *date; /**< User-space buffer to hold date */
+ DRM_SIZE_T desc_len; /**< Length of desc buffer */
+ char __user *desc; /**< User-space buffer to hold desc */
+};
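+
+/*
+ * Typical user-space pattern (sketch; fd is assumed to be an open DRM
+ * device): call DRM_IOCTL_VERSION (defined below) once with NULL buffers
+ * to learn the string lengths, allocate, then call again:
+ *
+ *	struct drm_version v = { 0 };
+ *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// fills the *_len fields
+ *	v.name = malloc(v.name_len + 1);
+ *	v.date = malloc(v.date_len + 1);
+ *	v.desc = malloc(v.desc_len + 1);
+ *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// fills the buffers
+ */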
+
+/**
+ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
+ *
+ * \sa drmGetBusid() and drmSetBusId().
+ */
+struct drm_unique {
+ DRM_SIZE_T unique_len; /**< Length of unique */
+ char __user *unique; /**< Unique name for driver instantiation */
+};
+
+#undef DRM_SIZE_T
+
+struct drm_list {
+ int count; /**< Length of user-space structures */
+ struct drm_version __user *version;
+};
+
+struct drm_block {
+ int unused;
+};
+
+/**
+ * DRM_IOCTL_CONTROL ioctl argument type.
+ *
+ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
+ */
+struct drm_control {
+ enum {
+ DRM_ADD_COMMAND,
+ DRM_RM_COMMAND,
+ DRM_INST_HANDLER,
+ DRM_UNINST_HANDLER
+ } func;
+ int irq;
+};
+
+/**
+ * Type of memory to map.
+ */
+enum drm_map_type {
+ _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /**< no caching, no core dump */
+ _DRM_SHM = 2, /**< shared, cached */
+ _DRM_AGP = 3, /**< AGP/GART */
+ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
+ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
+ _DRM_GEM = 6 /**< GEM */
+};
+
+/**
+ * Memory mapping flags.
+ */
+enum drm_map_flags {
+ _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
+ _DRM_READ_ONLY = 0x02,
+ _DRM_LOCKED = 0x04, /**< shared, cached, locked */
+ _DRM_KERNEL = 0x08, /**< kernel requires access */
+ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
+ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
+};
+
+struct drm_ctx_priv_map {
+ unsigned int ctx_id; /**< Context requesting private mapping */
+ void *handle; /**< Handle of map */
+};
+
+/**
+ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
+ * argument type.
+ *
+ * \sa drmAddMap().
+ */
+struct drm_map {
+ unsigned long offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ void *handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+ /* Private data */
+};
+
+/**
+ * DRM_IOCTL_GET_CLIENT ioctl argument type.
+ */
+struct drm_client {
+ int idx; /**< Which client desired? */
+ int auth; /**< Is client authenticated? */
+ unsigned long pid; /**< Process ID */
+ unsigned long uid; /**< User ID */
+ unsigned long magic; /**< Magic */
+ unsigned long iocs; /**< Ioctl count */
+};
+
+enum drm_stat_type {
+ _DRM_STAT_LOCK,
+ _DRM_STAT_OPENS,
+ _DRM_STAT_CLOSES,
+ _DRM_STAT_IOCTLS,
+ _DRM_STAT_LOCKS,
+ _DRM_STAT_UNLOCKS,
+ _DRM_STAT_VALUE, /**< Generic value */
+ _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
+ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
+
+ _DRM_STAT_IRQ, /**< IRQ */
+ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
+ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
+ _DRM_STAT_DMA, /**< DMA */
+ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
+ _DRM_STAT_MISSED /**< Missed DMA opportunity */
+ /* Add to the *END* of the list */
+};
+
+/**
+ * DRM_IOCTL_GET_STATS ioctl argument type.
+ */
+struct drm_stats {
+ unsigned long count;
+ struct {
+ unsigned long value;
+ enum drm_stat_type type;
+ } data[15];
+};
+
+/**
+ * Hardware locking flags.
+ */
+enum drm_lock_flags {
+ _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+};
+
+/**
+ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
+ *
+ * \sa drmGetLock() and drmUnlock().
+ */
+struct drm_lock {
+ int context;
+ enum drm_lock_flags flags;
+};
+
+/**
+ * DMA flags
+ *
+ * \warning
+ * These values \e must match xf86drm.h.
+ *
+ * \sa drm_dma.
+ */
+enum drm_dma_flags {
+ /* Flags for DMA buffer dispatch */
+ _DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note The buffer may not yet have
+ * been processed by the hardware --
+ * getting a hardware lock with the
+ * hardware quiescent will ensure
+ * that the buffer has been
+ * processed.
+ */
+ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+
+ /* Flags for DMA buffer request */
+ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+};
+
+/**
+ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
+ *
+ * \sa drmAddBufs().
+ */
+struct drm_buf_desc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+ enum {
+ _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
+ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
+ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
+ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
+ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
+ } flags;
+ unsigned long agp_start; /**<
+ * Start address of where the AGP buffers are
+ * in the AGP aperture
+ */
+};
+
+/**
+ * DRM_IOCTL_INFO_BUFS ioctl argument type.
+ */
+struct drm_buf_info {
+ int count; /**< Number of buffers described in list */
+ struct drm_buf_desc __user *list; /**< List of buffer descriptions */
+};
+
+/**
+ * DRM_IOCTL_FREE_BUFS ioctl argument type.
+ */
+struct drm_buf_free {
+ int count;
+ int __user *list;
+};
+
+/**
+ * Buffer information
+ *
+ * \sa drm_buf_map.
+ */
+struct drm_buf_pub {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ void __user *address; /**< Address of buffer */
+};
+
+/**
+ * DRM_IOCTL_MAP_BUFS ioctl argument type.
+ */
+struct drm_buf_map {
+ int count; /**< Length of the buffer list */
+#if defined(__cplusplus)
+ void __user *c_virtual;
+#else
+ void __user *virtual; /**< Mmap'd area in user-virtual */
+#endif
+ struct drm_buf_pub __user *list; /**< Buffer information */
+};
+
+/**
+ * DRM_IOCTL_DMA ioctl argument type.
+ *
+ * Indices here refer to the offset into the buffer list in drm_buf_get.
+ *
+ * \sa drmDMA().
+ */
+struct drm_dma {
+ int context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int __user *send_indices; /**< List of handles to buffers */
+ int __user *send_sizes; /**< Lengths of data to send */
+ enum drm_dma_flags flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size for buffers */
+ int __user *request_indices; /**< Buffer information */
+ int __user *request_sizes;
+ int granted_count; /**< Number of buffers granted */
+};
+
+enum drm_ctx_flags {
+ _DRM_CONTEXT_PRESERVED = 0x01,
+ _DRM_CONTEXT_2DONLY = 0x02
+};
+
+/**
+ * DRM_IOCTL_ADD_CTX ioctl argument type.
+ *
+ * \sa drmCreateContext() and drmDestroyContext().
+ */
+struct drm_ctx {
+ drm_context_t handle;
+ enum drm_ctx_flags flags;
+};
+
+/**
+ * DRM_IOCTL_RES_CTX ioctl argument type.
+ */
+struct drm_ctx_res {
+ int count;
+ struct drm_ctx __user *contexts;
+};
+
+/**
+ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
+ */
+struct drm_draw {
+ drm_drawable_t handle;
+};
+
+/**
+ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
+ */
+typedef enum {
+ DRM_DRAWABLE_CLIPRECTS,
+} drm_drawable_info_type_t;
+
+struct drm_update_draw {
+ drm_drawable_t handle;
+ unsigned int type;
+ unsigned int num;
+ unsigned long long data;
+};
+
+/**
+ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
+ */
+struct drm_auth {
+ drm_magic_t magic;
+};
+
+/**
+ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
+ *
+ * \sa drmGetInterruptFromBusID().
+ */
+struct drm_irq_busid {
+ int irq; /**< IRQ number */
+ int busnum; /**< bus number */
+ int devnum; /**< device number */
+ int funcnum; /**< function number */
+};
+
+enum drm_vblank_seq_type {
+ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
+ _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
+ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
+};
+#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
+
+#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
+
+struct drm_wait_vblank_request {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ long tval_sec;
+ long tval_usec;
+};
+
+/**
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+union drm_wait_vblank {
+ struct drm_wait_vblank_request request;
+ struct drm_wait_vblank_reply reply;
+};
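+
+/*
+ * Example round trip (sketch; fd is a hypothetical open DRM device):
+ * the union is written as a request and read back as a reply by
+ * DRM_IOCTL_WAIT_VBLANK (defined below):
+ *
+ *	union drm_wait_vblank vbl;
+ *	vbl.request.type = _DRM_VBLANK_RELATIVE;
+ *	vbl.request.sequence = 1;		// wait for one vblank
+ *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
+ *	// vbl.reply.sequence now holds the vblank count at wakeup
+ */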
+
+
+#define _DRM_PRE_MODESET 1
+#define _DRM_POST_MODESET 2
+
+/**
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+struct drm_modeset_ctl {
+ uint32_t crtc;
+ uint32_t cmd;
+};
+
+/**
+ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
+ *
+ * \sa drmAgpEnable().
+ */
+struct drm_agp_mode {
+ unsigned long mode; /**< AGP mode */
+};
+
+/**
+ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
+ *
+ * \sa drmAgpAlloc() and drmAgpFree().
+ */
+struct drm_agp_buffer {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for binding / unbinding */
+ unsigned long type; /**< Type of memory to allocate */
+ unsigned long physical; /**< Physical used by i810 */
+};
+
+/**
+ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
+ *
+ * \sa drmAgpBind() and drmAgpUnbind().
+ */
+struct drm_agp_binding {
+ unsigned long handle; /**< From drm_agp_buffer */
+ unsigned long offset; /**< In bytes -- will round to page boundary */
+};
+
+/**
+ * DRM_IOCTL_AGP_INFO ioctl argument type.
+ *
+ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
+ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
+ * drmAgpVendorId() and drmAgpDeviceId().
+ */
+struct drm_agp_info {
+ int agp_version_major;
+ int agp_version_minor;
+ unsigned long mode;
+ unsigned long aperture_base; /**< physical address */
+ unsigned long aperture_size; /**< bytes */
+ unsigned long memory_allowed; /**< bytes */
+ unsigned long memory_used;
+
+ /** \name PCI information */
+ /*@{ */
+ unsigned short id_vendor;
+ unsigned short id_device;
+ /*@} */
+};
+
+/**
+ * DRM_IOCTL_SG_ALLOC ioctl argument type.
+ */
+struct drm_scatter_gather {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for mapping / unmapping */
+};
+
+/**
+ * DRM_IOCTL_SET_VERSION ioctl argument type.
+ */
+struct drm_set_version {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+};
+
+#define DRM_FENCE_FLAG_EMIT 0x00000001
+#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
+/**
+ * On hardware with no interrupt events for operation completion,
+ * indicates that the kernel should sleep while waiting for any blocking
+ * operation to complete rather than spinning.
+ *
+ * Has no effect otherwise.
+ */
+#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
+#define DRM_FENCE_FLAG_NO_USER 0x00000010
+
+/* Reserved for driver use */
+#define DRM_FENCE_MASK_DRIVER 0xFF000000
+
+#define DRM_FENCE_TYPE_EXE 0x00000001
+
+struct drm_fence_arg {
+ unsigned int handle;
+ unsigned int fence_class;
+ unsigned int type;
+ unsigned int flags;
+ unsigned int signaled;
+ unsigned int error;
+ unsigned int sequence;
+ unsigned int pad64;
+ uint64_t expand_pad[2]; /* Future expansion */
+};
+
+/* Buffer permissions, referring to how the GPU uses the buffers.
+ * These translate to the fence types used for the buffers.
+ * Typically a texture buffer is read, a destination buffer is write, and
+ * a command (batch) buffer is exe.  These can be or-ed together.
+ */
+
+#define DRM_BO_FLAG_READ (1ULL << 0)
+#define DRM_BO_FLAG_WRITE (1ULL << 1)
+#define DRM_BO_FLAG_EXE (1ULL << 2)
+
+/*
+ * All of the bits related to access mode
+ */
+#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
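+
+/*
+ * Example (sketch): a texture the GPU only reads combines an access bit
+ * with a memory-type flag and a placement flag defined below:
+ *
+ *	uint64_t flags = DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
+ *	    DRM_BO_FLAG_MAPPABLE;
+ */
+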
+/*
+ * Status flags. Can be read to determine the actual state of a buffer.
+ * Can also be set in the buffer mask before validation.
+ */
+
+/*
+ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
+ * available to root and must be manually removed before buffer manager shutdown
+ * or lock.
+ * Flags: Acknowledge
+ */
+#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
+
+/*
+ * Mask: Require that the buffer is placed in mappable memory when validated.
+ * If not set the buffer may or may not be in mappable memory when validated.
+ * Flags: If set, the buffer is in mappable memory.
+ */
+#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
+
+/* Mask: The buffer should be shareable with other processes.
+ * Flags: The buffer is shareable with other processes.
+ */
+#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
+
+/* Mask: If set, place the buffer in cache-coherent memory if available.
+ * If clear, never place the buffer in cache coherent memory if validated.
+ * Flags: The buffer is currently in cache-coherent memory.
+ */
+#define DRM_BO_FLAG_CACHED (1ULL << 7)
+
+/* Mask: Make sure that every time this buffer is validated,
+ * it ends up at the same location, provided that the memory mask is the same.
+ * The buffer will also not be evicted when claiming space for
+ * other buffers. Basically a pinned buffer but it may be thrown out as
+ * part of buffer manager shutdown or locking.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
+
+/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
+ * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
+ * with unsnooped PTEs instead of snooped, by using chipset-specific cache
+ * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
+ * as the eviction to local memory (TTM unbind) on map is just a side effect
+ * to prevent aggressive cache prefetch from the GPU disturbing the cache
+ * management that the DRM is doing.
+ *
+ * Flags: Acknowledge.
+ * Buffers allocated with this flag should not be used for suballocators.
+ * This type may have issues on CPUs with over-aggressive caching:
+ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
+ */
+#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
+
+
+/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
+
+/*
+ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
+ * Flags: Acknowledge.
+ */
+#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
+#define DRM_BO_FLAG_TILE (1ULL << 15)
+
+/*
+ * Memory type flags that can be or'ed together in the mask, but only
+ * one appears in flags.
+ */
+
+/* System memory */
+#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
+/* Translation table memory */
+#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
+/* Vram memory */
+#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
+/* Up to the driver to define. */
+#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
+#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
+#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
+#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
+#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
+/* We can add more of these now with a 64-bit flag type */
+
+/*
+ * This is a mask covering all of the memory type flags; easier to just
+ * use a single constant than a bunch of | values. It covers
+ * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
+ */
+#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
+/*
+ * This adds all of the CPU-mapping options in with the memory
+ * type to label all bits which change how the page gets mapped
+ */
+#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
+ DRM_BO_FLAG_CACHED_MAPPED | \
+ DRM_BO_FLAG_CACHED | \
+ DRM_BO_FLAG_MAPPABLE)
+
+/* Driver-private flags */
+#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
+
+/*
+ * Don't block on validate and map. Instead, return EBUSY.
+ */
+#define DRM_BO_HINT_DONT_BLOCK 0x00000002
+/*
+ * Don't place this buffer on the unfenced list. This means
+ * that the buffer will not end up having a fence associated
+ * with it as a result of this operation
+ */
+#define DRM_BO_HINT_DONT_FENCE 0x00000004
+/**
+ * On hardware with no interrupt events for operation completion,
+ * indicates that the kernel should sleep while waiting for any blocking
+ * operation to complete rather than spinning.
+ *
+ * Has no effect otherwise.
+ */
+#define DRM_BO_HINT_WAIT_LAZY 0x00000008
+/*
+ * The client has computed relocations referring to this buffer using the
+ * offset in the presumed_offset field.  If that offset ends up matching
+ * where this buffer lands, the kernel is free to skip executing those
+ * relocations.
+ */
+#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
+
+#define DRM_BO_INIT_MAGIC 0xfe769812
+#define DRM_BO_INIT_MAJOR 1
+#define DRM_BO_INIT_MINOR 0
+#define DRM_BO_INIT_PATCH 0
+
+
+struct drm_bo_info_req {
+ uint64_t mask;
+ uint64_t flags;
+ unsigned int handle;
+ unsigned int hint;
+ unsigned int fence_class;
+ unsigned int desired_tile_stride;
+ unsigned int tile_info;
+ unsigned int pad64;
+ uint64_t presumed_offset;
+};
+
+struct drm_bo_create_req {
+ uint64_t flags;
+ uint64_t size;
+ uint64_t buffer_start;
+ unsigned int hint;
+ unsigned int page_alignment;
+};
+
+
+/*
+ * Reply flags
+ */
+
+#define DRM_BO_REP_BUSY 0x00000001
+
+struct drm_bo_info_rep {
+ uint64_t flags;
+ uint64_t proposed_flags;
+ uint64_t size;
+ uint64_t offset;
+ uint64_t arg_handle;
+ uint64_t buffer_start;
+ unsigned int handle;
+ unsigned int fence_flags;
+ unsigned int rep_flags;
+ unsigned int page_alignment;
+ unsigned int desired_tile_stride;
+ unsigned int hw_tile_stride;
+ unsigned int tile_info;
+ unsigned int pad64;
+ uint64_t expand_pad[4]; /*Future expansion */
+};
+
+struct drm_bo_arg_rep {
+ struct drm_bo_info_rep bo_info;
+ int ret;
+ unsigned int pad64;
+};
+
+struct drm_bo_create_arg {
+ union {
+ struct drm_bo_create_req req;
+ struct drm_bo_info_rep rep;
+ } d;
+};
+
+struct drm_bo_handle_arg {
+ unsigned int handle;
+};
+
+struct drm_bo_reference_info_arg {
+ union {
+ struct drm_bo_handle_arg req;
+ struct drm_bo_info_rep rep;
+ } d;
+};
+
+struct drm_bo_map_wait_idle_arg {
+ union {
+ struct drm_bo_info_req req;
+ struct drm_bo_info_rep rep;
+ } d;
+};
+
+struct drm_bo_op_req {
+ enum {
+ drm_bo_validate,
+ drm_bo_fence,
+ drm_bo_ref_fence,
+ } op;
+ unsigned int arg_handle;
+ struct drm_bo_info_req bo_req;
+};
+
+
+struct drm_bo_op_arg {
+ uint64_t next;
+ union {
+ struct drm_bo_op_req req;
+ struct drm_bo_arg_rep rep;
+ } d;
+ int handled;
+ unsigned int pad64;
+};
+
+
+#define DRM_BO_MEM_LOCAL 0
+#define DRM_BO_MEM_TT 1
+#define DRM_BO_MEM_VRAM 2
+#define DRM_BO_MEM_PRIV0 3
+#define DRM_BO_MEM_PRIV1 4
+#define DRM_BO_MEM_PRIV2 5
+#define DRM_BO_MEM_PRIV3 6
+#define DRM_BO_MEM_PRIV4 7
+
+#define DRM_BO_MEM_TYPES 8 /* For now. */
+
+#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
+#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
+
+struct drm_bo_version_arg {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t patchlevel;
+};
+
+struct drm_mm_type_arg {
+ unsigned int mem_type;
+ unsigned int lock_flags;
+};
+
+struct drm_mm_init_arg {
+ unsigned int magic;
+ unsigned int major;
+ unsigned int minor;
+ unsigned int mem_type;
+ uint64_t p_offset;
+ uint64_t p_size;
+};
+
+struct drm_mm_info_arg {
+ unsigned int mem_type;
+ uint64_t p_size;
+};
+
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+
+ /** Returned global name */
+ uint32_t name;
+};
+
+struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+
+ /** Returned handle for the object */
+ uint32_t handle;
+
+ /** Returned size of the object */
+ uint64_t size;
+};
+
+struct drm_get_cap {
+ uint64_t capability;
+ uint64_t value;
+};
+
+struct drm_event {
+ uint32_t type;
+ uint32_t length;
+};
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+
+struct drm_event_vblank {
+ struct drm_event base;
+ uint64_t user_data;
+ uint32_t tv_sec;
+ uint32_t tv_usec;
+ uint32_t sequence;
+ uint32_t reserved;
+};
+
+#define DRM_CAP_DUMB_BUFFER 0x1
+#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+
+#include "drm_mode.h"
+
+/**
+ * \name Ioctls Definitions
+ */
+/*@{*/
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
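+
+/*
+ * GEM name-sharing sketch (fd, other_fd and handle are hypothetical):
+ * handles are per-open-file, flink names are global, so one process can
+ * export a buffer object to another:
+ *
+ *	struct drm_gem_flink f = { .handle = handle };
+ *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &f);	// f.name is now global
+ *
+ *	struct drm_gem_open o = { .name = f.name };
+ *	ioctl(other_fd, DRM_IOCTL_GEM_OPEN, &o); // o.handle valid on other_fd
+ */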
+
+#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+
+#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
+#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
+
+#define DRM_IOCTL_GEM_PRIME_OPEN DRM_IOWR(0x2e, struct drm_gem_open)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
+
+#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
+
+#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
+
+#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+
+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
+#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
+#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
+#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
+#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
+
+#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
+#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
+#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
+#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
+#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+
+#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
+#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
+#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
+#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
+
+#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
+#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
+#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
+#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
+
+#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
+
+#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
+#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
+#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
+#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
+#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
+#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
+#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
+#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
+#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
+#define DRM_IOCTL_MM_INFO DRM_IOWR(0xd7, struct drm_mm_info_arg)
+
+/*@}*/
+
+/**
+ * Device specific ioctls should only be in their respective headers.
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLS restart at 0xA0.
+ *
+ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
+ * drmCommandReadWrite().
+ */
+#define DRM_COMMAND_BASE 0x40
+#define DRM_COMMAND_END 0xA0
+
+/* typedef area */
+#ifndef __KERNEL__
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+
+typedef struct drm_fence_arg drm_fence_arg_t;
+typedef struct drm_mm_type_arg drm_mm_type_arg_t;
+typedef struct drm_mm_init_arg drm_mm_init_arg_t;
+typedef enum drm_bo_type drm_bo_type_t;
+#endif
+
+#endif
diff --git a/sys/dev/drm2/drmP.h b/sys/dev/drm2/drmP.h
new file mode 100644
index 0000000..f216d03
--- /dev/null
+++ b/sys/dev/drm2/drmP.h
@@ -0,0 +1,1400 @@
+/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
+ * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
+ */
+/*-
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _DRM_P_H_
+#define _DRM_P_H_
+
+#if defined(_KERNEL) || defined(__KERNEL__)
+
+struct drm_device;
+struct drm_file;
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/sglist.h>
+#include <sys/stat.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/fcntl.h>
+#include <sys/uio.h>
+#include <sys/filio.h>
+#include <sys/selinfo.h>
+#include <sys/sysctl.h>
+#include <sys/bus.h>
+#include <sys/signalvar.h>
+#include <sys/poll.h>
+#include <sys/sbuf.h>
+#include <sys/taskqueue.h>
+#include <sys/tree.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_param.h>
+#include <vm/vm_phys.h>
+#include <machine/param.h>
+#include <machine/pmap.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#if defined(__i386__) || defined(__amd64__)
+#include <machine/specialreg.h>
+#endif
+#include <machine/sysarch.h>
+#include <sys/endian.h>
+#include <sys/mman.h>
+#include <sys/rman.h>
+#include <sys/memrange.h>
+#include <dev/agp/agpvar.h>
+#include <sys/agpio.h>
+#include <sys/mutex.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_atomic.h>
+#include <dev/drm2/drm_internal.h>
+#include <dev/drm2/drm_linux_list.h>
+#include <dev/drm2/drm_gem_names.h>
+#include <dev/drm2/drm_mm.h>
+#include <dev/drm2/drm_hashtab.h>
+
+#include "opt_drm.h"
+#ifdef DRM_DEBUG
+#undef DRM_DEBUG
+#define DRM_DEBUG_DEFAULT_ON 1
+#endif /* DRM_DEBUG */
+
+#define DRM_DEBUGBITS_DEBUG 0x1
+#define DRM_DEBUGBITS_KMS 0x2
+#define DRM_DEBUGBITS_FAILED_IOCTL 0x4
+
+#undef DRM_LINUX
+#define DRM_LINUX 0
+
+/* driver capabilities and requirements mask */
+#define DRIVER_USE_AGP 0x1
+#define DRIVER_REQUIRE_AGP 0x2
+#define DRIVER_USE_MTRR 0x4
+#define DRIVER_PCI_DMA 0x8
+#define DRIVER_SG 0x10
+#define DRIVER_HAVE_DMA 0x20
+#define DRIVER_HAVE_IRQ 0x40
+#define DRIVER_IRQ_SHARED 0x80
+#define DRIVER_IRQ_VBL 0x100
+#define DRIVER_DMA_QUEUE 0x200
+#define DRIVER_FB_DMA 0x400
+#define DRIVER_IRQ_VBL2 0x800
+#define DRIVER_GEM 0x1000
+#define DRIVER_MODESET 0x2000
+#define DRIVER_USE_PLATFORM_DEVICE 0x4000
+#define DRIVER_LOCKLESS_IRQ 0x8000
+
+
+#define DRM_HASH_SIZE 16 /* Size of key hash table */
+#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
+#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
+
+#define DRM_GEM_MAPPING_MASK (3ULL << 62)
+#define DRM_GEM_MAPPING_KEY (2ULL << 62) /* Non-canonical address form */
+#define DRM_GEM_MAX_IDX 0x3fffff
+#define DRM_GEM_MAPPING_IDX(o) (((o) >> 40) & DRM_GEM_MAX_IDX)
+#define DRM_GEM_MAPPING_OFF(i) (((uint64_t)(i)) << 40)
+#define DRM_GEM_MAPPING_MAPOFF(o) \
+ ((o) & ~(DRM_GEM_MAPPING_OFF(DRM_GEM_MAX_IDX) | DRM_GEM_MAPPING_KEY))
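+
+/*
+ * Layout sketch: the top two bits of a GEM mapping offset carry the key
+ * value 2 (a non-canonical address form), bits 40..61 the object index,
+ * and bits 0..39 the offset within the object:
+ *
+ *	uint64_t off = DRM_GEM_MAPPING_KEY | DRM_GEM_MAPPING_OFF(7) | 0x1000;
+ *	DRM_GEM_MAPPING_IDX(off)    == 7
+ *	DRM_GEM_MAPPING_MAPOFF(off) == 0x1000
+ */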
+
+MALLOC_DECLARE(DRM_MEM_DMA);
+MALLOC_DECLARE(DRM_MEM_SAREA);
+MALLOC_DECLARE(DRM_MEM_DRIVER);
+MALLOC_DECLARE(DRM_MEM_MAGIC);
+MALLOC_DECLARE(DRM_MEM_IOCTLS);
+MALLOC_DECLARE(DRM_MEM_MAPS);
+MALLOC_DECLARE(DRM_MEM_BUFS);
+MALLOC_DECLARE(DRM_MEM_SEGS);
+MALLOC_DECLARE(DRM_MEM_PAGES);
+MALLOC_DECLARE(DRM_MEM_FILES);
+MALLOC_DECLARE(DRM_MEM_QUEUES);
+MALLOC_DECLARE(DRM_MEM_CMDS);
+MALLOC_DECLARE(DRM_MEM_MAPPINGS);
+MALLOC_DECLARE(DRM_MEM_BUFLISTS);
+MALLOC_DECLARE(DRM_MEM_AGPLISTS);
+MALLOC_DECLARE(DRM_MEM_CTXBITMAP);
+MALLOC_DECLARE(DRM_MEM_SGLISTS);
+MALLOC_DECLARE(DRM_MEM_DRAWABLE);
+MALLOC_DECLARE(DRM_MEM_MM);
+MALLOC_DECLARE(DRM_MEM_HASHTAB);
+MALLOC_DECLARE(DRM_MEM_KMS);
+
+SYSCTL_DECL(_hw_drm);
+
+#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
+
+ /* Internal types and structures */
+#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+#define DRM_MIN(a,b) ((a)<(b)?(a):(b))
+#define DRM_MAX(a,b) ((a)>(b)?(a):(b))
+
+#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+
+#define __OS_HAS_AGP 1
+
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+
+#define wait_queue_head_t atomic_t
+#define DRM_WAKEUP(w) wakeup((void *)w)
+#define DRM_WAKEUP_INT(w) wakeup(w)
+#define DRM_INIT_WAITQUEUE(queue) do {(void)(queue);} while (0)
+
+#define DRM_CURPROC curthread
+#define DRM_STRUCTPROC struct thread
+#define DRM_SPINTYPE struct mtx
+#define DRM_SPININIT(l,name) mtx_init(l, name, NULL, MTX_DEF)
+#define DRM_SPINUNINIT(l) mtx_destroy(l)
+#define DRM_SPINLOCK(l) mtx_lock(l)
+#define DRM_SPINUNLOCK(u) mtx_unlock(u)
+#define DRM_SPINLOCK_IRQSAVE(l, irqflags) do { \
+ mtx_lock(l); \
+ (void)irqflags; \
+} while (0)
+#define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u)
+#define DRM_SPINLOCK_ASSERT(l) mtx_assert(l, MA_OWNED)
+#define DRM_CURRENTPID curthread->td_proc->p_pid
+#define DRM_LOCK(dev) sx_xlock(&(dev)->dev_struct_lock)
+#define DRM_UNLOCK(dev) sx_xunlock(&(dev)->dev_struct_lock)
+#define DRM_LOCK_SLEEP(dev, chan, flags, msg, timeout) \
+ (sx_sleep((chan), &(dev)->dev_struct_lock, (flags), (msg), (timeout)))
+#if defined(INVARIANTS)
+#define DRM_LOCK_ASSERT(dev) sx_assert(&(dev)->dev_struct_lock, SA_XLOCKED)
+#define DRM_UNLOCK_ASSERT(dev) sx_assert(&(dev)->dev_struct_lock, SA_UNLOCKED)
+#else
+#define DRM_LOCK_ASSERT(d)
+#define DRM_UNLOCK_ASSERT(d)
+#endif
+
+#define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS)
+
+#define DRM_IRQ_ARGS void *arg
+typedef void irqreturn_t;
+#define IRQ_HANDLED /* nothing */
+#define IRQ_NONE /* nothing */
+
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#define container_of(ptr, type, member) ({ \
+ __typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
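+
+/*
+ * Example: recover the outer object from a pointer to an embedded
+ * member; given lh, a pointer to the "link" field of a queued event
+ * (see struct drm_pending_event below):
+ *
+ *	struct drm_pending_event *e =
+ *	    container_of(lh, struct drm_pending_event, link);
+ */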
+
+enum {
+ DRM_IS_NOT_AGP,
+ DRM_IS_AGP,
+ DRM_MIGHT_BE_AGP
+};
+#define DRM_AGP_MEM struct agp_memory_info
+
+#define drm_get_device_from_kdev(_kdev) (_kdev->si_drv1)
+
+#define PAGE_ALIGN(addr) round_page(addr)
+/* DRM_SUSER returns true if the user is superuser */
+#define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0)
+#define DRM_AGP_FIND_DEVICE() agp_find_device()
+#define DRM_MTRR_WC MDF_WRITECOMBINE
+#define jiffies ticks
+#define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz)
+#define msecs_to_jiffies(x) (((int64_t)(x)) * hz / 1000)
+#define time_after(a,b) ((long)(b) - (long)(a) < 0)
+#define time_after_eq(a,b) ((long)(b) - (long)(a) <= 0)
+#define drm_msleep(x, msg) pause((msg), ((int64_t)(x)) * hz / 1000)
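+
+/*
+ * Example (sketch; "done" is a hypothetical completion flag): poll with
+ * a 100 ms deadline using the jiffies emulation above:
+ *
+ *	unsigned long deadline = jiffies + msecs_to_jiffies(100);
+ *	while (!done && !time_after(jiffies, deadline))
+ *		drm_msleep(1, "poll");
+ */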
+
+typedef vm_paddr_t dma_addr_t;
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+typedef int64_t s64;
+typedef int32_t s32;
+typedef int16_t s16;
+typedef int8_t s8;
+
+/* DRM_READMEMORYBARRIER() prevents reordering of reads.
+ * DRM_WRITEMEMORYBARRIER() prevents reordering of writes.
+ * DRM_MEMORYBARRIER() prevents reordering of reads and writes.
+ */
+#define DRM_READMEMORYBARRIER() rmb()
+#define DRM_WRITEMEMORYBARRIER() wmb()
+#define DRM_MEMORYBARRIER() mb()
+
+#define DRM_READ8(map, offset) \
+ *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \
+ (vm_offset_t)(offset))
+#define DRM_READ16(map, offset) \
+ le16toh(*(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \
+ (vm_offset_t)(offset)))
+#define DRM_READ32(map, offset) \
+ le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \
+ (vm_offset_t)(offset)))
+#define DRM_READ64(map, offset) \
+ le64toh(*(volatile u_int64_t *)(((vm_offset_t)(map)->virtual) + \
+ (vm_offset_t)(offset)))
+#define DRM_WRITE8(map, offset, val) \
+ *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \
+ (vm_offset_t)(offset)) = val
+#define DRM_WRITE16(map, offset, val) \
+ *(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \
+ (vm_offset_t)(offset)) = htole16(val)
+#define DRM_WRITE32(map, offset, val) \
+ *(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \
+ (vm_offset_t)(offset)) = htole32(val)
+#define DRM_WRITE64(map, offset, val) \
+ *(volatile u_int64_t *)(((vm_offset_t)(map)->virtual) + \
+ (vm_offset_t)(offset)) = htole64(val)
+
+#define DRM_VERIFYAREA_READ( uaddr, size ) \
+ (!useracc(__DECONST(caddr_t, uaddr), size, VM_PROT_READ))
+
+#define DRM_COPY_TO_USER(user, kern, size) \
+ copyout(kern, user, size)
+#define DRM_COPY_FROM_USER(kern, user, size) \
+ copyin(user, kern, size)
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
+ copyin(arg2, arg1, arg3)
+#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
+ copyout(arg2, arg1, arg3)
+#define DRM_GET_USER_UNCHECKED(val, uaddr) \
+ ((val) = fuword32(uaddr), 0)
+
+#define cpu_to_le32(x) htole32(x)
+#define le32_to_cpu(x) le32toh(x)
+
+#define DRM_HZ hz
+#define DRM_UDELAY(udelay) DELAY(udelay)
+#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
+
+#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
+ (_map) = (_dev)->context_sareas[_ctx]; \
+} while(0)
+
+#define LOCK_TEST_WITH_RETURN(dev, file_priv) \
+do { \
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
+ dev->lock.file_priv != file_priv) { \
+ DRM_ERROR("%s called without lock held\n", \
+ __FUNCTION__); \
+ return EINVAL; \
+ } \
+} while (0)
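+
+/*
+ * Typical use at the top of an ioctl handler that requires the hardware
+ * lock (foo_dma is hypothetical):
+ *
+ *	static int foo_dma(struct drm_device *dev, void *data,
+ *	    struct drm_file *file_priv)
+ *	{
+ *		LOCK_TEST_WITH_RETURN(dev, file_priv);
+ *		...
+ *	}
+ */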
+
+/* Returns -errno to shared code */
+#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
+for ( ret = 0 ; !ret && !(condition) ; ) { \
+ DRM_UNLOCK(dev); \
+ mtx_lock(&dev->irq_lock); \
+ if (!(condition)) \
+ ret = -mtx_sleep(&(queue), &dev->irq_lock, \
+ PCATCH, "drmwtq", (timeout)); \
+ mtx_unlock(&dev->irq_lock); \
+ DRM_LOCK(dev); \
+}
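+
+/*
+ * Example (sketch; the queue and condition names are hypothetical):
+ * wait up to three seconds for a vblank counter to advance:
+ *
+ *	int ret;
+ *	DRM_WAIT_ON(ret, vbl_queue, 3 * DRM_HZ,
+ *	    atomic_read(&counter) >= target);
+ *	// ret is 0 on success or a negative errno on timeout/signal
+ */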
+
+#define DRM_ERROR(fmt, ...) \
+ printf("error: [" DRM_NAME ":pid%d:%s] *ERROR* " fmt, \
+ DRM_CURRENTPID, __func__ , ##__VA_ARGS__)
+
+#define DRM_INFO(fmt, ...) printf("info: [" DRM_NAME "] " fmt , ##__VA_ARGS__)
+
+#define DRM_DEBUG(fmt, ...) do { \
+ if ((drm_debug_flag & DRM_DEBUGBITS_DEBUG) != 0) \
+ printf("[" DRM_NAME ":pid%d:%s] " fmt, DRM_CURRENTPID, \
+ __func__ , ##__VA_ARGS__); \
+} while (0)
+
+#define DRM_DEBUG_KMS(fmt, ...) do { \
+ if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) \
+ printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\
+ __func__ , ##__VA_ARGS__); \
+} while (0)
+
+#define DRM_DEBUG_DRIVER(fmt, ...) do { \
+ if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) \
+ printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\
+ __func__ , ##__VA_ARGS__); \
+} while (0)
+
+typedef struct drm_pci_id_list
+{
+ int vendor;
+ int device;
+ long driver_private;
+ char *name;
+} drm_pci_id_list_t;
+
+struct drm_msi_blacklist_entry
+{
+ int vendor;
+ int device;
+};
+
+#define DRM_AUTH 0x1
+#define DRM_MASTER 0x2
+#define DRM_ROOT_ONLY 0x4
+#define DRM_CONTROL_ALLOW 0x8
+#define DRM_UNLOCKED 0x10
+
+typedef struct drm_ioctl_desc {
+ unsigned long cmd;
+ int (*func)(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ int flags;
+} drm_ioctl_desc_t;
+/**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+ * ioctl, for use by drm_ioctl().
+ */
+#define DRM_IOCTL_DEF(ioctl, func, flags) \
+ [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
+
+typedef struct drm_magic_entry {
+ drm_magic_t magic;
+ struct drm_file *priv;
+ struct drm_magic_entry *next;
+} drm_magic_entry_t;
+
+typedef struct drm_magic_head {
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
+} drm_magic_head_t;
+
+typedef struct drm_buf {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int order; /* log-base-2(total) */
+ int used; /* Amount of buffer in use (for DMA) */
+ unsigned long offset; /* Byte offset (used internally) */
+ void *address; /* Address of buffer */
+ unsigned long bus_address; /* Bus address of buffer */
+ struct drm_buf *next; /* Kernel-only: used for free list */
+ __volatile__ int pending; /* On hardware DMA queue */
+ struct drm_file *file_priv; /* Unique identifier of holding process */
+ int context; /* Kernel queue for this buffer */
+ enum {
+ DRM_LIST_NONE = 0,
+ DRM_LIST_FREE = 1,
+ DRM_LIST_WAIT = 2,
+ DRM_LIST_PEND = 3,
+ DRM_LIST_PRIO = 4,
+ DRM_LIST_RECLAIM = 5
+ } list; /* Which list we're on */
+
+	int dev_priv_size;	/* Size of buffer private storage */
+ void *dev_private; /* Per-buffer private storage */
+} drm_buf_t;
+
+typedef struct drm_freelist {
+ int initialized; /* Freelist in use */
+ atomic_t count; /* Number of free buffers */
+ drm_buf_t *next; /* End pointer */
+
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
+} drm_freelist_t;
+
+typedef struct drm_dma_handle {
+ void *vaddr;
+ bus_addr_t busaddr;
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+} drm_dma_handle_t;
+
+typedef struct drm_buf_entry {
+ int buf_size;
+ int buf_count;
+ drm_buf_t *buflist;
+ int seg_count;
+ drm_dma_handle_t **seglist;
+ int page_order;
+
+ drm_freelist_t freelist;
+} drm_buf_entry_t;
+
+/* Event queued up for userspace to read */
+struct drm_pending_event {
+ struct drm_event *event;
+ struct list_head link;
+ struct drm_file *file_priv;
+ pid_t pid; /* pid of requester, no guarantee it's valid by the time
+ we deliver the event, for tracing only */
+ void (*destroy)(struct drm_pending_event *event);
+};
+
+typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
+struct drm_file {
+ TAILQ_ENTRY(drm_file) link;
+ struct drm_device *dev;
+ int authenticated;
+ int master;
+ pid_t pid;
+ uid_t uid;
+ drm_magic_t magic;
+ unsigned long ioctl_count;
+
+ void *driver_priv;
+ struct drm_gem_names object_names;
+
+ int is_master;
+ struct drm_master *masterp;
+
+ struct list_head fbs;
+
+ struct list_head event_list;
+ int event_space;
+ struct selinfo event_poll;
+};
+
+typedef struct drm_lock_data {
+ struct drm_hw_lock *hw_lock; /* Hardware lock */
+ struct drm_file *file_priv; /* Unique identifier of holding process (NULL is kernel)*/
+ int lock_queue; /* Queue of blocked processes */
+ unsigned long lock_time; /* Time of last lock in jiffies */
+} drm_lock_data_t;
+
+/* This structure, in the struct drm_device, is always initialized while the
+ * device is open. dev->dma_lock protects the incrementing of dev->buf_use, which
+ * when set marks that no further bufs may be allocated until device teardown
+ * occurs (when the last open of the device has closed). The high/low
+ * watermarks of bufs are only touched by the X Server, and thus not
+ * concurrently accessed, so no locking is needed.
+ */
+typedef struct drm_device_dma {
+ drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
+ int buf_count;
+	drm_buf_t	**buflist;	/* Vector of pointers into bufs	   */
+ int seg_count;
+ int page_count;
+ unsigned long *pagelist;
+ unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01,
+ _DRM_DMA_USE_SG = 0x02
+ } flags;
+} drm_device_dma_t;
+
+typedef struct drm_agp_mem {
+ void *handle;
+ unsigned long bound; /* address */
+ int pages;
+ struct drm_agp_mem *prev;
+ struct drm_agp_mem *next;
+} drm_agp_mem_t;
+
+typedef struct drm_agp_head {
+ device_t agpdev;
+ struct agp_info info;
+ const char *chipset;
+ drm_agp_mem_t *memory;
+ unsigned long mode;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
+} drm_agp_head_t;
+
+typedef struct drm_sg_mem {
+ vm_offset_t vaddr;
+ vm_paddr_t *busaddr;
+ vm_pindex_t pages;
+} drm_sg_mem_t;
+
+#define DRM_MAP_HANDLE_BITS (sizeof(void *) == 4 ? 4 : 24)
+#define DRM_MAP_HANDLE_SHIFT (sizeof(void *) * 8 - DRM_MAP_HANDLE_BITS)
+typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
+
+typedef struct drm_local_map {
+ unsigned long offset; /* Physical address (0 for SAREA) */
+ unsigned long size; /* Physical size (bytes) */
+ enum drm_map_type type; /* Type of memory mapped */
+ enum drm_map_flags flags; /* Flags */
+ void *handle; /* User-space: "Handle" to pass to mmap */
+ /* Kernel-space: kernel-virtual address */
+ int mtrr; /* Boolean: MTRR used */
+ /* Private data */
+ int rid; /* PCI resource ID for bus_space */
+ void *virtual; /* Kernel-space: kernel-virtual address */
+ struct resource *bsr;
+ bus_space_tag_t bst;
+ bus_space_handle_t bsh;
+ drm_dma_handle_t *dmah;
+ TAILQ_ENTRY(drm_local_map) link;
+} drm_local_map_t;
+
+struct drm_vblank_info {
+ wait_queue_head_t queue; /* vblank wait queue */
+ atomic_t count; /* number of VBLANK interrupts */
+ /* (driver must alloc the right number of counters) */
+ atomic_t refcount; /* number of users of vblank interrupts */
+ u32 last; /* protected by dev->vbl_lock, used */
+ /* for wraparound handling */
+ int enabled; /* so we don't call enable more than */
+ /* once per disable */
+ int inmodeset; /* Display driver is setting mode */
+};
+
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
+
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_INVBL (1 << 1)
+
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID (1 << 0)
+#define DRM_SCANOUTPOS_INVBL (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB 2
+
+#define DRM_ATI_GART_PCI 1
+#define DRM_ATI_GART_PCIE 2
+#define DRM_ATI_GART_IGP 3
+
+struct drm_ati_pcigart_info {
+ int gart_table_location;
+ int gart_reg_if;
+ void *addr;
+ dma_addr_t bus_addr;
+ dma_addr_t table_mask;
+ dma_addr_t member_mask;
+ struct drm_dma_handle *table_handle;
+ drm_local_map_t mapping;
+ int table_size;
+ struct drm_dma_handle *dmah; /* handle for ATI PCIGART table */
+};
+
+typedef vm_paddr_t resource_size_t;
+
+/**
+ * GEM specific mm private for tracking GEM objects
+ */
+struct drm_gem_mm {
+ struct drm_open_hash offset_hash; /**< User token hash table for maps */
+ struct unrhdr *idxunr;
+};
+
+struct drm_gem_object {
+ /** Reference count of this object */
+ u_int refcount;
+
+ /** Handle count of this object. Each handle also holds a reference */
+ u_int handle_count; /* number of handles on this object */
+
+ /** Related drm device */
+ struct drm_device *dev;
+
+ /** File representing the shmem storage: filp in Linux parlance */
+ vm_object_t vm_obj;
+
+ bool on_map;
+ struct drm_hash_item map_list;
+
+ /**
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /**
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /**
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /**
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ void *driver_private;
+};
+
+#include "drm_crtc.h"
+
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
+#endif
+
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
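
A quick worked check of the two helpers: the (n) == 64 special case exists
because 1ULL << 64 is undefined, and upper_32_bits() shifts in two 16-bit
steps so the macro stays well-defined even for 32-bit arguments. For
example:

    uint64_t addr = 0x123456789abcULL;
    uint64_t mask = DMA_BIT_MASK(40);       /* 0xffffffffff */
    uint32_t hi = upper_32_bits(addr);      /* 0x1234 */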
+
+struct drm_driver_info {
+ int (*load)(struct drm_device *, unsigned long flags);
+ int (*firstopen)(struct drm_device *);
+ int (*open)(struct drm_device *, struct drm_file *);
+ void (*preclose)(struct drm_device *, struct drm_file *file_priv);
+ void (*postclose)(struct drm_device *, struct drm_file *);
+ void (*lastclose)(struct drm_device *);
+ int (*unload)(struct drm_device *);
+ void (*reclaim_buffers_locked)(struct drm_device *,
+ struct drm_file *file_priv);
+ int (*dma_ioctl)(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ void (*dma_ready)(struct drm_device *);
+ int (*dma_quiescent)(struct drm_device *);
+ int (*dma_flush_block_and_flush)(struct drm_device *, int context,
+ enum drm_lock_flags flags);
+ int (*dma_flush_unblock)(struct drm_device *, int context,
+ enum drm_lock_flags flags);
+ int (*context_ctor)(struct drm_device *dev, int context);
+ int (*context_dtor)(struct drm_device *dev, int context);
+ int (*kernel_context_switch)(struct drm_device *dev, int old,
+ int new);
+ int (*kernel_context_switch_unlock)(struct drm_device *dev);
+ void (*irq_preinstall)(struct drm_device *dev);
+ int (*irq_postinstall)(struct drm_device *dev);
+ void (*irq_uninstall)(struct drm_device *dev);
+ void (*irq_handler)(DRM_IRQ_ARGS);
+
+ u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
+ int (*enable_vblank)(struct drm_device *dev, int crtc);
+ void (*disable_vblank)(struct drm_device *dev, int crtc);
+ int (*get_scanout_position)(struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
+
+ int (*get_vblank_timestamp)(struct drm_device *dev, int crtc,
+ int *max_error, struct timeval *vblank_time,
+ unsigned flags);
+
+ int (*gem_init_object)(struct drm_gem_object *obj);
+ void (*gem_free_object)(struct drm_gem_object *obj);
+
+ struct cdev_pager_ops *gem_pager_ops;
+
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev, struct drm_mode_create_dumb *args);
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle, uint64_t *offset);
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle);
+
+ int (*sysctl_init)(struct drm_device *dev,
+ struct sysctl_ctx_list *ctx, struct sysctl_oid *top);
+ void (*sysctl_cleanup)(struct drm_device *dev);
+
+ drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */
+
+ /**
+ * Called by \c drm_device_is_agp. Typically used to determine if a
+ * card is really attached to AGP or not.
+ *
+ * \param dev DRM device handle
+ *
+ * \returns
+ * One of three values is returned depending on whether or not the
+ * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+ * (return of 1), or may or may not be AGP (return of 2).
+ */
+ int (*device_is_agp) (struct drm_device * dev);
+
+ drm_ioctl_desc_t *ioctls;
+ int max_ioctl;
+
+ int buf_priv_size;
+
+ int major;
+ int minor;
+ int patchlevel;
+ const char *name; /* Simple driver name */
+ const char *desc; /* Longer driver name */
+ const char *date; /* Date of last major changes. */
+
+ u32 driver_features;
+};
+
+/**
+ * DRM minor structure. This structure represents a drm minor number.
+ */
+struct drm_minor {
+ int index; /**< Minor device number */
+ int type; /**< Control or render */
+ device_t kdev; /**< OS device */
+ struct drm_device *dev;
+
+ struct drm_master *master; /* currently active master for this node */
+ struct list_head master_list;
+ struct drm_mode_group mode_group;
+};
+
+/* mode specified on the command line */
+struct drm_cmdline_mode {
+ bool specified;
+ bool refresh_specified;
+ bool bpp_specified;
+ int xres, yres;
+ int bpp;
+ int refresh;
+ bool rb;
+ bool interlace;
+ bool cvt;
+ bool margins;
+ enum drm_connector_force force;
+};
+
+struct drm_pending_vblank_event {
+ struct drm_pending_event base;
+ int pipe;
+ struct drm_event_vblank event;
+};
+
+/* Length for the array of resource pointers for drm_get_resource_*. */
+#define DRM_MAX_PCI_RESOURCE 6
+
+/**
+ * DRM device functions structure
+ */
+struct drm_device {
+ struct drm_driver_info *driver;
+ drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */
+
+ u_int16_t pci_device; /* PCI device id */
+ u_int16_t pci_vendor; /* PCI vendor id */
+
+ char *unique; /* Unique identifier: e.g., busid */
+ int unique_len; /* Length of unique field */
+ device_t device; /* Device instance from newbus */
+ struct cdev *devnode; /* Device number for mknod */
+ int if_version; /* Highest interface version set */
+
+ int flags; /* Flags to open(2) */
+
+ /* Locks */
+ struct mtx dma_lock; /* protects dev->dma */
+ struct mtx irq_lock; /* protects irq condition checks */
+ struct mtx dev_lock; /* protects everything else */
+ struct sx dev_struct_lock;
+ DRM_SPINTYPE drw_lock;
+
+ /* Usage Counters */
+ int open_count; /* Outstanding files open */
+ int buf_use; /* Buffers in use -- cannot alloc */
+
+ /* Performance counters */
+ unsigned long counters;
+ enum drm_stat_type types[15];
+ atomic_t counts[15];
+
+ /* Authentication */
+ drm_file_list_t files;
+ drm_magic_head_t magiclist[DRM_HASH_SIZE];
+
+ /* Linked list of mappable regions. Protected by dev_lock */
+ drm_map_list_t maplist;
+ struct unrhdr *map_unrhdr;
+
+ drm_local_map_t **context_sareas;
+ int max_context;
+
+ drm_lock_data_t lock; /* Information on hardware lock */
+
+ /* DMA queues (contexts) */
+ drm_device_dma_t *dma; /* Optional pointer for DMA support */
+
+ /* Context support */
+ int irq; /* Interrupt used by board */
+ int irq_enabled; /* True if the irq handler is enabled */
+ int msi_enabled; /* MSI enabled */
+ int irqrid; /* Interrupt used by board */
+ struct resource *irqr; /* Resource for interrupt used by board */
+ void *irqh; /* Handle from bus_setup_intr */
+
+ /* Storage of resource pointers for drm_get_resource_* */
+ struct resource *pcir[DRM_MAX_PCI_RESOURCE];
+ int pcirid[DRM_MAX_PCI_RESOURCE];
+
+ int pci_domain;
+ int pci_bus;
+ int pci_slot;
+ int pci_func;
+
+ atomic_t context_flag; /* Context swapping flag */
+ int last_context; /* Last current context */
+
+ int num_crtcs;
+
+ struct sigio *buf_sigio; /* Processes waiting for SIGIO */
+
+ /* Sysctl support */
+ struct drm_sysctl_info *sysctl;
+ int sysctl_node_idx;
+
+ drm_agp_head_t *agp;
+ drm_sg_mem_t *sg; /* Scatter gather memory */
+ atomic_t *ctx_bitmap;
+ void *dev_private;
+ unsigned int agp_buffer_token;
+ drm_local_map_t *agp_buffer_map;
+
+ struct drm_minor *control; /**< Control node for card */
+ struct drm_minor *primary; /**< render type primary screen head */
+
+ struct unrhdr *drw_unrhdr;
+ /* RB tree of drawable infos */
+ RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
+
+ int vblank_disable_allowed;
+
+ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
+ struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
+ struct mtx vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
+ struct mtx vbl_lock;
+	atomic_t *vblank_refcount;	/* number of users of vblank interrupts per crtc */
+ u32 *last_vblank; /* protected by dev->vbl_lock, used */
+ /* for wraparound handling */
+ int *vblank_enabled; /* so we don't call enable more than
+ once per disable */
+ int *vblank_inmodeset; /* Display driver is setting mode */
+ u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */
+ struct callout vblank_disable_callout;
+
+ u32 max_vblank_count; /**< size of vblank counter register */
+
+ struct list_head vblank_event_list;
+ struct mtx event_lock;
+
+ struct drm_mode_config mode_config; /**< Current mode config */
+
+ /* GEM part */
+ struct sx object_name_lock;
+ struct drm_gem_names object_names;
+ void *mm_private;
+
+ void *sysctl_private;
+ char busid_str[128];
+ int modesetting;
+};
+
+static __inline__ int drm_core_check_feature(struct drm_device *dev,
+ int feature)
+{
+ return ((dev->driver->driver_features & feature) ? 1 : 0);
+}
+
+#if __OS_HAS_AGP
+static inline int drm_core_has_AGP(struct drm_device *dev)
+{
+ return drm_core_check_feature(dev, DRIVER_USE_AGP);
+}
+#else
+#define drm_core_has_AGP(dev) (0)
+#endif
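
A driver would typically gate AGP-only paths on these helpers before
touching dev->agp, along the lines of this sketch:

    if (drm_core_has_AGP(dev) && dev->agp != NULL && dev->agp->acquired) {
            /* Safe to use the AGP aperture at dev->agp->base. */
    }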
+
+enum dmi_field {
+ DMI_NONE,
+ DMI_BIOS_VENDOR,
+ DMI_BIOS_VERSION,
+ DMI_BIOS_DATE,
+ DMI_SYS_VENDOR,
+ DMI_PRODUCT_NAME,
+ DMI_PRODUCT_VERSION,
+ DMI_PRODUCT_SERIAL,
+ DMI_PRODUCT_UUID,
+ DMI_BOARD_VENDOR,
+ DMI_BOARD_NAME,
+ DMI_BOARD_VERSION,
+ DMI_BOARD_SERIAL,
+ DMI_BOARD_ASSET_TAG,
+ DMI_CHASSIS_VENDOR,
+ DMI_CHASSIS_TYPE,
+ DMI_CHASSIS_VERSION,
+ DMI_CHASSIS_SERIAL,
+ DMI_CHASSIS_ASSET_TAG,
+ DMI_STRING_MAX,
+};
+
+struct dmi_strmatch {
+ unsigned char slot;
+ char substr[79];
+};
+
+struct dmi_system_id {
+ int (*callback)(const struct dmi_system_id *);
+ const char *ident;
+ struct dmi_strmatch matches[4];
+};
+#define DMI_MATCH(a, b) {(a), (b)}
+bool dmi_check_system(const struct dmi_system_id *);
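
dmi_check_system() walks a table of DMI string matches and invokes the
callback for entries that match the running system. A hypothetical quirk
table, assuming the usual empty-entry terminator convention, might look
like:

    static int
    example_quirk(const struct dmi_system_id *id)
    {
            DRM_INFO("applying quirk for %s\n", id->ident);
            return 1;
    }

    static const struct dmi_system_id example_quirk_table[] = {
            {
                    .callback = example_quirk,
                    .ident = "Example Vendor EX-1000",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                            DMI_MATCH(DMI_BOARD_NAME, "EX-1000"),
                    },
            },
            { }     /* terminator */
    };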
+
+extern int drm_debug_flag;
+extern int drm_notyet_flag;
+extern unsigned int drm_vblank_offdelay;
+extern unsigned int drm_timestamp_precision;
+
+/* Device setup support (drm_drv.c) */
+int drm_probe(device_t kdev, drm_pci_id_list_t *idlist);
+int drm_attach(device_t kdev, drm_pci_id_list_t *idlist);
+int drm_create_cdevs(device_t kdev);
+void drm_close(void *data);
+int drm_detach(device_t kdev);
+d_ioctl_t drm_ioctl;
+d_open_t drm_open;
+d_read_t drm_read;
+d_poll_t drm_poll;
+d_mmap_t drm_mmap;
+extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
+
+void drm_event_wakeup(struct drm_pending_event *e);
+
+int drm_add_busid_modesetting(struct drm_device *dev,
+ struct sysctl_ctx_list *ctx, struct sysctl_oid *top);
+
+/* File operations helpers (drm_fops.c) */
+extern int drm_open_helper(struct cdev *kdev, int flags, int fmt,
+ DRM_STRUCTPROC *p,
+ struct drm_device *dev);
+
+/* Memory management support (drm_memory.c) */
+void drm_mem_init(void);
+void drm_mem_uninit(void);
+void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map);
+void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map);
+void drm_ioremapfree(drm_local_map_t *map);
+int drm_mtrr_add(unsigned long offset, size_t size, int flags);
+int drm_mtrr_del(int handle, unsigned long offset, size_t size, int flags);
+
+int drm_context_switch(struct drm_device *dev, int old, int new);
+int drm_context_switch_complete(struct drm_device *dev, int new);
+
+int drm_ctxbitmap_init(struct drm_device *dev);
+void drm_ctxbitmap_cleanup(struct drm_device *dev);
+void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+int drm_ctxbitmap_next(struct drm_device *dev);
+
+/* Locking IOCTL support (drm_lock.c) */
+int drm_lock_take(struct drm_lock_data *lock_data,
+ unsigned int context);
+int drm_lock_transfer(struct drm_lock_data *lock_data,
+ unsigned int context);
+int drm_lock_free(struct drm_lock_data *lock_data,
+ unsigned int context);
+
+/* Buffer management support (drm_bufs.c) */
+unsigned long drm_get_resource_start(struct drm_device *dev,
+ unsigned int resource);
+unsigned long drm_get_resource_len(struct drm_device *dev,
+ unsigned int resource);
+void drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
+int drm_order(unsigned long size);
+int drm_addmap(struct drm_device *dev, unsigned long offset,
+ unsigned long size,
+ enum drm_map_type type, enum drm_map_flags flags,
+ drm_local_map_t **map_ptr);
+int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request);
+int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request);
+int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request);
+
+/* DMA support (drm_dma.c) */
+int drm_dma_setup(struct drm_device *dev);
+void drm_dma_takedown(struct drm_device *dev);
+void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf);
+void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv);
+#define drm_core_reclaim_buffers drm_reclaim_buffers
+
+/* IRQ support (drm_irq.c) */
+int drm_irq_install(struct drm_device *dev);
+int drm_irq_uninstall(struct drm_device *dev);
+irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+void drm_driver_irq_preinstall(struct drm_device *dev);
+void drm_driver_irq_postinstall(struct drm_device *dev);
+void drm_driver_irq_uninstall(struct drm_device *dev);
+
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
+extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime);
+extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+void drm_handle_vblank_events(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, int crtc);
+extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, int crtc);
+extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags);
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ int crtc, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
+
+struct timeval ns_to_timeval(const int64_t nsec);
+int64_t timeval_to_ns(const struct timeval *tv);
+
+/* AGP/PCI Express/GART support (drm_agpsupport.c) */
+int drm_device_is_agp(struct drm_device *dev);
+int drm_device_is_pcie(struct drm_device *dev);
+drm_agp_head_t *drm_agp_init(void);
+int drm_agp_acquire(struct drm_device *dev);
+int drm_agp_release(struct drm_device *dev);
+int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info);
+int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+void *drm_agp_allocate_memory(size_t pages, u32 type);
+int drm_agp_free_memory(void *handle);
+int drm_agp_bind_memory(void *handle, off_t start);
+int drm_agp_unbind_memory(void *handle);
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+
+/* Scatter Gather Support (drm_scatter.c) */
+void drm_sg_cleanup(drm_sg_mem_t *entry);
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
+
+/* sysctl support (drm_sysctl.c) */
+extern int drm_sysctl_init(struct drm_device *dev);
+extern int drm_sysctl_cleanup(struct drm_device *dev);
+
+/* ATI PCIGART support (ati_pcigart.c) */
+int drm_ati_pcigart_init(struct drm_device *dev,
+ struct drm_ati_pcigart_info *gart_info);
+int drm_ati_pcigart_cleanup(struct drm_device *dev,
+ struct drm_ati_pcigart_info *gart_info);
+
+/* Cache management (drm_memory.c) */
+void drm_clflush_pages(vm_page_t *pages, unsigned long num_pages);
+
+/* Locking IOCTL support (drm_drv.c) */
+int drm_lock(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_unlock(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_version(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_setversion(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* Misc. IOCTL support (drm_ioctl.c) */
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_getunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_setunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_getmap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_getclient(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_getstats(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_getcap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_noop(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* Context IOCTL support (drm_context.c) */
+int drm_resctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_addctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_modctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_getctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_switchctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_newctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_rmctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_setsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_getsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* Drawable IOCTL support (drm_drawable.c) */
+int drm_adddraw(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_rmdraw(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_update_draw(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
+ int handle);
+
+/* Drawable support (drm_drawable.c) */
+void drm_drawable_free_all(struct drm_device *dev);
+
+/* Authentication IOCTL support (drm_auth.c) */
+int drm_getmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_authmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* Buffer management support (drm_bufs.c) */
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_addbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_infobufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_markbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_freebufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_mapbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* DMA support (drm_dma.c) */
+int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+/* IRQ support (drm_irq.c) */
+int drm_control(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* AGP/GART support (drm_agpsupport.c) */
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* Stub support (drm_stub.c) */
+extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* Scatter Gather Support (drm_scatter.c) */
+int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_sg_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* consistent PCI memory functions (drm_pci.c) */
+drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
+ size_t align, dma_addr_t maxaddr);
+void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
+
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_destroy(struct drm_device *dev);
+
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep);
+int drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle);
+void drm_gem_object_handle_reference(struct drm_gem_object *obj);
+void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
+void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
+void drm_gem_object_reference(struct drm_gem_object *obj);
+void drm_gem_object_unreference(struct drm_gem_object *obj);
+void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_release(struct drm_gem_object *obj);
+void drm_gem_object_free(struct drm_gem_object *obj);
+int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
+ size_t size);
+int drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size);
+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
+ size_t size);
+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle);
+
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_priv);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_priv);
+
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
+int drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
+ struct vm_object **obj_res, int nprot);
+void drm_gem_pager_dtr(void *obj);
+
+void drm_device_lock_mtx(struct drm_device *dev);
+void drm_device_unlock_mtx(struct drm_device *dev);
+int drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,
+ const char *msg, int timeout);
+void drm_device_assert_mtx_locked(struct drm_device *dev);
+void drm_device_assert_mtx_unlocked(struct drm_device *dev);
+
+void drm_device_lock_struct(struct drm_device *dev);
+void drm_device_unlock_struct(struct drm_device *dev);
+int drm_device_sleep_struct(struct drm_device *dev, void *chan, int flags,
+ const char *msg, int timeout);
+void drm_device_assert_struct_locked(struct drm_device *dev);
+void drm_device_assert_struct_unlocked(struct drm_device *dev);
+
+void drm_compat_locking_init(struct drm_device *dev);
+void drm_sleep_locking_init(struct drm_device *dev);
+
+/* drm_modes.c */
+bool drm_mode_parse_command_line_for_connector(const char *mode_option,
+ struct drm_connector *connector, struct drm_cmdline_mode *mode);
+struct drm_display_mode *drm_mode_create_from_cmdline_mode(
+ struct drm_device *dev, struct drm_cmdline_mode *cmd);
+
+/* drm_edid.c */
+u8 *drm_find_cea_extension(struct edid *edid);
+
+/* Inline replacements for drm_alloc and friends */
+static __inline__ void *
+drm_alloc(size_t size, struct malloc_type *area)
+{
+ return malloc(size, area, M_NOWAIT);
+}
+
+static __inline__ void *
+drm_calloc(size_t nmemb, size_t size, struct malloc_type *area)
+{
+ return malloc(size * nmemb, area, M_NOWAIT | M_ZERO);
+}
+
+static __inline__ void *
+drm_realloc(void *oldpt, size_t oldsize, size_t size,
+ struct malloc_type *area)
+{
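+	/* Unlike realloc(), reallocf(9) frees oldpt when the allocation fails. */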
+ return reallocf(oldpt, size, area, M_NOWAIT);
+}
+
+static __inline__ void
+drm_free(void *pt, size_t size, struct malloc_type *area)
+{
+ free(pt, area);
+}
+
+/* Inline replacements for DRM_IOREMAP macros */
+static __inline__ void
+drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
+{
+ map->virtual = drm_ioremap_wc(dev, map);
+}
+static __inline__ void
+drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
+{
+ map->virtual = drm_ioremap(dev, map);
+}
+static __inline__ void
+drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
+{
+	if (map->virtual && map->size)
+ drm_ioremapfree(map);
+}
+
+static __inline__ struct drm_local_map *
+drm_core_findmap(struct drm_device *dev, unsigned long offset)
+{
+ drm_local_map_t *map;
+
+ DRM_LOCK_ASSERT(dev);
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+ if (offset == (unsigned long)map->handle)
+ return map;
+ }
+ return NULL;
+}
+
+static __inline__ void drm_core_dropmap(struct drm_map *map)
+{
+}
+
+#define KIB_NOTYET() \
+do { \
+ if (drm_debug_flag && drm_notyet_flag) \
+ printf("NOTYET: %s at %s:%d\n", __func__, __FILE__, __LINE__); \
+} while (0)
+
+#define KTR_DRM KTR_DEV
+#define KTR_DRM_REG KTR_SPARE3
+
+#endif /* __KERNEL__ */
+#endif /* _DRM_P_H_ */
diff --git a/sys/dev/drm2/drm_agpsupport.c b/sys/dev/drm2/drm_agpsupport.c
new file mode 100644
index 0000000..eafe117
--- /dev/null
+++ b/sys/dev/drm2/drm_agpsupport.c
@@ -0,0 +1,434 @@
+/*-
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_agpsupport.c
+ * Support code for tying the kernel AGP support to DRM drivers and
+ * the DRM's AGP ioctls.
+ */
+
+#include <dev/drm2/drmP.h>
+
+#include <dev/agp/agpreg.h>
+#include <dev/pci/pcireg.h>
+
+/* Returns 1 if the device has the given PCI capability, 0 if not. */
+static int
+drm_device_find_capability(struct drm_device *dev, int cap)
+{
+
+ return (pci_find_cap(dev->device, cap, NULL) == 0);
+}
+
+int drm_device_is_agp(struct drm_device *dev)
+{
+ if (dev->driver->device_is_agp != NULL) {
+ int ret;
+
+ /* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely
+ * AGP, 2 = fall back to PCI capability
+ */
+ ret = (*dev->driver->device_is_agp)(dev);
+ if (ret != DRM_MIGHT_BE_AGP)
+ return ret;
+ }
+
+ return (drm_device_find_capability(dev, PCIY_AGP));
+}
+
+int drm_device_is_pcie(struct drm_device *dev)
+{
+ return (drm_device_find_capability(dev, PCIY_EXPRESS));
+}
+
+int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info)
+{
+ struct agp_info *kern;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+
+ kern = &dev->agp->info;
+ agp_get_info(dev->agp->agpdev, kern);
+ info->agp_version_major = 1;
+ info->agp_version_minor = 0;
+ info->mode = kern->ai_mode;
+ info->aperture_base = kern->ai_aperture_base;
+ info->aperture_size = kern->ai_aperture_size;
+ info->memory_allowed = kern->ai_memory_allowed;
+ info->memory_used = kern->ai_memory_used;
+ info->id_vendor = kern->ai_devid & 0xffff;
+ info->id_device = kern->ai_devid >> 16;
+
+ return 0;
+}
+
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int err;
+ struct drm_agp_info info;
+
+ err = drm_agp_info(dev, &info);
+ if (err != 0)
+ return err;
+
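+	/* The ioctl layer has already copied the argument in and copies it back out. */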
+ *(struct drm_agp_info *) data = info;
+ return 0;
+}
+
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+
+ return drm_agp_acquire(dev);
+}
+
+int drm_agp_acquire(struct drm_device *dev)
+{
+ int retcode;
+
+ if (!dev->agp || dev->agp->acquired)
+ return EINVAL;
+
+ retcode = agp_acquire(dev->agp->agpdev);
+ if (retcode)
+ return retcode;
+
+ dev->agp->acquired = 1;
+ return 0;
+}
+
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+
+ return drm_agp_release(dev);
+}
+
+int drm_agp_release(struct drm_device * dev)
+{
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+ agp_release(dev->agp->agpdev);
+ dev->agp->acquired = 0;
+ return 0;
+}
+
+int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
+{
+
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+
+ dev->agp->mode = mode.mode;
+ agp_enable(dev->agp->agpdev, mode.mode);
+ dev->agp->enabled = 1;
+ return 0;
+}
+
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_agp_mode mode;
+
+ mode = *(struct drm_agp_mode *) data;
+
+ return drm_agp_enable(dev, mode);
+}
+
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+ drm_agp_mem_t *entry;
+ void *handle;
+ unsigned long pages;
+ u_int32_t type;
+ struct agp_memory_info info;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+
+ entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT | M_ZERO);
+ if (entry == NULL)
+ return ENOMEM;
+
+ pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+ type = (u_int32_t) request->type;
+
+ DRM_UNLOCK(dev);
+ handle = drm_agp_allocate_memory(pages, type);
+ DRM_LOCK(dev);
+ if (handle == NULL) {
+ free(entry, DRM_MEM_AGPLISTS);
+ return ENOMEM;
+ }
+
+ entry->handle = handle;
+ entry->bound = 0;
+ entry->pages = pages;
+ entry->prev = NULL;
+ entry->next = dev->agp->memory;
+ if (dev->agp->memory)
+ dev->agp->memory->prev = entry;
+ dev->agp->memory = entry;
+
+ agp_memory_info(dev->agp->agpdev, entry->handle, &info);
+
+ request->handle = (unsigned long) entry->handle;
+ request->physical = info.ami_physical;
+
+ return 0;
+}
+
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_agp_buffer request;
+ int retcode;
+
+ request = *(struct drm_agp_buffer *) data;
+
+ DRM_LOCK(dev);
+ retcode = drm_agp_alloc(dev, &request);
+ DRM_UNLOCK(dev);
+
+ *(struct drm_agp_buffer *) data = request;
+
+ return retcode;
+}
+
+static drm_agp_mem_t * drm_agp_lookup_entry(struct drm_device *dev,
+ void *handle)
+{
+ drm_agp_mem_t *entry;
+
+ for (entry = dev->agp->memory; entry; entry = entry->next) {
+ if (entry->handle == handle) return entry;
+ }
+ return NULL;
+}
+
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+ drm_agp_mem_t *entry;
+ int retcode;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+
+ entry = drm_agp_lookup_entry(dev, (void *)request->handle);
+ if (entry == NULL || !entry->bound)
+ return EINVAL;
+
+ DRM_UNLOCK(dev);
+ retcode = drm_agp_unbind_memory(entry->handle);
+ DRM_LOCK(dev);
+
+ if (retcode == 0)
+ entry->bound = 0;
+
+ return retcode;
+}
+
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_agp_binding request;
+ int retcode;
+
+ request = *(struct drm_agp_binding *) data;
+
+ DRM_LOCK(dev);
+ retcode = drm_agp_unbind(dev, &request);
+ DRM_UNLOCK(dev);
+
+ return retcode;
+}
+
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+ drm_agp_mem_t *entry;
+ int retcode;
+ int page;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+
+ DRM_DEBUG("agp_bind, page_size=%x\n", (int)PAGE_SIZE);
+
+ entry = drm_agp_lookup_entry(dev, (void *)request->handle);
+ if (entry == NULL || entry->bound)
+ return EINVAL;
+
+ page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ DRM_UNLOCK(dev);
+ retcode = drm_agp_bind_memory(entry->handle, page);
+ DRM_LOCK(dev);
+ if (retcode == 0)
+ entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+
+ return retcode;
+}
+
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_agp_binding request;
+ int retcode;
+
+ request = *(struct drm_agp_binding *) data;
+
+ DRM_LOCK(dev);
+ retcode = drm_agp_bind(dev, &request);
+ DRM_UNLOCK(dev);
+
+ return retcode;
+}
+
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+ drm_agp_mem_t *entry;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+
+ entry = drm_agp_lookup_entry(dev, (void*)request->handle);
+ if (entry == NULL)
+ return EINVAL;
+
+ if (entry->prev)
+ entry->prev->next = entry->next;
+ else
+ dev->agp->memory = entry->next;
+ if (entry->next)
+ entry->next->prev = entry->prev;
+
+ DRM_UNLOCK(dev);
+ if (entry->bound)
+ drm_agp_unbind_memory(entry->handle);
+ drm_agp_free_memory(entry->handle);
+ DRM_LOCK(dev);
+
+ free(entry, DRM_MEM_AGPLISTS);
+
+ return 0;
+}
+
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_agp_buffer request;
+ int retcode;
+
+ request = *(struct drm_agp_buffer *) data;
+
+ DRM_LOCK(dev);
+ retcode = drm_agp_free(dev, &request);
+ DRM_UNLOCK(dev);
+
+ return retcode;
+}
+
+drm_agp_head_t *drm_agp_init(void)
+{
+ device_t agpdev;
+ drm_agp_head_t *head = NULL;
+ int agp_available = 1;
+
+ agpdev = DRM_AGP_FIND_DEVICE();
+ if (!agpdev)
+ agp_available = 0;
+
+ DRM_DEBUG("agp_available = %d\n", agp_available);
+
+ if (agp_available) {
+ head = malloc(sizeof(*head), DRM_MEM_AGPLISTS,
+ M_NOWAIT | M_ZERO);
+ if (head == NULL)
+ return NULL;
+ head->agpdev = agpdev;
+ agp_get_info(agpdev, &head->info);
+ head->base = head->info.ai_aperture_base;
+ head->memory = NULL;
+ DRM_INFO("AGP at 0x%08lx %dMB\n",
+ (long)head->info.ai_aperture_base,
+ (int)(head->info.ai_aperture_size >> 20));
+ }
+ return head;
+}
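
The expected caller is the device attach path, which probes for AGP and
stashes the result; roughly (a sketch, not the exact drm_drv.c code):

    if (drm_device_is_agp(dev))
            dev->agp = drm_agp_init();
    /* dev->agp == NULL simply means no usable AGP bridge was found. */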
+
+void *drm_agp_allocate_memory(size_t pages, u32 type)
+{
+ device_t agpdev;
+
+ agpdev = DRM_AGP_FIND_DEVICE();
+ if (!agpdev)
+ return NULL;
+
+ return agp_alloc_memory(agpdev, type, pages << AGP_PAGE_SHIFT);
+}
+
+int drm_agp_free_memory(void *handle)
+{
+ device_t agpdev;
+
+ agpdev = DRM_AGP_FIND_DEVICE();
+ if (!agpdev || !handle)
+ return 0;
+
+ agp_free_memory(agpdev, handle);
+ return 1;
+}
+
+int drm_agp_bind_memory(void *handle, off_t start)
+{
+ device_t agpdev;
+
+ agpdev = DRM_AGP_FIND_DEVICE();
+ if (!agpdev || !handle)
+ return EINVAL;
+
+ return agp_bind_memory(agpdev, handle, start * PAGE_SIZE);
+}
+
+int drm_agp_unbind_memory(void *handle)
+{
+ device_t agpdev;
+
+ agpdev = DRM_AGP_FIND_DEVICE();
+ if (!agpdev || !handle)
+ return EINVAL;
+
+ return agp_unbind_memory(agpdev, handle);
+}
diff --git a/sys/dev/drm2/drm_atomic.h b/sys/dev/drm2/drm_atomic.h
new file mode 100644
index 0000000..e7dbed9
--- /dev/null
+++ b/sys/dev/drm2/drm_atomic.h
@@ -0,0 +1,93 @@
+/**
+ * \file drm_atomic.h
+ * Atomic operations used in the DRM which may or may not be provided by the OS.
+ *
+ * \author Eric Anholt <anholt@FreeBSD.org>
+ */
+
+/*-
+ * Copyright 2004 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Many of these implementations are rather fake, but good enough. */
+
+typedef u_int32_t atomic_t;
+
+#define atomic_set(p, v) (*(p) = (v))
+#define atomic_read(p) (*(p))
+#define atomic_inc(p) atomic_add_int(p, 1)
+#define atomic_dec(p) atomic_subtract_int(p, 1)
+#define atomic_add(n, p) atomic_add_int(p, n)
+#define atomic_sub(n, p) atomic_subtract_int(p, n)
+
+static __inline atomic_t
+test_and_set_bit(int b, volatile void *p)
+{
+ int s = splhigh();
+ unsigned int m = 1<<b;
+ unsigned int r = *(volatile int *)p & m;
+ *(volatile int *)p |= m;
+ splx(s);
+ return r;
+}
+
+static __inline void
+clear_bit(int b, volatile void *p)
+{
+ atomic_clear_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline void
+set_bit(int b, volatile void *p)
+{
+ atomic_set_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline int
+test_bit(int b, volatile void *p)
+{
+ return ((volatile int *)p)[b >> 5] & (1 << (b & 0x1f));
+}
+
+static __inline int
+find_first_zero_bit(volatile void *p, int max)
+{
+ int b;
+ volatile int *ptr = (volatile int *)p;
+
+ for (b = 0; b < max; b += 32) {
+ if (ptr[b >> 5] != ~0) {
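+			/* This word has at least one zero bit; locate it below. */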
+ for (;;) {
+ if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
+ return b;
+ b++;
+ }
+ }
+ }
+ return max;
+}
+
+#define BITS_TO_LONGS(x) (howmany((x), NBBY * sizeof(long)))
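
These helpers are sufficient for the simple bitmap allocators in the DRM,
such as the context bitmap behind drm_ctxbitmap_next(). A minimal
allocation sketch with a hypothetical 64-bit map; the caller is expected
to hold a lock, since the find/set pair is not atomic:

    #define EXAMPLE_NBITS   64
    static volatile int example_map[EXAMPLE_NBITS / 32];

    static int
    example_alloc_bit(void)
    {
            int bit = find_first_zero_bit(example_map, EXAMPLE_NBITS);

            if (bit >= EXAMPLE_NBITS)
                    return (-1);    /* all bits in use */
            set_bit(bit, example_map);
            return (bit);
    }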
diff --git a/sys/dev/drm2/drm_auth.c b/sys/dev/drm2/drm_auth.c
new file mode 100644
index 0000000..69acff9
--- /dev/null
+++ b/sys/dev/drm2/drm_auth.c
@@ -0,0 +1,190 @@
+/*-
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_auth.c
+ * Implementation of the get/authmagic ioctls implementing the authentication
+ * scheme between the master and clients.
+ */
+
+#include <dev/drm2/drmP.h>
+
+static int drm_hash_magic(drm_magic_t magic)
+{
+ return magic & (DRM_HASH_SIZE-1);
+}
+
+/**
+ * Returns the file private associated with the given magic number.
+ */
+static struct drm_file *drm_find_file(struct drm_device *dev, drm_magic_t magic)
+{
+ drm_magic_entry_t *pt;
+ int hash = drm_hash_magic(magic);
+
+ DRM_LOCK_ASSERT(dev);
+
+ for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
+ if (pt->magic == magic) {
+ return pt->priv;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Inserts the given magic number into the hash table of used magic number
+ * lists.
+ */
+static int drm_add_magic(struct drm_device *dev, struct drm_file *priv,
+ drm_magic_t magic)
+{
+ int hash;
+ drm_magic_entry_t *entry;
+
+ DRM_DEBUG("%d\n", magic);
+
+ DRM_LOCK_ASSERT(dev);
+
+ hash = drm_hash_magic(magic);
+ entry = malloc(sizeof(*entry), DRM_MEM_MAGIC, M_ZERO | M_NOWAIT);
+ if (!entry)
+ return ENOMEM;
+ entry->magic = magic;
+ entry->priv = priv;
+ entry->next = NULL;
+
+ if (dev->magiclist[hash].tail) {
+ dev->magiclist[hash].tail->next = entry;
+ dev->magiclist[hash].tail = entry;
+ } else {
+ dev->magiclist[hash].head = entry;
+ dev->magiclist[hash].tail = entry;
+ }
+
+ return 0;
+}
+
+/**
+ * Removes the given magic number from the hash table of used magic number
+ * lists.
+ */
+static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
+{
+ drm_magic_entry_t *prev = NULL;
+ drm_magic_entry_t *pt;
+ int hash;
+
+ DRM_LOCK_ASSERT(dev);
+
+ DRM_DEBUG("%d\n", magic);
+ hash = drm_hash_magic(magic);
+
+ for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
+ if (pt->magic == magic) {
+ if (dev->magiclist[hash].head == pt) {
+ dev->magiclist[hash].head = pt->next;
+ }
+ if (dev->magiclist[hash].tail == pt) {
+ dev->magiclist[hash].tail = prev;
+ }
+ if (prev) {
+ prev->next = pt->next;
+ }
+ free(pt, DRM_MEM_MAGIC);
+ return 0;
+ }
+ }
+
+ return EINVAL;
+}
+
+/**
+ * Called by the client, this returns a unique magic number to be authorized
+ * by the master.
+ *
+ * The master may use its own knowledge of the client (such as the X
+ * connection that the magic is passed over) to determine if the magic number
+ * should be authenticated.
+ */
+int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ static drm_magic_t sequence = 0;
+ struct drm_auth *auth = data;
+
+ /* Find unique magic */
+ if (file_priv->magic) {
+ auth->magic = file_priv->magic;
+ } else {
+ DRM_LOCK(dev);
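+		/* Atomically bump the sequence, retrying until the magic is unused. */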
+ do {
+ int old = sequence;
+
+ auth->magic = old+1;
+
+ if (!atomic_cmpset_int(&sequence, old, auth->magic))
+ continue;
+ } while (drm_find_file(dev, auth->magic));
+ file_priv->magic = auth->magic;
+ drm_add_magic(dev, file_priv, auth->magic);
+ DRM_UNLOCK(dev);
+ }
+
+ DRM_DEBUG("%u\n", auth->magic);
+
+ return 0;
+}
+
+/**
+ * Marks the client associated with the given magic number as authenticated.
+ */
+int drm_authmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_auth *auth = data;
+ struct drm_file *priv;
+
+ DRM_DEBUG("%u\n", auth->magic);
+
+ DRM_LOCK(dev);
+ priv = drm_find_file(dev, auth->magic);
+ if (priv != NULL) {
+ priv->authenticated = 1;
+ drm_remove_magic(dev, auth->magic);
+ DRM_UNLOCK(dev);
+ return 0;
+ } else {
+ DRM_UNLOCK(dev);
+ return EINVAL;
+ }
+}
diff --git a/sys/dev/drm2/drm_bufs.c b/sys/dev/drm2/drm_bufs.c
new file mode 100644
index 0000000..410c88f
--- /dev/null
+++ b/sys/dev/drm2/drm_bufs.c
@@ -0,0 +1,1130 @@
+/*-
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_bufs.c
+ * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
+ */
+
+#include <dev/pci/pcireg.h>
+
+#include <dev/drm2/drmP.h>
+
+/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
+ * drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual
+ * address for accessing them. Cleaned up at unload.
+ */
+static int drm_alloc_resource(struct drm_device *dev, int resource)
+{
+ struct resource *res;
+ int rid;
+
+ DRM_LOCK_ASSERT(dev);
+
+ if (resource >= DRM_MAX_PCI_RESOURCE) {
+ DRM_ERROR("Resource %d too large\n", resource);
+ return 1;
+ }
+
+ if (dev->pcir[resource] != NULL) {
+ return 0;
+ }
+
+ DRM_UNLOCK(dev);
+ rid = PCIR_BAR(resource);
+ res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid,
+ RF_SHAREABLE);
+ DRM_LOCK(dev);
+ if (res == NULL) {
+ DRM_ERROR("Couldn't find resource 0x%x\n", resource);
+ return 1;
+ }
+
+ if (dev->pcir[resource] == NULL) {
+ dev->pcirid[resource] = rid;
+ dev->pcir[resource] = res;
+ }
+
+ return 0;
+}
+
+unsigned long drm_get_resource_start(struct drm_device *dev,
+ unsigned int resource)
+{
+ if (drm_alloc_resource(dev, resource) != 0)
+ return 0;
+
+ return rman_get_start(dev->pcir[resource]);
+}
+
+unsigned long drm_get_resource_len(struct drm_device *dev,
+ unsigned int resource)
+{
+ if (drm_alloc_resource(dev, resource) != 0)
+ return 0;
+
+ return rman_get_size(dev->pcir[resource]);
+}
+
+int drm_addmap(struct drm_device * dev, unsigned long offset,
+ unsigned long size,
+ enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
+{
+ drm_local_map_t *map;
+ int align;
+ /*drm_agp_mem_t *entry;
+ int valid;*/
+
+ /* Only allow shared memory to be removable since we only keep enough
+	 * bookkeeping information about shared memory to allow for removal
+ * when processes fork.
+ */
+ if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
+ DRM_ERROR("Requested removable map for non-DRM_SHM\n");
+ return EINVAL;
+ }
+ if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
+ DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
+ offset, size);
+ return EINVAL;
+ }
+ if (offset + size < offset) {
+ DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
+ offset, size);
+ return EINVAL;
+ }
+
+ DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
+ size, type);
+
+ /* Check if this is just another version of a kernel-allocated map, and
+ * just hand that back if so.
+ */
+ if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
+ type == _DRM_SHM) {
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+ if (map->type == type && (map->offset == offset ||
+ (map->type == _DRM_SHM &&
+ map->flags == _DRM_CONTAINS_LOCK))) {
+ map->size = size;
+ DRM_DEBUG("Found kernel map %d\n", type);
+ goto done;
+ }
+ }
+ }
+ DRM_UNLOCK(dev);
+
+ /* Allocate a new map structure, fill it in, and do any type-specific
+ * initialization necessary.
+ */
+ map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
+ if (!map) {
+ DRM_LOCK(dev);
+ return ENOMEM;
+ }
+
+ map->offset = offset;
+ map->size = size;
+ map->type = type;
+ map->flags = flags;
+ map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
+ DRM_MAP_HANDLE_SHIFT);
+
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ map->virtual = drm_ioremap(dev, map);
+ if (!(map->flags & _DRM_WRITE_COMBINING))
+ break;
+ /* FALLTHROUGH */
+ case _DRM_FRAME_BUFFER:
+ if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
+ map->mtrr = 1;
+ break;
+ case _DRM_SHM:
+ map->virtual = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
+ DRM_DEBUG("%lu %d %p\n",
+ map->size, drm_order(map->size), map->virtual);
+ if (!map->virtual) {
+ free(map, DRM_MEM_MAPS);
+ DRM_LOCK(dev);
+ return ENOMEM;
+ }
+ map->offset = (unsigned long)map->virtual;
+ if (map->flags & _DRM_CONTAINS_LOCK) {
+ /* Prevent a 2nd X Server from creating a 2nd lock */
+ DRM_LOCK(dev);
+ if (dev->lock.hw_lock != NULL) {
+ DRM_UNLOCK(dev);
+ free(map->virtual, DRM_MEM_MAPS);
+ free(map, DRM_MEM_MAPS);
+ return EBUSY;
+ }
+ dev->lock.hw_lock = map->virtual; /* Pointer to lock */
+ DRM_UNLOCK(dev);
+ }
+ break;
+ case _DRM_AGP:
+ /*valid = 0;*/
+ /* In some cases (i810 driver), user space may have already
+ * added the AGP base itself, because dev->agp->base previously
+ * only got set during AGP enable. So, only add the base
+ * address if the map's offset isn't already within the
+ * aperture.
+ */
+ if (map->offset < dev->agp->base ||
+ map->offset > dev->agp->base +
+ dev->agp->info.ai_aperture_size - 1) {
+ map->offset += dev->agp->base;
+ }
+ map->mtrr = dev->agp->mtrr; /* for getmap */
+ /*for (entry = dev->agp->memory; entry; entry = entry->next) {
+ if ((map->offset >= entry->bound) &&
+ (map->offset + map->size <=
+ entry->bound + entry->pages * PAGE_SIZE)) {
+ valid = 1;
+ break;
+ }
+ }
+ if (!valid) {
+ free(map, DRM_MEM_MAPS);
+ DRM_LOCK(dev);
+ return EACCES;
+ }*/
+ break;
+ case _DRM_SCATTER_GATHER:
+ if (!dev->sg) {
+ free(map, DRM_MEM_MAPS);
+ DRM_LOCK(dev);
+ return EINVAL;
+ }
+ map->virtual = (void *)(dev->sg->vaddr + offset);
+ map->offset = dev->sg->vaddr + offset;
+ break;
+ case _DRM_CONSISTENT:
+ /* Unfortunately, we don't get any alignment specification from
+ * the caller, so we have to guess. drm_pci_alloc requires
+ * a power-of-two alignment, so try to align the bus address of
+ * the map to it size if possible, otherwise just assume
+	 * the map to its size if possible, otherwise just assume
+ */
+ align = map->size;
+ if ((align & (align - 1)) != 0)
+ align = PAGE_SIZE;
+ map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
+ if (map->dmah == NULL) {
+ free(map, DRM_MEM_MAPS);
+ DRM_LOCK(dev);
+ return ENOMEM;
+ }
+ map->virtual = map->dmah->vaddr;
+ map->offset = map->dmah->busaddr;
+ break;
+ default:
+ DRM_ERROR("Bad map type %d\n", map->type);
+ free(map, DRM_MEM_MAPS);
+ DRM_LOCK(dev);
+ return EINVAL;
+ }
+
+ DRM_LOCK(dev);
+ TAILQ_INSERT_TAIL(&dev->maplist, map, link);
+
+done:
+ /* Jumped to, with lock held, when a kernel map is found. */
+
+ DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
+ map->size);
+
+ *map_ptr = map;
+
+ return 0;
+}
+
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_map *request = data;
+ drm_local_map_t *map;
+ int err;
+
+ if (!(dev->flags & (FREAD|FWRITE)))
+ return EACCES; /* Require read/write */
+
+ if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
+ return EACCES;
+
+ DRM_LOCK(dev);
+ err = drm_addmap(dev, request->offset, request->size, request->type,
+ request->flags, &map);
+ DRM_UNLOCK(dev);
+ if (err != 0)
+ return err;
+
+ request->offset = map->offset;
+ request->size = map->size;
+ request->type = map->type;
+ request->flags = map->flags;
+ request->mtrr = map->mtrr;
+ request->handle = (void *)map->handle;
+
+ return 0;
+}
+
+void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
+{
+ DRM_LOCK_ASSERT(dev);
+
+ if (map == NULL)
+ return;
+
+ TAILQ_REMOVE(&dev->maplist, map, link);
+
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ if (map->bsr == NULL)
+ drm_ioremapfree(map);
+ /* FALLTHROUGH */
+ case _DRM_FRAME_BUFFER:
+ if (map->mtrr) {
+ int __unused retcode;
+
+ retcode = drm_mtrr_del(0, map->offset, map->size,
+ DRM_MTRR_WC);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+ break;
+ case _DRM_SHM:
+ free(map->virtual, DRM_MEM_MAPS);
+ break;
+ case _DRM_AGP:
+ case _DRM_SCATTER_GATHER:
+ break;
+ case _DRM_CONSISTENT:
+ drm_pci_free(dev, map->dmah);
+ break;
+ default:
+ DRM_ERROR("Bad map type %d\n", map->type);
+ break;
+ }
+
+ if (map->bsr != NULL) {
+ bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
+ map->bsr);
+ }
+
+ DRM_UNLOCK(dev);
+ if (map->handle)
+ free_unr(dev->map_unrhdr, (unsigned long)map->handle >>
+ DRM_MAP_HANDLE_SHIFT);
+ DRM_LOCK(dev);
+
+ free(map, DRM_MEM_MAPS);
+}
+
+/* Remove a map from the maplist and deallocate its resources if the
+ * mapping isn't in use.
+ */
+
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_local_map_t *map;
+ struct drm_map *request = data;
+
+ DRM_LOCK(dev);
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+ if (map->handle == request->handle &&
+ map->flags & _DRM_REMOVABLE)
+ break;
+ }
+
+ /* No match found. */
+ if (map == NULL) {
+ DRM_UNLOCK(dev);
+ return EINVAL;
+ }
+
+ drm_rmmap(dev, map);
+
+ DRM_UNLOCK(dev);
+
+ return 0;
+}
+
+static void drm_cleanup_buf_error(struct drm_device *dev,
+ drm_buf_entry_t *entry)
+{
+ int i;
+
+ if (entry->seg_count) {
+ for (i = 0; i < entry->seg_count; i++) {
+ drm_pci_free(dev, entry->seglist[i]);
+ }
+ free(entry->seglist, DRM_MEM_SEGS);
+
+ entry->seg_count = 0;
+ }
+
+ if (entry->buf_count) {
+ for (i = 0; i < entry->buf_count; i++) {
+ free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
+ }
+ free(entry->buflist, DRM_MEM_BUFS);
+
+ entry->buf_count = 0;
+ }
+}
+
+static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_entry_t *entry;
+ /*drm_agp_mem_t *agp_entry;
+ int valid*/
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+ drm_buf_t **temp_buflist;
+
+ count = request->count;
+ order = drm_order(request->size);
+ size = 1 << order;
+
+ alignment = (request->flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
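+	/*
+	 * Example: request->size = 65536 gives order 16 and size = 64KB;
+	 * with 4KB pages, page_order is 4, so total = PAGE_SIZE << 4 = 64KB,
+	 * the amount accounted per buffer in byte_count below.
+	 */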
+
+ byte_count = 0;
+ agp_offset = dev->agp->base + request->agp_start;
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+
+ /* Make sure buffers are located in AGP memory that we own */
+ /* Breaks MGA due to drm_alloc_agp not setting up entries for the
+ * memory. Safe to ignore for now because these ioctls are still
+ * root-only.
+ */
+ /*valid = 0;
+ for (agp_entry = dev->agp->memory; agp_entry;
+ agp_entry = agp_entry->next) {
+ if ((agp_offset >= agp_entry->bound) &&
+ (agp_offset + total * count <=
+ agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+ valid = 1;
+ break;
+ }
+ }
+ if (!valid) {
+ DRM_DEBUG("zone invalid\n");
+ return EINVAL;
+ }*/
+
+ entry = &dma->bufs[order];
+
+ entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
+ M_NOWAIT | M_ZERO);
+ if (!entry->buflist) {
+ return ENOMEM;
+ }
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+
+ offset = 0;
+
+ while (entry->buf_count < count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+
+ buf->offset = (dma->byte_count + offset);
+ buf->bus_address = agp_offset + offset;
+ buf->address = (void *)(agp_offset + offset);
+ buf->next = NULL;
+ buf->pending = 0;
+ buf->file_priv = NULL;
+
+ buf->dev_priv_size = dev->driver->buf_priv_size;
+ buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
+ M_NOWAIT | M_ZERO);
+ if (buf->dev_private == NULL) {
+ /* Set count correctly so we free the proper amount. */
+ entry->buf_count = count;
+ drm_cleanup_buf_error(dev, entry);
+ return ENOMEM;
+ }
+
+ offset += alignment;
+ entry->buf_count++;
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
+ temp_buflist = realloc(dma->buflist,
+ (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
+ DRM_MEM_BUFS, M_NOWAIT);
+ if (temp_buflist == NULL) {
+ /* Free the entry because it isn't valid */
+ drm_cleanup_buf_error(dev, entry);
+ return ENOMEM;
+ }
+ dma->buflist = temp_buflist;
+
+ for (i = 0; i < entry->buf_count; i++) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+
+ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+ request->count = entry->buf_count;
+ request->size = size;
+
+ dma->flags = _DRM_DMA_USE_AGP;
+
+ return 0;
+}
+
+static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int count;
+ int order;
+ int size;
+ int total;
+ int page_order;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ int alignment;
+ unsigned long offset;
+ int i;
+ int byte_count;
+ int page_count;
+ unsigned long *temp_pagelist;
+ drm_buf_t **temp_buflist;
+
+ count = request->count;
+ order = drm_order(request->size);
+ size = 1 << order;
+
+ DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
+ request->count, request->size, size, order);
+
+ alignment = (request->flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ entry = &dma->bufs[order];
+
+ entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
+ M_NOWAIT | M_ZERO);
+ entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
+ M_NOWAIT | M_ZERO);
+
+ /* Keep the original pagelist until we know all the allocations
+ * have succeeded
+ */
+ temp_pagelist = malloc((dma->page_count + (count << page_order)) *
+ sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);
+
+ if (entry->buflist == NULL || entry->seglist == NULL ||
+ temp_pagelist == NULL) {
+ free(temp_pagelist, DRM_MEM_PAGES);
+ free(entry->seglist, DRM_MEM_SEGS);
+ free(entry->buflist, DRM_MEM_BUFS);
+ return ENOMEM;
+ }
+
+ memcpy(temp_pagelist, dma->pagelist, dma->page_count *
+ sizeof(*dma->pagelist));
+
+ DRM_DEBUG("pagelist: %d entries\n",
+ dma->page_count + (count << page_order));
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ byte_count = 0;
+ page_count = 0;
+
+ while (entry->buf_count < count) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
+ 0xfffffffful);
+ DRM_SPINLOCK(&dev->dma_lock);
+ if (dmah == NULL) {
+ /* Set count correctly so we free the proper amount. */
+ entry->buf_count = count;
+ entry->seg_count = count;
+ drm_cleanup_buf_error(dev, entry);
+ free(temp_pagelist, DRM_MEM_PAGES);
+ return ENOMEM;
+ }
+
+ entry->seglist[entry->seg_count++] = dmah;
+ for (i = 0; i < (1 << page_order); i++) {
+ DRM_DEBUG("page %d @ %p\n",
+ dma->page_count + page_count,
+ (char *)dmah->vaddr + PAGE_SIZE * i);
+ temp_pagelist[dma->page_count + page_count++] =
+ (long)dmah->vaddr + PAGE_SIZE * i;
+ }
+ for (offset = 0;
+ offset + size <= total && entry->buf_count < count;
+ offset += alignment, ++entry->buf_count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + byte_count + offset);
+ buf->address = ((char *)dmah->vaddr + offset);
+ buf->bus_address = dmah->busaddr + offset;
+ buf->next = NULL;
+ buf->pending = 0;
+ buf->file_priv = NULL;
+
+ buf->dev_priv_size = dev->driver->buf_priv_size;
+ buf->dev_private = malloc(buf->dev_priv_size,
+ DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
+ if (buf->dev_private == NULL) {
+ /* Set count correctly so we free the proper amount. */
+ entry->buf_count = count;
+ entry->seg_count = count;
+ drm_cleanup_buf_error(dev, entry);
+ free(temp_pagelist, DRM_MEM_PAGES);
+ return ENOMEM;
+ }
+
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ temp_buflist = realloc(dma->buflist,
+ (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
+ DRM_MEM_BUFS, M_NOWAIT);
+ if (temp_buflist == NULL) {
+ /* Free the entry because it isn't valid */
+ drm_cleanup_buf_error(dev, entry);
+ free(temp_pagelist, DRM_MEM_PAGES);
+ return ENOMEM;
+ }
+ dma->buflist = temp_buflist;
+
+ for (i = 0; i < entry->buf_count; i++) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+	/* No allocations failed, so now we can replace the original pagelist
+ * with the new one.
+ */
+ free(dma->pagelist, DRM_MEM_PAGES);
+ dma->pagelist = temp_pagelist;
+
+ dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += entry->seg_count << page_order;
+ dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+ request->count = entry->buf_count;
+ request->size = size;
+
+ return 0;
+}
+
+static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+ drm_buf_t **temp_buflist;
+
+ count = request->count;
+ order = drm_order(request->size);
+ size = 1 << order;
+
+ alignment = (request->flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ byte_count = 0;
+ agp_offset = request->agp_start;
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: %ld\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+
+ entry = &dma->bufs[order];
+
+ entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
+ M_NOWAIT | M_ZERO);
+ if (entry->buflist == NULL)
+ return ENOMEM;
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+
+ offset = 0;
+
+ while (entry->buf_count < count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+
+ buf->offset = (dma->byte_count + offset);
+ buf->bus_address = agp_offset + offset;
+ buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
+ buf->next = NULL;
+ buf->pending = 0;
+ buf->file_priv = NULL;
+
+ buf->dev_priv_size = dev->driver->buf_priv_size;
+ buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
+ M_NOWAIT | M_ZERO);
+ if (buf->dev_private == NULL) {
+ /* Set count correctly so we free the proper amount. */
+ entry->buf_count = count;
+ drm_cleanup_buf_error(dev, entry);
+ return ENOMEM;
+ }
+
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+
+ offset += alignment;
+ entry->buf_count++;
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
+ temp_buflist = realloc(dma->buflist,
+ (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
+ DRM_MEM_BUFS, M_NOWAIT);
+ if (temp_buflist == NULL) {
+ /* Free the entry because it isn't valid */
+ drm_cleanup_buf_error(dev, entry);
+ return ENOMEM;
+ }
+ dma->buflist = temp_buflist;
+
+ for (i = 0; i < entry->buf_count; i++) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+
+ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+ request->count = entry->buf_count;
+ request->size = size;
+
+ dma->flags = _DRM_DMA_USE_SG;
+
+ return 0;
+}
+
+int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
+{
+ int order, ret;
+
+ if (request->count < 0 || request->count > 4096)
+ return EINVAL;
+
+ order = drm_order(request->size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+ return EINVAL;
+
+ DRM_SPINLOCK(&dev->dma_lock);
+
+ /* No more allocations after first buffer-using ioctl. */
+ if (dev->buf_use != 0) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return EBUSY;
+ }
+ /* No more than one allocation per order */
+ if (dev->dma->bufs[order].buf_count != 0) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return ENOMEM;
+ }
+
+ ret = drm_do_addbufs_agp(dev, request);
+
+ DRM_SPINUNLOCK(&dev->dma_lock);
+
+ return ret;
+}
+
+int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
+{
+ int order, ret;
+
+ if (!DRM_SUSER(DRM_CURPROC))
+ return EACCES;
+
+ if (request->count < 0 || request->count > 4096)
+ return EINVAL;
+
+ order = drm_order(request->size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+ return EINVAL;
+
+ DRM_SPINLOCK(&dev->dma_lock);
+
+ /* No more allocations after first buffer-using ioctl. */
+ if (dev->buf_use != 0) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return EBUSY;
+ }
+ /* No more than one allocation per order */
+ if (dev->dma->bufs[order].buf_count != 0) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return ENOMEM;
+ }
+
+ ret = drm_do_addbufs_sg(dev, request);
+
+ DRM_SPINUNLOCK(&dev->dma_lock);
+
+ return ret;
+}
+
+int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
+{
+ int order, ret;
+
+ if (!DRM_SUSER(DRM_CURPROC))
+ return EACCES;
+
+ if (request->count < 0 || request->count > 4096)
+ return EINVAL;
+
+ order = drm_order(request->size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+ return EINVAL;
+
+ DRM_SPINLOCK(&dev->dma_lock);
+
+ /* No more allocations after first buffer-using ioctl. */
+ if (dev->buf_use != 0) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return EBUSY;
+ }
+ /* No more than one allocation per order */
+ if (dev->dma->bufs[order].buf_count != 0) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return ENOMEM;
+ }
+
+ ret = drm_do_addbufs_pci(dev, request);
+
+ DRM_SPINUNLOCK(&dev->dma_lock);
+
+ return ret;
+}
+
+int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_buf_desc *request = data;
+ int err;
+
+ if (request->flags & _DRM_AGP_BUFFER)
+ err = drm_addbufs_agp(dev, request);
+ else if (request->flags & _DRM_SG_BUFFER)
+ err = drm_addbufs_sg(dev, request);
+ else
+ err = drm_addbufs_pci(dev, request);
+
+ return err;
+}
+
+int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_device_dma_t *dma = dev->dma;
+ struct drm_buf_info *request = data;
+ int i;
+ int count;
+ int retcode = 0;
+
+ DRM_SPINLOCK(&dev->dma_lock);
+ ++dev->buf_use; /* Can't allocate more after this call */
+ DRM_SPINUNLOCK(&dev->dma_lock);
+
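+	/*
+	 * First count how many orders actually hold buffers; the copyout
+	 * below only runs when the caller supplied at least that many
+	 * drm_buf_desc slots, so userspace typically calls this ioctl
+	 * twice: once to size the list, once to fetch it.
+	 */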
+ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+ if (dma->bufs[i].buf_count)
+ ++count;
+ }
+
+ DRM_DEBUG("count = %d\n", count);
+
+ if (request->count >= count) {
+ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+ if (dma->bufs[i].buf_count) {
+ struct drm_buf_desc from;
+
+ from.count = dma->bufs[i].buf_count;
+ from.size = dma->bufs[i].buf_size;
+ from.low_mark = dma->bufs[i].freelist.low_mark;
+ from.high_mark = dma->bufs[i].freelist.high_mark;
+
+ if (DRM_COPY_TO_USER(&request->list[count], &from,
+ sizeof(struct drm_buf_desc)) != 0) {
+ retcode = EFAULT;
+ break;
+ }
+
+ DRM_DEBUG("%d %d %d %d %d\n",
+ i, dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].freelist.low_mark,
+ dma->bufs[i].freelist.high_mark);
+ ++count;
+ }
+ }
+ }
+ request->count = count;
+
+ return retcode;
+}
+
+int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_device_dma_t *dma = dev->dma;
+ struct drm_buf_desc *request = data;
+ int order;
+
+ DRM_DEBUG("%d, %d, %d\n",
+ request->size, request->low_mark, request->high_mark);
+
+ order = drm_order(request->size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
+ request->low_mark < 0 || request->high_mark < 0) {
+ return EINVAL;
+ }
+
+ DRM_SPINLOCK(&dev->dma_lock);
+ if (request->low_mark > dma->bufs[order].buf_count ||
+ request->high_mark > dma->bufs[order].buf_count) {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return EINVAL;
+ }
+
+ dma->bufs[order].freelist.low_mark = request->low_mark;
+ dma->bufs[order].freelist.high_mark = request->high_mark;
+ DRM_SPINUNLOCK(&dev->dma_lock);
+
+ return 0;
+}
+
+int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_device_dma_t *dma = dev->dma;
+ struct drm_buf_free *request = data;
+ int i;
+ int idx;
+ drm_buf_t *buf;
+ int retcode = 0;
+
+ DRM_DEBUG("%d\n", request->count);
+
+ DRM_SPINLOCK(&dev->dma_lock);
+ for (i = 0; i < request->count; i++) {
+ if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
+ retcode = EFAULT;
+ break;
+ }
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("Index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ retcode = EINVAL;
+ break;
+ }
+ buf = dma->buflist[idx];
+ if (buf->file_priv != file_priv) {
+ DRM_ERROR("Process %d freeing buffer not owned\n",
+ DRM_CURRENTPID);
+ retcode = EINVAL;
+ break;
+ }
+ drm_free_buffer(dev, buf);
+ }
+ DRM_SPINUNLOCK(&dev->dma_lock);
+
+ return retcode;
+}
+
+int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ vm_offset_t address;
+ struct vmspace *vms;
+ vm_ooffset_t foff;
+ vm_size_t size;
+ vm_offset_t vaddr;
+ struct drm_buf_map *request = data;
+ int i;
+
+ vms = DRM_CURPROC->td_proc->p_vmspace;
+
+ DRM_SPINLOCK(&dev->dma_lock);
+ dev->buf_use++; /* Can't allocate more after this call */
+ DRM_SPINUNLOCK(&dev->dma_lock);
+
+ if (request->count < dma->buf_count)
+ goto done;
+
+ if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
+ (drm_core_check_feature(dev, DRIVER_SG) &&
+ (dma->flags & _DRM_DMA_USE_SG))) {
+ drm_local_map_t *map = dev->agp_buffer_map;
+
+ if (map == NULL) {
+ retcode = EINVAL;
+ goto done;
+ }
+ size = round_page(map->size);
+ foff = (unsigned long)map->handle;
+ } else {
+		size = round_page(dma->byte_count);
+ foff = 0;
+ }
+
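+	/*
+	 * Start the mapping hint just past the largest possible data
+	 * segment (vm_daddr + MAXDSIZ), so the buffer mapping cannot
+	 * collide with the process heap.
+	 */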
+ vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
+#if __FreeBSD_version >= 600023
+ retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
+ VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
+ dev->devnode, foff);
+#else
+ retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
+ VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
+ SLIST_FIRST(&dev->devnode->si_hlist), foff);
+#endif
+ if (retcode)
+ goto done;
+
+ request->virtual = (void *)vaddr;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ if (DRM_COPY_TO_USER(&request->list[i].idx,
+ &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
+ retcode = EFAULT;
+ goto done;
+ }
+ if (DRM_COPY_TO_USER(&request->list[i].total,
+ &dma->buflist[i]->total, sizeof(request->list[0].total))) {
+ retcode = EFAULT;
+ goto done;
+ }
+ if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
+ sizeof(zero))) {
+ retcode = EFAULT;
+ goto done;
+ }
+ address = vaddr + dma->buflist[i]->offset; /* *** */
+ if (DRM_COPY_TO_USER(&request->list[i].address, &address,
+ sizeof(address))) {
+ retcode = EFAULT;
+ goto done;
+ }
+ }
+
+ done:
+ request->count = dma->buf_count;
+
+ DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
+
+ return retcode;
+}
+
+/*
+ * Compute order. Can be made faster.
+ */
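+/*
+ * For example, drm_order(PAGE_SIZE) == PAGE_SHIFT, and any size that is
+ * not an exact power of two rounds up: drm_order(PAGE_SIZE + 1) ==
+ * PAGE_SHIFT + 1.
+ */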
+int drm_order(unsigned long size)
+{
+ int order;
+
+ if (size == 0)
+ return 0;
+
+ order = flsl(size) - 1;
+ if (size & ~(1ul << order))
+ ++order;
+
+ return order;
+}
diff --git a/sys/dev/drm2/drm_context.c b/sys/dev/drm2/drm_context.c
new file mode 100644
index 0000000..c844a39
--- /dev/null
+++ b/sys/dev/drm2/drm_context.c
@@ -0,0 +1,312 @@
+/*-
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_context.c
+ * Implementation of the context management ioctls.
+ */
+
+#include <dev/drm2/drmP.h>
+
+/* ================================================================
+ * Context bitmap support
+ */
+
+void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
+{
+ if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
+ dev->ctx_bitmap == NULL) {
+ DRM_ERROR("Attempt to free invalid context handle: %d\n",
+ ctx_handle);
+ return;
+ }
+
+ DRM_LOCK(dev);
+ clear_bit(ctx_handle, dev->ctx_bitmap);
+ dev->context_sareas[ctx_handle] = NULL;
+ DRM_UNLOCK(dev);
+ return;
+}
+
+int drm_ctxbitmap_next(struct drm_device *dev)
+{
+ int bit;
+
+ if (dev->ctx_bitmap == NULL)
+ return -1;
+
+ DRM_LOCK(dev);
+ bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
+ if (bit >= DRM_MAX_CTXBITMAP) {
+ DRM_UNLOCK(dev);
+ return -1;
+ }
+
+ set_bit(bit, dev->ctx_bitmap);
+ DRM_DEBUG("bit : %d\n", bit);
+ if ((bit+1) > dev->max_context) {
+ drm_local_map_t **ctx_sareas;
+ int max_ctx = (bit+1);
+
+ ctx_sareas = realloc(dev->context_sareas,
+ max_ctx * sizeof(*dev->context_sareas),
+ DRM_MEM_SAREA, M_NOWAIT);
+ if (ctx_sareas == NULL) {
+ clear_bit(bit, dev->ctx_bitmap);
+ DRM_DEBUG("failed to allocate bit : %d\n", bit);
+ DRM_UNLOCK(dev);
+ return -1;
+ }
+ dev->max_context = max_ctx;
+ dev->context_sareas = ctx_sareas;
+ dev->context_sareas[bit] = NULL;
+ }
+ DRM_UNLOCK(dev);
+ return bit;
+}
+
+int drm_ctxbitmap_init(struct drm_device *dev)
+{
+ int i;
+ int temp;
+
+ DRM_LOCK(dev);
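+	/* A single page of bits is what bounds DRM_MAX_CTXBITMAP. */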
+ dev->ctx_bitmap = malloc(PAGE_SIZE, DRM_MEM_CTXBITMAP,
+ M_NOWAIT | M_ZERO);
+ if (dev->ctx_bitmap == NULL) {
+ DRM_UNLOCK(dev);
+ return ENOMEM;
+ }
+ dev->context_sareas = NULL;
+ dev->max_context = -1;
+ DRM_UNLOCK(dev);
+
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ temp = drm_ctxbitmap_next(dev);
+ DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
+ }
+
+ return 0;
+}
+
+void drm_ctxbitmap_cleanup(struct drm_device *dev)
+{
+ DRM_LOCK(dev);
+ if (dev->context_sareas != NULL)
+ free(dev->context_sareas, DRM_MEM_SAREA);
+ free(dev->ctx_bitmap, DRM_MEM_CTXBITMAP);
+ DRM_UNLOCK(dev);
+}
+
+/* ================================================================
+ * Per Context SAREA Support
+ */
+
+int drm_getsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_ctx_priv_map *request = data;
+ drm_local_map_t *map;
+
+ DRM_LOCK(dev);
+ if (dev->max_context < 0 ||
+ request->ctx_id >= (unsigned) dev->max_context) {
+ DRM_UNLOCK(dev);
+ return EINVAL;
+ }
+
+ map = dev->context_sareas[request->ctx_id];
+ DRM_UNLOCK(dev);
+
+ request->handle = (void *)map->handle;
+
+ return 0;
+}
+
+int drm_setsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_ctx_priv_map *request = data;
+ drm_local_map_t *map = NULL;
+
+ DRM_LOCK(dev);
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+ if (map->handle == request->handle) {
+ if (dev->max_context < 0)
+ goto bad;
+ if (request->ctx_id >= (unsigned) dev->max_context)
+ goto bad;
+ dev->context_sareas[request->ctx_id] = map;
+ DRM_UNLOCK(dev);
+ return 0;
+ }
+ }
+
+bad:
+ DRM_UNLOCK(dev);
+ return EINVAL;
+}
+
+/* ================================================================
+ * The actual DRM context handling routines
+ */
+
+int drm_context_switch(struct drm_device *dev, int old, int new)
+{
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ return EBUSY;
+ }
+
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ return 0;
+}
+
+int drm_context_switch_complete(struct drm_device *dev, int new)
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+	/*
+	 * If a context switch is ever initiated when the kernel holds the
+	 * lock, release that lock here.
+	 */
+ clear_bit(0, &dev->context_flag);
+
+ return 0;
+}
+
+int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_ctx_res *res = data;
+ struct drm_ctx ctx;
+ int i;
+
+ if (res->count >= DRM_RESERVED_CONTEXTS) {
+ bzero(&ctx, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ if (DRM_COPY_TO_USER(&res->contexts[i],
+ &ctx, sizeof(ctx)))
+ return EFAULT;
+ }
+ }
+ res->count = DRM_RESERVED_CONTEXTS;
+
+ return 0;
+}
+
+int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_ctx *ctx = data;
+
+ ctx->handle = drm_ctxbitmap_next(dev);
+ if (ctx->handle == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+ ctx->handle = drm_ctxbitmap_next(dev);
+ }
+ DRM_DEBUG("%d\n", ctx->handle);
+ if (ctx->handle == -1) {
+ DRM_DEBUG("Not enough free contexts.\n");
+ /* Should this return -EBUSY instead? */
+ return ENOMEM;
+ }
+
+ if (dev->driver->context_ctor && ctx->handle != DRM_KERNEL_CONTEXT) {
+ DRM_LOCK(dev);
+ dev->driver->context_ctor(dev, ctx->handle);
+ DRM_UNLOCK(dev);
+ }
+
+ return 0;
+}
+
+int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ /* This does nothing */
+ return 0;
+}
+
+int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_ctx *ctx = data;
+
+ /* This is 0, because we don't handle any context flags */
+ ctx->flags = 0;
+
+ return 0;
+}
+
+int drm_switchctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_ctx *ctx = data;
+
+ DRM_DEBUG("%d\n", ctx->handle);
+ return drm_context_switch(dev, dev->last_context, ctx->handle);
+}
+
+int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_ctx *ctx = data;
+
+ DRM_DEBUG("%d\n", ctx->handle);
+ drm_context_switch_complete(dev, ctx->handle);
+
+ return 0;
+}
+
+int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_ctx *ctx = data;
+
+ DRM_DEBUG("%d\n", ctx->handle);
+ if (ctx->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor) {
+ DRM_LOCK(dev);
+ dev->driver->context_dtor(dev, ctx->handle);
+ DRM_UNLOCK(dev);
+ }
+
+ drm_ctxbitmap_free(dev, ctx->handle);
+ }
+
+ return 0;
+}
diff --git a/sys/dev/drm2/drm_crtc.c b/sys/dev/drm2/drm_crtc.c
new file mode 100644
index 0000000..f6e462e
--- /dev/null
+++ b/sys/dev/drm2/drm_crtc.c
@@ -0,0 +1,3413 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/drm_fourcc.h>
+#include <sys/limits.h>
+
+/* Avoid boilerplate. I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list) \
+ char *fnname(int val) \
+ { \
+ int i; \
+ for (i = 0; i < DRM_ARRAY_SIZE(list); i++) { \
+ if (list[i].type == val) \
+ return list[i].name; \
+ } \
+ return "(unknown)"; \
+ }
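+/*
+ * For instance, DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+ * below expands to char *drm_get_dpms_name(int val), a lookup that maps
+ * a DPMS value to its "On"/"Standby"/... string.
+ */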
+
+/*
+ * Global properties
+ */
+static struct drm_prop_enum_list drm_dpms_enum_list[] =
+{ { DRM_MODE_DPMS_ON, "On" },
+ { DRM_MODE_DPMS_STANDBY, "Standby" },
+ { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+ { DRM_MODE_DPMS_OFF, "Off" }
+};
+
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/*
+ * Optional properties
+ */
+static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+{
+ { DRM_MODE_SCALE_NONE, "None" },
+ { DRM_MODE_SCALE_FULLSCREEN, "Full" },
+ { DRM_MODE_SCALE_CENTER, "Center" },
+ { DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+{
+ { DRM_MODE_DITHERING_OFF, "Off" },
+ { DRM_MODE_DITHERING_ON, "On" },
+ { DRM_MODE_DITHERING_AUTO, "Automatic" },
+};
+
+/*
+ * Non-global properties, but "required" for certain connectors.
+ */
+static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+ drm_dvi_i_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+ drm_tv_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+ { DRM_MODE_DIRTY_OFF, "Off" },
+ { DRM_MODE_DIRTY_ON, "On" },
+ { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+ drm_dirty_info_enum_list)
+
+struct drm_conn_prop_enum_list {
+ int type;
+ char *name;
+ int count;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
+ { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
+ { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
+ { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
+ { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
+ { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
+ { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
+ { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
+ { DRM_MODE_CONNECTOR_Component, "Component", 0 },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
+ { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
+};
+
+static struct drm_prop_enum_list drm_encoder_enum_list[] =
+{ { DRM_MODE_ENCODER_NONE, "None" },
+ { DRM_MODE_ENCODER_DAC, "DAC" },
+ { DRM_MODE_ENCODER_TMDS, "TMDS" },
+ { DRM_MODE_ENCODER_LVDS, "LVDS" },
+ { DRM_MODE_ENCODER_TVDAC, "TV" },
+};
+
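+/*
+ * Note that drm_get_encoder_name() and drm_get_connector_name() format
+ * into static buffers: each result is only valid until the next call,
+ * and the helpers are not safe for concurrent callers.
+ */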
+char *drm_get_encoder_name(struct drm_encoder *encoder)
+{
+ static char buf[32];
+
+ snprintf(buf, 32, "%s-%d",
+ drm_encoder_enum_list[encoder->encoder_type].name,
+ encoder->base.id);
+ return buf;
+}
+
+char *drm_get_connector_name(struct drm_connector *connector)
+{
+ static char buf[32];
+
+ snprintf(buf, 32, "%s-%d",
+ drm_connector_enum_list[connector->connector_type].name,
+ connector->connector_type_id);
+ return buf;
+}
+
+char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+ if (status == connector_status_connected)
+ return "connected";
+ else if (status == connector_status_disconnected)
+ return "disconnected";
+ else
+ return "unknown";
+}
+
+/**
+ * drm_mode_object_get - allocate a new identifier
+ * @dev: DRM device
+ * @obj: object to assign the new identifier to
+ * @type: object type
+ *
+ * LOCKING:
+ *
+ * Create a unique identifier for @obj in @dev's identifier space. Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * RETURNS:
+ * New unique (relative to other objects in @dev) integer identifier for the
+ * object.
+ */
+static int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type)
+{
+ int new_id;
+ int ret;
+
+ new_id = 0;
+ ret = drm_gem_name_create(&dev->mode_config.crtc_names, obj, &new_id);
+ if (ret != 0)
+ return (ret);
+
+ obj->id = new_id;
+ obj->type = obj_type;
+ return 0;
+}
+
+/**
+ * drm_mode_object_put - free an identifier
+ * @dev: DRM device
+ * @id: ID to free
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Free @id from @dev's unique identifier pool.
+ */
+static void drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object)
+{
+
+ drm_gem_names_remove(&dev->mode_config.crtc_names, object->id);
+}
+
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
+{
+ struct drm_mode_object *obj;
+
+ obj = drm_gem_name_ref(&dev->mode_config.crtc_names, id, NULL);
+ if (!obj || (obj->type != type) || (obj->id != id))
+ obj = NULL;
+
+ return obj;
+}
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions and device, and adds it to the device's framebuffer list.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ int ret;
+
+ DRM_MODE_CONFIG_ASSERT_LOCKED(dev);
+
+ ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+ if (ret)
+ return ret;
+
+ fb->dev = dev;
+ fb->funcs = funcs;
+ dev->mode_config.num_fb++;
+ list_add(&fb->head, &dev->mode_config.fb_list);
+
+ return 0;
+}
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs and planes in @fb's device. If they're using @fb,
+ * turns them off and sets their fb pointer to NULL.
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_mode_set set;
+ int ret;
+
+ DRM_MODE_CONFIG_ASSERT_LOCKED(dev);
+
+ /* remove from any CRTC */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb == fb) {
+ /* should turn off the crtc */
+ memset(&set, 0, sizeof(struct drm_mode_set));
+ set.crtc = crtc;
+ set.fb = NULL;
+ ret = crtc->funcs->set_config(&set);
+ if (ret)
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ }
+ }
+
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb) {
+ /* should turn off the crtc */
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ /* disconnect the plane from the fb and crtc: */
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ }
+ }
+
+ drm_mode_object_put(dev, &fb->base);
+ list_del(&fb->head);
+ dev->mode_config.num_fb--;
+}
+
+/**
+ * drm_crtc_init - Initialise a new CRTC object
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Inits a new object created as the base part of a driver crtc object.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs)
+{
+ int ret;
+
+ crtc->dev = dev;
+ crtc->funcs = funcs;
+
+ sx_xlock(&dev->mode_config.mutex);
+ ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+ if (ret)
+ goto out;
+
+ list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+ dev->mode_config.num_crtc++;
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+/**
+ * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * @crtc: CRTC to cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Cleanup @crtc: removes it from the drm modesetting space. Does NOT
+ * free the object; the caller does that.
+ */
+void drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+
+ DRM_MODE_CONFIG_ASSERT_LOCKED(dev);
+
+ if (crtc->gamma_store) {
+ free(crtc->gamma_store, DRM_MEM_KMS);
+ crtc->gamma_store = NULL;
+ }
+
+ drm_mode_object_put(dev, &crtc->base);
+ list_del(&crtc->head);
+ dev->mode_config.num_crtc--;
+}
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed mode list
+ * @connector: connector the new mode is attached to
+ * @mode: mode data
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Add @mode to @connector's mode list for later use.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+
+ DRM_MODE_CONFIG_ASSERT_LOCKED(connector->dev);
+
+ list_add(&mode->head, &connector->probed_modes);
+}
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector list to modify
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Remove @mode from @connector's mode list, then free it.
+ */
+void drm_mode_remove(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+
+ DRM_MODE_CONFIG_ASSERT_LOCKED(connector->dev);
+
+ list_del(&mode->head);
+ drm_mode_destroy(connector->dev, mode);
+}
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type)
+{
+ int ret;
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ if (ret)
+ goto out;
+
+ connector->dev = dev;
+ connector->funcs = funcs;
+ connector->connector_type = connector_type;
+ connector->connector_type_id =
+ ++drm_connector_enum_list[connector_type].count; /* TODO */
+ INIT_LIST_HEAD(&connector->user_modes);
+ INIT_LIST_HEAD(&connector->probed_modes);
+ INIT_LIST_HEAD(&connector->modes);
+ connector->edid_blob_ptr = NULL;
+
+ list_add_tail(&connector->head, &dev->mode_config.connector_list);
+ dev->mode_config.num_connector++;
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.edid_property, 0);
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.dpms_property, 0);
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *t;
+
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+ drm_mode_remove(connector, mode);
+
+ list_for_each_entry_safe(mode, t, &connector->modes, head)
+ drm_mode_remove(connector, mode);
+
+ list_for_each_entry_safe(mode, t, &connector->user_modes, head)
+ drm_mode_remove(connector, mode);
+
+ sx_xlock(&dev->mode_config.mutex);
+ drm_mode_object_put(dev, &connector->base);
+ list_del(&connector->head);
+ dev->mode_config.num_connector--;
+ sx_xunlock(&dev->mode_config.mutex);
+}
+
+int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type)
+{
+ int ret;
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ if (ret)
+ goto out;
+
+ encoder->dev = dev;
+ encoder->encoder_type = encoder_type;
+ encoder->funcs = funcs;
+
+ list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+ dev->mode_config.num_encoder++;
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+
+ sx_xlock(&dev->mode_config.mutex);
+ drm_mode_object_put(dev, &encoder->base);
+ list_del(&encoder->head);
+ dev->mode_config.num_encoder--;
+ sx_xunlock(&dev->mode_config.mutex);
+}
+
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv)
+{
+ int ret;
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ if (ret)
+ goto out;
+
+ plane->dev = dev;
+ plane->funcs = funcs;
+ plane->format_types = malloc(sizeof(uint32_t) * format_count,
+ DRM_MEM_KMS, M_WAITOK);
+
+ memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+ plane->format_count = format_count;
+ plane->possible_crtcs = possible_crtcs;
+
+	/* Private planes are not exposed to userspace, but depending on the
+	 * display hardware it can be convenient to share the scanout engine
+	 * programming with the crtc implementation.
+	 */
+ if (!priv) {
+ list_add_tail(&plane->head, &dev->mode_config.plane_list);
+ dev->mode_config.num_plane++;
+ } else {
+ INIT_LIST_HEAD(&plane->head);
+ }
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+
+ sx_xlock(&dev->mode_config.mutex);
+ free(plane->format_types, DRM_MEM_KMS);
+ drm_mode_object_put(dev, &plane->base);
+ /* if not added to a list, it must be a private plane */
+ if (!list_empty(&plane->head)) {
+ list_del(&plane->head);
+ dev->mode_config.num_plane--;
+ }
+ sx_xunlock(&dev->mode_config.mutex);
+}
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = malloc(sizeof(struct drm_display_mode), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
+ free(nmode, DRM_MEM_KMS);
+ return (NULL);
+ }
+ return nmode;
+}
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free @mode's unique identifier, then free it.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ if (!mode)
+ return;
+
+ drm_mode_object_put(dev, &mode->base);
+
+ free(mode, DRM_MEM_KMS);
+}
+
+static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+{
+ struct drm_property *edid;
+ struct drm_property *dpms;
+
+ /*
+ * Standard properties (apply to all connectors)
+ */
+ edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "EDID", 0);
+ dev->mode_config.edid_property = edid;
+
+ dpms = drm_property_create_enum(dev, 0,
+ "DPMS", drm_dpms_enum_list,
+ DRM_ARRAY_SIZE(drm_dpms_enum_list));
+ dev->mode_config.dpms_property = dpms;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+ struct drm_property *dvi_i_selector;
+ struct drm_property *dvi_i_subconnector;
+
+ if (dev->mode_config.dvi_i_select_subconnector_property)
+ return 0;
+
+ dvi_i_selector =
+ drm_property_create_enum(dev, 0,
+ "select subconnector",
+ drm_dvi_i_select_enum_list,
+ DRM_ARRAY_SIZE(drm_dvi_i_select_enum_list));
+ dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+ dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_dvi_i_subconnector_enum_list,
+ DRM_ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+ dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+ return 0;
+}
+
+/**
+ * drm_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device. Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+ char *modes[])
+{
+ struct drm_property *tv_selector;
+ struct drm_property *tv_subconnector;
+ int i;
+
+ if (dev->mode_config.tv_select_subconnector_property)
+ return 0;
+
+ /*
+ * Basic connector properties
+ */
+ tv_selector = drm_property_create_enum(dev, 0,
+ "select subconnector",
+ drm_tv_select_enum_list,
+ DRM_ARRAY_SIZE(drm_tv_select_enum_list));
+ dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+ tv_subconnector =
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_tv_subconnector_enum_list,
+ DRM_ARRAY_SIZE(drm_tv_subconnector_enum_list));
+ dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+ /*
+ * Other, TV specific properties: margins & TV modes.
+ */
+ dev->mode_config.tv_left_margin_property =
+ drm_property_create_range(dev, 0, "left margin", 0, 100);
+
+ dev->mode_config.tv_right_margin_property =
+ drm_property_create_range(dev, 0, "right margin", 0, 100);
+
+ dev->mode_config.tv_top_margin_property =
+ drm_property_create_range(dev, 0, "top margin", 0, 100);
+
+ dev->mode_config.tv_bottom_margin_property =
+ drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+
+ dev->mode_config.tv_mode_property =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", num_modes);
+ for (i = 0; i < num_modes; i++)
+ drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+ i, modes[i]);
+
+ dev->mode_config.tv_brightness_property =
+ drm_property_create_range(dev, 0, "brightness", 0, 100);
+
+ dev->mode_config.tv_contrast_property =
+ drm_property_create_range(dev, 0, "contrast", 0, 100);
+
+ dev->mode_config.tv_flicker_reduction_property =
+ drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+
+ dev->mode_config.tv_overscan_property =
+ drm_property_create_range(dev, 0, "overscan", 0, 100);
+
+ dev->mode_config.tv_saturation_property =
+ drm_property_create_range(dev, 0, "saturation", 0, 100);
+
+ dev->mode_config.tv_hue_property =
+ drm_property_create_range(dev, 0, "hue", 0, 100);
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+ struct drm_property *scaling_mode;
+
+ if (dev->mode_config.scaling_mode_property)
+ return 0;
+
+ scaling_mode =
+ drm_property_create_enum(dev, 0, "scaling mode",
+ drm_scaling_mode_enum_list,
+ DRM_ARRAY_SIZE(drm_scaling_mode_enum_list));
+
+ dev->mode_config.scaling_mode_property = scaling_mode;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dithering_property - create dithering property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dithering_property(struct drm_device *dev)
+{
+ struct drm_property *dithering_mode;
+
+ if (dev->mode_config.dithering_mode_property)
+ return 0;
+
+ dithering_mode =
+ drm_property_create_enum(dev, 0, "dithering",
+ drm_dithering_mode_enum_list,
+ DRM_ARRAY_SIZE(drm_dithering_mode_enum_list));
+ dev->mode_config.dithering_mode_property = dithering_mode;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+ struct drm_property *dirty_info;
+
+ if (dev->mode_config.dirty_info_property)
+ return 0;
+
+ dirty_info =
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "dirty",
+ drm_dirty_info_enum_list,
+ DRM_ARRAY_SIZE(drm_dirty_info_enum_list));
+ dev->mode_config.dirty_info_property = dirty_info;
+
+ return 0;
+}
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * None, should happen single threaded at init time.
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+ sx_init(&dev->mode_config.mutex, "kmslk");
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ drm_gem_names_init(&dev->mode_config.crtc_names);
+
+ sx_xlock(&dev->mode_config.mutex);
+ drm_mode_create_standard_connector_properties(dev);
+ sx_xunlock(&dev->mode_config.mutex);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+}
+
+static int
+drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+{
+ uint32_t total_objects = 0;
+
+ total_objects += dev->mode_config.num_crtc;
+ total_objects += dev->mode_config.num_connector;
+ total_objects += dev->mode_config.num_encoder;
+
+ group->id_list = malloc(total_objects * sizeof(uint32_t),
+ DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ group->num_crtcs = 0;
+ group->num_connectors = 0;
+ group->num_encoders = 0;
+ return 0;
+}
+
+int drm_mode_group_init_legacy_group(struct drm_device *dev,
+ struct drm_mode_group *group)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ if ((ret = drm_mode_group_init(dev, group)))
+ return ret;
+
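+	/*
+	 * id_list is packed as [CRTC ids][encoder ids][connector ids]; each
+	 * loop below indexes past the counts accumulated so far to land its
+	 * IDs in the right slot.
+	 */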
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ group->id_list[group->num_crtcs++] = crtc->base.id;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders++] =
+ encoder->base.id;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders +
+ group->num_connectors++] = connector->base.id;
+
+ return 0;
+}
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+ struct drm_plane *plane, *plt;
+
+ list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(connector, ot,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ fb->funcs->destroy(fb);
+ }
+
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+ drm_gem_names_fini(&dev->mode_config.crtc_names);
+}
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.
+ */
+static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+ const struct drm_display_mode *in)
+{
+ if (in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
+ in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
+ in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
+ in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
+ in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX)
+ printf("timing values too large for mode info\n");
+
+ out->clock = in->clock;
+ out->hdisplay = in->hdisplay;
+ out->hsync_start = in->hsync_start;
+ out->hsync_end = in->hsync_end;
+ out->htotal = in->htotal;
+ out->hskew = in->hskew;
+ out->vdisplay = in->vdisplay;
+ out->vsync_start = in->vsync_start;
+ out->vsync_end = in->vsync_end;
+ out->vtotal = in->vtotal;
+ out->vscan = in->vscan;
+ out->vrefresh = in->vrefresh;
+ out->flags = in->flags;
+ out->type = in->type;
+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to fill in
+ * @in: drm_mode_modeinfo to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+static int drm_crtc_convert_umode(struct drm_display_mode *out,
+ const struct drm_mode_modeinfo *in)
+{
+ if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
+ return ERANGE;
+
+ out->clock = in->clock;
+ out->hdisplay = in->hdisplay;
+ out->hsync_start = in->hsync_start;
+ out->hsync_end = in->hsync_end;
+ out->htotal = in->htotal;
+ out->hskew = in->hskew;
+ out->vdisplay = in->vdisplay;
+ out->vsync_start = in->vsync_start;
+ out->vsync_end = in->vsync_end;
+ out->vtotal = in->vtotal;
+ out->vscan = in->vscan;
+ out->vrefresh = in->vrefresh;
+ out->flags = in->flags;
+ out->type = in->type;
+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+
+ return 0;
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_card_res *card_res = data;
+ struct list_head *lh;
+ struct drm_framebuffer *fb;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret = 0;
+ int connector_count = 0;
+ int crtc_count = 0;
+ int fb_count = 0;
+ int encoder_count = 0;
+ int copied = 0, i;
+ uint32_t __user *fb_id;
+ uint32_t __user *crtc_id;
+ uint32_t __user *connector_id;
+ uint32_t __user *encoder_id;
+ struct drm_mode_group *mode_group;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ /*
+ * For the non-control nodes we need to limit the list of resources
+ * by IDs in the group list for this node
+ */
+ list_for_each(lh, &file_priv->fbs)
+ fb_count++;
+
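+	/*
+	 * Mode groups are not implemented yet (XXXKIB below), so every
+	 * caller currently sees the full resource lists.
+	 */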
+#if 1
+ mode_group = NULL; /* XXXKIB */
+ if (1 || file_priv->master) {
+#else
+ mode_group = &file_priv->masterp->minor->mode_group;
+ if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
+#endif
+
+ list_for_each(lh, &dev->mode_config.crtc_list)
+ crtc_count++;
+
+ list_for_each(lh, &dev->mode_config.connector_list)
+ connector_count++;
+
+ list_for_each(lh, &dev->mode_config.encoder_list)
+ encoder_count++;
+ } else {
+
+ crtc_count = mode_group->num_crtcs;
+ connector_count = mode_group->num_connectors;
+ encoder_count = mode_group->num_encoders;
+ }
+
+ card_res->max_height = dev->mode_config.max_height;
+ card_res->min_height = dev->mode_config.min_height;
+ card_res->max_width = dev->mode_config.max_width;
+ card_res->min_width = dev->mode_config.min_width;
+
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t *)(uintptr_t)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (copyout(&fb->base.id, fb_id + copied,
+ sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+
+ /* CRTCs */
+ if (card_res->count_crtcs >= crtc_count) {
+ copied = 0;
+ crtc_id = (uint32_t *)(uintptr_t)card_res->crtc_id_ptr;
+#if 1
+ if (1 || file_priv->master) {
+#else
+ if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
+#endif
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ head) {
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ if (copyout(&crtc->base.id, crtc_id +
+ copied, sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ for (i = 0; i < mode_group->num_crtcs; i++) {
+ if (copyout(&mode_group->id_list[i],
+ crtc_id + copied, sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ card_res->count_crtcs = crtc_count;
+
+ /* Encoders */
+ if (card_res->count_encoders >= encoder_count) {
+ copied = 0;
+ encoder_id = (uint32_t *)(uintptr_t)card_res->encoder_id_ptr;
+#if 1
+		if (1 || file_priv->master) {
+#else
+ if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
+#endif
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list,
+ head) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+ drm_get_encoder_name(encoder));
+ if (copyout(&encoder->base.id, encoder_id +
+ copied, sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ for (i = mode_group->num_crtcs;
+ i < mode_group->num_crtcs + mode_group->num_encoders;
+ i++) {
+ if (copyout(&mode_group->id_list[i],
+ encoder_id + copied, sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+
+ }
+ }
+ card_res->count_encoders = encoder_count;
+
+ /* Connectors */
+ if (card_res->count_connectors >= connector_count) {
+ copied = 0;
+ connector_id = (uint32_t *)(uintptr_t)card_res->connector_id_ptr;
+#if 1
+		if (1 || file_priv->master) {
+#else
+ if (file_priv->masterp->minor->type == DRM_MINOR_CONTROL) {
+#endif
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ head) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
+ if (copyout(&connector->base.id,
+ connector_id + copied, sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ int start = mode_group->num_crtcs +
+ mode_group->num_encoders;
+ for (i = start; i < start + mode_group->num_connectors; i++) {
+ if (copyout(&mode_group->id_list[i],
+ connector_id + copied, sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ card_res->count_connectors = connector_count;
+
+ DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
+ card_res->count_connectors, card_res->count_encoders);
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a CRTC configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc *crtc_resp = data;
+ struct drm_crtc *crtc;
+ struct drm_mode_object *obj;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = (EINVAL);
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ crtc_resp->x = crtc->x;
+ crtc_resp->y = crtc->y;
+ crtc_resp->gamma_size = crtc->gamma_size;
+ if (crtc->fb)
+ crtc_resp->fb_id = crtc->fb->base.id;
+ else
+ crtc_resp->fb_id = 0;
+
+ if (crtc->enabled) {
+
+ drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+ crtc_resp->mode_valid = 1;
+
+ } else {
+ crtc_resp->mode_valid = 0;
+ }
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getconnector - get connector configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a connector configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_connector *out_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ int mode_count = 0;
+ int props_count = 0;
+ int encoders_count = 0;
+ int ret = 0;
+ int copied = 0;
+ int i;
+ struct drm_mode_modeinfo u_mode;
+ struct drm_mode_modeinfo __user *mode_ptr;
+ uint32_t *prop_ptr;
+ uint64_t *prop_values;
+ uint32_t *encoder_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, out_resp->connector_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] != 0) {
+ props_count++;
+ }
+ }
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ encoders_count++;
+ }
+ }
+
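+	/*
+	 * Userspace is expected to pass count_modes == 0 on the first,
+	 * sizing call; probe the connector then, so that the second call
+	 * sees a populated mode list.
+	 */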
+ if (out_resp->count_modes == 0) {
+ connector->funcs->fill_modes(connector,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ }
+
+ /* delayed so we get modes regardless of pre-fill_modes state */
+ list_for_each_entry(mode, &connector->modes, head)
+ mode_count++;
+
+ out_resp->connector_id = connector->base.id;
+ out_resp->connector_type = connector->connector_type;
+ out_resp->connector_type_id = connector->connector_type_id;
+ out_resp->mm_width = connector->display_info.width_mm;
+ out_resp->mm_height = connector->display_info.height_mm;
+ out_resp->subpixel = connector->display_info.subpixel_order;
+ out_resp->connection = connector->status;
+ if (connector->encoder)
+ out_resp->encoder_id = connector->encoder->base.id;
+ else
+ out_resp->encoder_id = 0;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if ((out_resp->count_modes >= mode_count) && mode_count) {
+ copied = 0;
+ mode_ptr = (struct drm_mode_modeinfo *)(uintptr_t)out_resp->modes_ptr;
+ list_for_each_entry(mode, &connector->modes, head) {
+ drm_crtc_convert_to_umode(&u_mode, mode);
+ if (copyout(&u_mode, mode_ptr + copied,
+ sizeof(u_mode))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_modes = mode_count;
+
+ if ((out_resp->count_props >= props_count) && props_count) {
+ copied = 0;
+ prop_ptr = (uint32_t *)(uintptr_t)(out_resp->props_ptr);
+ prop_values = (uint64_t *)(uintptr_t)(out_resp->prop_values_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] != 0) {
+ if (copyout(&connector->property_ids[i],
+ prop_ptr + copied, sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+
+ if (copyout(&connector->property_values[i],
+ prop_values + copied, sizeof(uint64_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ out_resp->count_props = props_count;
+
+ if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+ copied = 0;
+ encoder_ptr = (uint32_t *)(uintptr_t)(out_resp->encoders_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ if (copyout(&connector->encoder_ids[i],
+ encoder_ptr + copied, sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ out_resp->count_encoders = encoders_count;
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_encoder *enc_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ encoder = obj_to_encoder(obj);
+
+ if (encoder->crtc)
+ enc_resp->crtc_id = encoder->crtc->base.id;
+ else
+ enc_resp->crtc_id = 0;
+ enc_resp->encoder_type = encoder->encoder_type;
+ enc_resp->encoder_id = encoder->base.id;
+ enc_resp->possible_crtcs = encoder->possible_crtcs;
+ enc_resp->possible_clones = encoder->possible_clones;
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane_res - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Return a plane count and a set of plane IDs.
+ */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane_res *plane_resp = data;
+ struct drm_mode_config *config;
+ struct drm_plane *plane;
+ uint32_t *plane_ptr;
+ int copied = 0, ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+ config = &dev->mode_config;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (config->num_plane &&
+ (plane_resp->count_planes >= config->num_plane)) {
+ plane_ptr = (uint32_t *)(unsigned long)plane_resp->plane_id_ptr;
+
+ list_for_each_entry(plane, &config->plane_list, head) {
+ if (copyout(&plane->base.id, plane_ptr + copied,
+ sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ plane_resp->count_planes = config->num_plane;
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+int drm_mode_getplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane *plane_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ uint32_t *format_ptr;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, plane_resp->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ if (plane->crtc)
+ plane_resp->crtc_id = plane->crtc->base.id;
+ else
+ plane_resp->crtc_id = 0;
+
+ if (plane->fb)
+ plane_resp->fb_id = plane->fb->base.id;
+ else
+ plane_resp->fb_id = 0;
+
+ plane_resp->plane_id = plane->base.id;
+ plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->gamma_size = plane->gamma_size;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (plane->format_count &&
+ (plane_resp->count_format_types >= plane->format_count)) {
+ format_ptr = (uint32_t *)(unsigned long)plane_resp->format_type_ptr;
+		if (copyout(plane->format_types, format_ptr,
+		    sizeof(uint32_t) * plane->format_count)) {
+ ret = EFAULT;
+ goto out;
+ }
+ }
+ plane_resp->count_format_types = plane->format_count;
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_setplane - set up or tear down a plane
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Set plane info, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+ unsigned int fb_width, fb_height;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ obj = drm_mode_object_find(dev, plane_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ ret = ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ /* No fb means shut it down */
+ if (!plane_req->fb_id) {
+ plane->funcs->disable_plane(plane);
+ plane->crtc = NULL;
+ plane->fb = NULL;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, plane_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ ret = ENOENT;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ obj = drm_mode_object_find(dev, plane_req->fb_id,
+ DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ ret = ENOENT;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ /* Check whether this plane supports the fb pixel format. */
+ for (i = 0; i < plane->format_count; i++)
+ if (fb->pixel_format == plane->format_types[i])
+ break;
+ if (i == plane->format_count) {
+ DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+ ret = EINVAL;
+ goto out;
+ }
+
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
+
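+	/*
+	 * src_{x,y,w,h} are 16.16 fixed point.  The debug output below
+	 * prints the fractional part in millionths, since
+	 * (frac * 15625) >> 10 == frac * 1000000 / 65536.
+	 */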
+ /* Make sure source coordinates are inside the fb. */
+ if (plane_req->src_w > fb_width ||
+ plane_req->src_x > fb_width - plane_req->src_w ||
+ plane_req->src_h > fb_height ||
+ plane_req->src_y > fb_height - plane_req->src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ plane_req->src_w >> 16,
+ ((plane_req->src_w & 0xffff) * 15625) >> 10,
+ plane_req->src_h >> 16,
+ ((plane_req->src_h & 0xffff) * 15625) >> 10,
+ plane_req->src_x >> 16,
+ ((plane_req->src_x & 0xffff) * 15625) >> 10,
+ plane_req->src_y >> 16,
+ ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ ret = ENOSPC;
+ goto out;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ ret = ERANGE;
+ goto out;
+ }
+
+ ret = -plane->funcs->update_plane(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+ if (!ret) {
+ plane->crtc = crtc;
+ plane->fb = fb;
+ }
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Build a new CRTC configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_mode_crtc *crtc_req = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_connector **connector_set = NULL, *connector;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_display_mode *mode = NULL;
+ struct drm_mode_set set;
+ uint32_t *set_connectors_ptr;
+ int ret = 0;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ /* For some reason crtc x/y offsets are signed internally. */
+ if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+ return (ERANGE);
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+ ret = EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+ if (crtc_req->mode_valid) {
+ /* If we have a mode we need a framebuffer. */
+ /* If we pass -1, set the mode with the currently bound fb */
+ if (crtc_req->fb_id == -1) {
+ if (!crtc->fb) {
+ DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+				ret = EINVAL;
+ goto out;
+ }
+ fb = crtc->fb;
+ } else {
+ obj = drm_mode_object_find(dev, crtc_req->fb_id,
+ DRM_MODE_OBJECT_FB);
+ if (!obj) {
+				DRM_DEBUG_KMS("Unknown FB ID %d\n",
+ crtc_req->fb_id);
+ ret = EINVAL;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode) {
+ ret = ENOMEM;
+ goto out;
+ }
+
+ ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ goto out;
+ }
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+
+ if (mode->hdisplay > fb->width ||
+ mode->vdisplay > fb->height ||
+ crtc_req->x > fb->width - mode->hdisplay ||
+ crtc_req->y > fb->height - mode->vdisplay) {
+ DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n",
+ mode->hdisplay, mode->vdisplay,
+ crtc_req->x, crtc_req->y,
+ fb->width, fb->height);
+ ret = ENOSPC;
+ goto out;
+ }
+ }
+
+ if (crtc_req->count_connectors == 0 && mode) {
+ DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+ ret = EINVAL;
+ goto out;
+ }
+
+ if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
+ DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
+ crtc_req->count_connectors);
+ ret = EINVAL;
+ goto out;
+ }
+
+ if (crtc_req->count_connectors > 0) {
+ u32 out_id;
+
+ /* Avoid unbounded kernel memory allocation */
+ if (crtc_req->count_connectors > config->num_connector) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ connector_set = malloc(crtc_req->count_connectors *
+ sizeof(struct drm_connector *), DRM_MEM_KMS, M_WAITOK);
+
+ for (i = 0; i < crtc_req->count_connectors; i++) {
+ set_connectors_ptr = (uint32_t *)(uintptr_t)crtc_req->set_connectors_ptr;
+ if (copyin(&set_connectors_ptr[i], &out_id, sizeof(uint32_t))) {
+ ret = EFAULT;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, out_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ DRM_DEBUG_KMS("Connector id %d unknown\n",
+ out_id);
+ ret = EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
+
+ connector_set[i] = connector;
+ }
+ }
+
+ set.crtc = crtc;
+ set.x = crtc_req->x;
+ set.y = crtc_req->y;
+ set.mode = mode;
+ set.connectors = connector_set;
+ set.num_connectors = crtc_req->count_connectors;
+ set.fb = fb;
+ ret = crtc->funcs->set_config(&set);
+
+out:
+ free(connector_set, DRM_MEM_KMS);
+ drm_mode_destroy(dev, mode);
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_cursor *req = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ if (!req->flags)
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+ ret = EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
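+	/*
+	 * The crtc hooks return Linux-style negative errno values; negate
+	 * them into the positive errno convention used in this file.
+	 */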
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (!crtc->funcs->cursor_set) {
+ ret = ENXIO;
+ goto out;
+ }
+ /* Turns off the cursor if handle is 0 */
+ ret = -crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+ req->width, req->height);
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_MOVE) {
+ if (crtc->funcs->cursor_move) {
+			ret = -crtc->funcs->cursor_move(crtc, req->x, req->y);
+ } else {
+ ret = EFAULT;
+ goto out;
+ }
+ }
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/* Original addfb only supported RGB formats, so figure out which one */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+ uint32_t fmt;
+
+ switch (bpp) {
+ case 8:
+ fmt = DRM_FORMAT_RGB332;
+ break;
+ case 16:
+ if (depth == 15)
+ fmt = DRM_FORMAT_XRGB1555;
+ else
+ fmt = DRM_FORMAT_RGB565;
+ break;
+ case 24:
+ fmt = DRM_FORMAT_RGB888;
+ break;
+ case 32:
+ if (depth == 24)
+ fmt = DRM_FORMAT_XRGB8888;
+ else if (depth == 30)
+ fmt = DRM_FORMAT_XRGB2101010;
+ else
+ fmt = DRM_FORMAT_ARGB8888;
+ break;
+ default:
+		DRM_ERROR("bad bpp, assuming XRGB8888 pixel format\n");
+ fmt = DRM_FORMAT_XRGB8888;
+ break;
+ }
+
+ return fmt;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd *or = data;
+ struct drm_mode_fb_cmd2 r = {};
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ /* Use new struct with format internally */
+ r.fb_id = or->fb_id;
+ r.width = or->width;
+ r.height = or->height;
+ r.pitches[0] = or->pitch;
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ if ((config->min_width > r.width) || (r.width > config->max_width))
+ return (EINVAL);
+ if ((config->min_height > r.height) || (r.height > config->max_height))
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ ret = -dev->mode_config.funcs->fb_create(dev, file_priv, &r, &fb);
+ if (ret != 0) {
+ DRM_ERROR("could not create framebuffer, error %d\n", ret);
+ goto out;
+ }
+
+ or->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static int format_check(struct drm_mode_fb_cmd2 *r)
+{
+ uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 0;
+ default:
+ return (EINVAL);
+ }
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ if ((config->min_width > r->width) || (r->width > config->max_width)) {
+ DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
+ return (EINVAL);
+ }
+ if ((config->min_height > r->height) || (r->height > config->max_height)) {
+ DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
+ return (EINVAL);
+ }
+
+ ret = format_check(r);
+ if (ret) {
+ DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+ return ret;
+ }
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ /* TODO check buffer is sufficiently large */
+ /* TODO setup destructor callback */
+
+ ret = -dev->mode_config.funcs->fb_create(dev, file_priv, r, &fb);
+ if (ret != 0) {
+ DRM_ERROR("could not create framebuffer, error %d\n", ret);
+ goto out;
+ }
+
+ r->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return (ret);
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_framebuffer *fbl = NULL;
+ uint32_t *id = data;
+ int ret = 0;
+ int found = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
+ /* TODO check that we really get a framebuffer back. */
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+ if (fb == fbl)
+ found = 1;
+
+ if (!found) {
+ ret = EINVAL;
+ goto out;
+ }
+
+ /* TODO release all crtc connected to the framebuffer */
+	/* TODO unhook the destructor from the buffer object */
+
+ list_del(&fb->filp_head);
+ fb->funcs->destroy(fb);
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd *r = data;
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ ret = EINVAL;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ r->height = fb->height;
+ r->width = fb->width;
+ r->depth = fb->depth;
+ r->bpp = fb->bits_per_pixel;
+ r->pitch = fb->pitches[0];
+ fb->funcs->create_handle(fb, file_priv, &r->handle);
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_clip_rect __user *clips_ptr;
+ struct drm_clip_rect *clips = NULL;
+ struct drm_mode_fb_dirty_cmd *r = data;
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned flags;
+ int num_clips;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ ret = EINVAL;
+ goto out_err1;
+ }
+ fb = obj_to_fb(obj);
+
+ num_clips = r->num_clips;
+ clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+
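+	/* num_clips and clips_ptr must be both set or both zero. */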
+ if (!num_clips != !clips_ptr) {
+ ret = EINVAL;
+ goto out_err1;
+ }
+
+ flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+ /* If userspace annotates copy, clips must come in pairs */
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+ ret = EINVAL;
+ goto out_err1;
+ }
+
+ if (num_clips && clips_ptr) {
+ if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+ ret = EINVAL;
+ goto out_err1;
+ }
+ clips = malloc(num_clips * sizeof(*clips), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ ret = copyin(clips_ptr, clips, num_clips * sizeof(*clips));
+ if (ret)
+ goto out_err2;
+ }
+
+ if (fb->funcs->dirty) {
+ ret = -fb->funcs->dirty(fb, file_priv, flags, r->color,
+ clips, num_clips);
+ } else {
+ ret = ENOSYS;
+ goto out_err2;
+ }
+
+out_err2:
+ free(clips, DRM_MEM_KMS);
+out_err1:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @priv: DRM file private whose framebuffers are released
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Destroy all the FBs associated with @priv.  Called when the file is
+ * closed.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+#if 1
+ struct drm_device *dev = priv->dev;
+#else
+ struct drm_device *dev = priv->minor->dev;
+#endif
+ struct drm_framebuffer *fb, *tfb;
+
+ sx_xlock(&dev->mode_config.mutex);
+ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+ list_del(&fb->filp_head);
+ fb->funcs->destroy(fb);
+ }
+ sx_xunlock(&dev->mode_config.mutex);
+}
+
+/**
+ * drm_mode_attachmode - add a mode to the user mode list
+ * @dev: DRM device
+ * @connector: connector to add the mode to
+ * @mode: mode to add
+ *
+ * Add @mode to @connector's user mode list.
+ */
+static void drm_mode_attachmode(struct drm_device *dev,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_add_tail(&mode->head, &connector->user_modes);
+}
+
+int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+ int ret = 0;
+ struct drm_display_mode *dup_mode, *next;
+ DRM_LIST_HEAD(list);
+
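+	/*
+	 * Two passes: first duplicate the mode once for every connector on
+	 * this crtc, so that an allocation failure leaves the connectors
+	 * untouched, then move the copies onto the user mode lists.
+	 */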
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+ if (connector->encoder->crtc == crtc) {
+ dup_mode = drm_mode_duplicate(dev, mode);
+ if (!dup_mode) {
+ ret = ENOMEM;
+ goto out;
+ }
+ list_add_tail(&dup_mode->head, &list);
+ }
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+ if (connector->encoder->crtc == crtc)
+ list_move_tail(list.next, &connector->user_modes);
+ }
+
+	MPASS(list_empty(&list));
+
+ out:
+ list_for_each_entry_safe(dup_mode, next, &list, head)
+ drm_mode_destroy(dev, dup_mode);
+
+ return ret;
+}
+
+static int drm_mode_detachmode(struct drm_device *dev,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int found = 0;
+ int ret = 0;
+ struct drm_display_mode *match_mode, *t;
+
+ list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
+ if (drm_mode_equal(match_mode, mode)) {
+ list_del(&match_mode->head);
+ drm_mode_destroy(dev, match_mode);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_mode_detachmode(dev, connector, mode);
+ }
+ return 0;
+}
+
+/**
+ * drm_mode_attachmode_ioctl - attach a user mode to a connector
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * This attaches a user specified mode to a connector.
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_attachmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_mode_cmd *mode_cmd = data;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ struct drm_mode_object *obj;
+ struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ mode = drm_mode_create(dev);
+ if (!mode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = drm_crtc_convert_umode(mode, umode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ drm_mode_destroy(dev, mode);
+ goto out;
+ }
+
+ drm_mode_attachmode(dev, connector, mode);
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+
+/**
+ * drm_mode_detachmode_ioctl - detach a user specified mode from a connector
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_detachmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_mode_cmd *mode_cmd = data;
+ struct drm_connector *connector;
+ struct drm_display_mode mode;
+ struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ ret = drm_crtc_convert_umode(&mode, umode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ goto out;
+ }
+
+ ret = drm_mode_detachmode(dev, connector, &mode);
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values)
+{
+ struct drm_property *property = NULL;
+ int ret;
+
+ property = malloc(sizeof(struct drm_property), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ if (num_values) {
+ property->values = malloc(sizeof(uint64_t)*num_values, DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+ }
+
+ ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+ if (ret)
+ goto fail;
+ property->flags = flags;
+ property->num_values = num_values;
+ INIT_LIST_HEAD(&property->enum_blob_list);
+
+ if (name) {
+ strncpy(property->name, name, DRM_PROP_NAME_LEN);
+ property->name[DRM_PROP_NAME_LEN-1] = '\0';
+ }
+
+ list_add_tail(&property->head, &dev->mode_config.property_list);
+ return property;
+
+fail:
+ free(property->values, DRM_MEM_KMS);
+ free(property, DRM_MEM_KMS);
+ return (NULL);
+}
+
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_ENUM;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max)
+{
+ struct drm_property *property;
+
+ flags |= DRM_MODE_PROP_RANGE;
+
+ property = drm_property_create(dev, flags, name, 2);
+ if (!property)
+ return NULL;
+
+ property->values[0] = min;
+ property->values[1] = max;
+
+ return property;
+}
+
+int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name)
+{
+ struct drm_property_enum *prop_enum;
+
+ if (!(property->flags & DRM_MODE_PROP_ENUM))
+ return -EINVAL;
+
+ if (!list_empty(&property->enum_blob_list)) {
+ list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+ if (prop_enum->value == value) {
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ return 0;
+ }
+ }
+ }
+
+ prop_enum = malloc(sizeof(struct drm_property_enum), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ prop_enum->value = value;
+
+ property->values[index] = value;
+ list_add_tail(&prop_enum->head, &property->enum_blob_list);
+ return 0;
+}
+
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+ struct drm_property_enum *prop_enum, *pt;
+
+ list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+ list_del(&prop_enum->head);
+ free(prop_enum, DRM_MEM_KMS);
+ }
+
+ if (property->num_values)
+ free(property->values, DRM_MEM_KMS);
+ drm_mode_object_put(dev, &property->base);
+ list_del(&property->head);
+ free(property, DRM_MEM_KMS);
+}
+
+int drm_connector_attach_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t init_val)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] == 0) {
+ connector->property_ids[i] = property->base.id;
+ connector->property_values[i] = init_val;
+ break;
+ }
+ }
+
+ if (i == DRM_CONNECTOR_MAX_PROPERTY)
+ return -EINVAL;
+ return 0;
+}
+
+int drm_connector_property_set_value(struct drm_connector *connector,
+ struct drm_property *property, uint64_t value)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] == property->base.id) {
+ connector->property_values[i] = value;
+ break;
+ }
+ }
+
+ if (i == DRM_CONNECTOR_MAX_PROPERTY)
+ return -EINVAL;
+ return 0;
+}
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+ struct drm_property *property, uint64_t *val)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] == property->base.id) {
+ *val = connector->property_values[i];
+ break;
+ }
+ }
+
+ if (i == DRM_CONNECTOR_MAX_PROPERTY)
+ return -EINVAL;
+ return 0;
+}
+
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_get_property *out_resp = data;
+ struct drm_property *property;
+ int enum_count = 0;
+ int blob_count = 0;
+ int value_count = 0;
+ int ret = 0, i;
+ int copied;
+ struct drm_property_enum *prop_enum;
+ struct drm_mode_property_enum __user *enum_ptr;
+ struct drm_property_blob *prop_blob;
+ uint32_t *blob_id_ptr;
+ uint64_t *values_ptr;
+ uint32_t *blob_length_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+ if (!obj) {
+ ret = -EINVAL;
+ goto done;
+ }
+ property = obj_to_property(obj);
+
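+	/*
+	 * enum_blob_list holds enum entries for enum properties and blob
+	 * entries for blob properties.
+	 */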
+ if (property->flags & DRM_MODE_PROP_ENUM) {
+ list_for_each_entry(prop_enum, &property->enum_blob_list, head)
+ enum_count++;
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ list_for_each_entry(prop_blob, &property->enum_blob_list, head)
+ blob_count++;
+ }
+
+ value_count = property->num_values;
+
+ strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+ out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+ out_resp->flags = property->flags;
+
+ if ((out_resp->count_values >= value_count) && value_count) {
+ values_ptr = (uint64_t *)(uintptr_t)out_resp->values_ptr;
+ for (i = 0; i < value_count; i++) {
+ if (copyout(&property->values[i], values_ptr + i, sizeof(uint64_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+ }
+ out_resp->count_values = value_count;
+
+ if (property->flags & DRM_MODE_PROP_ENUM) {
+ if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+ copied = 0;
+ enum_ptr = (struct drm_mode_property_enum *)(uintptr_t)out_resp->enum_blob_ptr;
+ list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+
+ if (copyout(&prop_enum->value, &enum_ptr[copied].value, sizeof(uint64_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (copyout(&prop_enum->name,
+ &enum_ptr[copied].name,DRM_PROP_NAME_LEN)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = enum_count;
+ }
+
+ if (property->flags & DRM_MODE_PROP_BLOB) {
+ if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+ copied = 0;
+ blob_id_ptr = (uint32_t *)(uintptr_t)out_resp->enum_blob_ptr;
+ blob_length_ptr = (uint32_t *)(uintptr_t)out_resp->values_ptr;
+
+ list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+ if (copyout(&prop_blob->base.id,
+ blob_id_ptr + copied, sizeof(uint32_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (copyout(&prop_blob->length,
+ blob_length_ptr + copied, sizeof(uint32_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = blob_count;
+ }
+done:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
+ void *data)
+{
+ struct drm_property_blob *blob;
+ int ret;
+
+ if (!length || !data)
+ return NULL;
+
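+	/* The blob data is stored inline, right after the structure. */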
+ blob = malloc(sizeof(struct drm_property_blob) + length, DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+ if (ret) {
+ free(blob, DRM_MEM_KMS);
+ return (NULL);
+ }
+
+ blob->length = length;
+
+ memcpy(blob->data, data, length);
+
+ list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
+ return blob;
+}
+
+static void drm_property_destroy_blob(struct drm_device *dev,
+ struct drm_property_blob *blob)
+{
+ drm_mode_object_put(dev, &blob->base);
+ list_del(&blob->head);
+ free(blob, DRM_MEM_KMS);
+}
+
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_get_blob *out_resp = data;
+ struct drm_property_blob *blob;
+ int ret = 0;
+ void *blob_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+ if (!obj) {
+ ret = -EINVAL;
+ goto done;
+ }
+ blob = obj_to_blob(obj);
+
+ if (out_resp->length == blob->length) {
+ blob_ptr = (void *)(unsigned long)out_resp->data;
+ if (copyout(blob->data, blob_ptr, blob->length)){
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+ out_resp->length = blob->length;
+
+done:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ int ret = 0, size;
+
+ if (connector->edid_blob_ptr)
+ drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+
+	/* Clear the EDID property when there is no EDID. */
+ if (!edid) {
+ connector->edid_blob_ptr = NULL;
+ ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+ return ret;
+ }
+
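+	/* Base EDID block plus extension blocks, EDID_LENGTH bytes each. */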
+ size = EDID_LENGTH * (1 + edid->extensions);
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+ size, edid);
+
+ ret = drm_connector_property_set_value(connector,
+ dev->mode_config.edid_property,
+ connector->edid_blob_ptr->base.id);
+
+ return ret;
+}
+
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_connector_set_property *out_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_property *property;
+ struct drm_connector *connector;
+ int ret = -EINVAL;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] == out_resp->prop_id)
+ break;
+ }
+
+ if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+ if (!obj) {
+ goto out;
+ }
+ property = obj_to_property(obj);
+
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ goto out;
+
+ if (property->flags & DRM_MODE_PROP_RANGE) {
+ if (out_resp->value < property->values[0])
+ goto out;
+
+ if (out_resp->value > property->values[1])
+ goto out;
+ } else {
+ int found = 0;
+ for (i = 0; i < property->num_values; i++) {
+ if (property->values[i] == out_resp->value) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ goto out;
+ }
+ }
+
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int) out_resp->value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, out_resp->value);
+
+ /* store the property value if successful */
+ if (!ret)
+ drm_connector_property_set_value(connector, property, out_resp->value);
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0) {
+ connector->encoder_ids[i] = encoder->base.id;
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+
+void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == encoder->base.id) {
+ connector->encoder_ids[i] = 0;
+ if (connector->encoder == encoder)
+ connector->encoder = NULL;
+ break;
+ }
+ }
+}
+
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size)
+{
+ crtc->gamma_size = gamma_size;
+
+ crtc->gamma_store = malloc(gamma_size * sizeof(uint16_t) * 3,
+ DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ return 0;
+}
+
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
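+	/*
+	 * gamma_store holds three consecutive arrays of gamma_size 16-bit
+	 * entries: red, then green, then blue.
+	 */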
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (copyin((void *)(uintptr_t)crtc_lut->red, r_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ g_base = (char *)r_base + size;
+ if (copyin((void *)(uintptr_t)crtc_lut->green, g_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ b_base = (char *)g_base + size;
+ if (copyin((void *)(uintptr_t)crtc_lut->blue, b_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+
+}
+
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+	/* Copy the gamma store out to userspace. */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (copyout(r_base, (void *)(uintptr_t)crtc_lut->red, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ g_base = (char *)r_base + size;
+ if (copyout(g_base, (void *)(uintptr_t)crtc_lut->green, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ b_base = (char *)g_base + size;
+ if (copyout(b_base, (void *)(uintptr_t)crtc_lut->blue, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static void
+drm_kms_free(void *arg)
+{
+
+ free(arg, DRM_MEM_KMS);
+}
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_page_flip *page_flip = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_pending_vblank_event *e = NULL;
+ int ret = EINVAL;
+
+ if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+ page_flip->reserved != 0)
+ return (EINVAL);
+
+ sx_xlock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj)
+ goto out;
+ crtc = obj_to_crtc(obj);
+
+ if (crtc->fb == NULL) {
+ /* The framebuffer is currently unbound, presumably
+ * due to a hotplug event, that userspace has not
+		 * due to a hotplug event that userspace has not
+ */
+ ret = EBUSY;
+ goto out;
+ }
+
+ if (crtc->funcs->page_flip == NULL)
+ goto out;
+
+ obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj)
+ goto out;
+ fb = obj_to_fb(obj);
+
+ if (crtc->mode.hdisplay > fb->width ||
+ crtc->mode.vdisplay > fb->height ||
+ crtc->x > fb->width - crtc->mode.hdisplay ||
+ crtc->y > fb->height - crtc->mode.vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n",
+ fb->width, fb->height,
+ crtc->mode.hdisplay, crtc->mode.vdisplay,
+ crtc->x, crtc->y);
+ ret = ENOSPC;
+ goto out;
+ }
+
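+	/*
+	 * Reserve per-file event space under event_lock before allocating;
+	 * the reservation is returned below if the driver's page_flip hook
+	 * fails.
+	 */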
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ ret = ENOMEM;
+ mtx_lock(&dev->event_lock);
+ if (file_priv->event_space < sizeof e->event) {
+ mtx_unlock(&dev->event_lock);
+ goto out;
+ }
+ file_priv->event_space -= sizeof e->event;
+ mtx_unlock(&dev->event_lock);
+
+ e = malloc(sizeof *e, DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = page_flip->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy =
+ (void (*) (struct drm_pending_event *))drm_kms_free;
+ }
+
+ ret = -crtc->funcs->page_flip(crtc, fb, e);
+ if (ret != 0) {
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ mtx_lock(&dev->event_lock);
+ file_priv->event_space += sizeof e->event;
+ mtx_unlock(&dev->event_lock);
+ free(e, DRM_MEM_KMS);
+ }
+ }
+
+out:
+ sx_xunlock(&dev->mode_config.mutex);
+ CTR3(KTR_DRM, "page_flip_ioctl %d %d %d", curproc->p_pid,
+ page_flip->crtc_id, ret);
+ return (ret);
+}
+
+void drm_mode_config_reset(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+}
+
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+
+ if (!dev->driver->dumb_create)
+ return -ENOTSUP;
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOTSUP;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOTSUP;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
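
Together these three ioctls give userspace a driver-agnostic framebuffer path: create a dumb buffer, look up its mmap offset, map it, and later destroy it by handle. A hedged sketch of that lifecycle (error handling omitted; assumes the usual drm.h definitions plus <sys/mman.h>):

	static void *
	example_map_dumb(int fd, uint32_t width, uint32_t height,
	    struct drm_mode_create_dumb *creq)
	{
		struct drm_mode_map_dumb mreq;

		memset(creq, 0, sizeof(*creq));
		creq->width = width;
		creq->height = height;
		creq->bpp = 32;
		/* The driver fills in handle, pitch and size. */
		ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, creq);

		memset(&mreq, 0, sizeof(mreq));
		mreq.handle = creq->handle;
		/* The driver fills in the fake mmap offset. */
		ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);

		return (mmap(NULL, creq->size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, mreq.offset));
	}
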
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+{
+ switch (format) {
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+ *bpp = 8;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ *depth = 15;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ *depth = 16;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ *depth = 24;
+ *bpp = 24;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ *depth = 24;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ *depth = 30;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ *depth = 32;
+ *bpp = 32;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format\n");
+ *depth = 0;
+ *bpp = 0;
+ break;
+ }
+}
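
For instance, a driver bridging the legacy bpp/depth world to fourcc formats calls it like this; XRGB8888 maps to the familiar depth-24/bpp-32 pair:

	unsigned int depth;
	int bpp;

	drm_fb_get_bpp_depth(DRM_FORMAT_XRGB8888, &depth, &bpp);
	/* depth == 24, bpp == 32 */
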
diff --git a/sys/dev/drm2/drm_crtc.h b/sys/dev/drm2/drm_crtc.h
new file mode 100644
index 0000000..a69c537
--- /dev/null
+++ b/sys/dev/drm2/drm_crtc.h
@@ -0,0 +1,935 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+#ifndef __DRM_CRTC_H__
+#define __DRM_CRTC_H__
+
+#include <dev/drm2/drm_gem_names.h>
+#include <dev/drm2/drm_fourcc.h>
+
+struct drm_device;
+struct drm_mode_set;
+struct drm_framebuffer;
+struct i2c_adapter;
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+
+struct drm_mode_object {
+ uint32_t id;
+ uint32_t type;
+};
+
+/*
+ * Note on terminology: here, for brevity and convenience, we refer to connector
+ * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
+ * DVI, etc. And 'screen' refers to the whole of the visible display, which
+ * may span multiple monitors (and therefore multiple CRTC and connector
+ * structures).
+ */
+
+enum drm_mode_status {
+ MODE_OK = 0, /* Mode OK */
+ MODE_HSYNC, /* hsync out of range */
+ MODE_VSYNC, /* vsync out of range */
+ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
+	MODE_V_ILLEGAL,	/* mode has illegal vertical timings */
+ MODE_BAD_WIDTH, /* requires an unsupported linepitch */
+	MODE_NOMODE,	/* no mode with a matching name */
+ MODE_NO_INTERLACE, /* interlaced mode not supported */
+ MODE_NO_DBLESCAN, /* doublescan mode not supported */
+ MODE_NO_VSCAN, /* multiscan mode not supported */
+ MODE_MEM, /* insufficient video memory */
+ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
+ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
+ MODE_MEM_VIRT, /* insufficient video memory given virtual size */
+ MODE_NOCLOCK, /* no fixed clock available */
+ MODE_CLOCK_HIGH, /* clock required is too high */
+ MODE_CLOCK_LOW, /* clock required is too low */
+ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
+ MODE_BAD_HVALUE, /* horizontal timing was out of range */
+ MODE_BAD_VVALUE, /* vertical timing was out of range */
+ MODE_BAD_VSCAN, /* VScan value out of range */
+ MODE_HSYNC_NARROW, /* horizontal sync too narrow */
+ MODE_HSYNC_WIDE, /* horizontal sync too wide */
+ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
+ MODE_HBLANK_WIDE, /* horizontal blanking too wide */
+ MODE_VSYNC_NARROW, /* vertical sync too narrow */
+ MODE_VSYNC_WIDE, /* vertical sync too wide */
+ MODE_VBLANK_NARROW, /* vertical blanking too narrow */
+ MODE_VBLANK_WIDE, /* vertical blanking too wide */
+ MODE_PANEL, /* exceeds panel dimensions */
+ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
+ MODE_ONE_WIDTH, /* only one width is supported */
+ MODE_ONE_HEIGHT, /* only one height is supported */
+ MODE_ONE_SIZE, /* only one resolution is supported */
+ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
+	MODE_UNVERIFIED = -3, /* mode needs to be reverified */
+ MODE_BAD = -2, /* unspecified reason */
+ MODE_ERROR = -1 /* error condition */
+};
+
+#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
+ DRM_MODE_TYPE_CRTC_C)
+
+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
+ .name = nm, .status = 0, .type = (t), .clock = (c), \
+ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
+ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
+ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
+ .vscan = (vs), .flags = (f), .vrefresh = 0
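
The initializer macro is meant for static mode tables elsewhere in the tree. For example, the classic VESA 640x480@60Hz timing is spelled:

	static const struct drm_display_mode example_vga = {
		DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175,
		    640, 656, 752, 800, 0, 480, 490, 492, 525, 0,
		    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
	};
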
+
+#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+
+struct drm_display_mode {
+ /* Header */
+ struct list_head head;
+ struct drm_mode_object base;
+
+ char name[DRM_DISPLAY_MODE_LEN];
+
+ int connector_count;
+ enum drm_mode_status status;
+ int type;
+
+ /* Proposed mode values */
+ int clock; /* in kHz */
+ int hdisplay;
+ int hsync_start;
+ int hsync_end;
+ int htotal;
+ int hskew;
+ int vdisplay;
+ int vsync_start;
+ int vsync_end;
+ int vtotal;
+ int vscan;
+ unsigned int flags;
+
+ /* Addressable image size (may be 0 for projectors, etc.) */
+ int width_mm;
+ int height_mm;
+
+ /* Actual mode we give to hw */
+ int clock_index;
+ int synth_clock;
+ int crtc_hdisplay;
+ int crtc_hblank_start;
+ int crtc_hblank_end;
+ int crtc_hsync_start;
+ int crtc_hsync_end;
+ int crtc_htotal;
+ int crtc_hskew;
+ int crtc_vdisplay;
+ int crtc_vblank_start;
+ int crtc_vblank_end;
+ int crtc_vsync_start;
+ int crtc_vsync_end;
+ int crtc_vtotal;
+ int crtc_hadjusted;
+ int crtc_vadjusted;
+
+ /* Driver private mode info */
+ int private_size;
+ int *private;
+ int private_flags;
+
+ int vrefresh; /* in Hz */
+ int hsync; /* in kHz */
+};
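
Because clock is in kHz while htotal and vtotal count pixels and lines, the nominal refresh rate follows directly from the proposed timings; this is the core of what drm_mode_vrefresh() computes, before interlace, doublescan and vscan adjustments:

	/* kHz -> Hz, divided by pixels per frame */
	int vrefresh = mode->clock * 1000 / (mode->htotal * mode->vtotal);
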
+
+enum drm_connector_status {
+ connector_status_connected = 1,
+ connector_status_disconnected = 2,
+ connector_status_unknown = 3,
+};
+
+enum subpixel_order {
+ SubPixelUnknown = 0,
+ SubPixelHorizontalRGB,
+ SubPixelHorizontalBGR,
+ SubPixelVerticalRGB,
+ SubPixelVerticalBGR,
+ SubPixelNone,
+};
+
+#define DRM_COLOR_FORMAT_RGB444 (1<<0)
+#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
+#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
+/*
+ * Describes a given display (e.g. CRT or flat panel) and its limitations.
+ */
+struct drm_display_info {
+ char name[DRM_DISPLAY_INFO_LEN];
+
+ /* Physical size */
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ /* Clock limits FIXME: storage format */
+ unsigned int min_vfreq, max_vfreq;
+ unsigned int min_hfreq, max_hfreq;
+ unsigned int pixel_clock;
+ unsigned int bpc;
+
+ enum subpixel_order subpixel_order;
+ u32 color_formats;
+
+ u8 cea_rev;
+
+ char *raw_edid; /* if any */
+};
+
+struct drm_framebuffer_funcs {
+ void (*destroy)(struct drm_framebuffer *framebuffer);
+ int (*create_handle)(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle);
+ /**
+	 * Optional callback for the dirty fb ioctl.
+	 *
+	 * Userspace can notify the driver via this callback
+	 * that an area of the framebuffer has changed and should
+	 * be flushed to the display hardware.
+	 *
+	 * See the documentation of struct drm_mode_fb_dirty_cmd in
+	 * drm_mode.h for details; its semantics and arguments map
+	 * one-to-one onto this callback.
+ */
+ int (*dirty)(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips);
+};
+
+struct drm_framebuffer {
+ struct drm_device *dev;
+ struct list_head head;
+ struct drm_mode_object base;
+ const struct drm_framebuffer_funcs *funcs;
+ unsigned int pitches[4];
+ unsigned int offsets[4];
+ unsigned int width;
+ unsigned int height;
+ /* depth can be 15 or 16 */
+ unsigned int depth;
+ int bits_per_pixel;
+ int flags;
+ uint32_t pixel_format; /* fourcc format */
+ struct list_head filp_head;
+ /* if you are using the helper */
+ void *helper_private;
+};
+
+struct drm_property_blob {
+ struct drm_mode_object base;
+ struct list_head head;
+ unsigned int length;
+ unsigned char data[];
+};
+
+struct drm_property_enum {
+ uint64_t value;
+ struct list_head head;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_property {
+ struct list_head head;
+ struct drm_mode_object base;
+ uint32_t flags;
+ char name[DRM_PROP_NAME_LEN];
+ uint32_t num_values;
+ uint64_t *values;
+
+ struct list_head enum_blob_list;
+};
+
+struct drm_crtc;
+struct drm_connector;
+struct drm_encoder;
+struct drm_pending_vblank_event;
+struct drm_plane;
+
+/**
+ * drm_crtc_funcs - control CRTCs for a given device
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
+ * @dpms: control display power levels
+ * @save: save CRTC state
+ * @restore: restore CRTC state
+ * @lock: lock the CRTC
+ * @unlock: unlock the CRTC
+ * @shadow_allocate: allocate shadow pixmap
+ * @shadow_create: create shadow pixmap for rotation support
+ * @shadow_destroy: free shadow pixmap
+ * @mode_fixup: fixup proposed mode
+ * @mode_set: set the desired mode on the CRTC
+ * @gamma_set: specify color ramp for CRTC
+ * @destroy: deinit and free object.
+ *
+ * The drm_crtc_funcs structure is the central CRTC management structure
+ * in the DRM. Each CRTC controls one or more connectors (note that the name
+ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
+ * connectors, not just CRTs).
+ *
+ * Each driver is responsible for filling out this structure at startup time,
+ * in addition to providing other modesetting features, like i2c and DDC
+ * bus accessors.
+ */
+struct drm_crtc_funcs {
+ /* Save CRTC state */
+ void (*save)(struct drm_crtc *crtc); /* suspend? */
+ /* Restore CRTC state */
+ void (*restore)(struct drm_crtc *crtc); /* resume? */
+ /* Reset CRTC state */
+ void (*reset)(struct drm_crtc *crtc);
+
+ /* cursor controls */
+ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+ int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
+
+ /* Set gamma on the CRTC */
+ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size);
+ /* Object destroy routine */
+ void (*destroy)(struct drm_crtc *crtc);
+
+ int (*set_config)(struct drm_mode_set *set);
+
+ /*
+ * Flip to the given framebuffer. This implements the page
+	 * flip ioctl described in drm_mode.h.  Specifically, the
+	 * implementation must return immediately and block all
+ * rendering to the current fb until the flip has completed.
+ * If userspace set the event flag in the ioctl, the event
+ * argument will point to an event to send back when the flip
+ * completes, otherwise it will be NULL.
+ */
+ int (*page_flip)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
+};
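
A hedged sketch of a driver wiring this up at attach time; the example_* callbacks and my_softc are hypothetical placeholders, while drm_crtc_helper_set_config() is the real helper added later in this commit:

	static const struct drm_crtc_funcs example_crtc_funcs = {
		.gamma_set	= example_gamma_set,		/* hypothetical */
		.set_config	= drm_crtc_helper_set_config,
		.destroy	= example_crtc_destroy,		/* hypothetical */
		.page_flip	= example_page_flip,		/* hypothetical */
	};

	/* At attach: my_softc->crtc is a driver-embedded struct drm_crtc. */
	drm_crtc_init(dev, &my_softc->crtc, &example_crtc_funcs);
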
+
+/**
+ * drm_crtc - central CRTC control structure
+ * @enabled: is this CRTC enabled?
+ * @x: x position on screen
+ * @y: y position on screen
+ * @funcs: CRTC control functions
+ *
+ * Each CRTC may have one or more connectors associated with it. This structure
+ * allows the CRTC to be controlled.
+ */
+struct drm_crtc {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ /* framebuffer the connector is currently bound to */
+ struct drm_framebuffer *fb;
+
+ bool enabled;
+
+ /* Requested mode from modesetting. */
+ struct drm_display_mode mode;
+
+ /* Programmed mode in hw, after adjustments for encoders,
+ * crtc, panel scaling etc. Needed for timestamping etc.
+ */
+ struct drm_display_mode hwmode;
+
+ int x, y;
+ const struct drm_crtc_funcs *funcs;
+
+ /* CRTC gamma size for reporting to userspace */
+ uint32_t gamma_size;
+ uint16_t *gamma_store;
+
+ /* Constants needed for precise vblank and swap timestamping. */
+ int64_t framedur_ns, linedur_ns, pixeldur_ns;
+
+ /* if you are using the helper */
+ void *helper_private;
+};
+
+
+/**
+ * drm_connector_funcs - control connectors on a given device
+ * @dpms: set power state (see drm_crtc_funcs above)
+ * @save: save connector state
+ * @restore: restore connector state
+ * @reset: reset connector after state has been invalidated (e.g. resume)
+ * @mode_valid: is this mode valid on the given connector?
+ * @mode_fixup: try to fixup proposed mode for this connector
+ * @mode_set: set this mode
+ * @detect: is this connector active?
+ * @get_modes: get mode list for this connector
+ * @set_property: property for this connector may need update
+ * @destroy: make object go away
+ * @force: notify the driver the connector is forced on
+ *
+ * Each CRTC may have one or more connectors attached to it. The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+ * etc.
+ */
+struct drm_connector_funcs {
+ void (*dpms)(struct drm_connector *connector, int mode);
+ void (*save)(struct drm_connector *connector);
+ void (*restore)(struct drm_connector *connector);
+ void (*reset)(struct drm_connector *connector);
+
+ /* Check to see if anything is attached to the connector.
+ * @force is set to false whilst polling, true when checking the
+ * connector due to user request. @force can be used by the driver
+ * to avoid expensive, destructive operations during automated
+ * probing.
+ */
+ enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force);
+ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+ int (*set_property)(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val);
+ void (*destroy)(struct drm_connector *connector);
+ void (*force)(struct drm_connector *connector);
+};
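
A hedged sketch of a detect() implementation honoring the @force contract above: cheap hotplug sensing is always fine, but an expensive or destructive load-detect cycle should only run for an explicit user request. The example_hw_* helpers are hypothetical:

	static enum drm_connector_status
	example_detect(struct drm_connector *connector, bool force)
	{

		if (example_hw_hotplug_sense(connector))
			return (connector_status_connected);
		if (!force)
			/* Polling: skip the destructive probe. */
			return (connector_status_unknown);
		return (example_hw_load_detect(connector) ?
		    connector_status_connected : connector_status_disconnected);
	}
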
+
+struct drm_encoder_funcs {
+ void (*reset)(struct drm_encoder *encoder);
+ void (*destroy)(struct drm_encoder *encoder);
+};
+
+#define DRM_CONNECTOR_MAX_UMODES 16
+#define DRM_CONNECTOR_MAX_PROPERTY 16
+#define DRM_CONNECTOR_LEN 32
+#define DRM_CONNECTOR_MAX_ENCODER 2
+
+/**
+ * drm_encoder - central DRM encoder structure
+ */
+struct drm_encoder {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+ int encoder_type;
+ uint32_t possible_crtcs;
+ uint32_t possible_clones;
+
+ struct drm_crtc *crtc;
+ const struct drm_encoder_funcs *funcs;
+ void *helper_private;
+};
+
+enum drm_connector_force {
+ DRM_FORCE_UNSPECIFIED,
+ DRM_FORCE_OFF,
+ DRM_FORCE_ON, /* force on analog part normally */
+ DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
+#define MAX_ELD_BYTES 128
+
+/**
+ * drm_connector - central DRM connector control structure
+ * @crtc: CRTC this connector is currently connected to, NULL if none
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
+ * @available_modes: modes available on this connector (from get_modes() + user)
+ * @initial_x: initial x position for this connector
+ * @initial_y: initial y position for this connector
+ * @status: connector connected?
+ * @funcs: connector control functions
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC. Each connector also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_connector {
+ struct drm_device *dev;
+ /* struct device kdev; XXXKIB */
+ struct device_attribute *attr;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ int connector_type;
+ int connector_type_id;
+ bool interlace_allowed;
+ bool doublescan_allowed;
+ struct list_head modes; /* list of modes on this connector */
+
+ int initial_x, initial_y;
+ enum drm_connector_status status;
+
+ /* these are modes added by probing with DDC or the BIOS */
+ struct list_head probed_modes;
+
+ struct drm_display_info display_info;
+ const struct drm_connector_funcs *funcs;
+
+ struct list_head user_modes;
+ struct drm_property_blob *edid_blob_ptr;
+ u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
+ uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
+
+ uint8_t polled; /* DRM_CONNECTOR_POLL_* */
+
+ /* requested DPMS state */
+ int dpms;
+
+ void *helper_private;
+
+ /* forced on connector */
+ enum drm_connector_force force;
+ uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+ uint32_t force_encoder_id;
+ struct drm_encoder *encoder; /* currently active encoder */
+
+ /* EDID bits */
+ uint8_t eld[MAX_ELD_BYTES];
+ bool dvi_dual;
+ int max_tmds_clock; /* in MHz */
+ bool latency_present[2];
+ int video_latency[2]; /* [0]: progressive, [1]: interlaced */
+ int audio_latency[2];
+
+	int null_edid_counter; /* needed to work around some HW bugs where we get all 0s */
+};
+
+/**
+ * drm_plane_funcs - driver plane control functions
+ * @update_plane: update the plane configuration
+ * @disable_plane: shut down the plane
+ * @destroy: clean up plane resources
+ */
+struct drm_plane_funcs {
+ int (*update_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+ int (*disable_plane)(struct drm_plane *plane);
+ void (*destroy)(struct drm_plane *plane);
+};
+
+/**
+ * drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @gamma_size: size of gamma table
+ * @gamma_store: gamma correction table
+ * @enabled: enabled flag
+ * @funcs: helper functions
+ * @helper_private: storage for the driver layer
+ */
+struct drm_plane {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ uint32_t possible_crtcs;
+ uint32_t *format_types;
+ uint32_t format_count;
+
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ /* CRTC gamma size for reporting to userspace */
+ uint32_t gamma_size;
+ uint16_t *gamma_store;
+
+ bool enabled;
+
+ const struct drm_plane_funcs *funcs;
+ void *helper_private;
+};
+
+/**
+ * struct drm_mode_set
+ *
+ * Represents a single CRTC: the connectors it drives, the mode it uses,
+ * and the framebuffer it scans out from.
+ *
+ * This is used to set modes.
+ */
+struct drm_mode_set {
+ struct list_head head;
+
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+ struct drm_display_mode *mode;
+
+ uint32_t x;
+ uint32_t y;
+
+ struct drm_connector **connectors;
+ size_t num_connectors;
+};
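
A minimal sketch of building one: bind a single connector to a CRTC at a given mode and hand the set to the CRTC's set_config hook. All objects are assumed to be looked up already, with the mode config lock held:

	static int
	example_modeset_single(struct drm_crtc *crtc, struct drm_connector *conn,
	    struct drm_display_mode *mode, struct drm_framebuffer *fb)
	{
		struct drm_connector *connectors[1] = { conn };
		struct drm_mode_set set;

		memset(&set, 0, sizeof(set));
		set.crtc = crtc;
		set.mode = mode;
		set.fb = fb;
		set.connectors = connectors;
		set.num_connectors = 1;
		return (crtc->funcs->set_config(&set));
	}
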
+
+/**
+ * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
+ */
+struct drm_mode_config_funcs {
+ int (*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_framebuffer **res);
+ void (*output_poll_changed)(struct drm_device *dev);
+};
+
+struct drm_mode_group {
+ uint32_t num_crtcs;
+ uint32_t num_encoders;
+ uint32_t num_connectors;
+
+ /* list of object IDs for this group */
+ uint32_t *id_list;
+};
+
+/**
+ * drm_mode_config - Mode configuration control structure
+ *
+ */
+struct drm_mode_config {
+ struct sx mutex; /* protects configuration (mode lists etc.) */
+	struct drm_gem_names crtc_names; /* use this name table for all IDs: fb, crtc, connector, modes */
+ /* this is limited to one for now */
+ int num_fb;
+ struct list_head fb_list;
+ int num_connector;
+ struct list_head connector_list;
+ int num_encoder;
+ struct list_head encoder_list;
+ int num_plane;
+ struct list_head plane_list;
+
+ int num_crtc;
+ struct list_head crtc_list;
+
+ struct list_head property_list;
+
+ int min_width, min_height;
+ int max_width, max_height;
+ struct drm_mode_config_funcs *funcs;
+ resource_size_t fb_base;
+
+ /* output poll support */
+ bool poll_enabled;
+ struct timeout_task output_poll_task;
+
+ /* pointers to standard properties */
+ struct list_head property_blob_list;
+ struct drm_property *edid_property;
+ struct drm_property *dpms_property;
+
+ /* DVI-I properties */
+ struct drm_property *dvi_i_subconnector_property;
+ struct drm_property *dvi_i_select_subconnector_property;
+
+ /* TV properties */
+ struct drm_property *tv_subconnector_property;
+ struct drm_property *tv_select_subconnector_property;
+ struct drm_property *tv_mode_property;
+ struct drm_property *tv_left_margin_property;
+ struct drm_property *tv_right_margin_property;
+ struct drm_property *tv_top_margin_property;
+ struct drm_property *tv_bottom_margin_property;
+ struct drm_property *tv_brightness_property;
+ struct drm_property *tv_contrast_property;
+ struct drm_property *tv_flicker_reduction_property;
+ struct drm_property *tv_overscan_property;
+ struct drm_property *tv_saturation_property;
+ struct drm_property *tv_hue_property;
+
+ /* Optional properties */
+ struct drm_property *scaling_mode_property;
+ struct drm_property *dithering_mode_property;
+ struct drm_property *dirty_info_property;
+
+ /* dumb ioctl parameters */
+ uint32_t preferred_depth, prefer_shadow;
+};
+
+#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+#define obj_to_connector(x) container_of(x, struct drm_connector, base)
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
+#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+#define obj_to_property(x) container_of(x, struct drm_property, base)
+#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
+
+struct drm_prop_enum_list {
+ int type;
+ char *name;
+};
+
+#if defined(MODE_SETTING_LOCKING_IS_NOT_BROKEN)
+#define DRM_MODE_CONFIG_ASSERT_LOCKED(dev) \
+ sx_assert(&dev->mode_config.mutex, SA_XLOCKED)
+#else
+#define DRM_MODE_CONFIG_ASSERT_LOCKED(dev)
+#endif
+
+extern char *drm_get_dirty_info_name(int val);
+extern char *drm_get_connector_status_name(enum drm_connector_status status);
+
+extern int drm_crtc_init(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
+extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+
+extern int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type);
+
+extern void drm_connector_cleanup(struct drm_connector *connector);
+
+extern int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type);
+
+extern int drm_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+
+extern void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+extern char *drm_get_connector_name(struct drm_connector *connector);
+extern char *drm_get_dpms_name(int val);
+extern char *drm_get_dvi_i_subconnector_name(int val);
+extern char *drm_get_dvi_i_select_name(int val);
+extern char *drm_get_tv_subconnector_name(int val);
+extern char *drm_get_tv_select_name(int val);
+extern void drm_fb_release(struct drm_file *file_priv);
+extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
+extern struct edid *drm_get_edid(struct drm_connector *connector,
+ device_t adapter);
+extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
+extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
+extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ const struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
+extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_reset(struct drm_device *dev);
+extern void drm_mode_config_cleanup(struct drm_device *dev);
+extern void drm_mode_set_name(struct drm_display_mode *mode);
+extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
+extern int drm_mode_width(struct drm_display_mode *mode);
+extern int drm_mode_height(struct drm_display_mode *mode);
+
+/* for use by the fb module */
+extern int drm_mode_attachmode_crtc(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
+
+extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
+extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
+extern void drm_mode_list_concat(struct list_head *head,
+ struct list_head *new);
+extern void drm_mode_validate_size(struct drm_device *dev,
+ struct list_head *mode_list,
+ int maxX, int maxY, int maxPitch);
+extern void drm_mode_validate_clocks(struct drm_device *dev,
+ struct list_head *mode_list,
+ int *min, int *max, int n_ranges);
+extern void drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose);
+extern void drm_mode_sort(struct list_head *mode_list);
+extern int drm_mode_hsync(const struct drm_display_mode *mode);
+extern int drm_mode_vrefresh(const struct drm_display_mode *mode);
+extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
+ int adjust_flags);
+extern void drm_mode_connector_list_update(struct drm_connector *connector);
+extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ struct edid *edid);
+extern int drm_connector_property_set_value(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value);
+extern int drm_connector_property_get_value(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t *value);
+extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
+extern void drm_framebuffer_set_object(struct drm_device *dev,
+ unsigned long handle);
+extern int drm_framebuffer_init(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs);
+extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
+extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
+extern bool drm_crtc_in_use(struct drm_crtc *crtc);
+
+extern int drm_connector_attach_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t init_val);
+extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values);
+extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max);
+extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+extern int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name);
+extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
+ char *formats[]);
+extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+extern int drm_mode_create_dithering_property(struct drm_device *dev);
+extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
+extern char *drm_get_encoder_name(struct drm_encoder *encoder);
+
+extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size);
+extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
+/* IOCTLs */
+extern int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getconnector(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_setplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
+extern int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_replacefb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getencoder(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern bool drm_detect_monitor_audio(struct edid *edid);
+extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool reduced, bool interlaced, bool margins);
+extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins);
+extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins, int GTF_M,
+ int GTF_2C, int GTF_K, int GTF_2J);
+extern int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay);
+
+extern int drm_edid_header_is_valid(const u8 *raw_edid);
+extern bool drm_edid_is_valid(struct edid *edid);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh);
+
+extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp);
+#endif /* __DRM_CRTC_H__ */
diff --git a/sys/dev/drm2/drm_crtc_helper.c b/sys/dev/drm2/drm_crtc_helper.c
new file mode 100644
index 0000000..b798a57
--- /dev/null
+++ b/sys/dev/drm2/drm_crtc_helper.c
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_fourcc.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/drm_fb_helper.h>
+
+bool
+drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector,
+ struct drm_cmdline_mode *cmdline_mode)
+{
+ char *tun_var_name, *tun_mode;
+ static const char tun_prefix[] = "drm_mode.";
+ bool res;
+
+ res = false;
+ tun_var_name = malloc(sizeof(tun_prefix) +
+ strlen(drm_get_connector_name(connector)), M_TEMP, M_WAITOK);
+ strcpy(tun_var_name, tun_prefix);
+ strcat(tun_var_name, drm_get_connector_name(connector));
+ tun_mode = getenv(tun_var_name);
+ if (tun_mode != NULL) {
+ res = drm_mode_parse_command_line_for_connector(tun_mode,
+ connector, cmdline_mode);
+ freeenv(tun_mode);
+ }
+ free(tun_var_name, M_TEMP);
+ return (res);
+}
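
A mode override is therefore just a kernel environment variable named after the connector. Assuming the usual <type>-<index> connector naming and the standard mode-string syntax, a loader.conf entry could look like:

	drm_mode.LVDS-1="1024x768"
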
+
+static bool drm_kms_helper_poll = true;
+
+static void drm_mode_validate_flag(struct drm_connector *connector,
+ int flags)
+{
+ struct drm_display_mode *mode, *t;
+
+ if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+ return;
+
+ list_for_each_entry_safe(mode, t, &connector->modes, head) {
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ !(flags & DRM_MODE_FLAG_INTERLACE))
+ mode->status = MODE_NO_INTERLACE;
+ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+ !(flags & DRM_MODE_FLAG_DBLSCAN))
+ mode->status = MODE_NO_DBLESCAN;
+ }
+
+ return;
+}
+
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Probe @connector for available modes. Modes will first be added to the
+ * connector's probed_modes
+ * list, then culled (based on validity and the @maxX, @maxY parameters) and
+ * put into the normal modes list.
+ *
+ * Intended to be used either at bootup time or when major configuration
+ * changes have occurred.
+ *
+ * FIXME: take into account monitor limits
+ *
+ * RETURNS:
+ * Number of modes found on @connector.
+ */
+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *t;
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ struct drm_cmdline_mode cmdline_mode;
+ int count = 0;
+ int mode_flags = 0;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+ drm_get_connector_name(connector));
+ /* set all modes to the unverified state */
+ list_for_each_entry_safe(mode, t, &connector->modes, head)
+ mode->status = MODE_UNVERIFIED;
+
+ if (connector->force) {
+ if (connector->force == DRM_FORCE_ON)
+ connector->status = connector_status_connected;
+ else
+ connector->status = connector_status_disconnected;
+ if (connector->funcs->force)
+ connector->funcs->force(connector);
+ } else {
+ connector->status = connector->funcs->detect(connector, true);
+ drm_kms_helper_poll_enable(dev);
+ }
+
+ if (connector->status == connector_status_disconnected) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+ connector->base.id, drm_get_connector_name(connector));
+ drm_mode_connector_update_edid_property(connector, NULL);
+ goto prune;
+ }
+
+ count = (*connector_funcs->get_modes)(connector);
+ if (count == 0 && drm_fetch_cmdline_mode_from_kenv(connector,
+ &cmdline_mode)) {
+ mode = drm_mode_create_from_cmdline_mode(dev,
+ &cmdline_mode);
+ if (mode != NULL) {
+ DRM_DEBUG_KMS(
+ "[CONNECTOR:%d:%s] found manual override ",
+ connector->base.id,
+ drm_get_connector_name(connector));
+ drm_mode_debug_printmodeline(mode);
+ drm_mode_probed_add(connector, mode);
+ count++;
+ } else {
+ DRM_ERROR(
+ "[CONNECTOR:%d:%s] manual override mode: parse error\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
+ }
+ }
+ if (count == 0 && connector->status == connector_status_connected)
+ count = drm_add_modes_noedid(connector, 1024, 768);
+ if (count == 0)
+ goto prune;
+
+ drm_mode_connector_list_update(connector);
+
+ if (maxX && maxY)
+ drm_mode_validate_size(dev, &connector->modes, maxX,
+ maxY, 0);
+
+ if (connector->interlace_allowed)
+ mode_flags |= DRM_MODE_FLAG_INTERLACE;
+ if (connector->doublescan_allowed)
+ mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+ drm_mode_validate_flag(connector, mode_flags);
+
+ list_for_each_entry_safe(mode, t, &connector->modes, head) {
+ if (mode->status == MODE_OK)
+ mode->status = connector_funcs->mode_valid(connector,
+ mode);
+ }
+
+prune:
+ drm_mode_prune_invalid(dev, &connector->modes, true);
+
+ if (list_empty(&connector->modes))
+ return 0;
+
+ drm_mode_sort(&connector->modes);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
+ drm_get_connector_name(connector));
+ list_for_each_entry_safe(mode, t, &connector->modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_debug_printmodeline(mode);
+ }
+
+ return count;
+}
+
+/**
+ * drm_helper_encoder_in_use - check if a given encoder is in use
+ * @encoder: encoder to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @encoder's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @encoder is part of the mode_config, false otherwise.
+ */
+bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return true;
+ return false;
+}
+
+/**
+ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
+ * @crtc: CRTC to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @crtc's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @crtc is part of the mode_config, false otherwise.
+ */
+bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev = crtc->dev;
+ /* FIXME: Locking around list access? */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
+ return true;
+ return false;
+}
+
+static void
+drm_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+ if (encoder_funcs->disable)
+ (*encoder_funcs->disable)(encoder);
+ else
+ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+}
+
+/**
+ * drm_helper_disable_unused_functions - disable unused objects
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
+ * by calling its dpms function, which should power it off.
+ */
+void drm_helper_disable_unused_functions(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+ if (connector->status == connector_status_disconnected)
+ connector->encoder = NULL;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (!drm_helper_encoder_in_use(encoder)) {
+ drm_encoder_disable(encoder);
+			/* disconnect encoder from any connector and CRTC */
+ encoder->crtc = NULL;
+ }
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled) {
+ if (crtc_funcs->disable)
+ (*crtc_funcs->disable)(crtc);
+ else
+ (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
+ crtc->fb = NULL;
+ }
+ }
+}
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Return false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+ if (crtc == NULL)
+ printf("checking null crtc?\n");
+
+ dev = crtc->dev;
+
+ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ break;
+ crtc_mask <<= 1;
+ }
+
+ if (encoder->possible_crtcs & crtc_mask)
+ return true;
+ return false;
+}
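
possible_crtcs is thus a bitmask indexed by the CRTC's position in the mode_config crtc list; an encoder routable to the first two CRTCs would advertise, e.g.:

	encoder->possible_crtcs = (1 << 0) | (1 << 1);	/* CRTCs 0 and 1 */
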
+
+/*
+ * Check the CRTC we're going to map each output to vs. its current
+ * CRTC. If they don't match, we have to disable the output and the CRTC
+ * since the driver will have to re-route things.
+ */
+static void
+drm_crtc_prepare_encoders(struct drm_device *dev)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder_funcs = encoder->helper_private;
+ /* Disable unused encoders */
+ if (encoder->crtc == NULL)
+ drm_encoder_disable(encoder);
+ /* Disable encoders whose CRTC is about to change */
+ if (encoder_funcs->get_crtc &&
+ encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
+ drm_encoder_disable(encoder);
+ }
+}
+
+/**
+ * drm_crtc_helper_set_mode - set a mode
+ * @crtc: CRTC to program
+ * @mode: mode to use
+ * @x: horizontal offset into the framebuffer for scanout
+ * @y: vertical offset into the framebuffer for scanout
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
+ * to fixup or reject the mode prior to trying to set it.
+ *
+ * RETURNS:
+ * True if the mode was set successfully, or false otherwise.
+ */
+bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ int saved_x, saved_y;
+ struct drm_encoder *encoder;
+ bool ret = true;
+
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled)
+ return true;
+
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode)
+ return false;
+
+ saved_hwmode = crtc->hwmode;
+ saved_mode = crtc->mode;
+ saved_x = crtc->x;
+ saved_y = crtc->y;
+
+ /* Update crtc values up front so the driver can rely on them for mode
+ * setting.
+ */
+ crtc->mode = *mode;
+ crtc->x = x;
+ crtc->y = y;
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+ encoder_funcs = encoder->helper_private;
+ if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+ adjusted_mode))) {
+ goto done;
+ }
+ }
+
+ if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+ goto done;
+ }
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+ /* Prepare the encoders and CRTCs before setting the mode. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+ encoder_funcs = encoder->helper_private;
+ /* Disable the encoders as the first thing we do. */
+ encoder_funcs->prepare(encoder);
+ }
+
+ drm_crtc_prepare_encoders(dev);
+
+ crtc_funcs->prepare(crtc);
+
+ /* Set up the DPLL and any encoders state that needs to adjust or depend
+ * on the DPLL.
+ */
+ ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+ if (!ret)
+ goto done;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, drm_get_encoder_name(encoder),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ crtc_funcs->commit(crtc);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->commit(encoder);
+
+ }
+
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+
+ /* FIXME: add subpixel order */
+done:
+ drm_mode_destroy(dev, adjusted_mode);
+ if (!ret) {
+ crtc->hwmode = saved_hwmode;
+ crtc->mode = saved_mode;
+ crtc->x = saved_x;
+ crtc->y = saved_y;
+ }
+
+ return ret;
+}
+
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+ }
+ }
+
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
+/**
+ * drm_crtc_helper_set_config - set a new config from userspace
+ * @crtc: CRTC to setup
+ * @crtc_info: user provided configuration
+ * @new_mode: new mode to set
+ * @connector_set: set of connectors for the new config
+ * @fb: new framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Set up a new configuration, provided by the user in @crtc_info, and enable
+ * it.
+ *
+ * RETURNS:
+ * Zero. (FIXME)
+ */
+int drm_crtc_helper_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+ struct drm_encoder *save_encoders, *new_encoder, *encoder;
+ struct drm_framebuffer *old_fb = NULL;
+ bool mode_changed = false; /* if true do a full mode set */
+ bool fb_changed = false; /* if true and !mode_changed just do a flip */
+ struct drm_connector *save_connectors, *connector;
+ int count = 0, ro, fail = 0;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_mode_set save_set;
+ int ret = 0;
+ int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!set)
+ return -EINVAL;
+
+ if (!set->crtc)
+ return -EINVAL;
+
+ if (!set->crtc->helper_private)
+ return -EINVAL;
+
+ crtc_funcs = set->crtc->helper_private;
+
+ if (!set->mode)
+ set->fb = NULL;
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ return drm_crtc_helper_disable(set->crtc);
+ }
+
+ dev = set->crtc->dev;
+
+ /* Allocate space for the backup of all (non-pointer) crtc, encoder and
+ * connector data. */
+ save_crtcs = malloc(dev->mode_config.num_crtc * sizeof(struct drm_crtc),
+ DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ save_encoders = malloc(dev->mode_config.num_encoder *
+ sizeof(struct drm_encoder), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ save_connectors = malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ /* Copy data. Note that driver private data is not affected.
+	 * Should anything bad happen, only the expected state is
+	 * restored, not the driver's private bookkeeping.
+ */
+ count = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ save_crtcs[count++] = *crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ save_encoders[count++] = *encoder;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ save_connectors[count++] = *connector;
+ }
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (set->crtc->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ mode_changed = true;
+ } else if (set->fb == NULL) {
+ mode_changed = true;
+ } else
+ fb_changed = true;
+ }
+
+ if (set->x != set->crtc->x || set->y != set->crtc->y)
+ fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ mode_changed = true;
+ }
+
+ /* a) traverse passed in connector list and get encoders for them */
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ new_encoder = connector->encoder;
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == connector) {
+ new_encoder = connector_funcs->best_encoder(connector);
+				/* If we can't get an encoder for the
+				   connector being configured, fail. */
+ if (new_encoder == NULL)
+				/* don't break so the fail path works correctly */
+ fail = 1;
+ break;
+ }
+ }
+
+ if (new_encoder != connector->encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ mode_changed = true;
+ /* If the encoder is reused for another connector, then
+ * the appropriate crtc will be set later.
+ */
+ if (connector->encoder)
+ connector->encoder->crtc = NULL;
+ connector->encoder = new_encoder;
+ }
+ }
+
+ if (fail) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+
+ if (connector->encoder->crtc == set->crtc)
+ new_crtc = NULL;
+ else
+ new_crtc = connector->encoder->crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == connector)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (new_crtc &&
+ !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ if (new_crtc != connector->encoder->crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ mode_changed = true;
+ connector->encoder->crtc = new_crtc;
+ }
+ if (new_crtc) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ new_crtc->base.id);
+ } else {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.id, drm_get_connector_name(connector));
+ }
+ }
+
+ /* mode_set_base is not a required function */
+ if (fb_changed && !crtc_funcs->mode_set_base)
+ mode_changed = true;
+
+ if (mode_changed) {
+ set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+ if (set->crtc->enabled) {
+ DRM_DEBUG_KMS("attempting to set mode from"
+ " userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+ old_fb = set->crtc->fb;
+ set->crtc->fb = set->fb;
+ if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
+ set->x, set->y,
+ old_fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+ set->crtc->fb = old_fb;
+ ret = -EINVAL;
+ goto fail;
+ }
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+ }
+ }
+ drm_helper_disable_unused_functions(dev);
+ } else if (fb_changed) {
+ set->crtc->x = set->x;
+ set->crtc->y = set->y;
+
+ old_fb = set->crtc->fb;
+ if (set->crtc->fb != set->fb)
+ set->crtc->fb = set->fb;
+ ret = crtc_funcs->mode_set_base(set->crtc,
+ set->x, set->y, old_fb);
+ if (ret != 0) {
+ set->crtc->fb = old_fb;
+ goto fail;
+ }
+ }
+
+ free(save_connectors, DRM_MEM_KMS);
+ free(save_encoders, DRM_MEM_KMS);
+ free(save_crtcs, DRM_MEM_KMS);
+ return 0;
+
+fail:
+ /* Restore all previous data. */
+ count = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ *crtc = save_crtcs[count++];
+ }
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ *encoder = save_encoders[count++];
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ *connector = save_connectors[count++];
+ }
+
+ /* Try to restore the config */
+ if (mode_changed &&
+ !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
+ save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
+ free(save_connectors, DRM_MEM_KMS);
+ free(save_encoders, DRM_MEM_KMS);
+ free(save_crtcs, DRM_MEM_KMS);
+ return ret;
+}
+
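+/*
+ * DPMS modes are numbered with DRM_MODE_DPMS_ON == 0 and DRM_MODE_DPMS_OFF
+ * as the largest value, so the numeric minimum over all connectors feeding
+ * an encoder or crtc is the most-awake state any of them requests.
+ */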
+static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = crtc->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+/**
+ * drm_helper_connector_dpms
+ * @connector: affected connector
+ * @mode: DPMS mode
+ *
+ * Records the new DPMS state on the connector, then calls the encoder and
+ * crtc DPMS helper functions in the order appropriate for the requested
+ * power transition.
+ */
+void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ int old_dpms;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* from off to on, do crtc then encoder */
+ if (mode < old_dpms) {
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ }
+
+ /* from on to off, do encoder then crtc */
+ if (mode > old_dpms) {
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
+}
+
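+/*
+ * Fill the generic framebuffer fields from a drm_mode_fb_cmd2 request:
+ * size, per-plane pitches and offsets, and the depth/bpp pair derived
+ * from the FourCC pixel format.
+ */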
+int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ int i;
+
+ fb->width = mode_cmd->width;
+ fb->height = mode_cmd->height;
+ for (i = 0; i < 4; i++) {
+ fb->pitches[i] = mode_cmd->pitches[i];
+ fb->offsets[i] = mode_cmd->offsets[i];
+ }
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+ &fb->bits_per_pixel);
+ fb->pixel_format = mode_cmd->pixel_format;
+
+ return 0;
+}
+
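+/*
+ * Re-apply the last-known mode, position and framebuffer on every enabled
+ * crtc; used after suspend/resume when the hardware state has been lost.
+ */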
+int drm_helper_resume_force_mode(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ int ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+ if (!crtc->enabled)
+ continue;
+
+ ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+
+ if (!ret)
+ DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+ /* Turn off outputs that were already powered off */
+ if (drm_helper_choose_crtc_dpms(crtc)) {
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if(encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
+ /* disable the unused connectors while restoring the modesetting */
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
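+/*
+ * Connector output polling runs as a timeout task on the system taskqueue:
+ * output_poll_execute() re-arms itself every DRM_OUTPUT_POLL_PERIOD ticks
+ * (10 seconds) for as long as some connector still asks to be polled.
+ */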
+#define DRM_OUTPUT_POLL_PERIOD (10 * hz)
+static void output_poll_execute(void *ctx, int pending)
+{
+ struct drm_device *dev;
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool repoll = false, changed = false;
+
+ if (!drm_kms_helper_poll)
+ return;
+
+ dev = ctx;
+
+ sx_xlock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* if this is HPD or polled don't check it -
+ TV out for instance */
+ if (!connector->polled)
+ continue;
+
+ else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
+ repoll = true;
+
+ old_status = connector->status;
+ /* if we are connected and don't want to poll for disconnect
+ skip it */
+ if (old_status == connector_status_connected &&
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
+ !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old_status, connector->status);
+ if (old_status != connector->status)
+ changed = true;
+ }
+
+ sx_xunlock(&dev->mode_config.mutex);
+
+ if (changed) {
+#if 0
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+#endif
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+ }
+
+ if (repoll) {
+ taskqueue_enqueue_timeout(taskqueue_thread,
+ &dev->mode_config.output_poll_task,
+ DRM_OUTPUT_POLL_PERIOD);
+ }
+}
+
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ taskqueue_cancel_timeout(taskqueue_thread,
+ &dev->mode_config.output_poll_task, NULL);
+}
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+ bool poll = false;
+ struct drm_connector *connector;
+
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ return;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->polled)
+ poll = true;
+ }
+
+ if (poll) {
+ taskqueue_enqueue_timeout(taskqueue_thread,
+ &dev->mode_config.output_poll_task, DRM_OUTPUT_POLL_PERIOD);
+ }
+}
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+
+ TIMEOUT_TASK_INIT(taskqueue_thread, &dev->mode_config.output_poll_task,
+ 0, output_poll_execute, dev);
+ dev->mode_config.poll_enabled = true;
+
+ drm_kms_helper_poll_enable(dev);
+}
+
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+}
+
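+/*
+ * Entry point for drivers' hot-plug interrupt handlers: turn the periodic
+ * poll into an immediate one so the status change is noticed right away.
+ */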
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ /* kill timer and schedule immediate execution, this doesn't block */
+ taskqueue_cancel_timeout(taskqueue_thread,
+ &dev->mode_config.output_poll_task, NULL);
+ if (drm_kms_helper_poll)
+ taskqueue_enqueue_timeout(taskqueue_thread,
+ &dev->mode_config.output_poll_task, 0);
+}
diff --git a/sys/dev/drm2/drm_crtc_helper.h b/sys/dev/drm2/drm_crtc_helper.h
new file mode 100644
index 0000000..7654263
--- /dev/null
+++ b/sys/dev/drm2/drm_crtc_helper.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * The DRM mode setting helper functions are common code for drivers to use
+ * if they wish.  Drivers are not forced to use this code in their
+ * implementations, but it would be useful if the code they do use at least
+ * provides a consistent interface and operation to userspace.
+ */
+
+#ifndef __DRM_CRTC_HELPER_H__
+#define __DRM_CRTC_HELPER_H__
+
+enum mode_set_atomic {
+ LEAVE_ATOMIC_MODE_SET,
+ ENTER_ATOMIC_MODE_SET,
+};
+
+struct drm_crtc_helper_funcs {
+ /*
+ * Control power levels on the CRTC. If the mode passed in is
+ * unsupported, the provider must use the next lowest power level.
+ */
+ void (*dpms)(struct drm_crtc *crtc, int mode);
+ void (*prepare)(struct drm_crtc *crtc);
+ void (*commit)(struct drm_crtc *crtc);
+
+ /* Provider can fixup or change mode timings before modeset occurs */
+ bool (*mode_fixup)(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ /* Actually set the mode */
+ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+
+ /* Move the crtc on the current fb to the given position *optional* */
+ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+ int (*mode_set_base_atomic)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic);
+
+ /* reload the current crtc LUT */
+ void (*load_lut)(struct drm_crtc *crtc);
+
+ /* disable crtc when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_crtc *crtc);
+};
+
+struct drm_encoder_helper_funcs {
+ void (*dpms)(struct drm_encoder *encoder, int mode);
+ void (*save)(struct drm_encoder *encoder);
+ void (*restore)(struct drm_encoder *encoder);
+
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*prepare)(struct drm_encoder *encoder);
+ void (*commit)(struct drm_encoder *encoder);
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
+ /* detect for DAC style encoders */
+ enum drm_connector_status (*detect)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ /* disable encoder when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_encoder *encoder);
+};
+
+struct drm_connector_helper_funcs {
+ int (*get_modes)(struct drm_connector *connector);
+ int (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+ struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+};
+
+extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
+extern void drm_helper_disable_unused_functions(struct drm_device *dev);
+extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
+extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
+extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
+
+extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+
+extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
+ const struct drm_crtc_helper_funcs *funcs)
+{
+ crtc->helper_private = __DECONST(void *, funcs);
+}
+
+static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
+ const struct drm_encoder_helper_funcs *funcs)
+{
+ encoder->helper_private = __DECONST(void *, funcs);
+}
+
+static inline void drm_connector_helper_add(struct drm_connector *connector,
+ const struct drm_connector_helper_funcs *funcs)
+{
+ connector->helper_private = __DECONST(void *, funcs);
+}
+
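+/*
+ * A minimal sketch of how a driver wires up the helper vtables (the
+ * mydrv_* names are hypothetical):
+ *
+ *	static const struct drm_crtc_helper_funcs mydrv_crtc_helper_funcs = {
+ *		.dpms = mydrv_crtc_dpms,
+ *		.mode_fixup = mydrv_crtc_mode_fixup,
+ *		.mode_set = mydrv_crtc_mode_set,
+ *		.prepare = mydrv_crtc_prepare,
+ *		.commit = mydrv_crtc_commit,
+ *	};
+ *
+ *	drm_crtc_helper_add(crtc, &mydrv_crtc_helper_funcs);
+ */
+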
+extern int drm_helper_resume_force_mode(struct drm_device *dev);
+extern void drm_kms_helper_poll_init(struct drm_device *dev);
+extern void drm_kms_helper_poll_fini(struct drm_device *dev);
+extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+
+extern void drm_kms_helper_poll_disable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+
+extern bool drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector,
+ struct drm_cmdline_mode *cmdline_mode);
+#endif
diff --git a/sys/dev/drm2/drm_dma.c b/sys/dev/drm2/drm_dma.c
new file mode 100644
index 0000000..c0a1c80
--- /dev/null
+++ b/sys/dev/drm2/drm_dma.c
@@ -0,0 +1,139 @@
+/*-
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_dma.c
+ * Support code for DMA buffer management.
+ *
+ * The implementation used to be significantly more complicated, but the
+ * complexity has been moved into the drivers as different buffer management
+ * schemes evolved.
+ */
+
+#include <dev/drm2/drmP.h>
+
+int drm_dma_setup(struct drm_device *dev)
+{
+
+ dev->dma = malloc(sizeof(*dev->dma), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+ if (dev->dma == NULL)
+ return ENOMEM;
+
+ DRM_SPININIT(&dev->dma_lock, "drmdma");
+
+ return 0;
+}
+
+void drm_dma_takedown(struct drm_device *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i, j;
+
+ if (dma == NULL)
+ return;
+
+ /* Clear dma buffers */
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].seg_count) {
+ DRM_DEBUG("order %d: buf_count = %d,"
+ " seg_count = %d\n", i, dma->bufs[i].buf_count,
+ dma->bufs[i].seg_count);
+ for (j = 0; j < dma->bufs[i].seg_count; j++) {
+ drm_pci_free(dev, dma->bufs[i].seglist[j]);
+ }
+ free(dma->bufs[i].seglist, DRM_MEM_SEGS);
+ }
+
+ if (dma->bufs[i].buf_count) {
+ for (j = 0; j < dma->bufs[i].buf_count; j++) {
+ free(dma->bufs[i].buflist[j].dev_private,
+ DRM_MEM_BUFS);
+ }
+ free(dma->bufs[i].buflist, DRM_MEM_BUFS);
+ }
+ }
+
+ free(dma->buflist, DRM_MEM_BUFS);
+ free(dma->pagelist, DRM_MEM_PAGES);
+ free(dev->dma, DRM_MEM_DRIVER);
+ dev->dma = NULL;
+ DRM_SPINUNINIT(&dev->dma_lock);
+}
+
+void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
+{
+ if (!buf)
+ return;
+
+ buf->pending = 0;
+	buf->file_priv = NULL;
+ buf->used = 0;
+}
+
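+/*
+ * Reclaim the DMA buffers still owned by a closing file: unqueued buffers
+ * are freed immediately, queued ones are marked DRM_LIST_RECLAIM, and
+ * buffers already on the hardware are left for the driver to recover.
+ */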
+void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma)
+ return;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ if (dma->buflist[i]->file_priv == file_priv) {
+ switch (dma->buflist[i]->list) {
+ case DRM_LIST_NONE:
+ drm_free_buffer(dev, dma->buflist[i]);
+ break;
+ case DRM_LIST_WAIT:
+ dma->buflist[i]->list = DRM_LIST_RECLAIM;
+ break;
+ default:
+ /* Buffer already on hardware. */
+ break;
+ }
+ }
+ }
+}
+
+/* Call into the driver-specific DMA handler */
+int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+
+ if (dev->driver->dma_ioctl) {
+ /* shared code returns -errno */
+ return -dev->driver->dma_ioctl(dev, data, file_priv);
+ } else {
+ DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
+ return EINVAL;
+ }
+}
diff --git a/sys/dev/drm2/drm_dp_helper.h b/sys/dev/drm2/drm_dp_helper.h
new file mode 100644
index 0000000..4eea73b
--- /dev/null
+++ b/sys/dev/drm2/drm_dp_helper.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
+
+/* From the VESA DisplayPort spec */
+
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+#define AUX_I2C_WRITE 0x0
+#define AUX_I2C_READ 0x1
+#define AUX_I2C_STATUS 0x2
+#define AUX_I2C_MOT 0x4
+
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+
+#define AUX_I2C_REPLY_ACK (0x0 << 6)
+#define AUX_I2C_REPLY_NACK (0x1 << 6)
+#define AUX_I2C_REPLY_DEFER (0x2 << 6)
+#define AUX_I2C_REPLY_MASK (0x3 << 6)
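+
+/*
+ * A reply byte carries the native-AUX status in bits 5:4 and, for
+ * I2C-over-AUX transactions, the I2C status in bits 7:6; e.g.
+ * (reply & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK checks for a
+ * native ACK.
+ */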
+
+/* AUX CH addresses */
+/* DPCD */
+#define DP_DPCD_REV 0x000
+
+#define DP_MAX_LINK_RATE 0x001
+
+#define DP_MAX_LANE_COUNT 0x002
+# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_TPS3_SUPPORTED (1 << 6)
+# define DP_ENHANCED_FRAME_CAP (1 << 7)
+
+#define DP_MAX_DOWNSPREAD 0x003
+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+
+#define DP_NORP 0x004
+
+#define DP_DOWNSTREAMPORT_PRESENT 0x005
+# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
+# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
+/* 00b = DisplayPort */
+/* 01b = Analog */
+/* 10b = TMDS or HDMI */
+/* 11b = Other */
+# define DP_FORMAT_CONVERSION (1 << 3)
+
+#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+
+#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
+
+#define DP_PSR_SUPPORT 0x070
+# define DP_PSR_IS_SUPPORTED 1
+#define DP_PSR_CAPS 0x071
+# define DP_PSR_NO_TRAIN_ON_EXIT 1
+# define DP_PSR_SETUP_TIME_330 (0 << 1)
+# define DP_PSR_SETUP_TIME_275 (1 << 1)
+# define DP_PSR_SETUP_TIME_220 (2 << 1)
+# define DP_PSR_SETUP_TIME_165 (3 << 1)
+# define DP_PSR_SETUP_TIME_110 (4 << 1)
+# define DP_PSR_SETUP_TIME_55 (5 << 1)
+# define DP_PSR_SETUP_TIME_0 (6 << 1)
+# define DP_PSR_SETUP_TIME_MASK (7 << 1)
+# define DP_PSR_SETUP_TIME_SHIFT 1
+
+/* link configuration */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+# define DP_LINK_BW_5_4 0x14
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_3 3
+# define DP_TRAINING_PATTERN_MASK 0x3
+
+# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+
+#define DP_PSR_EN_CFG 0x170
+# define DP_PSR_ENABLE (1 << 0)
+# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
+# define DP_PSR_CRC_VERIFICATION (1 << 2)
+# define DP_PSR_FRAME_CAPTURE (1 << 3)
+
+#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
+# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
+# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
+# define DP_CP_IRQ (1 << 2)
+# define DP_SINK_SPECIFIC_IRQ (1 << 6)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
+ DP_LANE_CHANNEL_EQ_DONE | \
+ DP_LANE_SYMBOL_LOCKED)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+#define DP_TEST_REQUEST 0x218
+# define DP_TEST_LINK_TRAINING (1 << 0)
+# define DP_TEST_LINK_PATTERN (1 << 1)
+# define DP_TEST_LINK_EDID_READ (1 << 2)
+# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
+
+#define DP_TEST_LINK_RATE 0x219
+# define DP_LINK_RATE_162 (0x6)
+# define DP_LINK_RATE_27 (0xa)
+
+#define DP_TEST_LANE_COUNT 0x220
+
+#define DP_TEST_PATTERN 0x221
+
+#define DP_TEST_RESPONSE 0x260
+# define DP_TEST_ACK (1 << 0)
+# define DP_TEST_NAK (1 << 1)
+# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
+
+#define DP_SET_POWER 0x600
+# define DP_SET_POWER_D0 0x1
+# define DP_SET_POWER_D3 0x2
+
+#define DP_PSR_ERROR_STATUS 0x2006
+# define DP_PSR_LINK_CRC_ERROR (1 << 0)
+# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
+
+#define DP_PSR_ESI 0x2007
+# define DP_PSR_CAPS_CHANGE (1 << 0)
+
+#define DP_PSR_STATUS 0x2008
+# define DP_PSR_SINK_INACTIVE 0
+# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
+# define DP_PSR_SINK_ACTIVE_RFB 2
+# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3
+# define DP_PSR_SINK_ACTIVE_RESYNC 4
+# define DP_PSR_SINK_INTERNAL_ERROR 7
+# define DP_PSR_SINK_STATE_MASK 0x07
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+struct iic_dp_aux_data {
+ bool running;
+ u16 address;
+ void *priv;
+ int (*aux_ch)(device_t adapter, int mode, uint8_t write_byte,
+ uint8_t *read_byte);
+ device_t port;
+};
+
+int iic_dp_aux_add_bus(device_t dev, const char *name,
+ int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte),
+ void *priv, device_t *bus, device_t *adapter);
+
+#endif /* _DRM_DP_HELPER_H_ */
diff --git a/sys/dev/drm2/drm_dp_iic_helper.c b/sys/dev/drm2/drm_dp_iic_helper.c
new file mode 100644
index 0000000..6d54170
--- /dev/null
+++ b/sys/dev/drm2/drm_dp_iic_helper.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/kobj.h>
+#include <sys/bus.h>
+#include <dev/iicbus/iic.h>
+#include "iicbus_if.h"
+#include <dev/iicbus/iiconf.h>
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_dp_helper.h>
+
+static int
+iic_dp_aux_transaction(device_t idev, int mode, uint8_t write_byte,
+ uint8_t *read_byte)
+{
+ struct iic_dp_aux_data *aux_data;
+ int ret;
+
+ aux_data = device_get_softc(idev);
+ ret = (*aux_data->aux_ch)(idev, mode, write_byte, read_byte);
+ return (ret);
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address.  If the I2C link is already running, this 'restarts'
+ * the connection with the new address; that is how a write followed by a
+ * read (as needed for DDC) is performed.
+ */
+static int
+iic_dp_aux_address(device_t idev, u16 address, bool reading)
+{
+ struct iic_dp_aux_data *aux_data;
+ int mode, ret;
+
+ aux_data = device_get_softc(idev);
+ mode = MODE_I2C_START;
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ aux_data->address = address;
+ aux_data->running = true;
+ ret = iic_dp_aux_transaction(idev, mode, 0, NULL);
+ return (ret);
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+iic_dp_aux_stop(device_t idev, bool reading)
+{
+ struct iic_dp_aux_data *aux_data;
+ int mode;
+
+ aux_data = device_get_softc(idev);
+ mode = MODE_I2C_STOP;
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (aux_data->running) {
+ (void)iic_dp_aux_transaction(idev, mode, 0, NULL);
+ aux_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address; the I2C link must be
+ * running or this returns EIO.
+ */
+static int
+iic_dp_aux_put_byte(device_t idev, u8 byte)
+{
+ struct iic_dp_aux_data *aux_data;
+ int ret;
+
+ aux_data = device_get_softc(idev);
+
+ if (!aux_data->running)
+ return (EIO);
+
+ ret = iic_dp_aux_transaction(idev, MODE_I2C_WRITE, byte, NULL);
+ return (ret);
+}
+
+/*
+ * Read a single byte from the current I2C address; the I2C link must be
+ * running or this returns EIO.
+ */
+static int
+iic_dp_aux_get_byte(device_t idev, u8 *byte_ret)
+{
+ struct iic_dp_aux_data *aux_data;
+ int ret;
+
+ aux_data = device_get_softc(idev);
+
+ if (!aux_data->running)
+ return (EIO);
+
+ ret = iic_dp_aux_transaction(idev, MODE_I2C_READ, 0, byte_ret);
+ return (ret);
+}
+
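+/*
+ * iicbus transfer method: run each iic_msg as an I2C-over-AUX transaction,
+ * (re)addressing the sink for every message, moving the payload one byte
+ * at a time, and closing the link with a STOP at the end or on error.
+ */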
+static int
+iic_dp_aux_xfer(device_t idev, struct iic_msg *msgs, uint32_t num)
+{
+ u8 *buf;
+ int b, m, ret;
+ u16 len;
+ bool reading;
+
+ ret = 0;
+ reading = false;
+
+ for (m = 0; m < num; m++) {
+ len = msgs[m].len;
+ buf = msgs[m].buf;
+ reading = (msgs[m].flags & IIC_M_RD) != 0;
+ ret = iic_dp_aux_address(idev, msgs[m].slave, reading);
+ if (ret != 0)
+ break;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = iic_dp_aux_get_byte(idev, &buf[b]);
+ if (ret != 0)
+ break;
+ }
+ } else {
+ for (b = 0; b < len; b++) {
+ ret = iic_dp_aux_put_byte(idev, buf[b]);
+ if (ret != 0)
+ break;
+ }
+ }
+ if (ret != 0)
+ break;
+ }
+ iic_dp_aux_stop(idev, reading);
+ DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+ return (ret);
+}
+
+static void
+iic_dp_aux_reset_bus(device_t idev)
+{
+
+	(void)iic_dp_aux_address(idev, 0, false);
+	iic_dp_aux_stop(idev, false);
+}
+
+static int
+iic_dp_aux_reset(device_t idev, u_char speed, u_char addr, u_char *oldaddr)
+{
+
+ iic_dp_aux_reset_bus(idev);
+ return (0);
+}
+
+static int
+iic_dp_aux_prepare_bus(device_t idev)
+{
+
+ /* adapter->retries = 3; */
+ iic_dp_aux_reset_bus(idev);
+ return (0);
+}
+
+static int
+iic_dp_aux_probe(device_t idev)
+{
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+iic_dp_aux_attach(device_t idev)
+{
+ struct iic_dp_aux_data *aux_data;
+
+ aux_data = device_get_softc(idev);
+ aux_data->port = device_add_child(idev, "iicbus", -1);
+ if (aux_data->port == NULL)
+ return (ENXIO);
+ device_quiet(aux_data->port);
+ bus_generic_attach(idev);
+ return (0);
+}
+
+static int
+iic_dp_aux_detach(device_t idev)
+{
+ struct iic_dp_aux_data *aux_data;
+ device_t port;
+
+ aux_data = device_get_softc(idev);
+
+ port = aux_data->port;
+ bus_generic_detach(idev);
+ if (port != NULL)
+ device_delete_child(idev, port);
+
+ return (0);
+}
+
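+/*
+ * Create a drm_iic_dp_aux child carrying the given aux_ch callback and
+ * attach an iicbus on top of it; Giant is taken because newbus device
+ * creation and attachment still run under it.
+ */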
+int
+iic_dp_aux_add_bus(device_t dev, const char *name,
+ int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte),
+ void *priv, device_t *bus, device_t *adapter)
+{
+ device_t ibus;
+ struct iic_dp_aux_data *data;
+ int idx, error;
+ static int dp_bus_counter;
+
+ mtx_lock(&Giant);
+
+ idx = atomic_fetchadd_int(&dp_bus_counter, 1);
+ ibus = device_add_child(dev, "drm_iic_dp_aux", idx);
+ if (ibus == NULL) {
+ mtx_unlock(&Giant);
+ DRM_ERROR("drm_iic_dp_aux bus %d creation error\n", idx);
+ return (-ENXIO);
+ }
+ device_quiet(ibus);
+ error = device_probe_and_attach(ibus);
+ if (error != 0) {
+ device_delete_child(dev, ibus);
+ mtx_unlock(&Giant);
+ DRM_ERROR("drm_iic_dp_aux bus %d attach failed, %d\n",
+ idx, error);
+ return (-error);
+ }
+ data = device_get_softc(ibus);
+ data->running = false;
+ data->address = 0;
+ data->aux_ch = ch;
+ data->priv = priv;
+ error = iic_dp_aux_prepare_bus(ibus);
+ if (error == 0) {
+ *bus = ibus;
+ *adapter = data->port;
+ }
+ mtx_unlock(&Giant);
+ return (error);
+}
+
+static device_method_t drm_iic_dp_aux_methods[] = {
+ DEVMETHOD(device_probe, iic_dp_aux_probe),
+ DEVMETHOD(device_attach, iic_dp_aux_attach),
+ DEVMETHOD(device_detach, iic_dp_aux_detach),
+ DEVMETHOD(iicbus_reset, iic_dp_aux_reset),
+ DEVMETHOD(iicbus_transfer, iic_dp_aux_xfer),
+ DEVMETHOD_END
+};
+static driver_t drm_iic_dp_aux_driver = {
+ "drm_iic_dp_aux",
+ drm_iic_dp_aux_methods,
+ sizeof(struct iic_dp_aux_data)
+};
+static devclass_t drm_iic_dp_aux_devclass;
+DRIVER_MODULE_ORDERED(drm_iic_dp_aux, drmn, drm_iic_dp_aux_driver,
+ drm_iic_dp_aux_devclass, 0, 0, SI_ORDER_SECOND);
diff --git a/sys/dev/drm2/drm_drawable.c b/sys/dev/drm2/drm_drawable.c
new file mode 100644
index 0000000..e8c4e02
--- /dev/null
+++ b/sys/dev/drm2/drm_drawable.c
@@ -0,0 +1,173 @@
+/*-
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_drawable.c
+ * This file implements ioctls to store information along with DRM drawables,
+ * such as the current set of cliprects for vblank-synced buffer swaps.
+ */
+
+#include <dev/drm2/drmP.h>
+
+struct bsd_drm_drawable_info {
+ struct drm_drawable_info info;
+ int handle;
+ RB_ENTRY(bsd_drm_drawable_info) tree;
+};
+
+static int
+drm_drawable_compare(struct bsd_drm_drawable_info *a,
+ struct bsd_drm_drawable_info *b)
+{
+ if (a->handle > b->handle)
+ return 1;
+ if (a->handle < b->handle)
+ return -1;
+ return 0;
+}
+
+RB_GENERATE_STATIC(drawable_tree, bsd_drm_drawable_info, tree,
+ drm_drawable_compare);
+
+struct drm_drawable_info *
+drm_get_drawable_info(struct drm_device *dev, int handle)
+{
+ struct bsd_drm_drawable_info find, *result;
+
+ find.handle = handle;
+	result = RB_FIND(drawable_tree, &dev->drw_head, &find);
+
+	return (result != NULL ? &result->info : NULL);
+}
+
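+/*
+ * Drawable handles come from the dev->drw_unrhdr unit-number allocator and
+ * index the bsd_drm_drawable_info nodes in an RB tree under drw_lock.
+ */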
+int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_draw *draw = data;
+ struct bsd_drm_drawable_info *info;
+
+ info = malloc(sizeof(struct bsd_drm_drawable_info), DRM_MEM_DRAWABLE,
+ M_NOWAIT | M_ZERO);
+ if (info == NULL)
+ return ENOMEM;
+
+ info->handle = alloc_unr(dev->drw_unrhdr);
+ DRM_SPINLOCK(&dev->drw_lock);
+ RB_INSERT(drawable_tree, &dev->drw_head, info);
+ draw->handle = info->handle;
+ DRM_SPINUNLOCK(&dev->drw_lock);
+
+ DRM_DEBUG("%d\n", draw->handle);
+
+ return 0;
+}
+
+int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_draw *draw = (struct drm_draw *)data;
+ struct drm_drawable_info *info;
+
+ DRM_SPINLOCK(&dev->drw_lock);
+ info = drm_get_drawable_info(dev, draw->handle);
+ if (info != NULL) {
+ RB_REMOVE(drawable_tree, &dev->drw_head,
+ (struct bsd_drm_drawable_info *)info);
+ DRM_SPINUNLOCK(&dev->drw_lock);
+ free_unr(dev->drw_unrhdr, draw->handle);
+ free(info->rects, DRM_MEM_DRAWABLE);
+ free(info, DRM_MEM_DRAWABLE);
+ return 0;
+ } else {
+ DRM_SPINUNLOCK(&dev->drw_lock);
+ return EINVAL;
+ }
+}
+
+int drm_update_draw(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_drawable_info *info;
+ struct drm_update_draw *update = (struct drm_update_draw *)data;
+ int ret;
+
+ info = drm_get_drawable_info(dev, update->handle);
+ if (info == NULL)
+ return EINVAL;
+
+ switch (update->type) {
+ case DRM_DRAWABLE_CLIPRECTS:
+ DRM_SPINLOCK(&dev->drw_lock);
+ if (update->num != info->num_rects) {
+ free(info->rects, DRM_MEM_DRAWABLE);
+ info->rects = NULL;
+ info->num_rects = 0;
+ }
+ if (update->num == 0) {
+ DRM_SPINUNLOCK(&dev->drw_lock);
+ return 0;
+ }
+ if (info->rects == NULL) {
+ info->rects = malloc(sizeof(*info->rects) *
+ update->num, DRM_MEM_DRAWABLE, M_NOWAIT);
+ if (info->rects == NULL) {
+ DRM_SPINUNLOCK(&dev->drw_lock);
+ return ENOMEM;
+ }
+ info->num_rects = update->num;
+ }
+ /* For some reason the pointer arg is unsigned long long. */
+ ret = copyin((void *)(intptr_t)update->data, info->rects,
+ sizeof(*info->rects) * info->num_rects);
+ DRM_SPINUNLOCK(&dev->drw_lock);
+ return ret;
+ default:
+ return EINVAL;
+ }
+}
+
+void drm_drawable_free_all(struct drm_device *dev)
+{
+ struct bsd_drm_drawable_info *info, *next;
+
+ DRM_SPINLOCK(&dev->drw_lock);
+	for (info = RB_MIN(drawable_tree, &dev->drw_head);
+	    info != NULL; info = next) {
+ next = RB_NEXT(drawable_tree, &dev->drw_head, info);
+ RB_REMOVE(drawable_tree, &dev->drw_head,
+ (struct bsd_drm_drawable_info *)info);
+ DRM_SPINUNLOCK(&dev->drw_lock);
+ free_unr(dev->drw_unrhdr, info->handle);
+ free(info->info.rects, DRM_MEM_DRAWABLE);
+ free(info, DRM_MEM_DRAWABLE);
+ DRM_SPINLOCK(&dev->drw_lock);
+ }
+ DRM_SPINUNLOCK(&dev->drw_lock);
+}
diff --git a/sys/dev/drm2/drm_drv.c b/sys/dev/drm2/drm_drv.c
new file mode 100644
index 0000000..42ff194
--- /dev/null
+++ b/sys/dev/drm2/drm_drv.c
@@ -0,0 +1,980 @@
+/*-
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_drv.c
+ * The catch-all file for DRM device support, including module setup/teardown,
+ * open/close, and ioctl dispatch.
+ */
+
+
+#include <sys/limits.h>
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_sarea.h>
+#include <dev/drm2/drm_mode.h>
+
+#ifdef DRM_DEBUG_DEFAULT_ON
+int drm_debug_flag = 1;
+#else
+int drm_debug_flag = 0;
+#endif
+int drm_notyet_flag = 0;
+
+unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
+unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
+
+static int drm_load(struct drm_device *dev);
+static void drm_unload(struct drm_device *dev);
+static drm_pci_id_list_t *drm_find_description(int vendor, int device,
+ drm_pci_id_list_t *idlist);
+
+static int
+drm_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ TUNABLE_INT_FETCH("drm.debug", &drm_debug_flag);
+ TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
+ break;
+ }
+ return (0);
+}
+
+static moduledata_t drm_mod = {
+ "drmn",
+ drm_modevent,
+ 0
+};
+DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(drmn, 1);
+MODULE_DEPEND(drmn, agp, 1, 1, 1);
+MODULE_DEPEND(drmn, pci, 1, 1, 1);
+MODULE_DEPEND(drmn, mem, 1, 1, 1);
+MODULE_DEPEND(drmn, iicbus, 1, 1, 1);
+
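+/*
+ * The ioctl dispatch table, indexed by ioctl number.  The flags gate each
+ * call: DRM_AUTH requires an authenticated client, DRM_MASTER the current
+ * master, DRM_ROOT_ONLY the superuser, and DRM_UNLOCKED lets the handler
+ * run without the global device lock.
+ */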
+static drm_ioctl_desc_t drm_ioctls[256] = {
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+};
+
+static struct cdevsw drm_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = drm_open,
+ .d_read = drm_read,
+ .d_ioctl = drm_ioctl,
+ .d_poll = drm_poll,
+ .d_mmap = drm_mmap,
+ .d_mmap_single = drm_gem_mmap_single,
+ .d_name = "drm",
+ .d_flags = D_TRACKCLOSE
+};
+
+static int drm_msi = 1; /* Enable by default. */
+TUNABLE_INT("hw.drm.msi", &drm_msi);
+SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
+SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
+ "Enable MSI interrupts for drm devices");
+
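+/*
+ * Devices on which MSI is known to misbehave; these fall back to the
+ * legacy INTx interrupt.
+ */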
+static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
+	{0x8086, 0x2772}, /* Intel i945G */
+	{0x8086, 0x27A2}, /* Intel i945GM */
+	{0x8086, 0x27AE}, /* Intel i945GME */
+ {0, 0}
+};
+
+static int drm_msi_is_blacklisted(int vendor, int device)
+{
+ int i = 0;
+
+ for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
+ if ((drm_msi_blacklist[i].vendor == vendor) &&
+ (drm_msi_blacklist[i].device == device)) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
+{
+ drm_pci_id_list_t *id_entry;
+ int vendor, device;
+
+ vendor = pci_get_vendor(kdev);
+ device = pci_get_device(kdev);
+
+ if (pci_get_class(kdev) != PCIC_DISPLAY
+ || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
+ return ENXIO;
+
+ id_entry = drm_find_description(vendor, device, idlist);
+ if (id_entry != NULL) {
+		if (!device_get_desc(kdev)) {
+			DRM_DEBUG("desc: %s\n", id_entry->name);
+			device_set_desc(kdev, id_entry->name);
+ }
+ return 0;
+ }
+
+ return ENXIO;
+}
+
+int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
+{
+ struct drm_device *dev;
+ drm_pci_id_list_t *id_entry;
+ int error, msicount;
+
+ dev = device_get_softc(kdev);
+
+ dev->device = kdev;
+
+ dev->pci_domain = pci_get_domain(dev->device);
+ dev->pci_bus = pci_get_bus(dev->device);
+ dev->pci_slot = pci_get_slot(dev->device);
+ dev->pci_func = pci_get_function(dev->device);
+
+ dev->pci_vendor = pci_get_vendor(dev->device);
+ dev->pci_device = pci_get_device(dev->device);
+
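+	/*
+	 * Prefer a single MSI message when the tunable allows it and the
+	 * chipset is not blacklisted; otherwise fall back to a shareable
+	 * legacy interrupt line.
+	 */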
+ if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
+ if (drm_msi &&
+ !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
+ msicount = pci_msi_count(dev->device);
+ DRM_DEBUG("MSI count = %d\n", msicount);
+ if (msicount > 1)
+ msicount = 1;
+
+ if (pci_alloc_msi(dev->device, &msicount) == 0) {
+ DRM_INFO("MSI enabled %d message(s)\n",
+ msicount);
+ dev->msi_enabled = 1;
+ dev->irqrid = 1;
+ }
+ }
+
+ dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
+ &dev->irqrid, RF_SHAREABLE);
+ if (!dev->irqr) {
+ return (ENOENT);
+ }
+
+ dev->irq = (int) rman_get_start(dev->irqr);
+ }
+
+ mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
+ mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
+ mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
+ mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
+ mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
+ sx_init(&dev->dev_struct_lock, "drmslk");
+
+ id_entry = drm_find_description(dev->pci_vendor,
+ dev->pci_device, idlist);
+ dev->id_entry = id_entry;
+
+ error = drm_load(dev);
+ if (error == 0)
+ error = drm_create_cdevs(kdev);
+ return (error);
+}
+
+int
+drm_create_cdevs(device_t kdev)
+{
+ struct drm_device *dev;
+ int error, unit;
+
+ unit = device_get_unit(kdev);
+ dev = device_get_softc(kdev);
+
+ error = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &dev->devnode,
+ &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID,
+ DRM_DEV_MODE, "dri/card%d", unit);
+ if (error == 0)
+ dev->devnode->si_drv1 = dev;
+ return (error);
+}
+
+int drm_detach(device_t kdev)
+{
+ struct drm_device *dev;
+
+ dev = device_get_softc(kdev);
+ drm_unload(dev);
+ if (dev->irqr) {
+ bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
+ dev->irqr);
+ if (dev->msi_enabled) {
+ pci_release_msi(dev->device);
+ DRM_INFO("MSI released\n");
+ }
+ }
+ return (0);
+}
+
+#ifndef DRM_DEV_NAME
+#define DRM_DEV_NAME "drm"
+#endif
+
+devclass_t drm_devclass;
+
+drm_pci_id_list_t *drm_find_description(int vendor, int device,
+ drm_pci_id_list_t *idlist)
+{
+ int i = 0;
+
+ for (i = 0; idlist[i].vendor != 0; i++) {
+ if ((idlist[i].vendor == vendor) &&
+ ((idlist[i].device == device) ||
+ (idlist[i].device == 0))) {
+ return &idlist[i];
+ }
+ }
+ return NULL;
+}
+
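+/*
+ * Per-open-cycle initialization, run when the first file descriptor is
+ * opened on the device: build the SAREA, set up DMA if the driver uses it,
+ * and reset the auth magic lists and lock state.
+ */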
+static int drm_firstopen(struct drm_device *dev)
+{
+ drm_local_map_t *map;
+ int i;
+
+ DRM_LOCK_ASSERT(dev);
+
+ /* prebuild the SAREA */
+ i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
+ _DRM_CONTAINS_LOCK, &map);
+ if (i != 0)
+ return i;
+
+ if (dev->driver->firstopen)
+ dev->driver->firstopen(dev);
+
+ dev->buf_use = 0;
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
+ i = drm_dma_setup(dev);
+ if (i != 0)
+ return i;
+ }
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+
+ dev->lock.lock_queue = 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev->irq_enabled = 0;
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+
+ dev->buf_sigio = NULL;
+
+ DRM_DEBUG("\n");
+
+ return 0;
+}
+
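+/*
+ * Tear down everything drm_firstopen() and the clients built up: the auth
+ * magic lists, drawables, AGP and scatter/gather memory, non-driver maps
+ * and DMA buffers.  Called with the device lock held.
+ */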
+static int drm_lastclose(struct drm_device *dev)
+{
+ drm_magic_entry_t *pt, *next;
+ drm_local_map_t *map, *mapsave;
+ int i;
+
+ DRM_LOCK_ASSERT(dev);
+
+ DRM_DEBUG("\n");
+
+ if (dev->driver->lastclose != NULL)
+ dev->driver->lastclose(dev);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
+ drm_irq_uninstall(dev);
+
+ if (dev->unique) {
+ free(dev->unique, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ free(pt, DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+ DRM_UNLOCK(dev);
+ drm_drawable_free_all(dev);
+ DRM_LOCK(dev);
+
+ /* Clear AGP information */
+ if (dev->agp) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp intact until
+ * drm_unload is called.
+ */
+ for (entry = dev->agp->memory; entry; entry = nexte) {
+ nexte = entry->next;
+ if (entry->bound)
+ drm_agp_unbind_memory(entry->handle);
+ drm_agp_free_memory(entry->handle);
+ free(entry, DRM_MEM_AGPLISTS);
+ }
+ dev->agp->memory = NULL;
+
+ if (dev->agp->acquired)
+ drm_agp_release(dev);
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+ if (dev->sg != NULL) {
+ drm_sg_cleanup(dev->sg);
+ dev->sg = NULL;
+ }
+
+ TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
+ if (!(map->flags & _DRM_DRIVER))
+ drm_rmmap(dev, map);
+ }
+
+ drm_dma_takedown(dev);
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.file_priv = NULL;
+ DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
+ }
+
+ return 0;
+}
+
+static int drm_load(struct drm_device *dev)
+{
+ int i, retcode;
+
+ DRM_DEBUG("\n");
+
+ TAILQ_INIT(&dev->maplist);
+ dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
+ if (dev->map_unrhdr == NULL) {
+ DRM_ERROR("Couldn't allocate map number allocator\n");
+ return EINVAL;
+ }
+
+ drm_mem_init();
+ drm_sysctl_init(dev);
+ TAILQ_INIT(&dev->files);
+
+ dev->counters = 6;
+ dev->types[0] = _DRM_STAT_LOCK;
+ dev->types[1] = _DRM_STAT_OPENS;
+ dev->types[2] = _DRM_STAT_CLOSES;
+ dev->types[3] = _DRM_STAT_IOCTLS;
+ dev->types[4] = _DRM_STAT_LOCKS;
+ dev->types[5] = _DRM_STAT_UNLOCKS;
+
+ for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
+ atomic_set(&dev->counts[i], 0);
+
+ INIT_LIST_HEAD(&dev->vblank_event_list);
+
+ if (drm_core_has_AGP(dev)) {
+ if (drm_device_is_agp(dev))
+ dev->agp = drm_agp_init();
+ if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
+ dev->agp == NULL) {
+ DRM_ERROR("Card isn't AGP, or couldn't initialize "
+ "AGP.\n");
+ retcode = ENOMEM;
+ goto error;
+ }
+ if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) {
+ if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
+ dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
+ dev->agp->mtrr = 1;
+ }
+ }
+
+ retcode = drm_ctxbitmap_init(dev);
+ if (retcode != 0) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ goto error;
+ }
+
+ dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
+ if (dev->drw_unrhdr == NULL) {
+ DRM_ERROR("Couldn't allocate drawable number allocator\n");
+ retcode = ENOMEM;
+ goto error;
+ }
+
+ if (dev->driver->driver_features & DRIVER_GEM) {
+ retcode = drm_gem_init(dev);
+ if (retcode != 0) {
+ DRM_ERROR("Cannot initialize graphics execution "
+ "manager (GEM)\n");
+ goto error1;
+ }
+ }
+
+ if (dev->driver->load != NULL) {
+ DRM_LOCK(dev);
+ /* Shared code returns -errno. */
+ retcode = -dev->driver->load(dev,
+ dev->id_entry->driver_private);
+ if (pci_enable_busmaster(dev->device))
+ DRM_ERROR("Request to enable bus-master failed.\n");
+ DRM_UNLOCK(dev);
+ if (retcode != 0)
+ goto error;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s\n",
+ dev->driver->name,
+ dev->driver->major,
+ dev->driver->minor,
+ dev->driver->patchlevel,
+ dev->driver->date);
+
+ return 0;
+
+error1:
+ delete_unrhdr(dev->drw_unrhdr);
+error:
+ drm_sysctl_cleanup(dev);
+ DRM_LOCK(dev);
+ drm_lastclose(dev);
+ DRM_UNLOCK(dev);
+ if (dev->devnode != NULL)
+ destroy_dev(dev->devnode);
+
+ mtx_destroy(&dev->drw_lock);
+ mtx_destroy(&dev->vbl_lock);
+ mtx_destroy(&dev->irq_lock);
+ mtx_destroy(&dev->dev_lock);
+ mtx_destroy(&dev->event_lock);
+ sx_destroy(&dev->dev_struct_lock);
+
+ return retcode;
+}
+
+static void drm_unload(struct drm_device *dev)
+{
+ int i;
+
+ DRM_DEBUG("\n");
+
+ drm_sysctl_cleanup(dev);
+ if (dev->devnode != NULL)
+ destroy_dev(dev->devnode);
+
+ drm_ctxbitmap_cleanup(dev);
+
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_destroy(dev);
+
+ if (dev->agp && dev->agp->mtrr) {
+ int __unused retcode;
+
+ retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
+ dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
+ DRM_DEBUG("mtrr_del = %d", retcode);
+ }
+
+ drm_vblank_cleanup(dev);
+
+ DRM_LOCK(dev);
+ drm_lastclose(dev);
+ DRM_UNLOCK(dev);
+
+ /* Clean up PCI resources allocated by drm_bufs.c. We're not really
+ * worried about resource consumption while the DRM is inactive (between
+ * lastclose and firstopen or unload) because these aren't actually
+ * taking up KVA, just keeping the PCI resource allocated.
+ */
+ for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
+ if (dev->pcir[i] == NULL)
+ continue;
+ bus_release_resource(dev->device, SYS_RES_MEMORY,
+ dev->pcirid[i], dev->pcir[i]);
+ dev->pcir[i] = NULL;
+ }
+
+ if (dev->agp) {
+ free(dev->agp, DRM_MEM_AGPLISTS);
+ dev->agp = NULL;
+ }
+
+ if (dev->driver->unload != NULL) {
+ DRM_LOCK(dev);
+ dev->driver->unload(dev);
+ DRM_UNLOCK(dev);
+ }
+
+ delete_unrhdr(dev->drw_unrhdr);
+ delete_unrhdr(dev->map_unrhdr);
+
+ drm_mem_uninit();
+
+ if (pci_disable_busmaster(dev->device))
+ DRM_ERROR("Request to disable bus-master failed.\n");
+
+ mtx_destroy(&dev->drw_lock);
+ mtx_destroy(&dev->vbl_lock);
+ mtx_destroy(&dev->irq_lock);
+ mtx_destroy(&dev->dev_lock);
+ mtx_destroy(&dev->event_lock);
+ sx_destroy(&dev->dev_struct_lock);
+}
+
+int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_version *version = data;
+ int len;
+
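+/*
+ * DRM_COPY copies at most name##_len bytes of the string into the user
+ * buffer, then reports the full string length back so that userspace can
+ * resize its buffer and retry if the value was truncated.
+ */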
+#define DRM_COPY( name, value ) \
+ len = strlen( value ); \
+ if ( len > name##_len ) len = name##_len; \
+ name##_len = strlen( value ); \
+ if ( len && name ) { \
+ if ( DRM_COPY_TO_USER( name, value, len ) ) \
+ return EFAULT; \
+ }
+
+ version->version_major = dev->driver->major;
+ version->version_minor = dev->driver->minor;
+ version->version_patchlevel = dev->driver->patchlevel;
+
+ DRM_COPY(version->name, dev->driver->name);
+ DRM_COPY(version->date, dev->driver->date);
+ DRM_COPY(version->desc, dev->driver->desc);
+
+ return 0;
+}
+
+int
+drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
+{
+ struct drm_device *dev;
+ int retcode;
+
+ dev = kdev->si_drv1;
+ if (dev == NULL)
+ return (ENXIO);
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+ retcode = drm_open_helper(kdev, flags, fmt, p, dev);
+
+ if (retcode == 0) {
+ atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+ DRM_LOCK(dev);
+ mtx_lock(&Giant);
+ device_busy(dev->device);
+ mtx_unlock(&Giant);
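+		/* The first opener performs the one-time per-device setup. */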
+ if (!dev->open_count++)
+ retcode = drm_firstopen(dev);
+ DRM_UNLOCK(dev);
+ }
+
+ return (retcode);
+}
+
+void drm_close(void *data)
+{
+ struct drm_file *file_priv = data;
+ struct drm_device *dev = file_priv->dev;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+ DRM_LOCK(dev);
+
+ if (dev->driver->preclose != NULL)
+ dev->driver->preclose(dev, file_priv);
+
+ /* ========================================================
+ * Begin inline drm_release
+ */
+
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ DRM_CURRENTPID, (long)dev->device, dev->open_count);
+
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_release(dev, file_priv);
+
+ if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
+ && dev->lock.file_priv == file_priv) {
+ DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
+ DRM_CURRENTPID,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ if (dev->driver->reclaim_buffers_locked != NULL)
+ dev->driver->reclaim_buffers_locked(dev, file_priv);
+
+ drm_lock_free(&dev->lock,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+ /* FIXME: may require heavy-handed reset of
+ hardware at this point, possibly
+ processed via a callback to the X
+ server. */
+ } else if (dev->driver->reclaim_buffers_locked != NULL &&
+ dev->lock.hw_lock != NULL) {
+ /* The lock is required to reclaim buffers */
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ retcode = EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
+ dev->lock.file_priv = file_priv;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+ /* Contention */
+ retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
+ PCATCH, "drmlk2", 0);
+ if (retcode)
+ break;
+ }
+ if (retcode == 0) {
+ dev->driver->reclaim_buffers_locked(dev, file_priv);
+ drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
+ }
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+ !dev->driver->reclaim_buffers_locked)
+ drm_reclaim_buffers(dev, file_priv);
+
+ funsetown(&dev->buf_sigio);
+ seldrain(&file_priv->event_poll);
+
+ if (dev->driver->postclose != NULL)
+ dev->driver->postclose(dev, file_priv);
+ TAILQ_REMOVE(&dev->files, file_priv, link);
+ free(file_priv, DRM_MEM_FILES);
+
+ /* ========================================================
+ * End inline drm_release
+ */
+
+ atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+ mtx_lock(&Giant);
+ device_unbusy(dev->device);
+ mtx_unlock(&Giant);
+ if (--dev->open_count == 0) {
+ retcode = drm_lastclose(dev);
+ }
+
+ DRM_UNLOCK(dev);
+}
+
+/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
+ */
+int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
+ DRM_STRUCTPROC *p)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
+ int nr = DRM_IOCTL_NR(cmd);
+ int is_driver_ioctl = 0;
+ struct drm_file *file_priv;
+
+ retcode = devfs_get_cdevpriv((void **)&file_priv);
+ if (retcode != 0) {
+ DRM_ERROR("can't find authenticator\n");
+ return EINVAL;
+ }
+
+ atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+ ++file_priv->ioctl_count;
+
+ DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
+ DRM_CURRENTPID, cmd, nr, (long)dev->device,
+ file_priv->authenticated);
+
+ switch (cmd) {
+ case FIONBIO:
+ case FIOASYNC:
+ return 0;
+
+ case FIOSETOWN:
+ return fsetown(*(int *)data, &dev->buf_sigio);
+
+ case FIOGETOWN:
+ *(int *) data = fgetown(&dev->buf_sigio);
+ return 0;
+ }
+
+ if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
+ DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
+ return EINVAL;
+ }
+
+ ioctl = &drm_ioctls[nr];
+ /* It's not a core DRM ioctl, try driver-specific. */
+ if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
+ /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
+ nr -= DRM_COMMAND_BASE;
+ if (nr > dev->driver->max_ioctl) {
+ DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
+ nr, dev->driver->max_ioctl);
+ return EINVAL;
+ }
+ ioctl = &dev->driver->ioctls[nr];
+ is_driver_ioctl = 1;
+ }
+ func = ioctl->func;
+
+ if (func == NULL) {
+ DRM_DEBUG("no function\n");
+ return EINVAL;
+ }
+
+ if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
+ ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+ ((ioctl->flags & DRM_MASTER) && !file_priv->master))
+ return EACCES;
+
+ if (is_driver_ioctl) {
+ if ((ioctl->flags & DRM_UNLOCKED) == 0)
+ DRM_LOCK(dev);
+ /* shared code returns -errno */
+ retcode = -func(dev, data, file_priv);
+ if ((ioctl->flags & DRM_UNLOCKED) == 0)
+ DRM_UNLOCK(dev);
+ } else {
+ retcode = func(dev, data, file_priv);
+ }
+
+ if (retcode != 0)
+ DRM_DEBUG(" returning %d\n", retcode);
+ if (retcode != 0 &&
+ (drm_debug_flag & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
+ printf(
+"pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
+ DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->device,
+ file_priv->authenticated, retcode);
+ }
+
+ return retcode;
+}
+
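+/*
+ * Find the map containing the SAREA (the shared-memory map that holds
+ * the hardware lock), if one exists.
+ */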
+drm_local_map_t *drm_getsarea(struct drm_device *dev)
+{
+ drm_local_map_t *map;
+
+ DRM_LOCK_ASSERT(dev);
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+ if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
+ return map;
+ }
+
+ return NULL;
+}
+
+int
+drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *top)
+{
+ struct sysctl_oid *oid;
+
+ snprintf(dev->busid_str, sizeof(dev->busid_str),
+ "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
+ dev->pci_slot, dev->pci_func);
+ oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
+ CTLFLAG_RD, dev->busid_str, 0, NULL);
+ if (oid == NULL)
+ return (ENOMEM);
+ dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
+ oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
+ "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
+ if (oid == NULL)
+ return (ENOMEM);
+
+ return (0);
+}
+
+#if DRM_LINUX
+
+#include <sys/sysproto.h>
+
+MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
+
+#define LINUX_IOCTL_DRM_MIN 0x6400
+#define LINUX_IOCTL_DRM_MAX 0x64ff
+
+static linux_ioctl_function_t drm_linux_ioctl;
+static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
+ LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
+
+SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
+ linux_ioctl_register_handler, &drm_handler);
+SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
+ linux_ioctl_unregister_handler, &drm_handler);
+
+/* The bits for in/out are switched on Linux */
+#define LINUX_IOC_IN IOC_OUT
+#define LINUX_IOC_OUT IOC_IN
+
+static int
+drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
+{
+ int error;
+ int cmd = args->cmd;
+
+ args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
+ if (cmd & LINUX_IOC_IN)
+ args->cmd |= IOC_IN;
+ if (cmd & LINUX_IOC_OUT)
+ args->cmd |= IOC_OUT;
+
+ error = ioctl(p, (struct ioctl_args *)args);
+
+ return error;
+}
+#endif /* DRM_LINUX */
+
+bool
+dmi_check_system(const struct dmi_system_id *sysid)
+{
+
+ /* XXXKIB */
+ return (false);
+}
+
diff --git a/sys/dev/drm2/drm_edid.c b/sys/dev/drm2/drm_edid.c
new file mode 100644
index 0000000..edd33b0
--- /dev/null
+++ b/sys/dev/drm2/drm_edid.c
@@ -0,0 +1,1781 @@
+/*
+ * Copyright (c) 2006 Luc Verhaegen (quirks list)
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/drm_edid_modes.h>
+#include <dev/iicbus/iic.h>
+#include <dev/iicbus/iiconf.h>
+#include "iicbus_if.h"
+
+#define version_greater(edid, maj, min) \
+ (((edid)->version > (maj)) || \
+ ((edid)->version == (maj) && (edid)->revision > (min)))
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/*
+ * EDID blocks out in the wild have a variety of bugs; try to collect
+ * them here (note that userspace may work around broken monitors first,
+ * but fixes should make their way here so that the kernel "just works"
+ * on as many displays as possible).
+ */
+
+/* First detailed mode wrong, use largest 60Hz mode */
+#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
+/* Reported 135MHz pixel clock is too high, needs adjustment */
+#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
+/* Prefer the largest mode at 75 Hz */
+#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
+/* Detail timing is in cm not mm */
+#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
+/* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
+/* Monitor forgot to set the first detailed is preferred bit. */
+#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
+/* use +hsync +vsync for detailed mode */
+#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+
+struct detailed_mode_closure {
+ struct drm_connector *connector;
+ struct edid *edid;
+ bool preferred;
+ u32 quirks;
+ int modes;
+};
+
+#define LEVEL_DMT 0
+#define LEVEL_GTF 1
+#define LEVEL_GTF2 2
+#define LEVEL_CVT 3
+
+static struct edid_quirk {
+ char *vendor;
+ int product_id;
+ u32 quirks;
+} edid_quirk_list[] = {
+ /* Acer AL1706 */
+ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Acer F51 */
+ { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Unknown Acer */
+ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Belinea 10 15 55 */
+ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+ { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* Envision Peripherals, Inc. EN-7100e */
+ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ /* Envision EN2028 */
+ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* Funai Electronics PM36B */
+ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ EDID_QUIRK_DETAILED_IN_CM },
+
+ /* LG Philips LCD LP154W01-A5 */
+ { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+ { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+
+ /* Philips 107p5 CRT */
+ { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Proview AY765C */
+ { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Samsung SyncMaster 205BW. Note: irony */
+ { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+ /* Samsung SyncMaster 22[5-6]BW */
+ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+ { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+};
+
+/*** DDC fetch and block validation ***/
+
+static const u8 edid_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
+
+/*
+ * Sanity check the header of the base EDID block. Return 8 if the header
+ * is perfect, down to 0 if it's totally wrong.
+ */
+int drm_edid_header_is_valid(const u8 *raw_edid)
+{
+ int i, score = 0;
+
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ return score;
+}
+
+/*
+ * Sanity check the EDID block (base or extension). Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
+ */
+static bool
+drm_edid_block_valid(u8 *raw_edid)
+{
+ int i;
+ u8 csum = 0;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (raw_edid[0] == 0x00) {
+ int score = drm_edid_header_is_valid(raw_edid);
+		if (score == 8) {
+			/* Header is perfect; nothing to fix. */
+		} else if (score >= 6) {
+			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+			memcpy(raw_edid, edid_header, sizeof(edid_header));
+		} else {
+			goto bad;
+		}
+ }
+
+ for (i = 0; i < EDID_LENGTH; i++)
+ csum += raw_edid[i];
+ if (csum) {
+ DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+
+ /* allow CEA to slide through, switches mangle this */
+ if (raw_edid[0] != 0x02)
+ goto bad;
+ }
+
+ /* per-block-type checks */
+ switch (raw_edid[0]) {
+ case 0: /* base */
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ break;
+
+ default:
+ break;
+ }
+
+ return 1;
+
+bad:
+ if (raw_edid) {
+ DRM_DEBUG_KMS("Raw EDID:\n");
+ if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) {
+ for (i = 0; i < EDID_LENGTH; ) {
+ printf("%02x", raw_edid[i]);
+ i++;
+ if (i % 16 == 0 || i == EDID_LENGTH)
+ printf("\n");
+ else if (i % 8 == 0)
+ printf(" ");
+ else
+ printf(" ");
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 *raw = (u8 *)edid;
+
+ if (!edid)
+ return false;
+
+ for (i = 0; i <= edid->extensions; i++)
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+ return false;
+
+ return true;
+}
+
+#define DDC_ADDR 0x50
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adapter
+ * \param buf : EDID data buffer to be filled
+ * \param block : EDID block number to fetch
+ * \param len : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling the i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(device_t adapter, unsigned char *buf,
+ int block, int len)
+{
+ unsigned char start = block * EDID_LENGTH;
+ int ret, retries = 5;
+
+ /* The core i2c driver will automatically retry the transfer if the
+ * adapter reports EAGAIN. However, we find that bit-banging transfers
+ * are susceptible to errors under a heavily loaded machine and
+ * generate spurious NAKs and timeouts. Retrying the transfer
+ * of the individual block a few times seems to overcome this.
+ */
+ do {
+ struct iic_msg msgs[] = {
+ {
+ .slave = DDC_ADDR,
+ .flags = IIC_M_WR,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .slave = DDC_ADDR,
+ .flags = IIC_M_RD,
+ .len = len,
+ .buf = buf,
+ }
+ };
+ ret = iicbus_transfer(adapter, msgs, 2);
+ if (ret != 0)
+ DRM_DEBUG_KMS("iicbus_transfer countdown %d error %d\n",
+ retries, ret);
+ } while (ret != 0 && --retries);
+
+ return (ret == 0 ? 0 : -1);
+}
+
+static bool drm_edid_is_zero(u8 *in_edid, int length)
+{
+ int i;
+ u32 *raw_edid = (u32 *)in_edid;
+
+ for (i = 0; i < length / 4; i++)
+ if (*(raw_edid + i) != 0)
+ return false;
+ return true;
+}
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, device_t adapter)
+{
+ int i, j = 0, valid_extensions = 0;
+ u8 *block, *new;
+
+ block = malloc(EDID_LENGTH, DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block))
+ break;
+ if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ connector->null_edid_counter++;
+ goto carp;
+ }
+ }
+ if (i == 4)
+ goto carp;
+
+	/* if there are no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
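+	/* M_WAITOK allocations sleep rather than fail, so no NULL check is needed. */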
+ new = reallocf(block, (block[0x7e] + 1) * EDID_LENGTH, DRM_MEM_KMS,
+ M_WAITOK);
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter,
+ block + (valid_extensions + 1) * EDID_LENGTH,
+ j, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+ valid_extensions++;
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("%s: Ignoring invalid EDID block %d.\n",
+ drm_get_connector_name(connector), j);
+ }
+
+ if (valid_extensions != block[0x7e]) {
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ new = reallocf(block, (valid_extensions + 1) * EDID_LENGTH,
+ DRM_MEM_KMS, M_WAITOK);
+ block = new;
+ }
+
+ DRM_DEBUG_KMS("got EDID from %s\n", drm_get_connector_name(connector));
+ return block;
+
+carp:
+ DRM_ERROR("%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+
+out:
+ free(block, DRM_MEM_KMS);
+ return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adapter
+ * \return 1 on success
+ */
+static bool
+drm_probe_ddc(device_t adapter)
+{
+ unsigned char out;
+
+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+ device_t adapter)
+{
+ struct edid *edid = NULL;
+
+ if (drm_probe_ddc(adapter))
+ edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+ connector->display_info.raw_edid = (char *)edid;
+
+ return edid;
+}
+
+/*** EDID parsing ***/
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
+static bool edid_vendor(struct edid *edid, char *vendor)
+{
+ char edid_vendor[3];
+
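+	/*
+	 * The EDID manufacturer ID packs three letters into two bytes,
+	 * 5 bits per letter with 'A' encoded as 1; adding '@' (0x40)
+	 * maps each 5-bit value back to an ASCII capital letter.
+	 */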
+ edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+ edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+ ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+ edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
+
+ return !strncmp(edid_vendor, vendor, 3);
+}
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ */
+static u32 edid_get_quirks(struct edid *edid)
+{
+ struct edid_quirk *quirk;
+ int i;
+
+ for (i = 0; i < DRM_ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+
+ if (edid_vendor(edid, quirk->vendor) &&
+ (EDID_PRODUCT_ID(edid) == quirk->product_id))
+ return quirk->quirks;
+ }
+
+ return 0;
+}
+
+#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r)))
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+static void edid_fixup_preferred(struct drm_connector *connector,
+ u32 quirks)
+{
+ struct drm_display_mode *t, *cur_mode, *preferred_mode;
+ int target_refresh = 0;
+
+ if (list_empty(&connector->probed_modes))
+ return;
+
+ if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+ target_refresh = 60;
+ if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+ target_refresh = 75;
+
+ preferred_mode = list_first_entry(&connector->probed_modes,
+ struct drm_display_mode, head);
+
+ list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
+ cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+ if (cur_mode == preferred_mode)
+ continue;
+
+ /* Largest mode is preferred */
+ if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+ preferred_mode = cur_mode;
+
+ /* At a given size, try to get closest to target refresh */
+ if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+ MODE_REFRESH_DIFF(cur_mode, target_refresh) <
+ MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+ preferred_mode = cur_mode;
+ }
+ }
+
+ preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh)
+{
+ struct drm_display_mode *mode = NULL;
+ int i;
+
+ for (i = 0; i < drm_num_dmt_modes; i++) {
+ struct drm_display_mode *ptr = &drm_dmt_modes[i];
+ if (hsize == ptr->hdisplay &&
+ vsize == ptr->vdisplay &&
+ fresh == drm_mode_vrefresh(ptr)) {
+ /* get the expected default mode */
+ mode = drm_mode_duplicate(dev, ptr);
+ break;
+ }
+ }
+ return mode;
+}
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ int i, n = 0;
+ u8 rev = ext[0x01], d = ext[0x02];
+ u8 *det_base = ext + d;
+
+ switch (rev) {
+ case 0:
+ /* can't happen */
+ return;
+ case 1:
+ /* have to infer how many blocks we have, check pixel clock */
+ for (i = 0; i < 6; i++)
+ if (det_base[18*i] || det_base[18*i+1])
+ n++;
+ break;
+ default:
+ /* explicit count */
+ n = min(ext[0x03] & 0x0f, 6);
+ break;
+ }
+
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ unsigned int i, n = min((int)ext[0x02], 6);
+ u8 *det_base = ext + 5;
+
+ if (ext[0x01] != 1)
+ return; /* unknown version */
+
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+ int i;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid == NULL)
+ return;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+ cb(&(edid->detailed_timings[i]), closure);
+
+ for (i = 1; i <= raw_edid[0x7e]; i++) {
+ u8 *ext = raw_edid + (i * EDID_LENGTH);
+ switch (*ext) {
+ case CEA_EXT:
+ cea_for_each_detailed_block(ext, cb, closure);
+ break;
+ case VTB_EXT:
+ vtb_for_each_detailed_block(ext, cb, closure);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+ if (r[15] & 0x10)
+ *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+ if (edid->revision >= 4) {
+		bool ret = false;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+
+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+ *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+ if (edid->revision >= 2) {
+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+ return LEVEL_CVT;
+ if (drm_gtf2_hbreak(edid))
+ return LEVEL_GTF2;
+ return LEVEL_GTF;
+ }
+ return LEVEL_DMT;
+}
+
+/*
+ * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
+ * monitors fill with ASCII space (0x20) instead.
+ */
+static int
+bad_std_timing(u8 a, u8 b)
+{
+ return (a == 0x00 && b == 0x00) ||
+ (a == 0x01 && b == 0x01) ||
+ (a == 0x20 && b == 0x20);
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @connector: connector the mode is for
+ * @edid: EDID block being parsed
+ * @t: standard timing params
+ * @revision: EDID revision level
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT/GTF/DMT.
+ */
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ struct std_timing *t, int revision)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
+ int hsize, vsize;
+ int vrefresh_rate;
+ unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
+ >> EDID_TIMING_ASPECT_SHIFT;
+ unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
+ >> EDID_TIMING_VFREQ_SHIFT;
+ int timing_level = standard_timing_level(edid);
+
+ if (bad_std_timing(t->hsize, t->vfreq_aspect))
+ return NULL;
+
+ /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
+ hsize = t->hsize * 8 + 248;
+ /* vrefresh_rate = vfreq + 60 */
+ vrefresh_rate = vfreq + 60;
+ /* the vdisplay is calculated based on the aspect ratio */
+ if (aspect_ratio == 0) {
+ if (revision < 3)
+ vsize = hsize;
+ else
+ vsize = (hsize * 10) / 16;
+ } else if (aspect_ratio == 1)
+ vsize = (hsize * 3) / 4;
+ else if (aspect_ratio == 2)
+ vsize = (hsize * 4) / 5;
+ else
+ vsize = (hsize * 9) / 16;
+
+ /* HDTV hack, part 1 */
+ if (vrefresh_rate == 60 &&
+ ((hsize == 1360 && vsize == 765) ||
+ (hsize == 1368 && vsize == 769))) {
+ hsize = 1366;
+ vsize = 768;
+ }
+
+ /*
+ * If this connector already has a mode for this size and refresh
+ * rate (because it came from detailed or CVT info), use that
+ * instead. This way we don't have to guess at interlace or
+ * reduced blanking.
+ */
+ list_for_each_entry(m, &connector->probed_modes, head)
+ if (m->hdisplay == hsize && m->vdisplay == vsize &&
+ drm_mode_vrefresh(m) == vrefresh_rate)
+ return NULL;
+
+ /* HDTV hack, part 2 */
+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
+ false);
+ mode->hdisplay = 1366;
+ mode->hsync_start = mode->hsync_start - 1;
+ mode->hsync_end = mode->hsync_end - 1;
+ return mode;
+ }
+
+ /* check whether it can be found in default mode table */
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
+ if (mode)
+ return mode;
+
+ switch (timing_level) {
+ case LEVEL_DMT:
+ break;
+ case LEVEL_GTF:
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ break;
+ case LEVEL_GTF2:
+ /*
+ * This is potentially wrong if there's ever a monitor with
+ * more than one ranges section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ free(mode, DRM_MEM_KMS);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(edid),
+ drm_gtf2_2c(edid),
+ drm_gtf2_k(edid),
+ drm_gtf2_2j(edid));
+ }
+ break;
+ case LEVEL_CVT:
+ mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+ false);
+ break;
+ }
+ return mode;
+}
+
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+ struct detailed_pixel_timing *pt)
+{
+ int i;
+ static const struct {
+ int w, h;
+ } cea_interlaced[] = {
+ { 1920, 1080 },
+ { 720, 480 },
+ { 1440, 480 },
+ { 2880, 480 },
+ { 720, 576 },
+ { 1440, 576 },
+ { 2880, 576 },
+ };
+
+ if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+ return;
+
+ for (i = 0; i < DRM_ARRAY_SIZE(cea_interlaced); i++) {
+ if ((mode->hdisplay == cea_interlaced[i].w) &&
+ (mode->vdisplay == cea_interlaced[i].h / 2)) {
+ mode->vdisplay *= 2;
+ mode->vsync_start *= 2;
+ mode->vsync_end *= 2;
+ mode->vtotal *= 2;
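+			/* An interlaced frame has an odd total line count. */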
+ mode->vtotal |= 1;
+ }
+ }
+
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+ * @edid: EDID block
+ * @timing: EDID detailed timing info
+ * @quirks: quirks to apply
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.
+ */
+static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ struct edid *edid,
+ struct detailed_timing *timing,
+ u32 quirks)
+{
+ struct drm_display_mode *mode;
+ struct detailed_pixel_timing *pt = &timing->data.pixel_data;
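+	/*
+	 * Detailed timing values are split into a low byte plus upper bits
+	 * packed into shared "hi" bytes; reassemble the full values here.
+	 */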
+ unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
+ unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
+ unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
+ unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+ unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+ unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
+
+ /* ignore tiny modes */
+ if (hactive < 64 || vactive < 64)
+ return NULL;
+
+ if (pt->misc & DRM_EDID_PT_STEREO) {
+ printf("stereo mode not supported\n");
+ return NULL;
+ }
+ if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+ printf("composite sync not supported\n");
+ }
+
+ /* it is incorrect if hsync/vsync width is zero */
+ if (!hsync_pulse_width || !vsync_pulse_width) {
+ DRM_DEBUG_KMS("Incorrect Detailed timing. "
+ "Wrong Hsync/Vsync pulse width\n");
+ return NULL;
+ }
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ timing->pixel_clock = htole16(1088);
+
+ mode->clock = le16toh(timing->pixel_clock) * 10;
+
+ mode->hdisplay = hactive;
+ mode->hsync_start = mode->hdisplay + hsync_offset;
+ mode->hsync_end = mode->hsync_start + hsync_pulse_width;
+ mode->htotal = mode->hdisplay + hblank;
+
+ mode->vdisplay = vactive;
+ mode->vsync_start = mode->vdisplay + vsync_offset;
+ mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+ mode->vtotal = mode->vdisplay + vblank;
+
+ /* Some EDIDs have bogus h/vtotal values */
+ if (mode->hsync_end > mode->htotal)
+ mode->htotal = mode->hsync_end + 1;
+ if (mode->vsync_end > mode->vtotal)
+ mode->vtotal = mode->vsync_end + 1;
+
+ drm_mode_do_interlace_quirk(mode, pt);
+
+ drm_mode_set_name(mode);
+
+ if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+ }
+
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+ mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+
+ if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ mode->width_mm *= 10;
+ mode->height_mm *= 10;
+ }
+
+ if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ mode->width_mm = edid->width_cm * 10;
+ mode->height_mm = edid->height_cm * 10;
+ }
+
+ return mode;
+}
+
+static bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+
+static bool
+mode_in_hsync_range(struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
+{
+ int hsync, hmin, hmax;
+
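+	/* EDID 1.4 flag bits in t[4] extend the stored limits by 255. */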
+ hmin = t[7];
+ if (edid->revision >= 4)
+ hmin += ((t[4] & 0x04) ? 255 : 0);
+ hmax = t[8];
+ if (edid->revision >= 4)
+ hmax += ((t[4] & 0x08) ? 255 : 0);
+ hsync = drm_mode_hsync(mode);
+
+ return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
+{
+ int vsync, vmin, vmax;
+
+ vmin = t[5];
+ if (edid->revision >= 4)
+ vmin += ((t[4] & 0x01) ? 255 : 0);
+ vmax = t[6];
+ if (edid->revision >= 4)
+ vmax += ((t[4] & 0x02) ? 255 : 0);
+ vsync = drm_mode_vrefresh(mode);
+
+ return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+ /* unspecified */
+ if (t[9] == 0 || t[9] == 255)
+ return 0;
+
+ /* 1.4 with CVT support gives us real precision, yay */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+ /* 1.3 is pathetic, so fuzz up a bit */
+ return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ u32 max_clock;
+ u8 *t = (u8 *)timing;
+
+ if (!mode_in_hsync_range(mode, edid, t))
+ return false;
+
+ if (!mode_in_vsync_range(mode, edid, t))
+ return false;
+
+ if ((max_clock = range_pixel_clock(edid, t)))
+ if (mode->clock > max_clock)
+ return false;
+
+ /* 1.4 max horizontal check */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+ return false;
+
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ return false;
+
+ return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < drm_num_dmt_modes; i++) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+ newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+static void
+do_inferred_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+
+ if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
+ closure->modes += drm_gtf_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+}
+
+static int
+add_inferred_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
+ &closure);
+
+ return closure.modes;
+}
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+ int i, j, m, modes = 0;
+ struct drm_display_mode *mode;
+ u8 *est = ((u8 *)timing) + 5;
+
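+	/*
+	 * Walk the six descriptor bytes MSB-first; each set bit among
+	 * bits 7..1 selects one mode from the EST III table.
+	 */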
+ for (i = 0; i < 6; i++) {
+ for (j = 7; j > 0; j--) {
+ m = (i * 8) + (7 - j);
+ if (m >= DRM_ARRAY_SIZE(est3_modes))
+ break;
+ if (est[i] & (1 << j)) {
+ mode = drm_mode_find_dmt(connector->dev,
+ est3_modes[m].w,
+ est3_modes[m].h,
+ est3_modes[m].r
+ /*, est3_modes[m].rb */);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
+static void
+do_established_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+
+ if (data->type == EDID_DETAIL_EST_TIMINGS)
+ closure->modes += drm_est3_modes(closure->connector, timing);
+}
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @connector: connector to add modes to
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above). Tease them out and add them to the global modes list.
+ */
+static int
+add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ unsigned long est_bits = edid->established_timings.t1 |
+ (edid->established_timings.t2 << 8) |
+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+ if (est_bits & (1<<i)) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid,
+ do_established_modes, &closure);
+
+ return modes + closure.modes;
+}
+
+static void
+do_standard_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct drm_connector *connector = closure->connector;
+ struct edid *edid = closure->edid;
+
+ if (data->type == EDID_DETAIL_STD_MODES) {
+ int i;
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[i];
+ newmode = drm_mode_std(connector, edid, std,
+ edid->revision);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ closure->modes++;
+ }
+ }
+ }
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector to add modes to
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the appropriate standard (DMT,
+ * GTF or CVT). Grab them from @edid and add them to the list.
+ */
+static int
+add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
+ &closure);
+
+ /* XXX should also look for standard codes in VTB blocks */
+
+ return modes + closure.modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, j, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ struct cvt_timing *cvt;
+ const int rates[] = { 60, 85, 75, 60, 50 };
+ const u8 empty[3] = { 0, 0, 0 };
+
+ for (i = 0; i < 4; i++) {
+ int width = 0, height;
+ cvt = &(timing->data.other_data.data.cvt[i]);
+
+ if (!memcmp(cvt->code, empty, 3))
+ continue;
+
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+ switch (cvt->code[1] & 0x0c) {
+ case 0x00:
+ width = height * 4 / 3;
+ break;
+ case 0x04:
+ width = height * 16 / 9;
+ break;
+ case 0x08:
+ width = height * 16 / 10;
+ break;
+ case 0x0c:
+ width = height * 15 / 9;
+ break;
+ }
+
+ for (j = 1; j < 5; j++) {
+ if (cvt->code[2] & (1 << j)) {
+ newmode = drm_cvt_mode(dev, width, height,
+ rates[j], j == 0,
+ false, false);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
+static void
+do_cvt_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+
+ if (data->type == EDID_DETAIL_CVT_3BYTE)
+ closure->modes += drm_cvt_modes(closure->connector, timing);
+}
+
+static int
+add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ if (version_greater(edid, 1, 2))
+ drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
+
+ /* XXX should also look for CVT codes in VTB blocks */
+
+ return closure.modes;
+}
+
+static void
+do_detailed_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_mode *newmode;
+
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
+ if (!newmode)
+ return;
+
+ if (closure->preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = 0;
+ }
+}
+
+/*
+ * add_detailed_modes - Add modes from detailed timings
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ */
+static int
+add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+ u32 quirks)
+{
+ struct detailed_mode_closure closure = {
+ connector,
+ edid,
+ 1,
+ quirks,
+ 0
+ };
+
+ if (closure.preferred && !version_greater(edid, 1, 3))
+ closure.preferred =
+ (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+ drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
+
+ return closure.modes;
+}
+
+#define HDMI_IDENTIFIER 0x000C03
+#define AUDIO_BLOCK 0x01
+#define VENDOR_BLOCK 0x03
+#define SPEAKER_BLOCK 0x04
+#define EDID_BASIC_AUDIO (1 << 6)
+
+/**
+ * Search EDID for CEA extension block.
+ */
+u8 *drm_find_cea_extension(struct edid *edid)
+{
+ u8 *edid_ext = NULL;
+ int i;
+
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return NULL;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return NULL;
+
+ return edid_ext;
+}
+
+static void
+parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
+{
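+	/* Byte offsets below follow the CEA HDMI Vendor-Specific Data Block layout. */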
+ connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
+
+ connector->dvi_dual = db[6] & 1;
+ connector->max_tmds_clock = db[7] * 5;
+
+ connector->latency_present[0] = db[8] >> 7;
+ connector->latency_present[1] = (db[8] >> 6) & 1;
+ connector->video_latency[0] = db[9];
+ connector->audio_latency[0] = db[10];
+ connector->video_latency[1] = db[11];
+ connector->audio_latency[1] = db[12];
+
+ DRM_DEBUG_KMS("HDMI: DVI dual %d, "
+ "max TMDS clock %d, "
+ "latency present %d %d, "
+ "video latency %d %d, "
+ "audio latency %d %d\n",
+ connector->dvi_dual,
+ connector->max_tmds_clock,
+ (int) connector->latency_present[0],
+ (int) connector->latency_present[1],
+ connector->video_latency[0],
+ connector->video_latency[1],
+ connector->audio_latency[0],
+ connector->audio_latency[1]);
+}
+
+static void
+monitor_name(struct detailed_timing *t, void *data)
+{
+ if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
+ *(u8 **)data = t->data.other_data.data.str.str;
+}
+
+/**
+ * drm_edid_to_eld - build ELD from EDID
+ * @connector: connector corresponding to the HDMI/DP sink
+ * @edid: EDID to parse
+ *
+ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
+ * Some ELD fields are left to the graphics driver caller:
+ * - Conn_Type
+ * - HDCP
+ * - Port_ID
+ */
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+{
+ uint8_t *eld = connector->eld;
+ u8 *cea;
+ u8 *name;
+ u8 *db;
+ int sad_count = 0;
+ int mnl;
+ int dbl;
+
+ memset(eld, 0, sizeof(connector->eld));
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
+ return;
+ }
+
+ name = NULL;
+ drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+ for (mnl = 0; name && mnl < 13; mnl++) {
+ if (name[mnl] == 0x0a)
+ break;
+ eld[20 + mnl] = name[mnl];
+ }
+ eld[4] = (cea[1] << 5) | mnl;
+ DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
+
+ eld[0] = 2 << 3; /* ELD version: 2 */
+
+ eld[16] = edid->mfg_id[0];
+ eld[17] = edid->mfg_id[1];
+ eld[18] = edid->prod_code[0];
+ eld[19] = edid->prod_code[1];
+
+ for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+ dbl = db[0] & 0x1f;
+
+ switch ((db[0] & 0xe0) >> 5) {
+ case AUDIO_BLOCK: /* Audio Data Block, contains SADs */
+ sad_count = dbl / 3;
+ memcpy(eld + 20 + mnl, &db[1], dbl);
+ break;
+ case SPEAKER_BLOCK: /* Speaker Allocation Data Block */
+ eld[7] = db[1];
+ break;
+ case VENDOR_BLOCK:
+ /* HDMI Vendor-Specific Data Block */
+ if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+ parse_hdmi_vsdb(connector, db);
+ break;
+ default:
+ break;
+ }
+ }
+ eld[5] |= sad_count << 4;
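+	/* Baseline ELD length in 4-byte units: header (20) + name + SADs, rounded up. */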
+ eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+
+ DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+}
+
+/**
+ * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in milliseconds
+ * @connector: connector associated with the HDMI/DP sink
+ * @mode: the display mode
+ */
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ int a, v;
+
+ if (!connector->latency_present[0])
+ return 0;
+ if (!connector->latency_present[1])
+ i = 0;
+
+ a = connector->audio_latency[i];
+ v = connector->video_latency[i];
+
+ /*
+ * HDMI/DP sink doesn't support audio or video?
+ */
+ if (a == 255 || v == 255)
+ return 0;
+
+ /*
+	 * Convert raw EDID values to milliseconds.
+ * Treat unknown latency as 0ms.
+ */
+ if (a)
+ a = min(2 * (a - 1), 500);
+ if (v)
+ v = min(2 * (v - 1), 500);
+
+ return max(v - a, 0);
+}
+
+/**
+ * drm_select_eld - select one ELD from multiple HDMI/DP sinks
+ * @encoder: the encoder that just changed display mode
+ * @mode: the adjusted display mode
+ *
+ * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
+ * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
+ */
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder && connector->eld[0])
+ return connector;
+
+ return NULL;
+}
+
+/**
+ * drm_detect_hdmi_monitor - detect whether the monitor is HDMI.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, hdmi_id;
+ int start_offset, end_offset;
+ bool is_hdmi = false;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ goto end;
+
+ /* Data block offset in CEA extension block */
+ start_offset = 4;
+ end_offset = edid_ext[2];
+
+ /*
+ * Because HDMI identifier is in Vendor Specific Block,
+ * search it from all data blocks of CEA extension.
+ */
+ for (i = start_offset; i < end_offset;
+ /* Increased by data block len */
+ i += ((edid_ext[i] & 0x1f) + 1)) {
+ /* Find vendor specific block */
+ if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
+ hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
+ edid_ext[i + 3] << 16;
+ /* Find HDMI identifier */
+ if (hdmi_id == HDMI_IDENTIFIER)
+ is_hdmi = true;
+ break;
+ }
+ }
+
+end:
+ return is_hdmi;
+}
+
+/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ * @edid: monitor EDID information
+ *
+ * The monitor must have a CEA extension block. If it advertises 'basic
+ * audio' but has no CEA audio blocks, it supports 'basic audio' only.
+ * If any audio data block advertises a supported audio format, assume at
+ * least 'basic audio' support, even if 'basic audio' is not set in the
+ * EDID.
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, j;
+ bool has_audio = false;
+ int start_offset, end_offset;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ goto end;
+
+ has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+ if (has_audio) {
+ DRM_DEBUG_KMS("Monitor has basic audio support\n");
+ goto end;
+ }
+
+ /* Data block offset in CEA extension block */
+ start_offset = 4;
+ end_offset = edid_ext[2];
+
+ for (i = start_offset; i < end_offset;
+ i += ((edid_ext[i] & 0x1f) + 1)) {
+ if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+ has_audio = true;
+ for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+ DRM_DEBUG_KMS("CEA audio format %d\n",
+ (edid_ext[i + j] >> 3) & 0xf);
+ goto end;
+ }
+ }
+end:
+ return has_audio;
+}
+
+/**
+ * drm_add_display_info - pull display info out if present
+ * @edid: EDID data
+ * @info: display info (attached to connector)
+ *
+ * Grab any available display info and stuff it into the drm_display_info
+ * structure that's part of the connector. Useful for tracking bpp and
+ * color spaces.
+ */
+static void drm_add_display_info(struct edid *edid,
+ struct drm_display_info *info)
+{
+ u8 *edid_ext;
+
+ info->width_mm = edid->width_cm * 10;
+ info->height_mm = edid->height_cm * 10;
+
+ /* driver figures it out in this case */
+ info->bpc = 0;
+ info->color_formats = 0;
+
+ /* Only defined for 1.4 with digital displays */
+ if (edid->revision < 4)
+ return;
+
+ if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+ return;
+
+ switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+ case DRM_EDID_DIGITAL_DEPTH_6:
+ info->bpc = 6;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_8:
+ info->bpc = 8;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_10:
+ info->bpc = 10;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_12:
+ info->bpc = 12;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_14:
+ info->bpc = 14;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_16:
+ info->bpc = 16;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_UNDEF:
+ default:
+ info->bpc = 0;
+ break;
+ }
+
+	/* RGB support is assumed; YCrCb support comes from the feature flags. */
+	info->color_formats = DRM_COLOR_FORMAT_RGB444;
+	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return;
+
+ info->cea_rev = edid_ext[1];
+}
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int num_modes = 0;
+ u32 quirks;
+
+ if (edid == NULL) {
+ return 0;
+ }
+ if (!drm_edid_is_valid(edid)) {
+ device_printf(connector->dev->device, "%s: EDID invalid.\n",
+ drm_get_connector_name(connector));
+ return 0;
+ }
+
+ quirks = edid_get_quirks(edid);
+
+ /*
+ * EDID spec says modes should be preferred in this order:
+ * - preferred detailed mode
+ * - other detailed modes from base block
+ * - detailed modes from extension blocks
+ * - CVT 3-byte code modes
+ * - standard timing codes
+ * - established timing codes
+ * - modes inferred from GTF or CVT range information
+ *
+ * We get this pretty much right.
+ *
+ * XXX order for additional mode types in extension blocks?
+ */
+ num_modes += add_detailed_modes(connector, edid, quirks);
+ num_modes += add_cvt_modes(connector, edid);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
+ num_modes += add_inferred_modes(connector, edid);
+
+ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector, quirks);
+
+ drm_add_display_info(edid, &connector->display_info);
+
+ return num_modes;
+}
+
+/**
+ * drm_add_modes_noedid - add modes for the connectors without EDID
+ * @connector: connector we're probing
+ * @hdisplay: the horizontal display limit
+ * @vdisplay: the vertical display limit
+ *
+ * Add the specified modes to the connector's mode list. A mode is added
+ * only when its hdisplay/vdisplay do not exceed the given limits.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay)
+{
+ int i, count, num_modes = 0;
+ struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+
+ count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+ if (hdisplay < 0)
+ hdisplay = 0;
+ if (vdisplay < 0)
+ vdisplay = 0;
+
+ for (i = 0; i < count; i++) {
+ struct drm_display_mode *ptr = &drm_dmt_modes[i];
+ if (hdisplay && vdisplay) {
+ /*
+			 * Only when both limits are valid are they used to
+			 * decide whether the mode should be added to the
+			 * connector's mode list.
+ */
+ if (ptr->hdisplay > hdisplay ||
+ ptr->vdisplay > vdisplay)
+ continue;
+ }
+ if (drm_mode_vrefresh(ptr) > 61)
+ continue;
+ mode = drm_mode_duplicate(dev, ptr);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+ return num_modes;
+}
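+
+/*
+ * Usage sketch (hypothetical driver code, not part of this commit): a
+ * connector's get_modes hook can fall back to the DMT list via
+ * drm_add_modes_noedid() when no EDID is available, since
+ * drm_add_edid_modes() returns 0 for a NULL or invalid EDID.
+ */
+#if 0
+static int
+example_get_modes(struct drm_connector *connector)
+{
+ struct edid *edid = example_fetch_edid(connector); /* hypothetical */
+ int count;
+
+ count = drm_add_edid_modes(connector, edid);
+ if (count == 0)
+ count = drm_add_modes_noedid(connector, 1024, 768);
+ return (count);
+}
+#endif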
diff --git a/sys/dev/drm2/drm_edid.h b/sys/dev/drm2/drm_edid.h
new file mode 100644
index 0000000..e0c1470
--- /dev/null
+++ b/sys/dev/drm2/drm_edid.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+#ifndef __DRM_EDID_H__
+#define __DRM_EDID_H__
+
+#include <sys/types.h>
+#include <dev/drm2/drmP.h>
+
+#define EDID_LENGTH 128
+#define DDC_ADDR 0x50
+
+#define CEA_EXT 0x02
+#define VTB_EXT 0x10
+#define DI_EXT 0x40
+#define LS_EXT 0x50
+#define MI_EXT 0x60
+
+struct est_timings {
+ u8 t1;
+ u8 t2;
+ u8 mfg_rsvd;
+} __attribute__((packed));
+
+/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
+#define EDID_TIMING_ASPECT_SHIFT 6
+#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
+
+/* need to add 60 */
+#define EDID_TIMING_VFREQ_SHIFT 0
+#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
+
+struct std_timing {
+ u8 hsize; /* need to multiply by 8 then add 248 */
+ u8 vfreq_aspect;
+} __attribute__((packed));
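+
+/*
+ * Decoding sketch (illustrative only, not used by the driver), based on
+ * the field comments above: e.g. hsize 0x81 with vfreq_aspect 0xc0
+ * decodes to a 1280-wide 16:9 mode at 60Hz.
+ */
+#if 0
+static int
+example_std_timing_width(const struct std_timing *t)
+{
+ return ((t->hsize * 8) + 248);
+}
+
+static int
+example_std_timing_vfreq(const struct std_timing *t)
+{
+ return (((t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) >>
+ EDID_TIMING_VFREQ_SHIFT) + 60);
+}
+#endif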
+
+#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
+#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
+#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
+#define DRM_EDID_PT_STEREO (1 << 5)
+#define DRM_EDID_PT_INTERLACED (1 << 7)
+
+/* If detailed data is pixel timing */
+struct detailed_pixel_timing {
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hactive_hblank_hi;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vactive_vblank_hi;
+ u8 hsync_offset_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_offset_pulse_width_lo;
+ u8 hsync_vsync_offset_pulse_width_hi;
+ u8 width_mm_lo;
+ u8 height_mm_lo;
+ u8 width_height_mm_hi;
+ u8 hborder;
+ u8 vborder;
+ u8 misc;
+} __attribute__((packed));
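+
+/*
+ * Illustrative decode (a sketch, not part of the driver): the 12-bit
+ * horizontal active count is split between hactive_lo and the high
+ * nibble of hactive_hblank_hi; the other lo/hi field pairs follow the
+ * same pattern.
+ */
+#if 0
+static int
+example_detailed_hactive(const struct detailed_pixel_timing *pt)
+{
+ return (pt->hactive_lo | ((pt->hactive_hblank_hi & 0xf0) << 4));
+}
+#endif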
+
+/* If it's not pixel timing, it'll be one of the below */
+struct detailed_data_string {
+ u8 str[13];
+} __attribute__((packed));
+
+struct detailed_data_monitor_range {
+ u8 min_vfreq;
+ u8 max_vfreq;
+ u8 min_hfreq_khz;
+ u8 max_hfreq_khz;
+ u8 pixel_clock_mhz; /* need to multiply by 10 */
+ u16 sec_gtf_toggle; /* A000=use above, 20=use below */
+ u8 hfreq_start_khz; /* need to multiply by 2 */
+ u8 c; /* need to divide by 2 */
+ u16 m;
+ u8 k;
+ u8 j; /* need to divide by 2 */
+} __attribute__((packed));
+
+struct detailed_data_wpindex {
+ u8 white_yx_lo; /* Lower 2 bits each */
+ u8 white_x_hi;
+ u8 white_y_hi;
+ u8 gamma; /* need to divide by 100 then add 1 */
+} __attribute__((packed));
+
+struct detailed_data_color_point {
+ u8 windex1;
+ u8 wpindex1[3];
+ u8 windex2;
+ u8 wpindex2[3];
+} __attribute__((packed));
+
+struct cvt_timing {
+ u8 code[3];
+} __attribute__((packed));
+
+struct detailed_non_pixel {
+ u8 pad1;
+ u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
+ fb=color point data, fa=standard timing data,
+ f9=undefined, f8=mfg. reserved */
+ u8 pad2;
+ union {
+ struct detailed_data_string str;
+ struct detailed_data_monitor_range range;
+ struct detailed_data_wpindex color;
+ struct std_timing timings[6];
+ struct cvt_timing cvt[4];
+ } data;
+} __attribute__((packed));
+
+#define EDID_DETAIL_EST_TIMINGS 0xf7
+#define EDID_DETAIL_CVT_3BYTE 0xf8
+#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
+#define EDID_DETAIL_STD_MODES 0xfa
+#define EDID_DETAIL_MONITOR_CPDATA 0xfb
+#define EDID_DETAIL_MONITOR_NAME 0xfc
+#define EDID_DETAIL_MONITOR_RANGE 0xfd
+#define EDID_DETAIL_MONITOR_STRING 0xfe
+#define EDID_DETAIL_MONITOR_SERIAL 0xff
+
+struct detailed_timing {
+ u16 pixel_clock; /* need to multiply by 10 KHz */
+ union {
+ struct detailed_pixel_timing pixel_data;
+ struct detailed_non_pixel other_data;
+ } data;
+} __attribute__((packed));
+
+#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
+#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
+#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
+#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
+#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
+#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
+#define DRM_EDID_INPUT_DIGITAL (1 << 7)
+#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4)
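+/* Digital interface type: bits 3:0 of the input byte (EDID 1.4) */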
+#define DRM_EDID_DIGITAL_TYPE_UNDEF (0)
+#define DRM_EDID_DIGITAL_TYPE_DVI (1)
+#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2)
+#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3)
+#define DRM_EDID_DIGITAL_TYPE_MDDI (4)
+#define DRM_EDID_DIGITAL_TYPE_DP (5)
+
+#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
+#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
+#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
+#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
+/* If digital */
+#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3)
+#define DRM_EDID_FEATURE_RGB (0 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */
+
+#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
+#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
+#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
+
+struct edid {
+ u8 header[8];
+ /* Vendor & product info */
+ u8 mfg_id[2];
+ u8 prod_code[2];
+ u32 serial; /* FIXME: byte order */
+ u8 mfg_week;
+ u8 mfg_year;
+ /* EDID version */
+ u8 version;
+ u8 revision;
+ /* Display info: */
+ u8 input;
+ u8 width_cm;
+ u8 height_cm;
+ u8 gamma;
+ u8 features;
+ /* Color characteristics */
+ u8 red_green_lo;
+ u8 black_white_lo;
+ u8 red_x;
+ u8 red_y;
+ u8 green_x;
+ u8 green_y;
+ u8 blue_x;
+ u8 blue_y;
+ u8 white_x;
+ u8 white_y;
+ /* Est. timings and mfg rsvd timings */
+ struct est_timings established_timings;
+ /* Standard timings 1-8 */
+ struct std_timing standard_timings[8];
+ /* Detailed timings 1-4 */
+ struct detailed_timing detailed_timings[4];
+ /* Number of 128 byte ext. blocks */
+ u8 extensions;
+ /* Checksum */
+ u8 checksum;
+} __attribute__((packed));
+
+#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
+
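+/*
+ * Validity sketch (assumed semantics of the drm_edid_is_valid() check
+ * in drm_edid.c): an EDID_LENGTH-byte block, checksum byte included,
+ * must sum to zero modulo 256.
+ */
+#if 0
+static int
+example_edid_block_valid(const u8 *block)
+{
+ u8 sum = 0;
+ int i;
+
+ for (i = 0; i < EDID_LENGTH; i++)
+ sum += block[i];
+ return (sum == 0);
+}
+#endif
+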
+struct drm_encoder;
+struct drm_connector;
+struct drm_display_mode;
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+
+#endif /* __DRM_EDID_H__ */
diff --git a/sys/dev/drm2/drm_edid_modes.h b/sys/dev/drm2/drm_edid_modes.h
new file mode 100644
index 0000000..beded26
--- /dev/null
+++ b/sys/dev/drm2/drm_edid_modes.h
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_edid.h>
+
+/*
+ * Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c, with the
+ * reduced-blanking modes removed.
+ */
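+/*
+ * Column order of the DRM_MODE() initializers below (clock in kHz):
+ * name, type, clock, hdisplay, hsync_start, hsync_end, htotal, hskew,
+ * vdisplay, vsync_start, vsync_end, vtotal, vscan, flags.
+ */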
+static struct drm_display_mode drm_dmt_modes[] = {
+ /* 640x350@85Hz */
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 350, 382, 385, 445, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x400@85Hz */
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 400, 401, 404, 445, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x400@85Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+ 828, 936, 0, 400, 401, 404, 446, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 492, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@85Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+ 752, 832, 0, 480, 481, 484, 509, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@56Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@72Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@85Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+ 896, 1048, 0, 600, 601, 604, 631, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 848x480@60Hz */
+ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+ 976, 1088, 0, 480, 486, 494, 517, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@43Hz, interlace */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 772, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@85Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@75Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+ 1488, 1696, 0, 768, 771, 778, 805, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@85Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+ 1496, 1712, 0, 768, 771, 778, 809, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@75Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+ 1488, 1696, 0, 800, 803, 809, 838, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@85Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+ 1496, 1712, 0, 800, 803, 809, 843, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@85Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+ 1504, 1728, 0, 960, 961, 964, 1011, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@75Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@85Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+ 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@75Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+ 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@85Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+ 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@75Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+ 1688, 1936, 0, 900, 903, 909, 942, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@85Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+ 1696, 1952, 0, 900, 903, 909, 948, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@65Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@70Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@75Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@85Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@75Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+ 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@85Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+ 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@75Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+ 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@75Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+ 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@75Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+ 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@85Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+ 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@75Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+ 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@75Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@85Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_dmt_modes =
+ sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+
+static struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+static const struct {
+ short w;
+ short h;
+ short r;
+ short rb;
+} est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
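+
+/*
+ * Decoding sketch: each payload byte above supplies up to eight modes,
+ * most-significant bit first, so bit j (7..0) of payload byte i (0 for
+ * "byte 6" through 5 for "byte 11") selects est3_modes[i * 8 + (7 - j)];
+ * the walker that applies this lives in drm_edid.c.
+ */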
diff --git a/sys/dev/drm2/drm_fb_helper.c b/sys/dev/drm2/drm_fb_helper.c
new file mode 100644
index 0000000..2f24ec4
--- /dev/null
+++ b/sys/dev/drm2/drm_fb_helper.c
@@ -0,0 +1,1568 @@
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_fb_helper.h>
+#include <dev/drm2/drm_crtc_helper.h>
+
+static DRM_LIST_HEAD(kernel_fb_helper_list);
+
+/* simple single crtc case helper function */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ fb_helper_connector = malloc(
+ sizeof(struct drm_fb_helper_connector), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ }
+ return 0;
+}
+
+const char *fb_mode_option;
+
+/**
+ * drm_fb_helper_connector_parse_command_line - parse command line for connector
+ * @fb_helper_conn: fb helper connector to parse the line for
+ * @mode_option: per connector mode option
+ *
+ * This parses the connector-specific, then the generic, command line for
+ * modes and options to configure the connector.
+ *
+ * This uses the same format as fb modedb.c, plus an extra force-enable
+ * ('e'), force-enable-digital ('D') or force-disable ('d') flag at the
+ * end:
+ *
+ * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ */
+static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
+ const char *mode_option)
+{
+ const char *name;
+ unsigned int namelen;
+ int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
+ unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
+ int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
+ int i;
+ enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_connector *connector;
+
+ if (!fb_helper_conn)
+ return false;
+ connector = fb_helper_conn->connector;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (!mode_option)
+ mode_option = fb_mode_option;
+
+ if (!mode_option) {
+ cmdline_mode->specified = false;
+ return false;
+ }
+
+ name = mode_option;
+ namelen = strlen(name);
+ for (i = namelen-1; i >= 0; i--) {
+ switch (name[i]) {
+ case '@':
+ namelen = i;
+ if (!refresh_specified && !bpp_specified &&
+ !yres_specified) {
+ refresh = strtol(&name[i+1], NULL, 10);
+ refresh_specified = 1;
+ if (cvt || rb)
+ cvt = 0;
+ } else
+ goto done;
+ break;
+ case '-':
+ namelen = i;
+ if (!bpp_specified && !yres_specified) {
+ bpp = strtol(&name[i+1], NULL, 10);
+ bpp_specified = 1;
+ if (cvt || rb)
+ cvt = 0;
+ } else
+ goto done;
+ break;
+ case 'x':
+ if (!yres_specified) {
+ yres = strtol(&name[i+1], NULL, 10);
+ yres_specified = 1;
+ } else
+ goto done;
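+ /* FALLTHROUGH */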
+ case '0' ... '9':
+ break;
+ case 'M':
+ if (!yres_specified)
+ cvt = 1;
+ break;
+ case 'R':
+ if (cvt)
+ rb = 1;
+ break;
+ case 'm':
+ if (!cvt)
+ margins = 1;
+ break;
+ case 'i':
+ if (!cvt)
+ interlace = 1;
+ break;
+ case 'e':
+ force = DRM_FORCE_ON;
+ break;
+ case 'D':
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
+ force = DRM_FORCE_ON;
+ else
+ force = DRM_FORCE_ON_DIGITAL;
+ break;
+ case 'd':
+ force = DRM_FORCE_OFF;
+ break;
+ default:
+ goto done;
+ }
+ }
+ if (i < 0 && yres_specified) {
+ xres = strtol(name, NULL, 10);
+ res_specified = 1;
+ }
+done:
+
+ DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+ drm_get_connector_name(connector), xres, yres,
+ (refresh) ? refresh : 60, (rb) ? " reduced blanking" :
+ "", (margins) ? " with margins" : "", (interlace) ?
+ " interlaced" : "");
+
+ if (force) {
+ const char *s;
+ switch (force) {
+ case DRM_FORCE_OFF: s = "OFF"; break;
+ case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+ default:
+ case DRM_FORCE_ON: s = "ON"; break;
+ }
+
+ DRM_INFO("forcing %s connector %s\n",
+ drm_get_connector_name(connector), s);
+ connector->force = force;
+ }
+
+ if (res_specified) {
+ cmdline_mode->specified = true;
+ cmdline_mode->xres = xres;
+ cmdline_mode->yres = yres;
+ }
+
+ if (refresh_specified) {
+ cmdline_mode->refresh_specified = true;
+ cmdline_mode->refresh = refresh;
+ }
+
+ if (bpp_specified) {
+ cmdline_mode->bpp_specified = true;
+ cmdline_mode->bpp = bpp;
+ }
+ cmdline_mode->rb = rb ? true : false;
+ cmdline_mode->cvt = cvt ? true : false;
+ cmdline_mode->interlace = interlace ? true : false;
+
+ return true;
+}
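+
+/*
+ * Example option strings accepted above (a sketch of the grammar):
+ * "1024x768" sets only xres/yres; "1280x1024-32@60" adds bpp 32 and a
+ * 60Hz refresh; "1024x768M@60" requests CVT timings; a trailing 'e',
+ * 'D' or 'd' forces the connector on, on-digital or off.
+ */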
+
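+/*
+ * Stub: FreeBSD has no fb modedb to query, so always report that no
+ * option string exists; the caller then skips command line parsing.
+ */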
+static int
+fb_get_options(const char *connector_name, char **option)
+{
+
+ return (1);
+}
+
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ char *option = NULL;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ /* do something on return - turn off connector maybe */
+ if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
+ continue;
+
+ drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
+ }
+ return 0;
+}
+
+#if 0
+static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++)
+ helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+ uint16_t *r_base, *g_base, *b_base;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+}
+#endif
+
+#if 0
+int drm_fb_helper_debug_enter(struct fb_info *info)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_crtc_helper_funcs *funcs;
+ int i;
+
+ if (list_empty(&kernel_fb_helper_list))
+ return false;
+
+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set =
+ &helper->crtc_info[i].mode_set;
+
+ if (!mode_set->crtc->enabled)
+ continue;
+
+ funcs = mode_set->crtc->helper_private;
+ drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
+ funcs->mode_set_base_atomic(mode_set->crtc,
+ mode_set->fb,
+ mode_set->x,
+ mode_set->y,
+ ENTER_ATOMIC_MODE_SET);
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#if 0
+/* Find the real fb for a given fb helper CRTC */
+static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *c;
+
+ list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ if (crtc->base.id == c->base.id)
+ return c->fb;
+ }
+
+ return NULL;
+}
+#endif
+
+#if 0
+int drm_fb_helper_debug_leave(struct fb_info *info)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_framebuffer *fb;
+ int i;
+
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+ crtc = mode_set->crtc;
+ funcs = crtc->helper_private;
+ fb = drm_mode_config_fb(crtc);
+
+ if (!crtc->enabled)
+ continue;
+
+ if (!fb) {
+ DRM_ERROR("no fb to restore??\n");
+ continue;
+ }
+
+ drm_fb_helper_restore_lut_atomic(mode_set->crtc);
+ funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
+ crtc->y, LEAVE_ATOMIC_MODE_SET);
+ }
+
+ return 0;
+}
+#endif
+
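+/*
+ * Returns true if restoring any crtc configuration failed, false if
+ * every set_config call succeeded (note the inverted sense relative to
+ * a boolean "success" return).
+ */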
+bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+ bool error = false;
+ int i, ret;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+ ret = drm_crtc_helper_set_config(mode_set);
+ if (ret)
+ error = true;
+ }
+ return error;
+}
+
+#if 0
+bool drm_fb_helper_force_kernel_mode(void)
+{
+ bool ret, error = false;
+ struct drm_fb_helper *helper;
+
+ if (list_empty(&kernel_fb_helper_list))
+ return false;
+
+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+ if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ continue;
+
+ ret = drm_fb_helper_restore_fbdev_mode(helper);
+ if (ret)
+ error = true;
+ }
+ return error;
+}
+#endif
+
+#if 0
+int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
+ void *panic_str)
+{
+ printf("panic occurred, switching back to text console\n");
+ return drm_fb_helper_force_kernel_mode();
+}
+
+static struct notifier_block paniced = {
+ .notifier_call = drm_fb_helper_panic,
+};
+
+/**
+ * drm_fb_helper_restore - restore the framebuffer console (kernel) config
+ *
+ * Restores the kernel's fbcon mode; used for the lastclose and panic paths.
+ */
+void drm_fb_helper_restore(void)
+{
+ bool ret;
+ ret = drm_fb_helper_force_kernel_mode();
+ if (ret == true)
+ DRM_ERROR("Failed to restore crtc configuration\n");
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
+{
+ drm_fb_helper_restore();
+}
+static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
+
+static void drm_fb_helper_sysrq(int dummy1)
+{
+ schedule_work(&drm_fb_helper_restore_work);
+}
+
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
+ .handler = drm_fb_helper_sysrq,
+ .help_msg = "force-fb(V)",
+ .action_msg = "Restore framebuffer console",
+};
+#else
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
+#endif
+#endif
+
+#if 0
+static void drm_fb_helper_on(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ int i, j;
+
+ /*
+ * For each CRTC in this fb, turn the crtc on then,
+ * find all associated encoders and turn them on.
+ */
+ sx_xlock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
+
+ if (!crtc->enabled)
+ continue;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+
+ /* Walk the connectors & encoders on this fb turning them on */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ connector = fb_helper->connector_info[j]->connector;
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_connector_property_set_value(connector,
+ dev->mode_config.dpms_property,
+ DRM_MODE_DPMS_ON);
+ }
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+ }
+ }
+ }
+ sx_xunlock(&dev->mode_config.mutex);
+}
+#endif
+
+#if 0
+static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ int i, j;
+
+ /*
+ * For each CRTC in this fb, find all associated encoders
+ * and turn them off, then turn off the CRTC.
+ */
+ sx_xlock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
+
+ if (!crtc->enabled)
+ continue;
+
+ /* Walk the connectors on this fb and mark them off */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ connector = fb_helper->connector_info[j]->connector;
+ connector->dpms = dpms_mode;
+ drm_connector_property_set_value(connector,
+ dev->mode_config.dpms_property,
+ dpms_mode);
+ }
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, dpms_mode);
+ }
+ }
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ }
+ sx_xunlock(&dev->mode_config.mutex);
+}
+#endif
+
+#if 0
+int drm_fb_helper_blank(int blank, struct fb_info *info)
+{
+ switch (blank) {
+ /* Display: On; HSync: On, VSync: On */
+ case FB_BLANK_UNBLANK:
+ drm_fb_helper_on(info);
+ break;
+ /* Display: Off; HSync: On, VSync: On */
+ case FB_BLANK_NORMAL:
+ drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+ break;
+ /* Display: Off; HSync: Off, VSync: On */
+ case FB_BLANK_HSYNC_SUSPEND:
+ drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+ break;
+ /* Display: Off; HSync: On, VSync: Off */
+ case FB_BLANK_VSYNC_SUSPEND:
+ drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
+ break;
+ /* Display: Off; HSync: Off, VSync: Off */
+ case FB_BLANK_POWERDOWN:
+ drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
+ break;
+ }
+ return 0;
+}
+#endif
+
+static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
+{
+ int i;
+
+ for (i = 0; i < helper->connector_count; i++)
+ free(helper->connector_info[i], DRM_MEM_KMS);
+ free(helper->connector_info, DRM_MEM_KMS);
+ for (i = 0; i < helper->crtc_count; i++)
+ free(helper->crtc_info[i].mode_set.connectors, DRM_MEM_KMS);
+ free(helper->crtc_info, DRM_MEM_KMS);
+}
+
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ int crtc_count, int max_conn_count)
+{
+ struct drm_crtc *crtc;
+ int i;
+
+ fb_helper->dev = dev;
+
+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+ fb_helper->crtc_info = malloc(crtc_count *
+ sizeof(struct drm_fb_helper_crtc), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ fb_helper->crtc_count = crtc_count;
+ fb_helper->connector_info = malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_connector *), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+ fb_helper->connector_count = 0;
+
+ for (i = 0; i < crtc_count; i++) {
+ fb_helper->crtc_info[i].mode_set.connectors =
+ malloc(max_conn_count * sizeof(struct drm_connector *),
+ DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ fb_helper->crtc_info[i].mode_set.num_connectors = 0;
+ }
+
+ i = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ fb_helper->crtc_info[i].crtc_id = crtc->base.id;
+ fb_helper->crtc_info[i].mode_set.crtc = crtc;
+ i++;
+ }
+ fb_helper->conn_limit = max_conn_count;
+ return 0;
+}
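+
+/*
+ * Typical fbdev setup order in a driver (a sketch; the softc layout is
+ * hypothetical):
+ *
+ *	drm_fb_helper_init(dev, &sc->fb_helper, num_crtc, max_conn);
+ *	drm_fb_helper_single_add_all_connectors(&sc->fb_helper);
+ *	drm_fb_helper_initial_config(&sc->fb_helper, 32);
+ */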
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+ if (!list_empty(&fb_helper->kernel_fb_list)) {
+ list_del(&fb_helper->kernel_fb_list);
+ if (list_empty(&kernel_fb_helper_list)) {
+#if 0
+ printk(KERN_INFO "drm: unregistered panic notifier\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &paniced);
+ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+#endif
+ }
+ }
+
+ drm_fb_helper_crtc_free(fb_helper);
+}
+
+#if 0
+static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, u16 regno, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ int pindex;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 *palette;
+ u32 value;
+ /* place color in pseudopalette */
+ if (regno > 16)
+ return -EINVAL;
+ palette = (u32 *)info->pseudo_palette;
+ red >>= (16 - info->var.red.length);
+ green >>= (16 - info->var.green.length);
+ blue >>= (16 - info->var.blue.length);
+ value = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
+ palette[regno] = value;
+ return 0;
+ }
+
+ pindex = regno;
+
+ if (fb->bits_per_pixel == 16) {
+ pindex = regno << 3;
+
+ if (fb->depth == 16 && regno > 63)
+ return -EINVAL;
+ if (fb->depth == 15 && regno > 31)
+ return -EINVAL;
+
+ if (fb->depth == 16) {
+ u16 r, g, b;
+ int i;
+ if (regno < 32) {
+ for (i = 0; i < 8; i++)
+ fb_helper->funcs->gamma_set(crtc, red,
+ green, blue, pindex + i);
+ }
+
+ fb_helper->funcs->gamma_get(crtc, &r,
+ &g, &b,
+ pindex >> 1);
+
+ for (i = 0; i < 4; i++)
+ fb_helper->funcs->gamma_set(crtc, r,
+ green, b,
+ (pindex >> 1) + i);
+ }
+ }
+
+ if (fb->depth != 16)
+ fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
+ return 0;
+}
+#endif
+
+#if 0
+int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ u16 *red, *green, *blue, *transp;
+ struct drm_crtc *crtc;
+ int i, j, rc = 0;
+ int start;
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
+
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ start = cmap->start;
+
+ for (j = 0; j < cmap->len; j++) {
+ u16 hred, hgreen, hblue, htransp = 0xffff;
+
+ hred = *red++;
+ hgreen = *green++;
+ hblue = *blue++;
+
+ if (transp)
+ htransp = *transp++;
+
+ rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
+ if (rc)
+ return rc;
+ }
+ crtc_funcs->load_lut(crtc);
+ }
+ return rc;
+}
+#endif
+
+#if 0
+int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ int depth;
+
+ if (var->pixclock != 0 || in_dbg_master())
+ return -EINVAL;
+
+ /* Need to resize the fb object !!! */
+ if (var->bits_per_pixel > fb->bits_per_pixel ||
+ var->xres > fb->width || var->yres > fb->height ||
+ var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+ "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
+ var->xres, var->yres, var->bits_per_pixel,
+ var->xres_virtual, var->yres_virtual,
+ fb->width, fb->height, fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ depth = (var->green.length == 6) ? 16 : 15;
+ break;
+ case 32:
+ depth = (var->transp.length > 0) ? 32 : 24;
+ break;
+ default:
+ depth = var->bits_per_pixel;
+ break;
+ }
+
+ switch (depth) {
+ case 8:
+ var->red.offset = 0;
+ var->green.offset = 0;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 15:
+ var->red.offset = 10;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 5;
+ var->blue.length = 5;
+ var->transp.length = 1;
+ var->transp.offset = 15;
+ break;
+ case 16:
+ var->red.offset = 11;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 6;
+ var->blue.length = 5;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 24:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 32:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 8;
+ var->transp.offset = 24;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+#endif
+
+#if 0
+/* this will let fbcon do the mode init */
+int drm_fb_helper_set_par(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct fb_var_screeninfo *var = &info->var;
+ struct drm_crtc *crtc;
+ int ret;
+ int i;
+
+ if (var->pixclock != 0) {
+ DRM_ERROR("PIXEL CLOCK SET\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ if (ret) {
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (fb_helper->delayed_hotplug) {
+ fb_helper->delayed_hotplug = false;
+ drm_fb_helper_hotplug_event(fb_helper);
+ }
+ return 0;
+}
+#endif
+
+#if 0
+int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_mode_set *modeset;
+ struct drm_crtc *crtc;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+ modeset = &fb_helper->crtc_info[i].mode_set;
+
+ modeset->x = var->xoffset;
+ modeset->y = var->yoffset;
+
+ if (modeset->num_connectors) {
+ ret = crtc->funcs->set_config(modeset);
+ if (!ret) {
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ }
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+#endif
+
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
+{
+ int new_fb = 0;
+ int crtc_count = 0;
+ int i;
+#if 0
+ struct fb_info *info;
+#endif
+ struct drm_fb_helper_surface_size sizes;
+ int gamma_size = 0;
+
+ memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ sizes.fb_width = (unsigned)-1;
+ sizes.fb_height = (unsigned)-1;
+
+ /* if driver picks 8 or 16 by default use that
+ for both depth/bpp */
+ if (preferred_bpp != sizes.surface_bpp) {
+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
+ }
+ /* First, count the crtcs now in use and establish min/max width/height. */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+
+ if (cmdline_mode->bpp_specified) {
+ switch (cmdline_mode->bpp) {
+ case 8:
+ sizes.surface_depth = sizes.surface_bpp = 8;
+ break;
+ case 15:
+ sizes.surface_depth = 15;
+ sizes.surface_bpp = 16;
+ break;
+ case 16:
+ sizes.surface_depth = sizes.surface_bpp = 16;
+ break;
+ case 24:
+ sizes.surface_depth = sizes.surface_bpp = 24;
+ break;
+ case 32:
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ break;
+ }
+ break;
+ }
+ }
+
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+ if (desired_mode->hdisplay < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay;
+ crtc_count++;
+ }
+ }
+
+ if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
+ /* hmm everyone went away - assume VGA cable just fell out
+ and will come back later. */
+ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+ sizes.fb_width = sizes.surface_width = 1024;
+ sizes.fb_height = sizes.surface_height = 768;
+ }
+
+ /* push down into drivers */
+ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (new_fb < 0)
+ return new_fb;
+
+#if 0
+ info = fb_helper->fbdev;
+#endif
+
+ /* set the fb pointer */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+ }
+
+#if 0
+ if (new_fb) {
+ info->var.pixclock = 0;
+ if (register_framebuffer(info) < 0) {
+ return -EINVAL;
+ }
+
+ printf("fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
+
+ } else {
+ drm_fb_helper_set_par(info);
+ }
+
+ /* Switch back to kernel console on panic */
+ /* multi card linked list maybe */
+ if (list_empty(&kernel_fb_helper_list)) {
+ printf("drm: registered panic notifier\n");
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &paniced);
+ }
+ if (new_fb)
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+#endif
+
+ return 0;
+}
+
+#if 0
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth)
+{
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+ FB_VISUAL_TRUECOLOR;
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 1; /* doing it in hw */
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+
+ info->fix.line_length = pitch;
+ return;
+}
+
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ info->pseudo_palette = fb_helper->pseudo_palette;
+ info->var.xres_virtual = fb->width;
+ info->var.yres_virtual = fb->height;
+ info->var.bits_per_pixel = fb->bits_per_pixel;
+ info->var.accel_flags = FB_ACCELF_TEXT;
+ info->var.xoffset = 0;
+ info->var.yoffset = 0;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->var.height = -1;
+ info->var.width = -1;
+
+ switch (fb->depth) {
+ case 8:
+ info->var.red.offset = 0;
+ info->var.green.offset = 0;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8; /* 8bit DAC */
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+ break;
+ case 15:
+ info->var.red.offset = 10;
+ info->var.green.offset = 5;
+ info->var.blue.offset = 0;
+ info->var.red.length = 5;
+ info->var.green.length = 5;
+ info->var.blue.length = 5;
+ info->var.transp.offset = 15;
+ info->var.transp.length = 1;
+ break;
+ case 16:
+ info->var.red.offset = 11;
+ info->var.green.offset = 5;
+ info->var.blue.offset = 0;
+ info->var.red.length = 5;
+ info->var.green.length = 6;
+ info->var.blue.length = 5;
+ info->var.transp.offset = 0;
+ break;
+ case 24:
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+ break;
+ case 32:
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 24;
+ info->var.transp.length = 8;
+ break;
+ default:
+ break;
+ }
+
+ info->var.xres = fb_width;
+ info->var.yres = fb_height;
+}
+#endif
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+ uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ count += connector->funcs->fill_modes(connector, maxX, maxY);
+ }
+
+ return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+ if (drm_mode_width(mode) > width ||
+ drm_mode_height(mode) > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_connector->cmdline_mode;
+ return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+{
+ struct drm_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode1;
+ if (cmdline_mode->specified == false &&
+ !drm_fetch_cmdline_mode_from_kenv(fb_helper_conn->connector,
+ cmdline_mode))
+ return (NULL);
+
+ /* attempt to find a matching mode in the list of modes we have
+ * gotten so far; if none matches, add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ }
+ return mode;
+ }
+
+create_mode:
+ if (cmdline_mode->cvt)
+ mode = drm_cvt_mode(fb_helper_conn->connector->dev,
+ cmdline_mode->xres, cmdline_mode->yres,
+ cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+ cmdline_mode->rb, cmdline_mode->interlace,
+ cmdline_mode->margins);
+ else
+ mode = drm_gtf_mode(fb_helper_conn->connector->dev,
+ cmdline_mode->xres, cmdline_mode->yres,
+ cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+ cmdline_mode->interlace,
+ cmdline_mode->margins);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ list_add(&mode->head, &fb_helper_conn->connector->modes);
+ return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict) {
+ enable = connector->status == connector_status_connected;
+ } else {
+ enable = connector->status != connector_status_disconnected;
+ }
+ return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ enabled[i] ? "yes" : "no");
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, false);
+ }
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ int count, i, j;
+ bool can_clone = false;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct drm_display_mode *dmt_mode, *mode;
+
+ /* only contemplate cloning in the single crtc case */
+ if (fb_helper->crtc_count > 1)
+ return false;
+
+ count = 0;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (enabled[i])
+ count++;
+ }
+
+ /* only contemplate cloning if more than one connector is enabled */
+ if (count <= 1)
+ return false;
+
+ /* check the command line or if nothing common pick 1024x768 */
+ can_clone = true;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (!enabled[i])
+ continue;
+ fb_helper_conn = fb_helper->connector_info[i];
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ can_clone = false;
+ break;
+ }
+ for (j = 0; j < i; j++) {
+ if (!enabled[j])
+ continue;
+ if (!drm_mode_equal(modes[j], modes[i]))
+ can_clone = false;
+ }
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using command line\n");
+ return true;
+ }
+
+ /* try and find a 1024x768 mode on each connector */
+ can_clone = true;
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ if (!enabled[i])
+ continue;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ if (drm_mode_equal(mode, dmt_mode))
+ modes[i] = mode;
+ }
+ if (!modes[i])
+ can_clone = false;
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using 1024x768\n");
+ return true;
+ }
+ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ if (enabled[i] == false)
+ continue;
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+ /* go for the command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+ list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+ break;
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ }
+ return true;
+}
+
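+/*
+ * Exhaustively search for the best CRTC assignment: recurse over
+ * connectors n..connector_count-1, scoring each candidate by how
+ * confident we are in its mode (connected status, command line mode,
+ * preferred mode), and leave the best-scoring complete assignment in
+ * best_crtcs.
+ */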
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *best_crtc;
+ int my_score, best_score, score;
+ struct drm_fb_helper_crtc **crtcs, *crtc;
+ struct drm_fb_helper_connector *fb_helper_conn;
+
+ if (n == fb_helper->connector_count)
+ return 0;
+
+ fb_helper_conn = fb_helper->connector_info[n];
+ connector = fb_helper_conn->connector;
+
+ best_crtcs[n] = NULL;
+ best_crtc = NULL;
+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_cmdline_mode(fb_helper_conn))
+ my_score++;
+ if (drm_has_preferred_mode(fb_helper_conn, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ for (c = 0; c < fb_helper->crtc_count; c++) {
+ crtc = &fb_helper->crtc_info[c];
+
+ if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ continue;
+ }
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning unless only a single crtc */
+ if (fb_helper->crtc_count > 1)
+ continue;
+
+ if (!drm_mode_equal(modes[o], modes[n]))
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_crtc = crtc;
+ best_score = score;
+ memcpy(best_crtcs, crtcs,
+ dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+out:
+ free(crtcs, DRM_MEM_KMS);
+ return best_score;
+}
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_encoder *encoder;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i, ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ /* clean out all the encoder/crtc combos */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->crtc = NULL;
+ }
+
+ crtcs = malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+ modes = malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_display_mode *), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+ enabled = malloc(dev->mode_config.num_connector *
+ sizeof(bool), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ drm_enable_connectors(fb_helper, enabled);
+
+ ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
+ if (!ret) {
+ ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
+ if (!ret)
+ DRM_ERROR("Unable to find initial modes\n");
+ }
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+
+ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ modeset->num_connectors = 0;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+
+ if (mode && fb_crtc) {
+ modeset = &fb_crtc->mode_set;
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id);
+ fb_crtc->desired_mode = mode;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ }
+ }
+
+ free(crtcs, DRM_MEM_KMS);
+ free(modes, DRM_MEM_KMS);
+ free(enabled, DRM_MEM_KMS);
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ *
+ * LOCKING:
+ * Called at init time, must take the mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(fb_helper->dev);
+
+ drm_fb_helper_parse_command_line(fb_helper);
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+ if (count == 0) {
+ printf("No connectors reported connected with modes\n");
+ }
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+ u32 max_width, max_height, bpp_sel;
+ bool bound = false, crtcs_bound = false;
+ struct drm_crtc *crtc;
+
+ if (!fb_helper->fb)
+ return 0;
+
+ sx_xlock(&dev->mode_config.mutex);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound = true;
+ if (crtc->fb == fb_helper->fb)
+ bound = true;
+ }
+
+ if (!bound && crtcs_bound) {
+ fb_helper->delayed_hotplug = true;
+ sx_xunlock(&dev->mode_config.mutex);
+ return 0;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ max_width = fb_helper->fb->width;
+ max_height = fb_helper->fb->height;
+ bpp_sel = fb_helper->fb->bits_per_pixel;
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+ max_height);
+ drm_setup_crtcs(fb_helper);
+ sx_xunlock(&dev->mode_config.mutex);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+
diff --git a/sys/dev/drm2/drm_fb_helper.h b/sys/dev/drm2/drm_fb_helper.h
new file mode 100644
index 0000000..4b5ef85
--- /dev/null
+++ b/sys/dev/drm2/drm_fb_helper.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * $FreeBSD$
+ */
+#ifndef DRM_FB_HELPER_H
+#define DRM_FB_HELPER_H
+
+struct drm_fb_helper;
+
+struct drm_fb_helper_crtc {
+ uint32_t crtc_id;
+ struct drm_mode_set mode_set;
+ struct drm_display_mode *desired_mode;
+};
+
+/* mode specified on the command line */
+struct drm_fb_helper_cmdline_mode {
+ bool specified;
+ bool refresh_specified;
+ bool bpp_specified;
+ int xres, yres;
+ int bpp;
+ int refresh;
+ bool rb;
+ bool interlace;
+ bool cvt;
+ bool margins;
+};
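+
+/*
+ * For reference (assuming the Linux-style video= syntax this structure
+ * mirrors): a string such as "1280x1024M@60m" would set xres/yres, cvt
+ * ('M'), refresh_specified/refresh (60) and margins ('m').
+ */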
+
+struct drm_fb_helper_surface_size {
+ u32 fb_width;
+ u32 fb_height;
+ u32 surface_width;
+ u32 surface_height;
+ u32 surface_bpp;
+ u32 surface_depth;
+};
+
+struct drm_fb_helper_funcs {
+ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ int (*fb_probe)(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+};
+
+struct drm_fb_helper_connector {
+ struct drm_fb_helper_cmdline_mode cmdline_mode;
+ struct drm_cmdline_mode cmdline_mode1;
+ struct drm_connector *connector;
+};
+
+struct drm_fb_helper {
+ struct drm_framebuffer *fb;
+ struct drm_framebuffer *saved_fb;
+ struct drm_device *dev;
+ struct drm_display_mode *mode;
+ int crtc_count;
+ struct drm_fb_helper_crtc *crtc_info;
+ int connector_count;
+ struct drm_fb_helper_connector **connector_info;
+ struct drm_fb_helper_funcs *funcs;
+ int conn_limit;
+ struct fb_info *fbdev;
+ u32 pseudo_palette[17];
+ struct list_head kernel_fb_list;
+
+ /* we got a hotplug but fbdev wasn't running the console;
+ delay until the next set_par */
+ bool delayed_hotplug;
+};
+
+struct fb_var_screeninfo;
+struct fb_cmap;
+
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
+ int preferred_bpp);
+
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *helper, int crtc_count,
+ int max_conn);
+void drm_fb_helper_fini(struct drm_fb_helper *helper);
+int drm_fb_helper_blank(int blank, struct fb_info *info);
+int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info);
+int drm_fb_helper_set_par(struct fb_info *info);
+int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info);
+int drm_fb_helper_setcolreg(unsigned regno,
+ unsigned red,
+ unsigned green,
+ unsigned blue,
+ unsigned transp,
+ struct fb_info *info);
+
+bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_restore(void);
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height);
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth);
+
+int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
+
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_debug_enter(struct fb_info *info);
+int drm_fb_helper_debug_leave(struct fb_info *info);
+bool drm_fb_helper_force_kernel_mode(void);
+
+#endif
diff --git a/sys/dev/drm2/drm_fops.c b/sys/dev/drm2/drm_fops.c
new file mode 100644
index 0000000..0071783
--- /dev/null
+++ b/sys/dev/drm2/drm_fops.c
@@ -0,0 +1,202 @@
+/*-
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Daryll Strauss <daryll@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_fops.c
+ * Support code for dealing with the file privates associated with each
+ * open of the DRM device.
+ */
+
+#include <dev/drm2/drmP.h>
+
+/* drm_open_helper is called whenever a process opens /dev/drm. */
+int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
+ struct drm_device *dev)
+{
+ struct drm_file *priv;
+ int retcode;
+
+ if (flags & O_EXCL)
+ return EBUSY; /* No exclusive opens */
+ dev->flags = flags;
+
+ DRM_DEBUG("pid = %d, device = %s\n", DRM_CURRENTPID, devtoname(kdev));
+
+ priv = malloc(sizeof(*priv), DRM_MEM_FILES, M_NOWAIT | M_ZERO);
+ if (priv == NULL) {
+ return ENOMEM;
+ }
+
+ retcode = devfs_set_cdevpriv(priv, drm_close);
+ if (retcode != 0) {
+ free(priv, DRM_MEM_FILES);
+ return retcode;
+ }
+
+ DRM_LOCK(dev);
+ priv->dev = dev;
+ priv->uid = p->td_ucred->cr_svuid;
+ priv->pid = p->td_proc->p_pid;
+ priv->ioctl_count = 0;
+
+ /* for compatibility root is always authenticated */
+ priv->authenticated = DRM_SUSER(p);
+
+ INIT_LIST_HEAD(&priv->fbs);
+ INIT_LIST_HEAD(&priv->event_list);
+ priv->event_space = 4096; /* set aside 4k for event buffer */
+
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_open(dev, priv);
+
+ if (dev->driver->open) {
+ /* shared code returns -errno */
+ retcode = -dev->driver->open(dev, priv);
+ if (retcode != 0) {
+ devfs_clear_cdevpriv();
+ free(priv, DRM_MEM_FILES);
+ DRM_UNLOCK(dev);
+ return retcode;
+ }
+ }
+
+ /* first opener automatically becomes master */
+ priv->master = TAILQ_EMPTY(&dev->files);
+
+ TAILQ_INSERT_TAIL(&dev->files, priv, link);
+ DRM_UNLOCK(dev);
+ kdev->si_drv1 = dev;
+ return 0;
+}
+
+static bool
+drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
+ struct uio *uio, struct drm_pending_event **out)
+{
+ struct drm_pending_event *e;
+
+ if (list_empty(&file_priv->event_list))
+ return (false);
+ e = list_first_entry(&file_priv->event_list,
+ struct drm_pending_event, link);
+ if (e->event->length > uio->uio_resid)
+ return (false);
+
+ file_priv->event_space += e->event->length;
+ list_del(&e->link);
+ *out = e;
+ return (true);
+}
+
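+/*
+ * Block until at least one event is queued (honoring O_NONBLOCK), then
+ * drain as many whole events as fit in the caller's buffer.  The event
+ * lock is dropped around uiomove() since copying out may fault.
+ */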
+int
+drm_read(struct cdev *kdev, struct uio *uio, int ioflag)
+{
+ struct drm_file *file_priv;
+ struct drm_device *dev;
+ struct drm_pending_event *e;
+ int error;
+
+ error = devfs_get_cdevpriv((void **)&file_priv);
+ if (error != 0) {
+ DRM_ERROR("can't find authenticator\n");
+ return (EINVAL);
+ }
+ dev = drm_get_device_from_kdev(kdev);
+ mtx_lock(&dev->event_lock);
+ while (list_empty(&file_priv->event_list)) {
+ if ((ioflag & O_NONBLOCK) != 0) {
+ error = EAGAIN;
+ goto out;
+ }
+ error = msleep(&file_priv->event_space, &dev->event_lock,
+ PCATCH, "drmrea", 0);
+ if (error != 0)
+ goto out;
+ }
+ while (drm_dequeue_event(dev, file_priv, uio, &e)) {
+ mtx_unlock(&dev->event_lock);
+ error = uiomove(e->event, e->event->length, uio);
+ CTR3(KTR_DRM, "drm_event_dequeued %d %d %d", curproc->p_pid,
+ e->event->type, e->event->length);
+ e->destroy(e);
+ if (error != 0)
+ return (error);
+ mtx_lock(&dev->event_lock);
+ }
+out:
+ mtx_unlock(&dev->event_lock);
+ return (error);
+}
+
+void
+drm_event_wakeup(struct drm_pending_event *e)
+{
+ struct drm_file *file_priv;
+ struct drm_device *dev;
+
+ file_priv = e->file_priv;
+ dev = file_priv->dev;
+ mtx_assert(&dev->event_lock, MA_OWNED);
+
+ wakeup(&file_priv->event_space);
+ selwakeup(&file_priv->event_poll);
+}
+
+int
+drm_poll(struct cdev *kdev, int events, struct thread *td)
+{
+ struct drm_file *file_priv;
+ struct drm_device *dev;
+ int error, revents;
+
+ error = devfs_get_cdevpriv((void **)&file_priv);
+ if (error != 0) {
+ DRM_ERROR("can't find authenticator\n");
+ return (EINVAL);
+ }
+ dev = drm_get_device_from_kdev(kdev);
+
+ revents = 0;
+ mtx_lock(&dev->event_lock);
+ if ((events & (POLLIN | POLLRDNORM)) != 0) {
+ if (list_empty(&file_priv->event_list)) {
+ CTR0(KTR_DRM, "drm_poll empty list");
+ selrecord(td, &file_priv->event_poll);
+ } else {
+ revents |= events & (POLLIN | POLLRDNORM);
+ CTR1(KTR_DRM, "drm_poll revents %x", revents);
+ }
+ }
+ mtx_unlock(&dev->event_lock);
+ return (revents);
+}
diff --git a/sys/dev/drm2/drm_fourcc.h b/sys/dev/drm2/drm_fourcc.h
new file mode 100644
index 0000000..0e871df
--- /dev/null
+++ b/sys/dev/drm2/drm_fourcc.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef DRM_FOURCC_H
+#define DRM_FOURCC_H
+
+#include <sys/types.h>
+
+#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
+ ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
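+
+/*
+ * Example (illustrative): fourcc_code('X', 'R', '2', '4') evaluates to
+ * 0x34325258, i.e. the bytes 'X' 'R' '2' '4' reading from the least
+ * significant byte up; this is the value of DRM_FORMAT_XRGB8888 below.
+ */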
+
+#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+
+/* color index */
+#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+
+/* 8 bpp RGB */
+#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
+#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
+
+/* 16 bpp RGB */
+#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
+
+#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
+
+#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
+
+#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
+
+#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
+#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
+
+/* 24 bpp RGB */
+#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
+#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
+
+/* 32 bpp RGB */
+#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
+
+#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
+
+#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
+
+#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+
+/* packed YCbCr */
+#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
+#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
+
+#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
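+
+/*
+ * Example (illustrative): a WxH NV12 buffer carries a WxH-byte Y plane
+ * followed by a (W/2)x(H/2) array of two-byte Cr:Cb samples, so the
+ * second plane adds W*H/2 bytes.
+ */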
+
+/* 2 non contiguous plane YCbCr */
+#define DRM_FORMAT_NV12M fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
+
+/*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+/* 3 non contiguous plane YCbCr */
+#define DRM_FORMAT_YUV420M fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+
+#endif /* DRM_FOURCC_H */
diff --git a/sys/dev/drm2/drm_gem.c b/sys/dev/drm2/drm_gem.c
new file mode 100644
index 0000000..3401de8
--- /dev/null
+++ b/sys/dev/drm2/drm_gem.c
@@ -0,0 +1,487 @@
+/*-
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_vm.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_sarea.h>
+
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+
+/* pgoff in mmap is an unsigned long, so we need to make sure that
+ * the faked up offset will fit
+ */
+
+#if ULONG_MAX == UINT64_MAX
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#else
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
+#endif
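+
+/*
+ * Worked example (illustrative): on LP64 with 4K pages,
+ * DRM_FILE_PAGE_OFFSET_START = (0xFFFFFFFF >> 12) + 1 = 0x100000 pages,
+ * so fake offsets begin right above the 4GB mark, past any offset a
+ * 32-bit pgoff could name, and the window spans roughly sixteen times
+ * that range.
+ */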
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+ struct drm_gem_mm *mm;
+
+ drm_gem_names_init(&dev->object_names);
+ mm = malloc(sizeof(*mm), DRM_MEM_DRIVER, M_WAITOK);
+ dev->mm_private = mm;
+ if (drm_ht_create(&mm->offset_hash, 19) != 0) {
+ free(mm, DRM_MEM_DRIVER);
+ return (ENOMEM);
+ }
+ mm->idxunr = new_unrhdr(0, DRM_GEM_MAX_IDX, NULL);
+ return (0);
+}
+
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+ struct drm_gem_mm *mm;
+
+ mm = dev->mm_private;
+ dev->mm_private = NULL;
+ drm_ht_remove(&mm->offset_hash);
+ delete_unrhdr(mm->idxunr);
+ free(mm, DRM_MEM_DRIVER);
+ drm_gem_names_fini(&dev->object_names);
+}
+
+int
+drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
+ size_t size)
+{
+
+ KASSERT((size & (PAGE_SIZE - 1)) == 0,
+ ("Bad size %ju", (uintmax_t)size));
+
+ obj->dev = dev;
+ obj->vm_obj = vm_pager_allocate(OBJT_DEFAULT, NULL, size,
+ VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
+
+ obj->refcount = 1;
+ obj->handle_count = 0;
+ obj->size = size;
+
+ return (0);
+}
+
+int
+drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj,
+ size_t size)
+{
+
+ MPASS((size & (PAGE_SIZE - 1)) == 0);
+
+ obj->dev = dev;
+ obj->vm_obj = NULL;
+
+ obj->refcount = 1;
+ atomic_set(&obj->handle_count, 0);
+ obj->size = size;
+
+ return (0);
+}
+
+
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_object *obj;
+
+ obj = malloc(sizeof(*obj), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+ if (drm_gem_object_init(dev, obj, size) != 0)
+ goto free;
+
+ if (dev->driver->gem_init_object != NULL &&
+ dev->driver->gem_init_object(obj) != 0)
+ goto dealloc;
+ return (obj);
+dealloc:
+ vm_object_deallocate(obj->vm_obj);
+free:
+ free(obj, DRM_MEM_DRIVER);
+ return (NULL);
+}
+
+void
+drm_gem_object_free(struct drm_gem_object *obj)
+{
+ struct drm_device *dev;
+
+ dev = obj->dev;
+ DRM_LOCK_ASSERT(dev);
+ if (dev->driver->gem_free_object != NULL)
+ dev->driver->gem_free_object(obj);
+}
+
+void
+drm_gem_object_reference(struct drm_gem_object *obj)
+{
+
+ KASSERT(obj->refcount > 0, ("Dangling obj %p", obj));
+ refcount_acquire(&obj->refcount);
+}
+
+void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+
+ if (obj == NULL)
+ return;
+ if (refcount_release(&obj->refcount))
+ drm_gem_object_free(obj);
+}
+
+void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+ struct drm_device *dev;
+
+ if (obj == NULL)
+ return;
+ dev = obj->dev;
+ DRM_LOCK(dev);
+ drm_gem_object_unreference(obj);
+ DRM_UNLOCK(dev);
+}
+
+void
+drm_gem_object_handle_reference(struct drm_gem_object *obj)
+{
+
+ drm_gem_object_reference(obj);
+ atomic_add_rel_int(&obj->handle_count, 1);
+}
+
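+/*
+ * A GEM object holds one reference per userspace handle plus one for a
+ * global (flink) name; dropping the last handle also removes the name
+ * and releases the reference it pinned.
+ */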
+void
+drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj1;
+
+ dev = obj->dev;
+ if (obj->name != 0) {
+ obj1 = drm_gem_names_remove(&dev->object_names, obj->name);
+ obj->name = 0;
+ drm_gem_object_unreference(obj1);
+ }
+}
+
+void
+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
+{
+
+ if (obj == NULL ||
+ atomic_load_acq_int(&obj->handle_count) == 0)
+ return;
+
+ if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_unreference(obj);
+}
+
+void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+
+ if (obj == NULL ||
+ atomic_load_acq_int(&obj->handle_count) == 0)
+ return;
+
+ if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_unreference_unlocked(obj);
+}
+
+int
+drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj,
+ uint32_t *handle)
+{
+ int error;
+
+ error = drm_gem_name_create(&file_priv->object_names, obj, handle);
+ if (error != 0)
+ return (error);
+ drm_gem_object_handle_reference(obj);
+ return (0);
+}
+
+int
+drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle)
+{
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_names_remove(&file_priv->object_names, handle);
+ if (obj == NULL)
+ return (EINVAL);
+ drm_gem_object_handle_unreference_unlocked(obj);
+ return (0);
+}
+
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{
+
+ /*
+ * obj->vm_obj can be NULL for private gem objects.
+ */
+ vm_object_deallocate(obj->vm_obj);
+}
+
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_open *args;
+ struct drm_gem_object *obj;
+ int ret;
+ uint32_t handle;
+
+ if (!drm_core_check_feature(dev, DRIVER_GEM))
+ return (ENODEV);
+ args = data;
+
+ obj = drm_gem_name_ref(&dev->object_names, args->name,
+ (void (*)(void *))drm_gem_object_reference);
+ if (obj == NULL)
+ return (ENOENT);
+ handle = 0;
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret != 0)
+ return (ret);
+
+ args->handle = handle;
+ args->size = obj->size;
+
+ return (0);
+}
+
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+
+ drm_gem_names_init(&file_priv->object_names);
+}
+
+static int
+drm_gem_object_release_handle(uint32_t name, void *ptr, void *arg)
+{
+ struct drm_gem_object *obj;
+
+ obj = ptr;
+ drm_gem_object_handle_unreference(obj);
+ return (0);
+}
+
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_priv)
+{
+
+ drm_gem_names_foreach(&file_priv->object_names,
+ drm_gem_object_release_handle, NULL);
+ drm_gem_names_fini(&file_priv->object_names);
+}
+
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_close *args;
+
+ if (!drm_core_check_feature(dev, DRIVER_GEM))
+ return (ENODEV);
+ args = data;
+
+ return (drm_gem_handle_delete(file_priv, args->handle));
+}
+
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_flink *args;
+ struct drm_gem_object *obj;
+ int error;
+
+ if (!drm_core_check_feature(dev, DRIVER_GEM))
+ return (ENODEV);
+ args = data;
+
+ obj = drm_gem_name_ref(&file_priv->object_names, args->handle,
+ (void (*)(void *))drm_gem_object_reference);
+ if (obj == NULL)
+ return (ENOENT);
+ error = drm_gem_name_create(&dev->object_names, obj, &obj->name);
+ if (error != 0) {
+ if (error == EALREADY)
+ error = 0;
+ drm_gem_object_unreference_unlocked(obj);
+ }
+ if (error == 0)
+ args->name = obj->name;
+ return (error);
+}
+
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t handle)
+{
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_name_ref(&file_priv->object_names, handle,
+ (void (*)(void *))drm_gem_object_reference);
+ return (obj);
+}
+
+static struct drm_gem_object *
+drm_gem_object_from_offset(struct drm_device *dev, vm_ooffset_t offset)
+{
+ struct drm_gem_object *obj;
+ struct drm_gem_mm *mm;
+ struct drm_hash_item *map_list;
+
+ if ((offset & DRM_GEM_MAPPING_MASK) != DRM_GEM_MAPPING_KEY)
+ return (NULL);
+ offset &= ~DRM_GEM_MAPPING_KEY;
+ mm = dev->mm_private;
+ if (drm_ht_find_item(&mm->offset_hash, DRM_GEM_MAPPING_IDX(offset),
+ &map_list) != 0) {
+ DRM_DEBUG("drm_gem_object_from_offset: offset 0x%jx obj not found\n",
+ (uintmax_t)offset);
+ return (NULL);
+ }
+ obj = member2struct(drm_gem_object, map_list, map_list);
+ return (obj);
+}
+
+int
+drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev;
+ struct drm_gem_mm *mm;
+ int ret;
+
+ if (obj->on_map)
+ return (0);
+ dev = obj->dev;
+ mm = dev->mm_private;
+ ret = 0;
+
+ obj->map_list.key = alloc_unr(mm->idxunr);
+ ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
+ if (ret != 0) {
+ DRM_ERROR("failed to add to map hash\n");
+ free_unr(mm->idxunr, obj->map_list.key);
+ return (ret);
+ }
+ obj->on_map = true;
+ return (0);
+}
+
+void
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_hash_item *list;
+ struct drm_gem_mm *mm;
+
+ if (!obj->on_map)
+ return;
+ mm = obj->dev->mm_private;
+ list = &obj->map_list;
+
+ drm_ht_remove_item(&mm->offset_hash, list);
+ free_unr(mm->idxunr, list->key);
+ obj->on_map = false;
+}
+
+int
+drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
+ struct vm_object **obj_res, int nprot)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *gem_obj;
+ struct vm_object *vm_obj;
+
+ dev = drm_get_device_from_kdev(kdev);
+ if ((dev->driver->driver_features & DRIVER_GEM) == 0)
+ return (ENODEV);
+ DRM_LOCK(dev);
+ gem_obj = drm_gem_object_from_offset(dev, *offset);
+ if (gem_obj == NULL) {
+ DRM_UNLOCK(dev);
+ return (ENODEV);
+ }
+ drm_gem_object_reference(gem_obj);
+ DRM_UNLOCK(dev);
+ vm_obj = cdev_pager_allocate(gem_obj, OBJT_MGTDEVICE,
+ dev->driver->gem_pager_ops, size, nprot,
+ DRM_GEM_MAPPING_MAPOFF(*offset), curthread->td_ucred);
+ if (vm_obj == NULL) {
+ drm_gem_object_unreference_unlocked(gem_obj);
+ return (EINVAL);
+ }
+ *offset = DRM_GEM_MAPPING_MAPOFF(*offset);
+ *obj_res = vm_obj;
+ return (0);
+}
+
+void
+drm_gem_pager_dtr(void *handle)
+{
+ struct drm_gem_object *obj;
+ struct drm_device *dev;
+
+ obj = handle;
+ dev = obj->dev;
+
+ DRM_LOCK(dev);
+ drm_gem_free_mmap_offset(obj);
+ drm_gem_object_unreference(obj);
+ DRM_UNLOCK(dev);
+}
diff --git a/sys/dev/drm2/drm_gem_names.c b/sys/dev/drm2/drm_gem_names.c
new file mode 100644
index 0000000..2577d13
--- /dev/null
+++ b/sys/dev/drm2/drm_gem_names.c
@@ -0,0 +1,211 @@
+/*-
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/malloc.h>
+
+#include <dev/drm2/drm_gem_names.h>
+
+MALLOC_DEFINE(M_GEM_NAMES, "gem_name", "Hash headers for the gem names");
+
+static void drm_gem_names_delete_name(struct drm_gem_names *names,
+ struct drm_gem_name *np);
+
+void
+drm_gem_names_init(struct drm_gem_names *names)
+{
+
+ names->unr = new_unrhdr(1, INT_MAX, NULL); /* XXXKIB */
+ names->names_hash = hashinit(1000 /* XXXKIB */, M_GEM_NAMES,
+ &names->hash_mask);
+ mtx_init(&names->lock, "drmnames", NULL, MTX_DEF);
+}
+
+void
+drm_gem_names_fini(struct drm_gem_names *names)
+{
+ struct drm_gem_name *np;
+ int i;
+
+ mtx_lock(&names->lock);
+ for (i = 0; i <= names->hash_mask; i++) {
+ while ((np = LIST_FIRST(&names->names_hash[i])) != NULL) {
+ drm_gem_names_delete_name(names, np);
+ mtx_lock(&names->lock);
+ }
+ }
+ mtx_unlock(&names->lock);
+ mtx_destroy(&names->lock);
+ hashdestroy(names->names_hash, M_GEM_NAMES, names->hash_mask);
+ delete_unrhdr(names->unr);
+}
+
+static struct drm_gem_names_head *
+gem_name_hash_index(struct drm_gem_names *names, int name)
+{
+
+ return (&names->names_hash[name & names->hash_mask]);
+}
+
+void *
+drm_gem_name_ref(struct drm_gem_names *names, uint32_t name,
+ void (*ref)(void *))
+{
+ struct drm_gem_name *n;
+
+ mtx_lock(&names->lock);
+ LIST_FOREACH(n, gem_name_hash_index(names, name), link) {
+ if (n->name == name) {
+ if (ref != NULL)
+ ref(n->ptr);
+ mtx_unlock(&names->lock);
+ return (n->ptr);
+ }
+ }
+ mtx_unlock(&names->lock);
+ return (NULL);
+}
+
+struct drm_gem_ptr_match_arg {
+ uint32_t res;
+ void *ptr;
+};
+
+static int
+drm_gem_ptr_match(uint32_t name, void *ptr, void *arg)
+{
+ struct drm_gem_ptr_match_arg *a;
+
+ a = arg;
+ if (ptr == a->ptr) {
+ a->res = name;
+ return (1);
+ } else
+ return (0);
+}
+
+uint32_t
+drm_gem_find_name(struct drm_gem_names *names, void *ptr)
+{
+ struct drm_gem_ptr_match_arg arg;
+
+ arg.res = 0;
+ arg.ptr = ptr;
+ drm_gem_names_foreach(names, drm_gem_ptr_match, &arg);
+ return (arg.res);
+}
+
+int
+drm_gem_name_create(struct drm_gem_names *names, void *p, uint32_t *name)
+{
+ struct drm_gem_name *np;
+
+ np = malloc(sizeof(struct drm_gem_name), M_GEM_NAMES, M_WAITOK);
+ mtx_lock(&names->lock);
+ if (*name != 0) {
+ mtx_unlock(&names->lock);
+ return (EALREADY);
+ }
+ np->name = alloc_unr(names->unr);
+ if (np->name == -1) {
+ mtx_unlock(&names->lock);
+ free(np, M_GEM_NAMES);
+ return (ENOMEM);
+ }
+ *name = np->name;
+ np->ptr = p;
+ LIST_INSERT_HEAD(gem_name_hash_index(names, np->name), np, link);
+ mtx_unlock(&names->lock);
+ return (0);
+}
+
+static void
+drm_gem_names_delete_name(struct drm_gem_names *names, struct drm_gem_name *np)
+{
+
+ mtx_assert(&names->lock, MA_OWNED);
+ LIST_REMOVE(np, link);
+ mtx_unlock(&names->lock);
+ free_unr(names->unr, np->name);
+ free(np, M_GEM_NAMES);
+}
+
+void *
+drm_gem_names_remove(struct drm_gem_names *names, uint32_t name)
+{
+ struct drm_gem_name *n;
+ void *res;
+
+ mtx_lock(&names->lock);
+ LIST_FOREACH(n, gem_name_hash_index(names, name), link) {
+ if (n->name == name) {
+ res = n->ptr;
+ drm_gem_names_delete_name(names, n);
+ return (res);
+ }
+ }
+ mtx_unlock(&names->lock);
+ return (NULL);
+}
+
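+/*
+ * Iterate with the lock dropped around each callback: a marker entry
+ * (name == -1) is linked after the current node so the walk can resume
+ * safely even if the list changes while f() runs.
+ */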
+void
+drm_gem_names_foreach(struct drm_gem_names *names,
+ int (*f)(uint32_t, void *, void *), void *arg)
+{
+ struct drm_gem_name *np;
+ struct drm_gem_name marker;
+ int i, fres;
+
+ bzero(&marker, sizeof(marker));
+ marker.name = -1;
+ mtx_lock(&names->lock);
+ for (i = 0; i <= names->hash_mask; i++) {
+ for (np = LIST_FIRST(&names->names_hash[i]); np != NULL; ) {
+ if (np->name == -1) {
+ np = LIST_NEXT(np, link);
+ continue;
+ }
+ LIST_INSERT_AFTER(np, &marker, link);
+ mtx_unlock(&names->lock);
+ fres = f(np->name, np->ptr, arg);
+ mtx_lock(&names->lock);
+ np = LIST_NEXT(&marker, link);
+ LIST_REMOVE(&marker, link);
+ if (fres)
+ break;
+ }
+ }
+ mtx_unlock(&names->lock);
+}
diff --git a/sys/dev/drm2/drm_gem_names.h b/sys/dev/drm2/drm_gem_names.h
new file mode 100644
index 0000000..0fe4edd
--- /dev/null
+++ b/sys/dev/drm2/drm_gem_names.h
@@ -0,0 +1,64 @@
+/*-
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef DRM_GEM_NAMES_H
+#define DRM_GEM_NAMES_H
+
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+
+struct drm_gem_name {
+ uint32_t name;
+ void *ptr;
+ LIST_ENTRY(drm_gem_name) link;
+};
+
+struct drm_gem_names {
+ struct mtx lock;
+ LIST_HEAD(drm_gem_names_head, drm_gem_name) *names_hash;
+ u_long hash_mask;
+ struct unrhdr *unr;
+};
+
+void drm_gem_names_init(struct drm_gem_names *names);
+void drm_gem_names_fini(struct drm_gem_names *names);
+uint32_t drm_gem_find_name(struct drm_gem_names *names, void *ptr);
+void *drm_gem_name_ref(struct drm_gem_names *names, uint32_t name,
+ void (*ref)(void *));
+int drm_gem_name_create(struct drm_gem_names *names, void *obj, uint32_t *name);
+void drm_gem_names_foreach(struct drm_gem_names *names,
+ int (*f)(uint32_t, void *, void *), void *arg);
+void *drm_gem_names_remove(struct drm_gem_names *names, uint32_t name);
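+
+/*
+ * Typical usage (as in drm_gem.c): drm_gem_names_init() when the owner
+ * is created, drm_gem_name_create() to hand out a name for an object,
+ * drm_gem_name_ref() and drm_gem_names_remove() for lookup and teardown,
+ * and drm_gem_names_fini() when the owner goes away.
+ */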
+
+#endif
diff --git a/sys/dev/drm2/drm_hashtab.c b/sys/dev/drm2/drm_hashtab.c
new file mode 100644
index 0000000..8536fb8
--- /dev/null
+++ b/sys/dev/drm2/drm_hashtab.c
@@ -0,0 +1,181 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Simple open hash tab implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_hashtab.h>
+
+#include <sys/hash.h>
+
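+/*
+ * Create a hash table with 2^order buckets; entries within a bucket are
+ * kept sorted by key so lookups and inserts can stop early.
+ */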
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
+{
+ ht->size = 1 << order;
+ ht->order = order;
+ ht->table = NULL;
+ ht->table = hashinit_flags(ht->size, DRM_MEM_HASHTAB, &ht->mask,
+ HASH_NOWAIT);
+ if (!ht->table) {
+ DRM_ERROR("Out of memory for hash table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
+{
+ struct drm_hash_item *entry;
+ struct drm_hash_item_list *h_list;
+ unsigned int hashed_key;
+ int count = 0;
+
+ hashed_key = hash32_buf(&key, sizeof(key), ht->order);
+ DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+ h_list = &ht->table[hashed_key & ht->mask];
+ LIST_FOREACH(entry, h_list, head)
+ DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+}
+
+static struct drm_hash_item *
+drm_ht_find_key(struct drm_open_hash *ht, unsigned long key)
+{
+ struct drm_hash_item *entry;
+ struct drm_hash_item_list *h_list;
+ unsigned int hashed_key;
+
+ hashed_key = hash32_buf(&key, sizeof(key), ht->order);
+ h_list = &ht->table[hashed_key & ht->mask];
+ LIST_FOREACH(entry, h_list, head) {
+ if (entry->key == key)
+ return entry;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
+
+
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+ struct drm_hash_item *entry, *parent;
+ struct drm_hash_item_list *h_list;
+ unsigned int hashed_key;
+ unsigned long key = item->key;
+
+ hashed_key = hash32_buf(&key, sizeof(key), ht->order);
+ h_list = &ht->table[hashed_key & ht->mask];
+ parent = NULL;
+ LIST_FOREACH(entry, h_list, head) {
+ if (entry->key == key)
+ return -EINVAL;
+ if (entry->key > key)
+ break;
+ parent = entry;
+ }
+ if (parent) {
+ LIST_INSERT_AFTER(parent, item, head);
+ } else {
+ LIST_INSERT_HEAD(h_list, item, head);
+ }
+ return 0;
+}
+
+/*
+ * Just insert an item and return any "bits" bit key that hasn't been
+ * used before.
+ */
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add)
+{
+ int ret;
+ unsigned long mask = (1 << bits) - 1;
+ unsigned long first, unshifted_key = 0;
+
+ unshifted_key = hash32_buf(&seed, sizeof(seed), unshifted_key);
+ first = unshifted_key;
+ do {
+ item->key = (unshifted_key << shift) + add;
+ ret = drm_ht_insert_item(ht, item);
+ if (ret)
+ unshifted_key = (unshifted_key + 1) & mask;
+ } while(ret && (unshifted_key != first));
+
+ if (ret) {
+ DRM_ERROR("Available key bit space exhausted\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+ struct drm_hash_item **item)
+{
+ struct drm_hash_item *entry;
+
+ entry = drm_ht_find_key(ht, key);
+ if (!entry)
+ return -EINVAL;
+
+ *item = entry;
+ return 0;
+}
+
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
+{
+ struct drm_hash_item *entry;
+
+ entry = drm_ht_find_key(ht, key);
+ if (entry) {
+ LIST_REMOVE(entry, head);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+ LIST_REMOVE(item, head);
+ return 0;
+}
+
+void drm_ht_remove(struct drm_open_hash *ht)
+{
+ if (ht->table) {
+ hashdestroy(ht->table, DRM_MEM_HASHTAB, ht->mask);
+ ht->table = NULL;
+ }
+}
diff --git a/sys/dev/drm2/drm_hashtab.h b/sys/dev/drm2/drm_hashtab.h
new file mode 100644
index 0000000..fc200b5
--- /dev/null
+++ b/sys/dev/drm2/drm_hashtab.h
@@ -0,0 +1,68 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Simple open hash tab implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef DRM_HASHTAB_H
+#define DRM_HASHTAB_H
+
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+struct drm_hash_item {
+ LIST_ENTRY(drm_hash_item) head;
+ unsigned long key;
+};
+
+struct drm_open_hash {
+ LIST_HEAD(drm_hash_item_list, drm_hash_item) *table;
+ unsigned int size;
+ unsigned int order;
+ unsigned long mask;
+};
+
+extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add);
+extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
+
+extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern void drm_ht_remove(struct drm_open_hash *ht);
+
+#endif
diff --git a/sys/dev/drm2/drm_internal.h b/sys/dev/drm2/drm_internal.h
new file mode 100644
index 0000000..0ed1b6f
--- /dev/null
+++ b/sys/dev/drm2/drm_internal.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright 2007 Red Hat, Inc
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* This header file holds function prototypes and data types that are
+ * internal to the drm (not exported to user space) but shared across
+ * drivers and platforms */
+
+#ifndef __DRM_INTERNAL_H__
+#define __DRM_INTERNAL_H__
+
+/**
+ * Drawable information.
+ */
+struct drm_drawable_info {
+ unsigned int num_rects;
+ struct drm_clip_rect *rects;
+};
+
+#endif
diff --git a/sys/dev/drm2/drm_ioctl.c b/sys/dev/drm2/drm_ioctl.c
new file mode 100644
index 0000000..b2c7aff
--- /dev/null
+++ b/sys/dev/drm2/drm_ioctl.c
@@ -0,0 +1,320 @@
+/*-
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_ioctl.c
+ * Various minor DRM ioctls not applicable to other files, such as versioning
+ * information and reporting DRM information to userland.
+ */
+
+#include <dev/drm2/drmP.h>
+
+/*
+ * Beginning in revision 1.1 of the DRM interface, getunique will return
+ * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
+ * before setunique has been called. The format for the bus-specific part of
+ * the unique is not defined for any other bus.
+ */
+int drm_getunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_unique *u = data;
+
+ if (u->unique_len >= dev->unique_len) {
+ if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
+ return EFAULT;
+ }
+ u->unique_len = dev->unique_len;
+
+ return 0;
+}
+
+/* Deprecated in DRM version 1.1, and will return EBUSY when setversion has
+ * requested version 1.1 or greater.
+ */
+int drm_setunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_unique *u = data;
+ int domain, bus, slot, func, ret;
+ char *busid;
+
+ /* Check and copy in the submitted Bus ID */
+ if (!u->unique_len || u->unique_len > 1024)
+ return EINVAL;
+
+ busid = malloc(u->unique_len + 1, DRM_MEM_DRIVER, M_WAITOK);
+ if (busid == NULL)
+ return ENOMEM;
+
+ if (DRM_COPY_FROM_USER(busid, u->unique, u->unique_len)) {
+ free(busid, DRM_MEM_DRIVER);
+ return EFAULT;
+ }
+ busid[u->unique_len] = '\0';
+
+ /* Return error if the busid submitted doesn't match the device's actual
+ * busid.
+ */
+ ret = sscanf(busid, "PCI:%d:%d:%d", &bus, &slot, &func);
+ if (ret != 3) {
+ free(busid, DRM_MEM_DRIVER);
+ return EINVAL;
+ }
+ domain = bus >> 8;
+ bus &= 0xff;
+
+ if ((domain != dev->pci_domain) ||
+ (bus != dev->pci_bus) ||
+ (slot != dev->pci_slot) ||
+ (func != dev->pci_func)) {
+ free(busid, DRM_MEM_DRIVER);
+ return EINVAL;
+ }
+
+ /* Actually set the device's busid now. */
+ DRM_LOCK(dev);
+ if (dev->unique_len || dev->unique) {
+		DRM_UNLOCK(dev);
+		free(busid, DRM_MEM_DRIVER);
+		return EBUSY;
+ }
+
+ dev->unique_len = u->unique_len;
+ dev->unique = busid;
+ DRM_UNLOCK(dev);
+
+ return 0;
+}
+
+
+static int
+drm_set_busid(struct drm_device *dev)
+{
+
+ DRM_LOCK(dev);
+
+ if (dev->unique != NULL) {
+ DRM_UNLOCK(dev);
+ return EBUSY;
+ }
+
+ dev->unique_len = 20;
+ dev->unique = malloc(dev->unique_len + 1, DRM_MEM_DRIVER, M_NOWAIT);
+ if (dev->unique == NULL) {
+ DRM_UNLOCK(dev);
+ return ENOMEM;
+ }
+
+ snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%1x",
+ dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
+
+ DRM_UNLOCK(dev);
+
+ return 0;
+}
+
+int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_map *map = data;
+ drm_local_map_t *mapinlist;
+ int idx;
+ int i = 0;
+
+ idx = map->offset;
+
+ DRM_LOCK(dev);
+ if (idx < 0) {
+ DRM_UNLOCK(dev);
+ return EINVAL;
+ }
+
+ TAILQ_FOREACH(mapinlist, &dev->maplist, link) {
+ if (i == idx) {
+ map->offset = mapinlist->offset;
+ map->size = mapinlist->size;
+ map->type = mapinlist->type;
+ map->flags = mapinlist->flags;
+ map->handle = mapinlist->handle;
+ map->mtrr = mapinlist->mtrr;
+ break;
+ }
+ i++;
+ }
+
+ DRM_UNLOCK(dev);
+
+ if (mapinlist == NULL)
+ return EINVAL;
+
+ return 0;
+}
+
+int drm_getclient(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_client *client = data;
+ struct drm_file *pt;
+ int idx;
+ int i = 0;
+
+ idx = client->idx;
+ DRM_LOCK(dev);
+ TAILQ_FOREACH(pt, &dev->files, link) {
+ if (i == idx) {
+ client->auth = pt->authenticated;
+ client->pid = pt->pid;
+ client->uid = pt->uid;
+ client->magic = pt->magic;
+ client->iocs = pt->ioctl_count;
+ DRM_UNLOCK(dev);
+ return 0;
+ }
+ i++;
+ }
+ DRM_UNLOCK(dev);
+
+ return EINVAL;
+}
+
+int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_stats *stats = data;
+ int i;
+
+ memset(stats, 0, sizeof(struct drm_stats));
+
+ DRM_LOCK(dev);
+
+ for (i = 0; i < dev->counters; i++) {
+ if (dev->types[i] == _DRM_STAT_LOCK)
+ stats->data[i].value =
+ (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
+ else
+ stats->data[i].value = atomic_read(&dev->counts[i]);
+ stats->data[i].type = dev->types[i];
+ }
+
+ stats->count = dev->counters;
+
+ DRM_UNLOCK(dev);
+
+ return 0;
+}
+
+int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_get_cap *req = data;
+
+ req->value = 0;
+ switch (req->capability) {
+ case DRM_CAP_DUMB_BUFFER:
+ if (dev->driver->dumb_create)
+ req->value = 1;
+ break;
+ case DRM_CAP_VBLANK_HIGH_CRTC:
+ req->value = 1;
+ break;
+ case DRM_CAP_DUMB_PREFERRED_DEPTH:
+ req->value = dev->mode_config.preferred_depth;
+ break;
+ case DRM_CAP_DUMB_PREFER_SHADOW:
+ req->value = dev->mode_config.prefer_shadow;
+ break;
+ default:
+ return EINVAL;
+ }
+ return 0;
+}
+
+
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 2
+
+int drm_setversion(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_set_version *sv = data;
+ struct drm_set_version ver;
+ int if_version;
+
+ /* Save the incoming data, and set the response before continuing
+ * any further.
+ */
+ ver = *sv;
+ sv->drm_di_major = DRM_IF_MAJOR;
+ sv->drm_di_minor = DRM_IF_MINOR;
+ sv->drm_dd_major = dev->driver->major;
+ sv->drm_dd_minor = dev->driver->minor;
+
+ DRM_DEBUG("ver.drm_di_major %d ver.drm_di_minor %d "
+ "ver.drm_dd_major %d ver.drm_dd_minor %d\n",
+ ver.drm_di_major, ver.drm_di_minor, ver.drm_dd_major,
+ ver.drm_dd_minor);
+ DRM_DEBUG("sv->drm_di_major %d sv->drm_di_minor %d "
+ "sv->drm_dd_major %d sv->drm_dd_minor %d\n",
+ sv->drm_di_major, sv->drm_di_minor, sv->drm_dd_major,
+ sv->drm_dd_minor);
+
+ if (ver.drm_di_major != -1) {
+ if (ver.drm_di_major != DRM_IF_MAJOR ||
+ ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) {
+ return EINVAL;
+ }
+ if_version = DRM_IF_VERSION(ver.drm_di_major,
+ ver.drm_dd_minor);
+ dev->if_version = DRM_MAX(if_version, dev->if_version);
+ if (ver.drm_di_minor >= 1) {
+ /*
+ * Version 1.1 includes tying of DRM to specific device
+ */
+ drm_set_busid(dev);
+ }
+ }
+
+ if (ver.drm_dd_major != -1) {
+ if (ver.drm_dd_major != dev->driver->major ||
+ ver.drm_dd_minor < 0 ||
+ ver.drm_dd_minor > dev->driver->minor)
+ {
+ return EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+
+int drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ DRM_DEBUG("\n");
+ return 0;
+}
diff --git a/sys/dev/drm2/drm_irq.c b/sys/dev/drm2/drm_irq.c
new file mode 100644
index 0000000..0324e8c
--- /dev/null
+++ b/sys/dev/drm2/drm_irq.c
@@ -0,0 +1,1253 @@
+/*-
+ * Copyright 2003 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_irq.c
+ * Support code for handling setup/teardown of interrupt handlers and
+ * handing interrupt handlers off to the drivers.
+ */
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+
+MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data");
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+ (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+ ((count) % DRM_VBLANKTIME_RBSIZE)])
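+/* Example: a cooked count of 1003 on crtc 0 indexes slot
+ * 1003 % DRM_VBLANKTIME_RBSIZE, so successive vblanks cycle through the
+ * per-crtc ring slots without any extra bookkeeping.
+ */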
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_irq_busid *irq = data;
+
+ if ((irq->busnum >> 8) != dev->pci_domain ||
+ (irq->busnum & 0xff) != dev->pci_bus ||
+ irq->devnum != dev->pci_slot ||
+ irq->funcnum != dev->pci_func)
+ return EINVAL;
+
+ irq->irq = dev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n",
+ irq->busnum, irq->devnum, irq->funcnum, irq->irq);
+
+ return 0;
+}
+
+static void
+drm_irq_handler_wrap(void *arg)
+{
+ struct drm_device *dev = arg;
+
+ mtx_lock(&dev->irq_lock);
+ dev->driver->irq_handler(arg);
+ mtx_unlock(&dev->irq_lock);
+}
+
+int
+drm_irq_install(struct drm_device *dev)
+{
+ int retcode;
+
+ if (dev->irq == 0 || dev->dev_private == NULL)
+ return (EINVAL);
+
+ DRM_DEBUG("irq=%d\n", dev->irq);
+
+ DRM_LOCK(dev);
+ if (dev->irq_enabled) {
+ DRM_UNLOCK(dev);
+ return EBUSY;
+ }
+ dev->irq_enabled = 1;
+
+ dev->context_flag = 0;
+
+ /* Before installing handler */
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+ DRM_UNLOCK(dev);
+
+ /* Install handler */
+ retcode = bus_setup_intr(dev->device, dev->irqr,
+ INTR_TYPE_TTY | INTR_MPSAFE, NULL,
+ (dev->driver->driver_features & DRIVER_LOCKLESS_IRQ) != 0 ?
+ drm_irq_handler_wrap : dev->driver->irq_handler,
+ dev, &dev->irqh);
+ if (retcode != 0)
+ goto err;
+
+ /* After installing handler */
+ DRM_LOCK(dev);
+ if (dev->driver->irq_postinstall)
+ dev->driver->irq_postinstall(dev);
+ DRM_UNLOCK(dev);
+
+ return (0);
+err:
+ device_printf(dev->device, "Error setting interrupt: %d\n", retcode);
+ dev->irq_enabled = 0;
+
+ return (retcode);
+}
+
+int drm_irq_uninstall(struct drm_device *dev)
+{
+ int i;
+
+ if (!dev->irq_enabled)
+ return EINVAL;
+
+ dev->irq_enabled = 0;
+
+ /*
+ * Wake up any waiters so they don't hang.
+ */
+ if (dev->num_crtcs) {
+ mtx_lock(&dev->vbl_lock);
+ for (i = 0; i < dev->num_crtcs; i++) {
+ wakeup(&dev->_vblank_count[i]);
+ dev->vblank_enabled[i] = 0;
+ dev->last_vblank[i] =
+ dev->driver->get_vblank_counter(dev, i);
+ }
+ mtx_unlock(&dev->vbl_lock);
+ }
+
+ DRM_DEBUG("irq=%d\n", dev->irq);
+
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+
+ DRM_UNLOCK(dev);
+ bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
+ DRM_LOCK(dev);
+
+ return 0;
+}
+
+int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_control *ctl = data;
+ int err;
+
+ switch (ctl->func) {
+ case DRM_INST_HANDLER:
+		/* Handle drivers whose DRM used to require IRQ setup but
+		 * no longer does.
+		 */
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+ if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+ ctl->irq != dev->irq)
+ return EINVAL;
+ return drm_irq_install(dev);
+ case DRM_UNINST_HANDLER:
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+ DRM_LOCK(dev);
+ err = drm_irq_uninstall(dev);
+ DRM_UNLOCK(dev);
+ return err;
+ default:
+ return EINVAL;
+ }
+}
+
+#define NSEC_PER_USEC 1000L
+#define NSEC_PER_SEC 1000000000L
+
+int64_t
+timeval_to_ns(const struct timeval *tv)
+{
+ return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
+ tv->tv_usec * NSEC_PER_USEC;
+}
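+
+/* For example, { tv_sec = 1, tv_usec = 500000 } converts to
+ * 1 * NSEC_PER_SEC + 500000 * NSEC_PER_USEC = 1500000000 ns.
+ */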
+
+struct timeval
+ns_to_timeval(const int64_t nsec)
+{
+ struct timeval tv;
+	int64_t rem;
+
+ if (nsec == 0) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ return (tv);
+ }
+
+ tv.tv_sec = nsec / NSEC_PER_SEC;
+ rem = nsec % NSEC_PER_SEC;
+ if (rem < 0) {
+ tv.tv_sec--;
+ rem += NSEC_PER_SEC;
+ }
+ tv.tv_usec = rem / 1000;
+ return (tv);
+}
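+
+/* The signed remainder matters for negative inputs: nsec = -1 first yields
+ * tv_sec = 0 and rem = -1; the correction then produces tv_sec = -1 and
+ * tv_usec = 999999, i.e., one nanosecond before the epoch, as expected.
+ */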
+
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+ memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
+ DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+static int64_t
+abs64(int64_t x)
+{
+
+ return (x < 0 ? -x : x);
+}
+
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+ u32 vblcount;
+ int64_t diff_ns;
+ int vblrc;
+ struct timeval tvblank;
+
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irq's.
+ */
+ mtx_lock(&dev->vblank_time_lock);
+
+ dev->driver->disable_vblank(dev, crtc);
+ dev->vblank_enabled[crtc] = 0;
+
+ /* No further vblank irq's will be processed after
+ * this point. Get current hardware vblank count and
+ * vblank timestamp, repeat until they are consistent.
+ *
+ * FIXME: There is still a race condition here and in
+ * drm_update_vblank_count() which can cause off-by-one
+ * reinitialization of software vblank counter. If gpu
+ * vblank counter doesn't increment exactly at the leading
+ * edge of a vblank interval, then we can lose 1 count if
+ * we happen to execute between start of vblank and the
+ * delayed gpu counter increment.
+ */
+ do {
+ dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Compute time difference to stored timestamp of last vblank
+ * as updated by last invocation of drm_handle_vblank() in vblank irq.
+ */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* If there is at least 1 msec difference between the last stored
+ * timestamp and tvblank, then we are currently executing our
+ * disable inside a new vblank interval, the tvblank timestamp
+ * corresponds to this new vblank interval and the irq handler
+ * for this vblank didn't run yet and won't run due to our disable.
+ * Therefore we need to do the job of drm_handle_vblank() and
+ * increment the vblank counter by one to account for this vblank.
+ *
+ * Skip this step if there isn't any high precision timestamp
+ * available. In that case we can't account for this and just
+ * hope for the best.
+ */
+ if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
+ atomic_inc(&dev->_vblank_count[crtc]);
+ }
+
+ /* Invalidate all timestamps while vblank irq's are off. */
+ clear_vblank_timestamps(dev, crtc);
+
+ mtx_unlock(&dev->vblank_time_lock);
+}
+
+static void vblank_disable_fn(void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ int i;
+
+ if (!dev->vblank_disable_allowed)
+ return;
+
+ for (i = 0; i < dev->num_crtcs; i++) {
+ mtx_lock(&dev->vbl_lock);
+ if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+ dev->vblank_enabled[i]) {
+ DRM_DEBUG("disabling vblank on crtc %d\n", i);
+ vblank_disable_and_save(dev, i);
+ }
+ mtx_unlock(&dev->vbl_lock);
+ }
+}
+
+void drm_vblank_cleanup(struct drm_device *dev)
+{
+ /* Bail if the driver didn't call drm_vblank_init() */
+ if (dev->num_crtcs == 0)
+ return;
+
+ callout_stop(&dev->vblank_disable_callout);
+
+ vblank_disable_fn(dev);
+
+ free(dev->_vblank_count, DRM_MEM_VBLANK);
+ free(dev->vblank_refcount, DRM_MEM_VBLANK);
+ free(dev->vblank_enabled, DRM_MEM_VBLANK);
+ free(dev->last_vblank, DRM_MEM_VBLANK);
+ free(dev->last_vblank_wait, DRM_MEM_VBLANK);
+ free(dev->vblank_inmodeset, DRM_MEM_VBLANK);
+ free(dev->_vblank_time, DRM_MEM_VBLANK);
+
+ dev->num_crtcs = 0;
+}
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+ int i;
+
+ callout_init(&dev->vblank_disable_callout, CALLOUT_MPSAFE);
+#if 0
+ mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
+#endif
+ mtx_init(&dev->vblank_time_lock, "drmvtl", NULL, MTX_DEF);
+
+ dev->num_crtcs = num_crtcs;
+
+ dev->_vblank_count = malloc(sizeof(atomic_t) * num_crtcs,
+ DRM_MEM_VBLANK, M_WAITOK);
+ dev->vblank_refcount = malloc(sizeof(atomic_t) * num_crtcs,
+ DRM_MEM_VBLANK, M_WAITOK);
+ dev->vblank_enabled = malloc(num_crtcs * sizeof(int),
+ DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+ dev->last_vblank = malloc(num_crtcs * sizeof(u32),
+ DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+ dev->last_vblank_wait = malloc(num_crtcs * sizeof(u32),
+ DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+ dev->vblank_inmodeset = malloc(num_crtcs * sizeof(int),
+ DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+ dev->_vblank_time = malloc(num_crtcs * DRM_VBLANKTIME_RBSIZE *
+ sizeof(struct timeval), DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+ DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+ /* Driver specific high-precision vblank timestamping supported? */
+ if (dev->driver->get_vblank_timestamp)
+ DRM_INFO("Driver supports precise vblank timestamp query.\n");
+ else
+ DRM_INFO("No driver support for vblank timestamp query.\n");
+
+ /* Zero per-crtc vblank stuff */
+ for (i = 0; i < num_crtcs; i++) {
+ atomic_set(&dev->_vblank_count[i], 0);
+ atomic_set(&dev->vblank_refcount[i], 0);
+ }
+
+ dev->vblank_disable_allowed = 0;
+ return 0;
+}
+
+void
+drm_calc_timestamping_constants(struct drm_crtc *crtc)
+{
+ int64_t linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+ uint64_t dotclock;
+
+ /* Dot clock in Hz: */
+ dotclock = (uint64_t) crtc->hwmode.clock * 1000;
+
+	/* Fields of interlaced scanout modes are only half a frame duration.
+	 * Double the dotclock to get half the frame/line/pixel durations.
+	 */
+ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+ dotclock *= 2;
+
+ /* Valid dotclock? */
+ if (dotclock > 0) {
+ /* Convert scanline length in pixels and video dot clock to
+ * line duration, frame duration and pixel duration in
+ * nanoseconds:
+ */
+ pixeldur_ns = (int64_t)1000000000 / dotclock;
+ linedur_ns = ((uint64_t)crtc->hwmode.crtc_htotal *
+ 1000000000) / dotclock;
+ framedur_ns = (int64_t)crtc->hwmode.crtc_vtotal * linedur_ns;
+ } else
+ DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+ crtc->base.id);
+
+ crtc->pixeldur_ns = pixeldur_ns;
+ crtc->linedur_ns = linedur_ns;
+ crtc->framedur_ns = framedur_ns;
+
+ DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ crtc->base.id, crtc->hwmode.crtc_htotal,
+ crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+ DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+ crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+ (int) linedur_ns, (int) pixeldur_ns);
+}
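+
+/* Worked example (illustrative numbers, not computed by this driver): a
+ * 1920x1080@60 mode with a 148500 kHz dot clock, htotal 2200 and vtotal
+ * 1125 yields pixeldur_ns = 6, linedur_ns = 14814 and
+ * framedur_ns ~ 16666000, i.e., the expected ~16.7 ms frame duration.
+ */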
+
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as a no-op if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ * On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL - Invalid crtc.
+ * -EAGAIN   - Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int
+drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+ int *max_error, struct timeval *vblank_time, unsigned flags,
+ struct drm_crtc *refcrtc)
+{
+ struct timeval stime, raw_time;
+ struct drm_display_mode *mode;
+ int vbl_status, vtotal, vdisplay;
+ int vpos, hpos, i;
+ int64_t framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+ bool invbl;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Scanout position query not supported? Should not happen. */
+ if (!dev->driver->get_scanout_position) {
+ DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ return -EIO;
+ }
+
+ mode = &refcrtc->hwmode;
+ vtotal = mode->crtc_vtotal;
+ vdisplay = mode->crtc_vdisplay;
+
+ /* Durations of frames, lines, pixels in nanoseconds. */
+ framedur_ns = refcrtc->framedur_ns;
+ linedur_ns = refcrtc->linedur_ns;
+ pixeldur_ns = refcrtc->pixeldur_ns;
+
+ /* If mode timing undefined, just return as no-op:
+ * Happens during initial modesetting of a crtc.
+ */
+ if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+ DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+ return -EAGAIN;
+ }
+
+ /* Get current scanout position with system timestamp.
+ * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+ * if single query takes longer than max_error nanoseconds.
+ *
+ * This guarantees a tight bound on maximum error if
+ * code gets preempted or delayed for some reason.
+ */
+ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+ /* Disable preemption to make it very likely to
+ * succeed in the first iteration.
+ */
+ critical_enter();
+
+ /* Get system timestamp before query. */
+ getmicrouptime(&stime);
+
+ /* Get vertical and horizontal scanout pos. vpos, hpos. */
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+ /* Get system timestamp after query. */
+ getmicrouptime(&raw_time);
+
+ critical_exit();
+
+ /* Return as no-op if scanout query unsupported or failed. */
+ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+ DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+ crtc, vbl_status);
+ return -EIO;
+ }
+
+ duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+
+ /* Accept result with < max_error nsecs timing uncertainty. */
+ if (duration_ns <= (int64_t) *max_error)
+ break;
+ }
+
+ /* Noisy system timing? */
+ if (i == DRM_TIMESTAMP_MAXRETRIES) {
+ DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+ crtc, (int) duration_ns/1000, *max_error/1000, i);
+ }
+
+ /* Return upper bound of timestamp precision error. */
+ *max_error = (int) duration_ns;
+
+ /* Check if in vblank area:
+ * vpos is >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+ invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+ /* Convert scanout position into elapsed time at raw_time query
+ * since start of scanout at first display scanline. delta_ns
+ * can be negative if start of scanout hasn't happened yet.
+ */
+ delta_ns = (int64_t)vpos * linedur_ns + (int64_t)hpos * pixeldur_ns;
+
+ /* Is vpos outside nominal vblank area, but less than
+ * 1/100 of a frame height away from start of vblank?
+ * If so, assume this isn't a massively delayed vblank
+ * interrupt, but a vblank interrupt that fired a few
+ * microseconds before true start of vblank. Compensate
+ * by adding a full frame duration to the final timestamp.
+ * Happens, e.g., on ATI R500, R600.
+ *
+ * We only do this if DRM_CALLED_FROM_VBLIRQ.
+ */
+ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+ ((vdisplay - vpos) < vtotal / 100)) {
+ delta_ns = delta_ns - framedur_ns;
+
+ /* Signal this correction as "applied". */
+ vbl_status |= 0x8;
+ }
+
+ /* Subtract time delta from raw timestamp to get final
+ * vblank_time timestamp for end of vblank.
+ */
+ *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+
+ DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %jd.%jd -> %jd.%jd [e %d us, %d rep]\n",
+ crtc, (int)vbl_status, hpos, vpos, (uintmax_t)raw_time.tv_sec,
+ (uintmax_t)raw_time.tv_usec, (uintmax_t)vblank_time->tv_sec,
+ (uintmax_t)vblank_time->tv_usec, (int)duration_ns/1000, i);
+
+ vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+ if (invbl)
+ vbl_status |= DRM_VBLANKTIME_INVBL;
+
+ return vbl_status;
+}
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if the timestamp originates from an uncorrected microtime()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags)
+{
+ int ret = 0;
+
+ /* Define requested maximum error on timestamps (nanoseconds). */
+ int max_error = (int) drm_timestamp_precision * 1000;
+
+ /* Query driver if possible and precision timestamping enabled. */
+ if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+ ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ tvblank, flags);
+ if (ret > 0)
+ return (u32) ret;
+ }
+
+ /* GPU high precision timestamp query unsupported or failed.
+	 * Return microtime() timestamp as best estimate.
+ */
+ microtime(tvblank);
+
+ return 0;
+}
+
+/**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+ return atomic_read(&dev->_vblank_count[crtc]);
+}
+
+/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Also returns the system timestamp of the vblank
+ * interval that corresponds to the current vblank counter value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime)
+{
+ u32 cur_vblank;
+
+ /* Read timestamp from slot of _vblank_time ringbuffer
+ * that corresponds to current vblank count. Retry if
+ * count has incremented during readout. This works like
+ * a seqlock.
+ */
+ do {
+ cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+ *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ rmb();
+ } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+ return cur_vblank;
+}
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc). Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+ u32 cur_vblank, diff, tslot, rc;
+ struct timeval t_vblank;
+
+ /*
+ * Interrupts were disabled prior to this call, so deal with counter
+ * wrap if needed.
+ * NOTE! It's possible we lost a full dev->max_vblank_count events
+ * here if the register is small or we had vblank interrupts off for
+ * a long time.
+ *
+ * We repeat the hardware vblank counter & timestamp query until
+ * we get consistent results. This to prevent races between gpu
+ * updating its hardware counter while we are retrieving the
+ * corresponding vblank timestamp.
+ */
+ do {
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Deal with counter wrap */
+ diff = cur_vblank - dev->last_vblank[crtc];
+ if (cur_vblank < dev->last_vblank[crtc]) {
+ diff += dev->max_vblank_count;
+
+ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+ crtc, dev->last_vblank[crtc], cur_vblank, diff);
+ }
+
+ DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+ crtc, diff);
+
+ /* Reinitialize corresponding vblank timestamp if high-precision query
+ * available. Skip this step if query unsupported or failed. Will
+ * reinitialize delayed at next vblank interrupt in that case.
+ */
+ if (rc) {
+ tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+ vblanktimestamp(dev, crtc, tslot) = t_vblank;
+ }
+
+ atomic_add(diff, &dev->_vblank_count[crtc]);
+}
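+
+/* Wrap example: with a 24 bit hardware counter (max_vblank_count 0xffffff),
+ * last_vblank 0xfffffe and a fresh reading of 0x000001 yield
+ * diff = 0x000001 - 0xfffffe + 0x1000000 = 3 in 32 bit arithmetic.
+ */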
+
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
+{
+ int ret = 0;
+
+ mtx_lock(&dev->vbl_lock);
+ /* Going from 0->1 means we have to enable interrupts again */
+ if (atomic_fetchadd_int(&dev->vblank_refcount[crtc], 1) == 0) {
+ mtx_lock(&dev->vblank_time_lock);
+ if (!dev->vblank_enabled[crtc]) {
+ /* Enable vblank irqs under vblank_time_lock protection.
+ * All vblank count & timestamp updates are held off
+ * until we are done reinitializing master counter and
+			 * timestamps. Filter code in drm_handle_vblank() will
+ * prevent double-accounting of same vblank interval.
+ */
+ ret = -dev->driver->enable_vblank(dev, crtc);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+ crtc, ret);
+ if (ret)
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ else {
+ dev->vblank_enabled[crtc] = 1;
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+ mtx_unlock(&dev->vblank_time_lock);
+ } else {
+ if (!dev->vblank_enabled[crtc]) {
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ ret = EINVAL;
+ }
+ }
+ mtx_unlock(&dev->vbl_lock);
+
+ return ret;
+}
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+ KASSERT(atomic_read(&dev->vblank_refcount[crtc]) != 0,
+ ("Too many drm_vblank_put for crtc %d", crtc));
+
+ /* Last user schedules interrupt disable */
+ if (atomic_fetchadd_int(&dev->vblank_refcount[crtc], -1) == 1 &&
+ (drm_vblank_offdelay > 0))
+ callout_reset(&dev->vblank_disable_callout,
+ (drm_vblank_offdelay * DRM_HZ) / 1000,
+ vblank_disable_fn, dev);
+}
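+
+/* A typical (hypothetical) caller brackets its wait with get/put so the
+ * interrupt stays enabled exactly as long as needed:
+ *
+ *	if (drm_vblank_get(dev, crtc) == 0) {
+ *		... wait for or consume the next vblank ...
+ *		drm_vblank_put(dev, crtc);
+ *	}
+ */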
+
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned int seq;
+
+ mtx_lock(&dev->vbl_lock);
+ vblank_disable_and_save(dev, crtc);
+ mtx_lock(&dev->event_lock);
+ wakeup(&dev->_vblank_count[crtc]);
+
+ /* Send any queued vblank events, lest the natives grow disquiet */
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+		DRM_DEBUG("Sending premature vblank event on disable: "
+		    "wanted %d, current %d\n",
+		    e->event.sequence, seq);
+
+ e->event.sequence = seq;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, e->pipe);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ drm_event_wakeup(&e->base);
+ CTR3(KTR_DRM, "vblank_event_delivered %d %d %d",
+ e->base.pid, e->pipe, e->event.sequence);
+ }
+
+ mtx_unlock(&dev->event_lock);
+ mtx_unlock(&dev->vbl_lock);
+}
+
+/**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @post: post or pre mode set?
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
+ */
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+{
+	/* vblank is not initialized (IRQ not installed?) */
+ if (!dev->num_crtcs)
+ return;
+ /*
+ * To avoid all the problems that might happen if interrupts
+ * were enabled/disabled around or between these calls, we just
+ * have the kernel take a reference on the CRTC (just once though
+	 * to avoid corrupting the count if multiple, mismatched calls occur),
+ * so that interrupts remain enabled in the interim.
+ */
+ if (!dev->vblank_inmodeset[crtc]) {
+ dev->vblank_inmodeset[crtc] = 0x1;
+ if (drm_vblank_get(dev, crtc) == 0)
+ dev->vblank_inmodeset[crtc] |= 0x2;
+ }
+}
+
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+{
+
+ if (dev->vblank_inmodeset[crtc]) {
+ mtx_lock(&dev->vbl_lock);
+ dev->vblank_disable_allowed = 1;
+ mtx_unlock(&dev->vbl_lock);
+
+ if (dev->vblank_inmodeset[crtc] & 0x2)
+ drm_vblank_put(dev, crtc);
+
+ dev->vblank_inmodeset[crtc] = 0;
+ }
+}
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ *
+ * Generally the counter will reset across mode sets. If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_modeset_ctl *modeset = data;
+ int ret = 0;
+ unsigned int crtc;
+
+ /* If drm_vblank_init() hasn't been called yet, just no-op */
+ if (!dev->num_crtcs)
+ goto out;
+
+ crtc = modeset->crtc;
+ if (crtc >= dev->num_crtcs) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (modeset->cmd) {
+ case _DRM_PRE_MODESET:
+ drm_vblank_pre_modeset(dev, crtc);
+ break;
+ case _DRM_POST_MODESET:
+ drm_vblank_post_modeset(dev, crtc);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ return ret;
+}
+
+static void
+drm_vblank_event_destroy(struct drm_pending_event *e)
+{
+
+ free(e, DRM_MEM_VBLANK);
+}
+
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+ union drm_wait_vblank *vblwait,
+ struct drm_file *file_priv)
+{
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned int seq;
+ int ret;
+
+ e = malloc(sizeof *e, DRM_MEM_VBLANK, M_WAITOK | M_ZERO);
+
+ e->pipe = pipe;
+ e->base.pid = curproc->p_pid;
+ e->event.base.type = DRM_EVENT_VBLANK;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = vblwait->request.signal;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy = drm_vblank_event_destroy;
+
+ mtx_lock(&dev->event_lock);
+
+ if (file_priv->event_space < sizeof e->event) {
+ ret = EBUSY;
+ goto err_unlock;
+ }
+
+ file_priv->event_space -= sizeof e->event;
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
+ if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ vblwait->reply.sequence = vblwait->request.sequence;
+ }
+
+ DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+ vblwait->request.sequence, seq, pipe);
+
+ CTR4(KTR_DRM, "vblank_event_queued %d %d rt %x %d", curproc->p_pid, pipe,
+ vblwait->request.type, vblwait->request.sequence);
+
+ e->event.sequence = vblwait->request.sequence;
+ if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ e->event.sequence = seq;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, pipe);
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ drm_event_wakeup(&e->base);
+ vblwait->reply.sequence = seq;
+ CTR3(KTR_DRM, "vblank_event_wakeup p1 %d %d %d", curproc->p_pid,
+ pipe, vblwait->request.sequence);
+ } else {
+ /* drm_handle_vblank_events will call drm_vblank_put */
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+ vblwait->reply.sequence = vblwait->request.sequence;
+ }
+
+ mtx_unlock(&dev->event_lock);
+
+ return 0;
+
+err_unlock:
+ mtx_unlock(&dev->event_lock);
+ free(e, DRM_MEM_VBLANK);
+ drm_vblank_put(dev, pipe);
+ return ret;
+}
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+ * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * after a timeout with no further vblank waits scheduled).
+ */
+int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_wait_vblank *vblwait = data;
+ int ret = 0;
+ unsigned int flags, seq, crtc, high_crtc;
+
+ if (/*(!drm_dev_to_irq(dev)) || */(!dev->irq_enabled))
+ return (EINVAL);
+
+ if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+ return (EINVAL);
+
+ if (vblwait->request.type &
+ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK)) {
+ DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+ vblwait->request.type,
+ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK));
+ return (EINVAL);
+ }
+
+ flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+ high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+ if (high_crtc)
+ crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ else
+ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+ if (crtc >= dev->num_crtcs)
+ return (EINVAL);
+
+ ret = drm_vblank_get(dev, crtc);
+ if (ret) {
+ DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
+ return (ret);
+ }
+ seq = drm_vblank_count(dev, crtc);
+
+ switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
+ case _DRM_VBLANK_RELATIVE:
+ vblwait->request.sequence += seq;
+ vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
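+		/* FALLTHROUGH */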
+ case _DRM_VBLANK_ABSOLUTE:
+ break;
+ default:
+ ret = (EINVAL);
+ goto done;
+ }
+
+ if (flags & _DRM_VBLANK_EVENT) {
+ /* must hold on to the vblank ref until the event fires
+ * drm_vblank_put will be called asynchronously
+ */
+ return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+ }
+
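+	/* Sequence numbers are compared modulo 2^32: e.g., seq = 10 with a
+	 * requested sequence of 8 gives a difference of 2 <= (1 << 23), so
+	 * the request is treated as lying in the recent past and is missed.
+	 */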
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1<<23)) {
+ vblwait->request.sequence = seq + 1;
+ }
+
+ dev->last_vblank_wait[crtc] = vblwait->request.sequence;
+ mtx_lock(&dev->vblank_time_lock);
+ while (((drm_vblank_count(dev, crtc) - vblwait->request.sequence) >
+ (1 << 23)) && dev->irq_enabled) {
+ /*
+ * The wakeups from the drm_irq_uninstall() and
+ * drm_vblank_off() may be lost there since vbl_lock
+ * is not held. Then, the timeout will wake us; the 3
+ * seconds delay should not be a problem for
+ * application when crtc is disabled or irq
+ * uninstalled anyway.
+ */
+ ret = msleep(&dev->_vblank_count[crtc], &dev->vblank_time_lock,
+ PCATCH, "drmvbl", 3 * hz);
+ if (ret != 0)
+ break;
+ }
+ mtx_unlock(&dev->vblank_time_lock);
+ if (ret != EINTR) {
+ struct timeval now;
+ long reply_seq;
+
+ reply_seq = drm_vblank_count_and_time(dev, crtc, &now);
+ CTR5(KTR_DRM, "wait_vblank %d %d rt %x success %d %d",
+ curproc->p_pid, crtc, vblwait->request.type,
+ vblwait->request.sequence, reply_seq);
+ vblwait->reply.sequence = reply_seq;
+ vblwait->reply.tval_sec = now.tv_sec;
+ vblwait->reply.tval_usec = now.tv_usec;
+ } else {
+ CTR5(KTR_DRM, "wait_vblank %d %d rt %x error %d %d",
+ curproc->p_pid, crtc, vblwait->request.type, ret,
+ vblwait->request.sequence);
+ }
+
+done:
+ drm_vblank_put(dev, crtc);
+ return ret;
+}
+
+void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned int seq;
+
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ CTR2(KTR_DRM, "drm_handle_vblank_events %d %d", seq, crtc);
+
+ mtx_lock(&dev->event_lock);
+
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+ if ((seq - e->event.sequence) > (1<<23))
+ continue;
+
+ e->event.sequence = seq;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, e->pipe);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ drm_event_wakeup(&e->base);
+ CTR3(KTR_DRM, "vblank_event_wakeup p2 %d %d %d", e->base.pid,
+ e->pipe, e->event.sequence);
+ }
+
+ mtx_unlock(&dev->event_lock);
+}
+
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+ u32 vblcount;
+ int64_t diff_ns;
+ struct timeval tvblank;
+
+ if (!dev->num_crtcs)
+ return false;
+
+ /* Need timestamp lock to prevent concurrent execution with
+ * vblank enable/disable, as this would cause inconsistent
+ * or corrupted timestamps and vblank counts.
+ */
+ mtx_lock(&dev->vblank_time_lock);
+
+ /* Vblank irq handling disabled. Nothing to do. */
+ if (!dev->vblank_enabled[crtc]) {
+ mtx_unlock(&dev->vblank_time_lock);
+ return false;
+ }
+
+ /* Fetch corresponding timestamp for this vblank interval from
+ * driver and store it in proper slot of timestamp ringbuffer.
+ */
+
+ /* Get current timestamp and count. */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+ /* Compute time difference to timestamp of last vblank */
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* Update vblank timestamp and count if at least
+ * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+ * difference between last stored timestamp and current
+ * timestamp. A smaller difference means basically
+ * identical timestamps. Happens if this vblank has
+ * been already processed and this is a redundant call,
+ * e.g., due to spurious vblank interrupts. We need to
+ * ignore those for accounting.
+ */
+ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ /* Store new timestamp in ringbuffer. */
+ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+
+ /* Increment cooked vblank count. This also atomically commits
+ * the timestamp computed above.
+ */
+ atomic_inc(&dev->_vblank_count[crtc]);
+ } else {
+ DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+ crtc, (int) diff_ns);
+ }
+
+ wakeup(&dev->_vblank_count[crtc]);
+ drm_handle_vblank_events(dev, crtc);
+
+ mtx_unlock(&dev->vblank_time_lock);
+ return true;
+}
diff --git a/sys/dev/drm2/drm_linux_list.h b/sys/dev/drm2/drm_linux_list.h
new file mode 100644
index 0000000..3b23a30
--- /dev/null
+++ b/sys/dev/drm2/drm_linux_list.h
@@ -0,0 +1,177 @@
+/* drm_linux_list.h -- linux list functions for the BSDs.
+ * Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
+ */
+/*-
+ * Copyright 2003 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _DRM_LINUX_LIST_H_
+#define _DRM_LINUX_LIST_H_
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define list_entry(ptr, type, member) container_of(ptr,type,member)
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+static __inline__ void
+INIT_LIST_HEAD(struct list_head *head) {
+ (head)->next = head;
+ (head)->prev = head;
+}
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define DRM_LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+static __inline__ int
+list_empty(const struct list_head *head) {
+ return (head)->next == head;
+}
+
+static __inline__ void
+list_add(struct list_head *new, struct list_head *head) {
+ (head)->next->prev = new;
+ (new)->next = (head)->next;
+ (new)->prev = head;
+ (head)->next = new;
+}
+
+static __inline__ void
+list_add_tail(struct list_head *entry, struct list_head *head) {
+ (entry)->prev = (head)->prev;
+ (entry)->next = head;
+ (head)->prev->next = entry;
+ (head)->prev = entry;
+}
+
+static __inline__ void
+list_del(struct list_head *entry) {
+ (entry)->next->prev = (entry)->prev;
+ (entry)->prev->next = (entry)->next;
+}
+
+static inline void list_replace(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->next->prev = new;
+ new->prev = old->prev;
+ new->prev->next = new;
+}
+
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ list_del(list);
+ list_add(list, head);
+}
+
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ list_del(list);
+ list_add_tail(list, head);
+}
+
+static __inline__ void
+list_del_init(struct list_head *entry) {
+ (entry)->next->prev = (entry)->prev;
+ (entry)->prev->next = (entry)->next;
+ INIT_LIST_HEAD(entry);
+}
+
+#define list_for_each(entry, head) \
+ for (entry = (head)->next; entry != head; entry = (entry)->next)
+
+#define list_for_each_prev(entry, head) \
+ for (entry = (head)->prev; entry != (head); \
+ entry = entry->prev)
+
+#define list_for_each_safe(entry, temp, head) \
+ for (entry = (head)->next, temp = (entry)->next; \
+ entry != head; \
+ entry = temp, temp = entry->next)
+
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_entry((head)->next, __typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, __typeof(*pos), member))
+
+#define list_for_each_entry_continue_reverse(pos, head, member) \
+ for (pos = list_entry(pos->member.prev, __typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.prev, __typeof(*pos), member))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, __typeof(*pos), member), \
+ n = list_entry(pos->member.next, __typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, __typeof(*n), member))
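+
+/*
+ * Example (hypothetical element type): entries may be freed while
+ * iterating, because the cursor is advanced before the body runs:
+ *
+ *	struct item { int v; struct list_head link; } *it, *tmp;
+ *	list_for_each_entry_safe(it, tmp, &head, link) {
+ *		list_del(&it->link);
+ *		free(it, M_TEMP);
+ *	}
+ */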
+
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+static inline void
+__list_splice(const struct list_head *list, struct list_head *prev,
+ struct list_head *next)
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+
+ first->prev = prev;
+ prev->next = first;
+
+ last->next = next;
+ next->prev = last;
+}
+
+static inline void
+list_splice(const struct list_head *list, struct list_head *head)
+{
+ if (list_empty(list))
+ return;
+
+ __list_splice(list, head, head->next);
+}
+
+void drm_list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
+ struct list_head *a, struct list_head *b));
+
+#endif /* _DRM_LINUX_LIST_H_ */
diff --git a/sys/dev/drm2/drm_linux_list_sort.c b/sys/dev/drm2/drm_linux_list_sort.c
new file mode 100644
index 0000000..973ce4a
--- /dev/null
+++ b/sys/dev/drm2/drm_linux_list_sort.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <dev/drm2/drmP.h>
+__FBSDID("$FreeBSD$");
+
+struct drm_list_sort_thunk {
+ int (*cmp)(void *, struct list_head *, struct list_head *);
+ void *priv;
+};
+
+static int
+drm_le_cmp(void *priv, const void *d1, const void *d2)
+{
+ struct list_head *le1, *le2;
+ struct drm_list_sort_thunk *thunk;
+
+ thunk = priv;
+ le1 = __DECONST(struct list_head *, d1);
+ le2 = __DECONST(struct list_head *, d2);
+ return ((thunk->cmp)(thunk->priv, le1, le2));
+}
+
+/*
+ * Punt and use array sort.
+ */
+void
+drm_list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
+ struct list_head *a, struct list_head *b))
+{
+ struct drm_list_sort_thunk thunk;
+ struct list_head **ar, *le;
+ int count, i;
+
+ count = 0;
+ list_for_each(le, head)
+ count++;
+ ar = malloc(sizeof(struct list_head *) * count, M_TEMP, M_WAITOK);
+ i = 0;
+ list_for_each(le, head)
+ ar[i++] = le;
+ thunk.cmp = cmp;
+ thunk.priv = priv;
+ qsort_r(ar, count, sizeof(struct list_head *), &thunk, drm_le_cmp);
+ INIT_LIST_HEAD(head);
+ for (i = 0; i < count; i++)
+ list_add_tail(ar[i], head);
+ free(ar, M_TEMP);
+}
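+
+/*
+ * Illustrative use (hypothetical element type and comparator): sort
+ * ascending by a field, with the comparator returning <0, 0 or >0:
+ *
+ *	static int
+ *	cmp_v(void *priv, struct list_head *a, struct list_head *b)
+ *	{
+ *		return (list_entry(a, struct item, link)->v -
+ *		    list_entry(b, struct item, link)->v);
+ *	}
+ *	...
+ *	drm_list_sort(NULL, &head, cmp_v);
+ */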
diff --git a/sys/dev/drm2/drm_lock.c b/sys/dev/drm2/drm_lock.c
new file mode 100644
index 0000000..8ae2305
--- /dev/null
+++ b/sys/dev/drm2/drm_lock.c
@@ -0,0 +1,199 @@
+/*-
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_lock.c
+ * Implementation of the ioctls and other support code for dealing with the
+ * hardware lock.
+ *
+ * The DRM hardware lock is a shared structure between the kernel and userland.
+ *
+ * On uncontended access where the new context was the last context, the
+ * client may take the lock without dropping down into the kernel, using atomic
+ * compare-and-set.
+ *
+ * If the client finds during compare-and-set that it was not the last owner
+ * of the lock, it calls the DRM lock ioctl, which may sleep waiting for the
+ * lock, and may have side-effects of kernel-managed context switching.
+ *
+ * When the client releases the lock, if the lock is marked as being contended
+ * by another client, then the DRM unlock ioctl is called so that the
+ * contending client may be woken up.
+ */
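+
+/*
+ * An illustrative sketch of that userspace fast path (not part of this
+ * file): the lock-word layout matches drm_lock_take() below, but the
+ * function name, "fd" and "args" are assumptions for the example.
+ */
+#if 0
+static void
+take_lock_example(volatile unsigned int *lock, unsigned int context, int fd)
+{
+ struct drm_lock args;
+
+ /* Uncontended: we were the last owner, take the lock in userspace. */
+ if (atomic_cmpset_int(lock, context, context | _DRM_LOCK_HELD))
+ return;
+ /* Contended: fall back to the lock ioctl, which may sleep. */
+ args.context = context;
+ args.flags = 0;
+ ioctl(fd, DRM_IOCTL_LOCK, &args);
+}
+#endif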
+
+#include <dev/drm2/drmP.h>
+
+int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_lock *lock = data;
+ int ret = 0;
+
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ DRM_CURRENTPID, lock->context);
+ return EINVAL;
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
+ lock->flags);
+
+ if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) &&
+ lock->context < 0)
+ return EINVAL;
+
+ DRM_LOCK(dev);
+ for (;;) {
+ if (drm_lock_take(&dev->lock, lock->context)) {
+ dev->lock.file_priv = file_priv;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ ret = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
+ PCATCH, "drmlk2", 0);
+ if (ret != 0)
+ break;
+ }
+ DRM_UNLOCK(dev);
+
+ if (ret == ERESTART)
+ DRM_DEBUG("restarting syscall\n");
+ else
+ DRM_DEBUG("%d %s\n", lock->context,
+ ret ? "interrupted" : "has lock");
+
+ if (ret != 0)
+ return ret;
+
+ /* XXX: Add signal blocking here */
+
+ if (dev->driver->dma_quiescent != NULL &&
+ (lock->flags & _DRM_LOCK_QUIESCENT))
+ dev->driver->dma_quiescent(dev);
+
+ return 0;
+}
+
+int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_lock *lock = data;
+
+ DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
+ lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
+ lock->flags);
+
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ DRM_CURRENTPID, lock->context);
+ return EINVAL;
+ }
+
+ atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+ DRM_LOCK(dev);
+ drm_lock_transfer(&dev->lock, DRM_KERNEL_CONTEXT);
+
+ if (drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ DRM_UNLOCK(dev);
+
+ return 0;
+}
+
+int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
+{
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ unsigned int old, new;
+
+ do {
+ old = *lock;
+ if (old & _DRM_LOCK_HELD)
+ new = old | _DRM_LOCK_CONT;
+ else
+ new = context | _DRM_LOCK_HELD;
+ } while (!atomic_cmpset_int(lock, old, new));
+
+ if (_DRM_LOCKING_CONTEXT(old) == context) {
+ if (old & _DRM_LOCK_HELD) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
+ }
+ return 0;
+ }
+ }
+ if (new == (context | _DRM_LOCK_HELD)) {
+ /* Have lock */
+ return 1;
+ }
+ return 0;
+}
+
+/* This takes a lock forcibly and hands it to context. Should ONLY be used
+ inside *_unlock to give the lock to the kernel before calling
+ *_dma_schedule. */
+int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context)
+{
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ unsigned int old, new;
+
+ lock_data->file_priv = NULL;
+ do {
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
+ } while (!atomic_cmpset_int(lock, old, new));
+
+ return 1;
+}
+
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
+{
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ unsigned int old, new;
+
+ lock_data->file_priv = NULL;
+ do {
+ old = *lock;
+ new = 0;
+ } while (!atomic_cmpset_int(lock, old, new));
+
+ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+ DRM_ERROR("%d freed heavyweight lock held by %d\n",
+ context, _DRM_LOCKING_CONTEXT(old));
+ return 1;
+ }
+ DRM_WAKEUP_INT((void *)&lock_data->lock_queue);
+ return 0;
+}
diff --git a/sys/dev/drm2/drm_memory.c b/sys/dev/drm2/drm_memory.c
new file mode 100644
index 0000000..fa48197
--- /dev/null
+++ b/sys/dev/drm2/drm_memory.c
@@ -0,0 +1,127 @@
+/*-
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Konstantin Belousov
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_memory.c
+ * Wrappers for kernel memory allocation routines, and MTRR management support.
+ *
+ * This file previously implemented a memory consumption tracking system using
+ * the "area" argument for various different types of allocations, but that
+ * has been stripped out for now.
+ */
+
+#include <dev/drm2/drmP.h>
+
+MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
+MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
+MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
+MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
+MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
+MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
+MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
+MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
+MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
+MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
+MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
+MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
+MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
+MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
+MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
+MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
+ "DRM CTXBITMAP Data Structures");
+MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
+MALLOC_DEFINE(DRM_MEM_DRAWABLE, "drm_drawable", "DRM DRAWABLE Data Structures");
+MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
+MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
+MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
+
+void drm_mem_init(void)
+{
+}
+
+void drm_mem_uninit(void)
+{
+}
+
+void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map)
+{
+ return pmap_mapdev_attr(map->offset, map->size, VM_MEMATTR_WRITE_COMBINING);
+}
+
+void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map)
+{
+ return pmap_mapdev(map->offset, map->size);
+}
+
+void drm_ioremapfree(drm_local_map_t *map)
+{
+ pmap_unmapdev((vm_offset_t) map->virtual, map->size);
+}
+
+int
+drm_mtrr_add(unsigned long offset, size_t size, int flags)
+{
+ int act;
+ struct mem_range_desc mrdesc;
+
+ mrdesc.mr_base = offset;
+ mrdesc.mr_len = size;
+ mrdesc.mr_flags = flags;
+ act = MEMRANGE_SET_UPDATE;
+ strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
+ return mem_range_attr_set(&mrdesc, &act);
+}
+
+int
+drm_mtrr_del(int __unused handle, unsigned long offset, size_t size, int flags)
+{
+ int act;
+ struct mem_range_desc mrdesc;
+
+ mrdesc.mr_base = offset;
+ mrdesc.mr_len = size;
+ mrdesc.mr_flags = flags;
+ act = MEMRANGE_SET_REMOVE;
+ strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
+ return mem_range_attr_set(&mrdesc, &act);
+}
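+
+/*
+ * Illustrative usage sketch (not part of this file): map a framebuffer
+ * aperture write-combined and cover it with an MTRR.  "fb_map" is an
+ * assumption, and DRM_MTRR_WC is assumed to name the write-combining
+ * flag from drmP.h.
+ */
+#if 0
+static void *
+map_aperture_example(struct drm_device *dev, drm_local_map_t *fb_map)
+{
+
+ (void)drm_mtrr_add(fb_map->offset, fb_map->size, DRM_MTRR_WC);
+ return (drm_ioremap_wc(dev, fb_map));
+}
+#endif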
+
+void
+drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
+{
+
+ pmap_invalidate_cache_pages(pages, num_pages);
+}
diff --git a/sys/dev/drm2/drm_mm.c b/sys/dev/drm2/drm_mm.c
new file mode 100644
index 0000000..cb35fc0
--- /dev/null
+++ b/sys/dev/drm2/drm_mm.c
@@ -0,0 +1,563 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple and there might be substantial
+ * performance gains if a smarter free list is implemented. Currently it is
+ * just an unordered stack of free regions. This could easily be improved by
+ * using an RB-tree instead, at least if heavy fragmentation is expected.
+ *
+ * Aligned allocations can also see improvement.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_mm.h>
+
+#define MM_UNUSED_TARGET 4
+
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+{
+ struct drm_mm_node *child;
+
+ child = malloc(sizeof(*child), DRM_MEM_MM, M_ZERO |
+ (atomic ? M_NOWAIT : M_WAITOK));
+
+ if (unlikely(child == NULL)) {
+ mtx_lock(&mm->unused_lock);
+ if (list_empty(&mm->unused_nodes))
+ child = NULL;
+ else {
+ child =
+ list_entry(mm->unused_nodes.next,
+ struct drm_mm_node, node_list);
+ list_del(&child->node_list);
+ --mm->num_unused;
+ }
+ mtx_unlock(&mm->unused_lock);
+ }
+ return child;
+}
+
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+ struct drm_mm_node *node;
+
+ mtx_lock(&mm->unused_lock);
+ while (mm->num_unused < MM_UNUSED_TARGET) {
+ mtx_unlock(&mm->unused_lock);
+ node = malloc(sizeof(*node), DRM_MEM_MM, M_WAITOK);
+ mtx_lock(&mm->unused_lock);
+
+ if (unlikely(node == NULL)) {
+ int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+ mtx_unlock(&mm->unused_lock);
+ return ret;
+ }
+ ++mm->num_unused;
+ list_add_tail(&node->node_list, &mm->unused_nodes);
+ }
+ mtx_unlock(&mm->unused_lock);
+ return 0;
+}
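+
+/*
+ * Illustrative pattern (not part of this file): drm_mm_pre_get() refills
+ * the unused-node cache while sleeping is still allowed, so that a later
+ * atomic allocation under a lock cannot fail for lack of memory.
+ * "lock_example" and "hole_example" are assumptions.
+ */
+#if 0
+ if (drm_mm_pre_get(mm) != 0)
+ return (-ENOMEM);
+ mtx_lock(&lock_example);
+ node = drm_mm_get_block_atomic(hole_example, size, alignment);
+ mtx_unlock(&lock_example);
+#endif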
+
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+ return hole_node->start + hole_node->size;
+}
+
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ struct drm_mm_node *next_node =
+ list_entry(hole_node->node_list.next, struct drm_mm_node,
+ node_list);
+
+ return next_node->start;
+}
+
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+ KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));
+
+ if (alignment)
+ tmp = hole_start % alignment;
+
+ if (!tmp) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
+ } else
+ wasted = alignment - tmp;
+
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ KASSERT(node->start + node->size <= hole_end, ("hole pos"));
+
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ } else {
+ node->hole_follows = 0;
+ }
+}
+
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ int atomic)
+{
+ struct drm_mm_node *node;
+
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ drm_mm_insert_helper(hole_node, node, size, alignment);
+
+ return node;
+}
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ struct drm_mm_node *hole_node;
+
+ hole_node = drm_mm_search_free(mm, size, alignment, 0);
+ if (!hole_node)
+ return -ENOSPC;
+
+ drm_mm_insert_helper(hole_node, node, size, alignment);
+
+ return 0;
+}
+
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
+{
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+ KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));
+
+ if (hole_start < start)
+ wasted += start - hole_start;
+ if (alignment)
+ tmp = (hole_start + wasted) % alignment;
+
+ if (tmp)
+ wasted += alignment - tmp;
+
+ if (!wasted) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
+ }
+
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ KASSERT(node->start + node->size <= hole_end, ("hole_end"));
+ KASSERT(node->start + node->size <= end, ("end"));
+
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ } else {
+ node->hole_follows = 0;
+ }
+}
+
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
+{
+ struct drm_mm_node *node;
+
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
+
+ return node;
+}
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
+{
+ struct drm_mm_node *hole_node;
+
+ hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+ start, end, 0);
+ if (!hole_node)
+ return -ENOSPC;
+
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
+
+ return 0;
+}
+
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+
+ KASSERT(!node->scanned_block && !node->scanned_prev_free
+ && !node->scanned_next_free, ("node"));
+
+ prev_node =
+ list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+ if (node->hole_follows) {
+ KASSERT(drm_mm_hole_node_start(node)
+ != drm_mm_hole_node_end(node), ("hole_follows"));
+ list_del(&node->hole_stack);
+ } else
+ KASSERT(drm_mm_hole_node_start(node)
+ == drm_mm_hole_node_end(node), ("!hole_follows"));
+
+ if (!prev_node->hole_follows) {
+ prev_node->hole_follows = 1;
+ list_add(&prev_node->hole_stack, &mm->hole_stack);
+ } else
+ list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+ list_del(&node->node_list);
+ node->allocated = 0;
+}
+
+/*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
+void drm_mm_put_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+
+ drm_mm_remove_node(node);
+
+ mtx_lock(&mm->unused_lock);
+ if (mm->num_unused < MM_UNUSED_TARGET) {
+ list_add(&node->node_list, &mm->unused_nodes);
+ ++mm->num_unused;
+ } else
+ free(node, DRM_MEM_MM);
+ mtx_unlock(&mm->unused_lock);
+}
+
+static int check_free_hole(unsigned long start, unsigned long end,
+ unsigned long size, unsigned alignment)
+{
+ unsigned wasted = 0;
+
+ if (end - start < size)
+ return 0;
+
+ if (alignment) {
+ unsigned tmp = start % alignment;
+ if (tmp)
+ wasted = alignment - tmp;
+ }
+
+ if (end >= start + size + wasted) {
+ return 1;
+ }
+
+ return 0;
+}
+
+
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment, int best_match)
+{
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long best_size;
+
+ best = NULL;
+ best_size = ~0UL;
+
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ KASSERT(entry->hole_follows, ("hole_follows"));
+ if (!check_free_hole(drm_mm_hole_node_start(entry),
+ drm_mm_hole_node_end(entry),
+ size, alignment))
+ continue;
+
+ if (!best_match)
+ return entry;
+
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+
+ return best;
+}
+
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int best_match)
+{
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long best_size;
+
+ KASSERT(!mm->scanned_blocks, ("scanned"));
+
+ best = NULL;
+ best_size = ~0UL;
+
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+ start : drm_mm_hole_node_start(entry);
+ unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+ end : drm_mm_hole_node_end(entry);
+
+ KASSERT(entry->hole_follows, ("hole_follows"));
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
+ continue;
+
+ if (!best_match)
+ return entry;
+
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+
+ return best;
+}
+
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+ list_replace(&old->node_list, &new->node_list);
+ list_replace(&old->hole_stack, &new->hole_stack);
+ new->hole_follows = old->hole_follows;
+ new->mm = old->mm;
+ new->start = old->start;
+ new->size = old->size;
+
+ old->allocated = 0;
+ new->allocated = 1;
+}
+
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+ unsigned alignment)
+{
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_size = 0;
+ mm->scan_check_range = 0;
+ mm->prev_scanned_node = NULL;
+}
+
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_size = 0;
+ mm->scan_start = start;
+ mm->scan_end = end;
+ mm->scan_check_range = 1;
+ mm->prev_scanned_node = NULL;
+}
+
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+ unsigned long hole_start, hole_end;
+ unsigned long adj_start;
+ unsigned long adj_end;
+
+ mm->scanned_blocks++;
+
+ KASSERT(!node->scanned_block, ("node->scanned_block"));
+ node->scanned_block = 1;
+
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ node->scanned_preceeds_hole = prev_node->hole_follows;
+ prev_node->hole_follows = 1;
+ list_del(&node->node_list);
+ node->node_list.prev = &prev_node->node_list;
+ node->node_list.next = &mm->prev_scanned_node->node_list;
+ mm->prev_scanned_node = node;
+
+ hole_start = drm_mm_hole_node_start(prev_node);
+ hole_end = drm_mm_hole_node_end(prev_node);
+ if (mm->scan_check_range) {
+ adj_start = hole_start < mm->scan_start ?
+ mm->scan_start : hole_start;
+ adj_end = hole_end > mm->scan_end ?
+ mm->scan_end : hole_end;
+ } else {
+ adj_start = hole_start;
+ adj_end = hole_end;
+ }
+
+ if (check_free_hole(adj_start, adj_end,
+ mm->scan_size, mm->scan_alignment)) {
+ mm->scan_hit_start = hole_start;
+ mm->scan_hit_size = hole_end - hole_start;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+
+ mm->scanned_blocks--;
+
+ KASSERT(node->scanned_block, ("scanned_block"));
+ node->scanned_block = 0;
+
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ prev_node->hole_follows = node->scanned_preceeds_hole;
+ INIT_LIST_HEAD(&node->node_list);
+ list_add(&node->node_list, &prev_node->node_list);
+
+ /* Only need to check for containment because start&size for the
+ * complete resulting free block (not just the desired part) is
+ * stored. */
+ if (node->start >= mm->scan_hit_start &&
+ node->start + node->size
+ <= mm->scan_hit_start + mm->scan_hit_size) {
+ return 1;
+ }
+
+ return 0;
+}
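+
+/*
+ * Illustrative eviction-scan sketch (not part of this file; see
+ * i915/i915_gem_evict.c in this commit for a real consumer).  Blocks are
+ * speculatively added until a large enough hole appears; every added
+ * block must then be removed again, in reverse order, and only those for
+ * which drm_mm_scan_remove_block() returns 1 need to be evicted.  The
+ * object type and the list names below are assumptions.
+ */
+#if 0
+ drm_mm_init_scan(mm, size, alignment);
+ found = 0;
+ list_for_each_entry(obj, &lru_example, lru_link) {
+ /* LIFO unwind list: the removal loop runs in reverse order. */
+ list_add(&obj->unwind_link, &unwind_example);
+ if (drm_mm_scan_add_block(&obj->mm_node)) {
+ found = 1;
+ break;
+ }
+ }
+ list_for_each_entry_safe(obj, next, &unwind_example, unwind_link) {
+ evict = drm_mm_scan_remove_block(&obj->mm_node);
+ list_del(&obj->unwind_link);
+ if (found && evict)
+ evict_object_example(obj); /* frees obj->mm_node */
+ }
+#endif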
+
+int drm_mm_clean(struct drm_mm * mm)
+{
+ struct list_head *head = &mm->head_node.node_list;
+
+ return (head->next->next == head);
+}
+
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+{
+ INIT_LIST_HEAD(&mm->hole_stack);
+ INIT_LIST_HEAD(&mm->unused_nodes);
+ mm->num_unused = 0;
+ mm->scanned_blocks = 0;
+ mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);
+
+ INIT_LIST_HEAD(&mm->head_node.node_list);
+ INIT_LIST_HEAD(&mm->head_node.hole_stack);
+ mm->head_node.hole_follows = 1;
+ mm->head_node.scanned_block = 0;
+ mm->head_node.scanned_prev_free = 0;
+ mm->head_node.scanned_next_free = 0;
+ mm->head_node.mm = mm;
+ /*
+ * The head node is a sentinel: it is placed at the very end of the
+ * range and given a size that wraps around, so the hole following it
+ * covers the whole [start, start + size) interval.
+ */
+ mm->head_node.start = start + size;
+ mm->head_node.size = start - mm->head_node.start;
+ list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+ return 0;
+}
+
+void drm_mm_takedown(struct drm_mm * mm)
+{
+ struct drm_mm_node *entry, *next;
+
+ if (!list_empty(&mm->head_node.node_list)) {
+ DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+ return;
+ }
+
+ mtx_lock(&mm->unused_lock);
+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+ list_del(&entry->node_list);
+ free(entry, DRM_MEM_MM);
+ --mm->num_unused;
+ }
+ mtx_unlock(&mm->unused_lock);
+
+ mtx_destroy(&mm->unused_lock);
+
+ KASSERT(mm->num_unused == 0, ("num_unused != 0"));
+}
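+
+#if 0
+/*
+ * Illustrative lifecycle sketch (not part of this file): manage a 16 MB
+ * range with caller-provided node storage.  Error handling is elided.
+ */
+static void
+drm_mm_example(void)
+{
+ struct drm_mm mm;
+ struct drm_mm_node node;
+
+ drm_mm_init(&mm, 0, 16 * 1024 * 1024);
+ memset(&node, 0, sizeof(node));
+ if (drm_mm_insert_node(&mm, &node, 4096, 4096) == 0) {
+ /* node.start is now a 4 KB aligned offset in [0, 16 MB). */
+ drm_mm_remove_node(&node);
+ }
+ drm_mm_takedown(&mm);
+}
+#endif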
diff --git a/sys/dev/drm2/drm_mm.h b/sys/dev/drm2/drm_mm.h
new file mode 100644
index 0000000..7b173af
--- /dev/null
+++ b/sys/dev/drm2/drm_mm.h
@@ -0,0 +1,185 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Authors:
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_MM_H_
+#define _DRM_MM_H_
+
+#include <dev/drm2/drm_linux_list.h>
+
+struct drm_mm_node {
+ struct list_head node_list;
+ struct list_head hole_stack;
+ unsigned hole_follows : 1;
+ unsigned scanned_block : 1;
+ unsigned scanned_prev_free : 1;
+ unsigned scanned_next_free : 1;
+ unsigned scanned_preceeds_hole : 1;
+ unsigned allocated : 1;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+ void *private;
+};
+
+struct drm_mm {
+ struct list_head hole_stack;
+ struct drm_mm_node head_node;
+ struct list_head unused_nodes;
+ int num_unused;
+ struct mtx unused_lock;
+ unsigned int scan_check_range : 1;
+ unsigned scan_alignment;
+ unsigned long scan_size;
+ unsigned long scan_hit_start;
+ unsigned scan_hit_size;
+ unsigned scanned_blocks;
+ unsigned long scan_start;
+ unsigned long scan_end;
+ struct drm_mm_node *prev_scanned_node;
+};
+
+static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
+{
+ return node->allocated;
+}
+
+static inline bool drm_mm_initialized(struct drm_mm *mm)
+{
+ return (mm->hole_stack.next != NULL);
+}
+#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
+ &(mm)->head_node.node_list, \
+ node_list)
+#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
+ for (entry = (mm)->prev_scanned_node, \
+ next = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL; \
+ entry != NULL; entry = next, \
+ next = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL)
+
+/*
+ * Basic range manager support (drm_mm.c)
+ */
+extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ int atomic);
+extern struct drm_mm_node *drm_mm_get_block_range_generic(
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic);
+static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
+{
+ return drm_mm_get_block_generic(parent, size, alignment, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
+{
+ return drm_mm_get_block_generic(parent, size, alignment, 1);
+}
+static inline struct drm_mm_node *drm_mm_get_block_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment,
+ start, end, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment,
+ start, end, 1);
+}
+extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment);
+extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end);
+extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ int best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int best_match);
+extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
+ unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
+ unsigned long size);
+extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
+ unsigned long size, int atomic);
+extern int drm_mm_pre_get(struct drm_mm *mm);
+
+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+{
+ return block->mm;
+}
+
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+ unsigned alignment);
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
+int drm_mm_scan_add_block(struct drm_mm_node *node);
+int drm_mm_scan_remove_block(struct drm_mm_node *node);
+
+#endif
diff --git a/sys/dev/drm2/drm_mode.h b/sys/dev/drm2/drm_mode.h
new file mode 100644
index 0000000..bc28240
--- /dev/null
+++ b/sys/dev/drm2/drm_mode.h
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2007-2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DRM_MODE_H
+#define _DRM_MODE_H
+
+#define DRM_DISPLAY_INFO_LEN 32
+#define DRM_CONNECTOR_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN 32
+#define DRM_PROP_NAME_LEN 32
+
+#define DRM_MODE_TYPE_BUILTIN (1<<0)
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_PREFERRED (1<<3)
+#define DRM_MODE_TYPE_DEFAULT (1<<4)
+#define DRM_MODE_TYPE_USERDEF (1<<5)
+#define DRM_MODE_TYPE_DRIVER (1<<6)
+
+/* Video mode flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_FLAG_PHSYNC (1<<0)
+#define DRM_MODE_FLAG_NHSYNC (1<<1)
+#define DRM_MODE_FLAG_PVSYNC (1<<2)
+#define DRM_MODE_FLAG_NVSYNC (1<<3)
+#define DRM_MODE_FLAG_INTERLACE (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN (1<<5)
+#define DRM_MODE_FLAG_CSYNC (1<<6)
+#define DRM_MODE_FLAG_PCSYNC (1<<7)
+#define DRM_MODE_FLAG_NCSYNC (1<<8)
+#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST (1<<10)
+#define DRM_MODE_FLAG_PIXMUX (1<<11)
+#define DRM_MODE_FLAG_DBLCLK (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
+
+/* DPMS flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_DPMS_ON 0
+#define DRM_MODE_DPMS_STANDBY 1
+#define DRM_MODE_DPMS_SUSPEND 2
+#define DRM_MODE_DPMS_OFF 3
+
+/* Scaling mode options */
+#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
+ software can still scale) */
+#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
+#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
+#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
+
+/* Dithering mode options */
+#define DRM_MODE_DITHERING_OFF 0
+#define DRM_MODE_DITHERING_ON 1
+#define DRM_MODE_DITHERING_AUTO 2
+
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF 0
+#define DRM_MODE_DIRTY_ON 1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
+struct drm_mode_modeinfo {
+ uint32_t clock;
+ uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
+ uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
+
+ uint32_t vrefresh;
+
+ uint32_t flags;
+ uint32_t type;
+ char name[DRM_DISPLAY_MODE_LEN];
+};
+
+struct drm_mode_card_res {
+ uint64_t fb_id_ptr;
+ uint64_t crtc_id_ptr;
+ uint64_t connector_id_ptr;
+ uint64_t encoder_id_ptr;
+ uint32_t count_fbs;
+ uint32_t count_crtcs;
+ uint32_t count_connectors;
+ uint32_t count_encoders;
+ uint32_t min_width, max_width;
+ uint32_t min_height, max_height;
+};
+
+struct drm_mode_crtc {
+ uint64_t set_connectors_ptr;
+ uint32_t count_connectors;
+
+ uint32_t crtc_id; /**< Id */
+ uint32_t fb_id; /**< Id of framebuffer */
+
+ uint32_t x, y; /**< Position on the framebuffer */
+
+ uint32_t gamma_size;
+ uint32_t mode_valid;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
+#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
+
+/* Planes blend with or override other bits on the CRTC */
+struct drm_mode_set_plane {
+ uint32_t plane_id;
+ uint32_t crtc_id;
+ uint32_t fb_id; /* fb object contains surface format type */
+ uint32_t flags; /* see above flags */
+
+ /* Signed dest location allows it to be partially off screen */
+ int32_t crtc_x, crtc_y;
+ uint32_t crtc_w, crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ uint32_t src_x, src_y;
+ uint32_t src_h, src_w;
+};
+
+struct drm_mode_get_plane {
+ uint32_t plane_id;
+
+ uint32_t crtc_id;
+ uint32_t fb_id;
+
+ uint32_t possible_crtcs;
+ uint32_t gamma_size;
+
+ uint32_t count_format_types;
+ uint64_t format_type_ptr;
+};
+
+struct drm_mode_get_plane_res {
+ uint64_t plane_id_ptr;
+ uint32_t count_planes;
+};
+
+#define DRM_MODE_ENCODER_NONE 0
+#define DRM_MODE_ENCODER_DAC 1
+#define DRM_MODE_ENCODER_TMDS 2
+#define DRM_MODE_ENCODER_LVDS 3
+#define DRM_MODE_ENCODER_TVDAC 4
+
+struct drm_mode_get_encoder {
+ uint32_t encoder_id;
+ uint32_t encoder_type;
+
+ uint32_t crtc_id; /**< Id of crtc */
+
+ uint32_t possible_crtcs;
+ uint32_t possible_clones;
+};
+
+/* This is for connectors with multiple signal types. */
+/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
+#define DRM_MODE_SUBCONNECTOR_Automatic 0
+#define DRM_MODE_SUBCONNECTOR_Unknown 0
+#define DRM_MODE_SUBCONNECTOR_DVID 3
+#define DRM_MODE_SUBCONNECTOR_DVIA 4
+#define DRM_MODE_SUBCONNECTOR_Composite 5
+#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
+#define DRM_MODE_SUBCONNECTOR_Component 8
+#define DRM_MODE_SUBCONNECTOR_SCART 9
+
+#define DRM_MODE_CONNECTOR_Unknown 0
+#define DRM_MODE_CONNECTOR_VGA 1
+#define DRM_MODE_CONNECTOR_DVII 2
+#define DRM_MODE_CONNECTOR_DVID 3
+#define DRM_MODE_CONNECTOR_DVIA 4
+#define DRM_MODE_CONNECTOR_Composite 5
+#define DRM_MODE_CONNECTOR_SVIDEO 6
+#define DRM_MODE_CONNECTOR_LVDS 7
+#define DRM_MODE_CONNECTOR_Component 8
+#define DRM_MODE_CONNECTOR_9PinDIN 9
+#define DRM_MODE_CONNECTOR_DisplayPort 10
+#define DRM_MODE_CONNECTOR_HDMIA 11
+#define DRM_MODE_CONNECTOR_HDMIB 12
+#define DRM_MODE_CONNECTOR_TV 13
+#define DRM_MODE_CONNECTOR_eDP 14
+
+struct drm_mode_get_connector {
+
+ uint64_t encoders_ptr;
+ uint64_t modes_ptr;
+ uint64_t props_ptr;
+ uint64_t prop_values_ptr;
+
+ uint32_t count_modes;
+ uint32_t count_props;
+ uint32_t count_encoders;
+
+ uint32_t encoder_id; /**< Current Encoder */
+ uint32_t connector_id; /**< Id */
+ uint32_t connector_type;
+ uint32_t connector_type_id;
+
+ uint32_t connection;
+ uint32_t mm_width, mm_height; /**< width x height in millimeters */
+ uint32_t subpixel;
+};
+
+#define DRM_MODE_PROP_PENDING (1<<0)
+#define DRM_MODE_PROP_RANGE (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
+#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
+#define DRM_MODE_PROP_BLOB (1<<4)
+
+struct drm_mode_property_enum {
+ uint64_t value;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_mode_get_property {
+ uint64_t values_ptr; /* values and blob lengths */
+ uint64_t enum_blob_ptr; /* enum and blob id ptrs */
+
+ uint32_t prop_id;
+ uint32_t flags;
+ char name[DRM_PROP_NAME_LEN];
+
+ uint32_t count_values;
+ uint32_t count_enum_blobs;
+};
+
+struct drm_mode_connector_set_property {
+ uint64_t value;
+ uint32_t prop_id;
+ uint32_t connector_id;
+};
+
+struct drm_mode_get_blob {
+ uint32_t blob_id;
+ uint32_t length;
+ uint64_t data;
+};
+
+struct drm_mode_fb_cmd {
+ uint32_t fb_id;
+ uint32_t width, height;
+ uint32_t pitch;
+ uint32_t bpp;
+ uint32_t depth;
+ /* driver specific handle */
+ uint32_t handle;
+};
+
+#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+
+struct drm_mode_fb_cmd2 {
+ uint32_t fb_id;
+ uint32_t width, height;
+ uint32_t pixel_format; /* fourcc code from drm_fourcc.h */
+ uint32_t flags; /* see above flags */
+
+ /*
+ * In case of planar formats, this ioctl allows up to 4
+ * buffer objects with offsets and pitches per plane.
+ * The pitch and offset order is dictated by the fourcc,
+ * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
+ *
+ * YUV 4:2:0 image with a plane of 8 bit Y samples
+ * followed by an interleaved U/V plane containing
+ * 8 bit 2x2 subsampled colour difference samples.
+ *
+ * So it would consist of Y as offset[0] and UV as
+ * offset[1]. Note that offset[0] will generally
+ * be 0.
+ */
+ uint32_t handles[4];
+ uint32_t pitches[4]; /* pitch for each plane */
+ uint32_t offsets[4]; /* offset of each plane */
+};
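+
+/*
+ * Illustrative NV12 setup sketch (not part of this header; the handle
+ * and geometry values are assumptions): plane 0 carries the Y samples,
+ * plane 1 the interleaved U/V pairs, both in one buffer object.
+ */
+#if 0
+ struct drm_mode_fb_cmd2 f;
+
+ memset(&f, 0, sizeof(f));
+ f.width = 1280;
+ f.height = 720;
+ f.pixel_format = DRM_FORMAT_NV12; /* fourcc from drm_fourcc.h */
+ f.handles[0] = bo_handle; /* Y plane */
+ f.pitches[0] = 1280; /* one byte per Y sample */
+ f.offsets[0] = 0;
+ f.handles[1] = bo_handle; /* U/V plane in the same object */
+ f.pitches[1] = 1280; /* interleaved 2x2 subsampled U/V */
+ f.offsets[1] = 1280 * 720; /* U/V plane follows the Y plane */
+#endif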
+
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS 0x03
+
+#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * when hardware or software draws to a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware what
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more than just the
+ * region specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates; the annotations are a
+ * promise made by the caller that the change is either a copy
+ * of pixels or a fill of a single color in the region specified.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given, then
+ * the number of updated regions is half of the num_clips given,
+ * where the clip rects are paired as src and dst. The width and
+ * height of each pair must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given, the caller
+ * promises that the region specified by the clip rects is filled
+ * completely with the single color given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+ uint32_t fb_id;
+ uint32_t flags;
+ uint32_t color;
+ uint32_t num_clips;
+ uint64_t clips_ptr;
+};
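+
+/*
+ * Illustrative dirty-region report (not part of this header; "fd" and
+ * "fb_id" are assumptions): flush one 64x64 rectangle.
+ */
+#if 0
+ struct drm_clip_rect clip = { 0, 0, 64, 64 };
+ struct drm_mode_fb_dirty_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.fb_id = fb_id;
+ cmd.num_clips = 1;
+ cmd.clips_ptr = (uint64_t)(uintptr_t)&clip;
+ ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
+#endif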
+
+struct drm_mode_mode_cmd {
+ uint32_t connector_id;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_CURSOR_BO (1<<0)
+#define DRM_MODE_CURSOR_MOVE (1<<1)
+
+/*
+ * Depending on the value in flags, different members are used.
+ *
+ * CURSOR_BO uses
+ * crtc
+ * width
+ * height
+ * handle - if 0, turns the cursor off
+ *
+ * CURSOR_MOVE uses
+ * crtc
+ * x
+ * y
+ */
+struct drm_mode_cursor {
+ uint32_t flags;
+ uint32_t crtc_id;
+ int32_t x;
+ int32_t y;
+ uint32_t width;
+ uint32_t height;
+ /* driver specific handle */
+ uint32_t handle;
+};
+
+struct drm_mode_crtc_lut {
+ uint32_t crtc_id;
+ uint32_t gamma_size;
+
+ /* pointers to arrays */
+ uint64_t red;
+ uint64_t green;
+ uint64_t blue;
+};
+
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc. Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh. The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens. If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
+ * request that drm sends back a vblank event (see drm.h: struct
+ * drm_event_vblank) when the page flip is done. The user_data field
+ * passed in with this ioctl will be returned as the user_data field
+ * in the vblank event struct.
+ *
+ * The reserved field must be zero until we figure out something
+ * clever to use it for.
+ */
+
+struct drm_mode_crtc_page_flip {
+ uint32_t crtc_id;
+ uint32_t fb_id;
+ uint32_t flags;
+ uint32_t reserved;
+ uint64_t user_data;
+};
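+
+/*
+ * Illustrative page-flip request (not part of this header; "fd",
+ * "crtc_id", "next_fb" and "state" are assumptions): ask for a vblank
+ * event and match it back up through user_data.
+ */
+#if 0
+ struct drm_mode_crtc_page_flip flip;
+
+ memset(&flip, 0, sizeof(flip));
+ flip.crtc_id = crtc_id;
+ flip.fb_id = next_fb;
+ flip.flags = DRM_MODE_PAGE_FLIP_EVENT;
+ flip.user_data = (uint64_t)(uintptr_t)state;
+ if (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) == -1 && errno == EBUSY)
+ ; /* a flip is already pending on this crtc */
+#endif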
+
+/* create a dumb scanout buffer */
+struct drm_mode_create_dumb {
+ uint32_t height;
+ uint32_t width;
+ uint32_t bpp;
+ uint32_t flags;
+ /* handle, pitch, size will be returned */
+ uint32_t handle;
+ uint32_t pitch;
+ uint64_t size;
+};
+
+/* set up for mmap of a dumb scanout buffer */
+struct drm_mode_map_dumb {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ uint64_t offset;
+};
+
+struct drm_mode_destroy_dumb {
+ uint32_t handle;
+};
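+
+/*
+ * Illustrative dumb-buffer flow (not part of this header; "fd" is an
+ * assumption): create, map and destroy an 800x600 32bpp scanout buffer.
+ */
+#if 0
+ struct drm_mode_create_dumb c = { .height = 600, .width = 800, .bpp = 32 };
+ struct drm_mode_map_dumb m = { 0 };
+ struct drm_mode_destroy_dumb d = { 0 };
+ void *p;
+
+ ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &c); /* fills handle/pitch/size */
+ m.handle = c.handle;
+ ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &m); /* fills the fake mmap offset */
+ p = mmap(NULL, c.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, m.offset);
+ /* ... draw into p, attach the buffer to a framebuffer and a CRTC ... */
+ d.handle = c.handle;
+ ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &d);
+#endif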
+
+#endif
diff --git a/sys/dev/drm2/drm_modes.c b/sys/dev/drm2/drm_modes.c
new file mode 100644
index 0000000..a2dbbdd
--- /dev/null
+++ b/sys/dev/drm2/drm_modes.c
@@ -0,0 +1,1147 @@
+/*
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc.h>
+
+#define KHZ2PICOS(a) (1000000000UL/(a))
+
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
+ "0x%x 0x%x\n",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+}
+
+/**
+ * drm_cvt_mode - create a modeline based on the CVT algorithm
+ * @dev: DRM device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @reduced: whether to use reduced-blanking (CVT-RB) timings
+ * @interlaced: whether interlace is supported
+ * @margins: whether to add screen margins
+ *
+ * LOCKING:
+ * none.
+ *
+ * Return the modeline based on the CVT algorithm.
+ *
+ * This function generates a modeline from hdisplay, vdisplay and
+ * vrefresh using the CVT algorithm.
+ * It is based on the VESA(TM) Coordinated Video Timing Generator by
+ * Graham Loveridge, April 9, 2003, available at
+ * http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls
+ *
+ * It is adapted from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c,
+ * translated to use integer calculations.
+ */
+#define HV_FACTOR 1000
+struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
+ int vdisplay, int vrefresh,
+ bool reduced, bool interlaced, bool margins)
+{
+ /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define CVT_MARGIN_PERCENTAGE 18
+ /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define CVT_H_GRANULARITY 8
+ /* 3) Minimum vertical porch (lines) - default 3 */
+#define CVT_MIN_V_PORCH 3
+ /* 4) Minimum number of vertical back porch lines - default 6 */
+#define CVT_MIN_V_BPORCH 6
+ /* Pixel Clock step (kHz) */
+#define CVT_CLOCK_STEP 250
+ struct drm_display_mode *drm_mode;
+ unsigned int vfieldrate, hperiod;
+ int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
+ int interlace;
+
+ /* allocate the drm_display_mode structure. If failure, we will
+ * return directly
+ */
+ drm_mode = drm_mode_create(dev);
+ if (!drm_mode)
+ return NULL;
+
+ /* the CVT default refresh rate is 60Hz */
+ if (!vrefresh)
+ vrefresh = 60;
+
+ /* the required field fresh rate */
+ if (interlaced)
+ vfieldrate = vrefresh * 2;
+ else
+ vfieldrate = vrefresh;
+
+ /* horizontal pixels */
+ hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
+
+ /* determine the left&right borders */
+ hmargin = 0;
+ if (margins) {
+ hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+ hmargin -= hmargin % CVT_H_GRANULARITY;
+ }
+ /* find the total active pixels */
+ drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
+
+ /* find the number of lines per field */
+ if (interlaced)
+ vdisplay_rnd = vdisplay / 2;
+ else
+ vdisplay_rnd = vdisplay;
+
+ /* find the top & bottom borders */
+ vmargin = 0;
+ if (margins)
+ vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+
+ drm_mode->vdisplay = vdisplay + 2 * vmargin;
+
+ /* Interlaced */
+ if (interlaced)
+ interlace = 1;
+ else
+ interlace = 0;
+
+ /* Determine VSync Width from aspect ratio */
+ if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
+ vsync = 4;
+ else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
+ vsync = 5;
+ else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
+ vsync = 6;
+ else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
+ vsync = 7;
+ else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
+ vsync = 7;
+ else /* custom */
+ vsync = 10;
+
+ if (!reduced) {
+ /* simplify the GTF calculation */
+ /* 4) Minimum time of vertical sync + back porch interval (µs)
+ * default 550.0
+ */
+ int tmp1, tmp2;
+#define CVT_MIN_VSYNC_BP 550
+ /* 3) Nominal HSync width (% of line period) - default 8 */
+#define CVT_HSYNC_PERCENTAGE 8
+ unsigned int hblank_percentage;
+ int vsyncandback_porch, vback_porch, hblank;
+
+ /* estimated the horizontal period */
+ tmp1 = HV_FACTOR * 1000000 -
+ CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
+ tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
+ interlace;
+ hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
+
+ tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
+ /* 9. Find number of lines in sync + backporch */
+ if (tmp1 < (vsync + CVT_MIN_V_PORCH))
+ vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
+ else
+ vsyncandback_porch = tmp1;
+ /* 10. Find number of lines in back porch */
+ vback_porch = vsyncandback_porch - vsync;
+ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
+ vsyncandback_porch + CVT_MIN_V_PORCH;
+ /* 5) Definition of Horizontal blanking time limitation */
+ /* Gradient (%/kHz) - default 600 */
+#define CVT_M_FACTOR 600
+ /* Offset (%) - default 40 */
+#define CVT_C_FACTOR 40
+ /* Blanking time scaling factor - default 128 */
+#define CVT_K_FACTOR 128
+ /* Scaling factor weighting - default 20 */
+#define CVT_J_FACTOR 20
+#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256)
+#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
+ CVT_J_FACTOR)
+ /* 12. Find ideal blanking duty cycle from formula */
+ hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
+ hperiod / 1000;
+ /* 13. Blanking time */
+ if (hblank_percentage < 20 * HV_FACTOR)
+ hblank_percentage = 20 * HV_FACTOR;
+ hblank = drm_mode->hdisplay * hblank_percentage /
+ (100 * HV_FACTOR - hblank_percentage);
+ hblank -= hblank % (2 * CVT_H_GRANULARITY);
+ /* 14. find the total pixels per line */
+ drm_mode->htotal = drm_mode->hdisplay + hblank;
+ drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
+ drm_mode->hsync_start = drm_mode->hsync_end -
+ (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
+ drm_mode->hsync_start += CVT_H_GRANULARITY -
+ drm_mode->hsync_start % CVT_H_GRANULARITY;
+ /* fill the Vsync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+ } else {
+ /* Reduced blanking */
+ /* Minimum vertical blanking interval time (µs)- default 460 */
+#define CVT_RB_MIN_VBLANK 460
+ /* Fixed number of clocks for horizontal sync */
+#define CVT_RB_H_SYNC 32
+ /* Fixed number of clocks for horizontal blanking */
+#define CVT_RB_H_BLANK 160
+ /* Fixed number of lines for vertical front porch - default 3*/
+#define CVT_RB_VFPORCH 3
+ int vbilines;
+ int tmp1, tmp2;
+ /* 8. Estimate Horizontal period. */
+ tmp1 = HV_FACTOR * 1000000 -
+ CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
+ tmp2 = vdisplay_rnd + 2 * vmargin;
+ hperiod = tmp1 / (tmp2 * vfieldrate);
+ /* 9. Find number of lines in vertical blanking */
+ vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
+ /* 10. Check if vertical blanking is sufficient */
+ if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
+ vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
+ /* 11. Find total number of lines in vertical field */
+ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
+ /* 12. Find total number of pixels in a line */
+ drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
+ /* Fill in HSync values */
+ drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
+ drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+ /* Fill in VSync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+ }
+ /* 15/13. Find pixel clock frequency (kHz for xf86) */
+ drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
+ drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+ /* 18/16. Find actual vertical frame frequency */
+ /* ignore - just set the mode flag for interlaced */
+ if (interlaced) {
+ drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
+ /* Fill the mode line name */
+ drm_mode_set_name(drm_mode);
+ if (reduced)
+ drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NVSYNC);
+ else
+ drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NHSYNC);
+
+ return drm_mode;
+}
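+
+/*
+ * Illustrative call (values are assumptions only): synthesize a
+ * 1024x768@60 mode with normal blanking and no margins.
+ *
+ * mode = drm_cvt_mode(dev, 1024, 768, 60, false, false, false);
+ */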
+
+/**
+ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
+ *
+ * @dev: DRM device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether interlace is supported
+ * @margins: desired margin size
+ * @GTF_[MCKJ]: extended GTF formula parameters
+ *
+ * LOCKING:
+ * none.
+ *
+ * Return the modeline based on the full GTF algorithm.
+ *
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
+ */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced, int margins,
+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{ /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define GTF_MARGIN_PERCENTAGE 18
+ /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define GTF_CELL_GRAN 8
+ /* 3) Minimum vertical porch (lines) - default 3 */
+#define GTF_MIN_V_PORCH 1
+ /* width of vsync in lines */
+#define V_SYNC_RQD 3
+ /* width of hsync as % of total line */
+#define H_SYNC_PERCENT 8
+ /* min time of vsync + back porch (microsec) */
+#define MIN_VSYNC_PLUS_BP 550
+ /* C' and M' are part of the Blanking Duty Cycle computation */
+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
+ struct drm_display_mode *drm_mode;
+ unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
+ int top_margin, bottom_margin;
+ int interlace;
+ unsigned int hfreq_est;
+ int vsync_plus_bp, vback_porch;
+ unsigned int vtotal_lines, vfieldrate_est, hperiod;
+ unsigned int vfield_rate, vframe_rate;
+ int left_margin, right_margin;
+ unsigned int total_active_pixels, ideal_duty_cycle;
+ unsigned int hblank, total_pixels, pixel_freq;
+ int hsync, hfront_porch, vodd_front_porch_lines;
+ unsigned int tmp1, tmp2;
+
+ drm_mode = drm_mode_create(dev);
+ if (!drm_mode)
+ return NULL;
+
+ /* 1. In order to give correct results, the number of horizontal
+ * pixels requested is first processed to ensure that it is divisible
+ * by the character size, by rounding it to the nearest character
+ * cell boundary:
+ */
+ hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+ hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
+
+ /* 2. If interlace is requested, the number of vertical lines assumed
+ * by the calculation must be halved, as the computation calculates
+ * the number of vertical lines per field.
+ */
+ if (interlaced)
+ vdisplay_rnd = vdisplay / 2;
+ else
+ vdisplay_rnd = vdisplay;
+
+ /* 3. Find the frame rate required: */
+ if (interlaced)
+ vfieldrate_rqd = vrefresh * 2;
+ else
+ vfieldrate_rqd = vrefresh;
+
+ /* 4. Find number of lines in Top margin: */
+ top_margin = 0;
+ if (margins)
+ top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+ 1000;
+ /* 5. Find number of lines in bottom margin: */
+ bottom_margin = top_margin;
+
+ /* 6. If interlace is required, then set variable interlace: */
+ if (interlaced)
+ interlace = 1;
+ else
+ interlace = 0;
+
+ /* 7. Estimate the Horizontal frequency */
+ {
+ tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
+ tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
+ 2 + interlace;
+ hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
+ }
+
+ /* 8. Find the number of lines in V sync + back porch */
+ /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
+ vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
+ vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
+ /* 9. Find the number of lines in V back porch alone: */
+ vback_porch = vsync_plus_bp - V_SYNC_RQD;
+ /* 10. Find the total number of lines in Vertical field period: */
+ vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
+ vsync_plus_bp + GTF_MIN_V_PORCH;
+ /* 11. Estimate the Vertical field frequency: */
+ vfieldrate_est = hfreq_est / vtotal_lines;
+ /* 12. Find the actual horizontal period: */
+ hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
+
+ /* 13. Find the actual Vertical field frequency: */
+ vfield_rate = hfreq_est / vtotal_lines;
+ /* 14. Find the Vertical frame frequency: */
+ if (interlaced)
+ vframe_rate = vfield_rate / 2;
+ else
+ vframe_rate = vfield_rate;
+ /* 15. Find number of pixels in left margin: */
+ if (margins)
+ left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+ 1000;
+ else
+ left_margin = 0;
+
+ /* 16.Find number of pixels in right margin: */
+ right_margin = left_margin;
+ /* 17.Find total number of active pixels in image and left and right
+ * margins: */
+ total_active_pixels = hdisplay_rnd + left_margin + right_margin;
+ /* 18.Find the ideal blanking duty cycle from the blanking duty cycle
+ * equation: */
+ ideal_duty_cycle = GTF_C_PRIME * 1000 -
+ (GTF_M_PRIME * 1000000 / hfreq_est);
+ /* 19.Find the number of pixels in the blanking time to the nearest
+ * double character cell: */
+ hblank = total_active_pixels * ideal_duty_cycle /
+ (100000 - ideal_duty_cycle);
+ hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
+ hblank = hblank * 2 * GTF_CELL_GRAN;
+ /* 20.Find total number of pixels: */
+ total_pixels = total_active_pixels + hblank;
+ /* 21.Find pixel clock frequency: */
+ pixel_freq = total_pixels * hfreq_est / 1000;
+ /* Stage 1 computations are now complete; I should really pass
+ * the results to another function and do the Stage 2 computations,
+ * but I only need a few more values so I'll just append the
+ * computations here for now */
+ /* 17. Find the number of pixels in the horizontal sync period: */
+ hsync = H_SYNC_PERCENT * total_pixels / 100;
+ hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+ hsync = hsync * GTF_CELL_GRAN;
+ /* 18. Find the number of pixels in horizontal front porch period */
+ hfront_porch = hblank / 2 - hsync;
+ /* 36. Find the number of lines in the odd front porch period: */
+ vodd_front_porch_lines = GTF_MIN_V_PORCH;
+
+ /* finally, pack the results in the mode struct */
+ drm_mode->hdisplay = hdisplay_rnd;
+ drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
+ drm_mode->hsync_end = drm_mode->hsync_start + hsync;
+ drm_mode->htotal = total_pixels;
+ drm_mode->vdisplay = vdisplay_rnd;
+ drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
+ drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
+ drm_mode->vtotal = vtotal_lines;
+
+ drm_mode->clock = pixel_freq;
+
+ if (interlaced) {
+ drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
+
+ drm_mode_set_name(drm_mode);
+ /* use |= so the interlace flag set above is preserved */
+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+ drm_mode->flags |= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+ else
+ drm_mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
+ return drm_mode;
+}
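+
+/*
+ * Example (illustrative only, not used by the driver): computing a
+ * 640x480@60 modeline with the default GTF constants.  C and J are
+ * passed in doubled, so the standard C=40 and J=20 become 80 and 40:
+ *
+ *	mode = drm_gtf_mode_complex(dev, 640, 480, 60, false, 0,
+ *	    600, 80, 128, 40);
+ *
+ * which is exactly the call drm_gtf_mode() below makes.
+ */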
+
+/**
+ * drm_gtf_mode - create the modeline based on the GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @lace: whether to compute an interlaced mode
+ * @margins: whether margins are requested
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return the modeline based on the GTF algorithm.
+ *
+ * This function creates a modeline using the Generalized Timing Formula,
+ * which is derived from:
+ *	GTF Spreadsheet by Andy Morrish (1/5/97)
+ *	available at http://www.vesa.org
+ *
+ * It is a translation of xserver/hw/xfree86/modes/xf86gtf.c into integer
+ * arithmetic, with fb_get_mode() from drivers/video/fbmon.c used as an
+ * additional reference.
+ *
+ * Standard GTF parameters:
+ *	M = 600
+ *	C = 40
+ *	K = 128
+ *	J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+ bool lace, int margins)
+{
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+ margins, 600, 40 * 2, 128, 20 * 2);
+}
+
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to a standard format.
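+ * For example, a 1920x1080 interlaced mode is named "1920x1080i".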
+ */
+void drm_mode_set_name(struct drm_display_mode *mode)
+{
+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
+}
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: destination list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+ struct list_head *entry, *tmp;
+
+ list_for_each_safe(entry, tmp, head) {
+ list_move_tail(entry, new);
+ }
+}
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int drm_mode_width(struct drm_display_mode *mode)
+{
+ return mode->hdisplay;
+}
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int drm_mode_height(struct drm_display_mode *mode)
+{
+ return mode->vdisplay;
+}
+
+/**
+ * drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest integer.
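+ *
+ * For example, with the VESA 1024x768@60 timing (clock 65000 kHz,
+ * htotal 1344) this computes 65000000 / 1344 = 48363 Hz, which is
+ * returned as 48 (kHz).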
+ */
+int drm_mode_hsync(const struct drm_display_mode *mode)
+{
+ unsigned int calc_val;
+
+ if (mode->hsync)
+ return mode->hsync;
+
+ if (mode->htotal <= 0)
+ return 0;
+
+ calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+ calc_val += 500; /* round to 1000Hz */
+ calc_val /= 1000; /* truncate to kHz */
+
+ return calc_val;
+}
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
+ *
+ * FIXME: why is this needed? shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate, rounded to the nearest integer: an actual
+ * rate of 70.288 returns 70 Hz and 59.6 returns 60 Hz.
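+ *
+ * For example, the VESA 1024x768@60 timing (clock 65000 kHz, htotal
+ * 1344, vtotal 806) gives (65000000 / 1344 + 403) / 806 = 60.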
+ */
+int drm_mode_vrefresh(const struct drm_display_mode *mode)
+{
+ int refresh = 0;
+ unsigned int calc_val;
+
+ if (mode->vrefresh > 0)
+ refresh = mode->vrefresh;
+ else if (mode->htotal > 0 && mode->vtotal > 0) {
+ int vtotal;
+ vtotal = mode->vtotal;
+ /* work out the refresh rate; the intermediate value is x1000 */
+ calc_val = (mode->clock * 1000);
+ calc_val /= mode->htotal;
+ refresh = (calc_val + vtotal / 2) / vtotal;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ refresh *= 2;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ refresh /= 2;
+ if (mode->vscan > 1)
+ refresh /= mode->vscan;
+ }
+ return refresh;
+}
+
+/**
+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * @p: mode
+ * @adjust_flags: adjustment flags; CRTC_INTERLACE_HALVE_V halves the
+ * vertical timings of interlaced modes
+ *
+ * LOCKING:
+ * None.
+ *
+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
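+ *
+ * For example, a 1080-line interlaced mode (vtotal 1125) passed in with
+ * CRTC_INTERLACE_HALVE_V ends up with crtc_vdisplay 540 and
+ * crtc_vtotal (1125 / 2) | 1 = 563, since the CRTC is programmed with
+ * per-field timings.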
+ */
+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+{
+ if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+ return;
+
+ p->crtc_hdisplay = p->hdisplay;
+ p->crtc_hsync_start = p->hsync_start;
+ p->crtc_hsync_end = p->hsync_end;
+ p->crtc_htotal = p->htotal;
+ p->crtc_hskew = p->hskew;
+ p->crtc_vdisplay = p->vdisplay;
+ p->crtc_vsync_start = p->vsync_start;
+ p->crtc_vsync_end = p->vsync_end;
+ p->crtc_vtotal = p->vtotal;
+
+ if (p->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
+ p->crtc_vdisplay /= 2;
+ p->crtc_vsync_start /= 2;
+ p->crtc_vsync_end /= 2;
+ p->crtc_vtotal /= 2;
+ }
+
+ p->crtc_vtotal |= 1;
+ }
+
+ if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+ p->crtc_vdisplay *= 2;
+ p->crtc_vsync_start *= 2;
+ p->crtc_vsync_end *= 2;
+ p->crtc_vtotal *= 2;
+ }
+
+ if (p->vscan > 1) {
+ p->crtc_vdisplay *= p->vscan;
+ p->crtc_vsync_start *= p->vscan;
+ p->crtc_vsync_end *= p->vscan;
+ p->crtc_vtotal *= p->vscan;
+ }
+
+ p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
+ p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
+ p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
+ p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
+
+ p->crtc_hadjusted = false;
+ p->crtc_vadjusted = false;
+}
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @dev: drm device
+ * @mode: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it. Used to create new instances of established modes.
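+ *
+ * Note that the copy keeps its own freshly allocated base.id and an
+ * empty list head, so it can be inserted into mode lists independently
+ * of @mode.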
+ */
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *nmode;
+ int new_id;
+
+ nmode = drm_mode_create(dev);
+ if (!nmode)
+ return NULL;
+
+ new_id = nmode->base.id;
+ *nmode = *mode;
+ nmode->base.id = new_id;
+ INIT_LIST_HEAD(&nmode->head);
+ return nmode;
+}
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * RETURNS:
+ * true if the modes are equal, false otherwise.
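+ *
+ * Note the clock comparison is done on KHZ2PICOS() values, so two
+ * clocks that round to the same picosecond pixel period still compare
+ * equal; this lets modes converted from fb timings match.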
+ */
+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+{
+ /* do the clock check in PICOS so modes converted from fb timings
+ * get matched the same way */
+ if (mode1->clock && mode2->clock) {
+ if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
+ return false;
+ } else if (mode1->clock != mode2->clock)
+ return false;
+
+ if (mode1->hdisplay == mode2->hdisplay &&
+ mode1->hsync_start == mode2->hsync_start &&
+ mode1->hsync_end == mode2->hsync_end &&
+ mode1->htotal == mode2->htotal &&
+ mode1->hskew == mode2->hskew &&
+ mode1->vdisplay == mode2->vdisplay &&
+ mode1->vsync_start == mode2->vsync_start &&
+ mode1->vsync_end == mode2->vsync_end &&
+ mode1->vtotal == mode2->vtotal &&
+ mode1->vscan == mode2->vscan &&
+ mode1->flags == mode2->flags)
+ return true;
+
+ return false;
+}
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch (in pixels, checked against hdisplay)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits. Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.
+ */
+void drm_mode_validate_size(struct drm_device *dev,
+ struct list_head *mode_list,
+ int maxX, int maxY, int maxPitch)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, mode_list, head) {
+ if (maxPitch > 0 && mode->hdisplay > maxPitch)
+ mode->status = MODE_BAD_WIDTH;
+
+ if (maxX > 0 && mode->hdisplay > maxX)
+ mode->status = MODE_VIRTUAL_X;
+
+ if (maxY > 0 && mode->vdisplay > maxY)
+ mode->status = MODE_VIRTUAL_Y;
+ }
+}
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question. This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+void drm_mode_validate_clocks(struct drm_device *dev,
+ struct list_head *mode_list,
+ int *min, int *max, int n_ranges)
+{
+ struct drm_display_mode *mode;
+ int i;
+
+ list_for_each_entry(mode, mode_list, head) {
+ bool good = false;
+ for (i = 0; i < n_ranges; i++) {
+ if (mode->clock >= min[i] && mode->clock <= max[i]) {
+ good = true;
+ break;
+ }
+ }
+ if (!good)
+ mode->status = MODE_CLOCK_RANGE;
+ }
+}
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list. If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose)
+{
+ struct drm_display_mode *mode, *t;
+
+ list_for_each_entry_safe(mode, t, mode_list, head) {
+ if (mode->status != MODE_OK) {
+ list_del(&mode->head);
+ if (verbose) {
+ drm_mode_debug_printmodeline(mode);
+ DRM_DEBUG_KMS("Not using %s mode %d\n",
+ mode->name, mode->status);
+ }
+ drm_mode_destroy(dev, mode);
+ }
+ }
+}
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @priv: unused
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.
+ *
+ * RETURNS:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
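+ *
+ * The net effect is that drm_mode_sort() puts preferred modes first,
+ * then sorts by descending resolution (hdisplay * vdisplay) and, within
+ * a resolution, by descending pixel clock.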
+ */
+static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
+{
+ struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+ struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+ int diff;
+
+ diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+ ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+ if (diff)
+ return diff;
+ diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+ if (diff)
+ return diff;
+ diff = b->clock - a->clock;
+ return diff;
+}
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ */
+void drm_mode_sort(struct list_head *mode_list)
+{
+ drm_list_sort(NULL, mode_list, drm_mode_compare);
+}
+
+/**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting the connector's mode lists.
+ *
+ * This moves the modes from the @connector probed_modes list to the
+ * actual mode list.  It compares each probed mode against the current
+ * list and only adds the ones that differ.  Any modes left unverified
+ * after this point will be removed by the invalid mode pruning pass.
+ */
+void drm_mode_connector_list_update(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ struct drm_display_mode *pmode, *pt;
+ int found_it;
+
+ list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
+ head) {
+ found_it = 0;
+ /* go through current modes checking for the new probed mode */
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (drm_mode_equal(pmode, mode)) {
+ found_it = 1;
+ /* if equal delete the probed mode */
+ mode->status = pmode->status;
+ /* Merge type bits together */
+ mode->type |= pmode->type;
+ list_del(&pmode->head);
+ drm_mode_destroy(connector->dev, pmode);
+ break;
+ }
+ }
+
+ if (!found_it) {
+ list_move_tail(&pmode->head, &connector->modes);
+ }
+ }
+}
+
+/**
+ * drm_mode_parse_command_line_for_connector - parse command line for connector
+ * @mode_option: per connector mode option
+ * @connector: connector to parse the line for
+ * @mode: cmdline mode structure to fill in
+ *
+ * This parses the connector-specific, then the generic, command line for
+ * modes and options used to configure the connector.
+ *
+ * It accepts the same parameters as fb modedb.c, extended with
+ *	<xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ * where the trailing 'e', 'D' and 'd' force the connector on, on with a
+ * digital signal, and off, respectively.
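+ *
+ * For example, "1024x768-24@60m" selects a 1024x768 mode with 24 bpp,
+ * a 60Hz refresh rate and margins enabled, while a trailing 'd' would
+ * instead force the connector off.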
+ */
+bool drm_mode_parse_command_line_for_connector(const char *mode_option,
+ struct drm_connector *connector,
+ struct drm_cmdline_mode *mode)
+{
+ const char *name;
+ unsigned int namelen;
+ bool res_specified = false, bpp_specified = false, refresh_specified = false;
+ unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
+ bool yres_specified = false, cvt = false, rb = false;
+ bool interlace = false, margins = false, was_digit = false;
+ int i;
+ enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
+
+#ifdef XXX_CONFIG_FB
+ if (!mode_option)
+ mode_option = fb_mode_option;
+#endif
+
+ if (!mode_option) {
+ mode->specified = false;
+ return false;
+ }
+
+ name = mode_option;
+ namelen = strlen(name);
+ for (i = namelen-1; i >= 0; i--) {
+ switch (name[i]) {
+ case '@':
+ if (!refresh_specified && !bpp_specified &&
+ !yres_specified && !cvt && !rb && was_digit) {
+ refresh = strtol(&name[i+1], NULL, 10);
+ refresh_specified = true;
+ was_digit = false;
+ } else
+ goto done;
+ break;
+ case '-':
+ if (!bpp_specified && !yres_specified && !cvt &&
+ !rb && was_digit) {
+ bpp = strtol(&name[i+1], NULL, 10);
+ bpp_specified = true;
+ was_digit = false;
+ } else
+ goto done;
+ break;
+ case 'x':
+ if (!yres_specified && was_digit) {
+ yres = strtol(&name[i+1], NULL, 10);
+ yres_specified = true;
+ was_digit = false;
+ } else
+ goto done;
+ break;
+ case '0' ... '9':
+ was_digit = true;
+ break;
+ case 'M':
+ if (yres_specified || cvt || was_digit)
+ goto done;
+ cvt = true;
+ break;
+ case 'R':
+ if (yres_specified || cvt || rb || was_digit)
+ goto done;
+ rb = true;
+ break;
+ case 'm':
+ if (cvt || yres_specified || was_digit)
+ goto done;
+ margins = true;
+ break;
+ case 'i':
+ if (cvt || yres_specified || was_digit)
+ goto done;
+ interlace = true;
+ break;
+ case 'e':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
+ force = DRM_FORCE_ON;
+ break;
+ case 'D':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
+ force = DRM_FORCE_ON;
+ else
+ force = DRM_FORCE_ON_DIGITAL;
+ break;
+ case 'd':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
+ force = DRM_FORCE_OFF;
+ break;
+ default:
+ goto done;
+ }
+ }
+
+ if (i < 0 && yres_specified) {
+ char *ch;
+ xres = strtol(name, &ch, 10);
+ if ((ch != NULL) && (*ch == 'x'))
+ res_specified = true;
+ else
+ i = ch - name;
+ } else if (!yres_specified && was_digit) {
+ /* catch mode that begins with digits but has no 'x' */
+ i = 0;
+ }
+done:
+ if (i >= 0) {
+ printf("parse error at position %i in video mode '%s'\n",
+ i, name);
+ mode->specified = false;
+ return false;
+ }
+
+ if (res_specified) {
+ mode->specified = true;
+ mode->xres = xres;
+ mode->yres = yres;
+ }
+
+ if (refresh_specified) {
+ mode->refresh_specified = true;
+ mode->refresh = refresh;
+ }
+
+ if (bpp_specified) {
+ mode->bpp_specified = true;
+ mode->bpp = bpp;
+ }
+ mode->rb = rb;
+ mode->cvt = cvt;
+ mode->interlace = interlace;
+ mode->margins = margins;
+ mode->force = force;
+
+ return true;
+}
+
+struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd)
+{
+ struct drm_display_mode *mode;
+
+ if (cmd->cvt)
+ mode = drm_cvt_mode(dev,
+ cmd->xres, cmd->yres,
+ cmd->refresh_specified ? cmd->refresh : 60,
+ cmd->rb, cmd->interlace,
+ cmd->margins);
+ else
+ mode = drm_gtf_mode(dev,
+ cmd->xres, cmd->yres,
+ cmd->refresh_specified ? cmd->refresh : 60,
+ cmd->interlace,
+ cmd->margins);
+ if (!mode)
+ return NULL;
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ return mode;
+}
diff --git a/sys/dev/drm2/drm_pci.c b/sys/dev/drm2/drm_pci.c
new file mode 100644
index 0000000..3d7a3cc
--- /dev/null
+++ b/sys/dev/drm2/drm_pci.c
@@ -0,0 +1,125 @@
+/*-
+ * Copyright 2003 Eric Anholt.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/**
+ * \file drm_pci.c
+ * \brief PCI consistent, DMA-accessible memory allocation.
+ *
+ * \author Eric Anholt <anholt@FreeBSD.org>
+ */
+
+#include <dev/drm2/drmP.h>
+
+/**********************************************************************/
+/** \name PCI memory */
+/*@{*/
+
+static void
+drm_pci_busdma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ drm_dma_handle_t *dmah = arg;
+
+ if (error != 0)
+ return;
+
+ KASSERT(nsegs == 1, ("drm_pci_busdma_callback: bad dma segment count"));
+ dmah->busaddr = segs[0].ds_addr;
+}
+
+/**
+ * \brief Allocate a physically contiguous DMA-accessible consistent
+ * memory block.
+ */
+drm_dma_handle_t *
+drm_pci_alloc(struct drm_device *dev, size_t size,
+ size_t align, dma_addr_t maxaddr)
+{
+ drm_dma_handle_t *dmah;
+ int ret;
+
+ /* Need power-of-two alignment, so fail the allocation if it isn't. */
+ if ((align & (align - 1)) != 0) {
+ DRM_ERROR("drm_pci_alloc with non-power-of-two alignment %d\n",
+ (int)align);
+ return NULL;
+ }
+
+ dmah = malloc(sizeof(drm_dma_handle_t), DRM_MEM_DMA, M_ZERO | M_NOWAIT);
+ if (dmah == NULL)
+ return NULL;
+
+ /* Make sure we aren't holding mutexes here */
+ mtx_assert(&dev->dma_lock, MA_NOTOWNED);
+ if (mtx_owned(&dev->dma_lock))
+ DRM_ERROR("called while holding dma_lock\n");
+
+ ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */
+ maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
+ NULL, NULL, /* filtfunc, filtfuncargs */
+ size, 1, size, /* maxsize, nsegs, maxsegsize */
+ 0, NULL, NULL, /* flags, lockfunc, lockfuncargs */
+ &dmah->tag);
+ if (ret != 0) {
+ free(dmah, DRM_MEM_DMA);
+ return NULL;
+ }
+
+ ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr,
+ BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_NOCACHE, &dmah->map);
+ if (ret != 0) {
+ bus_dma_tag_destroy(dmah->tag);
+ free(dmah, DRM_MEM_DMA);
+ return NULL;
+ }
+
+ ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
+ drm_pci_busdma_callback, dmah, BUS_DMA_NOWAIT);
+ if (ret != 0) {
+ bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
+ bus_dma_tag_destroy(dmah->tag);
+ free(dmah, DRM_MEM_DMA);
+ return NULL;
+ }
+
+ return dmah;
+}
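+
+/*
+ * Example (illustrative only): allocating a page-sized, page-aligned
+ * buffer that the device must be able to reach with 32-bit addresses,
+ * then releasing it again.  CPU access goes through dmah->vaddr,
+ * device access through dmah->busaddr:
+ *
+ *	drm_dma_handle_t *dmah;
+ *
+ *	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
+ *	    BUS_SPACE_MAXADDR_32BIT);
+ *	if (dmah == NULL)
+ *		return (ENOMEM);
+ *	...
+ *	drm_pci_free(dev, dmah);
+ */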
+
+/**
+ * \brief Free a DMA-accessible consistent memory block.
+ */
+void
+drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
+{
+ if (dmah == NULL)
+ return;
+
+ bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
+ bus_dma_tag_destroy(dmah->tag);
+
+ free(dmah, DRM_MEM_DMA);
+}
+
+/*@}*/
diff --git a/sys/dev/drm2/drm_pciids.h b/sys/dev/drm2/drm_pciids.h
new file mode 100644
index 0000000..fbabb63
--- /dev/null
+++ b/sys/dev/drm2/drm_pciids.h
@@ -0,0 +1,764 @@
+/*
+ * $FreeBSD$
+ */
+/*
+ This file is auto-generated from the drm_pciids.txt in the DRM CVS.
+ Please contact dri-devel@lists.sf.net to add new cards to this list.
+*/
+#define radeon_PCI_IDS \
+ {0x1002, 0x3150, CHIP_RV380|RADEON_IS_MOBILITY, "ATI Radeon Mobility X600 M24"}, \
+ {0x1002, 0x3152, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X300 M24"}, \
+ {0x1002, 0x3154, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FireGL M24 GL"}, \
+ {0x1002, 0x3E50, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV380 X600"}, \
+ {0x1002, 0x3E54, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireGL V3200 RV380"}, \
+ {0x1002, 0x4136, CHIP_RS100|RADEON_IS_IGP, "ATI Radeon RS100 IGP 320"}, \
+ {0x1002, 0x4137, CHIP_RS200|RADEON_IS_IGP, "ATI Radeon RS200 IGP 340"}, \
+ {0x1002, 0x4144, CHIP_R300, "ATI Radeon AD 9500"}, \
+ {0x1002, 0x4145, CHIP_R300, "ATI Radeon AE 9700 Pro"}, \
+ {0x1002, 0x4146, CHIP_R300, "ATI Radeon AF R300 9600TX"}, \
+ {0x1002, 0x4147, CHIP_R300, "ATI FireGL AG Z1"}, \
+ {0x1002, 0x4148, CHIP_R350, "ATI Radeon AH 9800 SE"}, \
+ {0x1002, 0x4149, CHIP_R350, "ATI Radeon AI 9800"}, \
+ {0x1002, 0x414A, CHIP_R350, "ATI Radeon AJ 9800"}, \
+ {0x1002, 0x414B, CHIP_R350, "ATI FireGL AK X2"}, \
+ {0x1002, 0x4150, CHIP_RV350, "ATI Radeon AP 9600"}, \
+ {0x1002, 0x4151, CHIP_RV350, "ATI Radeon AQ 9600 SE"}, \
+ {0x1002, 0x4152, CHIP_RV350, "ATI Radeon AR 9600 XT"}, \
+ {0x1002, 0x4153, CHIP_RV350, "ATI Radeon AS 9550"}, \
+ {0x1002, 0x4154, CHIP_RV350, "ATI FireGL AT T2"}, \
+ {0x1002, 0x4155, CHIP_RV350, "ATI Radeon 9650"}, \
+ {0x1002, 0x4156, CHIP_RV350, "ATI FireGL AV RV360 T2"}, \
+ {0x1002, 0x4237, CHIP_RS200|RADEON_IS_IGP, "ATI Radeon RS250 IGP"}, \
+ {0x1002, 0x4242, CHIP_R200, "ATI Radeon BB R200 AIW 8500DV"}, \
+ {0x1002, 0x4243, CHIP_R200, "ATI Radeon BC R200"}, \
+ {0x1002, 0x4336, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS100 Mobility U1"}, \
+ {0x1002, 0x4337, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS200 Mobility IGP 340M"}, \
+ {0x1002, 0x4437, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS250 Mobility IGP"}, \
+ {0x1002, 0x4966, CHIP_RV250, "ATI Radeon If RV250 9000"}, \
+ {0x1002, 0x4967, CHIP_RV250, "ATI Radeon Ig RV250 9000"}, \
+ {0x1002, 0x4A48, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JH R420 X800"}, \
+ {0x1002, 0x4A49, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JI R420 X800 Pro"}, \
+ {0x1002, 0x4A4A, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JJ R420 X800 SE"}, \
+ {0x1002, 0x4A4B, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JK R420 X800 XT"}, \
+ {0x1002, 0x4A4C, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JL R420 X800"}, \
+ {0x1002, 0x4A4D, CHIP_R420|RADEON_NEW_MEMMAP, "ATI FireGL JM X3-256"}, \
+ {0x1002, 0x4A4E, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon JN R420 Mobility M18"}, \
+ {0x1002, 0x4A4F, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JO R420 X800 SE"}, \
+ {0x1002, 0x4A50, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JP R420 X800 XT PE"}, \
+ {0x1002, 0x4A54, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JT R420 AIW X800 VE"}, \
+ {0x1002, 0x4B49, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 XT"}, \
+ {0x1002, 0x4B4A, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 SE"}, \
+ {0x1002, 0x4B4B, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 Pro"}, \
+ {0x1002, 0x4B4C, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 XT PE"}, \
+ {0x1002, 0x4C57, CHIP_RV200|RADEON_IS_MOBILITY, "ATI Radeon LW RV200 Mobility 7500 M7"}, \
+ {0x1002, 0x4C58, CHIP_RV200|RADEON_IS_MOBILITY, "ATI Radeon LX RV200 Mobility FireGL 7800 M7"}, \
+ {0x1002, 0x4C59, CHIP_RV100|RADEON_IS_MOBILITY, "ATI Radeon LY RV100 Mobility M6"}, \
+ {0x1002, 0x4C5A, CHIP_RV100|RADEON_IS_MOBILITY, "ATI Radeon LZ RV100 Mobility M6"}, \
+ {0x1002, 0x4C64, CHIP_RV250|RADEON_IS_MOBILITY, "ATI Radeon Ld RV250 Mobility 9000 M9"}, \
+ {0x1002, 0x4C66, CHIP_RV250, "ATI Radeon Lf RV250 Mobility 9000 M9 / FireMV 2400 PCI"}, \
+ {0x1002, 0x4C67, CHIP_RV250|RADEON_IS_MOBILITY, "ATI Radeon Lg RV250 Mobility 9000 M9"}, \
+ {0x1002, 0x4E44, CHIP_R300, "ATI Radeon ND R300 9700 Pro"}, \
+ {0x1002, 0x4E45, CHIP_R300, "ATI Radeon NE R300 9500 Pro / 9700"}, \
+ {0x1002, 0x4E46, CHIP_R300, "ATI Radeon NF R300 9600TX"}, \
+ {0x1002, 0x4E47, CHIP_R300, "ATI Radeon NG R300 FireGL X1"}, \
+ {0x1002, 0x4E48, CHIP_R350, "ATI Radeon NH R350 9800 Pro"}, \
+ {0x1002, 0x4E49, CHIP_R350, "ATI Radeon NI R350 9800"}, \
+ {0x1002, 0x4E4A, CHIP_R350, "ATI Radeon NJ R360 9800 XT"}, \
+ {0x1002, 0x4E4B, CHIP_R350, "ATI FireGL NK X2"}, \
+ {0x1002, 0x4E50, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NP"}, \
+ {0x1002, 0x4E51, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NQ"}, \
+ {0x1002, 0x4E52, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M11 NR"}, \
+ {0x1002, 0x4E53, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NS"}, \
+ {0x1002, 0x4E54, CHIP_RV350|RADEON_IS_MOBILITY, "ATI FireGL T2/T2e"}, \
+ {0x1002, 0x4E56, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon Mobility 9550"}, \
+ {0x1002, 0x5144, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QD R100"}, \
+ {0x1002, 0x5145, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QE R100"}, \
+ {0x1002, 0x5146, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QF R100"}, \
+ {0x1002, 0x5147, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QG R100"}, \
+ {0x1002, 0x5148, CHIP_R200, "ATI Radeon QH R200 8500"}, \
+ {0x1002, 0x514C, CHIP_R200, "ATI Radeon QL R200 8500 LE"}, \
+ {0x1002, 0x514D, CHIP_R200, "ATI Radeon QM R200 9100"}, \
+ {0x1002, 0x5157, CHIP_RV200, "ATI Radeon QW RV200 7500"}, \
+ {0x1002, 0x5158, CHIP_RV200, "ATI Radeon QX RV200 7500"}, \
+ {0x1002, 0x5159, CHIP_RV100, "ATI Radeon QY RV100 7000/VE"}, \
+ {0x1002, 0x515A, CHIP_RV100, "ATI Radeon QZ RV100 7000/VE"}, \
+ {0x1002, 0x515E, CHIP_RV100, "ATI ES1000 RN50"}, \
+ {0x1002, 0x5460, CHIP_RV380|RADEON_IS_MOBILITY, "ATI Radeon Mobility X300 M22"}, \
+ {0x1002, 0x5462, CHIP_RV380|RADEON_IS_MOBILITY, "ATI Radeon Mobility X600 SE M24C"}, \
+ {0x1002, 0x5464, CHIP_RV380|RADEON_IS_MOBILITY, "ATI FireGL M22 GL 5464"}, \
+ {0x1002, 0x5548, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800"}, \
+ {0x1002, 0x5549, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 Pro"}, \
+ {0x1002, 0x554A, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 XT PE"}, \
+ {0x1002, 0x554B, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 SE"}, \
+ {0x1002, 0x554C, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800 XTP"}, \
+ {0x1002, 0x554D, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800 XL"}, \
+ {0x1002, 0x554E, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800 SE"}, \
+ {0x1002, 0x554F, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800"}, \
+ {0x1002, 0x5550, CHIP_R423|RADEON_NEW_MEMMAP, "ATI FireGL V7100 R423"}, \
+ {0x1002, 0x5551, CHIP_R423|RADEON_NEW_MEMMAP, "ATI FireGL V5100 R423 UQ"}, \
+ {0x1002, 0x5552, CHIP_R423|RADEON_NEW_MEMMAP, "ATI FireGL unknown R423 UR"}, \
+ {0x1002, 0x5554, CHIP_R423|RADEON_NEW_MEMMAP, "ATI FireGL unknown R423 UT"}, \
+ {0x1002, 0x564A, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5000 M26"}, \
+ {0x1002, 0x564B, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5000 M26"}, \
+ {0x1002, 0x564F, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X700 XL M26"}, \
+ {0x1002, 0x5652, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X700 M26"}, \
+ {0x1002, 0x5653, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X700 M26"}, \
+ {0x1002, 0x5657, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon X550XTX"}, \
+ {0x1002, 0x5834, CHIP_RS300|RADEON_IS_IGP, "ATI Radeon RS300 9100 IGP"}, \
+ {0x1002, 0x5835, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS300 Mobility IGP"}, \
+ {0x1002, 0x5954, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI RS480 XPRESS 200G"}, \
+ {0x1002, 0x5955, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200M 5955"}, \
+ {0x1002, 0x5974, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RS482 XPRESS 200"}, \
+ {0x1002, 0x5975, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RS485 XPRESS 1100 IGP"}, \
+ {0x1002, 0x5960, CHIP_RV280, "ATI Radeon RV280 9250"}, \
+ {0x1002, 0x5961, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5962, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5964, CHIP_RV280, "ATI Radeon RV280 9200 SE"}, \
+ {0x1002, 0x5965, CHIP_RV280, "ATI FireMV 2200 PCI"}, \
+ {0x1002, 0x5969, CHIP_RV100, "ATI ES1000 RN50"}, \
+ {0x1002, 0x5a41, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200 5A41 (PCIE)"}, \
+ {0x1002, 0x5a42, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200M 5A42 (PCIE)"}, \
+ {0x1002, 0x5a61, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI Radeon RC410 XPRESS 200"}, \
+ {0x1002, 0x5a62, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RC410 XPRESS 200M"}, \
+ {0x1002, 0x5b60, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X300 SE"}, \
+ {0x1002, 0x5b62, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X600 Pro"}, \
+ {0x1002, 0x5b63, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X550"}, \
+ {0x1002, 0x5b64, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireGL V3100 (RV370) 5B64"}, \
+ {0x1002, 0x5b65, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireMV 2200 PCIE (RV370) 5B65"}, \
+ {0x1002, 0x5c61, CHIP_RV280|RADEON_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
+ {0x1002, 0x5c63, CHIP_RV280|RADEON_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
+ {0x1002, 0x5d48, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X800 XT M28"}, \
+ {0x1002, 0x5d49, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5100 M28"}, \
+ {0x1002, 0x5d4a, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X800 M28"}, \
+ {0x1002, 0x5d4c, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850"}, \
+ {0x1002, 0x5d4d, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 XT PE"}, \
+ {0x1002, 0x5d4e, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 SE"}, \
+ {0x1002, 0x5d4f, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 Pro"}, \
+ {0x1002, 0x5d50, CHIP_R423|RADEON_NEW_MEMMAP, "ATI unknown Radeon / FireGL R480"}, \
+ {0x1002, 0x5d52, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 XT"}, \
+ {0x1002, 0x5d57, CHIP_R423|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 XT"}, \
+ {0x1002, 0x5e48, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI FireGL V5000 RV410"}, \
+ {0x1002, 0x5e4a, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 XT"}, \
+ {0x1002, 0x5e4b, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 Pro"}, \
+ {0x1002, 0x5e4c, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
+ {0x1002, 0x5e4d, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700"}, \
+ {0x1002, 0x5e4f, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
+ {0x1002, 0x7100, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
+ {0x1002, 0x7101, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1800 XT"}, \
+ {0x1002, 0x7102, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1800"}, \
+ {0x1002, 0x7103, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V7200"}, \
+ {0x1002, 0x7104, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V7200"}, \
+ {0x1002, 0x7105, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V5300"}, \
+ {0x1002, 0x7106, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V7100"}, \
+ {0x1002, 0x7108, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
+ {0x1002, 0x7109, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
+ {0x1002, 0x710A, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
+ {0x1002, 0x710B, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
+ {0x1002, 0x710C, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
+ {0x1002, 0x710E, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V7300"}, \
+ {0x1002, 0x710F, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V7350"}, \
+ {0x1002, 0x7140, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
+ {0x1002, 0x7141, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI RV505"}, \
+ {0x1002, 0x7142, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \
+ {0x1002, 0x7143, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550"}, \
+ {0x1002, 0x7144, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M54-GL"}, \
+ {0x1002, 0x7145, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1400"}, \
+ {0x1002, 0x7146, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \
+ {0x1002, 0x7147, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550 64-bit"}, \
+ {0x1002, 0x7149, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \
+ {0x1002, 0x714A, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \
+ {0x1002, 0x714B, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \
+ {0x1002, 0x714C, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \
+ {0x1002, 0x714D, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \
+ {0x1002, 0x714E, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \
+ {0x1002, 0x714F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI RV505"}, \
+ {0x1002, 0x7151, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI RV505"}, \
+ {0x1002, 0x7152, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI FireGL V3300"}, \
+ {0x1002, 0x7153, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI FireGL V3350"}, \
+ {0x1002, 0x715E, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \
+ {0x1002, 0x715F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550 64-bit"}, \
+ {0x1002, 0x7180, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \
+ {0x1002, 0x7181, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
+ {0x1002, 0x7183, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \
+ {0x1002, 0x7186, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1450"}, \
+ {0x1002, 0x7187, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \
+ {0x1002, 0x7188, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X2300"}, \
+ {0x1002, 0x718A, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X2300"}, \
+ {0x1002, 0x718B, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1350"}, \
+ {0x1002, 0x718C, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1350"}, \
+ {0x1002, 0x718D, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1450"}, \
+ {0x1002, 0x718F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \
+ {0x1002, 0x7193, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550"}, \
+ {0x1002, 0x7196, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1350"}, \
+ {0x1002, 0x719B, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI FireMV 2250"}, \
+ {0x1002, 0x719F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550 64-bit"}, \
+ {0x1002, 0x71C0, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
+ {0x1002, 0x71C1, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \
+ {0x1002, 0x71C2, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
+ {0x1002, 0x71C3, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
+ {0x1002, 0x71C4, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5200"}, \
+ {0x1002, 0x71C5, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1600"}, \
+ {0x1002, 0x71C6, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \
+ {0x1002, 0x71C7, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \
+ {0x1002, 0x71CD, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
+ {0x1002, 0x71CE, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1300 XT/X1600 Pro"}, \
+ {0x1002, 0x71D2, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI FireGL V3400"}, \
+ {0x1002, 0x71D4, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5250"}, \
+ {0x1002, 0x71D5, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1700"}, \
+ {0x1002, 0x71D6, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1700 XT"}, \
+ {0x1002, 0x71DA, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI FireGL V5200"}, \
+ {0x1002, 0x71DE, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1700"}, \
+ {0x1002, 0x7200, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X2300HD"}, \
+ {0x1002, 0x7210, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2300"}, \
+ {0x1002, 0x7211, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2300"}, \
+ {0x1002, 0x7240, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1950"}, \
+ {0x1002, 0x7243, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x7244, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1950"}, \
+ {0x1002, 0x7245, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x7246, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x7247, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x7248, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x7249, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x724A, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x724B, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x724C, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x724D, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x724E, CHIP_R580|RADEON_NEW_MEMMAP, "ATI AMD Stream Processor"}, \
+ {0x1002, 0x724F, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
+ {0x1002, 0x7280, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI Radeon X1950"}, \
+ {0x1002, 0x7281, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
+ {0x1002, 0x7283, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
+ {0x1002, 0x7284, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1900"}, \
+ {0x1002, 0x7287, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
+ {0x1002, 0x7288, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI Radeon X1950 GT"}, \
+ {0x1002, 0x7289, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI RV570"}, \
+ {0x1002, 0x728B, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI RV570"}, \
+ {0x1002, 0x728C, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI FireGL V7400"}, \
+ {0x1002, 0x7290, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
+ {0x1002, 0x7291, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \
+ {0x1002, 0x7293, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \
+ {0x1002, 0x7297, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
+ {0x1002, 0x7834, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon RS350 9000/9100 IGP"}, \
+ {0x1002, 0x7835, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon RS350 Mobility IGP"}, \
+ {0x1002, 0x793f, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon X1200"}, \
+ {0x1002, 0x7941, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon X1200"}, \
+ {0x1002, 0x7942, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon X1200"}, \
+ {0x1002, 0x791e, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS690 X1250 IGP"}, \
+ {0x1002, 0x791f, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS690 X1270 IGP"}, \
+ {0x1002, 0x796c, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
+ {0x1002, 0x796d, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
+ {0x1002, 0x796e, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
+ {0x1002, 0x796f, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS740 HD2100 IGP"}, \
+ {0x1002, 0x9400, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 XT"}, \
+ {0x1002, 0x9401, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 XT"}, \
+ {0x1002, 0x9402, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 XT"}, \
+ {0x1002, 0x9403, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 Pro"}, \
+ {0x1002, 0x9405, CHIP_R600|RADEON_NEW_MEMMAP, "ATI Radeon HD 2900 GT"}, \
+ {0x1002, 0x940A, CHIP_R600|RADEON_NEW_MEMMAP, "ATI FireGL V8650"}, \
+ {0x1002, 0x940B, CHIP_R600|RADEON_NEW_MEMMAP, "ATI FireGL V8600"}, \
+ {0x1002, 0x940F, CHIP_R600|RADEON_NEW_MEMMAP, "ATI FireGL V7600"}, \
+ {0x1002, 0x94A0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4830"}, \
+ {0x1002, 0x94A1, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850"}, \
+ {0x1002, 0x94A3, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M7740"}, \
+ {0x1002, 0x94B1, CHIP_RV740|RADEON_NEW_MEMMAP, "ATI RV740"}, \
+ {0x1002, 0x94B3, CHIP_RV740|RADEON_NEW_MEMMAP, "ATI Radeon HD 4770"}, \
+ {0x1002, 0x94B4, CHIP_RV740|RADEON_NEW_MEMMAP, "ATI Radeon HD 4700 Series"}, \
+ {0x1002, 0x94B5, CHIP_RV740|RADEON_NEW_MEMMAP, "ATI Radeon HD 4770"}, \
+ {0x1002, 0x94B9, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M5750"}, \
+ {0x1002, 0x94C0, CHIP_RV610|RADEON_NEW_MEMMAP, "RV610"}, \
+ {0x1002, 0x94C1, CHIP_RV610|RADEON_NEW_MEMMAP, "Radeon HD 2400 XT"}, \
+ {0x1002, 0x94C3, CHIP_RV610|RADEON_NEW_MEMMAP, "Radeon HD 2400 Pro"}, \
+ {0x1002, 0x94C4, CHIP_RV610|RADEON_NEW_MEMMAP, "Radeon HD 2400 PRO AGP"}, \
+ {0x1002, 0x94C5, CHIP_RV610|RADEON_NEW_MEMMAP, "FireGL V4000"}, \
+ {0x1002, 0x94C6, CHIP_RV610|RADEON_NEW_MEMMAP, "RV610"}, \
+ {0x1002, 0x94C7, CHIP_RV610|RADEON_NEW_MEMMAP, "ATI Radeon HD 2350"}, \
+ {0x1002, 0x94C8, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2400 XT"}, \
+ {0x1002, 0x94C9, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2400"}, \
+ {0x1002, 0x94CB, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI RADEON E2400"}, \
+ {0x1002, 0x94CC, CHIP_RV610|RADEON_NEW_MEMMAP, "ATI RV610"}, \
+ {0x1002, 0x94CD, CHIP_RV610|RADEON_NEW_MEMMAP, "ATI FireMV 2260"}, \
+ {0x1002, 0x9500, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI RV670"}, \
+ {0x1002, 0x9501, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3870"}, \
+ {0x1002, 0x9504, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3850"}, \
+ {0x1002, 0x9505, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3850"}, \
+ {0x1002, 0x9506, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3850 X2"}, \
+ {0x1002, 0x9507, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI RV670"}, \
+ {0x1002, 0x9508, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3870"}, \
+ {0x1002, 0x9509, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3870 X2"}, \
+ {0x1002, 0x950F, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3870 X2"}, \
+ {0x1002, 0x9511, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI FireGL V7700"}, \
+ {0x1002, 0x9515, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3850"}, \
+ {0x1002, 0x9517, CHIP_RV670|RADEON_NEW_MEMMAP, "ATI Radeon HD3690"}, \
+ {0x1002, 0x9519, CHIP_RV670|RADEON_NEW_MEMMAP, "AMD Firestream 9170"}, \
+ {0x1002, 0x9580, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI RV630"}, \
+ {0x1002, 0x9581, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2600"}, \
+ {0x1002, 0x9583, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2600 XT"}, \
+ {0x1002, 0x9586, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 XT AGP"}, \
+ {0x1002, 0x9587, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 Pro AGP"}, \
+ {0x1002, 0x9588, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 XT"}, \
+ {0x1002, 0x9589, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 Pro"}, \
+ {0x1002, 0x958A, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Gemini RV630"}, \
+ {0x1002, 0x958B, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Gemini Mobility Radeon HD 2600 XT"}, \
+ {0x1002, 0x958C, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI FireGL V5600"}, \
+ {0x1002, 0x958D, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI FireGL V3600"}, \
+ {0x1002, 0x958E, CHIP_RV630|RADEON_NEW_MEMMAP, "ATI Radeon HD 2600 LE"}, \
+ {0x1002, 0x958F, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL Graphics Processor"}, \
+ {0x1002, 0x95C0, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3470"}, \
+ {0x1002, 0x95C5, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3450"}, \
+ {0x1002, 0x95C6, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3450"}, \
+ {0x1002, 0x95C7, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3430"}, \
+ {0x1002, 0x95C9, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI Radeon HD 3450"}, \
+ {0x1002, 0x95C2, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3430"}, \
+ {0x1002, 0x95C4, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3400 Series"}, \
+ {0x1002, 0x95CC, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FirePro V3700"}, \
+ {0x1002, 0x95CD, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FireMV 2450"}, \
+ {0x1002, 0x95CE, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FireMV 2260"}, \
+ {0x1002, 0x95CF, CHIP_RV620|RADEON_NEW_MEMMAP, "ATI FireMV 2260"}, \
+ {0x1002, 0x9590, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI Radeon HD 3600 Series"}, \
+ {0x1002, 0x9596, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI Radeon HD 3650 AGP"}, \
+ {0x1002, 0x9597, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI Radeon HD 3600 PRO"}, \
+ {0x1002, 0x9598, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI Radeon HD 3600 XT"}, \
+ {0x1002, 0x9599, CHIP_RV635|RADEON_NEW_MEMMAP, "ATI Radeon HD 3600 PRO"}, \
+ {0x1002, 0x9591, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3650"}, \
+ {0x1002, 0x9593, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 3670"}, \
+ {0x1002, 0x9595, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5700"}, \
+ {0x1002, 0x959B, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5725"}, \
+ {0x1002, 0x9610, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 3200 Graphics"}, \
+ {0x1002, 0x9611, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3100 Graphics"}, \
+ {0x1002, 0x9612, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 3200 Graphics"}, \
+ {0x1002, 0x9613, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3100 Graphics"}, \
+ {0x1002, 0x9614, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3300 Graphics"}, \
+ {0x1002, 0x9615, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3200 Graphics"}, \
+ {0x1002, 0x9616, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 3000 Graphics"}, \
+ {0x1002, 0x9710, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 4200"}, \
+ {0x1002, 0x9711, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon 4100"}, \
+ {0x1002, 0x9712, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Mobility Radeon HD 4200"}, \
+ {0x1002, 0x9713, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Mobility Radeon 4100"}, \
+ {0x1002, 0x9714, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI RS880"}, \
+ {0x1002, 0x9715, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "ATI Radeon HD 4250"}, \
+ {0x1002, 0x9440, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
+ {0x1002, 0x9441, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4870 X2"}, \
+ {0x1002, 0x9442, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
+ {0x1002, 0x9443, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4850 X2"}, \
+ {0x1002, 0x944C, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
+ {0x1002, 0x9450, CHIP_RV770|RADEON_NEW_MEMMAP, "AMD FireStream 9270"}, \
+ {0x1002, 0x9452, CHIP_RV770|RADEON_NEW_MEMMAP, "AMD FireStream 9250"}, \
+ {0x1002, 0x9444, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V8750 (FireGL)"}, \
+ {0x1002, 0x9446, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V7760 (FireGL)"}, \
+ {0x1002, 0x9456, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro V8700 (FireGL)"}, \
+ {0x1002, 0x944E, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI FirePro RV770"}, \
+ {0x1002, 0x944A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850"}, \
+ {0x1002, 0x944B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4850 X2"}, \
+ {0x1002, 0x945A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4870"}, \
+ {0x1002, 0x945B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon M98"}, \
+ {0x1002, 0x9460, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
+ {0x1002, 0x9462, CHIP_RV770|RADEON_NEW_MEMMAP, "ATI Radeon 4800 Series"}, \
+ {0x1002, 0x946A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M7750"}, \
+ {0x1002, 0x946B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \
+ {0x1002, 0x947A, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \
+ {0x1002, 0x947B, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M98"}, \
+ {0x1002, 0x9487, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon RV730 (AGP)"}, \
+ {0x1002, 0x948F, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon RV730 (AGP)"}, \
+ {0x1002, 0x9490, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4670"}, \
+ {0x1002, 0x9495, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4600 Series"}, \
+ {0x1002, 0x9498, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI Radeon HD 4650"}, \
+ {0x1002, 0x9480, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4650"}, \
+ {0x1002, 0x9488, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 4670"}, \
+ {0x1002, 0x9489, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro M5750"}, \
+ {0x1002, 0x9491, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI RADEON E4600"}, \
+ {0x1002, 0x949C, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V7750 (FireGL)"}, \
+ {0x1002, 0x949E, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V5700 (FireGL)"}, \
+ {0x1002, 0x949F, CHIP_RV730|RADEON_NEW_MEMMAP, "ATI FirePro V3750 (FireGL)"}, \
+ {0x1002, 0x9540, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon HD 4550"}, \
+ {0x1002, 0x9541, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \
+ {0x1002, 0x9542, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \
+ {0x1002, 0x954E, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon RV710"}, \
+ {0x1002, 0x954F, CHIP_RV710|RADEON_NEW_MEMMAP, "ATI Radeon HD 4350"}, \
+ {0x1002, 0x9552, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4300 Series"}, \
+ {0x1002, 0x9553, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4500 Series"}, \
+ {0x1002, 0x9555, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon 4500 Series"}, \
+ {0x1002, 0x9557, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FirePro RG220"}, \
+ {0, 0, 0, NULL}
+
+#define r128_PCI_IDS \
+ {0x1002, 0x4c45, 0, "ATI Rage 128 Mobility LE (PCI)"}, \
+ {0x1002, 0x4c46, 0, "ATI Rage 128 Mobility LF (AGP)"}, \
+ {0x1002, 0x4d46, 0, "ATI Rage 128 Mobility MF (AGP)"}, \
+ {0x1002, 0x4d4c, 0, "ATI Rage 128 Mobility ML (AGP)"}, \
+ {0x1002, 0x5041, 0, "ATI Rage 128 Pro PA (PCI)"}, \
+ {0x1002, 0x5042, 0, "ATI Rage 128 Pro PB (AGP)"}, \
+ {0x1002, 0x5043, 0, "ATI Rage 128 Pro PC (AGP)"}, \
+ {0x1002, 0x5044, 0, "ATI Rage 128 Pro PD (PCI)"}, \
+ {0x1002, 0x5045, 0, "ATI Rage 128 Pro PE (AGP)"}, \
+ {0x1002, 0x5046, 0, "ATI Rage 128 Pro PF (AGP)"}, \
+ {0x1002, 0x5047, 0, "ATI Rage 128 Pro PG (PCI)"}, \
+ {0x1002, 0x5048, 0, "ATI Rage 128 Pro PH (AGP)"}, \
+ {0x1002, 0x5049, 0, "ATI Rage 128 Pro PI (AGP)"}, \
+ {0x1002, 0x504A, 0, "ATI Rage 128 Pro PJ (PCI)"}, \
+ {0x1002, 0x504B, 0, "ATI Rage 128 Pro PK (AGP)"}, \
+ {0x1002, 0x504C, 0, "ATI Rage 128 Pro PL (AGP)"}, \
+ {0x1002, 0x504D, 0, "ATI Rage 128 Pro PM (PCI)"}, \
+ {0x1002, 0x504E, 0, "ATI Rage 128 Pro PN (AGP)"}, \
+ {0x1002, 0x504F, 0, "ATI Rage 128 Pro PO (AGP)"}, \
+ {0x1002, 0x5050, 0, "ATI Rage 128 Pro PP (PCI)"}, \
+ {0x1002, 0x5051, 0, "ATI Rage 128 Pro PQ (AGP)"}, \
+ {0x1002, 0x5052, 0, "ATI Rage 128 Pro PR (PCI)"}, \
+ {0x1002, 0x5053, 0, "ATI Rage 128 Pro PS (PCI)"}, \
+ {0x1002, 0x5054, 0, "ATI Rage 128 Pro PT (AGP)"}, \
+ {0x1002, 0x5055, 0, "ATI Rage 128 Pro PU (AGP)"}, \
+ {0x1002, 0x5056, 0, "ATI Rage 128 Pro PV (PCI)"}, \
+ {0x1002, 0x5057, 0, "ATI Rage 128 Pro PW (AGP)"}, \
+ {0x1002, 0x5058, 0, "ATI Rage 128 Pro PX (AGP)"}, \
+ {0x1002, 0x5245, 0, "ATI Rage 128 RE (PCI)"}, \
+ {0x1002, 0x5246, 0, "ATI Rage 128 RF (AGP)"}, \
+ {0x1002, 0x5247, 0, "ATI Rage 128 RG (AGP)"}, \
+ {0x1002, 0x524b, 0, "ATI Rage 128 RK (PCI)"}, \
+ {0x1002, 0x524c, 0, "ATI Rage 128 RL (AGP)"}, \
+ {0x1002, 0x534d, 0, "ATI Rage 128 SM (AGP)"}, \
+ {0x1002, 0x5446, 0, "ATI Rage 128 Pro Ultra TF (AGP)"}, \
+ {0x1002, 0x544C, 0, "ATI Rage 128 Pro Ultra TL (AGP)"}, \
+ {0x1002, 0x5452, 0, "ATI Rage 128 Pro Ultra TR (AGP)"}, \
+ {0, 0, 0, NULL}
+
+#define mga_PCI_IDS \
+ {0x102b, 0x0520, MGA_CARD_TYPE_G200, "Matrox G200 (PCI)"}, \
+ {0x102b, 0x0521, MGA_CARD_TYPE_G200, "Matrox G200 (AGP)"}, \
+ {0x102b, 0x0525, MGA_CARD_TYPE_G400, "Matrox G400/G450 (AGP)"}, \
+ {0x102b, 0x2527, MGA_CARD_TYPE_G550, "Matrox G550 (AGP)"}, \
+ {0, 0, 0, NULL}
+
+#define mach64_PCI_IDS \
+ {0x1002, 0x4749, 0, "3D Rage Pro"}, \
+ {0x1002, 0x4750, 0, "3D Rage Pro 215GP"}, \
+ {0x1002, 0x4751, 0, "3D Rage Pro 215GQ"}, \
+ {0x1002, 0x4742, 0, "3D Rage Pro AGP 1X/2X"}, \
+ {0x1002, 0x4744, 0, "3D Rage Pro AGP 1X"}, \
+ {0x1002, 0x4c49, 0, "3D Rage LT Pro"}, \
+ {0x1002, 0x4c50, 0, "3D Rage LT Pro"}, \
+ {0x1002, 0x4c51, 0, "3D Rage LT Pro"}, \
+ {0x1002, 0x4c42, 0, "3D Rage LT Pro AGP-133"}, \
+ {0x1002, 0x4c44, 0, "3D Rage LT Pro AGP-66"}, \
+ {0x1002, 0x474c, 0, "Rage XC"}, \
+ {0x1002, 0x474f, 0, "Rage XL"}, \
+ {0x1002, 0x4752, 0, "Rage XL"}, \
+ {0x1002, 0x4753, 0, "Rage XC"}, \
+ {0x1002, 0x474d, 0, "Rage XL AGP 2X"}, \
+ {0x1002, 0x474e, 0, "Rage XC AGP"}, \
+ {0x1002, 0x4c52, 0, "Rage Mobility P/M"}, \
+ {0x1002, 0x4c53, 0, "Rage Mobility L"}, \
+ {0x1002, 0x4c4d, 0, "Rage Mobility P/M AGP 2X"}, \
+ {0x1002, 0x4c4e, 0, "Rage Mobility L AGP 2X"}, \
+ {0, 0, 0, NULL}
+
+#define sis_PCI_IDS \
+ {0x1039, 0x0300, 0, "SiS 300/305"}, \
+ {0x1039, 0x5300, 0, "SiS 540"}, \
+ {0x1039, 0x6300, 0, "SiS 630"}, \
+ {0x1039, 0x6330, SIS_CHIP_315, "SiS 661"}, \
+ {0x1039, 0x7300, 0, "SiS 730"}, \
+ {0x18CA, 0x0040, SIS_CHIP_315, "Volari V3XT/V5/V8"}, \
+ {0x18CA, 0x0042, SIS_CHIP_315, "Volari Unknown"}, \
+ {0, 0, 0, NULL}
+
+#define tdfx_PCI_IDS \
+ {0x121a, 0x0003, 0, "3dfx Voodoo Banshee"}, \
+ {0x121a, 0x0004, 0, "3dfx Voodoo3 2000"}, \
+ {0x121a, 0x0005, 0, "3dfx Voodoo3 3000"}, \
+ {0x121a, 0x0007, 0, "3dfx Voodoo4 4500"}, \
+ {0x121a, 0x0009, 0, "3dfx Voodoo5 5500"}, \
+ {0x121a, 0x000b, 0, "3dfx Voodoo4 4200"}, \
+ {0, 0, 0, NULL}
+
+#define viadrv_PCI_IDS \
+ {0x1106, 0x3022, 0, "VIA CLE266 3022"}, \
+ {0x1106, 0x3118, VIA_PRO_GROUP_A, "VIA CN400 / PM8X0"}, \
+ {0x1106, 0x3122, 0, "VIA CLE266"}, \
+ {0x1106, 0x7205, 0, "VIA KM400"}, \
+ {0x1106, 0x3108, 0, "VIA K8M800"}, \
+ {0x1106, 0x3344, 0, "VIA CN700 / VM800 / P4M800Pro"}, \
+ {0x1106, 0x3343, 0, "VIA P4M890"}, \
+ {0x1106, 0x3230, VIA_DX9_0, "VIA K8M890"}, \
+ {0x1106, 0x3157, VIA_PRO_GROUP_A, "VIA CX700"}, \
+ {0x1106, 0x3371, VIA_DX9_0, "VIA P4M900 / VN896"}, \
+ {0, 0, 0, NULL}
+
+#define i810_PCI_IDS \
+ {0x8086, 0x7121, 0, "Intel i810 GMCH"}, \
+ {0x8086, 0x7123, 0, "Intel i810-DC100 GMCH"}, \
+ {0x8086, 0x7125, 0, "Intel i810E GMCH"}, \
+ {0x8086, 0x1132, 0, "Intel i815 GMCH"}, \
+ {0, 0, 0, NULL}
+
+#define i830_PCI_IDS \
+ {0x8086, 0x3577, 0, "Intel i830M GMCH"}, \
+ {0x8086, 0x2562, 0, "Intel i845G GMCH"}, \
+ {0x8086, 0x3582, 0, "Intel i852GM/i855GM GMCH"}, \
+ {0x8086, 0x2572, 0, "Intel i865G GMCH"}, \
+ {0, 0, 0, NULL}
+
+#define gamma_PCI_IDS \
+ {0x3d3d, 0x0008, 0, "3DLabs GLINT Gamma G1"}, \
+ {0, 0, 0, NULL}
+
+#define savage_PCI_IDS \
+ {0x5333, 0x8a20, S3_SAVAGE3D, "Savage 3D"}, \
+ {0x5333, 0x8a21, S3_SAVAGE3D, "Savage 3D/MV"}, \
+ {0x5333, 0x8a22, S3_SAVAGE4, "Savage4"}, \
+ {0x5333, 0x8a23, S3_SAVAGE4, "Savage4"}, \
+ {0x5333, 0x8c10, S3_SAVAGE_MX, "Savage/MX-MV"}, \
+ {0x5333, 0x8c11, S3_SAVAGE_MX, "Savage/MX"}, \
+ {0x5333, 0x8c12, S3_SAVAGE_MX, "Savage/IX-MV"}, \
+ {0x5333, 0x8c13, S3_SAVAGE_MX, "Savage/IX"}, \
+ {0x5333, 0x8c22, S3_SUPERSAVAGE, "SuperSavage MX/128"}, \
+ {0x5333, 0x8c24, S3_SUPERSAVAGE, "SuperSavage MX/64"}, \
+ {0x5333, 0x8c26, S3_SUPERSAVAGE, "SuperSavage MX/64C"}, \
+ {0x5333, 0x8c2a, S3_SUPERSAVAGE, "SuperSavage IX/128 SDR"}, \
+ {0x5333, 0x8c2b, S3_SUPERSAVAGE, "SuperSavage IX/128 DDR"}, \
+ {0x5333, 0x8c2c, S3_SUPERSAVAGE, "SuperSavage IX/64 SDR"}, \
+ {0x5333, 0x8c2d, S3_SUPERSAVAGE, "SuperSavage IX/64 DDR"}, \
+ {0x5333, 0x8c2e, S3_SUPERSAVAGE, "SuperSavage IX/C SDR"}, \
+ {0x5333, 0x8c2f, S3_SUPERSAVAGE, "SuperSavage IX/C DDR"}, \
+ {0x5333, 0x8a25, S3_PROSAVAGE, "ProSavage PM133"}, \
+ {0x5333, 0x8a26, S3_PROSAVAGE, "ProSavage KM133"}, \
+ {0x5333, 0x8d01, S3_TWISTER, "ProSavage Twister PN133"}, \
+ {0x5333, 0x8d02, S3_TWISTER, "ProSavage Twister KN133"}, \
+ {0x5333, 0x8d03, S3_PROSAVAGEDDR, "ProSavage DDR"}, \
+ {0x5333, 0x8d04, S3_PROSAVAGEDDR, "ProSavage DDR-K"}, \
+ {0, 0, 0, NULL}
+
+#define ffb_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define i915_PCI_IDS \
+ {0x8086, 0x3577, CHIP_I8XX, "Intel i830M GMCH"}, \
+ {0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
+ {0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
+ {0x8086, 0x358e, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
+ {0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
+ {0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \
+ {0x8086, 0x258a, CHIP_I9XX|CHIP_I915, "Intel E7221 (i915)"}, \
+ {0x8086, 0x2592, CHIP_I9XX|CHIP_I915, "Intel i915GM"}, \
+ {0x8086, 0x2772, CHIP_I9XX|CHIP_I915, "Intel i945G"}, \
+ {0x8086, 0x27A2, CHIP_I9XX|CHIP_I915, "Intel i945GM"}, \
+ {0x8086, 0x27AE, CHIP_I9XX|CHIP_I915, "Intel i945GME"}, \
+ {0x8086, 0x2972, CHIP_I9XX|CHIP_I965, "Intel i946GZ"}, \
+ {0x8086, 0x2982, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
+ {0x8086, 0x2992, CHIP_I9XX|CHIP_I965, "Intel i965Q"}, \
+ {0x8086, 0x29A2, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
+ {0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \
+ {0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
+ {0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \
+ {0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \
+ {0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \
+ {0x8086, 0x2A42, CHIP_I9XX|CHIP_I965, "Mobile Intel® GM45 Express Chipset"}, \
+ {0x8086, 0x2E02, CHIP_I9XX|CHIP_I965, "Intel Eaglelake"}, \
+ {0x8086, 0x2E12, CHIP_I9XX|CHIP_I965, "Intel Q45/Q43"}, \
+ {0x8086, 0x2E22, CHIP_I9XX|CHIP_I965, "Intel G45/G43"}, \
+ {0x8086, 0x2E32, CHIP_I9XX|CHIP_I965, "Intel G41"}, \
+ {0x8086, 0x2e42, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
+ {0x8086, 0x2e92, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
+ {0x8086, 0x0042, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
+ {0x8086, 0x0046, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
+ {0x8086, 0x0102, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
+ {0x8086, 0x0112, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
+ {0x8086, 0x0122, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
+ {0x8086, 0x0106, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
+ {0x8086, 0x0116, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
+ {0x8086, 0x0126, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
+ {0x8086, 0x010A, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
+ {0x8086, 0xA001, CHIP_I9XX|CHIP_I965, "Intel Pineview"}, \
+ {0x8086, 0xA011, CHIP_I9XX|CHIP_I965, "Intel Pineview (M)"}, \
+ {0, 0, 0, NULL}
+
+#define imagine_PCI_IDS \
+ {0x105d, 0x2309, IMAGINE_128, "Imagine 128"}, \
+ {0x105d, 0x2339, IMAGINE_128_2, "Imagine 128-II"}, \
+ {0x105d, 0x493d, IMAGINE_T2R, "Ticket to Ride"}, \
+ {0x105d, 0x5348, IMAGINE_REV4, "Revolution IV"}, \
+ {0, 0, 0, NULL}
+
+#define nv_PCI_IDS \
+ {0x10DE, 0x0020, NV04, "NVidia RIVA TNT"}, \
+ {0x10DE, 0x0028, NV04, "NVidia RIVA TNT2"}, \
+ {0x10DE, 0x002A, NV04, "NVidia Unknown TNT2"}, \
+ {0x10DE, 0x002C, NV04, "NVidia Vanta"}, \
+ {0x10DE, 0x0029, NV04, "NVidia RIVA TNT2 Ultra"}, \
+ {0x10DE, 0x002D, NV04, "NVidia RIVA TNT2 Model 64"}, \
+ {0x10DE, 0x00A0, NV04, "NVidia Aladdin TNT2"}, \
+ {0x10DE, 0x0100, NV10, "NVidia GeForce 256"}, \
+ {0x10DE, 0x0101, NV10, "NVidia GeForce DDR"}, \
+ {0x10DE, 0x0103, NV10, "NVidia Quadro"}, \
+ {0x10DE, 0x0110, NV10, "NVidia GeForce2 MX/MX 400"}, \
+ {0x10DE, 0x0111, NV10, "NVidia GeForce2 MX 100/200"}, \
+ {0x10DE, 0x0112, NV10, "NVidia GeForce2 Go"}, \
+ {0x10DE, 0x0113, NV10, "NVidia Quadro2 MXR/EX/Go"}, \
+ {0x10DE, 0x0150, NV10, "NVidia GeForce2 GTS"}, \
+ {0x10DE, 0x0151, NV10, "NVidia GeForce2 Ti"}, \
+ {0x10DE, 0x0152, NV10, "NVidia GeForce2 Ultra"}, \
+ {0x10DE, 0x0153, NV10, "NVidia Quadro2 Pro"}, \
+ {0x10DE, 0x0170, NV10, "NVidia GeForce4 MX 460"}, \
+ {0x10DE, 0x0171, NV10, "NVidia GeForce4 MX 440"}, \
+ {0x10DE, 0x0172, NV10, "NVidia GeForce4 MX 420"}, \
+ {0x10DE, 0x0173, NV10, "NVidia GeForce4 MX 440-SE"}, \
+ {0x10DE, 0x0174, NV10, "NVidia GeForce4 440 Go"}, \
+ {0x10DE, 0x0175, NV10, "NVidia GeForce4 420 Go"}, \
+ {0x10DE, 0x0176, NV10, "NVidia GeForce4 420 Go 32M"}, \
+ {0x10DE, 0x0177, NV10, "NVidia GeForce4 460 Go"}, \
+ {0x10DE, 0x0178, NV10, "NVidia Quadro4 550 XGL"}, \
+ {0x10DE, 0x0179, NV10, "NVidia GeForce4"}, \
+ {0x10DE, 0x017A, NV10, "NVidia Quadro4 NVS"}, \
+ {0x10DE, 0x017C, NV10, "NVidia Quadro4 500 GoGL"}, \
+ {0x10DE, 0x017D, NV10, "NVidia GeForce4 410 Go 16M"}, \
+ {0x10DE, 0x0181, NV10, "NVidia GeForce4 MX 440 with AGP8X"}, \
+ {0x10DE, 0x0182, NV10, "NVidia GeForce4 MX 440SE with AGP8X"}, \
+ {0x10DE, 0x0183, NV10, "NVidia GeForce4 MX 420 with AGP8X"}, \
+ {0x10DE, 0x0185, NV10, "NVidia GeForce4 MX 4000"}, \
+ {0x10DE, 0x0186, NV10, "NVidia GeForce4 448 Go"}, \
+ {0x10DE, 0x0187, NV10, "NVidia GeForce4 488 Go"}, \
+ {0x10DE, 0x0188, NV10, "NVidia Quadro4 580 XGL"}, \
+ {0x10DE, 0x0189, NV10, "NVidia GeForce4 MX with AGP8X (Mac)"}, \
+ {0x10DE, 0x018A, NV10, "NVidia Quadro4 280 NVS"}, \
+ {0x10DE, 0x018B, NV10, "NVidia Quadro4 380 XGL"}, \
+ {0x10DE, 0x018C, NV10, "NVidia Quadro NVS 50 PCI"}, \
+ {0x10DE, 0x018D, NV10, "NVidia GeForce4 448 Go"}, \
+ {0x10DE, 0x01A0, NV10, "NVidia GeForce2 Integrated GPU"}, \
+ {0x10DE, 0x01F0, NV10, "NVidia GeForce4 MX Integrated GPU"}, \
+ {0x10DE, 0x0200, NV20, "NVidia GeForce3"}, \
+ {0x10DE, 0x0201, NV20, "NVidia GeForce3 Ti 200"}, \
+ {0x10DE, 0x0202, NV20, "NVidia GeForce3 Ti 500"}, \
+ {0x10DE, 0x0203, NV20, "NVidia Quadro DCC"}, \
+ {0x10DE, 0x0250, NV20, "NVidia GeForce4 Ti 4600"}, \
+ {0x10DE, 0x0251, NV20, "NVidia GeForce4 Ti 4400"}, \
+ {0x10DE, 0x0252, NV20, "NVidia 0x0252"}, \
+ {0x10DE, 0x0253, NV20, "NVidia GeForce4 Ti 4200"}, \
+ {0x10DE, 0x0258, NV20, "NVidia Quadro4 900 XGL"}, \
+ {0x10DE, 0x0259, NV20, "NVidia Quadro4 750 XGL"}, \
+ {0x10DE, 0x025B, NV20, "NVidia Quadro4 700 XGL"}, \
+ {0x10DE, 0x0280, NV20, "NVidia GeForce4 Ti 4800"}, \
+ {0x10DE, 0x0281, NV20, "NVidia GeForce4 Ti 4200 with AGP8X"}, \
+ {0x10DE, 0x0282, NV20, "NVidia GeForce4 Ti 4800 SE"}, \
+ {0x10DE, 0x0286, NV20, "NVidia GeForce4 4200 Go"}, \
+ {0x10DE, 0x028C, NV20, "NVidia Quadro4 700 GoGL"}, \
+ {0x10DE, 0x0288, NV20, "NVidia Quadro4 980 XGL"}, \
+ {0x10DE, 0x0289, NV20, "NVidia Quadro4 780 XGL"}, \
+ {0x10DE, 0x0301, NV30, "NVidia GeForce FX 5800 Ultra"}, \
+ {0x10DE, 0x0302, NV30, "NVidia GeForce FX 5800"}, \
+ {0x10DE, 0x0308, NV30, "NVidia Quadro FX 2000"}, \
+ {0x10DE, 0x0309, NV30, "NVidia Quadro FX 1000"}, \
+ {0x10DE, 0x0311, NV30, "NVidia GeForce FX 5600 Ultra"}, \
+ {0x10DE, 0x0312, NV30, "NVidia GeForce FX 5600"}, \
+ {0x10DE, 0x0313, NV30, "NVidia 0x0313"}, \
+ {0x10DE, 0x0314, NV30, "NVidia GeForce FX 5600SE"}, \
+ {0x10DE, 0x0316, NV30, "NVidia 0x0316"}, \
+ {0x10DE, 0x0317, NV30, "NVidia 0x0317"}, \
+ {0x10DE, 0x031A, NV30, "NVidia GeForce FX Go5600"}, \
+ {0x10DE, 0x031B, NV30, "NVidia GeForce FX Go5650"}, \
+ {0x10DE, 0x031C, NV30, "NVidia Quadro FX Go700"}, \
+ {0x10DE, 0x031D, NV30, "NVidia 0x031D"}, \
+ {0x10DE, 0x031E, NV30, "NVidia 0x031E"}, \
+ {0x10DE, 0x031F, NV30, "NVidia 0x031F"}, \
+ {0x10DE, 0x0320, NV30, "NVidia GeForce FX 5200"}, \
+ {0x10DE, 0x0321, NV30, "NVidia GeForce FX 5200 Ultra"}, \
+ {0x10DE, 0x0322, NV30, "NVidia GeForce FX 5200"}, \
+ {0x10DE, 0x0323, NV30, "NVidia GeForce FX 5200SE"}, \
+ {0x10DE, 0x0324, NV30, "NVidia GeForce FX Go5200"}, \
+ {0x10DE, 0x0325, NV30, "NVidia GeForce FX Go5250"}, \
+ {0x10DE, 0x0326, NV30, "NVidia GeForce FX 5500"}, \
+ {0x10DE, 0x0327, NV30, "NVidia GeForce FX 5100"}, \
+ {0x10DE, 0x0328, NV30, "NVidia GeForce FX Go5200 32M/64M"}, \
+ {0x10DE, 0x0329, NV30, "NVidia GeForce FX 5200 (Mac)"}, \
+ {0x10DE, 0x032A, NV30, "NVidia Quadro NVS 280 PCI"}, \
+ {0x10DE, 0x032B, NV30, "NVidia Quadro FX 500/600 PCI"}, \
+ {0x10DE, 0x032C, NV30, "NVidia GeForce FX Go53xx Series"}, \
+ {0x10DE, 0x032D, NV30, "NVidia GeForce FX Go5100"}, \
+ {0x10DE, 0x032F, NV30, "NVidia 0x032F"}, \
+ {0x10DE, 0x0330, NV30, "NVidia GeForce FX 5900 Ultra"}, \
+ {0x10DE, 0x0331, NV30, "NVidia GeForce FX 5900"}, \
+ {0x10DE, 0x0332, NV30, "NVidia GeForce FX 5900XT"}, \
+ {0x10DE, 0x0333, NV30, "NVidia GeForce FX 5950 Ultra"}, \
+ {0x10DE, 0x033F, NV30, "NVidia Quadro FX 700"}, \
+ {0x10DE, 0x0334, NV30, "NVidia GeForce FX 5900ZT"}, \
+ {0x10DE, 0x0338, NV30, "NVidia Quadro FX 3000"}, \
+ {0x10DE, 0x0341, NV30, "NVidia GeForce FX 5700 Ultra"}, \
+ {0x10DE, 0x0342, NV30, "NVidia GeForce FX 5700"}, \
+ {0x10DE, 0x0343, NV30, "NVidia GeForce FX 5700LE"}, \
+ {0x10DE, 0x0344, NV30, "NVidia GeForce FX 5700VE"}, \
+ {0x10DE, 0x0345, NV30, "NVidia 0x0345"}, \
+ {0x10DE, 0x0347, NV30, "NVidia GeForce FX Go5700"}, \
+ {0x10DE, 0x0348, NV30, "NVidia GeForce FX Go5700"}, \
+ {0x10DE, 0x0349, NV30, "NVidia 0x0349"}, \
+ {0x10DE, 0x034B, NV30, "NVidia 0x034B"}, \
+ {0x10DE, 0x034C, NV30, "NVidia Quadro FX Go1000"}, \
+ {0x10DE, 0x034E, NV30, "NVidia Quadro FX 1100"}, \
+ {0x10DE, 0x034F, NV30, "NVidia 0x034F"}, \
+ {0x10DE, 0x0040, NV40, "NVidia GeForce 6800 Ultra"}, \
+ {0x10DE, 0x0041, NV40, "NVidia GeForce 6800"}, \
+ {0x10DE, 0x0042, NV40, "NVidia GeForce 6800 LE"}, \
+ {0x10DE, 0x0043, NV40, "NVidia 0x0043"}, \
+ {0x10DE, 0x0045, NV40, "NVidia GeForce 6800 GT"}, \
+ {0x10DE, 0x0046, NV40, "NVidia GeForce 6800 GT"}, \
+ {0x10DE, 0x0049, NV40, "NVidia 0x0049"}, \
+ {0x10DE, 0x004E, NV40, "NVidia Quadro FX 4000"}, \
+ {0x10DE, 0x00C0, NV40, "NVidia 0x00C0"}, \
+ {0x10DE, 0x00C1, NV40, "NVidia GeForce 6800"}, \
+ {0x10DE, 0x00C2, NV40, "NVidia GeForce 6800 LE"}, \
+ {0x10DE, 0x00C8, NV40, "NVidia GeForce Go 6800"}, \
+ {0x10DE, 0x00C9, NV40, "NVidia GeForce Go 6800 Ultra"}, \
+ {0x10DE, 0x00CC, NV40, "NVidia Quadro FX Go1400"}, \
+ {0x10DE, 0x00CD, NV40, "NVidia Quadro FX 3450/4000 SDI"}, \
+ {0x10DE, 0x00CE, NV40, "NVidia Quadro FX 1400"}, \
+ {0x10de, 0x00f0, NV40, "Nvidia GeForce 6600 GT"}, \
+ {0x10de, 0x00f1, NV40, "Nvidia GeForce 6600 GT"}, \
+ {0x10DE, 0x0140, NV40, "NVidia GeForce 6600 GT"}, \
+ {0x10DE, 0x0141, NV40, "NVidia GeForce 6600"}, \
+ {0x10DE, 0x0142, NV40, "NVidia GeForce 6600 LE"}, \
+ {0x10DE, 0x0143, NV40, "NVidia 0x0143"}, \
+ {0x10DE, 0x0144, NV40, "NVidia GeForce Go 6600"}, \
+ {0x10DE, 0x0145, NV40, "NVidia GeForce 6610 XL"}, \
+ {0x10DE, 0x0146, NV40, "NVidia GeForce Go 6600 TE/6200 TE"}, \
+ {0x10DE, 0x0147, NV40, "NVidia GeForce 6700 XL"}, \
+ {0x10DE, 0x0148, NV40, "NVidia GeForce Go 6600"}, \
+ {0x10DE, 0x0149, NV40, "NVidia GeForce Go 6600 GT"}, \
+ {0x10DE, 0x014B, NV40, "NVidia 0x014B"}, \
+ {0x10DE, 0x014C, NV40, "NVidia 0x014C"}, \
+ {0x10DE, 0x014D, NV40, "NVidia 0x014D"}, \
+ {0x10DE, 0x014E, NV40, "NVidia Quadro FX 540"}, \
+ {0x10DE, 0x014F, NV40, "NVidia GeForce 6200"}, \
+ {0x10DE, 0x0160, NV40, "NVidia 0x0160"}, \
+ {0x10DE, 0x0161, NV40, "NVidia GeForce 6200 TurboCache(TM)"}, \
+ {0x10DE, 0x0162, NV40, "NVidia GeForce 6200SE TurboCache(TM)"}, \
+ {0x10DE, 0x0163, NV40, "NVidia 0x0163"}, \
+ {0x10DE, 0x0164, NV40, "NVidia GeForce Go 6200"}, \
+ {0x10DE, 0x0165, NV40, "NVidia Quadro NVS 285"}, \
+ {0x10DE, 0x0166, NV40, "NVidia GeForce Go 6400"}, \
+ {0x10DE, 0x0167, NV40, "NVidia GeForce Go 6200"}, \
+ {0x10DE, 0x0168, NV40, "NVidia GeForce Go 6400"}, \
+ {0x10DE, 0x0169, NV40, "NVidia 0x0169"}, \
+ {0x10DE, 0x016B, NV40, "NVidia 0x016B"}, \
+ {0x10DE, 0x016C, NV40, "NVidia 0x016C"}, \
+ {0x10DE, 0x016D, NV40, "NVidia 0x016D"}, \
+ {0x10DE, 0x016E, NV40, "NVidia 0x016E"}, \
+ {0x10DE, 0x0210, NV40, "NVidia 0x0210"}, \
+ {0x10DE, 0x0211, NV40, "NVidia GeForce 6800"}, \
+ {0x10DE, 0x0212, NV40, "NVidia GeForce 6800 LE"}, \
+ {0x10DE, 0x0215, NV40, "NVidia GeForce 6800 GT"}, \
+ {0x10DE, 0x0220, NV40, "NVidia 0x0220"}, \
+ {0x10DE, 0x0221, NV40, "NVidia GeForce 6200"}, \
+ {0x10DE, 0x0222, NV40, "NVidia 0x0222"}, \
+ {0x10DE, 0x0228, NV40, "NVidia 0x0228"}, \
+ {0x10DE, 0x0090, NV40, "NVidia 0x0090"}, \
+ {0x10DE, 0x0091, NV40, "NVidia GeForce 7800 GTX"}, \
+ {0x10DE, 0x0092, NV40, "NVidia 0x0092"}, \
+ {0x10DE, 0x0093, NV40, "NVidia 0x0093"}, \
+ {0x10DE, 0x0094, NV40, "NVidia 0x0094"}, \
+ {0x10DE, 0x0098, NV40, "NVidia 0x0098"}, \
+ {0x10DE, 0x0099, NV40, "NVidia GeForce Go 7800 GTX"}, \
+ {0x10DE, 0x009C, NV40, "NVidia 0x009C"}, \
+ {0x10DE, 0x009D, NV40, "NVidia Quadro FX 4500"}, \
+ {0x10DE, 0x009E, NV40, "NVidia 0x009E"}, \
+ {0, 0, 0, NULL}
+
+#define xgi_PCI_IDS \
+ {0x18ca, 0x2200, 0, "XP5"}, \
+ {0x18ca, 0x0047, 0, "XP10 / XG47"}, \
+ {0, 0, 0, NULL}
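+
+/*
+ * Illustrative note (editorial, not part of the original header): each
+ * *_PCI_IDS macro above expands to the initializer list of a device-id
+ * table.  A driver instantiates it along the lines of the sketch below;
+ * the drm_pciid_t layout (vendor, device, driver-private flags, name) is
+ * assumed from the entries above.
+ *
+ *	static drm_pciid_t i915_pciidlist[] = {
+ *		i915_PCI_IDS
+ *	};
+ */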
diff --git a/sys/dev/drm2/drm_sarea.h b/sys/dev/drm2/drm_sarea.h
new file mode 100644
index 0000000..9f37fcb
--- /dev/null
+++ b/sys/dev/drm2/drm_sarea.h
@@ -0,0 +1,87 @@
+/**
+ * \file drm_sarea.h
+ * \brief SAREA definitions
+ *
+ * \author Michel Dänzer <michel@daenzer.net>
+ */
+
+/*-
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _DRM_SAREA_H_
+#define _DRM_SAREA_H_
+
+#include <dev/drm2/drm.h>
+
+/* The SAREA needs to be at least a page */
+#if defined(__alpha__)
+#define SAREA_MAX 0x2000
+#elif defined(__ia64__)
+#define SAREA_MAX 0x10000 /* 64kB */
+#else
+/* Intel 830M driver needs at least 8k SAREA */
+#define SAREA_MAX 0x2000UL
+#endif
+
+/** Maximum number of drawables in the SAREA */
+#define SAREA_MAX_DRAWABLES 256
+
+#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
+
+/** SAREA drawable */
+struct drm_sarea_drawable {
+ unsigned int stamp;
+ unsigned int flags;
+};
+
+/** SAREA frame */
+struct drm_sarea_frame {
+ unsigned int x;
+ unsigned int y;
+ unsigned int width;
+ unsigned int height;
+ unsigned int fullscreen;
+};
+
+/** SAREA */
+struct drm_sarea {
+ /** first thing is always the DRM locking structure */
+ struct drm_hw_lock lock;
+ /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
+ struct drm_hw_lock drawable_lock;
+ struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
+ struct drm_sarea_frame frame; /**< frame */
+ drm_context_t dummy_context;
+};
+
+#ifndef __KERNEL__
+typedef struct drm_sarea_drawable drm_sarea_drawable_t;
+typedef struct drm_sarea_frame drm_sarea_frame_t;
+typedef struct drm_sarea drm_sarea_t;
+#endif
+
+#endif /* _DRM_SAREA_H_ */
diff --git a/sys/dev/drm2/drm_scatter.c b/sys/dev/drm2/drm_scatter.c
new file mode 100644
index 0000000..ecf231f
--- /dev/null
+++ b/sys/dev/drm2/drm_scatter.c
@@ -0,0 +1,129 @@
+/*-
+ * Copyright (c) 2009 Robert C. Noland III <rnoland@FreeBSD.org>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_scatter.c
+ * Allocation of memory for scatter-gather mappings by the graphics chip.
+ * The memory allocated here is then made into an aperture in the card
+ * by mapping the pages into the GART.
+ */
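+
+/*
+ * Worked example (editorial, assuming 4 KB pages): a request of, say,
+ * 100000 bytes is rounded up by round_page() to 102400 bytes, which
+ * OFF_TO_IDX() converts to 25 pages; vtophys() then records one bus
+ * address per page in entry->busaddr[].
+ */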
+
+#include <dev/drm2/drmP.h>
+
+int
+drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather *request)
+{
+ struct drm_sg_mem *entry;
+ vm_size_t size;
+ vm_pindex_t pindex;
+
+ if (dev->sg)
+ return EINVAL;
+
+ DRM_DEBUG("request size=%ld\n", request->size);
+
+ entry = malloc(sizeof(*entry), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+
+ size = round_page(request->size);
+ entry->pages = OFF_TO_IDX(size);
+ entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
+ DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
+
+ entry->vaddr = kmem_alloc_attr(kernel_map, size, M_WAITOK | M_ZERO,
+ 0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
+ if (entry->vaddr == 0) {
+ drm_sg_cleanup(entry);
+ return (ENOMEM);
+ }
+
+ for(pindex = 0; pindex < entry->pages; pindex++) {
+ entry->busaddr[pindex] =
+ vtophys(entry->vaddr + IDX_TO_OFF(pindex));
+ }
+
+ DRM_LOCK(dev);
+ if (dev->sg) {
+ DRM_UNLOCK(dev);
+ drm_sg_cleanup(entry);
+ return (EINVAL);
+ }
+ dev->sg = entry;
+ DRM_UNLOCK(dev);
+
+ request->handle = entry->vaddr;
+
+ DRM_DEBUG("allocated %ju pages @ 0x%08zx, contents=%08lx\n",
+ entry->pages, entry->vaddr, *(unsigned long *)entry->vaddr);
+
+ return (0);
+}
+
+int
+drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_scatter_gather *request = data;
+
+ DRM_DEBUG("\n");
+
+ return (drm_sg_alloc(dev, request));
+}
+
+void
+drm_sg_cleanup(struct drm_sg_mem *entry)
+{
+ if (entry == NULL)
+ return;
+
+ if (entry->vaddr != 0)
+ kmem_free(kernel_map, entry->vaddr, IDX_TO_OFF(entry->pages));
+
+ free(entry->busaddr, DRM_MEM_SGLISTS);
+ free(entry, DRM_MEM_DRIVER);
+
+ return;
+}
+
+int
+drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_scatter_gather *request = data;
+ struct drm_sg_mem *entry;
+
+ DRM_LOCK(dev);
+ entry = dev->sg;
+ dev->sg = NULL;
+ DRM_UNLOCK(dev);
+
+ if (!entry || entry->vaddr != request->handle)
+ return (EINVAL);
+
+ DRM_DEBUG("free 0x%zx\n", entry->vaddr);
+
+ drm_sg_cleanup(entry);
+
+ return (0);
+}
diff --git a/sys/dev/drm2/drm_sman.c b/sys/dev/drm2/drm_sman.c
new file mode 100644
index 0000000..1c1e4af
--- /dev/null
+++ b/sys/dev/drm2/drm_sman.c
@@ -0,0 +1,352 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Simple memory manager interface that keeps track of allocated regions on a
+ * per-"owner" basis. All regions associated with an "owner" can be released
+ * with a single call, typically when the "owner" exits. The owner is any
+ * "unsigned long" identifier, typically a pointer to a file-private struct
+ * or a context identifier.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
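+
+/*
+ * Editorial sketch of the intended call sequence (pool_size, size and
+ * file_priv are hypothetical; every function shown is defined in this
+ * file):
+ *
+ *	struct drm_sman sman;
+ *
+ *	drm_sman_init(&sman, 1, 12, 8);
+ *	drm_sman_set_range(&sman, 0, 0, pool_size);
+ *	item = drm_sman_alloc(&sman, 0, size, 0, (unsigned long)file_priv);
+ *	...
+ *	drm_sman_owner_cleanup(&sman, (unsigned long)file_priv);
+ *	drm_sman_cleanup(&sman);
+ *	drm_sman_takedown(&sman);
+ */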
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_sman.h>
+
+struct drm_owner_item {
+ struct drm_hash_item owner_hash;
+ struct list_head sman_list;
+ struct list_head mem_blocks;
+};
+
+void drm_sman_takedown(struct drm_sman * sman)
+{
+ drm_ht_remove(&sman->user_hash_tab);
+ drm_ht_remove(&sman->owner_hash_tab);
+ if (sman->mm)
+ drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
+ DRM_MEM_MM);
+}
+
+int
+drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+ unsigned int user_order, unsigned int owner_order)
+{
+ int ret = 0;
+
+ sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers,
+ sizeof(*sman->mm), DRM_MEM_MM);
+ if (!sman->mm) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ sman->num_managers = num_managers;
+ INIT_LIST_HEAD(&sman->owner_items);
+ ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
+ if (ret)
+ goto out1;
+ ret = drm_ht_create(&sman->user_hash_tab, user_order);
+ if (!ret)
+ goto out;
+
+ drm_ht_remove(&sman->owner_hash_tab);
+out1:
+ drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
+out:
+ return ret;
+}
+
+static void *drm_sman_mm_allocate(void *private, unsigned long size,
+ unsigned alignment)
+{
+ struct drm_mm *mm = (struct drm_mm *) private;
+ struct drm_mm_node *tmp;
+
+ tmp = drm_mm_search_free(mm, size, alignment, 1);
+ if (!tmp) {
+ return NULL;
+ }
+ /* This could be non-atomic, but we are called from a locked path */
+ tmp = drm_mm_get_block_atomic(tmp, size, alignment);
+ return tmp;
+}
+
+static void drm_sman_mm_free(void *private, void *ref)
+{
+ struct drm_mm_node *node = (struct drm_mm_node *) ref;
+
+ drm_mm_put_block(node);
+}
+
+static void drm_sman_mm_destroy(void *private)
+{
+ struct drm_mm *mm = (struct drm_mm *) private;
+ drm_mm_takedown(mm);
+ drm_free(mm, sizeof(*mm), DRM_MEM_MM);
+}
+
+static unsigned long drm_sman_mm_offset(void *private, void *ref)
+{
+ struct drm_mm_node *node = (struct drm_mm_node *) ref;
+ return node->start;
+}
+
+int
+drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+ unsigned long start, unsigned long size)
+{
+ struct drm_sman_mm *sman_mm;
+ struct drm_mm *mm;
+ int ret;
+
+ KASSERT(manager < sman->num_managers, ("Invalid manager"));
+
+ sman_mm = &sman->mm[manager];
+ mm = malloc(sizeof(*mm), DRM_MEM_MM, M_NOWAIT | M_ZERO);
+ if (!mm) {
+ return -ENOMEM;
+ }
+ sman_mm->private = mm;
+ ret = drm_mm_init(mm, start, size);
+
+ if (ret) {
+ drm_free(mm, sizeof(*mm), DRM_MEM_MM);
+ return ret;
+ }
+
+ sman_mm->allocate = drm_sman_mm_allocate;
+ sman_mm->free = drm_sman_mm_free;
+ sman_mm->destroy = drm_sman_mm_destroy;
+ sman_mm->offset = drm_sman_mm_offset;
+
+ return 0;
+}
+
+int
+drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+ struct drm_sman_mm * allocator)
+{
+ KASSERT(manager < sman->num_managers, ("Invalid manager"));
+ sman->mm[manager] = *allocator;
+
+ return 0;
+}
+
+static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
+ unsigned long owner)
+{
+ int ret;
+ struct drm_hash_item *owner_hash_item;
+ struct drm_owner_item *owner_item;
+
+ ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
+ if (!ret) {
+ return drm_hash_entry(owner_hash_item, struct drm_owner_item,
+ owner_hash);
+ }
+
+ owner_item = malloc(sizeof(*owner_item), DRM_MEM_MM, M_NOWAIT | M_ZERO);
+ if (!owner_item)
+ goto out;
+
+ INIT_LIST_HEAD(&owner_item->mem_blocks);
+ owner_item->owner_hash.key = owner;
+ DRM_DEBUG("owner_item = %p, mem_blocks = %p\n", owner_item, &owner_item->mem_blocks);
+ if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
+ goto out1;
+
+ list_add_tail(&owner_item->sman_list, &sman->owner_items);
+ return owner_item;
+
+out1:
+ drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
+out:
+ return NULL;
+}
+
+struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
+ unsigned long size, unsigned alignment,
+ unsigned long owner)
+{
+ void *tmp;
+ struct drm_sman_mm *sman_mm;
+ struct drm_owner_item *owner_item;
+ struct drm_memblock_item *memblock;
+
+ KASSERT(manager < sman->num_managers, ("Invalid manager"));
+
+ sman_mm = &sman->mm[manager];
+ tmp = sman_mm->allocate(sman_mm->private, size, alignment);
+ if (!tmp) {
+ return NULL;
+ }
+
+ memblock = malloc(sizeof(*memblock), DRM_MEM_MM, M_NOWAIT | M_ZERO);
+ DRM_DEBUG("allocated mem_block %p\n", memblock);
+ if (!memblock)
+ goto out;
+
+ memblock->mm_info = tmp;
+ memblock->mm = sman_mm;
+ memblock->sman = sman;
+ INIT_LIST_HEAD(&memblock->owner_list);
+
+ if (drm_ht_just_insert_please
+ (&sman->user_hash_tab, &memblock->user_hash,
+ (unsigned long)memblock, 32, 0, 0))
+ goto out1;
+
+ owner_item = drm_sman_get_owner_item(sman, owner);
+ if (!owner_item)
+ goto out2;
+
+ DRM_DEBUG("owner_item = %p, mem_blocks = %p\n", owner_item, &owner_item->mem_blocks);
+ DRM_DEBUG("owner_list.prev = %p, mem_blocks.prev = %p\n", memblock->owner_list.prev, owner_item->mem_blocks.prev);
+ DRM_DEBUG("owner_list.next = %p, mem_blocks.next = %p\n", memblock->owner_list.next, owner_item->mem_blocks.next);
+ list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
+
+ DRM_DEBUG("Complete\n");
+ return memblock;
+
+out2:
+ drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
+out1:
+ drm_free(memblock, sizeof(*memblock), DRM_MEM_MM);
+out:
+ sman_mm->free(sman_mm->private, tmp);
+
+ return NULL;
+}
+
+static void drm_sman_free(struct drm_memblock_item *item)
+{
+ struct drm_sman *sman = item->sman;
+
+ list_del(&item->owner_list);
+ drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
+ item->mm->free(item->mm->private, item->mm_info);
+ drm_free(item, sizeof(*item), DRM_MEM_MM);
+}
+
+int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
+{
+ struct drm_hash_item *hash_item;
+ struct drm_memblock_item *memblock_item;
+
+ if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
+ return -EINVAL;
+
+ memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+ user_hash);
+ drm_sman_free(memblock_item);
+ return 0;
+}
+
+static void drm_sman_remove_owner(struct drm_sman *sman,
+ struct drm_owner_item *owner_item)
+{
+ list_del(&owner_item->sman_list);
+ drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
+ drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
+}
+
+int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
+{
+
+ struct drm_hash_item *hash_item;
+ struct drm_owner_item *owner_item;
+
+ if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+ return -1;
+ }
+
+ owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+ DRM_DEBUG("cleaning owner_item %p\n", owner_item);
+ if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
+ drm_sman_remove_owner(sman, owner_item);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
+ struct drm_owner_item *owner_item)
+{
+ struct drm_memblock_item *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
+ owner_list) {
+ DRM_DEBUG("freeing mem_block %p\n", entry);
+ drm_sman_free(entry);
+ }
+ drm_sman_remove_owner(sman, owner_item);
+}
+
+void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
+{
+
+ struct drm_hash_item *hash_item;
+ struct drm_owner_item *owner_item;
+
+ if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+
+ return;
+ }
+
+ owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+ drm_sman_do_owner_cleanup(sman, owner_item);
+}
+
+void drm_sman_cleanup(struct drm_sman *sman)
+{
+ struct drm_owner_item *entry, *next;
+ unsigned int i;
+ struct drm_sman_mm *sman_mm;
+
+ DRM_DEBUG("sman = %p, owner_items = %p\n",
+ sman, &sman->owner_items);
+ list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
+ DRM_DEBUG("cleaning owner_item = %p\n", entry);
+ drm_sman_do_owner_cleanup(sman, entry);
+ }
+ if (sman->mm) {
+ for (i = 0; i < sman->num_managers; ++i) {
+ sman_mm = &sman->mm[i];
+ if (sman_mm->private) {
+ sman_mm->destroy(sman_mm->private);
+ sman_mm->private = NULL;
+ }
+ }
+ }
+}
diff --git a/sys/dev/drm2/drm_sman.h b/sys/dev/drm2/drm_sman.h
new file mode 100644
index 0000000..3b1693f
--- /dev/null
+++ b/sys/dev/drm2/drm_sman.h
@@ -0,0 +1,181 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Simple memory MANager interface that keeps track of allocated regions on a
+ * per-"owner" basis. All regions associated with an "owner" can be released
+ * with a single call, typically when the "owner" exits. The owner is any
+ * "unsigned long" identifier, typically a pointer to a file-private struct
+ * or a context identifier.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef DRM_SMAN_H
+#define DRM_SMAN_H
+
+#include <dev/drm2/drm_hashtab.h>
+#include <dev/drm2/drm_linux_list.h>
+#include <dev/drm2/drm_mm.h>
+
+/*
+ * A class that is an abstraction of a simple memory allocator.
+ * The sman implementation provides a default such allocator
+ * using the drm_mm.c implementation. But the user can replace it.
+ * See the SiS implementation, which may use the SiS FB kernel module
+ * for memory management.
+ */
+
+struct drm_sman_mm {
+ /* private info. If allocated, needs to be destroyed by the destroy
+ function */
+ void *private;
+
+ /* Allocate a memory block with given size and alignment.
+ Return an opaque reference to the memory block */
+
+ void *(*allocate) (void *private, unsigned long size,
+ unsigned alignment);
+
+ /* Free a memory block. "ref" is the opaque reference that we got from
+ the "alloc" function */
+
+ void (*free) (void *private, void *ref);
+
+ /* Free all resources associated with this allocator */
+
+ void (*destroy) (void *private);
+
+ /* Return a memory offset from the opaque reference returned from the
+ "alloc" function */
+
+ unsigned long (*offset) (void *private, void *ref);
+};
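+
+/*
+ * Editorial example: a driver that manages its memory elsewhere (such as
+ * the SiS case mentioned above) would fill this struct with its own
+ * callbacks and register it with drm_sman_set_manager(); the sis_fb_*
+ * names are hypothetical:
+ *
+ *	static struct drm_sman_mm sis_fb_mm = {
+ *		.allocate = sis_fb_allocate,
+ *		.free     = sis_fb_free,
+ *		.destroy  = sis_fb_destroy,
+ *		.offset   = sis_fb_offset,
+ *	};
+ *
+ *	drm_sman_set_manager(&sman, 0, &sis_fb_mm);
+ */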
+
+struct drm_memblock_item {
+ struct list_head owner_list;
+ struct drm_hash_item user_hash;
+ void *mm_info;
+ struct drm_sman_mm *mm;
+ struct drm_sman *sman;
+};
+
+struct drm_sman {
+ struct drm_sman_mm *mm;
+ int num_managers;
+ struct drm_open_hash owner_hash_tab;
+ struct drm_open_hash user_hash_tab;
+ struct list_head owner_items;
+};
+
+/*
+ * Take down a memory manager. This function should only be called after a
+ * successful init and after a call to drm_sman_cleanup.
+ */
+
+extern void drm_sman_takedown(struct drm_sman * sman);
+
+/*
+ * Allocate structures for a manager.
+ * num_managers is the number of memory pools to manage (VRAM, AGP, ...).
+ * user_order is the log2 of the number of buckets in the user hash table.
+ * set this to approximately log2 of the max number of memory regions
+ * that will be allocated for _all_ pools together.
+ * owner_order is the log2 of the number of buckets in the owner hash table.
+ * set this to approximately log2 of
+ * the number of client file connections that will
+ * be using the manager.
+ *
+ */
+
+extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+ unsigned int user_order, unsigned int owner_order);
+
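+/*
+ * Editorial sizing example: a driver expecting on the order of 4096
+ * allocations across two pools (VRAM and AGP) serving around 256 client
+ * connections might call
+ *
+ *	drm_sman_init(&sman, 2, 12, 8);
+ *
+ * since log2(4096) == 12 and log2(256) == 8.
+ */
+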
+/*
+ * Initialize a drm_mm.c allocator. Should be called only once for each
+ * manager unless a customized allocator is used.
+ */
+
+extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+ unsigned long start, unsigned long size);
+
+/*
+ * Initialize a customized allocator for one of the managers.
+ * (See the SiS module). The object pointed to by "allocator" is copied,
+ * so it can be destroyed after this call.
+ */
+
+extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+ struct drm_sman_mm * allocator);
+
+/*
+ * Allocate a memory block. Alignment is not implemented yet.
+ */
+
+extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
+ unsigned int manager,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long owner);
+/*
+ * Free a memory block identified by its user hash key.
+ */
+
+extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
+
+/*
+ * Returns nonzero iff there are no stale memory blocks associated with this
+ * owner. Typically called to determine whether the hardware must be idled
+ * before calling drm_sman_owner_cleanup. If there are no stale memory
+ * blocks, it also removes all resources associated with the owner.
+ */
+
+extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
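+
+/*
+ * Editorial sketch of the idle-then-cleanup protocol described above
+ * (engine_idle() is a hypothetical driver routine; a nonzero return
+ * from drm_sman_owner_clean means nothing stale remains):
+ *
+ *	if (!drm_sman_owner_clean(&sman, owner)) {
+ *		engine_idle(dev);
+ *		drm_sman_owner_cleanup(&sman, owner);
+ *	}
+ */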
+
+/*
+ * Frees all stale memory blocks associated with this owner. Note that this
+ * requires that the hardware is finished with all blocks, so the graphics engine
+ * should be idled before this call is made. This function also frees
+ * any resources associated with "owner" and should be called when owner
+ * is not going to be referenced anymore.
+ */
+
+extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
+
+/*
+ * Frees all stale memory blocks associated with the memory manager.
+ * See idling above.
+ */
+
+extern void drm_sman_cleanup(struct drm_sman * sman);
+
+#endif
diff --git a/sys/dev/drm2/drm_stub.c b/sys/dev/drm2/drm_stub.c
new file mode 100644
index 0000000..2c87dec
--- /dev/null
+++ b/sys/dev/drm2/drm_stub.c
@@ -0,0 +1,60 @@
+/**
+ * \file drm_stub.h
+ * Stub support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "drmP.h"
+
+int
+drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+
+ DRM_DEBUG("setmaster\n");
+
+ if (file_priv->master != 0)
+ return (0);
+ return (-EPERM);
+}
+
+int
+drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+
+ DRM_DEBUG("dropmaster\n");
+ if (file_priv->master != 0)
+ return (-EINVAL);
+ return (0);
+}
diff --git a/sys/dev/drm2/drm_sysctl.c b/sys/dev/drm2/drm_sysctl.c
new file mode 100644
index 0000000..33048c7
--- /dev/null
+++ b/sys/dev/drm2/drm_sysctl.c
@@ -0,0 +1,364 @@
+/*-
+ * Copyright 2003 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_sysctl.c
+ * Implementation of various sysctls for controlling DRM behavior and reporting
+ * debug information.
+ */
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+
+#include <sys/sysctl.h>
+
+static int drm_name_info DRM_SYSCTL_HANDLER_ARGS;
+static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS;
+static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS;
+static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS;
+static int drm_vblank_info DRM_SYSCTL_HANDLER_ARGS;
+
+struct drm_sysctl_list {
+ const char *name;
+ int (*f) DRM_SYSCTL_HANDLER_ARGS;
+} drm_sysctl_list[] = {
+ {"name", drm_name_info},
+ {"vm", drm_vm_info},
+ {"clients", drm_clients_info},
+ {"bufs", drm_bufs_info},
+ {"vblank", drm_vblank_info},
+};
+#define DRM_SYSCTL_ENTRIES (sizeof(drm_sysctl_list)/sizeof(drm_sysctl_list[0]))
+
+struct drm_sysctl_info {
+ struct sysctl_ctx_list ctx;
+ char name[2];
+};
+
+int drm_sysctl_init(struct drm_device *dev)
+{
+ struct drm_sysctl_info *info;
+ struct sysctl_oid *oid;
+ struct sysctl_oid *top, *drioid;
+ int i;
+
+ info = malloc(sizeof *info, DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+ dev->sysctl = info;
+
+ /* Add the sysctl node for DRI if it doesn't already exist */
+ drioid = SYSCTL_ADD_NODE(&info->ctx, &sysctl__hw_children, OID_AUTO,
+ "dri", CTLFLAG_RW, NULL, "DRI Graphics");
+ if (!drioid)
+ return 1;
+
+ /* Find the next free slot under hw.dri */
+ i = 0;
+ SLIST_FOREACH(oid, SYSCTL_CHILDREN(drioid), oid_link) {
+ if (i <= oid->oid_arg2)
+ i = oid->oid_arg2 + 1;
+ }
+ if (i > 9)
+ return (1);
+
+ dev->sysctl_node_idx = i;
+ /* Add the hw.dri.x for our device */
+ info->name[0] = '0' + i;
+ info->name[1] = 0;
+ top = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(drioid),
+ OID_AUTO, info->name, CTLFLAG_RW, NULL, NULL);
+ if (!top)
+ return 1;
+
+ for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) {
+ oid = SYSCTL_ADD_OID(&info->ctx,
+ SYSCTL_CHILDREN(top),
+ OID_AUTO,
+ drm_sysctl_list[i].name,
+ CTLTYPE_STRING | CTLFLAG_RD,
+ dev,
+ 0,
+ drm_sysctl_list[i].f,
+ "A",
+ NULL);
+ if (!oid)
+ return 1;
+ }
+ SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO, "debug",
+ CTLFLAG_RW, &drm_debug_flag, sizeof(drm_debug_flag),
+ "Enable debugging output");
+ SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO, "notyet",
+	    CTLFLAG_RW, &drm_notyet_flag, sizeof(drm_notyet_flag),
+ "Enable notyet reminders");
+
+ if (dev->driver->sysctl_init != NULL)
+ dev->driver->sysctl_init(dev, &info->ctx, top);
+
+ SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO,
+ "vblank_offdelay", CTLFLAG_RW, &drm_vblank_offdelay,
+ sizeof(drm_vblank_offdelay),
+ "");
+ SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO,
+ "timestamp_precision", CTLFLAG_RW, &drm_timestamp_precision,
+ sizeof(drm_timestamp_precision),
+ "");
+
+ return (0);
+}
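+
+/*
+ * Editorial note: for the first device attached, the code above yields a
+ * sysctl tree of roughly this shape (illustrative):
+ *
+ *	hw.dri.debug
+ *	hw.dri.notyet
+ *	hw.dri.vblank_offdelay
+ *	hw.dri.timestamp_precision
+ *	hw.dri.0.name
+ *	hw.dri.0.vm
+ *	hw.dri.0.clients
+ *	hw.dri.0.bufs
+ *	hw.dri.0.vblank
+ */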
+
+int drm_sysctl_cleanup(struct drm_device *dev)
+{
+ int error;
+
+ error = sysctl_ctx_free(&dev->sysctl->ctx);
+ free(dev->sysctl, DRM_MEM_DRIVER);
+ dev->sysctl = NULL;
+ if (dev->driver->sysctl_cleanup != NULL)
+ dev->driver->sysctl_cleanup(dev);
+
+ return (error);
+}
+
+#define DRM_SYSCTL_PRINT(fmt, arg...) \
+do { \
+ snprintf(buf, sizeof(buf), fmt, ##arg); \
+ retcode = SYSCTL_OUT(req, buf, strlen(buf)); \
+ if (retcode) \
+ goto done; \
+} while (0)
+
+static int drm_name_info DRM_SYSCTL_HANDLER_ARGS
+{
+ struct drm_device *dev = arg1;
+ char buf[128];
+ int retcode;
+ int hasunique = 0;
+
+ DRM_SYSCTL_PRINT("%s 0x%x", dev->driver->name, dev2udev(dev->devnode));
+
+ DRM_LOCK(dev);
+ if (dev->unique) {
+ snprintf(buf, sizeof(buf), " %s", dev->unique);
+ hasunique = 1;
+ }
+ DRM_UNLOCK(dev);
+
+ if (hasunique)
+ SYSCTL_OUT(req, buf, strlen(buf));
+
+ SYSCTL_OUT(req, "", 1);
+
+done:
+ return retcode;
+}
+
+static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
+{
+ struct drm_device *dev = arg1;
+ drm_local_map_t *map, *tempmaps;
+ const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" };
+ const char *type, *yesno;
+ int i, mapcount;
+ char buf[128];
+ int retcode;
+
+ /* We can't hold the lock while doing SYSCTL_OUTs, so allocate a
+ * temporary copy of all the map entries and then SYSCTL_OUT that.
+ */
+ DRM_LOCK(dev);
+
+ mapcount = 0;
+ TAILQ_FOREACH(map, &dev->maplist, link)
+ mapcount++;
+
+ tempmaps = malloc(sizeof(drm_local_map_t) * mapcount, DRM_MEM_DRIVER,
+ M_NOWAIT);
+ if (tempmaps == NULL) {
+ DRM_UNLOCK(dev);
+ return ENOMEM;
+ }
+
+ i = 0;
+ TAILQ_FOREACH(map, &dev->maplist, link)
+ tempmaps[i++] = *map;
+
+ DRM_UNLOCK(dev);
+
+ DRM_SYSCTL_PRINT("\nslot offset size "
+ "type flags address handle mtrr\n");
+
+ for (i = 0; i < mapcount; i++) {
+ map = &tempmaps[i];
+
+ if (map->type < 0 || map->type > 4)
+ type = "??";
+ else
+ type = types[map->type];
+
+ if (!map->mtrr)
+ yesno = "no";
+ else
+ yesno = "yes";
+
+ DRM_SYSCTL_PRINT(
+ "%4d 0x%016lx 0x%08lx %4.4s 0x%02x 0x%016lx %6d %s\n",
+ i, map->offset, map->size, type, map->flags,
+ (unsigned long)map->virtual,
+ (unsigned int)((unsigned long)map->handle >>
+ DRM_MAP_HANDLE_SHIFT), yesno);
+ }
+ SYSCTL_OUT(req, "", 1);
+
+done:
+ free(tempmaps, DRM_MEM_DRIVER);
+ return retcode;
+}
+
+static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS
+{
+ struct drm_device *dev = arg1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_device_dma_t tempdma;
+ int *templists;
+ int i;
+ char buf[128];
+ int retcode;
+
+ /* We can't hold the locks around DRM_SYSCTL_PRINT, so make a temporary
+ * copy of the whole structure and the relevant data from buflist.
+ */
+ DRM_LOCK(dev);
+ if (dma == NULL) {
+ DRM_UNLOCK(dev);
+ return 0;
+ }
+ DRM_SPINLOCK(&dev->dma_lock);
+ tempdma = *dma;
+	templists = malloc(sizeof(int) * dma->buf_count, DRM_MEM_DRIVER,
+	    M_NOWAIT);
+	if (templists == NULL) {
+		DRM_SPINUNLOCK(&dev->dma_lock);
+		DRM_UNLOCK(dev);
+		return ENOMEM;
+	}
+ for (i = 0; i < dma->buf_count; i++)
+ templists[i] = dma->buflist[i]->list;
+ dma = &tempdma;
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ DRM_UNLOCK(dev);
+
+ DRM_SYSCTL_PRINT("\n o size count free segs pages kB\n");
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].buf_count)
+ DRM_SYSCTL_PRINT("%2d %8d %5d %5d %5d %5d %5d\n",
+ i,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].buf_count,
+ atomic_read(&dma->bufs[i]
+ .freelist.count),
+ dma->bufs[i].seg_count,
+ dma->bufs[i].seg_count
+ *(1 << dma->bufs[i].page_order),
+ (dma->bufs[i].seg_count
+ * (1 << dma->bufs[i].page_order))
+ * (int)PAGE_SIZE / 1024);
+ }
+ DRM_SYSCTL_PRINT("\n");
+ for (i = 0; i < dma->buf_count; i++) {
+ if (i && !(i%32)) DRM_SYSCTL_PRINT("\n");
+ DRM_SYSCTL_PRINT(" %d", templists[i]);
+ }
+ DRM_SYSCTL_PRINT("\n");
+
+ SYSCTL_OUT(req, "", 1);
+done:
+ free(templists, DRM_MEM_DRIVER);
+ return retcode;
+}
+
+static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
+{
+ struct drm_device *dev = arg1;
+ struct drm_file *priv, *tempprivs;
+ char buf[128];
+ int retcode;
+ int privcount, i;
+
+ DRM_LOCK(dev);
+
+ privcount = 0;
+ TAILQ_FOREACH(priv, &dev->files, link)
+ privcount++;
+
+ tempprivs = malloc(sizeof(struct drm_file) * privcount, DRM_MEM_DRIVER,
+ M_NOWAIT);
+ if (tempprivs == NULL) {
+ DRM_UNLOCK(dev);
+ return ENOMEM;
+ }
+ i = 0;
+ TAILQ_FOREACH(priv, &dev->files, link)
+ tempprivs[i++] = *priv;
+
+ DRM_UNLOCK(dev);
+
+ DRM_SYSCTL_PRINT(
+ "\na dev pid uid magic ioctls\n");
+ for (i = 0; i < privcount; i++) {
+ priv = &tempprivs[i];
+ DRM_SYSCTL_PRINT("%c %-12s %5d %5d %10u %10lu\n",
+ priv->authenticated ? 'y' : 'n',
+ devtoname(priv->dev->devnode),
+ priv->pid,
+ priv->uid,
+ priv->magic,
+ priv->ioctl_count);
+ }
+
+ SYSCTL_OUT(req, "", 1);
+done:
+ free(tempprivs, DRM_MEM_DRIVER);
+ return retcode;
+}
+
+static int drm_vblank_info DRM_SYSCTL_HANDLER_ARGS
+{
+ struct drm_device *dev = arg1;
+ char buf[128];
+ int retcode;
+ int i;
+
+ DRM_SYSCTL_PRINT("\ncrtc ref count last enabled inmodeset\n");
+ DRM_LOCK(dev);
+ if (dev->_vblank_count == NULL)
+ goto done;
+ for (i = 0 ; i < dev->num_crtcs ; i++) {
+ DRM_SYSCTL_PRINT(" %02d %02d %08d %08d %02d %02d\n",
+ i, dev->vblank_refcount[i],
+ dev->_vblank_count[i],
+ dev->last_vblank[i],
+ dev->vblank_enabled[i],
+ dev->vblank_inmodeset[i]);
+ }
+done:
+ DRM_UNLOCK(dev);
+
+ SYSCTL_OUT(req, "", -1);
+ return retcode;
+}
diff --git a/sys/dev/drm2/drm_vm.c b/sys/dev/drm2/drm_vm.c
new file mode 100644
index 0000000..a70fe7b
--- /dev/null
+++ b/sys/dev/drm2/drm_vm.c
@@ -0,0 +1,134 @@
+/*-
+ * Copyright 2003 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/** @file drm_vm.c
+ * Support code for mmaping of DRM maps.
+ */
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+
+int
+drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
+ int prot, vm_memattr_t *memattr)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(kdev);
+ struct drm_file *file_priv = NULL;
+ drm_local_map_t *map;
+ enum drm_map_type type;
+ vm_paddr_t phys;
+ int error;
+
+ /* d_mmap gets called twice, we can only reference file_priv during
+ * the first call. We need to assume that if error is EBADF the
+	 * call was successful and the client is authenticated.
+ */
+ error = devfs_get_cdevpriv((void **)&file_priv);
+ if (error == ENOENT) {
+ DRM_ERROR("Could not find authenticator!\n");
+ return EINVAL;
+ }
+
+ if (file_priv && !file_priv->authenticated)
+ return EACCES;
+
+ DRM_DEBUG("called with offset %016jx\n", offset);
+ if (dev->dma && offset < ptoa(dev->dma->page_count)) {
+ drm_device_dma_t *dma = dev->dma;
+
+ DRM_SPINLOCK(&dev->dma_lock);
+
+ if (dma->pagelist != NULL) {
+ unsigned long page = offset >> PAGE_SHIFT;
+ unsigned long phys = dma->pagelist[page];
+
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ *paddr = phys;
+ return 0;
+ } else {
+ DRM_SPINUNLOCK(&dev->dma_lock);
+ return -1;
+ }
+ }
+
+ /* A sequential search of a linked list is
+ fine here because: 1) there will only be
+ about 5-10 entries in the list and, 2) a
+ DRI client only has to do this mapping
+ once, so it doesn't have to be optimized
+ for performance, even if the list was a
+ bit longer.
+ */
+ DRM_LOCK(dev);
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+ if (offset >> DRM_MAP_HANDLE_SHIFT ==
+ (unsigned long)map->handle >> DRM_MAP_HANDLE_SHIFT)
+ break;
+ }
+
+ if (map == NULL) {
+ DRM_DEBUG("Can't find map, request offset = %016jx\n", offset);
+ TAILQ_FOREACH(map, &dev->maplist, link) {
+ DRM_DEBUG("map offset = %016lx, handle = %016lx\n",
+ map->offset, (unsigned long)map->handle);
+ }
+ DRM_UNLOCK(dev);
+ return -1;
+ }
+ if (((map->flags & _DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC))) {
+ DRM_UNLOCK(dev);
+ DRM_DEBUG("restricted map\n");
+ return -1;
+ }
+ type = map->type;
+ DRM_UNLOCK(dev);
+
+ offset = offset & ((1ULL << DRM_MAP_HANDLE_SHIFT) - 1);
+
+ switch (type) {
+ case _DRM_FRAME_BUFFER:
+ case _DRM_AGP:
+ *memattr = VM_MEMATTR_WRITE_COMBINING;
+ /* FALLTHROUGH */
+ case _DRM_REGISTERS:
+ phys = map->offset + offset;
+ break;
+ case _DRM_SCATTER_GATHER:
+ *memattr = VM_MEMATTR_WRITE_COMBINING;
+ /* FALLTHROUGH */
+ case _DRM_CONSISTENT:
+ case _DRM_SHM:
+ phys = vtophys((char *)map->virtual + offset);
+ break;
+ default:
+ DRM_ERROR("bad map type %d\n", type);
+ return -1; /* This should never happen. */
+ }
+
+ *paddr = phys;
+ return 0;
+}
+
diff --git a/sys/dev/drm2/i915/i915_debug.c b/sys/dev/drm2/i915/i915_debug.c
new file mode 100644
index 0000000..384ed4ae
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_debug.c
@@ -0,0 +1,1683 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/i915/intel_ringbuffer.h>
+
+#include <sys/sysctl.h>
+
+enum {
+ ACTIVE_LIST,
+ FLUSHING_LIST,
+ INACTIVE_LIST,
+ PINNED_LIST,
+ DEFERRED_FREE_LIST,
+};
+
+static const char *
+yesno(int v)
+{
+ return (v ? "yes" : "no");
+}
+
+static int
+i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ const struct intel_device_info *info = INTEL_INFO(dev);
+
+ sbuf_printf(m, "gen: %d\n", info->gen);
+ if (HAS_PCH_SPLIT(dev))
+ sbuf_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+#define B(x) sbuf_printf(m, #x ": %s\n", yesno(info->x))
+ B(is_mobile);
+ B(is_i85x);
+ B(is_i915g);
+ B(is_i945gm);
+ B(is_g33);
+ B(need_gfx_hws);
+ B(is_g4x);
+ B(is_pineview);
+ B(has_fbc);
+ B(has_pipe_cxsr);
+ B(has_hotplug);
+ B(cursor_needs_physical);
+ B(has_overlay);
+ B(overlay_needs_physical);
+ B(supports_tv);
+ B(has_bsd_ring);
+ B(has_blt_ring);
+ B(has_llc);
+#undef B
+
+ return (0);
+}
+
+static const char *
+get_pin_flag(struct drm_i915_gem_object *obj)
+{
+ if (obj->user_pin_count > 0)
+ return "P";
+ else if (obj->pin_count > 0)
+ return "p";
+ else
+ return " ";
+}
+
+static const char *
+get_tiling_flag(struct drm_i915_gem_object *obj)
+{
+ switch (obj->tiling_mode) {
+ default:
+ case I915_TILING_NONE: return (" ");
+ case I915_TILING_X: return ("X");
+ case I915_TILING_Y: return ("Y");
+ }
+}
+
+static const char *
+cache_level_str(int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return " snooped (LLC)";
+ case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
+ default: return ("");
+ }
+}
+
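+/*
+ * Emit a one-line summary of a GEM object: pointer, pin and tiling flags,
+ * size, read/write domains, last seqnos and cache level, followed by
+ * optional name, fence, GTT placement, mappability and ring annotations.
+ */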
+static void
+describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj)
+{
+
+ sbuf_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+ &obj->base,
+ get_pin_flag(obj),
+ get_tiling_flag(obj),
+ obj->base.size / 1024,
+ obj->base.read_domains,
+ obj->base.write_domain,
+ obj->last_rendering_seqno,
+ obj->last_fenced_seqno,
+ cache_level_str(obj->cache_level),
+ obj->dirty ? " dirty" : "",
+ obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ if (obj->base.name)
+ sbuf_printf(m, " (name: %d)", obj->base.name);
+ if (obj->fence_reg != I915_FENCE_REG_NONE)
+ sbuf_printf(m, " (fence: %d)", obj->fence_reg);
+ if (obj->gtt_space != NULL)
+ sbuf_printf(m, " (gtt offset: %08x, size: %08x)",
+ obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (obj->pin_mappable || obj->fault_mappable) {
+ char s[3], *t = s;
+ if (obj->pin_mappable)
+ *t++ = 'p';
+ if (obj->fault_mappable)
+ *t++ = 'f';
+ *t = '\0';
+ sbuf_printf(m, " (%s mappable)", s);
+ }
+ if (obj->ring != NULL)
+ sbuf_printf(m, " (%s)", obj->ring->name);
+}
+
+static int
+i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ uintptr_t list = (uintptr_t)data;
+ struct list_head *head;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ size_t total_obj_size, total_gtt_size;
+ int count;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+
+ switch (list) {
+ case ACTIVE_LIST:
+ sbuf_printf(m, "Active:\n");
+ head = &dev_priv->mm.active_list;
+ break;
+ case INACTIVE_LIST:
+ sbuf_printf(m, "Inactive:\n");
+ head = &dev_priv->mm.inactive_list;
+ break;
+ case PINNED_LIST:
+ sbuf_printf(m, "Pinned:\n");
+ head = &dev_priv->mm.pinned_list;
+ break;
+ case FLUSHING_LIST:
+ sbuf_printf(m, "Flushing:\n");
+ head = &dev_priv->mm.flushing_list;
+ break;
+ case DEFERRED_FREE_LIST:
+ sbuf_printf(m, "Deferred free:\n");
+ head = &dev_priv->mm.deferred_free_list;
+ break;
+ default:
+ DRM_UNLOCK(dev);
+ return (EINVAL);
+ }
+
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj, head, mm_list) {
+ sbuf_printf(m, " ");
+ describe_obj(m, obj);
+ sbuf_printf(m, "\n");
+ total_obj_size += obj->base.size;
+ total_gtt_size += obj->gtt_space->size;
+ count++;
+ }
+ DRM_UNLOCK(dev);
+
+ sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
+ return (0);
+}
+
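+/*
+ * Walk one GEM object list and accumulate the total GTT footprint as well
+ * as the mappable subset.  The caller must declare size, count,
+ * mappable_size, mappable_count and obj.
+ */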
+#define count_objects(list, member) do { \
+ list_for_each_entry(obj, list, member) { \
+ size += obj->gtt_space->size; \
+ ++count; \
+ if (obj->map_and_fenceable) { \
+ mappable_size += obj->gtt_space->size; \
+ ++mappable_count; \
+ } \
+ } \
+} while (0)
+
+static int
+i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 count, mappable_count;
+ size_t size, mappable_size;
+ struct drm_i915_gem_object *obj;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+ sbuf_printf(m, "%u objects, %zu bytes\n",
+ dev_priv->mm.object_count,
+ dev_priv->mm.object_memory);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.gtt_list, gtt_list);
+ sbuf_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.active_list, mm_list);
+ count_objects(&dev_priv->mm.flushing_list, mm_list);
+ sbuf_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.pinned_list, mm_list);
+ sbuf_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.inactive_list, mm_list);
+ sbuf_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.deferred_free_list, mm_list);
+ sbuf_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->fault_mappable) {
+ size += obj->gtt_space->size;
+ ++count;
+ }
+ if (obj->pin_mappable) {
+ mappable_size += obj->gtt_space->size;
+ ++mappable_count;
+ }
+ }
+ sbuf_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ mappable_count, mappable_size);
+ sbuf_printf(m, "%u fault mappable objects, %zu bytes\n",
+ count, size);
+
+ sbuf_printf(m, "%zu [%zu] gtt total\n",
+ dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
+ DRM_UNLOCK(dev);
+
+ return (0);
+}
+
+static int
+i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void* data)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ size_t total_obj_size, total_gtt_size;
+ int count;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ sbuf_printf(m, " ");
+ describe_obj(m, obj);
+ sbuf_printf(m, "\n");
+ total_obj_size += obj->base.size;
+ total_gtt_size += obj->gtt_space->size;
+ count++;
+ }
+
+ DRM_UNLOCK(dev);
+
+ sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
+
+ return (0);
+}
+
+static int
+i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *obj;
+ struct intel_unpin_work *work;
+ char pipe;
+ char plane;
+
+ if ((dev->driver->driver_features & DRIVER_MODESET) == 0)
+ return (0);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ pipe = pipe_name(crtc->pipe);
+ plane = plane_name(crtc->plane);
+
+ mtx_lock(&dev->event_lock);
+ work = crtc->unpin_work;
+ if (work == NULL) {
+ sbuf_printf(m, "No flip due on pipe %c (plane %c)\n",
+ pipe, plane);
+ } else {
+ if (!work->pending) {
+ sbuf_printf(m, "Flip queued on pipe %c (plane %c)\n",
+ pipe, plane);
+ } else {
+ sbuf_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
+ pipe, plane);
+ }
+ if (work->enable_stall_check)
+ sbuf_printf(m, "Stall check enabled, ");
+ else
+ sbuf_printf(m, "Stall check waiting for page flip ioctl, ");
+ sbuf_printf(m, "%d prepares\n", work->pending);
+
+ if (work->old_fb_obj) {
+ obj = work->old_fb_obj;
+ if (obj)
+ sbuf_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ }
+ if (work->pending_flip_obj) {
+ obj = work->pending_flip_obj;
+ if (obj)
+ sbuf_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ }
+ }
+ mtx_unlock(&dev->event_lock);
+ }
+
+ return (0);
+}
+
+static int
+i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *gem_request;
+ int count;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+
+ count = 0;
+ if (!list_empty(&dev_priv->rings[RCS].request_list)) {
+ sbuf_printf(m, "Render requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->rings[RCS].request_list,
+ list) {
+ sbuf_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ if (!list_empty(&dev_priv->rings[VCS].request_list)) {
+ sbuf_printf(m, "BSD requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->rings[VCS].request_list,
+ list) {
+ sbuf_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ if (!list_empty(&dev_priv->rings[BCS].request_list)) {
+ sbuf_printf(m, "BLT requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->rings[BCS].request_list,
+ list) {
+ sbuf_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ DRM_UNLOCK(dev);
+
+ if (count == 0)
+ sbuf_printf(m, "No requests\n");
+
+ return 0;
+}
+
+static void
+i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring)
+{
+ if (ring->get_seqno) {
+ sbuf_printf(m, "Current sequence (%s): %d\n",
+ ring->name, ring->get_seqno(ring));
+ sbuf_printf(m, "Waiter sequence (%s): %d\n",
+ ring->name, ring->waiting_seqno);
+ sbuf_printf(m, "IRQ sequence (%s): %d\n",
+ ring->name, ring->irq_seqno);
+ }
+}
+
+static int
+i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_ring_seqno_info(m, &dev_priv->rings[i]);
+ DRM_UNLOCK(dev);
+ return (0);
+}
+
+static int
+i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i, pipe;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+
+ if (!HAS_PCH_SPLIT(dev)) {
+ sbuf_printf(m, "Interrupt enable: %08x\n",
+ I915_READ(IER));
+ sbuf_printf(m, "Interrupt identity: %08x\n",
+ I915_READ(IIR));
+ sbuf_printf(m, "Interrupt mask: %08x\n",
+ I915_READ(IMR));
+ for_each_pipe(pipe)
+ sbuf_printf(m, "Pipe %c stat: %08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+ } else {
+ sbuf_printf(m, "North Display Interrupt enable: %08x\n",
+ I915_READ(DEIER));
+ sbuf_printf(m, "North Display Interrupt identity: %08x\n",
+ I915_READ(DEIIR));
+ sbuf_printf(m, "North Display Interrupt mask: %08x\n",
+ I915_READ(DEIMR));
+ sbuf_printf(m, "South Display Interrupt enable: %08x\n",
+ I915_READ(SDEIER));
+ sbuf_printf(m, "South Display Interrupt identity: %08x\n",
+ I915_READ(SDEIIR));
+ sbuf_printf(m, "South Display Interrupt mask: %08x\n",
+ I915_READ(SDEIMR));
+ sbuf_printf(m, "Graphics Interrupt enable: %08x\n",
+ I915_READ(GTIER));
+ sbuf_printf(m, "Graphics Interrupt identity: %08x\n",
+ I915_READ(GTIIR));
+ sbuf_printf(m, "Graphics Interrupt mask: %08x\n",
+ I915_READ(GTIMR));
+ }
+ sbuf_printf(m, "Interrupts received: %d\n",
+ atomic_read(&dev_priv->irq_received));
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ sbuf_printf(m, "Graphics Interrupt mask (%s): %08x\n",
+ dev_priv->rings[i].name,
+ I915_READ_IMR(&dev_priv->rings[i]));
+ }
+ i915_ring_seqno_info(m, &dev_priv->rings[i]);
+ }
+ DRM_UNLOCK(dev);
+
+ return (0);
+}
+
+static int
+i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+
+ sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
+ sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
+
+ sbuf_printf(m, "Fenced object[%2d] = ", i);
+ if (obj == NULL)
+ sbuf_printf(m, "unused");
+ else
+ describe_obj(m, obj);
+ sbuf_printf(m, "\n");
+ }
+
+ DRM_UNLOCK(dev);
+ return (0);
+}
+
+static int
+i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ const volatile u32 *hws;
+ int i;
+
+ ring = &dev_priv->rings[(uintptr_t)data];
+ hws = (volatile u32 *)ring->status_page.page_addr;
+ if (hws == NULL)
+ return (0);
+
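+	/* Dump the first 1KiB of the 4KiB status page, four dwords per row. */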
+ for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
+ sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i * 4,
+ hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
+ }
+ return (0);
+}
+
+static int
+i915_ringbuffer_data(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+ ring = &dev_priv->rings[(uintptr_t)data];
+ if (!ring->obj) {
+ sbuf_printf(m, "No ringbuffer setup\n");
+ } else {
+ u8 *virt = ring->virtual_start;
+ uint32_t off;
+
+ for (off = 0; off < ring->size; off += 4) {
+ uint32_t *ptr = (uint32_t *)(virt + off);
+ sbuf_printf(m, "%08x : %08x\n", off, *ptr);
+ }
+ }
+ DRM_UNLOCK(dev);
+ return (0);
+}
+
+static int
+i915_ringbuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+
+ ring = &dev_priv->rings[(uintptr_t)data];
+ if (ring->size == 0)
+ return (0);
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+
+ sbuf_printf(m, "Ring %s:\n", ring->name);
+ sbuf_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
+ sbuf_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
+ sbuf_printf(m, " Size : %08x\n", ring->size);
+ sbuf_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
+ sbuf_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ sbuf_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
+ sbuf_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
+ }
+ sbuf_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
+ sbuf_printf(m, " Start : %08x\n", I915_READ_START(ring));
+
+ DRM_UNLOCK(dev);
+
+ return (0);
+}
+
+static const char *
+ring_str(int ring)
+{
+ switch (ring) {
+ case RCS: return (" render");
+ case VCS: return (" bsd");
+ case BCS: return (" blt");
+ default: return ("");
+ }
+}
+
+static const char *
+pin_flag(int pinned)
+{
+ if (pinned > 0)
+ return (" P");
+ else if (pinned < 0)
+ return (" p");
+ else
+ return ("");
+}
+
+static const char *tiling_flag(int tiling)
+{
+ switch (tiling) {
+ default:
+ case I915_TILING_NONE: return "";
+ case I915_TILING_X: return " X";
+ case I915_TILING_Y: return " Y";
+ }
+}
+
+static const char *dirty_flag(int dirty)
+{
+ return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+ return purgeable ? " purgeable" : "";
+}
+
+static void print_error_buffers(struct sbuf *m, const char *name,
+ struct drm_i915_error_buffer *err, int count)
+{
+
+ sbuf_printf(m, "%s [%d]:\n", name, count);
+
+ while (count--) {
+ sbuf_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+ err->gtt_offset,
+ err->size,
+ err->read_domains,
+ err->write_domain,
+ err->seqno,
+ pin_flag(err->pinned),
+ tiling_flag(err->tiling),
+ dirty_flag(err->dirty),
+ purgeable_flag(err->purgeable),
+ err->ring != -1 ? " " : "",
+ ring_str(err->ring),
+ cache_level_str(err->cache_level));
+
+ if (err->name)
+ sbuf_printf(m, " (name: %d)", err->name);
+ if (err->fence_reg != I915_FENCE_REG_NONE)
+ sbuf_printf(m, " (fence: %d)", err->fence_reg);
+
+ sbuf_printf(m, "\n");
+ err++;
+ }
+}
+
+static void
+i915_ring_error_state(struct sbuf *m, struct drm_device *dev,
+ struct drm_i915_error_state *error, unsigned ring)
+{
+
+ sbuf_printf(m, "%s command stream:\n", ring_str(ring));
+ sbuf_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
+ sbuf_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ sbuf_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
+ sbuf_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
+ sbuf_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
+ sbuf_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
+ sbuf_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ sbuf_printf(m, " BBADDR: 0x%08jx\n", (uintmax_t)error->bbaddr);
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ sbuf_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ sbuf_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ sbuf_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+ sbuf_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+ sbuf_printf(m, " SYNC_0: 0x%08x\n",
+ error->semaphore_mboxes[ring][0]);
+ sbuf_printf(m, " SYNC_1: 0x%08x\n",
+ error->semaphore_mboxes[ring][1]);
+ }
+ sbuf_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ sbuf_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+ sbuf_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+}
+
+static int i915_error_state(struct drm_device *dev, struct sbuf *m,
+ void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ int i, j, page, offset, elt;
+
+ mtx_lock(&dev_priv->error_lock);
+ if (!dev_priv->first_error) {
+ sbuf_printf(m, "no error state collected\n");
+ goto out;
+ }
+
+ error = dev_priv->first_error;
+
+ sbuf_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec,
+ (intmax_t)error->time.tv_usec);
+ sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ sbuf_printf(m, "EIR: 0x%08x\n", error->eir);
+ sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ sbuf_printf(m, " fence[%d] = %08jx\n", i,
+ (uintmax_t)error->fence[i]);
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ sbuf_printf(m, "ERROR: 0x%08x\n", error->error);
+ sbuf_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+ }
+
+ i915_ring_error_state(m, dev, error, RCS);
+ if (HAS_BLT(dev))
+ i915_ring_error_state(m, dev, error, BCS);
+ if (HAS_BSD(dev))
+ i915_ring_error_state(m, dev, error, VCS);
+
+ if (error->active_bo)
+ print_error_buffers(m, "Active",
+ error->active_bo,
+ error->active_bo_count);
+
+ if (error->pinned_bo)
+ print_error_buffers(m, "Pinned",
+ error->pinned_bo,
+ error->pinned_bo_count);
+
+ for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
+ struct drm_i915_error_object *obj;
+
+ if ((obj = error->ring[i].batchbuffer)) {
+ sbuf_printf(m, "%s --- gtt_offset = 0x%08x\n",
+ dev_priv->rings[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ sbuf_printf(m, "%08x : %08x\n",
+ offset, obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+
+ if (error->ring[i].num_requests) {
+ sbuf_printf(m, "%s --- %d requests\n",
+ dev_priv->rings[i].name,
+ error->ring[i].num_requests);
+ for (j = 0; j < error->ring[i].num_requests; j++) {
+ sbuf_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+ error->ring[i].requests[j].seqno,
+ error->ring[i].requests[j].jiffies,
+ error->ring[i].requests[j].tail);
+ }
+ }
+
+ if ((obj = error->ring[i].ringbuffer)) {
+ sbuf_printf(m, "%s --- ringbuffer = 0x%08x\n",
+ dev_priv->rings[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ sbuf_printf(m, "%08x : %08x\n",
+ offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+ }
+
+ if (error->overlay)
+ intel_overlay_print_error_state(m, error->overlay);
+
+ if (error->display)
+ intel_display_print_error_state(m, dev, error->display);
+
+out:
+ mtx_unlock(&dev_priv->error_lock);
+
+ return (0);
+}
+
+static int
+i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 crstanddelay;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+ crstanddelay = I915_READ16(CRSTANDVID);
+ DRM_UNLOCK(dev);
+
+ sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n",
+ (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+
+ return 0;
+}
+
+static int
+i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (IS_GEN5(dev)) {
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+ sbuf_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ sbuf_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ sbuf_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ sbuf_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ } else if (IS_GEN6(dev)) {
+ u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 rpstat;
+ u32 rpupei, rpcurup, rpprevup;
+ u32 rpdownei, rpcurdown, rpprevdown;
+ int max_freq;
+
+ /* RPSTAT1 is in the GT power well */
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+ gen6_gt_force_wake_get(dev_priv);
+
+ rpstat = I915_READ(GEN6_RPSTAT1);
+ rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
+ rpcurup = I915_READ(GEN6_RP_CUR_UP);
+ rpprevup = I915_READ(GEN6_RP_PREV_UP);
+ rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
+ rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
+ rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+
+ gen6_gt_force_wake_put(dev_priv);
+ DRM_UNLOCK(dev);
+
+ sbuf_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ sbuf_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+ sbuf_printf(m, "Render p-state ratio: %d\n",
+ (gt_perf_status & 0xff00) >> 8);
+ sbuf_printf(m, "Render p-state VID: %d\n",
+ gt_perf_status & 0xff);
+ sbuf_printf(m, "Render p-state limit: %d\n",
+ rp_state_limits & 0xff);
+ sbuf_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
+ GEN6_CAGF_SHIFT) * 50);
+ sbuf_printf(m, "RP CUR UP EI: %dus\n", rpupei &
+ GEN6_CURICONT_MASK);
+ sbuf_printf(m, "RP CUR UP: %dus\n", rpcurup &
+ GEN6_CURBSYTAVG_MASK);
+ sbuf_printf(m, "RP PREV UP: %dus\n", rpprevup &
+ GEN6_CURBSYTAVG_MASK);
+ sbuf_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
+ GEN6_CURIAVG_MASK);
+ sbuf_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
+ GEN6_CURBSYTAVG_MASK);
+ sbuf_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
+ GEN6_CURBSYTAVG_MASK);
+
+ max_freq = (rp_state_cap & 0xff0000) >> 16;
+ sbuf_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ max_freq * 50);
+
+ max_freq = (rp_state_cap & 0xff00) >> 8;
+ sbuf_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ max_freq * 50);
+
+ max_freq = rp_state_cap & 0xff;
+ sbuf_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ max_freq * 50);
+ } else {
+ sbuf_printf(m, "no P-state info available\n");
+ }
+
+ return 0;
+}
+
+static int
+i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 delayfreq;
+ int i;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+ for (i = 0; i < 16; i++) {
+ delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
+ sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+ (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
+ }
+ DRM_UNLOCK(dev);
+ return (0);
+}
+
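+/* Convert a VID step to millivolts: step 0 is 1250mV, each step -25mV. */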
+static inline int
+MAP_TO_MV(int map)
+{
+ return 1250 - (map * 25);
+}
+
+static int
+i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 inttoext;
+ int i;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+ for (i = 1; i <= 32; i++) {
+ inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
+ sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
+ }
+ DRM_UNLOCK(dev);
+
+ return (0);
+}
+
+static int
+ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 rgvmodectl;
+ u32 rstdbyctl;
+ u16 crstandvid;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+ rgvmodectl = I915_READ(MEMMODECTL);
+ rstdbyctl = I915_READ(RSTDBYCTL);
+ crstandvid = I915_READ16(CRSTANDVID);
+ DRM_UNLOCK(dev);
+
+ sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+ "yes" : "no");
+ sbuf_printf(m, "Boost freq: %d\n",
+ (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+ MEMMODE_BOOST_FREQ_SHIFT);
+ sbuf_printf(m, "HW control enabled: %s\n",
+ rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+ sbuf_printf(m, "SW control enabled: %s\n",
+ rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+ sbuf_printf(m, "Gated voltage change: %s\n",
+ rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+ sbuf_printf(m, "Starting frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+ sbuf_printf(m, "Max P-state: P%d\n",
+ (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+ sbuf_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ sbuf_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+ sbuf_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+ sbuf_printf(m, "Render standby enabled: %s\n",
+ (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+ sbuf_printf(m, "Current RS state: ");
+ switch (rstdbyctl & RSX_STATUS_MASK) {
+ case RSX_STATUS_ON:
+ sbuf_printf(m, "on\n");
+ break;
+ case RSX_STATUS_RC1:
+ sbuf_printf(m, "RC1\n");
+ break;
+ case RSX_STATUS_RC1E:
+ sbuf_printf(m, "RC1E\n");
+ break;
+ case RSX_STATUS_RS1:
+ sbuf_printf(m, "RS1\n");
+ break;
+ case RSX_STATUS_RS2:
+ sbuf_printf(m, "RS2 (RC6)\n");
+ break;
+ case RSX_STATUS_RS3:
+ sbuf_printf(m, "RC3 (RC6+)\n");
+ break;
+ default:
+ sbuf_printf(m, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static int
+gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 rpmodectl1, gt_core_status, rcctl1;
+ unsigned forcewake_count;
+	int count = 0;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+
+ mtx_lock(&dev_priv->gt_lock);
+ forcewake_count = dev_priv->forcewake_count;
+ mtx_unlock(&dev_priv->gt_lock);
+
+ if (forcewake_count) {
+ sbuf_printf(m, "RC information inaccurate because userspace "
+	    "holds a reference\n");
+ } else {
+ /* NB: we cannot use forcewake, else we read the wrong values */
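+		/* Poll the forcewake ack for up to 50 * 10us = 500us. */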
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+ DRM_UDELAY(10);
+ sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51));
+ }
+
+ gt_core_status = DRM_READ32(dev_priv->mmio_map, GEN6_GT_CORE_STATUS);
+ trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+
+ rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+ rcctl1 = I915_READ(GEN6_RC_CONTROL);
+ DRM_UNLOCK(dev);
+
+ sbuf_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+ sbuf_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ sbuf_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+ sbuf_printf(m, "RC1e Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+ sbuf_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ sbuf_printf(m, "Deep RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+ sbuf_printf(m, "Deepest RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+ sbuf_printf(m, "Current RC state: ");
+ switch (gt_core_status & GEN6_RCn_MASK) {
+ case GEN6_RC0:
+ if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+ sbuf_printf(m, "Core Power Down\n");
+ else
+ sbuf_printf(m, "on\n");
+ break;
+ case GEN6_RC3:
+ sbuf_printf(m, "RC3\n");
+ break;
+ case GEN6_RC6:
+ sbuf_printf(m, "RC6\n");
+ break;
+ case GEN6_RC7:
+ sbuf_printf(m, "RC7\n");
+ break;
+ default:
+ sbuf_printf(m, "Unknown\n");
+ break;
+ }
+
+ sbuf_printf(m, "Core Power Down: %s\n",
+ yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ return 0;
+}
+
+static int i915_drpc_info(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ return (gen6_drpc_info(dev, m));
+ else
+ return (ironlake_drpc_info(dev, m));
+}
+
+static int
+i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!I915_HAS_FBC(dev)) {
+ sbuf_printf(m, "FBC unsupported on this chipset");
+ return 0;
+ }
+
+ if (intel_fbc_enabled(dev)) {
+ sbuf_printf(m, "FBC enabled");
+ } else {
+ sbuf_printf(m, "FBC disabled: ");
+ switch (dev_priv->no_fbc_reason) {
+ case FBC_NO_OUTPUT:
+ sbuf_printf(m, "no outputs");
+ break;
+ case FBC_STOLEN_TOO_SMALL:
+ sbuf_printf(m, "not enough stolen memory");
+ break;
+ case FBC_UNSUPPORTED_MODE:
+ sbuf_printf(m, "mode not supported");
+ break;
+ case FBC_MODE_TOO_LARGE:
+ sbuf_printf(m, "mode too large");
+ break;
+ case FBC_BAD_PLANE:
+ sbuf_printf(m, "FBC unsupported on plane");
+ break;
+ case FBC_NOT_TILED:
+ sbuf_printf(m, "scanout buffer not tiled");
+ break;
+ case FBC_MULTIPLE_PIPES:
+ sbuf_printf(m, "multiple pipes are enabled");
+ break;
+ default:
+ sbuf_printf(m, "unknown reason");
+ }
+ }
+ return 0;
+}
+
+static int
+i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool sr_enabled = false;
+
+ if (HAS_PCH_SPLIT(dev))
+ sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+ else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev))
+ sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev))
+ sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+
+ sbuf_printf(m, "self-refresh: %s",
+ sr_enabled ? "enabled" : "disabled");
+
+ return (0);
+}
+
+static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
+ void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int gpu_freq, ia_freq;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+ sbuf_printf(m, "unsupported on this chipset");
+ return (0);
+ }
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+
+ sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
+
+ for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+ gpu_freq++) {
+ I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_READ_MIN_FREQ_TABLE);
+ if (_intel_wait_for(dev,
+ (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 10, 1, "915frq")) {
+ DRM_ERROR("pcode read of freq table timed out\n");
+ continue;
+ }
+ ia_freq = I915_READ(GEN6_PCODE_DATA);
+ sbuf_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+ }
+
+ DRM_UNLOCK(dev);
+
+ return (0);
+}
+
+static int
+i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long temp, chipset, gfx;
+
+ if (!IS_GEN5(dev)) {
+ sbuf_printf(m, "Not supported\n");
+ return (0);
+ }
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+ temp = i915_mch_val(dev_priv);
+ chipset = i915_chipset_val(dev_priv);
+ gfx = i915_gfx_val(dev_priv);
+ DRM_UNLOCK(dev);
+
+	sbuf_printf(m, "GMCH temp: %lu\n", temp);
+	sbuf_printf(m, "Chipset power: %lu\n", chipset);
+	sbuf_printf(m, "GFX power: %lu\n", gfx);
+	sbuf_printf(m, "Total power: %lu\n", chipset + gfx);
+
+ return (0);
+}
+
+static int
+i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+	sbuf_printf(m, "GFXEC: %lu\n", (unsigned long)I915_READ(0x112f4));
+ DRM_UNLOCK(dev);
+
+ return (0);
+}
+
+#if 0
+static int
+i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+ if (opregion->header)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+ DRM_UNLOCK(dev);
+
+ return 0;
+}
+#endif
+
+static int
+i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_fbdev *ifbdev;
+ struct intel_framebuffer *fb;
+
+ if (sx_xlock_sig(&dev->dev_struct_lock))
+ return (EINTR);
+
+ ifbdev = dev_priv->fbdev;
+ if (ifbdev == NULL) {
+ DRM_UNLOCK(dev);
+ return (0);
+ }
+ fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+ sbuf_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.depth,
+ fb->base.bits_per_pixel);
+ describe_obj(m, fb->obj);
+ sbuf_printf(m, "\n");
+
+ list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+ if (&fb->base == ifbdev->helper.fb)
+ continue;
+
+ sbuf_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.depth,
+ fb->base.bits_per_pixel);
+ describe_obj(m, fb->obj);
+ sbuf_printf(m, "\n");
+ }
+
+ DRM_UNLOCK(dev);
+
+ return (0);
+}
+
+static int
+i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ drm_i915_private_t *dev_priv;
+ int ret;
+
+ if ((dev->driver->driver_features & DRIVER_MODESET) == 0)
+ return (0);
+
+ dev_priv = dev->dev_private;
+ ret = sx_xlock_sig(&dev->mode_config.mutex);
+ if (ret != 0)
+ return (EINTR);
+
+ if (dev_priv->pwrctx != NULL) {
+ sbuf_printf(m, "power context ");
+ describe_obj(m, dev_priv->pwrctx);
+ sbuf_printf(m, "\n");
+ }
+
+ if (dev_priv->renderctx != NULL) {
+ sbuf_printf(m, "render context ");
+ describe_obj(m, dev_priv->renderctx);
+ sbuf_printf(m, "\n");
+ }
+
+ sx_xunlock(&dev->mode_config.mutex);
+
+ return (0);
+}
+
+static int
+i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m,
+ void *data)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned forcewake_count;
+
+ dev_priv = dev->dev_private;
+ mtx_lock(&dev_priv->gt_lock);
+ forcewake_count = dev_priv->forcewake_count;
+ mtx_unlock(&dev_priv->gt_lock);
+
+ sbuf_printf(m, "forcewake count = %u\n", forcewake_count);
+
+ return (0);
+}
+
+static const char *
+swizzle_string(unsigned swizzle)
+{
+
+	switch (swizzle) {
+ case I915_BIT_6_SWIZZLE_NONE:
+ return "none";
+ case I915_BIT_6_SWIZZLE_9:
+ return "bit9";
+ case I915_BIT_6_SWIZZLE_9_10:
+ return "bit9/bit10";
+ case I915_BIT_6_SWIZZLE_9_11:
+ return "bit9/bit11";
+ case I915_BIT_6_SWIZZLE_9_10_11:
+ return "bit9/bit10/bit11";
+ case I915_BIT_6_SWIZZLE_9_17:
+ return "bit9/bit17";
+ case I915_BIT_6_SWIZZLE_9_10_17:
+ return "bit9/bit10/bit17";
+ case I915_BIT_6_SWIZZLE_UNKNOWN:
+ return "unknown";
+ }
+
+ return "bug";
+}
+
+static int
+i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ dev_priv = dev->dev_private;
+ ret = sx_xlock_sig(&dev->dev_struct_lock);
+ if (ret != 0)
+ return (EINTR);
+
+ sbuf_printf(m, "bit6 swizzle for X-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+ sbuf_printf(m, "bit6 swizzle for Y-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+
+ if (IS_GEN3(dev) || IS_GEN4(dev)) {
+		sbuf_printf(m, "DCC = 0x%08x\n",
+ I915_READ(DCC));
+ sbuf_printf(m, "C0DRB3 = 0x%04x\n",
+ I915_READ16(C0DRB3));
+ sbuf_printf(m, "C1DRB3 = 0x%04x\n",
+ I915_READ16(C1DRB3));
+ } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ sbuf_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C0));
+ sbuf_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C1));
+ sbuf_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C2));
+ sbuf_printf(m, "TILECTL = 0x%08x\n",
+ I915_READ(TILECTL));
+ sbuf_printf(m, "ARB_MODE = 0x%08x\n",
+ I915_READ(ARB_MODE));
+ sbuf_printf(m, "DISP_ARB_CTL = 0x%08x\n",
+ I915_READ(DISP_ARB_CTL));
+ }
+ DRM_UNLOCK(dev);
+
+ return (0);
+}
+
+static int
+i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
+{
+ struct drm_i915_private *dev_priv;
+ struct intel_ring_buffer *ring;
+ int i, ret;
+
+ dev_priv = dev->dev_private;
+
+ ret = sx_xlock_sig(&dev->dev_struct_lock);
+ if (ret != 0)
+ return (EINTR);
+ if (INTEL_INFO(dev)->gen == 6)
+ sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ring = &dev_priv->rings[i];
+
+ sbuf_printf(m, "%s\n", ring->name);
+ if (INTEL_INFO(dev)->gen == 7)
+ sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
+ sbuf_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
+ sbuf_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
+ sbuf_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+ }
+ if (dev_priv->mm.aliasing_ppgtt) {
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+ sbuf_printf(m, "aliasing PPGTT:\n");
+ sbuf_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+ }
+ sbuf_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+ DRM_UNLOCK(dev);
+
+ return (0);
+}
+
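+/*
+ * The sysctl handlers below follow the usual read-modify pattern: report
+ * the current value, and only act when userspace actually supplied a new
+ * one (req->newptr != NULL).
+ */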
+static int
+i915_debug_set_wedged(SYSCTL_HANDLER_ARGS)
+{
+ struct drm_device *dev;
+ drm_i915_private_t *dev_priv;
+ int error, wedged;
+
+ dev = arg1;
+ dev_priv = dev->dev_private;
+ if (dev_priv == NULL)
+ return (EBUSY);
+ wedged = dev_priv->mm.wedged;
+ error = sysctl_handle_int(oidp, &wedged, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ DRM_INFO("Manually setting wedged to %d\n", wedged);
+ i915_handle_error(dev, wedged);
+ return (error);
+}
+
+static int
+i915_max_freq(SYSCTL_HANDLER_ARGS)
+{
+ struct drm_device *dev;
+ drm_i915_private_t *dev_priv;
+ int error, max_freq;
+
+ dev = arg1;
+ dev_priv = dev->dev_private;
+ if (dev_priv == NULL)
+ return (EBUSY);
+ max_freq = dev_priv->max_delay * 50;
+ error = sysctl_handle_int(oidp, &max_freq, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ DRM_DEBUG("Manually setting max freq to %d\n", max_freq);
+ /*
+ * Turbo will still be enabled, but won't go above the set value.
+ */
+ dev_priv->max_delay = max_freq / 50;
+ gen6_set_rps(dev, max_freq / 50);
+ return (error);
+}
+
+static int
+i915_cache_sharing(SYSCTL_HANDLER_ARGS)
+{
+ struct drm_device *dev;
+ drm_i915_private_t *dev_priv;
+ int error, snpcr, cache_sharing;
+
+ dev = arg1;
+ dev_priv = dev->dev_private;
+ if (dev_priv == NULL)
+ return (EBUSY);
+ DRM_LOCK(dev);
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ DRM_UNLOCK(dev);
+ cache_sharing = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
+ error = sysctl_handle_int(oidp, &cache_sharing, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (cache_sharing < 0 || cache_sharing > 3)
+ return (EINVAL);
+ DRM_DEBUG("Manually setting uncore sharing to %d\n", cache_sharing);
+
+ DRM_LOCK(dev);
+ /* Update the cache sharing policy here as well */
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= (cache_sharing << GEN6_MBC_SNPCR_SHIFT);
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+ DRM_UNLOCK(dev);
+ return (0);
+}
+
+static struct i915_info_sysctl_list {
+ const char *name;
+ int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data);
+ int flags;
+ void *data;
+} i915_info_sysctl_list[] = {
+ {"i915_capabilities", i915_capabilities, 0},
+ {"i915_gem_objects", i915_gem_object_info, 0},
+ {"i915_gem_gtt", i915_gem_gtt_info, 0},
+ {"i915_gem_active", i915_gem_object_list_info, 0, (void *)ACTIVE_LIST},
+ {"i915_gem_flushing", i915_gem_object_list_info, 0,
+ (void *)FLUSHING_LIST},
+ {"i915_gem_inactive", i915_gem_object_list_info, 0,
+ (void *)INACTIVE_LIST},
+ {"i915_gem_pinned", i915_gem_object_list_info, 0,
+ (void *)PINNED_LIST},
+ {"i915_gem_deferred_free", i915_gem_object_list_info, 0,
+ (void *)DEFERRED_FREE_LIST},
+ {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
+ {"i915_gem_request", i915_gem_request_info, 0},
+ {"i915_gem_seqno", i915_gem_seqno_info, 0},
+ {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
+ {"i915_gem_interrupt", i915_interrupt_info, 0},
+ {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+ {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+ {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+ {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+ {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+ {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+ {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+ {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+ {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
+ {"i915_error_state", i915_error_state, 0},
+ {"i915_rstdby_delays", i915_rstdby_delays, 0},
+ {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+ {"i915_delayfreq_table", i915_delayfreq_table, 0},
+ {"i915_inttoext_table", i915_inttoext_table, 0},
+ {"i915_drpc_info", i915_drpc_info, 0},
+ {"i915_emon_status", i915_emon_status, 0},
+ {"i915_ring_freq_table", i915_ring_freq_table, 0},
+ {"i915_gfxec", i915_gfxec, 0},
+ {"i915_fbc_status", i915_fbc_status, 0},
+ {"i915_sr_status", i915_sr_status, 0},
+#if 0
+ {"i915_opregion", i915_opregion, 0},
+#endif
+ {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+ {"i915_context_status", i915_context_status, 0},
+ {"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, 0},
+ {"i915_swizzle_info", i915_swizzle_info, 0},
+ {"i915_ppgtt_info", i915_ppgtt_info, 0},
+};
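+
+/*
+ * Usage sketch (assuming the drm2 device attaches its sysctl tree under
+ * hw.dri.<unit>): `sysctl hw.dri.0.info.i915_capabilities` would render
+ * the matching entry above through i915_info_sysctl_handler().
+ */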
+
+struct i915_info_sysctl_thunk {
+ struct drm_device *dev;
+ int idx;
+ void *arg;
+};
+
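+/*
+ * One thunk per table entry: it carries the device, the index into
+ * i915_info_sysctl_list and the per-entry argument, so a single handler
+ * can service every info node.
+ */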
+static int
+i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct sbuf m;
+ struct i915_info_sysctl_thunk *thunk;
+ struct drm_device *dev;
+ drm_i915_private_t *dev_priv;
+ int error;
+
+ thunk = arg1;
+ dev = thunk->dev;
+ dev_priv = dev->dev_private;
+ if (dev_priv == NULL)
+ return (EBUSY);
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error != 0)
+ return (error);
+ sbuf_new_for_sysctl(&m, NULL, 128, req);
+ error = i915_info_sysctl_list[thunk->idx].ptr(dev, &m,
+ thunk->arg);
+ if (error == 0)
+ error = sbuf_finish(&m);
+ sbuf_delete(&m);
+ return (error);
+}
+
+extern int i915_gem_sync_exec_requests;
+extern int i915_fix_mi_batchbuffer_end;
+extern int i915_intr_pf;
+extern long i915_gem_wired_pages_cnt;
+
+int
+i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *top)
+{
+ struct sysctl_oid *oid, *info;
+ struct i915_info_sysctl_thunk *thunks;
+ int i, error;
+
+ thunks = malloc(sizeof(*thunks) * DRM_ARRAY_SIZE(i915_info_sysctl_list),
+ DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+ for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
+ thunks[i].dev = dev;
+ thunks[i].idx = i;
+ thunks[i].arg = i915_info_sysctl_list[i].data;
+ }
+ dev->sysctl_private = thunks;
+ info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info",
+ CTLFLAG_RW, NULL, NULL);
+ if (info == NULL)
+ return (ENOMEM);
+ for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
+ oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
+ i915_info_sysctl_list[i].name, CTLTYPE_STRING | CTLFLAG_RD,
+ &thunks[i], 0, i915_info_sysctl_handler, "A", NULL);
+ if (oid == NULL)
+ return (ENOMEM);
+ }
+	oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
+	    "i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt,
+	    NULL);
+	if (oid == NULL)
+		return (ENOMEM);
+ oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0,
+ i915_debug_set_wedged, "I", NULL);
+ if (oid == NULL)
+ return (ENOMEM);
+ oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq,
+ "I", NULL);
+ if (oid == NULL)
+ return (ENOMEM);
+ oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
+ "cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
+ 0, i915_cache_sharing, "I", NULL);
+ if (oid == NULL)
+ return (ENOMEM);
+ oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec",
+ CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL);
+ if (oid == NULL)
+ return (ENOMEM);
+ oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi",
+ CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL);
+ if (oid == NULL)
+ return (ENOMEM);
+ oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf",
+ CTLFLAG_RW, &i915_intr_pf, 0, NULL);
+ if (oid == NULL)
+ return (ENOMEM);
+
+ error = drm_add_busid_modesetting(dev, ctx, top);
+ if (error != 0)
+ return (error);
+
+ return (0);
+}
+
+void
+i915_sysctl_cleanup(struct drm_device *dev)
+{
+
+ free(dev->sysctl_private, DRM_MEM_DRIVER);
+}
diff --git a/sys/dev/drm2/i915/i915_dma.c b/sys/dev/drm2/i915/i915_dma.c
new file mode 100644
index 0000000..f78553e
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_dma.c
@@ -0,0 +1,2075 @@
+/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+ */
+/*-
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/i915/intel_ringbuffer.h>
+
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ * - i915_mch_dev
+ * - dev_priv->max_delay
+ * - dev_priv->min_delay
+ * - dev_priv->fmax
+ * - dev_priv->gpu_busy
+ */
+static struct mtx mchdev_lock;
+MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF);
+
+static void i915_pineview_get_mem_freq(struct drm_device *dev);
+static void i915_ironlake_get_mem_freq(struct drm_device *dev);
+static int i915_driver_unload_int(struct drm_device *dev, bool locked);
+
+static void i915_write_hws_pga(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 addr;
+
+ addr = dev_priv->status_page_dmah->busaddr;
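+	/*
+	 * Gen4+ parts can address more than 4GB of physical memory:
+	 * address bits 32-35 are folded into bits 7:4 of HWS_PGA next
+	 * to the page-aligned base.
+	 */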
+ if (INTEL_INFO(dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+}
+
+/**
+ * Sets up the hardware status page for devices that need a physical address
+ * in the register.
+ */
+static int i915_init_phys_hws(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ /*
+ * Program Hardware Status Page
+ * XXXKIB Keep 4GB limit for allocation for now. This method
+	 * of allocation is used on <= 965 hardware, which has several
+	 * errata regarding the use of physical memory > 4 GB.
+ */
+ DRM_UNLOCK(dev);
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+ DRM_LOCK(dev);
+ if (!dev_priv->status_page_dmah) {
+		DRM_ERROR("Cannot allocate hardware status page\n");
+ return -ENOMEM;
+ }
+ ring->status_page.page_addr = dev_priv->hw_status_page =
+ dev_priv->status_page_dmah->vaddr;
+ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+ i915_write_hws_pga(dev);
+ DRM_DEBUG("Enabled hardware status page, phys %jx\n",
+ (uintmax_t)dev_priv->dma_status_page);
+ return 0;
+}
+
+/**
+ * Frees the hardware status page, whether it's a physical address or a virtual
+ * address set up by the X Server.
+ */
+static void i915_free_hws(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ if (dev_priv->status_page_dmah) {
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ }
+
+ if (dev_priv->status_gfx_addr) {
+ dev_priv->status_gfx_addr = 0;
+ ring->status_page.gfx_addr = 0;
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ }
+
+ /* Need to rewrite hardware status page */
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
+void i915_kernel_lost_context(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ /*
+ * We should never lose context on the ring with modesetting
+ * as we don't expose it to userspace
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
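+	/*
+	 * Resync the software head/tail with the hardware and recompute
+	 * the free space, keeping 8 bytes of slack so a completely full
+	 * ring is never mistaken for an empty one.
+	 */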
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+
+#if 1
+ KIB_NOTYET();
+#else
+ if (!dev->primary->master)
+ return;
+#endif
+
+ if (ring->head == ring->tail && dev_priv->sarea_priv)
+ dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+}
+
+static int i915_dma_cleanup(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ /* Make sure interrupts are disabled here because the uninstall ioctl
+ * may not have been called from userspace and after dev_private
+ * is freed, it's too late.
+ */
+ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->rings[i]);
+
+ /* Clear the HWS virtual address at teardown */
+ if (I915_NEED_GFX_HWS(dev))
+ i915_free_hws(dev);
+
+ return 0;
+}
+
+static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ dev_priv->sarea = drm_getsarea(dev);
+ if (!dev_priv->sarea) {
+		DRM_ERROR("cannot find sarea!\n");
+ i915_dma_cleanup(dev);
+ return -EINVAL;
+ }
+
+ dev_priv->sarea_priv = (drm_i915_sarea_t *)
+ ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
+
+ if (init->ring_size != 0) {
+ if (LP_RING(dev_priv)->obj != NULL) {
+ i915_dma_cleanup(dev);
+ DRM_ERROR("Client tried to initialize ringbuffer in "
+ "GEM mode\n");
+ return -EINVAL;
+ }
+
+ ret = intel_render_ring_init_dri(dev,
+ init->ring_start,
+ init->ring_size);
+ if (ret) {
+ i915_dma_cleanup(dev);
+ return ret;
+ }
+ }
+
+ dev_priv->cpp = init->cpp;
+ dev_priv->back_offset = init->back_offset;
+ dev_priv->front_offset = init->front_offset;
+ dev_priv->current_page = 0;
+ dev_priv->sarea_priv->pf_current_page = 0;
+
+ /* Allow hardware batchbuffers unless told otherwise.
+ */
+ dev_priv->allow_batchbuffer = 1;
+
+ return 0;
+}
+
+static int i915_dma_resume(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ DRM_DEBUG("\n");
+
+ if (ring->map.handle == NULL) {
+		DRM_ERROR("cannot ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ /* Program Hardware Status Page */
+ if (!ring->status_page.page_addr) {
+		DRM_ERROR("Cannot find hardware status page\n");
+ return -EINVAL;
+ }
+ DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
+ if (ring->status_page.gfx_addr != 0)
+ intel_ring_setup_status_page(ring);
+ else
+ i915_write_hws_pga(dev);
+
+ DRM_DEBUG("Enabled hardware status page\n");
+
+ return 0;
+}
+
+static int i915_dma_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_init_t *init = data;
+ int retcode = 0;
+
+ switch (init->func) {
+ case I915_INIT_DMA:
+ retcode = i915_initialize(dev, init);
+ break;
+ case I915_CLEANUP_DMA:
+ retcode = i915_dma_cleanup(dev);
+ break;
+ case I915_RESUME_DMA:
+ retcode = i915_dma_resume(dev);
+ break;
+ default:
+ retcode = -EINVAL;
+ break;
+ }
+
+ return retcode;
+}
+
+/* Implement basically the same security restrictions as hardware does
+ * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
+ *
+ * Most of the calculations below involve calculating the size of a
+ * particular instruction. It's important to get the size right as
+ * that tells us where the next instruction to check is. Any illegal
+ * instruction detected will be given a size of zero, which is a
+ * signal to abort the rest of the buffer.
+ */
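+/*
+ * Illustrative example: for a 2D-class dword such as 0x54000006 the top
+ * three bits decode to 0x2, so its length is (cmd & 0xff) + 2 = 8 dwords.
+ */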
+static int do_validate_cmd(int cmd)
+{
+ switch (((cmd >> 29) & 0x7)) {
+ case 0x0:
+ switch ((cmd >> 23) & 0x3f) {
+ case 0x0:
+ return 1; /* MI_NOOP */
+ case 0x4:
+ return 1; /* MI_FLUSH */
+ default:
+ return 0; /* disallow everything else */
+ }
+ break;
+ case 0x1:
+ return 0; /* reserved */
+ case 0x2:
+ return (cmd & 0xff) + 2; /* 2d commands */
+ case 0x3:
+ if (((cmd >> 24) & 0x1f) <= 0x18)
+ return 1;
+
+ switch ((cmd >> 24) & 0x1f) {
+ case 0x1c:
+ return 1;
+ case 0x1d:
+ switch ((cmd >> 16) & 0xff) {
+ case 0x3:
+ return (cmd & 0x1f) + 2;
+ case 0x4:
+ return (cmd & 0xf) + 2;
+ default:
+ return (cmd & 0xffff) + 2;
+ }
+ case 0x1e:
+ if (cmd & (1 << 23))
+ return (cmd & 0xffff) + 1;
+ else
+ return 1;
+ case 0x1f:
+ if ((cmd & (1 << 23)) == 0) /* inline vertices */
+ return (cmd & 0x1ffff) + 2;
+			else if (cmd & (1 << 17)) {	/* indirect random */
+				if ((cmd & 0xffff) == 0)
+					return 0; /* unknown length, too hard */
+				else
+					return (((cmd & 0xffff) + 1) / 2) + 1;
+			} else
+				return 2; /* indirect sequential */
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int validate_cmd(int cmd)
+{
+ int ret = do_validate_cmd(cmd);
+
+/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
+
+ return ret;
+}
+
+static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
+ int dwords)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
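+	/* Reject batches that could not fit even into an empty ring. */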
+ if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+ return -EINVAL;
+
+ BEGIN_LP_RING((dwords+1)&~1);
+
+ for (i = 0; i < dwords;) {
+ int cmd, sz;
+
+ if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
+ return -EINVAL;
+
+ if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+ return -EINVAL;
+
+ OUT_RING(cmd);
+
+ while (++i, --sz) {
+ if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
+ sizeof(cmd))) {
+ return -EINVAL;
+ }
+ OUT_RING(cmd);
+ }
+ }
+
+ if (dwords & 1)
+ OUT_RING(0);
+
+ ADVANCE_LP_RING();
+
+ return 0;
+}
+
+int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect *boxes,
+ int i, int DR1, int DR4)
+{
+ struct drm_clip_rect box;
+
+ if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
+ return -EFAULT;
+ }
+
+ return (i915_emit_box_p(dev, &box, DR1, DR4));
+}
+
+int
+i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
+ int DR1, int DR4)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 ||
+ box->x2 <= 0) {
+ DRM_ERROR("Bad box %d,%d..%d,%d\n",
+ box->x1, box->y1, box->x2, box->y2);
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ ret = BEGIN_LP_RING(4);
+ if (ret != 0)
+ return (ret);
+
+ OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
+ OUT_RING(DR4);
+ } else {
+ ret = BEGIN_LP_RING(6);
+ if (ret != 0)
+ return (ret);
+
+ OUT_RING(GFX_OP_DRAWRECT_INFO);
+ OUT_RING(DR1);
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
+ OUT_RING(DR4);
+ OUT_RING(0);
+ }
+ ADVANCE_LP_RING();
+
+ return 0;
+}
+
+/* XXX: Emitting the counter should really be moved to part of the IRQ
+ * emit. For now, do it in both places:
+ */
+
+static void i915_emit_breadcrumb(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (++dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 0;
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
+}
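+
+/*
+ * The breadcrumb above is the driver's completion marker:
+ * MI_STORE_DWORD_INDEX makes the GPU write dev_priv->counter into the
+ * hardware status page at dword I915_BREADCRUMB_INDEX, and the CPU
+ * side later compares READ_BREADCRUMB(dev_priv) against an emitted
+ * counter to see how far the ring has actually executed.  Roughly
+ * (illustrative sketch only; the real waiters live in the irq code):
+ *
+ *	if (READ_BREADCRUMB(dev_priv) >= emitted_counter)
+ *		then everything up to emitted_counter has retired
+ */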
+
+static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+ drm_i915_cmdbuffer_t * cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
+{
+ int nbox = cmd->num_cliprects;
+ int i = 0, count, ret;
+
+ if (cmd->sz & 0x3) {
+ DRM_ERROR("alignment\n");
+ return -EINVAL;
+ }
+
+ i915_kernel_lost_context(dev);
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ ret = i915_emit_box_p(dev, &cmd->cliprects[i],
+ cmd->DR1, cmd->DR4);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
+ if (ret)
+ return ret;
+ }
+
+ i915_emit_breadcrumb(dev);
+ return 0;
+}
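+
+/*
+ * Note that the dispatch loop above re-emits the entire command buffer
+ * once per clip rectangle: with num_cliprects == 3 the ring sees
+ * box[0], cmds, box[1], cmds, box[2], cmds, so the cost of the ioctl
+ * scales with the cliprect count, not just the buffer size.
+ */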
+
+static int
+i915_dispatch_batchbuffer(struct drm_device * dev,
+ drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int nbox = batch->num_cliprects;
+ int i, count, ret;
+
+ if ((batch->start | batch->used) & 0x7) {
+ DRM_ERROR("alignment\n");
+ return -EINVAL;
+ }
+
+ i915_kernel_lost_context(dev);
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ int ret = i915_emit_box_p(dev, &cliprects[i],
+ batch->DR1, batch->DR4);
+ if (ret)
+ return ret;
+ }
+
+ if (!IS_I830(dev) && !IS_845G(dev)) {
+ ret = BEGIN_LP_RING(2);
+ if (ret != 0)
+ return (ret);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ OUT_RING(batch->start);
+ } else {
+ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+ OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ }
+ } else {
+ ret = BEGIN_LP_RING(4);
+ if (ret != 0)
+ return (ret);
+
+ OUT_RING(MI_BATCH_BUFFER);
+ OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ OUT_RING(batch->start + batch->used - 4);
+ OUT_RING(0);
+ }
+ ADVANCE_LP_RING();
+ }
+
+ i915_emit_breadcrumb(dev);
+
+ return 0;
+}
+
+static int i915_dispatch_flip(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!dev_priv->sarea_priv)
+ return -EINVAL;
+
+ DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
+ __func__,
+ dev_priv->current_page,
+ dev_priv->sarea_priv->pf_current_page);
+
+ i915_kernel_lost_context(dev);
+
+ ret = BEGIN_LP_RING(10);
+ if (ret)
+ return ret;
+ OUT_RING(MI_FLUSH | MI_READ_FLUSH);
+ OUT_RING(0);
+
+ OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
+ OUT_RING(0);
+ if (dev_priv->current_page == 0) {
+ OUT_RING(dev_priv->back_offset);
+ dev_priv->current_page = 1;
+ } else {
+ OUT_RING(dev_priv->front_offset);
+ dev_priv->current_page = 0;
+ }
+ OUT_RING(0);
+
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
+ OUT_RING(0);
+
+ ADVANCE_LP_RING();
+
+ if (++dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 0;
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
+
+ dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+ return 0;
+}
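+
+/*
+ * Worked through: when current_page == 0 the flip above points the
+ * display at back_offset and sets current_page to 1; the next call
+ * does the opposite, so repeated flip ioctls ping-pong between the
+ * two buffers, and the MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP
+ * pair keeps the ring from racing ahead of the flip itself.
+ */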
+
+static int
+i915_quiescent(struct drm_device *dev)
+{
+ struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
+
+ i915_kernel_lost_context(dev);
+ return (intel_wait_ring_idle(ring));
+}
+
+static int
+i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ int ret;
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ DRM_LOCK(dev);
+ ret = i915_quiescent(dev);
+ DRM_UNLOCK(dev);
+
+ return (ret);
+}
+
+static int i915_batchbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_i915_sarea_t *sarea_priv;
+ drm_i915_batchbuffer_t *batch = data;
+ struct drm_clip_rect *cliprects;
+ size_t cliplen;
+ int ret;
+
+ if (!dev_priv->allow_batchbuffer) {
+ DRM_ERROR("Batchbuffer ioctl disabled\n");
+ return -EINVAL;
+ }
+ DRM_UNLOCK(dev);
+
+ DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
+ batch->start, batch->used, batch->num_cliprects);
+
+ cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
+ if (batch->num_cliprects < 0)
+ return -EFAULT;
+ if (batch->num_cliprects != 0) {
+ cliprects = malloc(cliplen, DRM_MEM_DMA,
+ M_WAITOK | M_ZERO);
+
+ ret = -copyin(batch->cliprects, cliprects, cliplen);
+ if (ret != 0) {
+ DRM_LOCK(dev);
+ goto fail_free;
+ }
+ } else
+ cliprects = NULL;
+
+ DRM_LOCK(dev);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+ ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
+
+ sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
+ if (sarea_priv)
+ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_free:
+ free(cliprects, DRM_MEM_DMA);
+ return ret;
+}
+
+static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_i915_sarea_t *sarea_priv;
+ drm_i915_cmdbuffer_t *cmdbuf = data;
+ struct drm_clip_rect *cliprects = NULL;
+ void *batch_data;
+ int ret;
+
+ DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+ cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+
+ if (cmdbuf->num_cliprects < 0)
+ return -EINVAL;
+
+ DRM_UNLOCK(dev);
+
+ batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
+
+ ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
+ if (ret != 0) {
+ DRM_LOCK(dev);
+ goto fail_batch_free;
+ }
+
+ if (cmdbuf->num_cliprects) {
+ cliprects = malloc(cmdbuf->num_cliprects *
+ sizeof(struct drm_clip_rect), DRM_MEM_DMA,
+ M_WAITOK | M_ZERO);
+ ret = -copyin(cmdbuf->cliprects, cliprects,
+ cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
+ if (ret != 0) {
+ DRM_LOCK(dev);
+ goto fail_clip_free;
+ }
+ }
+
+ DRM_LOCK(dev);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+ ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
+ if (ret) {
+ DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+ goto fail_clip_free;
+ }
+
+ sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
+ if (sarea_priv)
+ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_clip_free:
+ free(cliprects, DRM_MEM_DMA);
+fail_batch_free:
+ free(batch_data, DRM_MEM_DMA);
+ return ret;
+}
+
+static int i915_flip_bufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret;
+
+ DRM_DEBUG("%s\n", __func__);
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ ret = i915_dispatch_flip(dev);
+
+ return ret;
+}
+
+static int i915_getparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_getparam_t *param = data;
+ int value;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ switch (param->param) {
+ case I915_PARAM_IRQ_ACTIVE:
+ value = dev->irq_enabled ? 1 : 0;
+ break;
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ value = dev_priv->allow_batchbuffer ? 1 : 0;
+ break;
+ case I915_PARAM_LAST_DISPATCH:
+ value = READ_BREADCRUMB(dev_priv);
+ break;
+ case I915_PARAM_CHIPSET_ID:
+ value = dev->pci_device;
+ break;
+ case I915_PARAM_HAS_GEM:
+ value = 1;
+ break;
+ case I915_PARAM_NUM_FENCES_AVAIL:
+ value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+ break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = dev_priv->overlay ? 1 : 0;
+ break;
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXECBUF2:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_BSD:
+ value = HAS_BSD(dev);
+ break;
+ case I915_PARAM_HAS_BLT:
+ value = HAS_BLT(dev);
+ break;
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ value = INTEL_INFO(dev)->gen >= 4;
+ break;
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(dev);
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown parameter %d\n",
+ param->param);
+ return -EINVAL;
+ }
+
+ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+ DRM_ERROR("DRM_COPY_TO_USER failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int i915_setparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_setparam_t *param = data;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ switch (param->param) {
+ case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
+ break;
+ case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
+ dev_priv->tex_lru_log_granularity = param->value;
+ break;
+ case I915_SETPARAM_ALLOW_BATCHBUFFER:
+ dev_priv->allow_batchbuffer = param->value;
+ break;
+ case I915_SETPARAM_NUM_USED_FENCES:
+ if (param->value > dev_priv->num_fence_regs ||
+ param->value < 0)
+ return -EINVAL;
+ /* Userspace can use first N regs */
+ dev_priv->fence_reg_start = param->value;
+ break;
+ default:
+ DRM_DEBUG("unknown parameter %d\n", param->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int i915_set_status_page(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_hws_addr_t *hws = data;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ if (!I915_NEED_GFX_HWS(dev))
+ return -EINVAL;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_ERROR("tried to set status page when mode setting active\n");
+ return 0;
+ }
+
+ ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
+ hws->addr & (0x1ffff<<12);
+
+ dev_priv->hws_map.offset = dev->agp->base + hws->addr;
+ dev_priv->hws_map.size = 4*1024;
+ dev_priv->hws_map.type = 0;
+ dev_priv->hws_map.flags = 0;
+ dev_priv->hws_map.mtrr = 0;
+
+ drm_core_ioremap_wc(&dev_priv->hws_map, dev);
+ if (dev_priv->hws_map.virtual == NULL) {
+ i915_dma_cleanup(dev);
+ ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
+ DRM_ERROR("can not ioremap virtual address for"
+ " G33 hw status page\n");
+ return -ENOMEM;
+ }
+ ring->status_page.page_addr = dev_priv->hw_status_page =
+ dev_priv->hws_map.virtual;
+
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
+ dev_priv->status_gfx_addr);
+ DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
+ return 0;
+}
+
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
+ return false;
+
+ return true;
+}
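+
+/*
+ * In other words, the i915_enable_ppgtt tunable is a tri-state: 0
+ * forces PPGTT off, a positive value forces it on, and a negative
+ * value (auto) enables it everywhere except gen6 with VT-d active.
+ */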
+
+static int
+i915_load_gem_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long prealloc_size, gtt_size, mappable_size;
+ int ret;
+
+ prealloc_size = dev_priv->mm.gtt.stolen_size;
+ gtt_size = dev_priv->mm.gtt.gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt.gtt_mappable_entries << PAGE_SHIFT;
+
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+ DRM_LOCK(dev);
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ /* For paranoia keep the guard page in between. */
+ gtt_size -= PAGE_SIZE;
+
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size);
+
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (ret) {
+ DRM_UNLOCK(dev);
+ return ret;
+ }
+ } else {
+ /* Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch
+ * page. There are a number of places where the hardware
+ * apparently prefetches past the end of the object, and we've
+ * seen multiple hangs with the GPU head pointer stuck in a
+ * batchbuffer bound at the last page of the aperture. One page
+ * should be enough to keep any prefetching inside of the
+ * aperture.
+ */
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
+ }
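+
+ /*
+ * Sizing example (assuming I915_PPGTT_PD_ENTRIES is 512 and 4KB
+ * pages): the aliasing-PPGTT branch above gives up
+ * 512 * PAGE_SIZE = 2MB of GTT space for the page directory
+ * entries plus one guard page, while the plain GEM branch only
+ * reserves the single scratch page at the end of the aperture.
+ */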
+
+ ret = i915_gem_init_hw(dev);
+ DRM_UNLOCK(dev);
+ if (ret != 0) {
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ return (ret);
+ }
+
+#if 0
+ /* Try to set up FBC with a reasonable compressed buffer size */
+ if (I915_HAS_FBC(dev) && i915_powersave) {
+ int cfb_size;
+
+ /* Leave 1M for line length buffer & misc. */
+
+ /* Try to get a 32M buffer... */
+ if (prealloc_size > (36*1024*1024))
+ cfb_size = 32*1024*1024;
+ else /* fall back to 7/8 of the stolen space */
+ cfb_size = prealloc_size * 7 / 8;
+ i915_setup_compression(dev, cfb_size);
+ }
+#endif
+
+ /* Allow hardware batchbuffers unless told otherwise. */
+ dev_priv->allow_batchbuffer = 1;
+ return 0;
+}
+
+static int
+i915_load_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = intel_parse_bios(dev);
+ if (ret)
+ DRM_INFO("failed to find VBIOS tables\n");
+
+#if 0
+ intel_register_dsm_handler();
+#endif
+
+ /* IIR "flip pending" bit means done if this bit is set */
+ if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
+ dev_priv->flip_pending_is_done = true;
+
+ intel_modeset_init(dev);
+
+ ret = i915_load_gem_init(dev);
+ if (ret != 0)
+ goto cleanup_gem;
+
+ intel_modeset_gem_init(dev);
+
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto cleanup_gem;
+
+ dev->vblank_disable_allowed = 1;
+
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_gem;
+
+ drm_kms_helper_poll_init(dev);
+
+ /* We're off and running w/KMS */
+ dev_priv->mm.suspended = 0;
+
+ return (0);
+
+cleanup_gem:
+ DRM_LOCK(dev);
+ i915_gem_cleanup_ringbuffer(dev);
+ DRM_UNLOCK(dev);
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ return (ret);
+}
+
+static int
+i915_get_bridge_dev(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+
+ dev_priv = dev->dev_private;
+
+ dev_priv->bridge_dev = intel_gtt_get_bridge_device();
+ if (dev_priv->bridge_dev == NULL) {
+ DRM_ERROR("bridge device not found\n");
+ return (-1);
+ }
+ return (0);
+}
+
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv;
+ device_t vga;
+ int reg;
+ u32 temp_lo, temp_hi;
+ u64 mchbar_addr, temp;
+
+ dev_priv = dev->dev_private;
+ reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
+ else
+ temp_hi = 0;
+ temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef XXX_CONFIG_PNP
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
+#endif
+
+ /* Get some space for it */
+ vga = device_get_parent(dev->device);
+ dev_priv->mch_res_rid = 0x100;
+ dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
+ dev->device, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
+ MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
+ if (dev_priv->mch_res == NULL) {
+ DRM_ERROR("failed mchbar resource alloc\n");
+ return (-ENOMEM);
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ temp = rman_get_start(dev_priv->mch_res);
+ temp >>= 32;
+ pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
+ }
+ pci_write_config(dev_priv->bridge_dev, reg,
+ rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
+ return (0);
+}
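+
+/*
+ * For example, on gen4+ parts MCHBAR is a 64-bit BAR in the bridge
+ * device's config space at MCHBAR_I965 (0x48): the function above
+ * reads the low dword at 0x48 and the high dword at 0x4c, and writes
+ * the allocated range back in the same split fashion.  Pre-gen4 parts
+ * use only the 32-bit register at MCHBAR_I915 (0x44).
+ */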
+
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv;
+ int mchbar_reg;
+ u32 temp;
+ bool enabled;
+
+ dev_priv = dev->dev_private;
+ mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+ dev_priv->mchbar_need_disable = false;
+
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
+ enabled = (temp & DEVEN_MCHBAR_EN) != 0;
+ } else {
+ temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, don't have to do anything */
+ if (enabled) {
+ DRM_DEBUG("mchbar already enabled\n");
+ return;
+ }
+
+ if (intel_alloc_mchbar_resource(dev))
+ return;
+
+ dev_priv->mchbar_need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
+ temp | DEVEN_MCHBAR_EN, 4);
+ } else {
+ temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
+ pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
+ }
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv;
+ device_t vga;
+ int mchbar_reg;
+ u32 temp;
+
+ dev_priv = dev->dev_private;
+ mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+ if (dev_priv->mchbar_need_disable) {
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ temp = pci_read_config(dev_priv->bridge_dev,
+ DEVEN_REG, 4);
+ temp &= ~DEVEN_MCHBAR_EN;
+ pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
+ temp, 4);
+ } else {
+ temp = pci_read_config(dev_priv->bridge_dev,
+ mchbar_reg, 4);
+ temp &= ~1;
+ pci_write_config(dev_priv->bridge_dev, mchbar_reg,
+ temp, 4);
+ }
+ }
+
+ if (dev_priv->mch_res != NULL) {
+ vga = device_get_parent(dev->device);
+ BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->device,
+ SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
+ BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->device,
+ SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
+ dev_priv->mch_res = NULL;
+ }
+}
+
+int
+i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long base, size;
+ int mmio_bar, ret;
+
+ ret = 0;
+
+ /* i915 has 4 more counters */
+ dev->counters += 4;
+ dev->types[6] = _DRM_STAT_IRQ;
+ dev->types[7] = _DRM_STAT_PRIMARY;
+ dev->types[8] = _DRM_STAT_SECONDARY;
+ dev->types[9] = _DRM_STAT_DMA;
+
+ dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
+ M_ZERO | M_WAITOK);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ dev->dev_private = (void *)dev_priv;
+ dev_priv->dev = dev;
+ dev_priv->info = i915_get_device_id(dev->pci_device);
+
+ if (i915_get_bridge_dev(dev)) {
+ free(dev_priv, DRM_MEM_DRIVER);
+ return (-EIO);
+ }
+ dev_priv->mm.gtt = intel_gtt_get();
+
+ /* Add register map (needed for suspend/resume) */
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ base = drm_get_resource_start(dev, mmio_bar);
+ size = drm_get_resource_len(dev, mmio_bar);
+
+ ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+ _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
+
+ dev_priv->tq = taskqueue_create("915", M_WAITOK,
+ taskqueue_thread_enqueue, &dev_priv->tq);
+ taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
+ mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
+ mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
+ mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
+ mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
+
+ dev_priv->has_gem = 1;
+ intel_irq_init(dev);
+
+ intel_setup_mchbar(dev);
+ intel_setup_gmbus(dev);
+ intel_opregion_setup(dev);
+
+ intel_setup_bios(dev);
+
+ i915_gem_load(dev);
+
+ /* Init HWS */
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = i915_init_phys_hws(dev);
+ if (ret != 0) {
+ drm_rmmap(dev, dev_priv->mmio_map);
+ drm_free(dev_priv, sizeof(struct drm_i915_private),
+ DRM_MEM_DRIVER);
+ return ret;
+ }
+ }
+
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_GEN5(dev))
+ i915_ironlake_get_mem_freq(dev);
+
+ mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
+
+ if (IS_IVYBRIDGE(dev))
+ dev_priv->num_pipe = 3;
+ else if (IS_MOBILE(dev) || !IS_GEN2(dev))
+ dev_priv->num_pipe = 2;
+ else
+ dev_priv->num_pipe = 1;
+
+ ret = drm_vblank_init(dev, dev_priv->num_pipe);
+ if (ret)
+ goto out_gem_unload;
+
+ /* Start out suspended */
+ dev_priv->mm.suspended = 1;
+
+ intel_detect_pch(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_UNLOCK(dev);
+ ret = i915_load_modeset_init(dev);
+ DRM_LOCK(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to init modeset\n");
+ goto out_gem_unload;
+ }
+ }
+
+ intel_opregion_init(dev);
+
+ callout_init(&dev_priv->hangcheck_timer, 1);
+ callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
+ i915_hangcheck_elapsed, dev);
+
+ if (IS_GEN5(dev)) {
+ mtx_lock(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+ dev_priv->mchdev_lock = &mchdev_lock;
+ mtx_unlock(&mchdev_lock);
+ }
+
+ return (0);
+
+out_gem_unload:
+ /* XXXKIB */
+ (void) i915_driver_unload_int(dev, true);
+ return (ret);
+}
+
+static int
+i915_driver_unload_int(struct drm_device *dev, bool locked)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!locked)
+ DRM_LOCK(dev);
+ ret = i915_gpu_idle(dev, true);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+ if (!locked)
+ DRM_UNLOCK(dev);
+
+ i915_free_hws(dev);
+
+ intel_teardown_mchbar(dev);
+
+ if (locked)
+ DRM_UNLOCK(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_fbdev_fini(dev);
+ intel_modeset_cleanup(dev);
+ }
+
+ /* Free error state after interrupts are fully disabled. */
+ callout_stop(&dev_priv->hangcheck_timer);
+ callout_drain(&dev_priv->hangcheck_timer);
+
+ i915_destroy_error_state(dev);
+
+ intel_opregion_fini(dev);
+
+ if (locked)
+ DRM_LOCK(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (!locked)
+ DRM_LOCK(dev);
+ i915_gem_free_all_phys_object(dev);
+ i915_gem_cleanup_ringbuffer(dev);
+ if (!locked)
+ DRM_UNLOCK(dev);
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+#if 1
+ KIB_NOTYET();
+#else
+ if (I915_HAS_FBC(dev) && i915_powersave)
+ i915_cleanup_compression(dev);
+#endif
+ drm_mm_takedown(&dev_priv->mm.stolen);
+
+ intel_cleanup_overlay(dev);
+
+ if (!I915_NEED_GFX_HWS(dev))
+ i915_free_hws(dev);
+ }
+
+ i915_gem_unload(dev);
+
+ mtx_destroy(&dev_priv->irq_lock);
+
+ if (dev_priv->tq != NULL)
+ taskqueue_free(dev_priv->tq);
+
+ bus_generic_detach(dev->device);
+ drm_rmmap(dev, dev_priv->mmio_map);
+ intel_teardown_gmbus(dev);
+
+ mtx_destroy(&dev_priv->error_lock);
+ mtx_destroy(&dev_priv->error_completion_lock);
+ mtx_destroy(&dev_priv->rps_lock);
+ drm_free(dev->dev_private, sizeof(drm_i915_private_t),
+ DRM_MEM_DRIVER);
+
+ return (0);
+}
+
+int
+i915_driver_unload(struct drm_device *dev)
+{
+
+ return (i915_driver_unload_int(dev, true));
+}
+
+int
+i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv;
+
+ i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
+ M_WAITOK | M_ZERO);
+
+ mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF);
+ INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+ file_priv->driver_priv = i915_file_priv;
+
+ return (0);
+}
+
+void
+i915_driver_lastclose(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
+#if 1
+ KIB_NOTYET();
+#else
+ drm_fb_helper_restore();
+ vga_switcheroo_process_delayed_switch();
+#endif
+ return;
+ }
+ i915_gem_lastclose(dev);
+ i915_dma_cleanup(dev);
+}
+
+void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+
+ i915_gem_release(dev, file_priv);
+}
+
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+ mtx_destroy(&i915_file_priv->mm.lck);
+ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+}
+
+struct drm_ioctl_desc i915_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+};
+
+struct drm_driver_info i915_driver_info = {
+ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+ DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
+ DRIVER_GEM /*| DRIVER_MODESET*/,
+
+ .buf_priv_size = sizeof(drm_i915_private_t),
+ .load = i915_driver_load,
+ .open = i915_driver_open,
+ .unload = i915_driver_unload,
+ .preclose = i915_driver_preclose,
+ .lastclose = i915_driver_lastclose,
+ .postclose = i915_driver_postclose,
+ .device_is_agp = i915_driver_device_is_agp,
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
+ .gem_pager_ops = &i915_gem_pager_ops,
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_destroy = i915_gem_dumb_destroy,
+ .sysctl_init = i915_sysctl_init,
+ .sysctl_cleanup = i915_sysctl_cleanup,
+
+ .ioctls = i915_ioctls,
+ .max_ioctl = DRM_ARRAY_SIZE(i915_ioctls),
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * built-in.
+ *
+ * \param dev The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate that every i9x5 is AGP.
+ */
+int i915_driver_device_is_agp(struct drm_device * dev)
+{
+ return 1;
+}
+
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->c_m = 1;
+ } else {
+ dev_priv->c_m = 2;
+ }
+}
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
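+
+/*
+ * i915_chipset_val() below matches (c_m, r_t) from the frequency
+ * detection code against the (i, t) columns of this table.  For
+ * example, an IronLake part with an FSB in the 3200-4800 bucket
+ * (c_m == 1) and 1066MHz memory picks m = 294, c = 24460, and the
+ * chipset power is then computed as ((m * counts_per_ms) + c) / 10.
+ */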
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ u64 total_count, diff, ret;
+ u32 count1, count2, count3, m = 0, c = 0;
+ unsigned long now = jiffies_to_msecs(jiffies), diff1;
+ int i;
+
+ diff1 = now - dev_priv->last_time1;
+ /*
+ * sysctl(8) reads the value of a sysctl twice in rapid
+ * succession. There is a high chance that both reads happen
+ * in the same timer tick. Use the cached value to avoid
+ * dividing by zero and to give the hw a chance to gather
+ * more samples.
+ */
+ if (diff1 <= 10)
+ return (dev_priv->chipset_power);
+
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+
+ total_count = count1 + count2 + count3;
+
+ /* FIXME: handle per-counter overflow */
+ if (total_count < dev_priv->last_count1) {
+ diff = ~0UL - dev_priv->last_count1;
+ diff += total_count;
+ } else {
+ diff = total_count - dev_priv->last_count1;
+ }
+
+ for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->c_m &&
+ cparams[i].t == dev_priv->r_t) {
+ m = cparams[i].m;
+ c = cparams[i].c;
+ break;
+ }
+ }
+
+ diff = diff / diff1;
+ ret = ((m * diff) + c);
+ ret = ret / 10;
+
+ dev_priv->last_count1 = total_count;
+ dev_priv->last_time1 = now;
+
+ dev_priv->chipset_power = ret;
+ return (ret);
+}
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long m, x, b;
+ u32 tsfs;
+
+ tsfs = I915_READ(TSFS);
+
+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+ x = I915_READ8(I915_TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+
+ return ((m * x) / 127) - b;
+}
+
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (dev_priv->info->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct timespec now, diff1;
+ u64 diff;
+ unsigned long diffms;
+ u32 count;
+
+ if (dev_priv->info->gen != 5)
+ return;
+
+ nanotime(&now);
+ diff1 = now;
+ timespecsub(&diff1, &dev_priv->last_time2);
+
+ /* Don't divide by 0 */
+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+ if (!diffms)
+ return;
+
+ count = I915_READ(GFXEC);
+
+ if (count < dev_priv->last_count2) {
+ diff = ~0UL - dev_priv->last_count2;
+ diff += count;
+ } else {
+ diff = count - dev_priv->last_count2;
+ }
+
+ dev_priv->last_count2 = count;
+ dev_priv->last_time2 = now;
+
+ /* More magic constants... */
+ diff = diff * 1181;
+ diff = diff / (diffms * 10);
+ dev_priv->gfx_power = diff;
+}
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+ state1 = ext_v;
+
+ t = i915_mch_val(dev_priv);
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ if (t > 80)
+ corr = ((t * 2349) + 135940);
+ else if (t >= 50)
+ corr = ((t * 964) + 29317);
+ else /* < 50 */
+ corr = ((t * 301) + 1004);
+
+ corr = corr * ((150142 * state1) / 10000 - 78642);
+ corr /= 100000;
+ corr2 = (corr * dev_priv->corr);
+
+ state2 = (corr2 * state1) / 10000;
+ state2 /= 100; /* convert to mW */
+
+ i915_update_gfx_val(dev_priv);
+
+ return dev_priv->gfx_power + state2;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned long chipset_val, graphics_val, ret = 0;
+
+ mtx_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ chipset_val = i915_chipset_val(dev_priv);
+ graphics_val = i915_gfx_val(dev_priv);
+
+ ret = chipset_val + graphics_val;
+
+out_unlock:
+ mtx_unlock(&mchdev_lock);
+
+ return ret;
+}
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ mtx_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay > dev_priv->fmax)
+ dev_priv->max_delay--;
+
+out_unlock:
+ mtx_unlock(&mchdev_lock);
+
+ return ret;
+}
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ mtx_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay < dev_priv->min_delay)
+ dev_priv->max_delay++;
+
+out_unlock:
+ mtx_unlock(&mchdev_lock);
+
+ return ret;
+}
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = false;
+
+ mtx_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ ret = dev_priv->busy;
+
+out_unlock:
+ mtx_unlock(&mchdev_lock);
+
+ return ret;
+}
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ mtx_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ dev_priv->max_delay = dev_priv->fstart;
+
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ ret = false;
+
+out_unlock:
+ mtx_unlock(&mchdev_lock);
+
+ return ret;
+}
diff --git a/sys/dev/drm2/i915/i915_drm.h b/sys/dev/drm2/i915/i915_drm.h
new file mode 100644
index 0000000..d4b0ae8
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_drm.h
@@ -0,0 +1,971 @@
+/*-
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+#include <dev/drm2/drm.h>
+
+/* Each region is a minimum of 16k, and there are at most 255 of them.
+ */
+#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
+ * of chars for next/prev indices */
+#define I915_LOG_MIN_TEX_REGION_SIZE 14
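+
+/* With I915_LOG_MIN_TEX_REGION_SIZE == 14 the smallest region is
+ * 1 << 14 == 16k, so the 255 regions cover at least ~4MB of texture
+ * space; the 255 cap is what keeps the sarea texList at the 2k table
+ * size noted above.
+ */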
+
+typedef struct _drm_i915_init {
+ enum {
+ I915_INIT_DMA = 0x01,
+ I915_CLEANUP_DMA = 0x02,
+ I915_RESUME_DMA = 0x03,
+
+ /* Since this struct isn't versioned, just use a new
+ * 'func' code to indicate the presence of dri2 sarea
+ * info. */
+ I915_INIT_DMA2 = 0x04
+ } func;
+ unsigned int mmio_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+ unsigned int chipset;
+ unsigned int sarea_handle;
+} drm_i915_init_t;
+
+typedef struct drm_i915_sarea {
+ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
+ int last_upload; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int ctxOwner; /* last context to upload state */
+ int texAge;
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+ int perf_boxes; /* performance boxes to be displayed */
+ int width, height; /* screen size in pixels */
+
+ drm_handle_t front_handle;
+ int front_offset;
+ int front_size;
+
+ drm_handle_t back_handle;
+ int back_offset;
+ int back_size;
+
+ drm_handle_t depth_handle;
+ int depth_offset;
+ int depth_size;
+
+ drm_handle_t tex_handle;
+ int tex_offset;
+ int tex_size;
+ int log_tex_granularity;
+ int pitch;
+ int rotation; /* 0, 90, 180 or 270 */
+ int rotated_offset;
+ int rotated_size;
+ int rotated_pitch;
+ int virtualX, virtualY;
+
+ unsigned int front_tiled;
+ unsigned int back_tiled;
+ unsigned int depth_tiled;
+ unsigned int rotated_tiled;
+ unsigned int rotated2_tiled;
+
+ int planeA_x;
+ int planeA_y;
+ int planeA_w;
+ int planeA_h;
+ int planeB_x;
+ int planeB_y;
+ int planeB_w;
+ int planeB_h;
+
+ /* Triple buffering */
+ drm_handle_t third_handle;
+ int third_offset;
+ int third_size;
+ unsigned int third_tiled;
+
+ /* buffer object handles for the static buffers. May change
+ * over the lifetime of the client, though it doesn't in our current
+ * implementation.
+ */
+ unsigned int front_bo_handle;
+ unsigned int back_bo_handle;
+ unsigned int third_bo_handle;
+ unsigned int depth_bo_handle;
+} drm_i915_sarea_t;
+
+/* Driver specific fence types and classes.
+ */
+
+/* The only fence class we support */
+#define DRM_I915_FENCE_CLASS_ACCEL 0
+/* Fence type that guarantees read-write flush */
+#define DRM_I915_FENCE_TYPE_RW 2
+/* MI_FLUSH programmed just before the fence */
+#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
+
+/* Flags for perf_boxes
+ */
+#define I915_BOX_RING_EMPTY 0x1
+#define I915_BOX_FLIP 0x2
+#define I915_BOX_WAIT 0x4
+#define I915_BOX_TEXTURE_LOAD 0x8
+#define I915_BOX_LOST_CONTEXT 0x10
+
+/* I915 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_I915_INIT 0x00
+#define DRM_I915_FLUSH 0x01
+#define DRM_I915_FLIP 0x02
+#define DRM_I915_BATCHBUFFER 0x03
+#define DRM_I915_IRQ_EMIT 0x04
+#define DRM_I915_IRQ_WAIT 0x05
+#define DRM_I915_GETPARAM 0x06
+#define DRM_I915_SETPARAM 0x07
+#define DRM_I915_ALLOC 0x08
+#define DRM_I915_FREE 0x09
+#define DRM_I915_INIT_HEAP 0x0a
+#define DRM_I915_CMDBUFFER 0x0b
+#define DRM_I915_DESTROY_HEAP 0x0c
+#define DRM_I915_SET_VBLANK_PIPE 0x0d
+#define DRM_I915_GET_VBLANK_PIPE 0x0e
+#define DRM_I915_VBLANK_SWAP 0x0f
+#define DRM_I915_MMIO 0x10
+#define DRM_I915_HWS_ADDR 0x11
+#define DRM_I915_EXECBUFFER 0x12
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_PIN 0x15
+#define DRM_I915_GEM_UNPIN 0x16
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
+#define DRM_I915_GEM_ENTERVT 0x19
+#define DRM_I915_GEM_LEAVEVT 0x1a
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
+#define DRM_I915_GEM_SW_FINISH 0x20
+#define DRM_I915_GEM_SET_TILING 0x21
+#define DRM_I915_GEM_GET_TILING 0x22
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT 0x24
+#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
+#define DRM_I915_GEM_MADVISE 0x26
+#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
+#define DRM_I915_OVERLAY_ATTRS 0x28
+#define DRM_I915_GEM_EXECBUFFER2 0x29
+#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+
+#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+#define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t)
+#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
+#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
+#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
+#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
+#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
+#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
+#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
+#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
+#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
+#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
+#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
+#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+
+/* Asynchronous page flipping:
+ */
+typedef struct drm_i915_flip {
+ /*
+ * This is really talking about planes, and we could rename it
+ * except for the fact that some of the duplicated i915_drm.h files
+ * out there check for HAVE_I915_FLIP and so might pick up this
+ * version.
+ */
+ int pipes;
+} drm_i915_flip_t;
+
+/* Allow drivers to submit batchbuffers directly to hardware, relying
+ * on the security mechanisms provided by hardware.
+ */
+typedef struct drm_i915_batchbuffer {
+ int start; /* agp offset */
+ int used; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer_t;
+
+/* As above, but pass a pointer to userspace buffer which can be
+ * validated by the kernel prior to sending to hardware.
+ */
+typedef struct _drm_i915_cmdbuffer {
+ char __user *buf; /* pointer to userspace command buffer */
+ int sz; /* nr bytes in buf */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer_t;
+
+/* Userspace can request & wait on irq's:
+ */
+typedef struct drm_i915_irq_emit {
+ int __user *irq_seq;
+} drm_i915_irq_emit_t;
+
+typedef struct drm_i915_irq_wait {
+ int irq_seq;
+} drm_i915_irq_wait_t;
+
+/* Ioctl to query kernel params:
+ */
+#define I915_PARAM_IRQ_ACTIVE 1
+#define I915_PARAM_ALLOW_BATCHBUFFER 2
+#define I915_PARAM_LAST_DISPATCH 3
+#define I915_PARAM_CHIPSET_ID 4
+#define I915_PARAM_HAS_GEM 5
+#define I915_PARAM_NUM_FENCES_AVAIL 6
+#define I915_PARAM_HAS_OVERLAY 7
+#define I915_PARAM_HAS_PAGEFLIPPING 8
+#define I915_PARAM_HAS_EXECBUF2 9
+#define I915_PARAM_HAS_BSD 10
+#define I915_PARAM_HAS_BLT 11
+#define I915_PARAM_HAS_RELAXED_FENCING 12
+#define I915_PARAM_HAS_COHERENT_RINGS 13
+#define I915_PARAM_HAS_EXEC_CONSTANTS 14
+#define I915_PARAM_HAS_RELAXED_DELTA 15
+#define I915_PARAM_HAS_GEN7_SOL_RESET 16
+#define I915_PARAM_HAS_LLC 17
+
+typedef struct drm_i915_getparam {
+ int param;
+ int __user *value;
+} drm_i915_getparam_t;
+
+/* Ioctl to set kernel params:
+ */
+#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
+#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
+#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
+#define I915_SETPARAM_NUM_USED_FENCES 4
+
+typedef struct drm_i915_setparam {
+ int param;
+ int value;
+} drm_i915_setparam_t;
+
+/* A memory manager for regions of shared memory:
+ */
+#define I915_MEM_REGION_AGP 1
+
+typedef struct drm_i915_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int __user *region_offset; /* offset from start of fb or agp */
+} drm_i915_mem_alloc_t;
+
+typedef struct drm_i915_mem_free {
+ int region;
+ int region_offset;
+} drm_i915_mem_free_t;
+
+typedef struct drm_i915_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_i915_mem_init_heap_t;
+
+/* Allow memory manager to be torn down and re-initialized (eg on
+ * rotate):
+ */
+typedef struct drm_i915_mem_destroy_heap {
+ int region;
+} drm_i915_mem_destroy_heap_t;
+
+/* Allow X server to configure which pipes to monitor for vblank signals
+ */
+#define DRM_I915_VBLANK_PIPE_A 1
+#define DRM_I915_VBLANK_PIPE_B 2
+
+typedef struct drm_i915_vblank_pipe {
+ int pipe;
+} drm_i915_vblank_pipe_t;
+
+/* Schedule buffer swap at given vertical blank:
+ */
+typedef struct drm_i915_vblank_swap {
+ drm_drawable_t drawable;
+ enum drm_vblank_seq_type seqtype;
+ unsigned int sequence;
+} drm_i915_vblank_swap_t;
+
+#define I915_MMIO_READ 0
+#define I915_MMIO_WRITE 1
+
+#define I915_MMIO_MAY_READ 0x1
+#define I915_MMIO_MAY_WRITE 0x2
+
+#define MMIO_REGS_IA_PRIMATIVES_COUNT 0
+#define MMIO_REGS_IA_VERTICES_COUNT 1
+#define MMIO_REGS_VS_INVOCATION_COUNT 2
+#define MMIO_REGS_GS_PRIMITIVES_COUNT 3
+#define MMIO_REGS_GS_INVOCATION_COUNT 4
+#define MMIO_REGS_CL_PRIMITIVES_COUNT 5
+#define MMIO_REGS_CL_INVOCATION_COUNT 6
+#define MMIO_REGS_PS_INVOCATION_COUNT 7
+#define MMIO_REGS_PS_DEPTH_COUNT 8
+
+typedef struct drm_i915_mmio_entry {
+ unsigned int flag;
+ unsigned int offset;
+ unsigned int size;
+} drm_i915_mmio_entry_t;
+
+typedef struct drm_i915_mmio {
+ unsigned int read_write:1;
+ unsigned int reg:31;
+ void __user *data;
+} drm_i915_mmio_t;
+
+typedef struct drm_i915_hws_addr {
+ uint64_t addr;
+} drm_i915_hws_addr_t;
+
+/*
+ * Relocation header is 4 uint32_ts
+ * 0 - 32 bit reloc count
+ * 1 - 32-bit relocation type
+ * 2-3 - 64-bit user buffer handle ptr for another list of relocs.
+ */
+#define I915_RELOC_HEADER 4
+
+/*
+ * type 0 relocation has 4-uint32_t stride
+ * 0 - offset into buffer
+ * 1 - delta to add in
+ * 2 - buffer handle
+ * 3 - reserved (for optimisations later).
+ */
+/*
+ * type 1 relocation has 4-uint32_t stride.
+ * Hangs off the first item in the op list.
+ * Performed after all validations are done.
+ * For performance, try to group relocs that target the same
+ * relocatee together.
+ * 0 - offset into buffer
+ * 1 - delta to add in
+ * 2 - buffer index in op list.
+ * 3 - relocatee index in op list.
+ */
+#define I915_RELOC_TYPE_0 0
+#define I915_RELOC0_STRIDE 4
+#define I915_RELOC_TYPE_1 1
+#define I915_RELOC1_STRIDE 4
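+
+/*
+ * Illustrative C view of the reloc stream described above (the ABI is
+ * the raw uint32_t layout, not these hypothetical structs):
+ *
+ *	struct reloc_header {
+ *		uint32_t count;		32-bit reloc count
+ *		uint32_t type;		I915_RELOC_TYPE_0 or _1
+ *		uint32_t next[2];	64-bit user ptr to more relocs
+ *	};
+ *	struct reloc_type0 {		I915_RELOC0_STRIDE uint32_ts
+ *		uint32_t offset;	offset into buffer
+ *		uint32_t delta;		delta to add in
+ *		uint32_t handle;	buffer handle
+ *		uint32_t reserved;
+ *	};
+ */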
+
+
+struct drm_i915_op_arg {
+ uint64_t next;
+ uint64_t reloc_ptr;
+ int handled;
+ unsigned int pad64;
+ union {
+ struct drm_bo_op_req req;
+ struct drm_bo_arg_rep rep;
+ } d;
+
+};
+
+struct drm_i915_execbuffer {
+ uint64_t ops_list;
+ uint32_t num_buffers;
+ struct drm_i915_batchbuffer batch;
+ drm_context_t context; /* for lockless use in the future */
+ struct drm_fence_arg fence_arg;
+};
+
+struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_end;
+};
+
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
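Sketch (illustrative, not part of this patch): data_ptr is a fixed-width
uint64_t rather than a pointer so the struct layout matches between 32- and
64-bit userland; addresses are stored through an explicit cast. Assumes
<string.h>, <stdint.h>, <sys/ioctl.h> and an open DRM fd.

	static void
	example_pread(int fd, uint32_t handle, void *buf, uint64_t len)
	{
		struct drm_i915_gem_pread pr;

		memset(&pr, 0, sizeof(pr));
		pr.handle = handle;
		pr.size = len;		/* read len bytes from offset 0 */
		pr.data_ptr = (uint64_t)(uintptr_t)buf;
		(void)ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pr);
	}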
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+	/** Returned pointer to where the data was mapped. */
+ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_mmap_gtt {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+ /** Handle for the object */
+ uint32_t handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+	 * It's appealing to make this an index into the mm_validate_entry
+	 * list to refer to the buffer, but using a handle allows the driver
+	 * to create a relocation list for state buffers once and not re-write
+	 * it per exec using the buffer.
+ */
+ uint32_t target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ uint32_t delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ uint64_t offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ uint64_t presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ uint32_t read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ uint32_t write_domain;
+};
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT 0x00000040
+/** @} */
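Usage sketch (illustrative, not part of this patch): before writing a buffer
through a CPU mapping of the aperture, userland moves the object into the GTT
domain so the kernel can flush any stale caches first. Assumes <sys/ioctl.h>
and an open DRM fd.

	static void
	example_set_to_gtt_domain(int fd, uint32_t handle)
	{
		struct drm_i915_gem_set_domain sd;

		sd.handle = handle;
		sd.read_domains = I915_GEM_DOMAIN_GTT;
		sd.write_domain = I915_GEM_DOMAIN_GTT;
		(void)ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
	}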
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ uint32_t handle;
+
+ /** Number of relocations to be performed on this buffer */
+ uint32_t relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ uint64_t relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their relocations to be
+	 * performed on them.
+ *
+ * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ uint64_t buffers_ptr;
+ uint32_t buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ uint32_t batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
+};
+
+struct drm_i915_gem_exec_object2 {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ uint32_t handle;
+
+ /** Number of relocations to be performed on this buffer */
+ uint32_t relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ uint64_t relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ uint64_t offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+ uint64_t flags;
+ uint64_t rsvd1;
+ uint64_t rsvd2;
+};
+
+struct drm_i915_gem_execbuffer2 {
+ /**
+ * List of gem_exec_object2 structs
+ */
+ uint64_t buffers_ptr;
+ uint32_t buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ uint32_t batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ uint64_t cliprects_ptr;
+#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_DEFAULT (0<<0)
+#define I915_EXEC_RENDER (1<<0)
+#define I915_EXEC_BSD (2<<0)
+#define I915_EXEC_BLT (3<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK (3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
+ uint64_t flags;
+ uint64_t rsvd1;
+ uint64_t rsvd2;
+};
+
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET (1<<8)
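Submission sketch (illustrative, not part of this patch): one batch buffer
whose second dword references "target", patched via a single relocation. Per
the comment on struct drm_i915_gem_execbuffer above, relocation targets must
appear in the buffer list before the buffers that reference them. Assumes
<string.h>, <sys/ioctl.h> and valid GEM handles.

	static void
	example_exec(int fd, uint32_t batch, uint32_t target,
	    uint32_t batch_bytes)
	{
		struct drm_i915_gem_relocation_entry reloc;
		struct drm_i915_gem_exec_object2 objs[2];
		struct drm_i915_gem_execbuffer2 execbuf;

		memset(&reloc, 0, sizeof(reloc));
		reloc.target_handle = target;
		reloc.offset = 4;	/* byte offset of the patched dword */
		reloc.read_domains = I915_GEM_DOMAIN_RENDER;

		memset(objs, 0, sizeof(objs));
		objs[0].handle = target;	/* target listed first */
		objs[1].handle = batch;		/* batch buffer is last */
		objs[1].relocation_count = 1;
		objs[1].relocs_ptr = (uint64_t)(uintptr_t)&reloc;

		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = (uint64_t)(uintptr_t)objs;
		execbuf.buffer_count = 2;
		execbuf.batch_len = batch_bytes;
		execbuf.flags = I915_EXEC_RENDER;
		(void)ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
	}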
+
+struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ uint32_t handle;
+ uint32_t pad;
+
+ /** alignment required within the aperture */
+ uint64_t alignment;
+
+ /** Returned GTT offset of the buffer. */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ uint32_t handle;
+
+ /** Return busy status (1 if busy, 0 if idle) */
+ uint32_t busy;
+};
+
+#define I915_TILING_NONE 0
+#define I915_TILING_X 1
+#define I915_TILING_Y 2
+
+#define I915_BIT_6_SWIZZLE_NONE 0
+#define I915_BIT_6_SWIZZLE_9 1
+#define I915_BIT_6_SWIZZLE_9_10 2
+#define I915_BIT_6_SWIZZLE_9_11 3
+#define I915_BIT_6_SWIZZLE_9_10_11 4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17 6
+#define I915_BIT_6_SWIZZLE_9_10_17 7
+
+struct drm_i915_gem_set_tiling {
+ /** Handle of the buffer to have its tiling state updated */
+ uint32_t handle;
+
+ /**
+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ *
+ * This value is to be set on request, and will be updated by the
+ * kernel on successful return with the actual chosen tiling layout.
+ *
+ * The tiling mode may be demoted to I915_TILING_NONE when the system
+ * has bit 6 swizzling that can't be managed correctly by GEM.
+ *
+ * Buffer contents become undefined when changing tiling_mode.
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Stride in bytes for the object when in I915_TILING_X or
+ * I915_TILING_Y.
+ */
+ uint32_t stride;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
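Sketch (illustrative, not part of this patch): requesting X tiling for a
buffer. As documented above, the kernel may demote the request and reports
the chosen tiling and swizzle mode on return.

	static void
	example_set_tiling(int fd, uint32_t handle, uint32_t stride)
	{
		struct drm_i915_gem_set_tiling st;

		memset(&st, 0, sizeof(st));
		st.handle = handle;
		st.tiling_mode = I915_TILING_X;
		st.stride = stride;	/* bytes per tiled row */
		if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st) != 0)
			return;
		/* st.tiling_mode may have been demoted to I915_TILING_NONE;
		 * st.swizzle_mode tells the CPU path how to address pages. */
	}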
+
+struct drm_i915_gem_get_tiling {
+ /** Handle of the buffer to get tiling state for. */
+ uint32_t handle;
+
+ /**
+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
+
+struct drm_i915_gem_get_aperture {
+ /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
+ uint64_t aper_size;
+
+ /**
+ * Available space in the aperture used by i915_gem_execbuffer, in
+ * bytes
+ */
+ uint64_t aper_available_size;
+};
+
+struct drm_i915_get_pipe_from_crtc_id {
+	/** ID of CRTC being requested */
+ uint32_t crtc_id;
+
+	/** pipe of requested CRTC */
+ uint32_t pipe;
+};
+
+#define I915_MADV_WILLNEED 0
+#define I915_MADV_DONTNEED 1
+#define I915_MADV_PURGED_INTERNAL 2 /* internal state */
+
+struct drm_i915_gem_madvise {
+ /** Handle of the buffer to change the backing store advice */
+ uint32_t handle;
+
+ /* Advice: either the buffer will be needed again in the near future,
+	 * or won't be and could be discarded under memory pressure.
+ */
+ uint32_t madv;
+
+ /** Whether the backing store still exists. */
+ uint32_t retained;
+};
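Sketch (illustrative, not part of this patch): a userland BO cache marking an
idle buffer purgeable, and learning via "retained" whether the backing pages
survived.

	static int
	example_mark_purgeable(int fd, uint32_t handle)
	{
		struct drm_i915_gem_madvise madv;

		madv.handle = handle;
		madv.madv = I915_MADV_DONTNEED;
		madv.retained = 1;
		(void)ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
		return (madv.retained);	/* 0: backing store discarded */
	}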
+
+#define I915_OVERLAY_TYPE_MASK 0xff
+#define I915_OVERLAY_YUV_PLANAR 0x01
+#define I915_OVERLAY_YUV_PACKED 0x02
+#define I915_OVERLAY_RGB 0x03
+
+#define I915_OVERLAY_DEPTH_MASK 0xff00
+#define I915_OVERLAY_RGB24 0x1000
+#define I915_OVERLAY_RGB16 0x2000
+#define I915_OVERLAY_RGB15 0x3000
+#define I915_OVERLAY_YUV422 0x0100
+#define I915_OVERLAY_YUV411 0x0200
+#define I915_OVERLAY_YUV420 0x0300
+#define I915_OVERLAY_YUV410 0x0400
+
+#define I915_OVERLAY_SWAP_MASK 0xff0000
+#define I915_OVERLAY_NO_SWAP 0x000000
+#define I915_OVERLAY_UV_SWAP 0x010000
+#define I915_OVERLAY_Y_SWAP 0x020000
+#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
+
+#define I915_OVERLAY_FLAGS_MASK 0xff000000
+#define I915_OVERLAY_ENABLE 0x01000000
+
+struct drm_intel_overlay_put_image {
+ /* various flags and src format description */
+ uint32_t flags;
+ /* source picture description */
+ uint32_t bo_handle;
+ /* stride values and offsets are in bytes, buffer relative */
+ uint16_t stride_Y; /* stride for packed formats */
+ uint16_t stride_UV;
+	uint32_t offset_Y; /* offset for packed formats */
+ uint32_t offset_U;
+ uint32_t offset_V;
+ /* in pixels */
+ uint16_t src_width;
+ uint16_t src_height;
+	/* to compensate for scaling factors on partially covered surfaces */
+ uint16_t src_scan_width;
+ uint16_t src_scan_height;
+ /* output crtc description */
+ uint32_t crtc_id;
+ uint16_t dst_x;
+ uint16_t dst_y;
+ uint16_t dst_width;
+ uint16_t dst_height;
+};
+
+/* flags */
+#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
+#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+struct drm_intel_overlay_attrs {
+ uint32_t flags;
+ uint32_t color_key;
+ int32_t brightness;
+ uint32_t contrast;
+ uint32_t saturation;
+ uint32_t gamma0;
+ uint32_t gamma1;
+ uint32_t gamma2;
+ uint32_t gamma3;
+ uint32_t gamma4;
+ uint32_t gamma5;
+};
+
+/*
+ * Intel sprite handling
+ *
+ * Color keying works with a min/mask/max tuple. Both source and destination
+ * color keying is allowed.
+ *
+ * Source keying:
+ * Sprite pixels within the min & max values, masked against the color channels
+ * specified in the mask field, will be transparent. All other pixels will
+ * be displayed on top of the primary plane. For RGB surfaces, only the min
+ * and mask fields will be used; ranged compares are not allowed.
+ *
+ * Destination keying:
+ * Primary plane pixels that match the min value, masked against the color
+ * channels specified in the mask field, will be replaced by corresponding
+ * pixels from the sprite plane.
+ *
+ * Note that source & destination keying are exclusive; only one can be
+ * active on a given plane.
+ */
+
+#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_DESTINATION (1<<1)
+#define I915_SET_COLORKEY_SOURCE (1<<2)
+struct drm_intel_sprite_colorkey {
+ uint32_t plane_id;
+ uint32_t min_value;
+ uint32_t channel_mask;
+ uint32_t max_value;
+ uint32_t flags;
+};
+
+#endif /* _I915_DRM_H_ */
diff --git a/sys/dev/drm2/i915/i915_drv.c b/sys/dev/drm2/i915/i915_drv.c
new file mode 100644
index 0000000..79ab6d5
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_drv.c
@@ -0,0 +1,821 @@
+/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
+ * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
+ */
+/*-
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_mm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/drm_pciids.h>
+#include <dev/drm2/i915/intel_drv.h>
+
+/* i915_PCI_IDS comes from drm_pciids.h, generated from drm_pciids.txt. */
+static drm_pci_id_list_t i915_pciidlist[] = {
+ i915_PCI_IDS
+};
+
+static const struct intel_device_info intel_i830_info = {
+ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_845g_info = {
+ .gen = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+ .gen = 2, .is_i85x = 1, .is_mobile = 1,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+ .gen = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+static const struct intel_device_info intel_i915gm_info = {
+ .gen = 3, .is_mobile = 1,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+};
+static const struct intel_device_info intel_i945g_info = {
+ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+static const struct intel_device_info intel_i945gm_info = {
+ .gen = 3, .is_i945gm = 1, .is_mobile = 1,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+ .gen = 4, .is_broadwater = 1,
+ .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+ .gen = 4, .is_crestline = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .supports_tv = 1,
+};
+
+static const struct intel_device_info intel_g33_info = {
+ .gen = 3, .is_g33 = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_g45_info = {
+ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+ .gen = 4, .is_g4x = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .supports_tv = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+ .gen = 5,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+ .gen = 5, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0, /* disabled due to buggy hardware */
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+ .gen = 6,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+ .gen = 6, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+ .is_ivybridge = 1, .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+ .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+};
+
+#define INTEL_VGA_DEVICE(id, info_) { \
+ .device = id, \
+ .info = info_, \
+}
+
+static const struct intel_gfx_device_id {
+ int device;
+ const struct intel_device_info *info;
+} pciidlist[] = { /* aka */
+ INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+ INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),
+ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+ INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+ INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
+ INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
+ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
+ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+ {0, 0}
+};
+
+static int i915_drm_freeze(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+ int error;
+
+ dev_priv = dev->dev_private;
+ drm_kms_helper_poll_disable(dev);
+
+#if 0
+ pci_save_state(dev->pdev);
+#endif
+
+ DRM_LOCK(dev);
+ /* If KMS is active, we do the leavevt stuff here */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ error = -i915_gem_idle(dev);
+ if (error) {
+ DRM_UNLOCK(dev);
+ device_printf(dev->device,
+ "GEM idle failed, resume might fail\n");
+ return (error);
+ }
+ drm_irq_uninstall(dev);
+ }
+
+ i915_save_state(dev);
+
+ intel_opregion_fini(dev);
+
+ /* Modeset on resume, not lid events */
+ dev_priv->modeset_on_lid = 0;
+ DRM_UNLOCK(dev);
+
+ return 0;
+}
+
+static int
+i915_suspend(device_t kdev)
+{
+ struct drm_device *dev;
+ int error;
+
+ dev = device_get_softc(kdev);
+ if (dev == NULL || dev->dev_private == NULL) {
+ DRM_ERROR("DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ DRM_DEBUG_KMS("starting suspend\n");
+ error = i915_drm_freeze(dev);
+ if (error)
+ return (error);
+
+ error = bus_generic_suspend(kdev);
+ DRM_DEBUG_KMS("finished suspend %d\n", error);
+ return (error);
+}
+
+static int i915_drm_thaw(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int error = 0;
+
+ DRM_LOCK(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ i915_gem_restore_gtt_mappings(dev);
+ }
+
+ i915_restore_state(dev);
+ intel_opregion_setup(dev);
+
+ /* KMS EnterVT equivalent */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ dev_priv->mm.suspended = 0;
+
+ error = i915_gem_init_hw(dev);
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_init_pch_refclk(dev);
+
+ DRM_UNLOCK(dev);
+ sx_xlock(&dev->mode_config.mutex);
+ drm_mode_config_reset(dev);
+ sx_xunlock(&dev->mode_config.mutex);
+ drm_irq_install(dev);
+
+ sx_xlock(&dev->mode_config.mutex);
+ /* Resume the modeset for every activated CRTC */
+ drm_helper_resume_force_mode(dev);
+ sx_xunlock(&dev->mode_config.mutex);
+
+ if (IS_IRONLAKE_M(dev))
+ ironlake_enable_rc6(dev);
+ DRM_LOCK(dev);
+ }
+
+ intel_opregion_init(dev);
+
+ dev_priv->modeset_on_lid = 0;
+
+ DRM_UNLOCK(dev);
+
+ return error;
+}
+
+static int
+i915_resume(device_t kdev)
+{
+ struct drm_device *dev;
+ int ret;
+
+ dev = device_get_softc(kdev);
+ DRM_DEBUG_KMS("starting resume\n");
+#if 0
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ pci_set_master(dev->pdev);
+#endif
+
+ ret = -i915_drm_thaw(dev);
+ if (ret != 0)
+ return (ret);
+
+ drm_kms_helper_poll_enable(dev);
+ ret = bus_generic_resume(kdev);
+ DRM_DEBUG_KMS("finished resume %d\n", ret);
+ return (ret);
+}
+
+static int
+i915_probe(device_t kdev)
+{
+
+ return drm_probe(kdev, i915_pciidlist);
+}
+
+int i915_modeset;
+
+static int
+i915_attach(device_t kdev)
+{
+ struct drm_device *dev;
+
+ dev = device_get_softc(kdev);
+ if (i915_modeset == 1)
+ i915_driver_info.driver_features |= DRIVER_MODESET;
+ dev->driver = &i915_driver_info;
+ return (drm_attach(kdev, i915_pciidlist));
+}
+
+const struct intel_device_info *
+i915_get_device_id(int device)
+{
+ const struct intel_gfx_device_id *did;
+
+ for (did = &pciidlist[0]; did->device != 0; did++) {
+ if (did->device != device)
+ continue;
+ return (did->info);
+ }
+ return (NULL);
+}
+
+static device_method_t i915_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, i915_probe),
+ DEVMETHOD(device_attach, i915_attach),
+ DEVMETHOD(device_suspend, i915_suspend),
+ DEVMETHOD(device_resume, i915_resume),
+ DEVMETHOD(device_detach, drm_detach),
+ DEVMETHOD_END
+};
+
+static driver_t i915_driver = {
+ "drmn",
+ i915_methods,
+ sizeof(struct drm_device)
+};
+
+extern devclass_t drm_devclass;
+DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
+ SI_ORDER_ANY);
+MODULE_DEPEND(i915kms, drmn, 1, 1, 1);
+MODULE_DEPEND(i915kms, agp, 1, 1, 1);
+MODULE_DEPEND(i915kms, iicbus, 1, 1, 1);
+MODULE_DEPEND(i915kms, iic, 1, 1, 1);
+MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
+
+int intel_iommu_enabled = 0;
+TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
+
+int i915_semaphores = -1;
+TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
+static int i915_try_reset = 1;
+TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
+unsigned int i915_lvds_downclock = 0;
+TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
+int i915_vbt_sdvo_panel_type = -1;
+TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
+unsigned int i915_powersave = 1;
+TUNABLE_INT("drm.i915.powersave", &i915_powersave);
+int i915_enable_fbc = 0;
+TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
+int i915_enable_rc6 = 0;
+TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
+int i915_panel_use_ssc = -1;
+TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
+int i915_panel_ignore_lid = 0;
+TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
+int i915_modeset = 1;
+TUNABLE_INT("drm.i915.modeset", &i915_modeset);
+int i915_enable_ppgtt = -1;
+TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
+int i915_enable_hangcheck = 1;
+TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
+
+#define PCI_VENDOR_INTEL 0x8086
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+
+void
+intel_detect_pch(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+ device_t pch;
+ uint32_t id;
+
+ dev_priv = dev->dev_private;
+ pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
+ if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
+ id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
+ if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_IBX;
+ DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+ /* PantherPoint is CPT compatible */
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ } else
+ DRM_DEBUG_KMS("No PCH detected\n");
+ } else
+ DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
+}
+
+void
+__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+ DELAY(10000);
+
+ I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ POSTING_READ(FORCEWAKE);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
+ DELAY(10000);
+}
+
+void
+__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+ DELAY(10000);
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ POSTING_READ(FORCEWAKE_MT);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+ DELAY(10000);
+}
+
+void
+gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+
+ mtx_lock(&dev_priv->gt_lock);
+ if (dev_priv->forcewake_count++ == 0)
+ dev_priv->display.force_wake_get(dev_priv);
+ mtx_unlock(&dev_priv->gt_lock);
+}
+
+static void
+gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+
+ gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+ if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
+ printf("MMIO read or write has been dropped %x\n", gtfifodbg);
+ I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+ }
+}
+
+void
+__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+void
+__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+void
+gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+
+ mtx_lock(&dev_priv->gt_lock);
+ if (--dev_priv->forcewake_count == 0)
+ dev_priv->display.force_wake_put(dev_priv);
+ mtx_unlock(&dev_priv->gt_lock);
+}
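Usage sketch (illustrative, not part of this patch): forcewake is
reference-counted under gt_lock, so callers simply bracket raw register
access with a get/put pair, and nesting is safe:

	gen6_gt_force_wake_get(dev_priv);
	val = I915_READ(reg);	/* GT guaranteed awake here */
	gen6_gt_force_wake_put(dev_priv);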
+
+int
+__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ DELAY(10000);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+ if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
+ printf("%s loop\n", __func__);
+ ++ret;
+ }
+ dev_priv->gt_fifo_count = fifo;
+ }
+ dev_priv->gt_fifo_count--;
+
+ return (ret);
+}
+
+static int
+i8xx_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int onems;
+
+ if (IS_I85X(dev))
+ return -ENODEV;
+
+ onems = hz / 1000;
+ if (onems == 0)
+ onems = 1;
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ I915_WRITE(DEBUG_RESET_I830,
+ DEBUG_RESET_DISPLAY |
+ DEBUG_RESET_RENDER |
+ DEBUG_RESET_FULL);
+ POSTING_READ(DEBUG_RESET_I830);
+ pause("i8xxrst1", onems);
+
+ I915_WRITE(DEBUG_RESET_I830, 0);
+ POSTING_READ(DEBUG_RESET_I830);
+ }
+
+ pause("i8xxrst2", onems);
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ return 0;
+}
+
+static int
+i965_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+
+ gdrst = pci_read_config(dev->device, I965_GDRST, 1);
+ return (gdrst & 0x1);
+}
+
+static int
+i965_do_reset(struct drm_device *dev, u8 flags)
+{
+ u8 gdrst;
+
+ /*
+ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+ * well as the reset bit (GR/bit 0). Setting the GR bit
+ * triggers the reset; when done, the hardware will clear it.
+ */
+ gdrst = pci_read_config(dev->device, I965_GDRST, 1);
+ pci_write_config(dev->device, I965_GDRST, gdrst | flags | 0x1, 1);
+
+ return (_intel_wait_for(dev, i965_reset_complete(dev), 500, 1,
+ "915rst"));
+}
+
+static int
+ironlake_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv;
+ u32 gdrst;
+
+ dev_priv = dev->dev_private;
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+ return (_intel_wait_for(dev,
+ (I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1) != 0,
+ 500, 1, "915rst"));
+}
+
+static int
+gen6_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ dev_priv = dev->dev_private;
+
+ /* Hold gt_lock across reset to prevent any register access
+ * with forcewake not set correctly
+ */
+ mtx_lock(&dev_priv->gt_lock);
+
+ /* Reset the chip */
+
+ /* GEN6_GDRST is not in the gt power well, no need to check
+ * for fifo space for the write or forcewake the chip for
+ * the read
+ */
+ I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
+
+ /* Spin waiting for the device to ack the reset request */
+ ret = _intel_wait_for(dev,
+ (I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
+ 500, 1, "915rst");
+
+ /* If reset with a user forcewake, try to restore, otherwise turn it off */
+ if (dev_priv->forcewake_count)
+ dev_priv->display.force_wake_get(dev_priv);
+ else
+ dev_priv->display.force_wake_put(dev_priv);
+
+ /* Restore fifo count */
+ dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+
+ mtx_unlock(&dev_priv->gt_lock);
+ return (ret);
+}
+
+int
+i915_reset(struct drm_device *dev, u8 flags)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ /*
+ * We really should only reset the display subsystem if we actually
+ * need to
+ */
+ bool need_display = true;
+ int ret;
+
+ if (!i915_try_reset)
+ return (0);
+
+ if (!sx_try_xlock(&dev->dev_struct_lock))
+ return (-EBUSY);
+
+ i915_gem_reset(dev);
+
+ ret = -ENODEV;
+ if (time_second - dev_priv->last_gpu_reset < 5) {
+ DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+ } else {
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ ret = gen6_do_reset(dev, flags);
+ break;
+ case 5:
+ ret = ironlake_do_reset(dev, flags);
+ break;
+ case 4:
+ ret = i965_do_reset(dev, flags);
+ break;
+ case 2:
+ ret = i8xx_do_reset(dev, flags);
+ break;
+ }
+ }
+ dev_priv->last_gpu_reset = time_second;
+ if (ret) {
+ DRM_ERROR("Failed to reset chip.\n");
+ DRM_UNLOCK(dev);
+ return (ret);
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET) ||
+ !dev_priv->mm.suspended) {
+ dev_priv->mm.suspended = 0;
+
+ i915_gem_init_swizzling(dev);
+
+ dev_priv->rings[RCS].init(&dev_priv->rings[RCS]);
+ if (HAS_BSD(dev))
+ dev_priv->rings[VCS].init(&dev_priv->rings[VCS]);
+ if (HAS_BLT(dev))
+ dev_priv->rings[BCS].init(&dev_priv->rings[BCS]);
+
+ i915_gem_init_ppgtt(dev);
+
+ drm_irq_uninstall(dev);
+ drm_mode_config_reset(dev);
+ DRM_UNLOCK(dev);
+ drm_irq_install(dev);
+ DRM_LOCK(dev);
+ }
+ DRM_UNLOCK(dev);
+
+ if (need_display) {
+ sx_xlock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ sx_xunlock(&dev->mode_config.mutex);
+ }
+
+ return (0);
+}
+
+#define __i915_read(x, y) \
+u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ u##x val = 0; \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ mtx_lock(&dev_priv->gt_lock); \
+ if (dev_priv->forcewake_count == 0) \
+ dev_priv->display.force_wake_get(dev_priv); \
+ val = DRM_READ##y(dev_priv->mmio_map, reg); \
+ if (dev_priv->forcewake_count == 0) \
+ dev_priv->display.force_wake_put(dev_priv); \
+ mtx_unlock(&dev_priv->gt_lock); \
+ } else { \
+ val = DRM_READ##y(dev_priv->mmio_map, reg); \
+ } \
+ trace_i915_reg_rw(false, reg, val, sizeof(val)); \
+ return val; \
+}
+
+__i915_read(8, 8)
+__i915_read(16, 16)
+__i915_read(32, 32)
+__i915_read(64, 64)
+#undef __i915_read
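For reference (expansion sketch, not part of this patch): each
__i915_read(x, y) instantiation above generates a function such as

	u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg);

which takes forcewake for registers below the forcewake boundary and traces
every access; the I915_READ()-style register macros are expected to resolve
to these helpers (and, below, to the generated i915_write8/16/32/64).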
+
+#define __i915_write(x, y) \
+void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ u32 __fifo_ret = 0; \
+ trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+ } \
+ DRM_WRITE##y(dev_priv->mmio_map, reg, val); \
+ if (__predict_false(__fifo_ret)) { \
+ gen6_gt_check_fifodbg(dev_priv); \
+ } \
+}
+__i915_write(8, 8)
+__i915_write(16, 16)
+__i915_write(32, 32)
+__i915_write(64, 64)
+#undef __i915_write
diff --git a/sys/dev/drm2/i915/i915_drv.h b/sys/dev/drm2/i915/i915_drv.h
new file mode 100644
index 0000000..bf1f992
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_drv.h
@@ -0,0 +1,1481 @@
+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _I915_DRV_H_
+#define _I915_DRV_H_
+
+#include <dev/agp/agp_i810.h>
+#include <dev/drm2/drm_mm.h>
+#include <dev/drm2/i915/i915_reg.h>
+#include <dev/drm2/i915/intel_ringbuffer.h>
+#include <dev/drm2/i915/intel_bios.h>
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
+
+#define DRIVER_NAME "i915"
+#define DRIVER_DESC "Intel Graphics"
+#define DRIVER_DATE "20080730"
+
+MALLOC_DECLARE(DRM_I915_GEM);
+
+enum pipe {
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ I915_MAX_PIPES
+};
+#define pipe_name(p) ((p) + 'A')
+#define I915_NUM_PIPE 2
+
+enum plane {
+ PLANE_A = 0,
+ PLANE_B,
+ PLANE_C,
+};
+#define plane_name(p) ((p) + 'A')
+
+#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
+#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ * - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 6
+#define DRIVER_PATCHLEVEL 0
+
+#define WATCH_COHERENCY 0
+#define WATCH_BUF 0
+#define WATCH_EXEC 0
+#define WATCH_LRU 0
+#define WATCH_RELOC 0
+#define WATCH_INACTIVE 0
+#define WATCH_PWRITE 0
+
+#define I915_GEM_PHYS_CURSOR_0 1
+#define I915_GEM_PHYS_CURSOR_1 2
+#define I915_GEM_PHYS_OVERLAY_REGS 3
+#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
+
+struct drm_i915_gem_phys_object {
+ int id;
+ drm_dma_handle_t *handle;
+ struct drm_i915_gem_object *cur_obj;
+};
+
+struct drm_i915_private;
+
+struct drm_i915_display_funcs {
+ void (*dpms)(struct drm_crtc *crtc, int mode);
+ bool (*fbc_enabled)(struct drm_device *dev);
+ void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+ void (*disable_fbc)(struct drm_device *dev);
+ int (*get_display_clock_speed)(struct drm_device *dev);
+ int (*get_fifo_size)(struct drm_device *dev, int plane);
+ void (*update_wm)(struct drm_device *dev);
+ void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size);
+ int (*crtc_mode_set)(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+ void (*write_eld)(struct drm_connector *connector,
+ struct drm_crtc *crtc);
+ void (*fdi_link_train)(struct drm_crtc *crtc);
+ void (*init_clock_gating)(struct drm_device *dev);
+ void (*init_pch_clock_gating)(struct drm_device *dev);
+ int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
+ int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y);
+ /* clock updates for mode set */
+ /* cursor updates */
+ /* render clock increase/decrease */
+ /* display clock increase/decrease */
+ /* pll clock increase/decrease */
+};
+
+struct intel_device_info {
+ u8 gen;
+ u8 is_mobile:1;
+ u8 is_i85x:1;
+ u8 is_i915g:1;
+ u8 is_i945gm:1;
+ u8 is_g33:1;
+ u8 need_gfx_hws:1;
+ u8 is_g4x:1;
+ u8 is_pineview:1;
+ u8 is_broadwater:1;
+ u8 is_crestline:1;
+ u8 is_ivybridge:1;
+ u8 has_fbc:1;
+ u8 has_pipe_cxsr:1;
+ u8 has_hotplug:1;
+ u8 cursor_needs_physical:1;
+ u8 has_overlay:1;
+ u8 overlay_needs_physical:1;
+ u8 supports_tv:1;
+ u8 has_bsd_ring:1;
+ u8 has_blt_ring:1;
+ u8 has_llc:1;
+};
+
+#define I915_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES 1024
+struct i915_hw_ppgtt {
+ unsigned num_pd_entries;
+ vm_page_t *pt_pages;
+ uint32_t pd_offset;
+ vm_paddr_t *pt_dma_addr;
+ vm_paddr_t scratch_page_dma_addr;
+};
+
+enum no_fbc_reason {
+ FBC_NO_OUTPUT, /* no outputs enabled to compress */
+ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+ FBC_BAD_PLANE, /* fbc not supported on plane */
+ FBC_NOT_TILED, /* buffer not tiled */
+ FBC_MULTIPLE_PIPES, /* more than one pipe active */
+ FBC_MODULE_PARAM,
+};
+
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ int start;
+ int size;
+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ struct opregion_asle *asle;
+ void *vbt;
+ u32 *lid_state;
+};
+#define OPREGION_SIZE (8*1024)
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+/* 16 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 5
+
+struct drm_i915_fence_reg {
+ struct list_head lru_list;
+ struct drm_i915_gem_object *obj;
+ uint32_t setup_seqno;
+ int pin_count;
+};
+
+struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 ddc_pin;
+};
+
+enum intel_pch {
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint PCH */
+};
+
+#define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+
+struct intel_fbdev;
+struct intel_fbc_work;
+
+typedef struct drm_i915_private {
+ struct drm_device *dev;
+
+ device_t *gmbus_bridge;
+ device_t *bbbus_bridge;
+ device_t *gmbus;
+ device_t *bbbus;
+ /** gmbus_sx protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct sx gmbus_sx;
+
+ int has_gem;
+ int relative_constants_mode;
+
+ drm_local_map_t *sarea;
+ drm_local_map_t *mmio_map;
+
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ struct mtx gt_lock;
+
+ drm_i915_sarea_t *sarea_priv;
+ /* drm_i915_ring_buffer_t ring; */
+ struct intel_ring_buffer rings[I915_NUM_RINGS];
+ uint32_t next_seqno;
+
+ drm_dma_handle_t *status_page_dmah;
+ void *hw_status_page;
+ dma_addr_t dma_status_page;
+ uint32_t counter;
+ unsigned int status_gfx_addr;
+ drm_local_map_t hws_map;
+ struct drm_gem_object *hws_obj;
+
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
+
+ atomic_t irq_received;
+ u32 trace_irq_seqno;
+
+ /** Cached value of IER to avoid reads in updating the bitfield */
+ u32 pipestat[2];
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
+ struct mtx irq_lock;
+
+ u32 hotplug_supported_mask;
+
+ int tex_lru_log_granularity;
+ int allow_batchbuffer;
+ unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
+ int vblank_pipe;
+ int num_pipe;
+
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
+ int hangcheck_count;
+ uint32_t last_acthd;
+ uint32_t last_acthd_bsd;
+ uint32_t last_acthd_blt;
+ uint32_t last_instdone;
+ uint32_t last_instdone1;
+
+ struct intel_opregion opregion;
+
+
+ /* overlay */
+ struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
+
+ /* LVDS info */
+ int backlight_level; /* restore backlight to this value */
+ bool backlight_enabled;
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ int lvds_ssc_freq;
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ bool no_aux_handshake;
+
+ int crt_ddc_pin;
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
+ unsigned long quirks;
+
+ /* Register state */
+ bool modeset_on_lid;
+ u8 saveLBB;
+ u32 saveDSPACNTR;
+ u32 saveDSPBCNTR;
+ u32 saveDSPARB;
+ u32 saveHWS;
+ u32 savePIPEACONF;
+ u32 savePIPEBCONF;
+ u32 savePIPEASRC;
+ u32 savePIPEBSRC;
+ u32 saveFPA0;
+ u32 saveFPA1;
+ u32 saveDPLL_A;
+ u32 saveDPLL_A_MD;
+ u32 saveHTOTAL_A;
+ u32 saveHBLANK_A;
+ u32 saveHSYNC_A;
+ u32 saveVTOTAL_A;
+ u32 saveVBLANK_A;
+ u32 saveVSYNC_A;
+ u32 saveBCLRPAT_A;
+ u32 saveTRANSACONF;
+ u32 saveTRANS_HTOTAL_A;
+ u32 saveTRANS_HBLANK_A;
+ u32 saveTRANS_HSYNC_A;
+ u32 saveTRANS_VTOTAL_A;
+ u32 saveTRANS_VBLANK_A;
+ u32 saveTRANS_VSYNC_A;
+ u32 savePIPEASTAT;
+ u32 saveDSPASTRIDE;
+ u32 saveDSPASIZE;
+ u32 saveDSPAPOS;
+ u32 saveDSPAADDR;
+ u32 saveDSPASURF;
+ u32 saveDSPATILEOFF;
+ u32 savePFIT_PGM_RATIOS;
+ u32 saveBLC_HIST_CTL;
+ u32 saveBLC_PWM_CTL;
+ u32 saveBLC_PWM_CTL2;
+ u32 saveBLC_CPU_PWM_CTL;
+ u32 saveBLC_CPU_PWM_CTL2;
+ u32 saveFPB0;
+ u32 saveFPB1;
+ u32 saveDPLL_B;
+ u32 saveDPLL_B_MD;
+ u32 saveHTOTAL_B;
+ u32 saveHBLANK_B;
+ u32 saveHSYNC_B;
+ u32 saveVTOTAL_B;
+ u32 saveVBLANK_B;
+ u32 saveVSYNC_B;
+ u32 saveBCLRPAT_B;
+ u32 saveTRANSBCONF;
+ u32 saveTRANS_HTOTAL_B;
+ u32 saveTRANS_HBLANK_B;
+ u32 saveTRANS_HSYNC_B;
+ u32 saveTRANS_VTOTAL_B;
+ u32 saveTRANS_VBLANK_B;
+ u32 saveTRANS_VSYNC_B;
+ u32 savePIPEBSTAT;
+ u32 saveDSPBSTRIDE;
+ u32 saveDSPBSIZE;
+ u32 saveDSPBPOS;
+ u32 saveDSPBADDR;
+ u32 saveDSPBSURF;
+ u32 saveDSPBTILEOFF;
+ u32 saveVGA0;
+ u32 saveVGA1;
+ u32 saveVGA_PD;
+ u32 saveVGACNTRL;
+ u32 saveADPA;
+ u32 saveLVDS;
+ u32 savePP_ON_DELAYS;
+ u32 savePP_OFF_DELAYS;
+ u32 saveDVOA;
+ u32 saveDVOB;
+ u32 saveDVOC;
+ u32 savePP_ON;
+ u32 savePP_OFF;
+ u32 savePP_CONTROL;
+ u32 savePP_DIVISOR;
+ u32 savePFIT_CONTROL;
+ u32 save_palette_a[256];
+ u32 save_palette_b[256];
+ u32 saveDPFC_CB_BASE;
+ u32 saveFBC_CFB_BASE;
+ u32 saveFBC_LL_BASE;
+ u32 saveFBC_CONTROL;
+ u32 saveFBC_CONTROL2;
+ u32 saveIER;
+ u32 saveIIR;
+ u32 saveIMR;
+ u32 saveDEIER;
+ u32 saveDEIMR;
+ u32 saveGTIER;
+ u32 saveGTIMR;
+ u32 saveFDI_RXA_IMR;
+ u32 saveFDI_RXB_IMR;
+ u32 saveCACHE_MODE_0;
+ u32 saveMI_ARB_STATE;
+ u32 saveSWF0[16];
+ u32 saveSWF1[16];
+ u32 saveSWF2[3];
+ u8 saveMSR;
+ u8 saveSR[8];
+ u8 saveGR[25];
+ u8 saveAR_INDEX;
+ u8 saveAR[21];
+ u8 saveDACMASK;
+ u8 saveCR[37];
+ uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+ u32 saveCURACNTR;
+ u32 saveCURAPOS;
+ u32 saveCURABASE;
+ u32 saveCURBCNTR;
+ u32 saveCURBPOS;
+ u32 saveCURBBASE;
+ u32 saveCURSIZE;
+ u32 saveDP_B;
+ u32 saveDP_C;
+ u32 saveDP_D;
+ u32 savePIPEA_GMCH_DATA_M;
+ u32 savePIPEB_GMCH_DATA_M;
+ u32 savePIPEA_GMCH_DATA_N;
+ u32 savePIPEB_GMCH_DATA_N;
+ u32 savePIPEA_DP_LINK_M;
+ u32 savePIPEB_DP_LINK_M;
+ u32 savePIPEA_DP_LINK_N;
+ u32 savePIPEB_DP_LINK_N;
+ u32 saveFDI_RXA_CTL;
+ u32 saveFDI_TXA_CTL;
+ u32 saveFDI_RXB_CTL;
+ u32 saveFDI_TXB_CTL;
+ u32 savePFA_CTL_1;
+ u32 savePFB_CTL_1;
+ u32 savePFA_WIN_SZ;
+ u32 savePFB_WIN_SZ;
+ u32 savePFA_WIN_POS;
+ u32 savePFB_WIN_POS;
+ u32 savePCH_DREF_CONTROL;
+ u32 saveDISP_ARB_CTL;
+ u32 savePIPEA_DATA_M1;
+ u32 savePIPEA_DATA_N1;
+ u32 savePIPEA_LINK_M1;
+ u32 savePIPEA_LINK_N1;
+ u32 savePIPEB_DATA_M1;
+ u32 savePIPEB_DATA_N1;
+ u32 savePIPEB_LINK_M1;
+ u32 savePIPEB_LINK_N1;
+ u32 saveMCHBAR_RENDER_STANDBY;
+ u32 savePCH_PORT_HOTPLUG;
+
+ struct {
+ /** Memory allocator for GTT stolen memory */
+ struct drm_mm stolen;
+ /** Memory allocator for GTT */
+ struct drm_mm gtt_space;
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head gtt_list;
+
+ /** Usable portion of the GTT for GEM */
+ unsigned long gtt_start;
+ unsigned long gtt_mappable_end;
+ unsigned long gtt_end;
+
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_hw_ppgtt *aliasing_ppgtt;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+ * still have a write_domain which needs to be flushed before
+ * unbinding.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head flushing_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer but
+ * are still pinned in the GTT.
+ */
+ struct list_head pinned_list;
+
+ /** LRU list of objects with fence regs on them. */
+ struct list_head fence_list;
+
+ /**
+ * List of objects currently pending being freed.
+ *
+ * These objects are no longer in use, but due to a signal
+ * we were prevented from freeing them at the appointed time.
+ */
+ struct list_head deferred_free_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct timeout_task retire_task;
+
+ /**
+ * Are we in a non-interruptible section of code like
+ * modesetting?
+ */
+ bool interruptible;
+
+ uint32_t next_gem_seqno;
+
+ /**
+ * Waiting sequence number, if any
+ */
+ uint32_t waiting_gem_seqno;
+
+ /**
+ * Last seq seen at irq time
+ */
+ uint32_t irq_gem_seqno;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /**
+ * Flag if the hardware appears to be wedged.
+ *
+ * This is set when attempts to idle the device timeout.
+		 * It prevents command submission from occurring and makes
+ * every pending request fail
+ */
+ int wedged;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+
+ /* storage for physical objects */
+ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+ /* accounting, useful for userland debugging */
+ size_t gtt_total;
+ size_t mappable_gtt_total;
+ size_t object_memory;
+ u32 object_count;
+
+ struct intel_gtt gtt;
+ eventhandler_tag i915_lowmem;
+ } mm;
+
+ const struct intel_device_info *info;
+
+ struct sdvo_device_mapping sdvo_mappings[2];
+ /* indicate whether the LVDS_BORDER should be enabled or not */
+ unsigned int lvds_border_bits;
+ /* Panel fitter placement and size for Ironlake+ */
+ u32 pch_pf_pos, pch_pf_size;
+
+ struct drm_crtc *plane_to_crtc_mapping[3];
+ struct drm_crtc *pipe_to_crtc_mapping[3];
+ /* wait_queue_head_t pending_flip_queue; XXXKIB */
+ bool flip_pending_is_done;
+
+ /* Reclocking support */
+ bool render_reclock_avail;
+ bool lvds_downclock_avail;
+ /* indicates the reduced downclock for LVDS*/
+ int lvds_downclock;
+ struct task idle_task;
+ struct callout idle_callout;
+ bool busy;
+ u16 orig_clock;
+ int child_dev_num;
+ struct child_device_config *child_dev;
+ struct drm_connector *int_lvds_connector;
+ struct drm_connector *int_edp_connector;
+
+ device_t bridge_dev;
+ bool mchbar_need_disable;
+ int mch_res_rid;
+ struct resource *mch_res;
+
+ struct mtx rps_lock;
+ u32 pm_iir;
+ struct task rps_task;
+
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ int c_m;
+ int r_t;
+ u8 corr;
+ struct mtx *mchdev_lock;
+
+ enum no_fbc_reason no_fbc_reason;
+
+ unsigned long cfb_size;
+ unsigned int cfb_fb;
+ int cfb_plane;
+ int cfb_y;
+ struct intel_fbc_work *fbc_work;
+
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+
+ struct taskqueue *tq;
+ struct task error_task;
+ struct task hotplug_task;
+ int error_completion;
+ struct mtx error_completion_lock;
+ struct drm_i915_error_state *first_error;
+ struct mtx error_lock;
+ struct callout hangcheck_timer;
+
+ unsigned long last_gpu_reset;
+
+ struct intel_fbdev *fbdev;
+
+ struct drm_property *broadcast_rgb_property;
+ struct drm_property *force_audio_property;
+} drm_i915_private_t;
+
+enum hdmi_force_audio {
+ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
+ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
+ HDMI_AUDIO_AUTO, /* trust EDID */
+ HDMI_AUDIO_ON, /* force turn on HDMI audio */
+};
+
+enum i915_cache_level {
+ I915_CACHE_NONE,
+ I915_CACHE_LLC,
+ I915_CACHE_LLC_MLC, /* gen6+ */
+};
+
+enum intel_chip_family {
+ CHIP_I8XX = 0x01,
+ CHIP_I9XX = 0x02,
+ CHIP_I915 = 0x04,
+ CHIP_I965 = 0x08,
+};
+
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+ struct drm_gem_object base;
+
+ /** Current space allocated to this object in the GTT, if any. */
+ struct drm_mm_node *gtt_space;
+ struct list_head gtt_list;
+ /** This object's place on the active/flushing/inactive lists */
+ struct list_head ring_list;
+ struct list_head mm_list;
+ /** This object's place on GPU write list */
+ struct list_head gpu_write_list;
+ /** This object's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
+ /**
+ * This is set if the object is on the active or flushing lists
+ * (has pending rendering), and is not set if it's on inactive (ready
+ * to be unbound).
+ */
+ unsigned int active:1;
+
+ /**
+ * This is set if the object has been written to since last bound
+ * to the GTT
+ */
+ unsigned int dirty:1;
+
+ /**
+ * This is set if the object has been written to since the last
+ * GPU flush.
+ */
+ unsigned int pending_gpu_write:1;
+
+ /**
+ * Fence register bits (if any) for this object. Will be set
+ * as needed when mapped into the GTT.
+ * Protected by dev->struct_mutex.
+ */
+ signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /**
+ * Current tiling mode for the object.
+ */
+ unsigned int tiling_mode:2;
+ unsigned int tiling_changed:1;
+
+ /** How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, pin_ioctl
+ * (via user_pin_count), execbuffer (objects are not allowed multiple
+ * times for the same batchbuffer), and the framebuffer code. When
+ * switching/pageflipping, the framebuffer code has at most two buffers
+ * pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits. */
+ unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+ /**
+ * Is the object at the current location in the gtt mappable and
+ * fenceable? Used to avoid costly recalculations.
+ */
+ unsigned int map_and_fenceable:1;
+
+ /**
+ * Whether the current gtt mapping needs to be mappable (and isn't just
+	 * mappable by accident). Track pin and fault separately for a more
+ * accurate mappable working set.
+ */
+ unsigned int fault_mappable:1;
+ unsigned int pin_mappable:1;
+
+ /*
+	 * Is the GPU currently using a fence to access this buffer?
+ */
+ unsigned int pending_fenced_gpu_access:1;
+ unsigned int fenced_gpu_access:1;
+
+ unsigned int cache_level:2;
+
+ unsigned int has_aliasing_ppgtt_mapping:1;
+
+ vm_page_t *pages;
+
+ /**
+ * DMAR support
+ */
+ struct sglist *sg_list;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ LIST_ENTRY(drm_i915_gem_object) exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+
+ /**
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
+ */
+ uint32_t gtt_offset;
+
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
+ struct intel_ring_buffer *ring;
+
+ /** Breadcrumb of last fenced GPU access to the buffer. */
+ uint32_t last_fenced_seqno;
+ struct intel_ring_buffer *last_fenced_ring;
+
+ /** Current tiling stride for the object, if it's tiled. */
+ uint32_t stride;
+
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+
+ /**
+ * If present, while GEM_DOMAIN_CPU is in the read domain this array
+ * flags which individual pages are valid.
+ */
+ uint8_t *page_cpu_valid;
+
+ /** User space pin count and filp owning the pin */
+ uint32_t user_pin_count;
+ struct drm_file *pin_filp;
+
+ /** for phy allocated objects */
+ struct drm_i915_gem_phys_object *phys_obj;
+
+ /**
+ * Number of crtcs where this object is currently the fb, but
+ * will be page flipped away on the next vblank. When it
+ * reaches 0, dev_priv->pending_flip_queue will be woken up.
+ */
+ int pending_flip;
+};
+
+#define to_intel_bo(x) member2struct(drm_i915_gem_object, base, (x))
+
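to_intel_bo() above recovers the wrapping drm_i915_gem_object from a pointer to its embedded drm_gem_object base. A minimal standalone sketch of the idea, assuming member2struct() is drm2's container-of idiom; the macro body and the trimmed types here are illustrative stand-ins:

```c
#include <stddef.h>
#include <stdio.h>

/* Trimmed stand-ins for the driver types. */
struct drm_gem_object { size_t size; };
struct drm_i915_gem_object { int active; struct drm_gem_object base; };

/* Assumed shape of member2struct(): step back from a member pointer
 * to the structure that contains it. */
#define member2struct(s, m, x) \
	((struct s *)(void *)((char *)(x) - offsetof(struct s, m)))
#define to_intel_bo(x) member2struct(drm_i915_gem_object, base, (x))

int
main(void)
{
	struct drm_i915_gem_object obj = { .active = 1 };
	struct drm_gem_object *base = &obj.base;

	/* Round-trip: the base pointer leads back to the wrapper. */
	printf("active=%d\n", to_intel_bo(base)->active);
	return (0);
}
```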
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+	/** On which ring this request was generated */
+ struct intel_ring_buffer *ring;
+
+ /** GEM sequence number associated with this request. */
+ uint32_t seqno;
+
+	/** Position in the ringbuffer of the end of the request */
+ u32 tail;
+
+ /** Time at which this request was emitted, in jiffies. */
+ unsigned long emitted_jiffies;
+
+ /** global list entry for this request */
+ struct list_head list;
+
+ struct drm_i915_file_private *file_priv;
+ /** file_priv list entry for this request */
+ struct list_head client_list;
+};
+
+struct drm_i915_file_private {
+ struct {
+ struct list_head request_list;
+ struct mtx lck;
+ } mm;
+};
+
+struct drm_i915_error_state {
+ u32 eir;
+ u32 pgtbl_er;
+ u32 pipestat[I915_MAX_PIPES];
+ u32 tail[I915_NUM_RINGS];
+ u32 head[I915_NUM_RINGS];
+ u32 ipeir[I915_NUM_RINGS];
+ u32 ipehr[I915_NUM_RINGS];
+ u32 instdone[I915_NUM_RINGS];
+ u32 acthd[I915_NUM_RINGS];
+ u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head[I915_NUM_RINGS];
+ u32 cpu_ring_tail[I915_NUM_RINGS];
+ u32 error; /* gen6+ */
+ u32 instpm[I915_NUM_RINGS];
+ u32 instps[I915_NUM_RINGS];
+ u32 instdone1;
+ u32 seqno[I915_NUM_RINGS];
+ u64 bbaddr;
+ u32 fault_reg[I915_NUM_RINGS];
+ u32 done_reg;
+ u32 faddr[I915_NUM_RINGS];
+ u64 fence[I915_MAX_NUM_FENCES];
+ struct timeval time;
+ struct drm_i915_error_ring {
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer;
+ struct drm_i915_error_request {
+ long jiffies;
+ u32 seqno;
+ u32 tail;
+ } *requests;
+ int num_requests;
+ } ring[I915_NUM_RINGS];
+ struct drm_i915_error_buffer {
+ u32 size;
+ u32 name;
+ u32 seqno;
+ u32 gtt_offset;
+ u32 read_domains;
+ u32 write_domain;
+ s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+ s32 pinned:2;
+ u32 tiling:2;
+ u32 dirty:1;
+ u32 purgeable:1;
+ s32 ring:4;
+ u32 cache_level:2;
+ } *active_bo, *pinned_bo;
+ u32 active_bo_count, pinned_bo_count;
+ struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
+};
+
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, using down to 0V while at this stage. This
+ * stage is entered automatically when the GPU is idle and RC6 support is
+ * enabled, and as soon as a new workload arises the GPU wakes up
+ * automatically as well.
+ *
+ * There are different RC6 modes available in Intel GPUs, which differ in
+ * the latency required to enter and leave RC6, and in the voltage consumed
+ * by the GPU in different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
+ * the one which brings the most power savings; deeper states save more
+ * power, but require higher latency to switch to and wake up.
+ */
+#define INTEL_RC6_ENABLE (1<<0)
+#define INTEL_RC6p_ENABLE (1<<1)
+#define INTEL_RC6pp_ENABLE (1<<2)
+
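A hypothetical decode of an i915_enable_rc6-style tunable into the flag bits above, just to illustrate how the mask composes; the real enabling policy lives in the modesetting code and also weighs GPU generation and BIOS state:

```c
#include <stdio.h>

#define INTEL_RC6_ENABLE	(1 << 0)
#define INTEL_RC6p_ENABLE	(1 << 1)
#define INTEL_RC6pp_ENABLE	(1 << 2)

/* Illustrative only: negative means "auto", where plain RC6 is the
 * conservative pick; otherwise treat the value as a mask of states. */
static unsigned int
rc6_mask(int tunable)
{
	if (tunable < 0)
		return (INTEL_RC6_ENABLE);
	return ((unsigned int)tunable &
	    (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE));
}

int
main(void)
{
	printf("auto -> %#x, 3 -> %#x\n", rc6_mask(-1), rc6_mask(3));
	return (0);
}
```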
+extern int intel_iommu_enabled;
+extern struct drm_ioctl_desc i915_ioctls[];
+extern struct drm_driver_info i915_driver_info;
+extern struct cdev_pager_ops i915_gem_pager_ops;
+extern unsigned int i915_fbpercrtc;
+extern int i915_panel_ignore_lid;
+extern unsigned int i915_powersave;
+extern int i915_semaphores;
+extern unsigned int i915_lvds_downclock;
+extern int i915_panel_use_ssc;
+extern int i915_vbt_sdvo_panel_type;
+extern int i915_enable_rc6;
+extern int i915_enable_fbc;
+extern int i915_enable_ppgtt;
+extern int i915_enable_hangcheck;
+
+const struct intel_device_info *i915_get_device_id(int device);
+
+int i915_reset(struct drm_device *dev, u8 flags);
+
+/* i915_debug.c */
+int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *top);
+void i915_sysctl_cleanup(struct drm_device *dev);
+
+/* i915_dma.c */
+extern void i915_kernel_lost_context(struct drm_device * dev);
+extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern int i915_driver_unload(struct drm_device *);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern void i915_driver_lastclose(struct drm_device * dev);
+extern void i915_driver_preclose(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern void i915_driver_postclose(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern int i915_driver_device_is_agp(struct drm_device * dev);
+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+extern int i915_emit_box(struct drm_device *dev,
+ struct drm_clip_rect __user *boxes,
+ int i, int DR1, int DR4);
+int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
+ int DR1, int DR4);
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+unsigned long i915_read_mch_val(void);
+bool i915_gpu_raise(void);
+bool i915_gpu_lower(void);
+bool i915_gpu_busy(void);
+bool i915_gpu_turbo_disable(void);
+
+/* i915_irq.c */
+extern int i915_irq_emit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern void intel_irq_init(struct drm_device *dev);
+
+extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void intel_enable_asle(struct drm_device *dev);
+void i915_hangcheck_elapsed(void *context);
+void i915_handle_error(struct drm_device *dev, bool wedged);
+
+void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+void i915_destroy_error_state(struct drm_device *dev);
+
+/* i915_gem.c */
+int i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
+ uint32_t *handle_p);
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_set_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_get_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void i915_gem_load(struct drm_device *dev);
+void i915_gem_unload(struct drm_device *dev);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
+ bool map_and_fenceable);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_lastclose(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev);
+
+static inline void
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ }
+}
+
+static inline void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+}
+
+void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long mappable_end, unsigned long end);
+uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size, int tiling_mode);
+int i915_mutex_lock_interruptible(struct drm_device *dev);
+int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+ bool write);
+int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment, struct intel_ring_buffer *pipelined);
+int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int i915_gem_flush_ring(struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains, uint32_t flush_domains);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
+int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+int i915_gem_idle(struct drm_device *dev);
+int i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_init_ppgtt(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int i915_gpu_idle(struct drm_device *dev, bool do_retire);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring, uint32_t seqno);
+int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
+ struct drm_i915_gem_request *request);
+int i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
+void i915_gem_reset(struct drm_device *dev);
+int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno,
+ bool do_retire);
+int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
+int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
+ uint64_t *phys);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+
+void i915_gem_free_all_phys_object(struct drm_device *dev);
+void i915_gem_detach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj, int id, int align);
+
+int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);
+
+/* i915_gem_tiling.c */
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
+/* i915_gem_evict.c */
+int i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable);
+int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);
+
+/* i915_suspend.c */
+extern int i915_save_state(struct drm_device *dev);
+extern int i915_restore_state(struct drm_device *dev);
+
+/* intel_iic.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+extern void intel_gmbus_set_speed(device_t idev, int speed);
+extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
+extern void intel_iic_reset(struct drm_device *dev);
+
+/* intel_opregion.c */
+int intel_opregion_setup(struct drm_device *dev);
+extern int intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void opregion_asle_intr(struct drm_device *dev);
+extern void opregion_enable_asle(struct drm_device *dev);
+
+/* i915_gem_gtt.c */
+int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj, enum i915_cache_level cache_level);
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+
+/* modesetting */
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_gem_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void ironlake_enable_rc6(struct drm_device *dev);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
+extern void intel_detect_pch(struct drm_device *dev);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
+ struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct sbuf *m,
+ struct intel_overlay_error_state *error);
+extern struct intel_display_error_state *intel_display_capture_error_state(
+ struct drm_device *dev);
+extern void intel_display_print_error_state(struct sbuf *m,
+ struct drm_device *dev, struct intel_display_error_state *error);
+
+static inline void
+trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz)
+{
+
+	CTR4(KTR_DRM_REG, "[%x/%d] %c %x", reg, sz, rw ? 'w' : 'r', val);
+}
+
+/* On the SNB platform, the forcewake bit must be set before reading ring
+ * registers, to prevent the GT core from powering down and returning
+ * stale values.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ (((dev_priv)->info->gen >= 6) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
+
+#define __i915_read(x, y) \
+ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
+
+__i915_read(8, 8)
+__i915_read(16, 16)
+__i915_read(32, 32)
+__i915_read(64, 64)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
+
+__i915_write(8, 8)
+__i915_write(16, 16)
+__i915_write(32, 32)
+__i915_write(64, 64)
+#undef __i915_write
+
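For reference, __i915_read(32, 32) above expands to the declaration of u32 i915_read32(struct drm_i915_private *, u32); the second macro argument is unused in the declarations and presumably selects the raw MMIO access width in the i915_drv.c definitions. A sketch of the shape such a definition takes under the forcewake rule and the NEEDS_FORCE_WAKE() test above; this is an illustration, not the committed implementation:

```c
/* Sketch only: relies on the surrounding header's types and helpers. */
static inline u32
i915_read32_sketch(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		gen6_gt_force_wake_get(dev_priv);	/* keep the GT awake */
		val = DRM_READ32(dev_priv->mmio_map, reg);
		gen6_gt_force_wake_put(dev_priv);
	} else
		val = DRM_READ32(dev_priv->mmio_map, reg);
	trace_i915_reg_rw(FALSE, reg, val, 4);		/* a 4-byte read */
	return (val);
}
```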
+#define I915_READ8(reg) i915_read8(dev_priv, (reg))
+#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
+
+#define I915_READ16(reg) i915_read16(dev_priv, (reg))
+#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
+#define I915_READ16_NOTRACE(reg) DRM_READ16(dev_priv->mmio_map, (reg))
+#define I915_WRITE16_NOTRACE(reg, val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
+
+#define I915_READ(reg) i915_read32(dev_priv, (reg))
+#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
+#define I915_READ_NOTRACE(reg) DRM_READ32(dev_priv->mmio_map, (reg))
+#define I915_WRITE_NOTRACE(reg, val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+
+#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
+#define I915_READ64(reg) i915_read64(dev_priv, (reg))
+
+#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
+
+#define I915_VERBOSE 0
+
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
+
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
+} while (0)
+
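The macros above wrap intel_ring_begin(), intel_ring_emit() and intel_ring_advance() on the render ring for the legacy DRI paths. A hedged sketch of the usual emit sequence; the command choice is illustrative (MI_FLUSH and MI_NOOP come from i915_reg.h):

```c
/* Sketch: reserve space, emit dwords, then publish the new tail. */
static int
emit_flush_sketch(drm_i915_private_t *dev_priv)
{
	int ret;

	ret = BEGIN_LP_RING(2);		/* reserve two dwords; may fail */
	if (ret != 0)
		return (ret);
	OUT_RING(MI_FLUSH);		/* the command itself */
	OUT_RING(MI_NOOP);		/* pad to an even dword count */
	ADVANCE_LP_RING();		/* make the tail visible to the GPU */
	return (0);
}
```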
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_GEM_HWS_INDEX 0x20
+#define I915_BREADCRUMB_INDEX 0x21
+
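A small usage sketch for the accessors above: READ_BREADCRUMB() fetches the driver's software breadcrumb from the status page, and a waiter compares it with the value it is waiting for (helper name illustrative; wraparound is ignored here for brevity, unlike i915_seqno_passed() further down):

```c
/* Sketch: has the GPU written breadcrumb irq_nr to the status page? */
static inline bool
breadcrumb_passed_sketch(drm_i915_private_t *dev_priv, uint32_t irq_nr)
{
	return (READ_BREADCRUMB(dev_priv) >= irq_nr);
}
```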
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+
+/* XXXKIB LEGACY */
+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
+ (dev)->pci_device == 0x2982 || \
+ (dev)->pci_device == 0x2992 || \
+ (dev)->pci_device == 0x29A2 || \
+ (dev)->pci_device == 0x2A02 || \
+ (dev)->pci_device == 0x2A12 || \
+ (dev)->pci_device == 0x2A42 || \
+ (dev)->pci_device == 0x2E02 || \
+ (dev)->pci_device == 0x2E12 || \
+ (dev)->pci_device == 0x2E22 || \
+ (dev)->pci_device == 0x2E32)
+
+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
+
+#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
+#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
+#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+
+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+/* XXXKIB LEGACY END */
+
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
+
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6)
+
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+
+#define PRIMARY_RINGBUFFER_SIZE (128*1024)
+
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+
+ return ((int32_t)(seq1 - seq2) >= 0);
+}
+
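i915_seqno_passed() is the classic serial-number comparison: viewing the 32-bit difference as signed keeps the ordering correct across wraparound, as long as the two seqnos are within 2^31 of each other. A standalone demonstration:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same comparison as i915_seqno_passed() above. */
static bool
seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return ((int32_t)(seq1 - seq2) >= 0);
}

int
main(void)
{
	/* 3 is "after" 0xfffffffe although numerically smaller:
	 * 3 - 0xfffffffe wraps to 5, which is non-negative. */
	printf("%d\n", seqno_passed(3, 0xfffffffeU));	/* prints 1 */
	printf("%d\n", seqno_passed(0xfffffffeU, 3));	/* prints 0 */
	return (0);
}
```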
+u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+
+#endif
diff --git a/sys/dev/drm2/i915/i915_gem.c b/sys/dev/drm2/i915/i915_gem.c
new file mode 100644
index 0000000..0573721
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_gem.c
@@ -0,0 +1,3760 @@
+/*-
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/i915/intel_ringbuffer.h>
+#include <sys/resourcevar.h>
+#include <sys/sched.h>
+#include <sys/sf_buf.h>
+
+static void i915_gem_object_flush_cpu_write_domain(
+ struct drm_i915_gem_object *obj);
+static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size,
+ int tiling_mode);
+static uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev,
+ uint32_t size, int tiling_mode);
+static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment, bool map_and_fenceable);
+static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+ int flags);
+static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj);
+static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+ bool write);
+static void i915_gem_object_set_to_full_cpu_read_domain(
+ struct drm_i915_gem_object *obj);
+static int i915_gem_object_set_cpu_read_domain_range(
+ struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size);
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj);
+static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj);
+static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
+static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
+static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex);
+static void i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+ uint32_t flush_domains);
+static void i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg);
+static void i915_gem_reset_fences(struct drm_device *dev);
+static void i915_gem_retire_task_handler(void *arg, int pending);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj, uint64_t data_ptr, uint64_t offset,
+ uint64_t size, struct drm_file *file_priv);
+static void i915_gem_lowmem(void *arg);
+
+MALLOC_DEFINE(DRM_I915_GEM, "i915gem", "Allocations from i915 gem");
+long i915_gem_wired_pages_cnt;
+
+static void
+i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size)
+{
+
+ dev_priv->mm.object_count++;
+ dev_priv->mm.object_memory += size;
+}
+
+static void
+i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, size_t size)
+{
+
+ dev_priv->mm.object_count--;
+ dev_priv->mm.object_memory -= size;
+}
+
+static int
+i915_gem_wait_for_error(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ dev_priv = dev->dev_private;
+ if (!atomic_load_acq_int(&dev_priv->mm.wedged))
+ return (0);
+
+ mtx_lock(&dev_priv->error_completion_lock);
+ while (dev_priv->error_completion == 0) {
+ ret = -msleep(&dev_priv->error_completion,
+ &dev_priv->error_completion_lock, PCATCH, "915wco", 0);
+ if (ret != 0) {
+ mtx_unlock(&dev_priv->error_completion_lock);
+ return (ret);
+ }
+ }
+ mtx_unlock(&dev_priv->error_completion_lock);
+
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ mtx_lock(&dev_priv->error_completion_lock);
+ dev_priv->error_completion++;
+ mtx_unlock(&dev_priv->error_completion_lock);
+ }
+ return (0);
+}
+
+int
+i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ dev_priv = dev->dev_private;
+ ret = i915_gem_wait_for_error(dev);
+ if (ret != 0)
+ return (ret);
+
+ /*
+	 * Interruptible it shall be.  It might indeed be, if dev_lock
+	 * is changed to an sx lock.
+ */
+ ret = sx_xlock_sig(&dev->dev_struct_lock);
+ if (ret != 0)
+ return (-ret);
+
+ return (0);
+}
+
+
+static void
+i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev;
+ drm_i915_private_t *dev_priv;
+ int ret;
+
+ dev = obj->base.dev;
+ dev_priv = dev->dev_private;
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret == -ERESTART) {
+ list_move(&obj->mm_list, &dev_priv->mm.deferred_free_list);
+ return;
+ }
+
+ CTR1(KTR_DRM, "object_destroy_tail %p", obj);
+ drm_gem_free_mmap_offset(&obj->base);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev_priv, obj->base.size);
+
+ free(obj->page_cpu_valid, DRM_I915_GEM);
+ free(obj->bit_17, DRM_I915_GEM);
+ free(obj, DRM_I915_GEM);
+}
+
+void
+i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj;
+ struct drm_device *dev;
+
+ obj = to_intel_bo(gem_obj);
+ dev = obj->base.dev;
+
+ while (obj->pin_count > 0)
+ i915_gem_object_unpin(obj);
+
+ if (obj->phys_obj != NULL)
+ i915_gem_detach_phys_object(dev, obj);
+
+ i915_gem_free_object_tail(obj);
+}
+
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
+void
+i915_gem_load(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv;
+ int i;
+
+ dev_priv = dev->dev_private;
+
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
+ INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ init_ring_lists(&dev_priv->rings[i]);
+ for (i = 0; i < I915_MAX_NUM_FENCES; i++)
+ INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
+ TIMEOUT_TASK_INIT(dev_priv->tq, &dev_priv->mm.retire_task, 0,
+ i915_gem_retire_task_handler, dev_priv);
+ dev_priv->error_completion = 0;
+
+ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+ if (IS_GEN3(dev)) {
+ u32 tmp = I915_READ(MI_ARB_STATE);
+ if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
+ /*
+			 * ARB state is a masked write, so set both the
+			 * bit and the corresponding bit in the mask.
+ */
+ tmp = MI_ARB_C3_LP_WRITE_ENABLE |
+ (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
+ I915_WRITE(MI_ARB_STATE, tmp);
+ }
+ }
+
+ dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
+ /* Old X drivers will take 0-2 for front, back, depth buffers */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->fence_reg_start = 3;
+
+ if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) ||
+ IS_G33(dev))
+ dev_priv->num_fence_regs = 16;
+ else
+ dev_priv->num_fence_regs = 8;
+
+ /* Initialize fence registers to zero */
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
+ }
+ i915_gem_detect_bit_6_swizzle(dev);
+ dev_priv->mm.interruptible = true;
+
+ dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
+ i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
+}
+
+int
+i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long mappable_end, unsigned long end)
+{
+ drm_i915_private_t *dev_priv;
+ unsigned long mappable;
+ int error;
+
+ dev_priv = dev->dev_private;
+ mappable = min(end, mappable_end) - start;
+
+ drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
+
+ dev_priv->mm.gtt_start = start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
+ dev_priv->mm.gtt_end = end;
+ dev_priv->mm.gtt_total = end - start;
+ dev_priv->mm.mappable_gtt_total = mappable;
+
+ /* Take over this portion of the GTT */
+ intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ device_printf(dev->device,
+ "taking over the fictitious range 0x%lx-0x%lx\n",
+ dev->agp->base + start, dev->agp->base + start + mappable);
+ error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
+ dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
+ return (error);
+}
+
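The mappable computation in i915_gem_do_init() caps the CPU-visible part of the managed range at whichever comes first, the aperture limit or the end of the range. A standalone check with made-up numbers:

```c
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	/* Hypothetical layout: 512 MB of GTT, the first 256 MB of it
	 * CPU-mappable through the aperture. */
	unsigned long start = 0;
	unsigned long mappable_end = 256UL << 20;
	unsigned long end = 512UL << 20;
	unsigned long mappable = MIN(end, mappable_end) - start;

	printf("total=%lu MB, mappable=%lu MB\n",
	    (end - start) >> 20, mappable >> 20);
	return (0);
}
```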
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_init *args;
+ drm_i915_private_t *dev_priv;
+
+ dev_priv = dev->dev_private;
+ args = data;
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+ return (-EINVAL);
+
+ if (mtx_initialized(&dev_priv->mm.gtt_space.unused_lock))
+ return (-EBUSY);
+ /*
+ * XXXKIB. The second-time initialization should be guarded
+ * against.
+ */
+ return (i915_gem_do_init(dev, args->gtt_start, args->gtt_end,
+ args->gtt_end));
+}
+
+int
+i915_gem_idle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv;
+ int ret;
+
+ dev_priv = dev->dev_private;
+ if (dev_priv->mm.suspended)
+ return (0);
+
+ ret = i915_gpu_idle(dev, true);
+ if (ret != 0)
+ return (ret);
+
+ /* Under UMS, be paranoid and evict. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_gem_evict_inactive(dev, false);
+ if (ret != 0)
+ return ret;
+ }
+
+ i915_gem_reset_fences(dev);
+
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And not confound mm.suspended!
+ */
+ dev_priv->mm.suspended = 1;
+ callout_stop(&dev_priv->hangcheck_timer);
+
+ i915_kernel_lost_context(dev);
+ i915_gem_cleanup_ringbuffer(dev);
+
+ /* Cancel the retire work handler, which should be idle now. */
+ taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL);
+ return (ret);
+}
+
+void
+i915_gem_init_swizzling(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv;
+
+ dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen < 5 ||
+ dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ return;
+
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_TILE_SURFACE_SWIZZLING);
+
+ if (IS_GEN5(dev))
+ return;
+
+ I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
+ if (IS_GEN6(dev))
+ I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ else
+ I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
+}
+
+void
+i915_gem_init_ppgtt(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv;
+ struct i915_hw_ppgtt *ppgtt;
+ uint32_t pd_offset, pd_entry;
+ vm_paddr_t pt_addr;
+ struct intel_ring_buffer *ring;
+ u_int first_pd_entry_in_global_pt, i;
+
+ dev_priv = dev->dev_private;
+ ppgtt = dev_priv->mm.aliasing_ppgtt;
+ if (ppgtt == NULL)
+ return;
+
+ first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]);
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+ intel_gtt_write(first_pd_entry_in_global_pt + i, pd_entry);
+ }
+ intel_gtt_read_pte(first_pd_entry_in_global_pt);
+
+ pd_offset = ppgtt->pd_offset;
+ pd_offset /= 64; /* in cachelines, */
+ pd_offset <<= 16;
+
+ if (INTEL_INFO(dev)->gen == 6) {
+ uint32_t ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+ ECOCHK_PPGTT_CACHE64B);
+ I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+ /* GFX_MODE is per-ring on gen7+ */
+ }
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ring = &dev_priv->rings[i];
+
+ if (INTEL_INFO(dev)->gen >= 7)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+ }
+}
+
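The pd_offset arithmetic above expresses the page directory's byte offset in 64-byte cachelines and moves it into the upper 16 bits of the value written to RING_PP_DIR_BASE. A standalone check of that encoding (the byte offset is an arbitrary example):

```c
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t pd_offset = 0x1f000;		/* example byte offset */
	uint32_t reg = (pd_offset / 64) << 16;	/* cachelines, high half */

	printf("%#x -> %#x\n", (unsigned)pd_offset, (unsigned)reg);
	return (0);
}
```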
+int
+i915_gem_init_hw(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv;
+ int ret;
+
+ dev_priv = dev->dev_private;
+
+ i915_gem_init_swizzling(dev);
+
+ ret = intel_init_render_ring_buffer(dev);
+ if (ret != 0)
+ return (ret);
+
+ if (HAS_BSD(dev)) {
+ ret = intel_init_bsd_ring_buffer(dev);
+ if (ret != 0)
+ goto cleanup_render_ring;
+ }
+
+ if (HAS_BLT(dev)) {
+ ret = intel_init_blt_ring_buffer(dev);
+ if (ret != 0)
+ goto cleanup_bsd_ring;
+ }
+
+ dev_priv->next_seqno = 1;
+ i915_gem_init_ppgtt(dev);
+ return (0);
+
+cleanup_bsd_ring:
+ intel_cleanup_ring_buffer(&dev_priv->rings[VCS]);
+cleanup_render_ring:
+ intel_cleanup_ring_buffer(&dev_priv->rings[RCS]);
+ return (ret);
+}
+
+int
+i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv;
+ struct drm_i915_gem_get_aperture *args;
+ struct drm_i915_gem_object *obj;
+ size_t pinned;
+
+ dev_priv = dev->dev_private;
+ args = data;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return (-ENODEV);
+
+ pinned = 0;
+ DRM_LOCK(dev);
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ pinned += obj->gtt_space->size;
+ DRM_UNLOCK(dev);
+
+ args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_available_size = args->aper_size - pinned;
+
+ return (0);
+}
+
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
+ bool map_and_fenceable)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ dev = obj->base.dev;
+ dev_priv = dev->dev_private;
+
+ KASSERT(obj->pin_count != DRM_I915_GEM_OBJECT_MAX_PIN_COUNT,
+ ("Max pin count"));
+
+ if (obj->gtt_space != NULL) {
+ if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ (map_and_fenceable && !obj->map_and_fenceable)) {
+ DRM_DEBUG("bo is already pinned with incorrect alignment:"
+ " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " obj->map_and_fenceable=%d\n",
+ obj->gtt_offset, alignment,
+ map_and_fenceable,
+ obj->map_and_fenceable);
+ ret = i915_gem_object_unbind(obj);
+ if (ret != 0)
+ return (ret);
+ }
+ }
+
+ if (obj->gtt_space == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj, alignment,
+ map_and_fenceable);
+ if (ret)
+ return (ret);
+ }
+
+ if (obj->pin_count++ == 0 && !obj->active)
+ list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+ obj->pin_mappable |= map_and_fenceable;
+
+#if 1
+ KIB_NOTYET();
+#else
+ WARN_ON(i915_verify_lists(dev));
+#endif
+ return (0);
+}
+
+void
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev;
+ drm_i915_private_t *dev_priv;
+
+ dev = obj->base.dev;
+ dev_priv = dev->dev_private;
+
+#if 1
+ KIB_NOTYET();
+#else
+ WARN_ON(i915_verify_lists(dev));
+#endif
+
+ KASSERT(obj->pin_count != 0, ("zero pin count"));
+ KASSERT(obj->gtt_space != NULL, ("No gtt mapping"));
+
+ if (--obj->pin_count == 0) {
+ if (!obj->active)
+ list_move_tail(&obj->mm_list,
+ &dev_priv->mm.inactive_list);
+ obj->pin_mappable = false;
+ }
+#if 1
+ KIB_NOTYET();
+#else
+ WARN_ON(i915_verify_lists(dev));
+#endif
+}
+
+int
+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_pin *args;
+ struct drm_i915_gem_object *obj;
+ struct drm_gem_object *gobj;
+ int ret;
+
+ args = data;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret != 0)
+ return ret;
+
+ gobj = drm_gem_object_lookup(dev, file, args->handle);
+ if (gobj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ obj = to_intel_bo(gobj);
+
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to pin a purgeable buffer\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (obj->pin_filp != NULL && obj->pin_filp != file) {
+ DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+ args->handle);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+ if (obj->user_pin_count == 1) {
+ ret = i915_gem_object_pin(obj, args->alignment, true);
+ if (ret != 0)
+ goto out;
+ }
+
+ /* XXX - flush the CPU caches for pinned objects
+ * as the X server doesn't manage domains yet
+ */
+ i915_gem_object_flush_cpu_write_domain(obj);
+ args->offset = obj->gtt_offset;
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK(dev);
+ return (ret);
+}
+
+int
+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_pin *args;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ args = data;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret != 0)
+ return (ret);
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (obj->pin_filp != file) {
+		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
+ args->handle);
+ ret = -EINVAL;
+ goto out;
+ }
+ obj->user_pin_count--;
+ if (obj->user_pin_count == 0) {
+ obj->pin_filp = NULL;
+ i915_gem_object_unpin(obj);
+ }
+
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK(dev);
+ return (ret);
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_busy *args;
+ struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_request *request;
+ int ret;
+
+ args = data;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret != 0)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ args->busy = obj->active;
+ if (args->busy) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ ret = i915_gem_flush_ring(obj->ring,
+ 0, obj->base.write_domain);
+ } else if (obj->ring->outstanding_lazy_request ==
+ obj->last_rendering_seqno) {
+ request = malloc(sizeof(*request), DRM_I915_GEM,
+ M_WAITOK | M_ZERO);
+ ret = i915_add_request(obj->ring, NULL, request);
+ if (ret != 0)
+ free(request, DRM_I915_GEM);
+ }
+
+ i915_gem_retire_requests_ring(obj->ring);
+ args->busy = obj->active;
+ }
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK(dev);
+ return (ret);
+}
+
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv;
+ struct drm_i915_file_private *file_priv;
+ unsigned long recent_enough;
+ struct drm_i915_gem_request *request;
+ struct intel_ring_buffer *ring;
+ u32 seqno;
+ int ret;
+
+ dev_priv = dev->dev_private;
+ if (atomic_read(&dev_priv->mm.wedged))
+ return (-EIO);
+
+ file_priv = file->driver_priv;
+ recent_enough = ticks - (20 * hz / 1000);
+ ring = NULL;
+ seqno = 0;
+
+ mtx_lock(&file_priv->mm.lck);
+ list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+ if (time_after_eq(request->emitted_jiffies, recent_enough))
+ break;
+ ring = request->ring;
+ seqno = request->seqno;
+ }
+ mtx_unlock(&file_priv->mm.lck);
+ if (seqno == 0)
+ return (0);
+
+ ret = 0;
+ mtx_lock(&ring->irq_lock);
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
+ if (ring->irq_get(ring)) {
+ while (ret == 0 &&
+ !(i915_seqno_passed(ring->get_seqno(ring), seqno) ||
+ atomic_read(&dev_priv->mm.wedged)))
+ ret = -msleep(ring, &ring->irq_lock, PCATCH,
+ "915thr", 0);
+ ring->irq_put(ring);
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
+ } else if (_intel_wait_for(dev,
+ i915_seqno_passed(ring->get_seqno(ring), seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000, 0, "915rtr")) {
+ ret = -EBUSY;
+ }
+ }
+ mtx_unlock(&ring->irq_lock);
+
+ if (ret == 0)
+ taskqueue_enqueue_timeout(dev_priv->tq,
+ &dev_priv->mm.retire_task, 0);
+
+ return (ret);
+}
+
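i915_gem_ring_throttle() walks the file's request list oldest-first and picks the newest request emitted more than 20 ms ago; blocking on it keeps a client from queueing more than roughly 20 ms of work ahead of the GPU. A trivial standalone check of the tick arithmetic (the hz and ticks values are made up):

```c
#include <stdio.h>

int
main(void)
{
	int hz = 1000;		/* assumed kernel tick rate */
	int ticks = 12345;	/* assumed current tick counter */
	/* Same expression as recent_enough above: 20 ms worth of ticks. */
	int recent_enough = ticks - (20 * hz / 1000);

	printf("wait only on requests emitted before tick %d\n",
	    recent_enough);
	return (0);
}
```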
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+
+ return (i915_gem_ring_throttle(dev, file_priv));
+}
+
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_madvise *args;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ args = data;
+ switch (args->madv) {
+ case I915_MADV_DONTNEED:
+ case I915_MADV_WILLNEED:
+ break;
+ default:
+ return (-EINVAL);
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret != 0)
+ return (ret);
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (obj->pin_count != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (obj->madv != I915_MADV_PURGED_INTERNAL)
+ obj->madv = args->madv;
+ if (i915_gem_object_is_purgeable(obj) && obj->gtt_space == NULL)
+ i915_gem_object_truncate(obj);
+ args->retained = obj->madv != I915_MADV_PURGED_INTERNAL;
+
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK(dev);
+ return (ret);
+}
+
+void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv;
+ int i;
+
+ dev_priv = dev->dev_private;
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->rings[i]);
+}
+
+int
+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv;
+ int ret, i;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return (0);
+ dev_priv = dev->dev_private;
+ if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
+ DRM_ERROR("Reenabling wedged hardware, good luck\n");
+ atomic_store_rel_int(&dev_priv->mm.wedged, 0);
+ }
+
+ dev_priv->mm.suspended = 0;
+
+ ret = i915_gem_init_hw(dev);
+ if (ret != 0) {
+ return (ret);
+ }
+
+ KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
+ KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flushing list"));
+ KASSERT(list_empty(&dev_priv->mm.inactive_list), ("inactive list"));
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ KASSERT(list_empty(&dev_priv->rings[i].active_list),
+ ("ring %d active list", i));
+ KASSERT(list_empty(&dev_priv->rings[i].request_list),
+ ("ring %d request list", i));
+ }
+
+ DRM_UNLOCK(dev);
+ ret = drm_irq_install(dev);
+ DRM_LOCK(dev);
+ if (ret)
+ goto cleanup_ringbuffer;
+
+ return (0);
+
+cleanup_ringbuffer:
+ i915_gem_cleanup_ringbuffer(dev);
+ dev_priv->mm.suspended = 1;
+
+ return (ret);
+}
+
+int
+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
+ drm_irq_uninstall(dev);
+ return (i915_gem_idle(dev));
+}
+
+int
+i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
+ uint32_t *handle_p)
+{
+ struct drm_i915_gem_object *obj;
+ uint32_t handle;
+ int ret;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return (-EINVAL);
+
+ obj = i915_gem_alloc_object(dev, size);
+ if (obj == NULL)
+ return (-ENOMEM);
+
+ handle = 0;
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+ if (ret != 0) {
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
+ free(obj, DRM_I915_GEM);
+ return (-ret);
+ }
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference(&obj->base);
+ CTR2(KTR_DRM, "object_create %p %x", obj, size);
+ *handle_p = handle;
+ return (0);
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+
+ /* have to work out size/pitch and return them */
+ args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
+ args->size = args->pitch * args->height;
+ return (i915_gem_create(file, dev, args->size, &args->handle));
+}
+
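Worked numbers for the dumb-buffer arithmetic above: a 1920x1080 buffer at 32 bpp has a 7680-byte row, which is already 64-byte aligned, so the pitch stays 7680 and the buffer totals 8294400 bytes. A standalone check (roundup2() restated as in sys/param.h):

```c
#include <stdint.h>
#include <stdio.h>

/* roundup2() as in sys/param.h: round up to a power-of-two multiple. */
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	uint32_t width = 1920, height = 1080, bpp = 32;
	uint32_t pitch = roundup2(width * ((bpp + 7) / 8), 64);

	/* 1920 * 4 = 7680 is already 64-byte aligned. */
	printf("pitch=%u size=%u\n", (unsigned)pitch,
	    (unsigned)(pitch * height));
	return (0);
}
```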
+int
+i915_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+
+ return (drm_gem_handle_delete(file, handle));
+}
+
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_create *args = data;
+
+ return (i915_gem_create(file, dev, args->size, &args->handle));
+}
+
+static int
+i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
+ uint64_t data_ptr, uint64_t size, uint64_t offset, enum uio_rw rw,
+ struct drm_file *file)
+{
+ vm_object_t vm_obj;
+ vm_page_t m;
+ struct sf_buf *sf;
+ vm_offset_t mkva;
+ vm_pindex_t obj_pi;
+ int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
+
+ if (obj->gtt_offset != 0 && rw == UIO_READ)
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ else
+ do_bit17_swizzling = 0;
+
+ obj->dirty = 1;
+ vm_obj = obj->base.vm_obj;
+ ret = 0;
+
+ VM_OBJECT_LOCK(vm_obj);
+ vm_object_pip_add(vm_obj, 1);
+ while (size > 0) {
+ obj_pi = OFF_TO_IDX(offset);
+ obj_po = offset & PAGE_MASK;
+
+ m = i915_gem_wire_page(vm_obj, obj_pi);
+ VM_OBJECT_UNLOCK(vm_obj);
+
+ sched_pin();
+ sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
+ mkva = sf_buf_kva(sf);
+ length = min(size, PAGE_SIZE - obj_po);
+ while (length > 0) {
+ if (do_bit17_swizzling &&
+ (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
+ cnt = roundup2(obj_po + 1, 64);
+ cnt = min(cnt - obj_po, length);
+ swizzled_po = obj_po ^ 64;
+ } else {
+ cnt = length;
+ swizzled_po = obj_po;
+ }
+ if (rw == UIO_READ)
+ ret = -copyout_nofault(
+ (char *)mkva + swizzled_po,
+ (void *)(uintptr_t)data_ptr, cnt);
+ else
+ ret = -copyin_nofault(
+ (void *)(uintptr_t)data_ptr,
+ (char *)mkva + swizzled_po, cnt);
+ if (ret != 0)
+ break;
+ data_ptr += cnt;
+ size -= cnt;
+ length -= cnt;
+ offset += cnt;
+ obj_po += cnt;
+ }
+ sf_buf_free(sf);
+ sched_unpin();
+ VM_OBJECT_LOCK(vm_obj);
+ if (rw == UIO_WRITE)
+ vm_page_dirty(m);
+ vm_page_reference(m);
+ vm_page_lock(m);
+ vm_page_unwire(m, 1);
+ vm_page_unlock(m);
+ atomic_add_long(&i915_gem_wired_pages_cnt, -1);
+
+ if (ret != 0)
+ break;
+ }
+ vm_object_pip_wakeup(vm_obj);
+ VM_OBJECT_UNLOCK(vm_obj);
+
+ return (ret);
+}
+
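The XOR in i915_gem_swap_io() implements bit-17 swizzle compensation: on machines whose memory controller folds physical address bit 17 into the tiled layout, the two 64-byte halves of every 128-byte span trade places when the page's physical bit 17 is set, which is why cnt is clipped so a copy chunk never crosses a 64-byte boundary in that mode. A standalone illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* The adjustment applied above: flip bit 6 of the within-page offset
 * for pages whose physical address has bit 17 set. */
static unsigned
swizzle_offset(unsigned page_offset, uint64_t phys_addr)
{
	if (phys_addr & (1U << 17))
		return (page_offset ^ 64);
	return (page_offset);
}

int
main(void)
{
	/* Offsets 0 and 64 swap places on an affected page. */
	printf("%u %u\n", swizzle_offset(0, 1U << 17),
	    swizzle_offset(64, 1U << 17));	/* prints "64 0" */
	return (0);
}
```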
+static int
+i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
+ uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
+{
+ vm_offset_t mkva;
+ vm_pindex_t obj_pi;
+ int obj_po, ret;
+
+ obj_pi = OFF_TO_IDX(offset);
+ obj_po = offset & PAGE_MASK;
+
+ mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base + obj->gtt_offset +
+ IDX_TO_OFF(obj_pi), size, PAT_WRITE_COMBINING);
+ ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva +
+ obj_po, size);
+	pmap_unmapdev(mkva, size);
+ return (ret);
+}
+
+static int
+i915_gem_obj_io(struct drm_device *dev, uint32_t handle, uint64_t data_ptr,
+ uint64_t size, uint64_t offset, enum uio_rw rw, struct drm_file *file)
+{
+ struct drm_i915_gem_object *obj;
+ vm_page_t *ma;
+ vm_offset_t start, end;
+ int npages, ret;
+
+ if (size == 0)
+ return (0);
+ start = trunc_page(data_ptr);
+ end = round_page(data_ptr + size);
+ npages = howmany(end - start, PAGE_SIZE);
+ ma = malloc(npages * sizeof(vm_page_t), DRM_I915_GEM, M_WAITOK |
+ M_ZERO);
+ npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
+ (vm_offset_t)data_ptr, size,
+ (rw == UIO_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, ma, npages);
+ if (npages == -1) {
+ ret = -EFAULT;
+ goto free_ma;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret != 0)
+ goto unlocked;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ if (offset > obj->base.size || size > obj->base.size - offset) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (rw == UIO_READ) {
+ CTR3(KTR_DRM, "object_pread %p %jx %jx", obj, offset, size);
+ ret = i915_gem_object_set_cpu_read_domain_range(obj,
+ offset, size);
+ if (ret != 0)
+ goto out;
+ ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset,
+ UIO_READ, file);
+ } else {
+ if (obj->phys_obj) {
+ CTR3(KTR_DRM, "object_phys_write %p %jx %jx", obj,
+ offset, size);
+ ret = i915_gem_phys_pwrite(dev, obj, data_ptr, offset,
+ size, file);
+ } else if (obj->gtt_space &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ CTR3(KTR_DRM, "object_gtt_write %p %jx %jx", obj,
+ offset, size);
+ ret = i915_gem_object_pin(obj, 0, true);
+ if (ret != 0)
+ goto out;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret != 0)
+ goto out_unpin;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret != 0)
+ goto out_unpin;
+ ret = i915_gem_gtt_write(dev, obj, data_ptr, size,
+ offset, file);
+out_unpin:
+ i915_gem_object_unpin(obj);
+ } else {
+ CTR3(KTR_DRM, "object_pwrite %p %jx %jx", obj,
+ offset, size);
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret != 0)
+ goto out;
+ ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset,
+ UIO_WRITE, file);
+ }
+ }
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK(dev);
+unlocked:
+ vm_page_unhold_pages(ma, npages);
+free_ma:
+ free(ma, DRM_I915_GEM);
+ return (ret);
+}
+
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_gem_pread *args;
+
+ args = data;
+ return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size,
+ args->offset, UIO_READ, file));
+}
+
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_gem_pwrite *args;
+
+ args = data;
+ return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size,
+ args->offset, UIO_WRITE, file));
+}
+
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_set_domain *args;
+ struct drm_i915_gem_object *obj;
+ uint32_t read_domains;
+ uint32_t write_domain;
+ int ret;
+
+ if ((dev->driver->driver_features & DRIVER_GEM) == 0)
+ return (-ENODEV);
+
+ args = data;
+ read_domains = args->read_domains;
+ write_domain = args->write_domain;
+
+ if ((write_domain & I915_GEM_GPU_DOMAINS) != 0 ||
+ (read_domains & I915_GEM_GPU_DOMAINS) != 0 ||
+ (write_domain != 0 && read_domains != write_domain))
+ return (-EINVAL);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret != 0)
+ return (ret);
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if ((read_domains & I915_GEM_DOMAIN_GTT) != 0) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+ if (ret == -EINVAL)
+ ret = 0;
+ } else
+ ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK(dev);
+ return (ret);
+}
+
+int
+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_sw_finish *args;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ args = data;
+ ret = 0;
+ if ((dev->driver->driver_features & DRIVER_GEM) == 0)
+ return (-ENODEV);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret != 0)
+ return (ret);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ if (obj->pin_count != 0)
+ i915_gem_object_flush_cpu_write_domain(obj);
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK(dev);
+ return (ret);
+}
+
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_mmap *args;
+ struct drm_gem_object *obj;
+ struct proc *p;
+ vm_map_t map;
+ vm_offset_t addr;
+ vm_size_t size;
+ int error, rv;
+
+ args = data;
+
+ if ((dev->driver->driver_features & DRIVER_GEM) == 0)
+ return (-ENODEV);
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (obj == NULL)
+ return (-ENOENT);
+ error = 0;
+ if (args->size == 0)
+ goto out;
+ p = curproc;
+ map = &p->p_vmspace->vm_map;
+ size = round_page(args->size);
+ PROC_LOCK(p);
+ if (map->size + size > lim_cur(p, RLIMIT_VMEM)) {
+ PROC_UNLOCK(p);
+ error = -ENOMEM;
+ goto out;
+ }
+ PROC_UNLOCK(p);
+
+ addr = 0;
+ vm_object_reference(obj->vm_obj);
+ DRM_UNLOCK(dev);
+ rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size,
+ VMFS_ANY_SPACE, VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_READ | VM_PROT_WRITE, MAP_SHARED);
+ if (rv != KERN_SUCCESS) {
+ vm_object_deallocate(obj->vm_obj);
+ error = -vm_mmap_to_errno(rv);
+ } else {
+ args->addr_ptr = (uint64_t)addr;
+ }
+ DRM_LOCK(dev);
+out:
+ drm_gem_object_unreference(obj);
+ return (error);
+}
+
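+/*
+ * cdev pager methods backing GTT mmaps of GEM objects. The
+ * constructor only picks the pager color; the real work happens in
+ * the fault method below.
+ */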
+static int
+i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
+ vm_ooffset_t foff, struct ucred *cred, u_short *color)
+{
+
+ *color = 0; /* XXXKIB */
+ return (0);
+}
+
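+/* If nonzero, take the DRM lock interruptibly in the pager fault handler. */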
+int i915_intr_pf;
+
+static int
+i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
+ vm_page_t *mres)
+{
+ struct drm_gem_object *gem_obj;
+ struct drm_i915_gem_object *obj;
+ struct drm_device *dev;
+ drm_i915_private_t *dev_priv;
+ vm_page_t m, oldm;
+ int cause, ret;
+ bool write;
+
+ gem_obj = vm_obj->handle;
+ obj = to_intel_bo(gem_obj);
+ dev = obj->base.dev;
+ dev_priv = dev->dev_private;
+#if 0
+ write = (prot & VM_PROT_WRITE) != 0;
+#else
+ write = true;
+#endif
+ vm_object_pip_add(vm_obj, 1);
+
+ /*
+ * Remove the placeholder page inserted by vm_fault() from the
+ * object before dropping the object lock. If
+ * i915_gem_release_mmap() is active in parallel on this gem
+ * object, then it owns the drm device sx and might find the
+ * placeholder already. Then, since the page is busy,
+ * i915_gem_release_mmap() sleeps waiting for the busy state of
+ * the page to clear. We would not be able to acquire the drm
+ * device lock until i915_gem_release_mmap() made progress.
+ */
+ if (*mres != NULL) {
+ oldm = *mres;
+ vm_page_lock(oldm);
+ vm_page_remove(oldm);
+ vm_page_unlock(oldm);
+ *mres = NULL;
+ } else
+ oldm = NULL;
+retry:
+ VM_OBJECT_UNLOCK(vm_obj);
+unlocked_vmobj:
+ cause = ret = 0;
+ m = NULL;
+
+ if (i915_intr_pf) {
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret != 0) {
+ cause = 10;
+ goto out;
+ }
+ } else
+ DRM_LOCK(dev);
+
+ /* Now bind it into the GTT if needed */
+ if (!obj->map_and_fenceable) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret != 0) {
+ cause = 20;
+ goto unlock;
+ }
+ }
+ if (!obj->gtt_space) {
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true);
+ if (ret != 0) {
+ cause = 30;
+ goto unlock;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret != 0) {
+ cause = 40;
+ goto unlock;
+ }
+ }
+
+ if (obj->tiling_mode == I915_TILING_NONE)
+ ret = i915_gem_object_put_fence(obj);
+ else
+ ret = i915_gem_object_get_fence(obj, NULL);
+ if (ret != 0) {
+ cause = 50;
+ goto unlock;
+ }
+
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ obj->fault_mappable = true;
+ VM_OBJECT_LOCK(vm_obj);
+ m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
+ offset);
+ if (m == NULL) {
+ cause = 60;
+ ret = -EFAULT;
+ goto unlock;
+ }
+ KASSERT((m->flags & PG_FICTITIOUS) != 0,
+ ("not fictitious %p", m));
+ KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
+
+ if ((m->flags & VPO_BUSY) != 0) {
+ DRM_UNLOCK(dev);
+ vm_page_sleep_if_busy(m, false, "915pbs");
+ goto retry;
+ }
+ m->valid = VM_PAGE_BITS_ALL;
+ *mres = m;
+ vm_page_lock(m);
+ vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
+ vm_page_unlock(m);
+ vm_page_busy(m);
+
+ CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot,
+ m->phys_addr);
+ DRM_UNLOCK(dev);
+ if (oldm != NULL) {
+ vm_page_lock(oldm);
+ vm_page_free(oldm);
+ vm_page_unlock(oldm);
+ }
+ vm_object_pip_wakeup(vm_obj);
+ return (VM_PAGER_OK);
+
+unlock:
+ DRM_UNLOCK(dev);
+out:
+ KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
+ CTR5(KTR_DRM, "fault_fail %p %jx %x err %d %d", gem_obj, offset, prot,
+ -ret, cause);
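+ /*
+ * Errors that may resolve on retry (wedged GPU recovery in
+ * progress or a pending signal) restart the whole fault after
+ * yielding the CPU.
+ */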
+ if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
+ kern_yield(PRI_USER);
+ goto unlocked_vmobj;
+ }
+ VM_OBJECT_LOCK(vm_obj);
+ vm_object_pip_wakeup(vm_obj);
+ return (VM_PAGER_ERROR);
+}
+
+static void
+i915_gem_pager_dtor(void *handle)
+{
+ struct drm_gem_object *obj;
+ struct drm_device *dev;
+
+ obj = handle;
+ dev = obj->dev;
+
+ DRM_LOCK(dev);
+ drm_gem_free_mmap_offset(obj);
+ i915_gem_release_mmap(to_intel_bo(obj));
+ drm_gem_object_unreference(obj);
+ DRM_UNLOCK(dev);
+}
+
+struct cdev_pager_ops i915_gem_pager_ops = {
+ .cdev_pg_fault = i915_gem_pager_fault,
+ .cdev_pg_ctor = i915_gem_pager_ctor,
+ .cdev_pg_dtor = i915_gem_pager_dtor
+};
+
+int
+i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_i915_private *dev_priv;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return (-ENODEV);
+
+ dev_priv = dev->dev_private;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret != 0)
+ return (ret);
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != 0)
+ goto out;
+
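+ /*
+ * Hand userspace a fake offset; mmap(2) on the DRM device node
+ * recognizes the key and maps the underlying GEM object.
+ */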
+ *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
+ DRM_GEM_MAPPING_KEY;
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK(dev);
+ return (ret);
+}
+
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv;
+ struct drm_i915_gem_mmap_gtt *args;
+
+ dev_priv = dev->dev_private;
+ args = data;
+
+ return (i915_gem_mmap_gtt(file, dev, args->handle, &args->offset));
+}
+
+struct drm_i915_gem_object *
+i915_gem_alloc_object(struct drm_device *dev, size_t size)
+{
+ struct drm_i915_private *dev_priv;
+ struct drm_i915_gem_object *obj;
+
+ dev_priv = dev->dev_private;
+
+ obj = malloc(sizeof(*obj), DRM_I915_GEM, M_WAITOK | M_ZERO);
+
+ if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+ free(obj, DRM_I915_GEM);
+ return (NULL);
+ }
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+ if (HAS_LLC(dev))
+ obj->cache_level = I915_CACHE_LLC;
+ else
+ obj->cache_level = I915_CACHE_NONE;
+ obj->base.driver_private = NULL;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->gtt_list);
+ INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->exec_list);
+ INIT_LIST_HEAD(&obj->gpu_write_list);
+ obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
+
+ i915_gem_info_add_obj(dev_priv, size);
+
+ return (obj);
+}
+
+void
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
+{
+
+ /* If we don't have a page list set up, then we're not pinned
+ * to GPU, and we can ignore the cache flush because it'll happen
+ * again at bind time.
+ */
+ if (obj->pages == NULL)
+ return;
+
+ /* If the GPU is snooping the contents of the CPU cache,
+ * we do not need to manually clear the CPU cache lines. However,
+ * the caches are only snooped when the render cache is
+ * flushed/invalidated. As we always have to emit invalidations
+ * and flushes when moving into and out of the RENDER domain, correct
+ * snooping behaviour occurs naturally as the result of our domain
+ * tracking.
+ */
+ if (obj->cache_level != I915_CACHE_NONE)
+ return;
+
+ CTR1(KTR_DRM, "object_clflush %p", obj);
+ drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
+}
+
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
+{
+ uint32_t old_write_domain;
+
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
+ return;
+
+ i915_gem_clflush_object(obj);
+ intel_gtt_chipset_flush();
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
+
+ CTR3(KTR_DRM, "object_change_domain flush_cpu_write %p %x %x", obj,
+ obj->base.read_domains, old_write_domain);
+}
+
+static int
+i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
+{
+
+ if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ return (0);
+ return (i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain));
+}
+
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
+{
+ uint32_t old_write_domain;
+
+ if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
+ return;
+
+ wmb();
+
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
+
+ CTR3(KTR_DRM, "object_change_domain flush gtt_write %p %x %x", obj,
+ obj->base.read_domains, old_write_domain);
+}
+
+int
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ uint32_t old_write_domain, old_read_domains;
+ int ret;
+
+ if (obj->gtt_space == NULL)
+ return (-EINVAL);
+
+ if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret != 0)
+ return (ret);
+
+ if (obj->pending_gpu_write || write) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return (ret);
+ }
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
+
+ KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) == 0,
+ ("In GTT write domain"));
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ if (write) {
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->dirty = 1;
+ }
+
+ CTR3(KTR_DRM, "object_change_domain set_to_gtt %p %x %x", obj,
+ old_read_domains, old_write_domain);
+ return (0);
+}
+
+int
+i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev;
+ drm_i915_private_t *dev_priv;
+ int ret;
+
+ if (obj->cache_level == cache_level)
+ return 0;
+
+ if (obj->pin_count) {
+ DRM_DEBUG("can not change the cache level of pinned objects\n");
+ return (-EBUSY);
+ }
+
+ dev = obj->base.dev;
+ dev_priv = dev->dev_private;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_finish_gpu(obj);
+ if (ret != 0)
+ return (ret);
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* Before SandyBridge, you could not use tiling or fence
+ * registers with snooped memory, so relinquish any fences
+ * currently pointing to our region in the aperture.
+ */
+ if (INTEL_INFO(obj->base.dev)->gen < 6) {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret != 0)
+ return (ret);
+ }
+
+ i915_gem_gtt_rebind_object(obj, cache_level);
+ if (obj->has_aliasing_ppgtt_mapping)
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, cache_level);
+ }
+
+ if (cache_level == I915_CACHE_NONE) {
+ u32 old_read_domains, old_write_domain;
+
+ /* If we're coming from LLC cached, then we haven't
+ * actually been tracking whether the data is in the
+ * CPU cache or not, since we only allow one bit set
+ * in obj->write_domain and have been skipping the clflushes.
+ * Just set it to the CPU cache for now.
+ */
+ KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
+ ("obj %p in CPU write domain", obj));
+ KASSERT((obj->base.read_domains & ~I915_GEM_DOMAIN_CPU) == 0,
+ ("obj %p in CPU read domain", obj));
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+ CTR3(KTR_DRM, "object_change_domain set_cache_level %p %x %x",
+ obj, old_read_domains, old_write_domain);
+ }
+
+ obj->cache_level = cache_level;
+ return (0);
+}
+
+int
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment, struct intel_ring_buffer *pipelined)
+{
+ u32 old_read_domains, old_write_domain;
+ int ret;
+
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret != 0)
+ return (ret);
+
+ if (pipelined != obj->ring) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret == -ERESTART || ret == -EINTR)
+ return (ret);
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret != 0)
+ return (ret);
+
+ ret = i915_gem_object_pin(obj, alignment, true);
+ if (ret != 0)
+ return (ret);
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
+
+ KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) == 0,
+ ("obj %p in GTT write domain", obj));
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+
+ CTR3(KTR_DRM, "object_change_domain pin_to_display_plan %p %x %x",
+ obj, old_read_domains, obj->base.write_domain);
+ return (0);
+}
+
+int
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
+ return (0);
+
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
+ if (ret != 0)
+ return (ret);
+ }
+
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return (ret);
+
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
+ return (0);
+}
+
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ uint32_t old_write_domain, old_read_domains;
+ int ret;
+
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return 0;
+
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret != 0)
+ return (ret);
+
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return (ret);
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+ i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ i915_gem_clflush_object(obj);
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+ }
+
+ KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
+ ("In cpu write domain"));
+
+ if (write) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ CTR3(KTR_DRM, "object_change_domain set_to_cpu %p %x %x", obj,
+ old_read_domains, old_write_domain);
+ return (0);
+}
+
+static void
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
+{
+ int i;
+
+ if (obj->page_cpu_valid == NULL)
+ return;
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) {
+ for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
+ if (obj->page_cpu_valid[i] != 0)
+ continue;
+ drm_clflush_pages(obj->pages + i, 1);
+ }
+ }
+
+ free(obj->page_cpu_valid, DRM_I915_GEM);
+ obj->page_cpu_valid = NULL;
+}
+
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+ uint64_t offset, uint64_t size)
+{
+ uint32_t old_read_domains;
+ int i, ret;
+
+ if (offset == 0 && size == obj->base.size)
+ return (i915_gem_object_set_to_cpu_domain(obj, 0));
+
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret != 0)
+ return (ret);
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return (ret);
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ if (obj->page_cpu_valid == NULL &&
+ (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
+ return (0);
+
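+ /*
+ * page_cpu_valid is a byte-per-page map recording which pages
+ * have already been clflushed into the CPU domain, so partial
+ * reads do not flush the entire object.
+ */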
+ if (obj->page_cpu_valid == NULL) {
+ obj->page_cpu_valid = malloc(obj->base.size / PAGE_SIZE,
+ DRM_I915_GEM, M_WAITOK | M_ZERO);
+ } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
+
+ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+ i++) {
+ if (obj->page_cpu_valid[i])
+ continue;
+ drm_clflush_pages(obj->pages + i, 1);
+ obj->page_cpu_valid[i] = 1;
+ }
+
+ KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
+ ("In gpu write domain"));
+
+ old_read_domains = obj->base.read_domains;
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+
+ CTR3(KTR_DRM, "object_change_domain set_cpu_read %p %x %x", obj,
+ old_read_domains, obj->base.write_domain);
+ return (0);
+}
+
+static uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+{
+ uint32_t gtt_size;
+
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ tiling_mode == I915_TILING_NONE)
+ return (size);
+
+ /* Previous chips need a power-of-two fence region when tiling */
+ if (INTEL_INFO(dev)->gen == 3)
+ gtt_size = 1024*1024;
+ else
+ gtt_size = 512*1024;
+
+ while (gtt_size < size)
+ gtt_size <<= 1;
+
+ return (gtt_size);
+}
+
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping.
+ */
+static uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode)
+{
+
+ /*
+ * Minimum alignment is 4k (GTT page size), but might be greater
+ * if a fence register is needed for the object.
+ */
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ tiling_mode == I915_TILING_NONE)
+ return (4096);
+
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
+ return (i915_gem_get_gtt_size(dev, size, tiling_mode));
+}
+
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode)
+{
+
+ if (tiling_mode == I915_TILING_NONE)
+ return (4096);
+
+ /*
+ * Minimum alignment is 4k (GTT page size) for sane hw.
+ */
+ if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev))
+ return (4096);
+
+ /*
+ * Older hardware, however, needs to be aligned to a power-of-two
+ * tile height. The simplest way to satisfy that is to reuse the
+ * power-of-two fence size computed by i915_gem_get_gtt_size().
+ */
+ return (i915_gem_get_gtt_size(dev, size, tiling_mode));
+}
+
+static int
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment, bool map_and_fenceable)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ struct drm_mm_node *free_space;
+ uint32_t size, fence_size, fence_alignment, unfenced_alignment;
+ bool mappable, fenceable;
+ int ret;
+
+ dev = obj->base.dev;
+ dev_priv = dev->dev_private;
+
+ if (obj->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to bind a purgeable object\n");
+ return (-EINVAL);
+ }
+
+ fence_size = i915_gem_get_gtt_size(dev, obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size,
+ obj->tiling_mode);
+ unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(dev,
+ obj->base.size, obj->tiling_mode);
+ if (alignment == 0)
+ alignment = map_and_fenceable ? fence_alignment :
+ unfenced_alignment;
+ if (map_and_fenceable && (alignment & (fence_alignment - 1)) != 0) {
+ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+ return (-EINVAL);
+ }
+
+ size = map_and_fenceable ? fence_size : obj->base.size;
+
+ /* If the object is bigger than the entire aperture, reject it early
+ * before evicting everything in a vain attempt to find space.
+ */
+ if (obj->base.size > (map_and_fenceable ?
+ dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
+ DRM_ERROR(
+"Attempting to bind an object larger than the aperture\n");
+ return (-E2BIG);
+ }
+
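+ /*
+ * Carve a suitably aligned range out of the GTT, evicting and
+ * retrying until the allocation succeeds or eviction fails.
+ */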
+ search_free:
+ if (map_and_fenceable)
+ free_space = drm_mm_search_free_in_range(
+ &dev_priv->mm.gtt_space, size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end, 0);
+ else
+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ size, alignment, 0);
+ if (free_space != NULL) {
+ if (map_and_fenceable)
+ obj->gtt_space = drm_mm_get_block_range_generic(
+ free_space, size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end, 1);
+ else
+ obj->gtt_space = drm_mm_get_block_generic(free_space,
+ size, alignment, 1);
+ }
+ if (obj->gtt_space == NULL) {
+ ret = i915_gem_evict_something(dev, size, alignment,
+ map_and_fenceable);
+ if (ret != 0)
+ return (ret);
+ goto search_free;
+ }
+ ret = i915_gem_object_get_pages_gtt(obj, 0);
+ if (ret != 0) {
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+ /*
+ * i915_gem_object_get_pages_gtt() cannot return
+ * ENOMEM, since pages are allocated with
+ * vm_page_grab(VM_ALLOC_RETRY), which sleeps until a page
+ * is available instead of failing.
+ */
+ return (ret);
+ }
+
+ ret = i915_gem_gtt_bind_object(obj);
+ if (ret != 0) {
+ i915_gem_object_put_pages_gtt(obj);
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+ if (i915_gem_evict_everything(dev, false))
+ return (ret);
+ goto search_free;
+ }
+
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+ list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ KASSERT((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0,
+ ("Object in gpu read domain"));
+ KASSERT((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0,
+ ("Object in gpu write domain"));
+
+ obj->gtt_offset = obj->gtt_space->start;
+
+ fenceable =
+ obj->gtt_space->size == fence_size &&
+ (obj->gtt_space->start & (fence_alignment - 1)) == 0;
+
+ mappable =
+ obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+ obj->map_and_fenceable = mappable && fenceable;
+
+ CTR4(KTR_DRM, "object_bind %p %x %x %d", obj, obj->gtt_offset,
+ obj->base.size, map_and_fenceable);
+ return (0);
+}
+
+static void
+i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+ u32 old_write_domain, old_read_domains;
+
+ /* Act as a barrier for all accesses through the GTT */
+ mb();
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ return;
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+ CTR3(KTR_DRM, "object_change_domain finish gtt %p %x %x",
+ obj, old_read_domains, old_write_domain);
+}
+
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv;
+ int ret;
+
+ dev_priv = obj->base.dev->dev_private;
+ ret = 0;
+ if (obj->gtt_space == NULL)
+ return (0);
+ if (obj->pin_count != 0) {
+ DRM_ERROR("Attempting to unbind pinned buffer\n");
+ return (-EINVAL);
+ }
+
+ ret = i915_gem_object_finish_gpu(obj);
+ if (ret == -ERESTART || ret == -EINTR)
+ return (ret);
+
+ i915_gem_object_finish_gtt(obj);
+
+ if (ret == 0)
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret == -ERESTART || ret == -EINTR)
+ return (ret);
+ if (ret != 0) {
+ i915_gem_clflush_object(obj);
+ obj->base.read_domains = obj->base.write_domain =
+ I915_GEM_DOMAIN_CPU;
+ }
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret == -ERESTART)
+ return (ret);
+
+ i915_gem_gtt_unbind_object(obj);
+ if (obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
+ obj->has_aliasing_ppgtt_mapping = 0;
+ }
+ i915_gem_object_put_pages_gtt(obj);
+
+ list_del_init(&obj->gtt_list);
+ list_del_init(&obj->mm_list);
+ obj->map_and_fenceable = true;
+
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+ obj->gtt_offset = 0;
+
+ if (i915_gem_object_is_purgeable(obj))
+ i915_gem_object_truncate(obj);
+ CTR1(KTR_DRM, "object_unbind %p", obj);
+
+ return (ret);
+}
+
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+ int flags)
+{
+ struct drm_device *dev;
+ vm_object_t vm_obj;
+ vm_page_t m;
+ int page_count, i, j;
+
+ dev = obj->base.dev;
+ KASSERT(obj->pages == NULL, ("Obj already has pages"));
+ page_count = obj->base.size / PAGE_SIZE;
+ obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
+ M_WAITOK);
+ vm_obj = obj->base.vm_obj;
+ VM_OBJECT_LOCK(vm_obj);
+ for (i = 0; i < page_count; i++) {
+ if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
+ goto failed;
+ }
+ VM_OBJECT_UNLOCK(vm_obj);
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_do_bit_17_swizzle(obj);
+ return (0);
+
+failed:
+ for (j = 0; j < i; j++) {
+ m = obj->pages[j];
+ vm_page_lock(m);
+ vm_page_unwire(m, 0);
+ vm_page_unlock(m);
+ atomic_add_long(&i915_gem_wired_pages_cnt, -1);
+ }
+ VM_OBJECT_UNLOCK(vm_obj);
+ free(obj->pages, DRM_I915_GEM);
+ obj->pages = NULL;
+ return (-EIO);
+}
+
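+/*
+ * Optional sanity check: verify, before pages are unwired, that none
+ * of them is still referenced by a GTT PTE.
+ */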
+#define GEM_PARANOID_CHECK_GTT 0
+#if GEM_PARANOID_CHECK_GTT
+static void
+i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
+ int page_count)
+{
+ struct drm_i915_private *dev_priv;
+ vm_paddr_t pa;
+ unsigned long start, end;
+ u_int i;
+ int j;
+
+ dev_priv = dev->dev_private;
+ start = OFF_TO_IDX(dev_priv->mm.gtt_start);
+ end = OFF_TO_IDX(dev_priv->mm.gtt_end);
+ for (i = start; i < end; i++) {
+ pa = intel_gtt_read_pte_paddr(i);
+ for (j = 0; j < page_count; j++) {
+ if (pa == VM_PAGE_TO_PHYS(ma[j])) {
+ panic("Page %p in GTT pte index %d pte %x",
+ ma[j], i, intel_gtt_read_pte(i));
+ }
+ }
+ }
+}
+#endif
+
+static void
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+{
+ vm_page_t m;
+ int page_count, i;
+
+ KASSERT(obj->madv != I915_MADV_PURGED_INTERNAL, ("Purged object"));
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_save_bit_17_swizzle(obj);
+ if (obj->madv == I915_MADV_DONTNEED)
+ obj->dirty = 0;
+ page_count = obj->base.size / PAGE_SIZE;
+ VM_OBJECT_LOCK(obj->base.vm_obj);
+#if GEM_PARANOID_CHECK_GTT
+ i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
+#endif
+ for (i = 0; i < page_count; i++) {
+ m = obj->pages[i];
+ if (obj->dirty)
+ vm_page_dirty(m);
+ if (obj->madv == I915_MADV_WILLNEED)
+ vm_page_reference(m);
+ vm_page_lock(m);
+ vm_page_unwire(obj->pages[i], 1);
+ vm_page_unlock(m);
+ atomic_add_long(&i915_gem_wired_pages_cnt, -1);
+ }
+ VM_OBJECT_UNLOCK(obj->base.vm_obj);
+ obj->dirty = 0;
+ free(obj->pages, DRM_I915_GEM);
+ obj->pages = NULL;
+}
+
+void
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
+{
+ vm_object_t devobj;
+ vm_page_t m;
+ int i, page_count;
+
+ if (!obj->fault_mappable)
+ return;
+
+ CTR3(KTR_DRM, "release_mmap %p %x %x", obj, obj->gtt_offset,
+ OFF_TO_IDX(obj->base.size));
+ devobj = cdev_pager_lookup(obj);
+ if (devobj != NULL) {
+ page_count = OFF_TO_IDX(obj->base.size);
+
+ VM_OBJECT_LOCK(devobj);
+retry:
+ for (i = 0; i < page_count; i++) {
+ m = vm_page_lookup(devobj, i);
+ if (m == NULL)
+ continue;
+ if (vm_page_sleep_if_busy(m, true, "915unm"))
+ goto retry;
+ cdev_pager_free_page(devobj, m);
+ }
+ VM_OBJECT_UNLOCK(devobj);
+ vm_object_deallocate(devobj);
+ }
+
+ obj->fault_mappable = false;
+}
+
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ KASSERT((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0,
+ ("In GPU write domain"));
+
+ CTR5(KTR_DRM, "object_wait_rendering %p %s %x %d %d", obj,
+ obj->ring != NULL ? obj->ring->name : "none", obj->gtt_offset,
+ obj->active, obj->last_rendering_seqno);
+ if (obj->active) {
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
+ true);
+ if (ret != 0)
+ return (ret);
+ }
+ return (0);
+}
+
+void
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring, uint32_t seqno)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_fence_reg *reg;
+
+ obj->ring = ring;
+ KASSERT(ring != NULL, ("NULL ring"));
+
+ /* Add a reference if we're newly entering the active list. */
+ if (!obj->active) {
+ drm_gem_object_reference(&obj->base);
+ obj->active = 1;
+ }
+
+ /* Move from whatever list we were on to the tail of execution. */
+ list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj->ring_list, &ring->active_list);
+
+ obj->last_rendering_seqno = seqno;
+ if (obj->fenced_gpu_access) {
+ obj->last_fenced_seqno = seqno;
+ obj->last_fenced_ring = ring;
+
+ /* Bump MRU to take account of the delayed flush */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ }
+ }
+}
+
+static void
+i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
+{
+ list_del_init(&obj->ring_list);
+ obj->last_rendering_seqno = 0;
+ obj->last_fenced_seqno = 0;
+}
+
+static void
+i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ KASSERT(obj->active, ("Object not active"));
+ list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
+
+ i915_gem_object_move_off_active(obj);
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (obj->pin_count != 0)
+ list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+ else
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ KASSERT(list_empty(&obj->gpu_write_list), ("On gpu_write_list"));
+ KASSERT(obj->active, ("Object not active"));
+ obj->ring = NULL;
+ obj->last_fenced_ring = NULL;
+
+ i915_gem_object_move_off_active(obj);
+ obj->fenced_gpu_access = false;
+
+ obj->active = 0;
+ obj->pending_gpu_write = false;
+ drm_gem_object_unreference(&obj->base);
+
+#if 1
+ KIB_NOTYET();
+#else
+ WARN_ON(i915_verify_lists(dev));
+#endif
+}
+
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+{
+ vm_object_t vm_obj;
+
+ vm_obj = obj->base.vm_obj;
+ VM_OBJECT_LOCK(vm_obj);
+ vm_object_page_remove(vm_obj, 0, 0, false);
+ VM_OBJECT_UNLOCK(vm_obj);
+ obj->madv = I915_MADV_PURGED_INTERNAL;
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+
+ return (obj->madv == I915_MADV_DONTNEED);
+}
+
+static void
+i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+ uint32_t flush_domains)
+{
+ struct drm_i915_gem_object *obj, *next;
+ uint32_t old_write_domain;
+
+ list_for_each_entry_safe(obj, next, &ring->gpu_write_list,
+ gpu_write_list) {
+ if (obj->base.write_domain & flush_domains) {
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_active(obj, ring,
+ i915_gem_next_request_seqno(ring));
+
+ CTR3(KTR_DRM, "object_change_domain process_flush %p %x %x",
+ obj, obj->base.read_domains, old_write_domain);
+ }
+ }
+}
+
+static int
+i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv;
+
+ dev_priv = obj->base.dev->dev_private;
+ return (dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ obj->tiling_mode != I915_TILING_NONE);
+}
+
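+/*
+ * Grab the page at the given index, paging it in from the backing
+ * store or zero-filling it if it was never written, and wire it so
+ * it cannot be reclaimed while the GPU may access it.
+ */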
+static vm_page_t
+i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
+{
+ vm_page_t m;
+ int rv;
+
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+ if (m->valid != VM_PAGE_BITS_ALL) {
+ if (vm_pager_has_page(object, pindex, NULL, NULL)) {
+ rv = vm_pager_get_pages(object, &m, 1, 0);
+ m = vm_page_lookup(object, pindex);
+ if (m == NULL)
+ return (NULL);
+ if (rv != VM_PAGER_OK) {
+ vm_page_lock(m);
+ vm_page_free(m);
+ vm_page_unlock(m);
+ return (NULL);
+ }
+ } else {
+ pmap_zero_page(m);
+ m->valid = VM_PAGE_BITS_ALL;
+ m->dirty = 0;
+ }
+ }
+ vm_page_lock(m);
+ vm_page_wire(m);
+ vm_page_unlock(m);
+ vm_page_wakeup(m);
+ atomic_add_long(&i915_gem_wired_pages_cnt, 1);
+ return (m);
+}
+
+int
+i915_gem_flush_ring(struct intel_ring_buffer *ring, uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ int ret;
+
+ if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
+ return 0;
+
+ CTR3(KTR_DRM, "ring_flush %s %x %x", ring->name, invalidate_domains,
+ flush_domains);
+ ret = ring->flush(ring, invalidate_domains, flush_domains);
+ if (ret)
+ return ret;
+
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
+ i915_gem_process_flushing_list(ring, flush_domains);
+ return 0;
+}
+
+static int
+i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+{
+ int ret;
+
+ if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+ return 0;
+
+ if (!list_empty(&ring->gpu_write_list)) {
+ ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
+ if (ret != 0)
+ return ret;
+ }
+
+ return (i915_wait_request(ring, i915_gem_next_request_seqno(ring),
+ do_retire));
+}
+
+int
+i915_gpu_idle(struct drm_device *dev, bool do_retire)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret, i;
+
+ /* Flush everything onto the inactive list. */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ret = i915_ring_idle(&dev_priv->rings[i], do_retire);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno, bool do_retire)
+{
+ drm_i915_private_t *dev_priv;
+ struct drm_i915_gem_request *request;
+ uint32_t ier;
+ int flags, ret;
+ bool recovery_complete;
+
+ KASSERT(seqno != 0, ("Zero seqno"));
+
+ dev_priv = ring->dev->dev_private;
+ ret = 0;
+
+ if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
+ /* Give the error handler a chance to run. */
+ mtx_lock(&dev_priv->error_completion_lock);
+ recovery_complete = (&dev_priv->error_completion) > 0;
+ mtx_unlock(&dev_priv->error_completion_lock);
+ return (recovery_complete ? -EIO : -EAGAIN);
+ }
+
+ if (seqno == ring->outstanding_lazy_request) {
+ request = malloc(sizeof(*request), DRM_I915_GEM,
+ M_WAITOK | M_ZERO);
+ if (request == NULL)
+ return (-ENOMEM);
+
+ ret = i915_add_request(ring, NULL, request);
+ if (ret != 0) {
+ free(request, DRM_I915_GEM);
+ return (ret);
+ }
+
+ seqno = request->seqno;
+ }
+
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
+ if (HAS_PCH_SPLIT(ring->dev))
+ ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else
+ ier = I915_READ(IER);
+ if (!ier) {
+ DRM_ERROR("something (likely vbetool) disabled "
+ "interrupts, re-enabling\n");
+ ring->dev->driver->irq_preinstall(ring->dev);
+ ring->dev->driver->irq_postinstall(ring->dev);
+ }
+
+ CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
+
+ ring->waiting_seqno = seqno;
+ mtx_lock(&ring->irq_lock);
+ if (ring->irq_get(ring)) {
+ flags = dev_priv->mm.interruptible ? PCATCH : 0;
+ while (!i915_seqno_passed(ring->get_seqno(ring), seqno)
+ && !atomic_load_acq_int(&dev_priv->mm.wedged) &&
+ ret == 0) {
+ ret = -msleep(ring, &ring->irq_lock, flags,
+ "915gwr", 0);
+ }
+ ring->irq_put(ring);
+ mtx_unlock(&ring->irq_lock);
+ } else {
+ mtx_unlock(&ring->irq_lock);
+ if (_intel_wait_for(ring->dev,
+ i915_seqno_passed(ring->get_seqno(ring), seqno) ||
+ atomic_load_acq_int(&dev_priv->mm.wedged), 3000,
+ 0, "i915wrq") != 0)
+ ret = -EBUSY;
+ }
+ ring->waiting_seqno = 0;
+
+ CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno,
+ ret);
+ }
+ if (atomic_load_acq_int(&dev_priv->mm.wedged))
+ ret = -EAGAIN;
+
+ /* Directly dispatch request retiring. While we have the work queue
+ * to handle this, the waiter on a request often wants an associated
+ * buffer to have made it to the inactive list, and we would need
+ * a separate wait queue to handle that.
+ */
+ if (ret == 0 && do_retire)
+ i915_gem_retire_requests_ring(ring);
+
+ return (ret);
+}
+
+static u32
+i915_gem_get_seqno(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 seqno = dev_priv->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++dev_priv->next_seqno == 0)
+ dev_priv->next_seqno = 1;
+
+ return seqno;
+}
+
+u32
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->outstanding_lazy_request == 0)
+ ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+
+ return ring->outstanding_lazy_request;
+}
+
+int
+i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
+ struct drm_i915_gem_request *request)
+{
+ drm_i915_private_t *dev_priv;
+ struct drm_i915_file_private *file_priv;
+ uint32_t seqno;
+ u32 request_ring_position;
+ int was_empty;
+ int ret;
+
+ KASSERT(request != NULL, ("NULL request in add"));
+ DRM_LOCK_ASSERT(ring->dev);
+ dev_priv = ring->dev->dev_private;
+
+ seqno = i915_gem_next_request_seqno(ring);
+ request_ring_position = intel_ring_get_tail(ring);
+
+ ret = ring->add_request(ring, &seqno);
+ if (ret != 0)
+ return ret;
+
+ CTR2(KTR_DRM, "request_add %s %d", ring->name, seqno);
+
+ request->seqno = seqno;
+ request->ring = ring;
+ request->tail = request_ring_position;
+ request->emitted_jiffies = ticks;
+ was_empty = list_empty(&ring->request_list);
+ list_add_tail(&request->list, &ring->request_list);
+
+ if (file != NULL) {
+ file_priv = file->driver_priv;
+
+ mtx_lock(&file_priv->mm.lck);
+ request->file_priv = file_priv;
+ list_add_tail(&request->client_list,
+ &file_priv->mm.request_list);
+ mtx_unlock(&file_priv->mm.lck);
+ }
+
+ ring->outstanding_lazy_request = 0;
+
+ if (!dev_priv->mm.suspended) {
+ if (i915_enable_hangcheck) {
+ callout_schedule(&dev_priv->hangcheck_timer,
+ DRM_I915_HANGCHECK_PERIOD);
+ }
+ if (was_empty)
+ taskqueue_enqueue_timeout(dev_priv->tq,
+ &dev_priv->mm.retire_task, hz);
+ }
+ return (0);
+}
+
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_file_private *file_priv = request->file_priv;
+
+ if (!file_priv)
+ return;
+
+ DRM_LOCK_ASSERT(request->ring->dev);
+
+ mtx_lock(&file_priv->mm.lck);
+ if (request->file_priv != NULL) {
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ mtx_unlock(&file_priv->mm.lck);
+}
+
+void
+i915_gem_release(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv;
+ struct drm_i915_gem_request *request;
+
+ file_priv = file->driver_priv;
+
+ /* Clean up our request list when the client is going away, so that
+ * later retire_requests won't dereference our soon-to-be-gone
+ * file_priv.
+ */
+ mtx_lock(&file_priv->mm.lck);
+ while (!list_empty(&file_priv->mm.request_list)) {
+ request = list_first_entry(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ mtx_unlock(&file_priv->mm.lck);
+}
+
+static void
+i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+
+ if (ring->dev != NULL)
+ DRM_LOCK_ASSERT(ring->dev);
+
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request, list);
+
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ free(request, DRM_I915_GEM);
+ }
+
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object, ring_list);
+
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_inactive(obj);
+ }
+}
+
+static void
+i915_gem_reset_fences(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ struct drm_i915_gem_object *obj = reg->obj;
+
+ if (!obj)
+ continue;
+
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ reg->obj->fence_reg = I915_FENCE_REG_NONE;
+ reg->obj->fenced_gpu_access = false;
+ reg->obj->last_fenced_seqno = 0;
+ reg->obj->last_fenced_ring = NULL;
+ i915_gem_clear_fence_reg(dev, reg);
+ }
+}
+
+void
+i915_gem_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int i;
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->rings[i]);
+
+ /* Remove anything from the flushing lists. The GPU cache is likely
+ * to be lost on reset along with the data, so simply move the
+ * lost bo to the inactive list.
+ */
+ while (!list_empty(&dev_priv->mm.flushing_list)) {
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ mm_list);
+
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_inactive(obj);
+ }
+
+ /* Move everything out of the GPU domains to ensure we do any
+ * necessary invalidation upon reuse.
+ */
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ /* The fence registers are invalidated so clear them out */
+ i915_gem_reset_fences(dev);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+{
+ uint32_t seqno;
+ int i;
+
+ if (list_empty(&ring->request_list))
+ return;
+
+ seqno = ring->get_seqno(ring);
+ CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
+
+ for (i = 0; i < DRM_ARRAY_SIZE(ring->sync_seqno); i++)
+ if (seqno >= ring->sync_seqno[i])
+ ring->sync_seqno[i] = 0;
+
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ if (!i915_seqno_passed(seqno, request->seqno))
+ break;
+
+ CTR2(KTR_DRM, "retire_request_seqno_passed %s %d",
+ ring->name, seqno);
+ ring->last_retired_head = request->tail;
+
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ free(request, DRM_I915_GEM);
+ }
+
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ break;
+
+ if (obj->base.write_domain != 0)
+ i915_gem_object_move_to_flushing(obj);
+ else
+ i915_gem_object_move_to_inactive(obj);
+ }
+
+ if (ring->trace_irq_seqno &&
+ i915_seqno_passed(seqno, ring->trace_irq_seqno)) {
+ mtx_lock(&ring->irq_lock);
+ ring->irq_put(ring);
+ mtx_unlock(&ring->irq_lock);
+ ring->trace_irq_seqno = 0;
+ }
+}
+
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj, *next;
+ int i;
+
+ if (!list_empty(&dev_priv->mm.deferred_free_list)) {
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.deferred_free_list, mm_list)
+ i915_gem_free_object_tail(obj);
+ }
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_retire_requests_ring(&dev_priv->rings[i]);
+}
+
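+/*
+ * The per-generation fence register writers below encode the object's
+ * GTT range, stride and tiling mode into a fence register, either
+ * directly via MMIO or, when a pipelined ring is given, with
+ * MI_LOAD_REGISTER_IMM commands emitted on that ring.
+ */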
+static int
+sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
+ uint64_t val;
+
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) <<
+ SANDYBRIDGE_FENCE_PITCH_SHIFT;
+
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
+ intel_ring_emit(pipelined, (u32)val);
+ intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
+ intel_ring_emit(pipelined, (u32)(val >> 32));
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+
+ return 0;
+}
+
+static int
+i965_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
+ uint64_t val;
+
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
+ intel_ring_emit(pipelined, (u32)val);
+ intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
+ intel_ring_emit(pipelined, (u32)(val >> 32));
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+
+ return 0;
+}
+
+static int
+i915_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 size = obj->gtt_space->size;
+ u32 fence_reg, val, pitch_val;
+ int tile_width;
+
+ if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size || (obj->gtt_offset & (size - 1))) {
+ printf(
+"object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+
+ fence_reg = obj->fence_reg;
+ if (fence_reg < 8)
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
+ else
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(pipelined, fence_reg);
+ intel_ring_emit(pipelined, val);
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE(fence_reg, val);
+
+ return 0;
+}
+
+static int
+i830_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
+ uint32_t val;
+ uint32_t pitch_val;
+
+ if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size || (obj->gtt_offset & (size - 1))) {
+ printf(
+"object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size);
+ return -EINVAL;
+ }
+
+ pitch_val = obj->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
+ intel_ring_emit(pipelined, val);
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+
+ return 0;
+}
+
+static bool
+ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ return i915_seqno_passed(ring->get_seqno(ring), seqno);
+}
+
+static int
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
+{
+ int ret;
+
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ ret = i915_gem_flush_ring(obj->last_fenced_ring, 0,
+ obj->base.write_domain);
+ if (ret)
+ return ret;
+ }
+
+ obj->fenced_gpu_access = false;
+ }
+
+ if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ obj->last_fenced_seqno)) {
+ ret = i915_wait_request(obj->last_fenced_ring,
+ obj->last_fenced_seqno,
+ true);
+ if (ret)
+ return ret;
+ }
+
+ obj->last_fenced_seqno = 0;
+ obj->last_fenced_ring = NULL;
+ }
+
+ /* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+ if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+ mb();
+
+ return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ ret = i915_gem_object_flush_fence(obj, NULL);
+ if (ret)
+ return ret;
+
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ if (dev_priv->fence_regs[obj->fence_reg].pin_count != 0)
+ printf("%s: pin_count %d\n", __func__,
+ dev_priv->fence_regs[obj->fence_reg].pin_count);
+ i915_gem_clear_fence_reg(obj->base.dev,
+ &dev_priv->fence_regs[obj->fence_reg]);
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ }
+
+ return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev, struct intel_ring_buffer *pipelined)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_fence_reg *reg, *first, *avail;
+ int i;
+
+ /* First try to find a free reg */
+ avail = NULL;
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ return reg;
+
+ if (!reg->pin_count)
+ avail = reg;
+ }
+
+ if (avail == NULL)
+ return NULL;
+
+ /* None available, try to steal one or wait for a user to finish */
+ avail = first = NULL;
+ list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->pin_count)
+ continue;
+
+ if (first == NULL)
+ first = reg;
+
+ if (!pipelined ||
+ !reg->obj->last_fenced_ring ||
+ reg->obj->last_fenced_ring == pipelined) {
+ avail = reg;
+ break;
+ }
+ }
+
+ if (avail == NULL)
+ avail = first;
+
+ return avail;
+}
+
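+/*
+ * Assign a fence register to the object, reusing its current register
+ * when possible and otherwise stealing the least recently used
+ * unpinned register after flushing out its previous owner. Note that
+ * pipelined fence setup is effectively disabled here: pipelined is
+ * forced to NULL on entry.
+ */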
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_fence_reg *reg;
+ int ret;
+
+ pipelined = NULL;
+ ret = 0;
+
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+
+ if (obj->tiling_changed) {
+ ret = i915_gem_object_flush_fence(obj, pipelined);
+ if (ret)
+ return ret;
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+
+ if (pipelined) {
+ reg->setup_seqno =
+ i915_gem_next_request_seqno(pipelined);
+ obj->last_fenced_seqno = reg->setup_seqno;
+ obj->last_fenced_ring = pipelined;
+ }
+
+ goto update;
+ }
+
+ if (!pipelined) {
+ if (reg->setup_seqno) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ reg->setup_seqno)) {
+ ret = i915_wait_request(
+ obj->last_fenced_ring,
+ reg->setup_seqno,
+ true);
+ if (ret)
+ return ret;
+ }
+
+ reg->setup_seqno = 0;
+ }
+ } else if (obj->last_fenced_ring &&
+ obj->last_fenced_ring != pipelined) {
+ ret = i915_gem_object_flush_fence(obj, pipelined);
+ if (ret)
+ return ret;
+ }
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+ KASSERT(pipelined || reg->setup_seqno == 0, ("!pipelined"));
+
+ if (obj->tiling_changed) {
+ if (pipelined) {
+ reg->setup_seqno =
+ i915_gem_next_request_seqno(pipelined);
+ obj->last_fenced_seqno = reg->setup_seqno;
+ obj->last_fenced_ring = pipelined;
+ }
+ goto update;
+ }
+
+ return 0;
+ }
+
+ reg = i915_find_fence_reg(dev, pipelined);
+ if (reg == NULL)
+ return -EDEADLK;
+
+ ret = i915_gem_object_flush_fence(obj, pipelined);
+ if (ret)
+ return ret;
+
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
+
+ drm_gem_object_reference(&old->base);
+
+ if (old->tiling_mode)
+ i915_gem_release_mmap(old);
+
+ ret = i915_gem_object_flush_fence(old, pipelined);
+ if (ret) {
+ drm_gem_object_unreference(&old->base);
+ return ret;
+ }
+
+ if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
+ pipelined = NULL;
+
+ old->fence_reg = I915_FENCE_REG_NONE;
+ old->last_fenced_ring = pipelined;
+ old->last_fenced_seqno =
+ pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
+
+ drm_gem_object_unreference(&old->base);
+ } else if (obj->last_fenced_seqno == 0)
+ pipelined = NULL;
+
+ reg->obj = obj;
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ obj->fence_reg = reg - dev_priv->fence_regs;
+ obj->last_fenced_ring = pipelined;
+
+ reg->setup_seqno =
+ pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
+ obj->last_fenced_seqno = reg->setup_seqno;
+
+update:
+ obj->tiling_changed = false;
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ ret = sandybridge_write_fence_reg(obj, pipelined);
+ break;
+ case 5:
+ case 4:
+ ret = i965_write_fence_reg(obj, pipelined);
+ break;
+ case 3:
+ ret = i915_write_fence_reg(obj, pipelined);
+ break;
+ case 2:
+ ret = i830_write_fence_reg(obj, pipelined);
+ break;
+ }
+
+ return ret;
+}
+
+static void
+i915_gem_clear_fence_reg(struct drm_device *dev, struct drm_i915_fence_reg *reg)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t fence_reg = reg - dev_priv->fence_regs;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
+ break;
+ case 5:
+ case 4:
+ I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
+ break;
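+ /*
+ * Note that the "case 2" label sits inside the else branch:
+ * gen2, like gen3 fences below register 8, uses the 830
+ * register layout.
+ */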
+ case 3:
+ if (fence_reg >= 8)
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+ else
+ case 2:
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
+
+ I915_WRITE(fence_reg, 0);
+ break;
+ }
+
+ list_del_init(&reg->lru_list);
+ reg->obj = NULL;
+ reg->setup_seqno = 0;
+ reg->pin_count = 0;
+}
+
+int
+i915_gem_init_object(struct drm_gem_object *obj)
+{
+
+ printf("i915_gem_init_object called\n");
+ return (0);
+}
+
+static bool
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
+{
+
+ return (obj->gtt_space && !obj->active && obj->pin_count == 0);
+}
+
+static void
+i915_gem_retire_task_handler(void *arg, int pending)
+{
+ drm_i915_private_t *dev_priv;
+ struct drm_device *dev;
+ bool idle;
+ int i;
+
+ dev_priv = arg;
+ dev = dev_priv->dev;
+
+ /* Come back later if the device is busy... */
+ if (!sx_try_xlock(&dev->dev_struct_lock)) {
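+		/* Retry in one second (hz ticks). */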
+ taskqueue_enqueue_timeout(dev_priv->tq,
+ &dev_priv->mm.retire_task, hz);
+ return;
+ }
+
+ CTR0(KTR_DRM, "retire_task");
+
+ i915_gem_retire_requests(dev);
+
+ /* Send a periodic flush down the ring so we don't hold onto GEM
+ * objects indefinitely.
+ */
+ idle = true;
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_ring_buffer *ring = &dev_priv->rings[i];
+
+ if (!list_empty(&ring->gpu_write_list)) {
+ struct drm_i915_gem_request *request;
+ int ret;
+
+ ret = i915_gem_flush_ring(ring,
+ 0, I915_GEM_GPU_DOMAINS);
+ request = malloc(sizeof(*request), DRM_I915_GEM,
+ M_WAITOK | M_ZERO);
+ if (ret || request == NULL ||
+ i915_add_request(ring, NULL, request))
+ free(request, DRM_I915_GEM);
+ }
+
+ idle &= list_empty(&ring->request_list);
+ }
+
+ if (!dev_priv->mm.suspended && !idle)
+ taskqueue_enqueue_timeout(dev_priv->tq,
+ &dev_priv->mm.retire_task, hz);
+
+ DRM_UNLOCK(dev);
+}
+
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+ int ret;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ ret = i915_gem_idle(dev);
+ if (ret != 0)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+}
+
+static int
+i915_gem_init_phys_object(struct drm_device *dev, int id, int size, int align)
+{
+ drm_i915_private_t *dev_priv;
+ struct drm_i915_gem_phys_object *phys_obj;
+ int ret;
+
+ dev_priv = dev->dev_private;
+ if (dev_priv->mm.phys_objs[id - 1] != NULL || size == 0)
+ return (0);
+
+ phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object), DRM_I915_GEM,
+ M_WAITOK | M_ZERO);
+
+ phys_obj->id = id;
+
+ phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
+ if (phys_obj->handle == NULL) {
+ ret = -ENOMEM;
+ goto free_obj;
+ }
+ pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
+ size / PAGE_SIZE, PAT_WRITE_COMBINING);
+
+ dev_priv->mm.phys_objs[id - 1] = phys_obj;
+
+ return (0);
+
+free_obj:
+ free(phys_obj, DRM_I915_GEM);
+ return (ret);
+}
+
+static void
+i915_gem_free_phys_object(struct drm_device *dev, int id)
+{
+ drm_i915_private_t *dev_priv;
+ struct drm_i915_gem_phys_object *phys_obj;
+
+ dev_priv = dev->dev_private;
+ if (dev_priv->mm.phys_objs[id - 1] == NULL)
+ return;
+
+ phys_obj = dev_priv->mm.phys_objs[id - 1];
+ if (phys_obj->cur_obj != NULL)
+ i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
+
+ drm_pci_free(dev, phys_obj->handle);
+ free(phys_obj, DRM_I915_GEM);
+ dev_priv->mm.phys_objs[id - 1] = NULL;
+}
+
+void
+i915_gem_free_all_phys_object(struct drm_device *dev)
+{
+ int i;
+
+ for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
+ i915_gem_free_phys_object(dev, i);
+}
+
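+/*
+ * Copy the contents of the physical object back into the object's
+ * backing pages and drop the phys binding.
+ */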
+void
+i915_gem_detach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj)
+{
+ vm_page_t m;
+ struct sf_buf *sf;
+ char *vaddr, *dst;
+ int i, page_count;
+
+ if (obj->phys_obj == NULL)
+ return;
+ vaddr = obj->phys_obj->handle->vaddr;
+
+ page_count = obj->base.size / PAGE_SIZE;
+ VM_OBJECT_LOCK(obj->base.vm_obj);
+ for (i = 0; i < page_count; i++) {
+ m = i915_gem_wire_page(obj->base.vm_obj, i);
+ if (m == NULL)
+ continue; /* XXX */
+
+ VM_OBJECT_UNLOCK(obj->base.vm_obj);
+ sf = sf_buf_alloc(m, 0);
+ if (sf != NULL) {
+ dst = (char *)sf_buf_kva(sf);
+ memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
+ sf_buf_free(sf);
+ }
+ drm_clflush_pages(&m, 1);
+
+ VM_OBJECT_LOCK(obj->base.vm_obj);
+ vm_page_reference(m);
+ vm_page_lock(m);
+ vm_page_dirty(m);
+ vm_page_unwire(m, 0);
+ vm_page_unlock(m);
+ atomic_add_long(&i915_gem_wired_pages_cnt, -1);
+ }
+ VM_OBJECT_UNLOCK(obj->base.vm_obj);
+ intel_gtt_chipset_flush();
+
+ obj->phys_obj->cur_obj = NULL;
+ obj->phys_obj = NULL;
+}
+
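+/*
+ * Bind the object to the physical slot "id", copying the current page
+ * contents into the slot's contiguous DMA memory.
+ */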
+int
+i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj, int id, int align)
+{
+ drm_i915_private_t *dev_priv;
+ vm_page_t m;
+ struct sf_buf *sf;
+ char *dst, *src;
+ int i, page_count, ret;
+
+ if (id > I915_MAX_PHYS_OBJECT)
+ return (-EINVAL);
+
+ if (obj->phys_obj != NULL) {
+ if (obj->phys_obj->id == id)
+ return (0);
+ i915_gem_detach_phys_object(dev, obj);
+ }
+
+ dev_priv = dev->dev_private;
+ if (dev_priv->mm.phys_objs[id - 1] == NULL) {
+ ret = i915_gem_init_phys_object(dev, id, obj->base.size, align);
+ if (ret != 0) {
+ DRM_ERROR("failed to init phys object %d size: %zu\n",
+ id, obj->base.size);
+ return (ret);
+ }
+ }
+
+ /* bind to the object */
+ obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+ obj->phys_obj->cur_obj = obj;
+
+ page_count = obj->base.size / PAGE_SIZE;
+
+ VM_OBJECT_LOCK(obj->base.vm_obj);
+ ret = 0;
+ for (i = 0; i < page_count; i++) {
+ m = i915_gem_wire_page(obj->base.vm_obj, i);
+ if (m == NULL) {
+ ret = -EIO;
+ break;
+ }
+ VM_OBJECT_UNLOCK(obj->base.vm_obj);
+ sf = sf_buf_alloc(m, 0);
+ src = (char *)sf_buf_kva(sf);
+ dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
+ memcpy(dst, src, PAGE_SIZE);
+ sf_buf_free(sf);
+
+ VM_OBJECT_LOCK(obj->base.vm_obj);
+
+ vm_page_reference(m);
+ vm_page_lock(m);
+ vm_page_unwire(m, 0);
+ vm_page_unlock(m);
+ atomic_add_long(&i915_gem_wired_pages_cnt, -1);
+ }
+ VM_OBJECT_UNLOCK(obj->base.vm_obj);
+
+	return (ret);
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj,
+ uint64_t data_ptr, uint64_t offset, uint64_t size,
+ struct drm_file *file_priv)
+{
+ char *user_data, *vaddr;
+ int ret;
+
+ vaddr = (char *)obj->phys_obj->handle->vaddr + offset;
+ user_data = (char *)(uintptr_t)data_ptr;
+
+ if (copyin_nofault(user_data, vaddr, size) != 0) {
+ /* The physical object once assigned is fixed for the lifetime
+ * of the obj, so we can safely drop the lock and continue
+ * to access vaddr.
+ */
+ DRM_UNLOCK(dev);
+ ret = -copyin(user_data, vaddr, size);
+ DRM_LOCK(dev);
+ if (ret != 0)
+ return (ret);
+ }
+
+ intel_gtt_chipset_flush();
+ return (0);
+}
+
+static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv;
+
+ dev_priv = dev->dev_private;
+ return (!list_empty(&dev_priv->mm.flushing_list) ||
+ !list_empty(&dev_priv->mm.active_list));
+}
+
+static void
+i915_gem_lowmem(void *arg)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ struct drm_i915_gem_object *obj, *next;
+ int cnt, cnt_fail, cnt_total;
+
+ dev = arg;
+ dev_priv = dev->dev_private;
+
+ if (!sx_try_xlock(&dev->dev_struct_lock))
+ return;
+
+ CTR0(KTR_DRM, "gem_lowmem");
+
+rescan:
+ /* first scan for clean buffers */
+ i915_gem_retire_requests(dev);
+
+ cnt_total = cnt_fail = cnt = 0;
+
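+	/*
+	 * cnt_total counts the objects that survive the purge pass and
+	 * so remain candidates for the forced eviction pass below.
+	 */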
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (i915_gem_object_is_purgeable(obj)) {
+ if (i915_gem_object_unbind(obj) != 0)
+ cnt_total++;
+ } else
+ cnt_total++;
+ }
+
+ /* second pass, evict/count anything still on the inactive list */
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (i915_gem_object_unbind(obj) == 0)
+ cnt++;
+ else
+ cnt_fail++;
+ }
+
+ if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
+ /*
+ * We are desperate for pages, so as a last resort, wait
+ * for the GPU to finish and discard whatever we can.
+		 * This dramatically reduces the number of OOM-killer
+		 * events whilst running the GPU aggressively.
+ */
+ if (i915_gpu_idle(dev, true) == 0)
+ goto rescan;
+ }
+ DRM_UNLOCK(dev);
+}
+
+void
+i915_gem_unload(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+
+ dev_priv = dev->dev_private;
+ EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);
+}
diff --git a/sys/dev/drm2/i915/i915_gem_evict.c b/sys/dev/drm2/i915/i915_gem_evict.c
new file mode 100644
index 0000000..0d8ac80
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_gem_evict.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+
+static bool
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+{
+ list_add(&obj->exec_list, unwind);
+ return drm_mm_scan_add_block(obj->gtt_space);
+}
+
+int
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head eviction_list, unwind_list;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
+
+ CTR4(KTR_DRM, "evict_something %p %d %u %d", dev, min_size,
+ alignment, mappable);
+
+ /*
+ * The goal is to evict objects and amalgamate space in LRU order.
+ * The oldest idle objects reside on the inactive list, which is in
+ * retirement order. The next objects to retire are those on the (per
+ * ring) active list that do not have an outstanding flush. Once the
+ * hardware reports completion (the seqno is updated after the
+ * batchbuffer has been finished) the clean buffer objects would
+ * be retired to the inactive list. Any dirty objects would be added
+ * to the tail of the flushing list. So after processing the clean
+ * active objects we need to emit a MI_FLUSH to retire the flushing
+ * list, hence the retirement order of the flushing list is in
+ * advance of the dirty objects on the active lists.
+ *
+ * The retirement sequence is thus:
+ * 1. Inactive objects (already retired)
+ * 2. Clean active objects
+ * 3. Flushing list
+ * 4. Dirty active objects.
+ *
+ * On each list, the oldest objects lie at the HEAD with the freshest
+ * object on the TAIL.
+ */
+
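+	/*
+	 * The drm_mm scan protocol used below, in outline (an
+	 * illustrative sketch, not additional driver logic):
+	 *
+	 *	drm_mm_init_scan(&mm, min_size, alignment);
+	 *	drm_mm_scan_add_block() for each eviction candidate;
+	 *	drm_mm_scan_remove_block() for every added block, which
+	 *	reports whether that block belongs to the hole found;
+	 *	unbind only the blocks so reported.
+	 */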
+ INIT_LIST_HEAD(&unwind_list);
+ if (mappable)
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+ alignment, 0,
+ dev_priv->mm.gtt_mappable_end);
+ else
+ drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+
+ /* First see if there is a large enough contiguous idle region... */
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+ if (mark_free(obj, &unwind_list))
+ goto found;
+ }
+
+ /* Now merge in the soon-to-be-expired objects... */
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ /* Does the object require an outstanding flush? */
+ if (obj->base.write_domain || obj->pin_count)
+ continue;
+
+ if (mark_free(obj, &unwind_list))
+ goto found;
+ }
+
+ /* Finally add anything with a pending flush (in order of retirement) */
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
+ if (obj->pin_count)
+ continue;
+
+ if (mark_free(obj, &unwind_list))
+ goto found;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (!obj->base.write_domain || obj->pin_count)
+ continue;
+
+ if (mark_free(obj, &unwind_list))
+ goto found;
+ }
+
+ /* Nothing found, clean up and bail out! */
+ while (!list_empty(&unwind_list)) {
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+
+ ret = drm_mm_scan_remove_block(obj->gtt_space);
+ KASSERT(ret == 0, ("drm_mm_scan_remove_block failed %d", ret));
+
+ list_del_init(&obj->exec_list);
+ }
+
+ /* We expect the caller to unpin, evict all and try again, or give up.
+ * So calling i915_gem_evict_everything() is unnecessary.
+ */
+ return -ENOSPC;
+
+found:
+	/* drm_mm doesn't allow any other operations while
+	 * scanning, therefore store the objects to be evicted on a
+ * temporary list. */
+ INIT_LIST_HEAD(&eviction_list);
+ while (!list_empty(&unwind_list)) {
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ list_move(&obj->exec_list, &eviction_list);
+ drm_gem_object_reference(&obj->base);
+ continue;
+ }
+ list_del_init(&obj->exec_list);
+ }
+
+ /* Unbinding will emit any required flushes */
+ while (!list_empty(&eviction_list)) {
+ obj = list_first_entry(&eviction_list,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (ret == 0)
+ ret = i915_gem_object_unbind(obj);
+
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ return ret;
+}
+
+int
+i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ bool lists_empty;
+
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ if (lists_empty)
+ return -ENOSPC;
+
+ CTR2(KTR_DRM, "evict_everything %p %d", dev, purgeable_only);
+
+ /* Flush everything (on to the inactive lists) and evict */
+ ret = i915_gpu_idle(dev, true);
+ if (ret)
+ return ret;
+
+ KASSERT(list_empty(&dev_priv->mm.flushing_list),
+ ("flush list not empty"));
+
+ return i915_gem_evict_inactive(dev, purgeable_only);
+}
+
+/** Unbinds all inactive objects. */
+int
+i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj, *next;
+
+ CTR2(KTR_DRM, "evict_inactive %p %d", dev, purgeable_only);
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list, mm_list) {
+ if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
+ int ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/sys/dev/drm2/i915/i915_gem_execbuffer.c b/sys/dev/drm2/i915/i915_gem_execbuffer.c
new file mode 100644
index 0000000..7143045
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1528 @@
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <sys/limits.h>
+#include <sys/sf_buf.h>
+
+struct change_domains {
+ uint32_t invalidate_domains;
+ uint32_t flush_domains;
+ uint32_t flush_rings;
+ uint32_t flips;
+};
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these a step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped to GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU,CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ * 6. Read/written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ struct change_domains *cd)
+{
+ uint32_t invalidate_domains = 0, flush_domains = 0;
+
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (obj->base.pending_write_domain == 0)
+ obj->base.pending_read_domains |= obj->base.read_domains;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->base.write_domain &&
+ (((obj->base.write_domain != obj->base.pending_read_domains ||
+ obj->ring != ring)) ||
+ (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
+ flush_domains |= obj->base.write_domain;
+ invalidate_domains |=
+ obj->base.pending_read_domains & ~obj->base.write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ if (obj->base.pending_write_domain)
+ cd->flips |= atomic_read(&obj->pending_flip);
+
+ /* The actual obj->write_domain will be updated with
+ * pending_write_domain after we emit the accumulated flush for all
+ * of our domain changes in execbuffers (which clears objects'
+ * write_domains). So if we have a current write domain that we
+ * aren't changing, set pending_write_domain to that.
+ */
+ if (flush_domains == 0 && obj->base.pending_write_domain == 0)
+ obj->base.pending_write_domain = obj->base.write_domain;
+
+ cd->invalidate_domains |= invalidate_domains;
+ cd->flush_domains |= flush_domains;
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
+ cd->flush_rings |= intel_ring_flag(obj->ring);
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+ cd->flush_rings |= intel_ring_flag(ring);
+}
+
+struct eb_objects {
+ u_long hashmask;
+ LIST_HEAD(, drm_i915_gem_object) *buckets;
+};
+
+static struct eb_objects *
+eb_create(int size)
+{
+ struct eb_objects *eb;
+
+ eb = malloc(sizeof(*eb), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ eb->buckets = hashinit(size, DRM_I915_GEM, &eb->hashmask);
+ return (eb);
+}
+
+static void
+eb_reset(struct eb_objects *eb)
+{
+ int i;
+
+ for (i = 0; i <= eb->hashmask; i++)
+ LIST_INIT(&eb->buckets[i]);
+}
+
+static void
+eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+{
+
+ LIST_INSERT_HEAD(&eb->buckets[obj->exec_handle & eb->hashmask],
+ obj, exec_node);
+}
+
+static struct drm_i915_gem_object *
+eb_get_object(struct eb_objects *eb, unsigned long handle)
+{
+ struct drm_i915_gem_object *obj;
+
+ LIST_FOREACH(obj, &eb->buckets[handle & eb->hashmask], exec_node) {
+ if (obj->exec_handle == handle)
+ return (obj);
+ }
+ return (NULL);
+}
+
+static void
+eb_destroy(struct eb_objects *eb)
+{
+
+ free(eb->buckets, DRM_I915_GEM);
+ free(eb, DRM_I915_GEM);
+}
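+
+/*
+ * Typical eb_objects lifecycle, as an illustrative outline:
+ *
+ *	eb = eb_create(args->buffer_count);
+ *	for each exec entry: eb_add_object(eb, obj);
+ *	... eb_get_object(eb, reloc->target_handle) while relocating ...
+ *	eb_destroy(eb);
+ */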
+
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_gem_object *target_obj;
+ uint32_t target_offset;
+ int ret = -EINVAL;
+
+	/* we already hold a reference to all valid objects */
+ target_obj = &eb_get_object(eb, reloc->target_handle)->base;
+ if (unlikely(target_obj == NULL))
+ return -ENOENT;
+
+ target_offset = to_intel_bo(target_obj)->gtt_offset;
+
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (unlikely(target_offset == 0)) {
+ DRM_DEBUG("No GTT space found for object %d\n",
+ reloc->target_handle);
+ return ret;
+ }
+
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+ DRM_DEBUG("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return ret;
+ }
+ if (unlikely((reloc->write_domain | reloc->read_domains)
+ & ~I915_GEM_GPU_DOMAINS)) {
+ DRM_DEBUG("reloc with read/write non-GPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return ret;
+ }
+ if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain)) {
+ DRM_DEBUG("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
+ target_obj->pending_write_domain);
+ return ret;
+ }
+
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_offset == reloc->presumed_offset)
+ return 0;
+
+ /* Check that the relocation address is valid... */
+ if (unlikely(reloc->offset > obj->base.size - 4)) {
+ DRM_DEBUG("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ return ret;
+ }
+ if (unlikely(reloc->offset & 3)) {
+ DRM_DEBUG("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ return ret;
+ }
+
+ reloc->delta += target_offset;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
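+		/*
+		 * CPU-domain object: patch the dword through a transient
+		 * sf_buf mapping of the backing page.
+		 */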
+ uint32_t page_offset = reloc->offset & PAGE_MASK;
+ char *vaddr;
+ struct sf_buf *sf;
+
+ sf = sf_buf_alloc(obj->pages[OFF_TO_IDX(reloc->offset)],
+ SFB_NOWAIT);
+ if (sf == NULL)
+ return (-ENOMEM);
+ vaddr = (void *)sf_buf_kva(sf);
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ sf_buf_free(sf);
+ } else {
+ uint32_t *reloc_entry;
+ char *reloc_page;
+
+ /* We can't wait for rendering with pagefaults disabled */
+ if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0)
+ return (-EFAULT);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Map the page containing the relocation we're going
+ * to perform.
+ */
+ reloc->offset += obj->gtt_offset;
+ reloc_page = pmap_mapdev_attr(dev->agp->base + (reloc->offset &
+ ~PAGE_MASK), PAGE_SIZE, PAT_WRITE_COMBINING);
+ reloc_entry = (uint32_t *)(reloc_page + (reloc->offset &
+ PAGE_MASK));
+ *(volatile uint32_t *)reloc_entry = reloc->delta;
+ pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE);
+ }
+
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb)
+{
+ struct drm_i915_gem_relocation_entry *user_relocs;
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ struct drm_i915_gem_relocation_entry reloc;
+ int i, ret;
+
+ user_relocs = (void *)(uintptr_t)entry->relocs_ptr;
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = -copyin_nofault(user_relocs + i, &reloc, sizeof(reloc));
+ if (ret != 0)
+ return (ret);
+
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
+ if (ret != 0)
+ return (ret);
+
+ ret = -copyout_nofault(&reloc.presumed_offset,
+ &user_relocs[i].presumed_offset,
+ sizeof(reloc.presumed_offset));
+ if (ret != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb, struct drm_i915_gem_relocation_entry *relocs)
+{
+ const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ int i, ret;
+
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+ struct eb_objects *eb,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ int ret, pflags;
+
+ /* Try to move as many of the relocation targets off the active list
+ * to avoid unnecessary fallbacks to the slow path, as we cannot wait
+ * for the retirement with pagefaults disabled.
+ */
+ i915_gem_retire_requests(dev);
+
+ ret = 0;
+ pflags = vm_fault_disable_pagefaults();
+ /* This is the fast path and we cannot handle a pagefault whilst
+ * holding the device lock lest the user pass in the relocations
+	 * contained within an mmapped bo; in such a case the page
+ * fault handler would call i915_gem_fault() and we would try to
+ * acquire the device lock again. Obviously this is bad.
+ */
+
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_execbuffer_relocate_object(obj, eb);
+ if (ret != 0)
+ break;
+ }
+ vm_fault_enable_pagefaults(pflags);
+ return (ret);
+}
+
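+/*
+ * Private flag kept in the high bit of exec_entry->flags to record
+ * that a fence was pinned for this object; the userspace
+ * EXEC_OBJECT_* flags occupy only the low bits.
+ */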
+#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+
+static int
+pin_and_fence_object(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ bool need_fence, need_mappable;
+ int ret;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+ if (ret)
+ return ret;
+
+ if (has_fenced_gpu_access) {
+ if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+ if (obj->tiling_mode) {
+ ret = i915_gem_object_get_fence(obj, ring);
+ if (ret)
+ goto err_unpin;
+
+ entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+ i915_gem_object_pin_fence(obj);
+ } else {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto err_unpin;
+ }
+ obj->pending_fenced_gpu_access = true;
+ }
+ }
+
+ entry->offset = obj->gtt_offset;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+ return ret;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct list_head *objects)
+{
+ drm_i915_private_t *dev_priv;
+ struct drm_i915_gem_object *obj;
+ int ret, retry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ struct list_head ordered_objects;
+
+ dev_priv = ring->dev->dev_private;
+ INIT_LIST_HEAD(&ordered_objects);
+ while (!list_empty(objects)) {
+ struct drm_i915_gem_exec_object2 *entry;
+ bool need_fence, need_mappable;
+
+ obj = list_first_entry(objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ entry = obj->exec_entry;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ if (need_mappable)
+ list_move(&obj->exec_list, &ordered_objects);
+ else
+ list_move_tail(&obj->exec_list, &ordered_objects);
+
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ }
+ list_splice(&ordered_objects, objects);
+
+ /* Attempt to pin all of the buffers into the GTT.
+ * This is done in 3 phases:
+ *
+ * 1a. Unbind all objects that do not match the GTT constraints for
+ * the execbuffer (fenceable, mappable, alignment etc).
+ * 1b. Increment pin count for already bound objects and obtain
+ * a fence register if required.
+ * 2. Bind new objects.
+ * 3. Decrement pin count.
+ *
+	 * This avoids unnecessary unbinding of later objects in order to make
+ * room for the earlier objects *unless* we need to defragment.
+ */
+ retry = 0;
+ do {
+ ret = 0;
+
+ /* Unbind any ill-fitting objects or pin. */
+ list_for_each_entry(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ bool need_fence, need_mappable;
+
+ if (!obj->gtt_space)
+ continue;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+ (need_mappable && !obj->map_and_fenceable))
+ ret = i915_gem_object_unbind(obj);
+ else
+ ret = pin_and_fence_object(obj, ring);
+ if (ret)
+ goto err;
+ }
+
+ /* Bind fresh objects */
+ list_for_each_entry(obj, objects, exec_list) {
+ if (obj->gtt_space)
+ continue;
+
+ ret = pin_and_fence_object(obj, ring);
+ if (ret) {
+ int ret_ignore;
+
+ /* This can potentially raise a harmless
+ * -EINVAL if we failed to bind in the above
+ * call. It cannot raise -EINTR since we know
+ * that the bo is freshly bound and so will
+ * not need to be flushed or waited upon.
+ */
+ ret_ignore = i915_gem_object_unbind(obj);
+ (void)ret_ignore;
+ if (obj->gtt_space != NULL)
+ printf("%s: gtt_space\n", __func__);
+ break;
+ }
+ }
+
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ continue;
+
+ entry = obj->exec_entry;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(obj);
+ entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+ }
+
+ i915_gem_object_unpin(obj);
+
+ /* ... and ensure ppgtt mapping exist if needed. */
+ if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, obj->cache_level);
+
+ obj->has_aliasing_ppgtt_mapping = 1;
+ }
+ }
+
+ if (ret != -ENOSPC || retry > 1)
+ return ret;
+
+ /* First attempt, just clear anything that is purgeable.
+ * Second attempt, clear the entire GTT.
+ */
+ ret = i915_gem_evict_everything(ring->dev, retry == 0);
+ if (ret)
+ return ret;
+
+ retry++;
+ } while (1);
+
+err:
+ list_for_each_entry_continue_reverse(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ continue;
+
+ entry = obj->exec_entry;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(obj);
+ entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+ }
+
+ i915_gem_object_unpin(obj);
+ }
+
+ return ret;
+}
+
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_file *file, struct intel_ring_buffer *ring,
+ struct list_head *objects, struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *exec, int count)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ struct drm_i915_gem_object *obj;
+ int *reloc_offset;
+ int i, total, ret;
+
+ /* We may process another execbuffer during the unlock... */
+ while (!list_empty(objects)) {
+ obj = list_first_entry(objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ DRM_UNLOCK(dev);
+
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec[i].relocation_count;
+
+ reloc_offset = malloc(count * sizeof(*reloc_offset), DRM_I915_GEM,
+ M_WAITOK | M_ZERO);
+ reloc = malloc(total * sizeof(*reloc), DRM_I915_GEM, M_WAITOK | M_ZERO);
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry *user_relocs;
+
+ user_relocs = (void *)(uintptr_t)exec[i].relocs_ptr;
+ ret = -copyin(user_relocs, reloc + total,
+ exec[i].relocation_count * sizeof(*reloc));
+ if (ret != 0) {
+ DRM_LOCK(dev);
+ goto err;
+ }
+
+ reloc_offset[i] = total;
+ total += exec[i].relocation_count;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ DRM_LOCK(dev);
+ goto err;
+ }
+
+ /* reacquire the objects */
+ eb_reset(eb);
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (&obj->base == NULL) {
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, objects);
+ obj->exec_handle = exec[i].handle;
+ obj->exec_entry = &exec[i];
+ eb_add_object(eb, obj);
+ }
+
+ ret = i915_gem_execbuffer_reserve(ring, file, objects);
+ if (ret)
+ goto err;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ int offset = obj->exec_entry - exec;
+ ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
+ reloc + reloc_offset[offset]);
+ if (ret)
+ goto err;
+ }
+
+	/* Leave the user relocations as they are; this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ free(reloc, DRM_I915_GEM);
+ free(reloc_offset, DRM_I915_GEM);
+ return ret;
+}
+
+static int
+i915_gem_execbuffer_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains,
+ uint32_t flush_rings)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i, ret;
+
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ intel_gtt_chipset_flush();
+
+ if (flush_domains & I915_GEM_DOMAIN_GTT)
+ wmb();
+
+ if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (flush_rings & (1 << i)) {
+ ret = i915_gem_flush_ring(&dev_priv->rings[i],
+ invalidate_domains, flush_domains);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+ /* Enable semaphores on SNB when IO remapping is off */
+ if (INTEL_INFO(dev)->gen == 6)
+ return !intel_iommu_enabled;
+
+ return 1;
+}
+
+static int
+i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ /* XXX gpu semaphores are implicated in various hard hangs on SNB */
+ if (!intel_enable_semaphores(obj->base.dev))
+ return i915_gem_object_wait_rendering(obj);
+
+ idx = intel_ring_sync_index(from, to);
+
+ seqno = obj->last_rendering_seqno;
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
+ if (seqno == from->outstanding_lazy_request) {
+ struct drm_i915_gem_request *request;
+
+ request = malloc(sizeof(*request), DRM_I915_GEM,
+ M_WAITOK | M_ZERO);
+ ret = i915_add_request(from, NULL, request);
+ if (ret) {
+ free(request, DRM_I915_GEM);
+ return ret;
+ }
+
+ seqno = request->seqno;
+ }
+
+ from->sync_seqno[idx] = seqno;
+
+ return to->sync_to(to, from, seqno - 1);
+}
+
+static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
+{
+ u32 plane, flip_mask;
+ int ret;
+
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ struct change_domains cd;
+ int ret;
+
+ memset(&cd, 0, sizeof(cd));
+ list_for_each_entry(obj, objects, exec_list)
+ i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
+
+ if (cd.invalidate_domains | cd.flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ cd.invalidate_domains,
+ cd.flush_domains);
+#endif
+ ret = i915_gem_execbuffer_flush(ring->dev,
+ cd.invalidate_domains,
+ cd.flush_domains,
+ cd.flush_rings);
+ if (ret)
+ return ret;
+ }
+
+ if (cd.flips) {
+ ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
+ if (ret)
+ return ret;
+ }
+
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_execbuffer_sync_rings(obj, ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
+ return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
+}
+
+static int
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
+ vm_page_t ***map)
+{
+ vm_page_t *ma;
+ int i, length, page_count;
+
+	/* XXXKIB various limits checking is missing here */
+ *map = malloc(count * sizeof(*ma), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ for (i = 0; i < count; i++) {
+ /* First check for malicious input causing overflow */
+ if (exec[i].relocation_count >
+ INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ return -EINVAL;
+
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
+ if (length == 0) {
+ (*map)[i] = NULL;
+ continue;
+ }
+ /*
+ * Since both start and end of the relocation region
+		 * may not be aligned on a page boundary, be
+ * conservative and request a page slot for each
+ * partial page. Thus +2.
+ */
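+		/*
+		 * For example, a 4 KB relocation list starting 16 bytes
+		 * into a page spans two pages, while howmany(4096,
+		 * PAGE_SIZE) is only 1; the +2 covers both partial ends.
+		 */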
+ page_count = howmany(length, PAGE_SIZE) + 2;
+ ma = (*map)[i] = malloc(page_count * sizeof(vm_page_t),
+ DRM_I915_GEM, M_WAITOK | M_ZERO);
+ if (vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
+ exec[i].relocs_ptr, length, VM_PROT_READ | VM_PROT_WRITE,
+ ma, page_count) == -1) {
+ free(ma, DRM_I915_GEM);
+ (*map)[i] = NULL;
+ return (-EFAULT);
+ }
+ }
+
+ return 0;
+}
+
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *objects,
+ struct intel_ring_buffer *ring,
+ u32 seqno)
+{
+ struct drm_i915_gem_object *obj;
+ uint32_t old_read, old_write;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ old_read = obj->base.read_domains;
+ old_write = obj->base.write_domain;
+
+ obj->base.read_domains = obj->base.pending_read_domains;
+ obj->base.write_domain = obj->base.pending_write_domain;
+ obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+ i915_gem_object_move_to_active(obj, ring, seqno);
+ if (obj->base.write_domain) {
+ obj->dirty = 1;
+ obj->pending_gpu_write = true;
+ list_move_tail(&obj->gpu_write_list,
+ &ring->gpu_write_list);
+ intel_mark_busy(ring->dev, obj);
+ }
+ CTR3(KTR_DRM, "object_change_domain move_to_active %p %x %x",
+ obj, old_read, old_write);
+ }
+}
+
+int i915_gem_sync_exec_requests;
+
+static void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_request *request;
+ u32 invalidate;
+
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires.
+ *
+ * The sampler always gets flushed on i965 (sigh).
+ */
+ invalidate = I915_GEM_DOMAIN_COMMAND;
+ if (INTEL_INFO(dev)->gen >= 4)
+ invalidate |= I915_GEM_DOMAIN_SAMPLER;
+ if (ring->flush(ring, invalidate, 0)) {
+ i915_gem_next_request_seqno(ring);
+ return;
+ }
+
+ /* Add a breadcrumb for the completion of the batch buffer */
+ request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ if (request == NULL || i915_add_request(ring, file, request)) {
+ i915_gem_next_request_seqno(ring);
+ free(request, DRM_I915_GEM);
+ } else if (i915_gem_sync_exec_requests)
+ i915_wait_request(ring, request->seqno, true);
+}
+
+static void
+i915_gem_fix_mi_batchbuffer_end(struct drm_i915_gem_object *batch_obj,
+ uint32_t batch_start_offset, uint32_t batch_len)
+{
+ char *mkva;
+ uint64_t po_r, po_w;
+ uint32_t cmd;
+
+ po_r = batch_obj->base.dev->agp->base + batch_obj->gtt_offset +
+ batch_start_offset + batch_len;
+ if (batch_len > 0)
+ po_r -= 4;
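+	/* po_r now addresses the last command dword of the batch. */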
+ mkva = pmap_mapdev_attr(trunc_page(po_r), 2 * PAGE_SIZE,
+ PAT_WRITE_COMBINING);
+ cmd = *(uint32_t *)(mkva + (po_r & PAGE_MASK));
+
+ if (cmd != MI_BATCH_BUFFER_END) {
+ /*
+ * batch_len != 0 due to the check at the start of
+ * i915_gem_do_execbuffer
+ */
+ if (batch_obj->base.size > batch_start_offset + batch_len) {
+ po_w = po_r + 4;
+/* DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END !\n"); */
+ } else {
+ po_w = po_r;
+DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END, overwriting last bo cmd !\n");
+ }
+
+ *(uint32_t *)(mkva + (po_w & PAGE_MASK)) = MI_BATCH_BUFFER_END;
+ }
+
+ pmap_unmapdev((vm_offset_t)mkva, 2 * PAGE_SIZE);
+}
+
+int i915_fix_mi_batchbuffer_end = 1;
+
+static int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret, i;
+
+ if (!IS_GEN7(dev) || ring != &dev_priv->rings[RCS])
+ return 0;
+
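+	/*
+	 * Four GEN7_SO_WRITE_OFFSET registers, three dwords each:
+	 * MI_LOAD_REGISTER_IMM, register offset, zero value.
+	 */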
+ ret = intel_ring_begin(ring, 4 * 3);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 4; i++) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(ring, 0);
+ }
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head objects;
+ struct eb_objects *eb;
+ struct drm_i915_gem_object *batch_obj;
+ struct drm_clip_rect *cliprects = NULL;
+ struct intel_ring_buffer *ring;
+ vm_page_t **relocs_ma;
+ u32 exec_start, exec_len;
+ u32 seqno;
+ u32 mask;
+ int ret, mode, i;
+
+ if (!i915_gem_check_execbuffer(args)) {
+ DRM_DEBUG("execbuf with invalid offset/length\n");
+ return -EINVAL;
+ }
+
+ if (args->batch_len == 0)
+ return (0);
+
+ ret = validate_exec_list(exec, args->buffer_count, &relocs_ma);
+ if (ret != 0)
+ goto pre_struct_lock_err;
+
+ switch (args->flags & I915_EXEC_RING_MASK) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ ring = &dev_priv->rings[RCS];
+ break;
+ case I915_EXEC_BSD:
+ if (!HAS_BSD(dev)) {
+ DRM_DEBUG("execbuf with invalid ring (BSD)\n");
+			ret = -EINVAL;
+			goto pre_struct_lock_err;
+ }
+ ring = &dev_priv->rings[VCS];
+ break;
+ case I915_EXEC_BLT:
+ if (!HAS_BLT(dev)) {
+ DRM_DEBUG("execbuf with invalid ring (BLT)\n");
+			ret = -EINVAL;
+			goto pre_struct_lock_err;
+ }
+ ring = &dev_priv->rings[BCS];
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ ret = -EINVAL;
+ goto pre_struct_lock_err;
+ }
+
+ mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ mask = I915_EXEC_CONSTANTS_MASK;
+ switch (mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (ring == &dev_priv->rings[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4) {
+ ret = -EINVAL;
+ goto pre_struct_lock_err;
+ }
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+ ret = -EINVAL;
+ goto pre_struct_lock_err;
+ }
+
+ /* The HW changed the meaning on this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ }
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
+ ret = -EINVAL;
+ goto pre_struct_lock_err;
+ }
+
+ if (args->buffer_count < 1) {
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+ ret = -EINVAL;
+ goto pre_struct_lock_err;
+ }
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->rings[RCS]) {
+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+ ret = -EINVAL;
+ goto pre_struct_lock_err;
+ }
+
+ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+ DRM_DEBUG("execbuf with %u cliprects\n",
+ args->num_cliprects);
+ ret = -EINVAL;
+ goto pre_struct_lock_err;
+ }
+		cliprects = malloc(sizeof(*cliprects) * args->num_cliprects,
+ DRM_I915_GEM, M_WAITOK | M_ZERO);
+ ret = -copyin((void *)(uintptr_t)args->cliprects_ptr, cliprects,
+ sizeof(*cliprects) * args->num_cliprects);
+ if (ret != 0)
+ goto pre_struct_lock_err;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto pre_struct_lock_err;
+
+ if (dev_priv->mm.suspended) {
+ ret = -EBUSY;
+ goto struct_lock_err;
+ }
+
+ eb = eb_create(args->buffer_count);
+ if (eb == NULL) {
+ ret = -ENOMEM;
+ goto struct_lock_err;
+ }
+
+ /* Look up object handles */
+ INIT_LIST_HEAD(&objects);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (&obj->base == NULL) {
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ /* prevent error path from reading uninitialized data */
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (!list_empty(&obj->exec_list)) {
+ DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+ obj, exec[i].handle, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, &objects);
+ obj->exec_handle = exec[i].handle;
+ obj->exec_entry = &exec[i];
+ eb_add_object(eb, obj);
+ }
+
+ /* take note of the batch buffer before we might reorder the lists */
+ batch_obj = list_entry(objects.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+
+ /* Move the objects en-masse into the GTT, evicting if necessary. */
+ ret = i915_gem_execbuffer_reserve(ring, file, &objects);
+ if (ret)
+ goto err;
+
+ /* The objects are in their final locations, apply the relocations. */
+ ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
+ &objects, eb, exec, args->buffer_count);
+ DRM_LOCK_ASSERT(dev);
+ }
+ if (ret)
+ goto err;
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ if (batch_obj->base.pending_write_domain) {
+ DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+ if (ret)
+ goto err;
+
+ seqno = i915_gem_next_request_seqno(ring);
+ for (i = 0; i < I915_NUM_RINGS - 1; i++) {
+ if (seqno < ring->sync_seqno[i]) {
+			/* The GPU cannot handle its semaphore value wrapping,
+ * so every billion or so execbuffers, we need to stall
+ * the GPU in order to reset the counters.
+ */
+ ret = i915_gpu_idle(dev, true);
+ if (ret)
+ goto err;
+
+ KASSERT(ring->sync_seqno[i] == 0, ("Non-zero sync_seqno"));
+ }
+ }
+
+ if (ring == &dev_priv->rings[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, mask << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto err;
+ }
+
+ exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+ exec_len = args->batch_len;
+
+ if (i915_fix_mi_batchbuffer_end) {
+ i915_gem_fix_mi_batchbuffer_end(batch_obj,
+ args->batch_start_offset, args->batch_len);
+ }
+
+ CTR4(KTR_DRM, "ring_dispatch %s %d exec %x %x", ring->name, seqno,
+ exec_start, exec_len);
+
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box_p(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto err;
+
+ ret = ring->dispatch_execbuffer(ring, exec_start,
+ exec_len);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+ if (ret)
+ goto err;
+ }
+
+ i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+ i915_gem_execbuffer_retire_commands(dev, file, ring);
+
+err:
+ eb_destroy(eb);
+ while (!list_empty(&objects)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&objects, struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+struct_lock_err:
+ DRM_UNLOCK(dev);
+
+pre_struct_lock_err:
+ for (i = 0; i < args->buffer_count; i++) {
+ if (relocs_ma[i] != NULL) {
+ vm_page_unhold_pages(relocs_ma[i], howmany(
+ exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry),
+ PAGE_SIZE));
+ free(relocs_ma[i], DRM_I915_GEM);
+ }
+ }
+ free(relocs_ma, DRM_I915_GEM);
+ free(cliprects, DRM_I915_GEM);
+ return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+ DRM_DEBUG("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+
+ if (args->buffer_count < 1) {
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ /* XXXKIB user-controlled malloc size */
+ exec_list = malloc(sizeof(*exec_list) * args->buffer_count,
+ DRM_I915_GEM, M_WAITOK);
+ exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count,
+ DRM_I915_GEM, M_WAITOK);
+ ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_DEBUG("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ free(exec_list, DRM_I915_GEM);
+ free(exec2_list, DRM_I915_GEM);
+ return (ret);
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (INTEL_INFO(dev)->gen < 4)
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = I915_EXEC_RENDER;
+
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
+ ret = -copyout(exec_list, (void *)(uintptr_t)args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ free(exec_list, DRM_I915_GEM);
+ free(exec2_list, DRM_I915_GEM);
+ return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+ DRM_DEBUG("buffers_ptr %jx buffer_count %d len %08x\n",
+ (uintmax_t)args->buffers_ptr, args->buffer_count, args->batch_len);
+
+ if (args->buffer_count < 1 ||
+ args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
+ DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* XXXKIB user-controllable malloc size */
+ exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count,
+ DRM_I915_GEM, M_WAITOK);
+ ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_DEBUG("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ free(exec2_list, DRM_I915_GEM);
+ return (ret);
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = -copyout(exec2_list, (void *)(uintptr_t)args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ free(exec2_list, DRM_I915_GEM);
+ return ret;
+}
diff --git a/sys/dev/drm2/i915/i915_gem_gtt.c b/sys/dev/drm2/i915/i915_gem_gtt.c
new file mode 100644
index 0000000..90899de
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_gem_gtt.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <sys/sched.h>
+#include <sys/sf_buf.h>
+
+/* PPGTT support for SandyBridge/Gen6 and later */
+static void
+i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+ unsigned first_entry, unsigned num_entries)
+{
+ uint32_t *pt_vaddr;
+ uint32_t scratch_pte;
+ struct sf_buf *sf;
+ unsigned act_pd, first_pte, last_pte, i;
+
+ act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+
+ scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
+ scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+
+ while (num_entries) {
+ last_pte = first_pte + num_entries;
+ if (last_pte > I915_PPGTT_PT_ENTRIES)
+ last_pte = I915_PPGTT_PT_ENTRIES;
+
+ sched_pin();
+ sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
+ pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);
+
+ for (i = first_pte; i < last_pte; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ sf_buf_free(sf);
+ sched_unpin();
+
+ num_entries -= last_pte - first_pte;
+ first_pte = 0;
+ act_pd++;
+ }
+}
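+
+/*
+ * Illustrative sketch (editor's note, not part of this commit): the
+ * act_pd/first_pte decomposition above maps a linear PTE index onto a
+ * page-directory slot plus an offset within that page table.  Assuming
+ * I915_PPGTT_PT_ENTRIES == 1024 (one 4K page of 32-bit PTEs), entry
+ * 70000 is slot 368 of page table 68.  The helper name is hypothetical.
+ */
+#if 0
+static void
+ppgtt_locate_entry(unsigned entry, unsigned *pd, unsigned *pte)
+{
+
+	*pd = entry / I915_PPGTT_PT_ENTRIES;	/* 70000 / 1024 == 68 */
+	*pte = entry % I915_PPGTT_PT_ENTRIES;	/* 70000 % 1024 == 368 */
+}
+#endif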
+
+int
+i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+ struct i915_hw_ppgtt *ppgtt;
+ u_int first_pd_entry_in_global_pt, i;
+
+ dev_priv = dev->dev_private;
+
+ /*
+ * ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
+ * entries. For aliasing ppgtt support we just steal them at the end for
+ * now.
+ */
+ first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
+
+ ppgtt = malloc(sizeof(*ppgtt), DRM_I915_GEM, M_WAITOK | M_ZERO);
+
+ ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->pt_pages = malloc(sizeof(vm_page_t) * ppgtt->num_pd_entries,
+ DRM_I915_GEM, M_WAITOK | M_ZERO);
+
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ ppgtt->pt_pages[i] = vm_page_alloc(NULL, 0,
+ VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO);
+ if (ppgtt->pt_pages[i] == NULL) {
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ return (-ENOMEM);
+ }
+ }
+
+ ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt.scratch_page_dma;
+
+ i915_ppgtt_clear_range(ppgtt, 0, ppgtt->num_pd_entries *
+ I915_PPGTT_PT_ENTRIES);
+ ppgtt->pd_offset = (first_pd_entry_in_global_pt) * sizeof(uint32_t);
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+ return (0);
+}
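+
+/*
+ * Worked example (editor's note, not part of this commit): assuming
+ * I915_PPGTT_PD_ENTRIES == 512, the directory is stolen from the top of
+ * the 512K-entry global GTT, so it starts at entry
+ * 512 * 1024 - 512 == 523776 and pd_offset comes out as
+ * 523776 * sizeof(uint32_t) == 0x1ff800 bytes.
+ */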
+
+static void
+i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
+ unsigned num_entries, vm_page_t *pages, uint32_t pte_flags)
+{
+ uint32_t *pt_vaddr, pte;
+ struct sf_buf *sf;
+ unsigned act_pd, first_pte;
+ unsigned last_pte, i;
+ vm_paddr_t page_addr;
+
+ act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+
+ while (num_entries) {
+ last_pte = first_pte + num_entries;
+ if (last_pte > I915_PPGTT_PT_ENTRIES)
+ last_pte = I915_PPGTT_PT_ENTRIES;
+
+ sched_pin();
+ sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
+ pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);
+
+ for (i = first_pte; i < last_pte; i++) {
+ page_addr = VM_PAGE_TO_PHYS(*pages);
+ pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+ pt_vaddr[i] = pte | pte_flags;
+
+ pages++;
+ }
+
+ sf_buf_free(sf);
+ sched_unpin();
+
+ num_entries -= last_pte - first_pte;
+ first_pte = 0;
+ act_pd++;
+ }
+}
+
+void
+i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj, enum i915_cache_level cache_level)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ uint32_t pte_flags;
+
+ dev = obj->base.dev;
+ dev_priv = dev->dev_private;
+ pte_flags = GEN6_PTE_VALID;
+
+ switch (cache_level) {
+ case I915_CACHE_LLC_MLC:
+ pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
+ break;
+ case I915_CACHE_LLC:
+ pte_flags |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte_flags |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ panic("cache mode");
+ }
+
+ i915_ppgtt_insert_pages(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT, obj->pages, pte_flags);
+}
+
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj)
+{
+ i915_ppgtt_clear_range(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+}
+
+void
+i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+ struct i915_hw_ppgtt *ppgtt;
+ vm_page_t m;
+ int i;
+
+ dev_priv = dev->dev_private;
+ ppgtt = dev_priv->mm.aliasing_ppgtt;
+ if (ppgtt == NULL)
+ return;
+ dev_priv->mm.aliasing_ppgtt = NULL;
+
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ m = ppgtt->pt_pages[i];
+ if (m != NULL) {
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
+ }
+ }
+ free(ppgtt->pt_pages, DRM_I915_GEM);
+ free(ppgtt, DRM_I915_GEM);
+}
+
+
+static unsigned int
+cache_level_to_agp_type(struct drm_device *dev, enum i915_cache_level
+ cache_level)
+{
+
+ switch (cache_level) {
+ case I915_CACHE_LLC_MLC:
+ if (INTEL_INFO(dev)->gen >= 6)
+ return (AGP_USER_CACHED_MEMORY_LLC_MLC);
+ /*
+ * Older chipsets do not have this extra level of CPU
+		 * caching, so fall through and request the PTE simply
+ * as cached.
+ */
+ case I915_CACHE_LLC:
+ return (AGP_USER_CACHED_MEMORY);
+
+ default:
+ case I915_CACHE_NONE:
+ return (AGP_USER_MEMORY);
+ }
+}
+
+static bool
+do_idling(struct drm_i915_private *dev_priv)
+{
+ bool ret = dev_priv->mm.interruptible;
+
+ if (dev_priv->mm.gtt.do_idle_maps) {
+ dev_priv->mm.interruptible = false;
+ if (i915_gpu_idle(dev_priv->dev, false)) {
+ DRM_ERROR("Couldn't idle GPU\n");
+ /* Wait a bit, in hopes it avoids the hang */
+ DELAY(10);
+ }
+ }
+
+ return ret;
+}
+
+static void
+undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
+{
+
+ if (dev_priv->mm.gtt.do_idle_maps)
+ dev_priv->mm.interruptible = interruptible;
+}
+
+void
+i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+ struct drm_i915_gem_object *obj;
+
+ dev_priv = dev->dev_private;
+
+ /* First fill our portion of the GTT with scratch pages */
+ intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+ (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ i915_gem_clflush_object(obj);
+ i915_gem_gtt_rebind_object(obj, obj->cache_level);
+ }
+
+ intel_gtt_chipset_flush();
+}
+
+int
+i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+{
+ unsigned int agp_type;
+
+ agp_type = cache_level_to_agp_type(obj->base.dev, obj->cache_level);
+ intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT, obj->pages, agp_type);
+ return (0);
+}
+
+void
+i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ unsigned int agp_type;
+
+ dev = obj->base.dev;
+ dev_priv = dev->dev_private;
+ agp_type = cache_level_to_agp_type(dev, cache_level);
+
+ intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT, obj->pages, agp_type);
+}
+
+void
+i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev;
+	struct drm_i915_private *dev_priv;
+	bool interruptible;
+
+	dev = obj->base.dev;
+	dev_priv = dev->dev_private;
+
+ interruptible = do_idling(dev_priv);
+
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+
+ undo_idling(dev_priv, interruptible);
+}
diff --git a/sys/dev/drm2/i915/i915_gem_tiling.c b/sys/dev/drm2/i915/i915_gem_tiling.c
new file mode 100644
index 0000000..b3d98c8
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_gem_tiling.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+
+#include <sys/sf_buf.h>
+
+/** @file i915_gem_tiling.c
+ *
+ * Support for managing tiling state of buffer objects.
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvements from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMs) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y. So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics. This
+ * is called "Channel XOR Randomization" in the MCH documentation. The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what address
+ * swizzling it needs to do, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
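+
+/*
+ * Illustrative sketch (editor's note, not part of this commit): what a
+ * swizzle mode means for a CPU copy.  Under I915_BIT_6_SWIZZLE_9_10,
+ * bit 6 of every linear offset is flipped whenever bits 9 and 10 of
+ * that offset differ in parity; this follows the usual libdrm-style
+ * formulation, and the function name is hypothetical.
+ */
+#if 0
+static unsigned long
+swizzle_offset_9_10(unsigned long offset)
+{
+
+	/* bit 9 >> 3 and bit 10 >> 4 both land on bit 6 */
+	return (offset ^ (((offset >> 3) ^ (offset >> 4)) & 0x40));
+}
+#endif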
+
+/**
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated with
+ * identically sized dimms. We don't need to check the 3rd
+ * channel because no cpu with gpu attached ships in that
+ * configuration. Also, swizzling only makes sense for 2
+ * channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ } else if (IS_GEN5(dev)) {
+		/* On Ironlake the GPU always uses the same swizzling
+		 * setup, whatever the DRAM config.
+		 */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (IS_GEN2(dev)) {
+ /* As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ uint32_t dcc;
+
+ /* On 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+ * 9 for Y tiled. The CPU's interleave is independent, and
+ * can be based on either bit 11 (haven't seen this yet) or
+ * bit 17 (common).
+ */
+ dcc = I915_READ(DCC);
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+ if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+ /* This is the base swizzling by the GPU for
+ * tiled buffers.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* Bit 11 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+ } else {
+ /* Bit 17 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+ }
+ break;
+ }
+ if (dcc == 0xffffffff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ } else {
+ /* The 965, G33, and newer, have a very flexible memory
+ * configuration. It will enable dual-channel mode
+ * (interleaving) on as much memory as it can, and the GPU
+ * will additionally sometimes enable different bit 6
+ * swizzling for tiled objects from the CPU.
+ *
+ * Here's what I found on the G965:
+ * slot fill memory size swizzling
+ * 0A 0B 1A 1B 1-ch 2-ch
+ * 512 0 0 0 512 0 O
+ * 512 0 512 0 16 1008 X
+ * 512 0 0 512 16 1008 X
+ * 0 512 0 512 16 1008 X
+ * 1024 1024 1024 0 2048 1024 O
+ *
+ * We could probably detect this based on either the DRB
+ * matching, which was the case for the swizzling required in
+ * the table above, or from the 1-ch value being less than
+ * the minimum size of a rank.
+ */
+ if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ }
+
+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/* Check pitch constraints for all chips & tiling formats */
+static bool
+i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+{
+ int tile_width;
+
+ /* Linear is always fine */
+ if (tiling_mode == I915_TILING_NONE)
+ return (true);
+
+ if (IS_GEN2(dev) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* check maximum stride & object size */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* i965 stores the end address of the gtt mapping in the fence
+		 * reg, so don't bother to check the size */
+ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
+ return (false);
+ } else {
+ if (stride > 8192)
+ return (false);
+
+ if (IS_GEN3(dev)) {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ return (false);
+ } else {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ return (false);
+ }
+ }
+
+ /* 965+ just needs multiples of tile width */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (stride & (tile_width - 1))
+ return (false);
+ return (true);
+ }
+
+ /* Pre-965 needs power of two tile widths */
+ if (stride < tile_width)
+ return (false);
+
+ if (stride & (stride - 1))
+ return (false);
+
+ return (true);
+}
+
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
+{
+ u32 size;
+
+ if (tiling_mode == I915_TILING_NONE)
+ return (true);
+
+ if (INTEL_INFO(obj->base.dev)->gen >= 4)
+ return (true);
+
+ if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+ return (false);
+ } else {
+ if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+ return (false);
+ }
+
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
+ if (INTEL_INFO(obj->base.dev)->gen == 3)
+ size = 1024*1024;
+ else
+ size = 512*1024;
+
+ while (size < obj->base.size)
+ size <<= 1;
+
+ if (obj->gtt_space->size != size)
+ return (false);
+
+ if (obj->gtt_offset & (size - 1))
+ return (false);
+
+ return (true);
+}
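+
+/*
+ * Illustrative sketch (editor's note, not part of this commit): the
+ * power-of-two sizing above in isolation.  On gen3 a 3MB object, for
+ * instance, needs a 4MB fence region (1MB doubled twice).  The function
+ * name is hypothetical.
+ */
+#if 0
+static u32
+fence_region_size(u32 obj_size, int gen)
+{
+	u32 size;
+
+	size = (gen == 3) ? 1024 * 1024 : 512 * 1024;
+	while (size < obj_size)
+		size <<= 1;
+	return (size);
+}
+#endif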
+
+/**
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ */
+int
+i915_gem_set_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_set_tiling *args = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = 0;
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
+
+ if (!i915_tiling_ok(dev,
+ args->stride, obj->base.size, args->tiling_mode)) {
+ drm_gem_object_unreference(&obj->base);
+ return -EINVAL;
+ }
+
+ if (obj->pin_count) {
+ drm_gem_object_unreference(&obj->base);
+ return -EBUSY;
+ }
+
+ if (args->tiling_mode == I915_TILING_NONE) {
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ args->stride = 0;
+ } else {
+ if (args->tiling_mode == I915_TILING_X)
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ else
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+ /* Hide bit 17 swizzling from the user. This prevents old Mesa
+ * from aborting the application on sw fallbacks to bit 17,
+ * and we use the pread/pwrite bit17 paths to swizzle for it.
+ * If there was a user that was relying on the swizzle
+ * information for drm_intel_bo_map()ed reads/writes this would
+ * break it, but we don't have any of those.
+ */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
+ /* If we can't handle the swizzling, make it untiled. */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+ args->tiling_mode = I915_TILING_NONE;
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ args->stride = 0;
+ }
+ }
+
+ if (args->tiling_mode != obj->tiling_mode ||
+ args->stride != obj->stride) {
+ /* We need to rebind the object if its current allocation
+ * no longer meets the alignment restrictions for its new
+ * tiling mode. Otherwise we can just leave it alone, but
+ * need to ensure that any fence register is cleared.
+ */
+ i915_gem_release_mmap(obj);
+
+ obj->map_and_fenceable = obj->gtt_space == NULL ||
+ (obj->gtt_offset + obj->base.size <=
+ dev_priv->mm.gtt_mappable_end &&
+ i915_gem_object_fence_ok(obj, args->tiling_mode));
+
+ /* Rebind if we need a change of alignment */
+ if (!obj->map_and_fenceable) {
+ uint32_t unfenced_alignment =
+ i915_gem_get_unfenced_gtt_alignment(dev,
+ obj->base.size, args->tiling_mode);
+ if (obj->gtt_offset & (unfenced_alignment - 1))
+ ret = i915_gem_object_unbind(obj);
+ }
+ if (ret == 0) {
+ obj->tiling_changed = true;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
+ }
+ }
+ /* we have to maintain this existing ABI... */
+ args->stride = obj->stride;
+ args->tiling_mode = obj->tiling_mode;
+ drm_gem_object_unreference(&obj->base);
+
+ return (ret);
+}
+
+/**
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ */
+int
+i915_gem_get_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_get_tiling *args = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
+
+ args->tiling_mode = obj->tiling_mode;
+ switch (obj->tiling_mode) {
+ case I915_TILING_X:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ break;
+ case I915_TILING_NONE:
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ default:
+ DRM_ERROR("unknown tiling mode\n");
+ }
+
+ /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
+ drm_gem_object_unreference(&obj->base);
+
+ return 0;
+}
+
+/**
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void
+i915_gem_swizzle_page(vm_page_t m)
+{
+ char temp[64];
+ char *vaddr;
+ struct sf_buf *sf;
+ int i;
+
+ /* XXXKIB sleep */
+ sf = sf_buf_alloc(m, SFB_DEFAULT);
+ vaddr = (char *)sf_buf_kva(sf);
+
+ for (i = 0; i < PAGE_SIZE; i += 128) {
+ memcpy(temp, &vaddr[i], 64);
+ memcpy(&vaddr[i], &vaddr[i + 64], 64);
+ memcpy(&vaddr[i + 64], temp, 64);
+ }
+
+ sf_buf_free(sf);
+}
+
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+ int page_count = obj->base.size >> PAGE_SHIFT;
+ int i;
+
+ if (obj->bit_17 == NULL)
+ return;
+
+ for (i = 0; i < page_count; i++) {
+ char new_bit_17 = VM_PAGE_TO_PHYS(obj->pages[i]) >> 17;
+ if ((new_bit_17 & 0x1) !=
+ (test_bit(i, obj->bit_17) != 0)) {
+ i915_gem_swizzle_page(obj->pages[i]);
+ vm_page_dirty(obj->pages[i]);
+ }
+ }
+}
+
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+ int page_count = obj->base.size >> PAGE_SHIFT;
+ int i;
+
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = malloc(BITS_TO_LONGS(page_count) *
+ sizeof(long), DRM_I915_GEM, M_WAITOK);
+ }
+
+ /* XXXKIB: review locking, atomics might be not needed there */
+ for (i = 0; i < page_count; i++) {
+ if (VM_PAGE_TO_PHYS(obj->pages[i]) & (1 << 17))
+ set_bit(i, obj->bit_17);
+ else
+ clear_bit(i, obj->bit_17);
+ }
+}
diff --git a/sys/dev/drm2/i915/i915_irq.c b/sys/dev/drm2/i915/i915_irq.c
new file mode 100644
index 0000000..52233ea
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_irq.c
@@ -0,0 +1,2278 @@
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
+ */
+/*-
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <sys/sched.h>
+#include <sys/sf_buf.h>
+
+static void i915_capture_error_state(struct drm_device *dev);
+static u32 ring_last_seqno(struct intel_ring_buffer *ring);
+
+/**
+ * Interrupts that are always left unmasked.
+ *
+ * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
+ * we leave them always unmasked in IMR and then control enabling them through
+ * PIPESTAT alone.
+ */
+#define I915_INTERRUPT_ENABLE_FIX \
+ (I915_ASLE_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+
+/** Interrupts that we mask and unmask at runtime. */
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
+
+#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
+ PIPE_VBLANK_INTERRUPT_STATUS)
+
+#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
+ PIPE_VBLANK_INTERRUPT_ENABLE)
+
+#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
+ DRM_I915_VBLANK_PIPE_B)
+
+/* For display hotplug interrupt */
+static void
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
+ }
+}
+
+static inline void
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
+ }
+}
+
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
+ u32 reg = PIPESTAT(pipe);
+
+ dev_priv->pipestat[pipe] |= mask;
+ /* Enable the interrupt, clear any pending status */
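+		/*
+		 * Enable bits live in the high half of PIPESTAT and
+		 * their status bits in the low half, so mask >> 16
+		 * acks any status already pending for this event.
+		 */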
+ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
+ POSTING_READ(reg);
+ }
+}
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
+ u32 reg = PIPESTAT(pipe);
+
+ dev_priv->pipestat[pipe] &= ~mask;
+ I915_WRITE(reg, dev_priv->pipestat[pipe]);
+ POSTING_READ(reg);
+ }
+}
+
+/**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void intel_enable_asle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ mtx_lock(&dev_priv->irq_lock);
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_enable_display_irq(dev_priv, DE_GSE);
+ else {
+ i915_enable_pipestat(dev_priv, 1,
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
+ if (INTEL_INFO(dev)->gen >= 4)
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
+ }
+
+ mtx_unlock(&dev_priv->irq_lock);
+}
+
+/**
+ * i915_pipe_enabled - check if a pipe is enabled
+ * @dev: DRM device
+ * @pipe: pipe to check
+ *
+ * Reading certain registers when the pipe is disabled can hang the chip.
+ * Use this routine to make sure the PLL is running and the pipe is active
+ * before reading such registers if unsure.
+ */
+static int
+i915_pipe_enabled(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+static u32
+i915_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long high_frame;
+ unsigned long low_frame;
+ u32 high1, high2, low;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG("trying to get vblank count for disabled "
+ "pipe %c\n", pipe_name(pipe));
+ return 0;
+ }
+
+ high_frame = PIPEFRAME(pipe);
+ low_frame = PIPEFRAMEPIXEL(pipe);
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ do {
+ high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
+ high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ } while (high1 != high2);
+
+ high1 >>= PIPE_FRAME_HIGH_SHIFT;
+ low >>= PIPE_FRAME_LOW_SHIFT;
+ return (high1 << 8) | low;
+}
+
+static u32
+gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int reg = PIPE_FRMCOUNT_GM45(pipe);
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG("i915: trying to get vblank count for disabled "
+ "pipe %c\n", pipe_name(pipe));
+ return 0;
+ }
+
+ return I915_READ(reg);
+}
+
+static int
+i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 vbl = 0, position = 0;
+ int vbl_start, vbl_end, htotal, vtotal;
+ bool in_vbl = true;
+ int ret = 0;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG("i915: trying to get scanoutpos for disabled "
+ "pipe %c\n", pipe_name(pipe));
+ return 0;
+ }
+
+ /* Get vtotal. */
+ vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* No obvious pixelcount register. Only query vertical
+ * scanout position from Display scan line register.
+ */
+ position = I915_READ(PIPEDSL(pipe));
+
+ /* Decode into vertical scanout position. Don't have
+ * horizontal scanout position.
+ */
+ *vpos = position & 0x1fff;
+ *hpos = 0;
+ } else {
+ /* Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ /* Query vblank area. */
+ vbl = I915_READ(VBLANK(pipe));
+
+ /* Test position against vblank region. */
+ vbl_start = vbl & 0x1fff;
+ vbl_end = (vbl >> 16) & 0x1fff;
+
+ if ((*vpos < vbl_start) || (*vpos > vbl_end))
+ in_vbl = false;
+
+ /* Inside "upper part" of vblank area? Apply corrective offset: */
+ if (in_vbl && (*vpos >= vbl_start))
+ *vpos = *vpos - vtotal;
+
+ /* Readouts valid? */
+ if (vbl > 0)
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_INVBL;
+
+ return ret;
+}
+
+static int
+i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
+ struct timeval *vblank_time, unsigned flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+
+ if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ /* Get drm_crtc to timestamp: */
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc == NULL) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ if (!crtc->enabled) {
+#if 0
+ DRM_DEBUG("crtc %d is disabled\n", pipe);
+#endif
+ return -EBUSY;
+ }
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+ vblank_time, flags,
+ crtc);
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void
+i915_hotplug_work_func(void *context, int pending)
+{
+	drm_i915_private_t *dev_priv = context;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+
+	sx_xlock(&mode_config->mutex);
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+ if (encoder->hot_plug)
+ encoder->hot_plug(encoder);
+
+ sx_xunlock(&mode_config->mutex);
+
+ /* Just fire off a uevent and let userspace tell us what to do */
+#if 0
+ drm_helper_hpd_irq_event(dev);
+#endif
+}
+
+static void i915_handle_rps_change(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u8 new_delay = dev_priv->cur_delay;
+
+ I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = I915_READ(RCPREVBSYTUPAVG);
+ busy_down = I915_READ(RCPREVBSYTDNAVG);
+ max_avg = I915_READ(RCBMAXAVG);
+ min_avg = I915_READ(RCBMINAVG);
+
+ /* Handle RCS change request from hw */
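+	/*
+	 * Note (editor's gloss): on Ironlake the delay scale is inverted --
+	 * a smaller delay value means a higher frequency -- so max_delay is
+	 * the numeric floor and min_delay the numeric ceiling below.
+	 */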
+ if (busy_up > max_avg) {
+ if (dev_priv->cur_delay != dev_priv->max_delay)
+ new_delay = dev_priv->cur_delay - 1;
+ if (new_delay < dev_priv->max_delay)
+ new_delay = dev_priv->max_delay;
+ } else if (busy_down < min_avg) {
+ if (dev_priv->cur_delay != dev_priv->min_delay)
+ new_delay = dev_priv->cur_delay + 1;
+ if (new_delay > dev_priv->min_delay)
+ new_delay = dev_priv->min_delay;
+ }
+
+ if (ironlake_set_drps(dev, new_delay))
+ dev_priv->cur_delay = new_delay;
+}
+
+static void notify_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 seqno;
+
+ if (ring->obj == NULL)
+ return;
+
+ seqno = ring->get_seqno(ring);
+ CTR2(KTR_DRM, "request_complete %s %d", ring->name, seqno);
+
+ mtx_lock(&ring->irq_lock);
+ ring->irq_seqno = seqno;
+ wakeup(ring);
+ mtx_unlock(&ring->irq_lock);
+
+ if (i915_enable_hangcheck) {
+ dev_priv->hangcheck_count = 0;
+ callout_schedule(&dev_priv->hangcheck_timer,
+ DRM_I915_HANGCHECK_PERIOD);
+ }
+}
+
+static void
+gen6_pm_rps_work_func(void *arg, int pending)
+{
+ struct drm_device *dev;
+ drm_i915_private_t *dev_priv;
+ u8 new_delay;
+ u32 pm_iir, pm_imr;
+
+ dev_priv = (drm_i915_private_t *)arg;
+ dev = dev_priv->dev;
+ new_delay = dev_priv->cur_delay;
+
+ mtx_lock(&dev_priv->rps_lock);
+ pm_iir = dev_priv->pm_iir;
+ dev_priv->pm_iir = 0;
+ pm_imr = I915_READ(GEN6_PMIMR);
+ I915_WRITE(GEN6_PMIMR, 0);
+ mtx_unlock(&dev_priv->rps_lock);
+
+ if (!pm_iir)
+ return;
+
+ DRM_LOCK(dev);
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (dev_priv->cur_delay != dev_priv->max_delay)
+ new_delay = dev_priv->cur_delay + 1;
+ if (new_delay > dev_priv->max_delay)
+ new_delay = dev_priv->max_delay;
+ } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
+ gen6_gt_force_wake_get(dev_priv);
+ if (dev_priv->cur_delay != dev_priv->min_delay)
+ new_delay = dev_priv->cur_delay - 1;
+ if (new_delay < dev_priv->min_delay) {
+ new_delay = dev_priv->min_delay;
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
+ ((new_delay << 16) & 0x3f0000));
+ } else {
+ /* Make sure we continue to get down interrupts
+ * until we hit the minimum frequency */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
+ }
+ gen6_gt_force_wake_put(dev_priv);
+ }
+
+ gen6_set_rps(dev, new_delay);
+ dev_priv->cur_delay = new_delay;
+
+ /*
+ * rps_lock not held here because clearing is non-destructive. There is
+ * an *extremely* unlikely race with gen6_rps_enable() that is prevented
+ * by holding struct_mutex for the duration of the write.
+ */
+ DRM_UNLOCK(dev);
+}
+
+static void pch_irq_handler(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 pch_iir;
+ int pipe;
+
+ pch_iir = I915_READ(SDEIIR);
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK)
+ DRM_DEBUG("i915: PCH audio power change on port %d\n",
+ (pch_iir & SDE_AUDIO_POWER_MASK) >>
+ SDE_AUDIO_POWER_SHIFT);
+
+ if (pch_iir & SDE_GMBUS)
+ DRM_DEBUG("i915: PCH GMBUS interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_HDCP_MASK)
+ DRM_DEBUG("i915: PCH HDCP audio interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_TRANS_MASK)
+ DRM_DEBUG("i915: PCH transcoder audio interrupt\n");
+
+ if (pch_iir & SDE_POISON)
+ DRM_ERROR("i915: PCH poison interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK)
+ for_each_pipe(pipe)
+ DRM_DEBUG(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+
+ if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+ DRM_DEBUG("i915: PCH transcoder CRC done interrupt\n");
+
+ if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+ DRM_DEBUG("i915: PCH transcoder CRC error interrupt\n");
+
+ if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+ DRM_DEBUG("i915: PCH transcoder B underrun interrupt\n");
+ if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+ DRM_DEBUG("PCH transcoder A underrun interrupt\n");
+}
+
+static void
+ivybridge_irq_handler(void *arg)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+#if 0
+ struct drm_i915_master_private *master_priv;
+#endif
+
+ atomic_inc(&dev_priv->irq_received);
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+ I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(DEIER);
+
+ de_iir = I915_READ(DEIIR);
+ gt_iir = I915_READ(GTIIR);
+ pch_iir = I915_READ(SDEIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ CTR4(KTR_DRM, "ivybridge_irq de %x gt %x pch %x pm %x", de_iir,
+ gt_iir, pch_iir, pm_iir);
+
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
+ goto done;
+
+#if 0
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+#else
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+#endif
+
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->rings[RCS]);
+ if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[VCS]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[BCS]);
+
+ if (de_iir & DE_GSE_IVB) {
+#if 1
+ KIB_NOTYET();
+#else
+ intel_opregion_gse_intr(dev);
+#endif
+ }
+
+ if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+ }
+
+ if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+ if (de_iir & DE_PIPEA_VBLANK_IVB)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK_IVB)
+ drm_handle_vblank(dev, 1);
+
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT_IVB) {
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ taskqueue_enqueue(dev_priv->tq, &dev_priv->hotplug_task);
+ pch_irq_handler(dev);
+ }
+
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
+ mtx_lock(&dev_priv->rps_lock);
+ if ((dev_priv->pm_iir & pm_iir) != 0)
+ printf("Missed a PM interrupt\n");
+ dev_priv->pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ POSTING_READ(GEN6_PMIMR);
+ mtx_unlock(&dev_priv->rps_lock);
+ taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
+ }
+
+	/* should clear PCH hotplug event before clearing CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(DEIIR, de_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+
+done:
+ I915_WRITE(DEIER, de_ier);
+ POSTING_READ(DEIER);
+}
+
+static void
+ironlake_irq_handler(void *arg)
+{
+ struct drm_device *dev = arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+ u32 hotplug_mask;
+#if 0
+ struct drm_i915_master_private *master_priv;
+#endif
+ u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ if (IS_GEN6(dev))
+ bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+ I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(DEIER);
+
+ de_iir = I915_READ(DEIIR);
+ gt_iir = I915_READ(GTIIR);
+ pch_iir = I915_READ(SDEIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ CTR4(KTR_DRM, "ironlake_irq de %x gt %x pch %x pm %x", de_iir,
+ gt_iir, pch_iir, pm_iir);
+
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
+ (!IS_GEN6(dev) || pm_iir == 0))
+ goto done;
+
+ if (HAS_PCH_CPT(dev))
+ hotplug_mask = SDE_HOTPLUG_MASK_CPT;
+ else
+ hotplug_mask = SDE_HOTPLUG_MASK;
+
+#if 0
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+#else
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+#endif
+
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->rings[RCS]);
+ if (gt_iir & bsd_usr_interrupt)
+ notify_ring(dev, &dev_priv->rings[VCS]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[BCS]);
+
+ if (de_iir & DE_GSE) {
+#if 1
+ KIB_NOTYET();
+#else
+ intel_opregion_gse_intr(dev);
+#endif
+ }
+
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+ }
+
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT) {
+ if (pch_iir & hotplug_mask)
+ taskqueue_enqueue(dev_priv->tq,
+ &dev_priv->hotplug_task);
+ pch_irq_handler(dev);
+ }
+
+ if (de_iir & DE_PCU_EVENT) {
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+ i915_handle_rps_change(dev);
+ }
+
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
+ mtx_lock(&dev_priv->rps_lock);
+ if ((dev_priv->pm_iir & pm_iir) != 0)
+ printf("Missed a PM interrupt\n");
+ dev_priv->pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ POSTING_READ(GEN6_PMIMR);
+ mtx_unlock(&dev_priv->rps_lock);
+ taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
+ }
+
+	/* should clear PCH hotplug event before clearing CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(DEIIR, de_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+
+done:
+ I915_WRITE(DEIER, de_ier);
+ POSTING_READ(DEIER);
+}
+
+/**
+ * i915_error_work_func - do process context error handling work
+ * @context: drm_i915_private for the device
+ * @pending: taskqueue pending count (unused)
+ *
+ * Fire an error uevent so userspace can see that a hang or error
+ * was detected.
+ */
+static void
+i915_error_work_func(void *context, int pending)
+{
+ drm_i915_private_t *dev_priv = context;
+ struct drm_device *dev = dev_priv->dev;
+
+ /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
+
+ if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
+ DRM_DEBUG("i915: resetting chip\n");
+ /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
+ if (!i915_reset(dev, GRDOM_RENDER)) {
+ atomic_store_rel_int(&dev_priv->mm.wedged, 0);
+ /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
+ }
+ mtx_lock(&dev_priv->error_completion_lock);
+ dev_priv->error_completion++;
+ wakeup(&dev_priv->error_completion);
+ mtx_unlock(&dev_priv->error_completion_lock);
+ }
+}
+
+static void i915_report_and_clear_eir(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 eir = I915_READ(EIR);
+ int pipe;
+
+ if (!eir)
+ return;
+
+ printf("i915: render error detected, EIR: 0x%08x\n", eir);
+
+ if (IS_G4X(dev)) {
+ if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
+ u32 ipeir = I915_READ(IPEIR_I965);
+
+ printf(" IPEIR: 0x%08x\n",
+ I915_READ(IPEIR_I965));
+ printf(" IPEHR: 0x%08x\n",
+ I915_READ(IPEHR_I965));
+ printf(" INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE_I965));
+ printf(" INSTPS: 0x%08x\n",
+ I915_READ(INSTPS));
+ printf(" INSTDONE1: 0x%08x\n",
+ I915_READ(INSTDONE1));
+ printf(" ACTHD: 0x%08x\n",
+ I915_READ(ACTHD_I965));
+ I915_WRITE(IPEIR_I965, ipeir);
+ POSTING_READ(IPEIR_I965);
+ }
+ if (eir & GM45_ERROR_PAGE_TABLE) {
+ u32 pgtbl_err = I915_READ(PGTBL_ER);
+ printf("page table error\n");
+ printf(" PGTBL_ER: 0x%08x\n",
+ pgtbl_err);
+ I915_WRITE(PGTBL_ER, pgtbl_err);
+ POSTING_READ(PGTBL_ER);
+ }
+ }
+
+ if (!IS_GEN2(dev)) {
+ if (eir & I915_ERROR_PAGE_TABLE) {
+ u32 pgtbl_err = I915_READ(PGTBL_ER);
+ printf("page table error\n");
+ printf(" PGTBL_ER: 0x%08x\n",
+ pgtbl_err);
+ I915_WRITE(PGTBL_ER, pgtbl_err);
+ POSTING_READ(PGTBL_ER);
+ }
+ }
+
+ if (eir & I915_ERROR_MEMORY_REFRESH) {
+ printf("memory refresh error:\n");
+ for_each_pipe(pipe)
+ printf("pipe %c stat: 0x%08x\n",
+ pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
+ /* pipestat has already been acked */
+ }
+ if (eir & I915_ERROR_INSTRUCTION) {
+ printf("instruction error\n");
+ printf(" INSTPM: 0x%08x\n",
+ I915_READ(INSTPM));
+ if (INTEL_INFO(dev)->gen < 4) {
+ u32 ipeir = I915_READ(IPEIR);
+
+ printf(" IPEIR: 0x%08x\n",
+ I915_READ(IPEIR));
+ printf(" IPEHR: 0x%08x\n",
+ I915_READ(IPEHR));
+ printf(" INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE));
+ printf(" ACTHD: 0x%08x\n",
+ I915_READ(ACTHD));
+ I915_WRITE(IPEIR, ipeir);
+ POSTING_READ(IPEIR);
+ } else {
+ u32 ipeir = I915_READ(IPEIR_I965);
+
+ printf(" IPEIR: 0x%08x\n",
+ I915_READ(IPEIR_I965));
+ printf(" IPEHR: 0x%08x\n",
+ I915_READ(IPEHR_I965));
+ printf(" INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE_I965));
+ printf(" INSTPS: 0x%08x\n",
+ I915_READ(INSTPS));
+ printf(" INSTDONE1: 0x%08x\n",
+ I915_READ(INSTDONE1));
+ printf(" ACTHD: 0x%08x\n",
+ I915_READ(ACTHD_I965));
+ I915_WRITE(IPEIR_I965, ipeir);
+ POSTING_READ(IPEIR_I965);
+ }
+ }
+
+ I915_WRITE(EIR, eir);
+ POSTING_READ(EIR);
+ eir = I915_READ(EIR);
+ if (eir) {
+ /*
+ * some errors might have become stuck,
+ * mask them.
+ */
+ DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+ I915_WRITE(EMR, I915_READ(EMR) | eir);
+ I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ }
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void i915_handle_error(struct drm_device *dev, bool wedged)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ i915_capture_error_state(dev);
+ i915_report_and_clear_eir(dev);
+
+ if (wedged) {
+ mtx_lock(&dev_priv->error_completion_lock);
+ dev_priv->error_completion = 0;
+ dev_priv->mm.wedged = 1;
+ /* unlock acts as rel barrier for store to wedged */
+ mtx_unlock(&dev_priv->error_completion_lock);
+
+ /*
+ * Wakeup waiting processes so they don't hang
+ */
+ mtx_lock(&dev_priv->rings[RCS].irq_lock);
+ wakeup(&dev_priv->rings[RCS]);
+ mtx_unlock(&dev_priv->rings[RCS].irq_lock);
+ if (HAS_BSD(dev)) {
+ mtx_lock(&dev_priv->rings[VCS].irq_lock);
+ wakeup(&dev_priv->rings[VCS]);
+ mtx_unlock(&dev_priv->rings[VCS].irq_lock);
+ }
+ if (HAS_BLT(dev)) {
+ mtx_lock(&dev_priv->rings[BCS].irq_lock);
+ wakeup(&dev_priv->rings[BCS]);
+ mtx_unlock(&dev_priv->rings[BCS].irq_lock);
+ }
+ }
+
+ taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task);
+}
+
+static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj;
+ struct intel_unpin_work *work;
+ bool stall_detected;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ mtx_lock(&dev->event_lock);
+ work = intel_crtc->unpin_work;
+
+ if (work == NULL || work->pending || !work->enable_stall_check) {
+ /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+ mtx_unlock(&dev->event_lock);
+ return;
+ }
+
+ /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+ obj = work->pending_flip_obj;
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int dspsurf = DSPSURF(intel_crtc->plane);
+ stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
+ } else {
+ int dspaddr = DSPADDR(intel_crtc->plane);
+ stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+ crtc->y * crtc->fb->pitches[0] +
+ crtc->x * crtc->fb->bits_per_pixel/8);
+ }
+
+ mtx_unlock(&dev->event_lock);
+
+ if (stall_detected) {
+ DRM_DEBUG("Pageflip stall detected\n");
+ intel_prepare_page_flip(dev, intel_crtc->plane);
+ }
+}
+
+static void
+i915_driver_irq_handler(void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
+#if 0
+ struct drm_i915_master_private *master_priv;
+#endif
+ u32 iir, new_iir;
+ u32 pipe_stats[I915_MAX_PIPES];
+ u32 vblank_status;
+ int vblank = 0;
+ int irq_received;
+ int pipe;
+ bool blc_event = false;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+
+ CTR1(KTR_DRM, "driver_irq_handler %x", iir);
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
+ else
+ vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
+
+ for (;;) {
+ irq_received = iir != 0;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ mtx_lock(&dev_priv->irq_lock);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
+ }
+ mtx_unlock(&dev_priv->irq_lock);
+
+ if (!irq_received)
+ break;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ taskqueue_enqueue(dev_priv->tq,
+ &dev_priv->hotplug_task);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+#if 0
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+#else
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+#endif
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->rings[VCS]);
+
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 0);
+ if (dev_priv->flip_pending_is_done)
+ intel_finish_page_flip_plane(dev, 0);
+ }
+
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 1);
+ if (dev_priv->flip_pending_is_done)
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & vblank_status &&
+ drm_handle_vblank(dev, pipe)) {
+ vblank++;
+ if (!dev_priv->flip_pending_is_done) {
+ i915_pageflip_stall_check(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT)) {
+#if 1
+ KIB_NOTYET();
+#else
+ intel_opregion_asle_intr(dev);
+#endif
+ }
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ iir = new_iir;
+ }
+}
+
+static int i915_emit_irq(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+#if 0
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+#endif
+
+ i915_kernel_lost_context(dev);
+
+ DRM_DEBUG("i915: emit_irq\n");
+
+ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 1;
+#if 0
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+#else
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+#endif
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
+
+ return dev_priv->counter;
+}
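+
+/*
+ * Illustrative usage (editor's note, not part of this commit): the
+ * emit/wait pair as the ioctls below drive it.  emit returns a
+ * breadcrumb sequence number that the MI_USER_INTERRUPT above will
+ * eventually publish via the hardware status page; wait sleeps until
+ * READ_BREADCRUMB() catches up to it.  The wrapper name is hypothetical.
+ */
+#if 0
+static int
+emit_and_wait(struct drm_device *dev)
+{
+	int seq;
+
+	seq = i915_emit_irq(dev);
+	return (i915_wait_irq(dev, seq));
+}
+#endif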
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+#if 0
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+#endif
+ int ret;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ READ_BREADCRUMB(dev_priv));
+
+#if 0
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ return 0;
+ }
+
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+#else
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (dev_priv->sarea_priv) {
+ dev_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+ return 0;
+ }
+
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+#endif
+
+ ret = 0;
+ mtx_lock(&ring->irq_lock);
+ if (ring->irq_get(ring)) {
+ DRM_UNLOCK(dev);
+ while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
+ ret = -msleep(ring, &ring->irq_lock, PCATCH,
+ "915wtq", 3 * hz);
+ }
+ ring->irq_put(ring);
+ mtx_unlock(&ring->irq_lock);
+ DRM_LOCK(dev);
+ } else {
+ mtx_unlock(&ring->irq_lock);
+ if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
+ 3000, 1, "915wir"))
+ ret = -EBUSY;
+ }
+
+ if (ret == -EBUSY) {
+ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ }
+
+ return ret;
+}
+
+/* Needs the lock as it touches the ring.
+ */
+int i915_irq_emit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_emit_t *emit = data;
+ int result;
+
+ if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ DRM_LOCK(dev);
+ result = i915_emit_irq(dev);
+ DRM_UNLOCK(dev);
+
+ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_wait_t *irqwait = data;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ return i915_wait_irq(dev, irqwait->irq_seq);
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static int
+i915_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ mtx_lock(&dev_priv->irq_lock);
+ if (INTEL_INFO(dev)->gen >= 4)
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ else
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ /* maintain vblank delivery even in deep C-states */
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
+ mtx_unlock(&dev_priv->irq_lock);
+ CTR1(KTR_DRM, "i915_enable_vblank %d", pipe);
+
+ return 0;
+}
+
+static int
+ironlake_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ mtx_lock(&dev_priv->irq_lock);
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+ mtx_unlock(&dev_priv->irq_lock);
+ CTR1(KTR_DRM, "ironlake_enable_vblank %d", pipe);
+
+ return 0;
+}
+
+static int
+ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ mtx_lock(&dev_priv->irq_lock);
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+ mtx_unlock(&dev_priv->irq_lock);
+ CTR1(KTR_DRM, "ivybridge_enable_vblank %d", pipe);
+
+ return 0;
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static void
+i915_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ mtx_lock(&dev_priv->irq_lock);
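+	/* Undo the gen3 deep C-state vblank workaround set up in
+	 * i915_enable_vblank(). */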
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM,
+ INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE |
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ mtx_unlock(&dev_priv->irq_lock);
+ CTR1(KTR_DRM, "i915_disable_vblank %d", pipe);
+}
+
+static void
+ironlake_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ mtx_lock(&dev_priv->irq_lock);
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+ mtx_unlock(&dev_priv->irq_lock);
+ CTR1(KTR_DRM, "ironlake_disable_vblank %d", pipe);
+}
+
+static void
+ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ mtx_lock(&dev_priv->irq_lock);
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+ mtx_unlock(&dev_priv->irq_lock);
+ CTR1(KTR_DRM, "ivybridge_disable_vblank %d", pipe);
+}
+
+/* Set the vblank monitor pipe
+ */
+int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_vblank_pipe_t *pipe = data;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ /* The delayed swap mechanism was fundamentally racy, and has been
+ * removed. The model was that the client requested a delayed flip/swap
+ * from the kernel, then waited for vblank before continuing to perform
+ * rendering. The problem was that the kernel might wake the client
+ * up before it dispatched the vblank swap (since the lock has to be
+ * held while touching the ringbuffer), in which case the client would
+ * clear and start the next frame before the swap occurred, and
+ * flicker would occur in addition to likely missing the vblank.
+ *
+ * In the absence of this ioctl, userland falls back to a correct path
+ * of waiting for a vblank, then dispatching the swap on its own.
+ * Context switching to userland and back is plenty fast enough for
+ * meeting the requirements of vblank swapping.
+ */
+ return -EINVAL;
+}
+
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
+{
+
+ if (list_empty(&ring->request_list))
+ return (0);
+ else
+ return (list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request, list)->seqno);
+}
+
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+ if (list_empty(&ring->request_list) ||
+ i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+ /* Issue a wake-up to catch stuck h/w. */
+ if (ring->waiting_seqno) {
+ DRM_ERROR(
+"Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
+ ring->name,
+ ring->waiting_seqno,
+ ring->get_seqno(ring));
+ wakeup(ring);
+ *err = true;
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ_CTL(ring);
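+	/* The wait bit is sticky; writing it back clears it and kicks the
+	 * ring out of a stuck MI_WAIT_FOR_EVENT. */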
+ if (tmp & RING_WAIT) {
+ DRM_ERROR("Kicking stuck wait on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. The first time this is called we simply record
+ * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
+ * again, we assume the chip is wedged and try to fix it.
+ */
+void
+i915_hangcheck_elapsed(void *context)
+{
+ struct drm_device *dev = (struct drm_device *)context;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
+ bool err = false;
+
+ if (!i915_enable_hangcheck)
+ return;
+
+ /* If all work is done then ACTHD clearly hasn't advanced. */
+ if (i915_hangcheck_ring_idle(&dev_priv->rings[RCS], &err) &&
+ i915_hangcheck_ring_idle(&dev_priv->rings[VCS], &err) &&
+ i915_hangcheck_ring_idle(&dev_priv->rings[BCS], &err)) {
+ dev_priv->hangcheck_count = 0;
+ if (err)
+ goto repeat;
+ return;
+ }
+
+ if (INTEL_INFO(dev)->gen < 4) {
+ instdone = I915_READ(INSTDONE);
+ instdone1 = 0;
+ } else {
+ instdone = I915_READ(INSTDONE_I965);
+ instdone1 = I915_READ(INSTDONE1);
+ }
+ acthd = intel_ring_get_active_head(&dev_priv->rings[RCS]);
+ acthd_bsd = HAS_BSD(dev) ?
+ intel_ring_get_active_head(&dev_priv->rings[VCS]) : 0;
+ acthd_blt = HAS_BLT(dev) ?
+ intel_ring_get_active_head(&dev_priv->rings[BCS]) : 0;
+
+ if (dev_priv->last_acthd == acthd &&
+ dev_priv->last_acthd_bsd == acthd_bsd &&
+ dev_priv->last_acthd_blt == acthd_blt &&
+ dev_priv->last_instdone == instdone &&
+ dev_priv->last_instdone1 == instdone1) {
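+		/* No forward progress since the last sample; repeated
+		 * identical samples mean the GPU is wedged. */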
+ if (dev_priv->hangcheck_count++ > 1) {
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
+
+ if (!IS_GEN2(dev)) {
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ if (kick_ring(&dev_priv->rings[RCS]))
+ goto repeat;
+
+ if (HAS_BSD(dev) &&
+ kick_ring(&dev_priv->rings[VCS]))
+ goto repeat;
+
+ if (HAS_BLT(dev) &&
+ kick_ring(&dev_priv->rings[BCS]))
+ goto repeat;
+ }
+
+ return;
+ }
+ } else {
+ dev_priv->hangcheck_count = 0;
+
+ dev_priv->last_acthd = acthd;
+ dev_priv->last_acthd_bsd = acthd_bsd;
+ dev_priv->last_acthd_blt = acthd_blt;
+ dev_priv->last_instdone = instdone;
+ dev_priv->last_instdone1 = instdone1;
+ }
+
+repeat:
+	/* Reset the timer in case the chip hangs without another request being added */
+ callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD);
+}
+
+/* drm_dma.h hooks */
+static void
+ironlake_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
+ dev->dev_private);
+ TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
+ dev->dev_private);
+ TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
+ dev->dev_private);
+
+ I915_WRITE(HWSTAM, 0xeffe);
+
+ /* XXX hotplug from PCH */
+
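+	/* Mask and disable everything; postinstall sets up the real
+	 * interrupt masks. */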
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ POSTING_READ(DEIER);
+
+ /* and GT */
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ POSTING_READ(GTIER);
+
+ /* south display irq */
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ POSTING_READ(SDEIER);
+}
+
+/*
+ * Enable digital hotplug on the PCH, and configure the DP short pulse
+ * duration to 2ms (which is the minimum in the Display Port spec)
+ *
+ * This register is the same on all known PCH chips.
+ */
+
+static void ironlake_enable_pch_hotplug(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug;
+
+ hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
+ hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
+ hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+ hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+ I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
+
+static int ironlake_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	/* the set of interrupts we always want enabled */
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+ u32 render_irqs;
+ u32 hotplug_mask;
+
+ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+ dev_priv->irq_mask = ~display_mask;
+
+	/* Clear stale IIR bits, then unmask so we can always generate an irq. */
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+ POSTING_READ(DEIER);
+
+ dev_priv->gt_irq_mask = ~0;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+ if (IS_GEN6(dev))
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_BLT_USER_INTERRUPT;
+ else
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_BSD_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
+
+ if (HAS_PCH_CPT(dev)) {
+ hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+ SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
+ } else {
+ hotplug_mask = (SDE_CRT_HOTPLUG |
+ SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG |
+ SDE_PORTD_HOTPLUG |
+ SDE_AUX_MASK);
+ }
+
+ dev_priv->pch_irq_mask = ~hotplug_mask;
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+ I915_WRITE(SDEIER, hotplug_mask);
+ POSTING_READ(SDEIER);
+
+ ironlake_enable_pch_hotplug(dev);
+
+ if (IS_IRONLAKE_M(dev)) {
+ /* Clear & enable PCU event interrupts */
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ }
+
+ return 0;
+}
+
+static int
+ivybridge_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	/* the set of interrupts we always want enabled */
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+ DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB;
+ u32 render_irqs;
+ u32 hotplug_mask;
+
+ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+ dev_priv->irq_mask = ~display_mask;
+
+	/* Clear stale IIR bits, then unmask so we can always generate an irq. */
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
+ DE_PIPEB_VBLANK_IVB);
+ POSTING_READ(DEIER);
+
+ dev_priv->gt_irq_mask = ~0;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+ render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
+ GT_BLT_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
+
+ hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+ SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
+ dev_priv->pch_irq_mask = ~hotplug_mask;
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+ I915_WRITE(SDEIER, hotplug_mask);
+ POSTING_READ(SDEIER);
+
+ ironlake_enable_pch_hotplug(dev);
+
+ return 0;
+}
+
+static void
+i915_driver_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
+ dev->dev_private);
+ TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
+ dev->dev_private);
+ TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
+ dev->dev_private);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ POSTING_READ(IER);
+}
+
+/*
+ * Must be called after intel_modeset_init or hotplug interrupts won't be
+ * enabled correctly.
+ */
+static int
+i915_driver_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
+ u32 error_mask;
+
+ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ /* Enable in IER... */
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+ /* and unmask in IMR */
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+ }
+
+ /*
+ * Enable some error detection, note the instruction error mask
+ * bit is reserved, so we leave it masked.
+ */
+ if (IS_G4X(dev)) {
+ error_mask = ~(GM45_ERROR_PAGE_TABLE |
+ GM45_ERROR_MEM_PRIV |
+ GM45_ERROR_CP_PRIV |
+ I915_ERROR_MEMORY_REFRESH);
+ } else {
+ error_mask = ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+ }
+ I915_WRITE(EMR, error_mask);
+
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ I915_WRITE(IER, enable_mask);
+ POSTING_READ(IER);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+
+ /* Programming the CRT detection parameters tends
+ to generate a spurious hotplug event about three
+ seconds later. So just do it once.
+ */
+ if (IS_G4X(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+
+ /* Ignore TV since it's buggy */
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
+
+#if 1
+ KIB_NOTYET();
+#else
+ intel_opregion_enable_asle(dev);
+#endif
+
+ return 0;
+}
+
+static void
+ironlake_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (dev_priv == NULL)
+ return;
+
+ dev_priv->vblank_pipe = 0;
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+
+ taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
+ taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
+ taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
+}
+
+static void i915_driver_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ dev_priv->vblank_pipe = 0;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe),
+ I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
+ I915_WRITE(IIR, I915_READ(IIR));
+
+ taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
+ taskqueue_drain(dev_priv->tq, &dev_priv->error_task);
+ taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
+}
+
+void
+intel_irq_init(struct drm_device *dev)
+{
+
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ else
+ dev->driver->get_vblank_timestamp = NULL;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+ if (IS_IVYBRIDGE(dev)) {
+ /* Share pre & uninstall handlers with ILK/SNB */
+ dev->driver->irq_handler = ivybridge_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev->driver->irq_handler = ironlake_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ironlake_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ironlake_enable_vblank;
+ dev->driver->disable_vblank = ironlake_disable_vblank;
+ } else {
+ dev->driver->irq_preinstall = i915_driver_irq_preinstall;
+ dev->driver->irq_postinstall = i915_driver_irq_postinstall;
+ dev->driver->irq_uninstall = i915_driver_irq_uninstall;
+ dev->driver->irq_handler = i915_driver_irq_handler;
+ dev->driver->enable_vblank = i915_enable_vblank;
+ dev->driver->disable_vblank = i915_disable_vblank;
+ }
+}
+
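+/*
+ * Copy a GEM object's pages into plain kernel memory so the error state
+ * snapshot is not clobbered when the object is reused.
+ */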
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_object *src)
+{
+ struct drm_i915_error_object *dst;
+ struct sf_buf *sf;
+ void *d, *s;
+ int page, page_count;
+ u32 reloc_offset;
+
+ if (src == NULL || src->pages == NULL)
+ return NULL;
+
+ page_count = src->base.size / PAGE_SIZE;
+
+ dst = malloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM,
+ M_NOWAIT);
+ if (dst == NULL)
+ return (NULL);
+
+ reloc_offset = src->gtt_offset;
+ for (page = 0; page < page_count; page++) {
+ d = malloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
+ if (d == NULL)
+ goto unwind;
+
+ if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+ /* Simply ignore tiling or any overlapping fence.
+ * It's part of the error state, and this hopefully
+ * captures what the GPU read.
+ */
+ s = pmap_mapdev_attr(src->base.dev->agp->base +
+ reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING);
+ memcpy(d, s, PAGE_SIZE);
+ pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
+ } else {
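+			/*
+			 * Page is outside the mappable aperture: flush CPU
+			 * caches and copy via a transient kernel mapping of
+			 * the backing page.
+			 */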
+ drm_clflush_pages(&src->pages[page], 1);
+
+ sched_pin();
+ sf = sf_buf_alloc(src->pages[page], SFB_CPUPRIVATE |
+ SFB_NOWAIT);
+ if (sf != NULL) {
+ s = (void *)(uintptr_t)sf_buf_kva(sf);
+ memcpy(d, s, PAGE_SIZE);
+ sf_buf_free(sf);
+ } else {
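+				/* Could not map the page without sleeping;
+				 * record a marker instead of its contents. */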
+ bzero(d, PAGE_SIZE);
+ strcpy(d, "XXXKIB");
+ }
+ sched_unpin();
+
+ drm_clflush_pages(&src->pages[page], 1);
+ }
+
+ dst->pages[page] = d;
+
+ reloc_offset += PAGE_SIZE;
+ }
+ dst->page_count = page_count;
+ dst->gtt_offset = src->gtt_offset;
+
+ return (dst);
+
+unwind:
+ while (page--)
+ free(dst->pages[page], DRM_I915_GEM);
+ free(dst, DRM_I915_GEM);
+ return (NULL);
+}
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+ int page;
+
+ if (obj == NULL)
+ return;
+
+ for (page = 0; page < obj->page_count; page++)
+ free(obj->pages[page], DRM_I915_GEM);
+
+ free(obj, DRM_I915_GEM);
+}
+
+static void
+i915_error_state_free(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ int i;
+
+ for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
+ i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].ringbuffer);
+ free(error->ring[i].requests, DRM_I915_GEM);
+ }
+
+ free(error->active_bo, DRM_I915_GEM);
+ free(error->overlay, DRM_I915_GEM);
+ free(error, DRM_I915_GEM);
+}
+
+static u32
+capture_bo_list(struct drm_i915_error_buffer *err, int count,
+ struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, mm_list) {
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->seqno = obj->last_rendering_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
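+		/* pinned: 1 for kernel pins, -1 for user pins, 0 otherwise. */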
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+
+ if (++i == count)
+ break;
+
+ err++;
+ }
+
+ return (i);
+}
+
+static void
+i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 +
+ (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
+ (i * 4));
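+	/* FALLTHROUGH: gen3 shares the first eight fence registers
+	 * with gen2. */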
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
+ }
+}
+
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+ u32 seqno;
+
+ if (!ring->get_seqno)
+ return (NULL);
+
+ seqno = ring->get_seqno(ring);
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (obj->ring != ring)
+ continue;
+
+ if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ continue;
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+ continue;
+
+ /* We need to copy these to an anonymous buffer as the simplest
+ * method to avoid being overwritten by userspace.
+ */
+ return (i915_error_object_create(dev_priv, obj));
+ }
+
+ return NULL;
+}
+
+static void
+i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+ error->semaphore_mboxes[ring->id][0]
+ = I915_READ(RING_SYNC_0(ring->mmio_base));
+ error->semaphore_mboxes[ring->id][1]
+ = I915_READ(RING_SYNC_1(ring->mmio_base));
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+ error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+ error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+ error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+ if (ring->id == RCS) {
+ error->instdone1 = I915_READ(INSTDONE1);
+ error->bbaddr = I915_READ64(BB_ADDR);
+ }
+ } else {
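+		/* Pre-965 hardware only has the global copies of these
+		 * registers. */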
+ error->ipeir[ring->id] = I915_READ(IPEIR);
+ error->ipehr[ring->id] = I915_READ(IPEHR);
+ error->instdone[ring->id] = I915_READ(INSTDONE);
+ }
+
+ error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+ error->seqno[ring->id] = ring->get_seqno(ring);
+ error->acthd[ring->id] = intel_ring_get_active_head(ring);
+ error->head[ring->id] = I915_READ_HEAD(ring);
+ error->tail[ring->id] = I915_READ_TAIL(ring);
+
+ error->cpu_ring_head[ring->id] = ring->head;
+ error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+static void
+i915_gem_record_rings(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ int i, count;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_ring_buffer *ring = &dev_priv->rings[i];
+
+ if (ring->obj == NULL)
+ continue;
+
+ i915_record_ring_state(dev, error, ring);
+
+ error->ring[i].batchbuffer =
+ i915_error_first_batchbuffer(dev_priv, ring);
+
+ error->ring[i].ringbuffer =
+ i915_error_object_create(dev_priv, ring->obj);
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list)
+ count++;
+
+ error->ring[i].num_requests = count;
+ error->ring[i].requests = malloc(count *
+ sizeof(struct drm_i915_error_request), DRM_I915_GEM,
+ M_WAITOK);
+ if (error->ring[i].requests == NULL) {
+ error->ring[i].num_requests = 0;
+ continue;
+ }
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list) {
+ struct drm_i915_error_request *erq;
+
+ erq = &error->ring[i].requests[count++];
+ erq->seqno = request->seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->tail = request->tail;
+ }
+ }
+}
+
+static void
+i915_capture_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_i915_error_state *error;
+ int i, pipe;
+
+ mtx_lock(&dev_priv->error_lock);
+ error = dev_priv->first_error;
+ mtx_unlock(&dev_priv->error_lock);
+ if (error != NULL)
+ return;
+
+ /* Account for pipe specific data like PIPE*STAT */
+ error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO);
+ if (error == NULL) {
+ DRM_DEBUG("out of memory, not capturing error state\n");
+ return;
+ }
+
+ DRM_INFO("capturing error event; look for more information in "
+ "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx);
+
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->error = I915_READ(ERROR_GEN6);
+ error->done_reg = I915_READ(DONE_REG);
+ }
+
+ i915_gem_record_fences(dev, error);
+ i915_gem_record_rings(dev, error);
+
+ /* Record buffers on the active and pinned lists. */
+ error->active_bo = NULL;
+ error->pinned_bo = NULL;
+
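+	/* Count both lists up front so a single allocation can hold both
+	 * snapshots; pinned entries follow the active ones. */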
+ i = 0;
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+ i++;
+ error->active_bo_count = i;
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ i++;
+ error->pinned_bo_count = i - error->active_bo_count;
+
+ error->active_bo = NULL;
+ error->pinned_bo = NULL;
+ if (i) {
+ error->active_bo = malloc(sizeof(*error->active_bo) * i,
+ DRM_I915_GEM, M_NOWAIT);
+ if (error->active_bo)
+ error->pinned_bo = error->active_bo +
+ error->active_bo_count;
+ }
+
+ if (error->active_bo)
+ error->active_bo_count = capture_bo_list(error->active_bo,
+ error->active_bo_count, &dev_priv->mm.active_list);
+
+ if (error->pinned_bo)
+ error->pinned_bo_count = capture_bo_list(error->pinned_bo,
+ error->pinned_bo_count, &dev_priv->mm.pinned_list);
+
+ microtime(&error->time);
+
+ error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
+
+ mtx_lock(&dev_priv->error_lock);
+ if (dev_priv->first_error == NULL) {
+ dev_priv->first_error = error;
+ error = NULL;
+ }
+ mtx_unlock(&dev_priv->error_lock);
+
+ if (error != NULL)
+ i915_error_state_free(dev, error);
+}
+
+void
+i915_destroy_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+
+ mtx_lock(&dev_priv->error_lock);
+ error = dev_priv->first_error;
+ dev_priv->first_error = NULL;
+ mtx_unlock(&dev_priv->error_lock);
+
+ if (error != NULL)
+ i915_error_state_free(dev, error);
+}
diff --git a/sys/dev/drm2/i915/i915_reg.h b/sys/dev/drm2/i915/i915_reg.h
new file mode 100644
index 0000000..754e535
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_reg.h
@@ -0,0 +1,3876 @@
+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _I915_REG_H_
+#define _I915_REG_H_
+
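+/*
+ * Per-pipe registers are spaced at a constant stride; derive the address
+ * for a given pipe from the pipe A and pipe B instances.
+ */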
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
+ */
+#define INTEL_GMCH_CTRL 0x52
+#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+
+/* PCI config space */
+
+#define HPLLCC 0xc0 /* 855 only */
+#define GC_CLOCK_CONTROL_MASK (0xf << 0)
+#define GC_CLOCK_133_200 (0 << 0)
+#define GC_CLOCK_100_200 (1 << 0)
+#define GC_CLOCK_100_133 (2 << 0)
+#define GC_CLOCK_166_250 (3 << 0)
+#define GCFGC2 0xda
+#define GCFGC 0xf0 /* 915+ only */
+#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
+#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define GC_DISPLAY_CLOCK_MASK (7 << 4)
+#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
+#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
+#define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0)
+#define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0)
+#define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0)
+#define I965_GC_RENDER_CLOCK_MASK (0xf << 0)
+#define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0)
+#define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0)
+#define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0)
+#define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0)
+#define I945_GC_RENDER_CLOCK_MASK (7 << 0)
+#define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0)
+#define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0)
+#define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0)
+#define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0)
+#define I915_GC_RENDER_CLOCK_MASK (7 << 0)
+#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
+#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
+#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
+#define LBB 0xf4
+
+/* Graphics reset regs */
+#define I965_GDRST 0xc0 /* PCI config register */
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define GRDOM_FULL (0<<2)
+#define GRDOM_RENDER (1<<2)
+#define GRDOM_MEDIA (3<<2)
+
+#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
+#define GEN6_MBC_SNPCR_SHIFT 21
+#define GEN6_MBC_SNPCR_MASK (3<<21)
+#define GEN6_MBC_SNPCR_MAX (0<<21)
+#define GEN6_MBC_SNPCR_MED (1<<21)
+#define GEN6_MBC_SNPCR_LOW (2<<21)
+#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+
+#define GEN6_MBCTL 0x0907c
+#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
+#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
+#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
+#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
+#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
+
+#define GEN6_GDRST 0x941c
+#define GEN6_GRDOM_FULL (1 << 0)
+#define GEN6_GRDOM_RENDER (1 << 1)
+#define GEN6_GRDOM_MEDIA (1 << 2)
+#define GEN6_GRDOM_BLT (1 << 3)
+
+/* PPGTT stuff */
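+/* Fold physical address bits 39:32 into GTT entry bits 11:4. */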
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID (1 << 0)
+#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID (1 << 0)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN6_PTE_CACHE_BITS (3 << 1)
+#define GEN6_PTE_GFDT (1 << 3)
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
+#define PP_DIR_DCLV_2G 0xffffffff
+
+#define GAM_ECOCHK 0x4090
+#define ECOCHK_SNB_BIT (1<<10)
+#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
+#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define VGA_MSR_MEM_EN (1<<1)
+#define VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define VGA_GR_MEM_READ_MODE_SHIFT 3
+#define VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define VGA_GR_MEM_MODE_MASK 0xc
+#define VGA_GR_MEM_MODE_SHIFT 2
+#define VGA_GR_MEM_A0000_AFFFF 0
+#define VGA_GR_MEM_A0000_BFFFF 1
+#define VGA_GR_MEM_B0000_B7FFF 2
+#define VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+
+#define MI_NOOP MI_INSTR(0, 0)
+#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
+#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH MI_INSTR(0x04, 0)
+#define MI_READ_FLUSH (1 << 0)
+#define MI_EXE_FLUSH (1 << 1)
+#define MI_NO_WRITE_FLUSH (1 << 2)
+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
+#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
+#define MI_SUSPEND_FLUSH_EN (1<<0)
+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
+#define MI_OVERLAY_CONTINUE (0x0<<21)
+#define MI_OVERLAY_ON (0x1<<21)
+#define MI_OVERLAY_OFF (0x2<<21)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
+#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
+#define MI_MM_SPACE_GTT (1<<8)
+#define MI_MM_SPACE_PHYSICAL (0<<8)
+#define MI_SAVE_EXT_STATE_EN (1<<3)
+#define MI_RESTORE_EXT_STATE_EN (1<<2)
+#define MI_FORCE_RESTORE (1<<1)
+#define MI_RESTORE_INHIBIT (1<<0)
+#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
+#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
+#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_STORE_DWORD_INDEX_SHIFT 2
+/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ * simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many registers: simply issue x
+ *   address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
+#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_INVALIDATE_BSD (1<<7)
+#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
+#define MI_BATCH_NON_SECURE (1)
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
+#define MI_SEMAPHORE_SYNC_RV (2<<16)
+#define MI_SEMAPHORE_SYNC_RB (0<<16)
+#define MI_SEMAPHORE_SYNC_VR (0<<16)
+#define MI_SEMAPHORE_SYNC_VB (2<<16)
+#define MI_SEMAPHORE_SYNC_BR (2<<16)
+#define MI_SEMAPHORE_SYNC_BV (0<<16)
+#define MI_SEMAPHORE_SYNC_INVALID (1<<0)
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+
+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR (0x1<<1)
+#define SC_ENABLE_MASK (0x1<<0)
+#define SC_ENABLE (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK (0xffff<<16)
+#define SCI_XMIN_MASK (0xffff<<0)
+#define SCI_YMAX_MASK (0xffff<<16)
+#define SCI_XMAX_MASK (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+#define BLT_DEPTH_8 (0<<24)
+#define BLT_DEPTH_16_565 (1<<24)
+#define BLT_DEPTH_16_1555 (2<<24)
+#define BLT_DEPTH_32 (3<<24)
+#define BLT_ROP_GXCOPY (0xcc<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
+#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP (1<<22)
+#define DISPLAY_PLANE_A (0<<20)
+#define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define PIPE_CONTROL_CS_STALL (1<<20)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
+#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
+#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
+#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
+#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
+#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
+#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830 0x6070
+#define DEBUG_RESET_FULL (1<<7)
+#define DEBUG_RESET_RENDER (1<<8)
+#define DEBUG_RESET_DISPLAY (1<<9)
+
+
+/*
+ * Fence registers
+ */
+#define FENCE_REG_830_0 0x2000
+#define FENCE_REG_945_8 0x3000
+#define I830_FENCE_START_MASK 0x07f80000
+#define I830_FENCE_TILING_Y_SHIFT 12
+#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
+#define I830_FENCE_PITCH_SHIFT 4
+#define I830_FENCE_REG_VALID (1<<0)
+#define I915_FENCE_MAX_PITCH_VAL 4
+#define I830_FENCE_MAX_PITCH_VAL 6
+#define I830_FENCE_MAX_SIZE_VAL (1<<8)
+
+#define I915_FENCE_START_MASK 0x0ff00000
+#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
+
+#define FENCE_REG_965_0 0x03000
+#define I965_FENCE_PITCH_SHIFT 2
+#define I965_FENCE_TILING_Y_SHIFT 1
+#define I965_FENCE_REG_VALID (1<<0)
+#define I965_FENCE_MAX_PITCH_VAL 0x0400
+
+#define FENCE_REG_SANDYBRIDGE_0 0x100000
+#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
+
+/* control register for cpu gtt access */
+#define TILECTL 0x101000
+#define TILECTL_SWZCTL (1 << 0)
+#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
+#define TILECTL_BACKSNOOP_DIS (1 << 3)
+
+/*
+ * Instruction and interrupt control regs
+ */
+#define PGTBL_ER 0x02024
+#define RENDER_RING_BASE 0x02000
+#define BSD_RING_BASE 0x04000
+#define GEN6_BSD_RING_BASE 0x12000
+#define BLT_RING_BASE 0x22000
+#define RING_TAIL(base) ((base)+0x30)
+#define RING_HEAD(base) ((base)+0x34)
+#define RING_START(base) ((base)+0x38)
+#define RING_CTL(base) ((base)+0x3c)
+#define RING_SYNC_0(base) ((base)+0x40)
+#define RING_SYNC_1(base) ((base)+0x44)
+#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
+#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
+#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
+#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
+#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
+#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
+#define RING_MAX_IDLE(base) ((base)+0x54)
+#define RING_HWS_PGA(base) ((base)+0x80)
+#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
+#define ARB_MODE 0x04030
+#define ARB_MODE_SWIZZLE_SNB (1<<4)
+#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
+#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
+#define RENDER_HWS_PGA_GEN7 (0x04080)
+#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
+#define DONE_REG 0x40b0
+#define BSD_HWS_PGA_GEN7 (0x04180)
+#define BLT_HWS_PGA_GEN7 (0x04280)
+#define RING_ACTHD(base) ((base)+0x74)
+#define RING_NOPID(base) ((base)+0x94)
+#define RING_IMR(base) ((base)+0xa8)
+#define TAIL_ADDR 0x001FFFF8
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#if 0
+#define PRB0_TAIL 0x02030
+#define PRB0_HEAD 0x02034
+#define PRB0_START 0x02038
+#define PRB0_CTL 0x0203c
+#define PRB1_TAIL 0x02040 /* 915+ only */
+#define PRB1_HEAD 0x02044 /* 915+ only */
+#define PRB1_START 0x02048 /* 915+ only */
+#define PRB1_CTL 0x0204c /* 915+ only */
+#endif
+#define IPEIR_I965 0x02064
+#define IPEHR_I965 0x02068
+#define INSTDONE_I965 0x0206c
+#define RING_IPEIR(base) ((base)+0x64)
+#define RING_IPEHR(base) ((base)+0x68)
+#define RING_INSTDONE(base) ((base)+0x6c)
+#define RING_INSTPS(base) ((base)+0x70)
+#define RING_DMA_FADD(base) ((base)+0x78)
+#define RING_INSTPM(base) ((base)+0xc0)
+#define INSTPS 0x02070 /* 965+ only */
+#define INSTDONE1 0x0207c /* 965+ only */
+#define ACTHD_I965 0x02074
+#define HWS_PGA 0x02080
+#define HWS_ADDRESS_MASK 0xfffff000
+#define HWS_START_ADDRESS_SHIFT 4
+#define PWRCTXA 0x2088 /* 965GM+ only */
+#define PWRCTX_EN (1<<0)
+#define IPEIR 0x02088
+#define IPEHR 0x0208c
+#define INSTDONE 0x02090
+#define NOPID 0x02094
+#define HWSTAM 0x02098
+
+#define ERROR_GEN6 0x040a0
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior. The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN2 0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+#define _3D_CHICKEN3 0x02090
+
+#define MI_MODE 0x0209c
+# define VS_TIMER_DISPATCH (1 << 6)
+# define MI_FLUSH_ENABLE (1 << 12)
+
+#define GFX_MODE 0x02520
+#define GFX_MODE_GEN7 0x0229c
+#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
+#define GFX_RUN_LIST_ENABLE (1<<15)
+#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
+#define GFX_SURFACE_FAULT_ENABLE (1<<12)
+#define GFX_REPLAY_MODE (1<<11)
+#define GFX_PSMI_GRANULARITY (1<<10)
+#define GFX_PPGTT_ENABLE (1<<9)
+
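+/* Masked-bit writes: the high 16 bits select which low bits take effect. */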
+#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
+#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
+
+#define SCPD0 0x0209c /* 915+ only */
+#define IER 0x020a0
+#define IIR 0x020a4
+#define IMR 0x020a8
+#define ISR 0x020ac
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
+#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
+#define I915_HWB_OOM_INTERRUPT (1<<13)
+#define I915_SYNC_STATUS_INTERRUPT (1<<12)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
+#define I915_DEBUG_INTERRUPT (1<<2)
+#define I915_USER_INTERRUPT (1<<1)
+#define I915_ASLE_INTERRUPT (1<<0)
+#define I915_BSD_USER_INTERRUPT (1<<25)
+#define EIR 0x020b0
+#define EMR 0x020b4
+#define ESR 0x020b8
+#define GM45_ERROR_PAGE_TABLE (1<<5)
+#define GM45_ERROR_MEM_PRIV (1<<4)
+#define I915_ERROR_PAGE_TABLE (1<<4)
+#define GM45_ERROR_CP_PRIV (1<<3)
+#define I915_ERROR_MEMORY_REFRESH (1<<1)
+#define I915_ERROR_INSTRUCTION (1<<0)
+#define INSTPM 0x020c0
+#define INSTPM_SELF_EN (1<<12) /* 915GM only */
+#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
+#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define ACTHD 0x020c8
+#define FW_BLC 0x020d8
+#define FW_BLC2 0x020dc
+#define FW_BLC_SELF 0x020e0 /* 915+ only */
+#define FW_BLC_SELF_EN_MASK (1<<31)
+#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
+#define FW_BLC_SELF_EN (1<<15) /* 945 only */
+#define MM_BURST_LENGTH 0x00700000
+#define MM_FIFO_WATERMARK 0x0001F000
+#define LM_BURST_LENGTH 0x00000700
+#define LM_FIFO_WATERMARK 0x0000001F
+#define MI_ARB_STATE 0x020e4 /* 915+ only */
+#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
+
+/* Make render/texture TLB fetches lower priority than associated data
+ * fetches. This is not turned on by default
+ */
+#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
+
+/* Isoch request wait on GTT enable (Display A/B/C streams).
+ * Make isoch requests stall on the TLB update. May cause
+ * display underruns (test mode only)
+ */
+#define MI_ARB_ISOCH_WAIT_GTT (1 << 14)
+
+/* Block grant count for isoch requests when block count is
+ * set to a finite value.
+ */
+#define MI_ARB_BLOCK_GRANT_MASK (3 << 12)
+#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */
+#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */
+#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */
+#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */
+
+/* Enable render writes to complete in C2/C3/C4 power states.
+ * If this isn't enabled, render writes are prevented in low
+ * power states. That seems bad to me.
+ */
+#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11)
+
+/* This acknowledges an async flip immediately instead
+ * of waiting for two TLB fetches.
+ */
+#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10)
+
+/* Enables non-sequential data reads through arbiter
+ */
+#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
+
+/* Disable FSB snooping of cacheable write cycles from binner/render
+ * command stream
+ */
+#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8)
+
+/* Arbiter time slice for non-isoch streams */
+#define MI_ARB_TIME_SLICE_MASK (7 << 5)
+#define MI_ARB_TIME_SLICE_1 (0 << 5)
+#define MI_ARB_TIME_SLICE_2 (1 << 5)
+#define MI_ARB_TIME_SLICE_4 (2 << 5)
+#define MI_ARB_TIME_SLICE_6 (3 << 5)
+#define MI_ARB_TIME_SLICE_8 (4 << 5)
+#define MI_ARB_TIME_SLICE_10 (5 << 5)
+#define MI_ARB_TIME_SLICE_14 (6 << 5)
+#define MI_ARB_TIME_SLICE_16 (7 << 5)
+
+/* Low priority grace period page size */
+#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */
+#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4)
+
+/* Disable display A/B trickle feed */
+#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2)
+
+/* Set display plane priority */
+#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
+#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
+
+#define CACHE_MODE_0 0x02120 /* 915+ only */
+#define CM0_MASK_SHIFT 16
+#define CM0_IZ_OPT_DISABLE (1<<6)
+#define CM0_ZR_OPT_DISABLE (1<<5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define CM0_COLOR_EVICT_DISABLE (1<<3)
+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define BB_ADDR 0x02140 /* 8 bytes */
+#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+#define ECOSKPD 0x021d0
+#define ECO_GATING_CX_ONLY (1<<3)
+#define ECO_FLIP_DONE (1<<0)
+
+/* GEN6 interrupt control */
+#define GEN6_RENDER_HWSTAM 0x2098
+#define GEN6_RENDER_IMR 0x20a8
+#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
+#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
+#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
+#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
+#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
+#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
+#define GEN6_RENDER_SYNC_STATUS (1 << 2)
+#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1)
+#define GEN6_RENDER_USER_INTERRUPT (1 << 0)
+
+#define GEN6_BLITTER_HWSTAM 0x22098
+#define GEN6_BLITTER_IMR 0x220a8
+#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26)
+#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
+#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
+#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
+
+#define GEN6_BLITTER_ECOSKPD 0x221d0
+#define GEN6_BLITTER_LOCK_SHIFT 16
+#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+
+#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+
+#define GEN6_BSD_HWSTAM 0x12098
+#define GEN6_BSD_IMR 0x120a8
+#define GEN6_BSD_USER_INTERRUPT (1 << 12)
+
+#define GEN6_BSD_RNCID 0x12198
+
+/*
+ * Framebuffer compression (915+ only)
+ */
+
+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
+#define FBC_CONTROL 0x03208
+#define FBC_CTL_EN (1<<31)
+#define FBC_CTL_PERIODIC (1<<30)
+#define FBC_CTL_INTERVAL_SHIFT (16)
+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_CTL_C3_IDLE (1<<13)
+#define FBC_CTL_STRIDE_SHIFT (5)
+#define FBC_CTL_FENCENO (1<<0)
+#define FBC_COMMAND 0x0320c
+#define FBC_CMD_COMPRESS (1<<0)
+#define FBC_STATUS 0x03210
+#define FBC_STAT_COMPRESSING (1<<31)
+#define FBC_STAT_COMPRESSED (1<<30)
+#define FBC_STAT_MODIFIED (1<<29)
+#define FBC_STAT_CURRENT_LINE (1<<0)
+#define FBC_CONTROL2 0x03214
+#define FBC_CTL_FENCE_DBL (0<<4)
+#define FBC_CTL_IDLE_IMM (0<<2)
+#define FBC_CTL_IDLE_FULL (1<<2)
+#define FBC_CTL_IDLE_LINE (2<<2)
+#define FBC_CTL_IDLE_DEBUG (3<<2)
+#define FBC_CTL_CPU_FENCE (1<<1)
+#define FBC_CTL_PLANEA (0<<0)
+#define FBC_CTL_PLANEB (1<<0)
+#define FBC_FENCE_OFF 0x0321b
+#define FBC_TAG 0x03300
+
+#define FBC_LL_SIZE (1536)
+
+/* Framebuffer compression for GM45+ */
+#define DPFC_CB_BASE 0x3200
+#define DPFC_CONTROL 0x3208
+#define DPFC_CTL_EN (1<<31)
+#define DPFC_CTL_PLANEA (0<<30)
+#define DPFC_CTL_PLANEB (1<<30)
+#define DPFC_CTL_FENCE_EN (1<<29)
+#define DPFC_CTL_PERSISTENT_MODE (1<<25)
+#define DPFC_SR_EN (1<<10)
+#define DPFC_CTL_LIMIT_1X (0<<6)
+#define DPFC_CTL_LIMIT_2X (1<<6)
+#define DPFC_CTL_LIMIT_4X (2<<6)
+#define DPFC_RECOMP_CTL 0x320c
+#define DPFC_RECOMP_STALL_EN (1<<27)
+#define DPFC_RECOMP_STALL_WM_SHIFT (16)
+#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
+#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
+#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
+#define DPFC_STATUS 0x3210
+#define DPFC_INVAL_SEG_SHIFT (16)
+#define DPFC_INVAL_SEG_MASK (0x07ff0000)
+#define DPFC_COMP_SEG_SHIFT (0)
+#define DPFC_COMP_SEG_MASK (0x000003ff)
+#define DPFC_STATUS2 0x3214
+#define DPFC_FENCE_YOFF 0x3218
+#define DPFC_CHICKEN 0x3224
+#define DPFC_HT_MODIFY (1<<31)
+
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE 0x43200
+#define ILK_DPFC_CONTROL 0x43208
+/* Bits 28:8 are reserved */
+#define DPFC_RESERVED (0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL 0x4320c
+#define ILK_DPFC_STATUS 0x43210
+#define ILK_DPFC_FENCE_YOFF 0x43218
+#define ILK_DPFC_CHICKEN 0x43224
+#define ILK_FBC_RT_BASE 0x2128
+#define ILK_FBC_RT_VALID (1<<0)
+
+#define ILK_DISPLAY_CHICKEN1 0x42000
+#define ILK_FBCQ_DIS (1<<22)
+#define ILK_PABSTRETCH_DIS (1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA 0x100100
+#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define DPFC_CPU_FENCE_OFFSET 0x100104
+
+
+/*
+ * GPIO regs
+ */
+#define GPIOA 0x5010
+#define GPIOB 0x5014
+#define GPIOC 0x5018
+#define GPIOD 0x501c
+#define GPIOE 0x5020
+#define GPIOF 0x5024
+#define GPIOG 0x5028
+#define GPIOH 0x502c
+# define GPIO_CLOCK_DIR_MASK (1 << 0)
+# define GPIO_CLOCK_DIR_IN (0 << 1)
+# define GPIO_CLOCK_DIR_OUT (1 << 1)
+# define GPIO_CLOCK_VAL_MASK (1 << 2)
+# define GPIO_CLOCK_VAL_OUT (1 << 3)
+# define GPIO_CLOCK_VAL_IN (1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+# define GPIO_DATA_DIR_MASK (1 << 8)
+# define GPIO_DATA_DIR_IN (0 << 9)
+# define GPIO_DATA_DIR_OUT (1 << 9)
+# define GPIO_DATA_VAL_MASK (1 << 10)
+# define GPIO_DATA_VAL_OUT (1 << 11)
+# define GPIO_DATA_VAL_IN (1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+#define GMBUS0 0x5100 /* clock/port select */
+#define GMBUS_RATE_100KHZ (0<<8)
+#define GMBUS_RATE_50KHZ (1<<8)
+#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_PORT_DISABLED 0
+#define GMBUS_PORT_SSC 1
+#define GMBUS_PORT_VGADDC 2
+#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPC 4 /* HDMIC */
+#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
+ /* 6 reserved */
+#define GMBUS_PORT_DPD 7 /* HDMID */
+#define GMBUS_NUM_PORTS 8
+#define GMBUS1 0x5104 /* command/status */
+#define GMBUS_SW_CLR_INT (1<<31)
+#define GMBUS_SW_RDY (1<<30)
+#define GMBUS_ENT (1<<29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0<<25)
+#define GMBUS_CYCLE_WAIT (1<<25)
+#define GMBUS_CYCLE_INDEX (2<<25)
+#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1<<0)
+#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS2 0x5108 /* status */
+#define GMBUS_INUSE (1<<15)
+#define GMBUS_HW_WAIT_PHASE (1<<14)
+#define GMBUS_STALL_TIMEOUT (1<<13)
+#define GMBUS_INT (1<<12)
+#define GMBUS_HW_RDY (1<<11)
+#define GMBUS_SATOER (1<<10)
+#define GMBUS_ACTIVE (1<<9)
+#define GMBUS3 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
+#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define GMBUS_NAK_EN (1<<3)
+#define GMBUS_IDLE_EN (1<<2)
+#define GMBUS_HW_WAIT_EN (1<<1)
+#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS5 0x5120 /* byte index */
+#define GMBUS_2BYTE_INDEX_EN (1<<31)
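+
+/*
+ * Illustrative sketch (hypothetical helper): packing a GMBUS1 command word
+ * for a single read cycle using the fields above; the caller would still
+ * have to select a port in GMBUS0 and poll GMBUS2 for completion.
+ */
+static inline uint32_t
+gmbus1_read_cmd(uint32_t slave_addr, uint32_t nbytes)
+{
+	return (GMBUS_SW_RDY | GMBUS_CYCLE_WAIT |
+	    (nbytes << GMBUS_BYTE_COUNT_SHIFT) |
+	    (slave_addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ);
+}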
+
+/*
+ * Clock control & power management
+ */
+
+#define VGA0 0x6000
+#define VGA1 0x6004
+#define VGA_PD 0x6010
+#define VGA0_PD_P2_DIV_4 (1 << 7)
+#define VGA0_PD_P1_DIV_2 (1 << 5)
+#define VGA0_PD_P1_SHIFT 0
+#define VGA0_PD_P1_MASK (0x1f << 0)
+#define VGA1_PD_P2_DIV_4 (1 << 15)
+#define VGA1_PD_P1_DIV_2 (1 << 13)
+#define VGA1_PD_P1_SHIFT 8
+#define VGA1_PD_P1_MASK (0x1f << 8)
+#define _DPLL_A 0x06014
+#define _DPLL_B 0x06018
+#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
+#define DPLL_VCO_ENABLE (1 << 31)
+#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+
+#define SRX_INDEX 0x3c4
+#define SRX_DATA 0x3c5
+#define SR01 1
+#define SR01_SCREEN_OFF (1<<5)
+
+#define PPCR 0x61204
+#define PPCR_ON (1<<0)
+
+#define DVOB 0x61140
+#define DVOB_ON (1<<31)
+#define DVOC 0x61160
+#define DVOC_ON (1<<31)
+#define LVDS 0x61180
+#define LVDS_ON (1<<31)
+
+/* Scratch pad debug 0 reg:
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
+/* i830, required in DVO non-gang */
+#define PLL_P2_DIVIDE_BY_4 (1 << 23)
+#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+#define PLL_REF_INPUT_DREFCLK (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define PLL_REF_INPUT_MASK (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/* Ironlake */
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
+# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
+# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
+
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ */
+#define SDVO_MULTIPLIER_MASK 0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
+#define SDVO_MULTIPLIER_SHIFT_VGA 0
+#define _DPLL_A_MD 0x0601c /* 965+ only */
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
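+
+/*
+ * Illustrative sketch (hypothetical helper): choosing the SDVO/UDI
+ * multiplier field per the comment above, i.e. the smallest factor (up to
+ * the 5x maximum) that lifts the 10x-dotclock bus rate to at least 1 GHz;
+ * the programmed value is the multiplier minus one.
+ */
+static inline uint32_t
+dpll_md_udi_multiplier_field(uint32_t dotclock_khz)
+{
+	uint32_t mult;
+
+	/* bus rate is 10 * dotclock * mult; 1 GHz needs 100000 kHz * 10 */
+	for (mult = 1; mult < 5 && dotclock_khz * mult < 100000; mult++)
+		;
+	return ((mult - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT);
+}
+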
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This must be set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+#define _DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+#define _FPA0 0x06040
+#define _FPA1 0x06044
+#define _FPB0 0x06048
+#define _FPB1 0x0604c
+#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
+#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
+#define FP_N_DIV_SHIFT 16
+#define FP_M1_DIV_MASK 0x00003f00
+#define FP_M1_DIV_SHIFT 8
+#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
+#define FP_M2_DIV_SHIFT 0
+#define DPLL_TEST 0x606c
+#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
+#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
+#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
+#define DPLLB_TEST_N_BYPASS (1 << 19)
+#define DPLLB_TEST_M_BYPASS (1 << 18)
+#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
+#define DPLLA_TEST_N_BYPASS (1 << 3)
+#define DPLLA_TEST_M_BYPASS (1 << 2)
+#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+#define D_STATE 0x6104
+#define DSTATE_GFX_RESET_I830 (1<<6)
+#define DSTATE_PLL_D3_OFF (1<<3)
+#define DSTATE_GFX_CLOCK_GATING (1<<1)
+#define DSTATE_DOT_CLOCK_GATING (1<<0)
+#define DSPCLK_GATE_D 0x6200
+# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
+# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
+# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
+# define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */
+# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
+# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
+# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
+# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
+# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
+# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
+# define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */
+# define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */
+# define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */
+# define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */
+# define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */
+# define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */
+# define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */
+# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */
+# define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */
+# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
+# define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10)
+# define DCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPUNIT_CLOCK_GATE_DISABLE (1 << 8)
+# define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */
+# define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */
+# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
+# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
+/**
+ * This bit must be set on the 830 to prevent hangs when turning off the
+ * overlay scaler.
+ */
+# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3)
+# define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2)
+# define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1)
+# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
+# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
+
+#define RENCLK_GATE_D1 0x6204
+# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */
+# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */
+# define PC_FE_CLOCK_GATE_DISABLE (1 << 11)
+# define PC_BE_CLOCK_GATE_DISABLE (1 << 10)
+# define WINDOWER_CLOCK_GATE_DISABLE (1 << 9)
+# define INTERPOLATOR_CLOCK_GATE_DISABLE (1 << 8)
+# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7)
+# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6)
+# define MAG_CLOCK_GATE_DISABLE (1 << 5)
+/** This bit must be unset on 855,865 */
+# define MECI_CLOCK_GATE_DISABLE (1 << 4)
+# define DCMP_CLOCK_GATE_DISABLE (1 << 3)
+# define MEC_CLOCK_GATE_DISABLE (1 << 2)
+# define MECO_CLOCK_GATE_DISABLE (1 << 1)
+/** This bit must be set on 855,865. */
+# define SV_CLOCK_GATE_DISABLE (1 << 0)
+# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16)
+# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15)
+# define I915_MOTION_COMP_CLOCK_GATE_DISABLE (1 << 14)
+# define I915_BD_BF_CLOCK_GATE_DISABLE (1 << 13)
+# define I915_SF_SE_CLOCK_GATE_DISABLE (1 << 12)
+# define I915_WM_CLOCK_GATE_DISABLE (1 << 11)
+# define I915_IZ_CLOCK_GATE_DISABLE (1 << 10)
+# define I915_PI_CLOCK_GATE_DISABLE (1 << 9)
+# define I915_DI_CLOCK_GATE_DISABLE (1 << 8)
+# define I915_SH_SV_CLOCK_GATE_DISABLE (1 << 7)
+# define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE (1 << 6)
+# define I915_SC_CLOCK_GATE_DISABLE (1 << 5)
+# define I915_FL_CLOCK_GATE_DISABLE (1 << 4)
+# define I915_DM_CLOCK_GATE_DISABLE (1 << 3)
+# define I915_PS_CLOCK_GATE_DISABLE (1 << 2)
+# define I915_CC_CLOCK_GATE_DISABLE (1 << 1)
+# define I915_BY_CLOCK_GATE_DISABLE (1 << 0)
+
+# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30)
+/** This bit must always be set on 965G/965GM */
+# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29)
+# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28)
+# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27)
+# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26)
+# define I965_GW_CLOCK_GATE_DISABLE (1 << 25)
+# define I965_TD_CLOCK_GATE_DISABLE (1 << 24)
+/** This bit must always be set on 965G */
+# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23)
+# define I965_IC_CLOCK_GATE_DISABLE (1 << 22)
+# define I965_EU_CLOCK_GATE_DISABLE (1 << 21)
+# define I965_IF_CLOCK_GATE_DISABLE (1 << 20)
+# define I965_TC_CLOCK_GATE_DISABLE (1 << 19)
+# define I965_SO_CLOCK_GATE_DISABLE (1 << 17)
+# define I965_FBC_CLOCK_GATE_DISABLE (1 << 16)
+# define I965_MARI_CLOCK_GATE_DISABLE (1 << 15)
+# define I965_MASF_CLOCK_GATE_DISABLE (1 << 14)
+# define I965_MAWB_CLOCK_GATE_DISABLE (1 << 13)
+# define I965_EM_CLOCK_GATE_DISABLE (1 << 12)
+# define I965_UC_CLOCK_GATE_DISABLE (1 << 11)
+# define I965_SI_CLOCK_GATE_DISABLE (1 << 6)
+# define I965_MT_CLOCK_GATE_DISABLE (1 << 5)
+# define I965_PL_CLOCK_GATE_DISABLE (1 << 4)
+# define I965_DG_CLOCK_GATE_DISABLE (1 << 3)
+# define I965_QC_CLOCK_GATE_DISABLE (1 << 2)
+# define I965_FT_CLOCK_GATE_DISABLE (1 << 1)
+# define I965_DM_CLOCK_GATE_DISABLE (1 << 0)
+
+#define RENCLK_GATE_D2 0x6208
+#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
+#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
+#define RAMCLK_GATE_D 0x6210 /* CRL only */
+#define DEUC 0x6214 /* CRL only */
+
+/*
+ * Palette regs
+ */
+
+#define _PALETTE_A 0x0a000
+#define _PALETTE_B 0x0a800
+#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
+
+/* MCH MMIO space */
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's PCI config register 0x44 or 0x48 and matches it in
+ * every way. It is not accessible from the CP register read instructions.
+ *
+ */
+#define MCHBAR_MIRROR_BASE 0x10000
+
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
+/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC 0x10200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+
+/** Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL 0x101a8
+#define CSHRDDR3CTL_DDR3 (1 << 2)
+
+/** 965 MCH register controlling DRAM channel configuration */
+#define C0DRB3 0x10206
+#define C1DRB3 0x10606
+
+/** snb MCH registers for reading the DRAM channel configuration */
+#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
+#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
+#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define MAD_DIMM_ECC_MASK (0x3 << 24)
+#define MAD_DIMM_ECC_OFF (0x0 << 24)
+#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
+#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
+#define MAD_DIMM_ECC_ON (0x3 << 24)
+#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
+#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
+#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
+#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
+#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
+#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
+#define MAD_DIMM_A_SELECT (0x1 << 16)
+/* DIMM sizes are in multiples of 256MB. */
+#define MAD_DIMM_B_SIZE_SHIFT 8
+#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
+#define MAD_DIMM_A_SIZE_SHIFT 0
+#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
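+
+/*
+ * Illustrative sketch (hypothetical helper): decoding the DIMM A size, in
+ * megabytes, from a MAD_DIMM_Cx value; the size fields count 256MB units
+ * per the comment above.
+ */
+static inline uint32_t
+mad_dimm_a_size_mb(uint32_t mad_dimm)
+{
+	return (((mad_dimm & MAD_DIMM_A_SIZE_MASK) >>
+	    MAD_DIMM_A_SIZE_SHIFT) * 256);
+}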
+
+
+/* Clocking configuration register */
+#define CLKCFG 0x10c00
+#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
+#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
+#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
+#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
+/* Note: the two values below are guesses */
+#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_MASK (7 << 0)
+#define CLKCFG_MEM_533 (1 << 4)
+#define CLKCFG_MEM_667 (2 << 4)
+#define CLKCFG_MEM_800 (3 << 4)
+#define CLKCFG_MEM_MASK (7 << 4)
+
+#define TSC1 0x11001
+#define TSE (1<<0)
+#define I915_TR1 0x11006
+#define TSFS 0x11020
+#define TSFS_SLOPE_MASK 0x0000ff00
+#define TSFS_SLOPE_SHIFT 8
+#define TSFS_INTR_MASK 0x000000ff
+
+#define CRSTANDVID 0x11100
+#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ_PX_MASK 0x7f000000
+#define PXVFREQ_PX_SHIFT 24
+#define VIDFREQ_BASE 0x11110
+#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2 0x11114
+#define VIDFREQ3 0x11118
+#define VIDFREQ4 0x1111c
+#define VIDFREQ_P0_MASK 0x1f000000
+#define VIDFREQ_P0_SHIFT 24
+#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
+#define VIDFREQ_P0_CSCLK_SHIFT 20
+#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
+#define VIDFREQ_P0_CRCLK_SHIFT 16
+#define VIDFREQ_P1_MASK 0x00001f00
+#define VIDFREQ_P1_SHIFT 8
+#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
+#define VIDFREQ_P1_CSCLK_SHIFT 4
+#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
+#define INTTOEXT_BASE_ILK 0x11300
+#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
+#define INTTOEXT_MAP3_SHIFT 24
+#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
+#define INTTOEXT_MAP2_SHIFT 16
+#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
+#define INTTOEXT_MAP1_SHIFT 8
+#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
+#define INTTOEXT_MAP0_SHIFT 0
+#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL 0x11170 /* Ironlake only */
+#define MEMCTL_CMD_MASK 0xe000
+#define MEMCTL_CMD_SHIFT 13
+#define MEMCTL_CMD_RCLK_OFF 0
+#define MEMCTL_CMD_RCLK_ON 1
+#define MEMCTL_CMD_CHFREQ 2
+#define MEMCTL_CMD_CHVID 3
+#define MEMCTL_CMD_VMMOFF 4
+#define MEMCTL_CMD_VMMON 5
+#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
+ when command complete */
+#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
+#define MEMCTL_FREQ_SHIFT 8
+#define MEMCTL_SFCAVM (1<<7)
+#define MEMCTL_TGT_VID_MASK 0x007f
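+
+/*
+ * Illustrative sketch (hypothetical helper): packing a MEMSWCTL software
+ * command; writing the word with MEMCTL_CMD_STS set triggers the command,
+ * and the bit reads back as zero once the command completes.
+ */
+static inline uint32_t
+memswctl_cmd(uint32_t cmd, uint32_t freq)
+{
+	return ((cmd << MEMCTL_CMD_SHIFT) | (freq << MEMCTL_FREQ_SHIFT) |
+	    MEMCTL_CMD_STS);
+}
+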
+#define MEMIHYST 0x1117c
+#define MEMINTREN 0x11180 /* 16 bits */
+#define MEMINT_RSEXIT_EN (1<<8)
+#define MEMINT_CX_SUPR_EN (1<<7)
+#define MEMINT_CONT_BUSY_EN (1<<6)
+#define MEMINT_AVG_BUSY_EN (1<<5)
+#define MEMINT_EVAL_CHG_EN (1<<4)
+#define MEMINT_MON_IDLE_EN (1<<3)
+#define MEMINT_UP_EVAL_EN (1<<2)
+#define MEMINT_DOWN_EVAL_EN (1<<1)
+#define MEMINT_SW_CMD_EN (1<<0)
+#define MEMINTRSTR 0x11182 /* 16 bits */
+#define MEM_RSEXIT_MASK 0xc000
+#define MEM_RSEXIT_SHIFT 14
+#define MEM_CONT_BUSY_MASK 0x3000
+#define MEM_CONT_BUSY_SHIFT 12
+#define MEM_AVG_BUSY_MASK 0x0c00
+#define MEM_AVG_BUSY_SHIFT 10
+#define MEM_EVAL_CHG_MASK 0x0300
+#define MEM_EVAL_BUSY_SHIFT 8
+#define MEM_MON_IDLE_MASK 0x00c0
+#define MEM_MON_IDLE_SHIFT 6
+#define MEM_UP_EVAL_MASK 0x0030
+#define MEM_UP_EVAL_SHIFT 4
+#define MEM_DOWN_EVAL_MASK 0x000c
+#define MEM_DOWN_EVAL_SHIFT 2
+#define MEM_SW_CMD_MASK 0x0003
+#define MEM_INT_STEER_GFX 0
+#define MEM_INT_STEER_CMR 1
+#define MEM_INT_STEER_SMI 2
+#define MEM_INT_STEER_SCI 3
+#define MEMINTRSTS 0x11184
+#define MEMINT_RSEXIT (1<<7)
+#define MEMINT_CONT_BUSY (1<<6)
+#define MEMINT_AVG_BUSY (1<<5)
+#define MEMINT_EVAL_CHG (1<<4)
+#define MEMINT_MON_IDLE (1<<3)
+#define MEMINT_UP_EVAL (1<<2)
+#define MEMINT_DOWN_EVAL (1<<1)
+#define MEMINT_SW_CMD (1<<0)
+#define MEMMODECTL 0x11190
+#define MEMMODE_BOOST_EN (1<<31)
+#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define MEMMODE_BOOST_FREQ_SHIFT 24
+#define MEMMODE_IDLE_MODE_MASK 0x00030000
+#define MEMMODE_IDLE_MODE_SHIFT 16
+#define MEMMODE_IDLE_MODE_EVAL 0
+#define MEMMODE_IDLE_MODE_CONT 1
+#define MEMMODE_HWIDLE_EN (1<<15)
+#define MEMMODE_SWMODE_EN (1<<14)
+#define MEMMODE_RCLK_GATE (1<<13)
+#define MEMMODE_HW_UPDATE (1<<12)
+#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
+#define MEMMODE_FSTART_SHIFT 8
+#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
+#define MEMMODE_FMAX_SHIFT 4
+#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG 0x1119c
+#define MEMSWCTL2 0x1119e /* Cantiga only */
+#define SWMEMCMD_RENDER_OFF (0 << 13)
+#define SWMEMCMD_RENDER_ON (1 << 13)
+#define SWMEMCMD_SWFREQ (2 << 13)
+#define SWMEMCMD_TARVID (3 << 13)
+#define SWMEMCMD_VRM_OFF (4 << 13)
+#define SWMEMCMD_VRM_ON (5 << 13)
+#define CMDSTS (1<<12)
+#define SFCAVM (1<<11)
+#define SWFREQ_MASK 0x0380 /* P0-7 */
+#define SWFREQ_SHIFT 7
+#define TARVID_MASK 0x001f
+#define MEMSTAT_CTG 0x111a0
+#define RCBMINAVG 0x111a0
+#define RCUPEI 0x111b0
+#define RCDNEI 0x111b4
+#define RSTDBYCTL 0x111b8
+#define RS1EN (1<<31)
+#define RS2EN (1<<30)
+#define RS3EN (1<<29)
+#define D3RS3EN (1<<28) /* Display D3 implies RS3 */
+#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
+#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7<<20)
+#define RSX_STATUS_ON (0<<20)
+#define RSX_STATUS_RC1 (1<<20)
+#define RSX_STATUS_RC1E (2<<20)
+#define RSX_STATUS_RS1 (3<<20)
+#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7<<20)
+#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
+#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1<<17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3<<14)
+#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1<<14)
+#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3<<12)
+#define SLOW_RS123 (0<<12)
+#define SLOW_RS23 (1<<12)
+#define SLOW_RS3 (2<<12)
+#define NORMAL_RS123 (3<<12)
+#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
+#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3<<4)
+#define RS_CSTATE_C367_RS1 (0<<4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define RS_CSTATE_RSVD (2<<4)
+#define RS_CSTATE_C367_RS2 (3<<4)
+#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
+#define VIDCTL 0x111c0
+#define VIDSTS 0x111c8
+#define VIDSTART 0x111cc /* 8 bits */
+#define MEMSTAT_ILK 0x111f8
+#define MEMSTAT_VID_MASK 0x7f00
+#define MEMSTAT_VID_SHIFT 8
+#define MEMSTAT_PSTATE_MASK 0x00f8
+#define MEMSTAT_PSTATE_SHIFT 3
+#define MEMSTAT_MON_ACTV (1<<2)
+#define MEMSTAT_SRC_CTL_MASK 0x0003
+#define MEMSTAT_SRC_CTL_CORE 0
+#define MEMSTAT_SRC_CTL_TRB 1
+#define MEMSTAT_SRC_CTL_THM 2
+#define MEMSTAT_SRC_CTL_STDBY 3
+#define RCPREVBSYTUPAVG 0x113b8
+#define RCPREVBSYTDNAVG 0x113bc
+#define PMMISC 0x11214
+#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
+#define SDEW 0x1124c
+#define CSIEW0 0x11250
+#define CSIEW1 0x11254
+#define CSIEW2 0x11258
+#define PEW 0x1125c
+#define DEW 0x11270
+#define MCHAFE 0x112c0
+#define CSIEC 0x112e0
+#define DMIEC 0x112e4
+#define DDREC 0x112e8
+#define PEG0EC 0x112ec
+#define PEG1EC 0x112f0
+#define GFXEC 0x112f4
+#define RPPREVBSYTUPAVG 0x113b8
+#define RPPREVBSYTDNAVG 0x113bc
+#define ECR 0x11600
+#define ECR_GPFE (1<<31)
+#define ECR_IMONE (1<<30)
+#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
+#define OGW0 0x11608
+#define OGW1 0x1160c
+#define EG0 0x11610
+#define EG1 0x11614
+#define EG2 0x11618
+#define EG3 0x1161c
+#define EG4 0x11620
+#define EG5 0x11624
+#define EG6 0x11628
+#define EG7 0x1162c
+#define PXW 0x11664
+#define PXWL 0x11680
+#define LCFUSE02 0x116c0
+#define LCFUSE_HIV_MASK 0x000000ff
+#define CSIPLL0 0x12c10
+#define DDRMPLL1 0x12c20
+#define PEG_BAND_GAP_DATA 0x14d68
+
+#define GEN6_GT_PERF_STATUS 0x145948
+#define GEN6_RP_STATE_LIMITS 0x145994
+#define GEN6_RP_STATE_CAP 0x145998
+
+/*
+ * Logical Context regs
+ */
+#define CCID 0x2180
+#define CCID_EN (1<<0)
+/*
+ * Overlay regs
+ */
+
+#define OVADD 0x30000
+#define DOVSTA 0x30008
+#define OC_BUF (0x3<<20)
+#define OGAMC5 0x30010
+#define OGAMC4 0x30014
+#define OGAMC3 0x30018
+#define OGAMC2 0x3001c
+#define OGAMC1 0x30020
+#define OGAMC0 0x30024
+
+/*
+ * Display engine regs
+ */
+
+/* Pipe A timing regs */
+#define _HTOTAL_A 0x60000
+#define _HBLANK_A 0x60004
+#define _HSYNC_A 0x60008
+#define _VTOTAL_A 0x6000c
+#define _VBLANK_A 0x60010
+#define _VSYNC_A 0x60014
+#define _PIPEASRC 0x6001c
+#define _BCLRPAT_A 0x60020
+#define _VSYNCSHIFT_A 0x60028
+
+/* Pipe B timing regs */
+#define _HTOTAL_B 0x61000
+#define _HBLANK_B 0x61004
+#define _HSYNC_B 0x61008
+#define _VTOTAL_B 0x6100c
+#define _VBLANK_B 0x61010
+#define _VSYNC_B 0x61014
+#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_B 0x61020
+#define _VSYNCSHIFT_B 0x61028
+
+
+#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
+#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+
+/* VGA port control */
+#define ADPA 0x61100
+#define ADPA_DAC_ENABLE (1<<31)
+#define ADPA_DAC_DISABLE 0
+#define ADPA_PIPE_SELECT_MASK (1<<30)
+#define ADPA_PIPE_A_SELECT 0
+#define ADPA_PIPE_B_SELECT (1<<30)
+#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_SETS_HVPOLARITY 0
+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_ENABLE 0
+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_ENABLE 0
+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define ADPA_VSYNC_ACTIVE_LOW 0
+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define ADPA_HSYNC_ACTIVE_LOW 0
+#define ADPA_DPMS_MASK (~(3<<10))
+#define ADPA_DPMS_ON (0<<10)
+#define ADPA_DPMS_SUSPEND (1<<10)
+#define ADPA_DPMS_STANDBY (2<<10)
+#define ADPA_DPMS_OFF (3<<10)
+
+
+/* Hotplug control (945+ only) */
+#define PORT_HOTPLUG_EN 0x61110
+#define HDMIB_HOTPLUG_INT_EN (1 << 29)
+#define DPB_HOTPLUG_INT_EN (1 << 29)
+#define HDMIC_HOTPLUG_INT_EN (1 << 28)
+#define DPC_HOTPLUG_INT_EN (1 << 28)
+#define HDMID_HOTPLUG_INT_EN (1 << 27)
+#define DPD_HOTPLUG_INT_EN (1 << 27)
+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
+#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+
+#define PORT_HOTPLUG_STAT 0x61114
+#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
+#define DPB_HOTPLUG_INT_STATUS (1 << 29)
+#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
+#define DPC_HOTPLUG_INT_STATUS (1 << 28)
+#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
+#define DPD_HOTPLUG_INT_STATUS (1 << 27)
+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
+#define TV_HOTPLUG_INT_STATUS (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+
+/* SDVO port control */
+#define SDVOB 0x61140
+#define SDVOC 0x61160
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16)
+#define SDVO_ENCODING_SDVO (0x0 << 10)
+#define SDVO_ENCODING_HDMI (0x2 << 10)
+/** Required for HDMI operation */
+#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define SDVO_COLOR_RANGE_16_235 (1 << 8)
+#define SDVO_BORDER_ENABLE (1 << 7)
+#define SDVO_AUDIO_ENABLE (1 << 6)
+/** New with 965, default is to be set */
+#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
+/** New with 965, default is to be set */
+#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
+#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
+
+/* DVO port control */
+#define DVOA 0x61120
+#define DVOB 0x61140
+#define DVOC 0x61160
+#define DVO_ENABLE (1 << 31)
+#define DVO_PIPE_B_SELECT (1 << 30)
+#define DVO_PIPE_STALL_UNUSED (0 << 28)
+#define DVO_PIPE_STALL (1 << 28)
+#define DVO_PIPE_STALL_TV (2 << 28)
+#define DVO_PIPE_STALL_MASK (3 << 28)
+#define DVO_USE_VGA_SYNC (1 << 15)
+#define DVO_DATA_ORDER_I740 (0 << 14)
+#define DVO_DATA_ORDER_FP (1 << 14)
+#define DVO_VSYNC_DISABLE (1 << 11)
+#define DVO_HSYNC_DISABLE (1 << 10)
+#define DVO_VSYNC_TRISTATE (1 << 9)
+#define DVO_HSYNC_TRISTATE (1 << 8)
+#define DVO_BORDER_ENABLE (1 << 7)
+#define DVO_DATA_ORDER_GBRG (1 << 6)
+#define DVO_DATA_ORDER_RGGB (0 << 6)
+#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6)
+#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6)
+#define DVO_VSYNC_ACTIVE_HIGH (1 << 4)
+#define DVO_HSYNC_ACTIVE_HIGH (1 << 3)
+#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
+#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
+#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
+#define DVO_PRESERVE_MASK (0x7<<24)
+#define DVOA_SRCDIM 0x61124
+#define DVOB_SRCDIM 0x61144
+#define DVOC_SRCDIM 0x61164
+#define DVO_SRCDIM_HORIZONTAL_SHIFT 12
+#define DVO_SRCDIM_VERTICAL_SHIFT 0
+
+/* LVDS port control */
+#define LVDS 0x61180
+/*
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN (1 << 31)
+/* Selects pipe B for LVDS data. Must be set on pre-965. */
+#define LVDS_PIPEB_SELECT (1 << 30)
+#define LVDS_PIPE_MASK (1 << 30)
+#define LVDS_PIPE(pipe) ((pipe) << 30)
+/* LVDS dithering flag on 965/g4x platform */
+#define LVDS_ENABLE_DITHER (1 << 25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define LVDS_VSYNC_POLARITY (1 << 21)
+#define LVDS_HSYNC_POLARITY (1 << 20)
+
+/* Enable border for unscaled (or aspect-scaled) display */
+#define LVDS_BORDER_ENABLE (1 << 15)
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
+#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK (3 << 6)
+#define LVDS_A3_POWER_DOWN (0 << 6)
+#define LVDS_A3_POWER_UP (3 << 6)
+/*
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK (3 << 4)
+#define LVDS_CLKB_POWER_DOWN (0 << 4)
+#define LVDS_CLKB_POWER_UP (3 << 4)
+/*
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK (3 << 2)
+#define LVDS_B0B3_POWER_DOWN (0 << 2)
+#define LVDS_B0B3_POWER_UP (3 << 2)
+
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA 0x61178
+#define VIDEO_DIP_CTL 0x61170
+#define VIDEO_DIP_ENABLE (1 << 31)
+#define VIDEO_DIP_PORT_B (1 << 29)
+#define VIDEO_DIP_PORT_C (2 << 29)
+#define VIDEO_DIP_ENABLE_AVI (1 << 21)
+#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_SPD (8 << 21)
+#define VIDEO_DIP_SELECT_MASK (3 << 19)
+#define VIDEO_DIP_SELECT_AVI (0 << 19)
+#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_FREQ_ONCE (0 << 16)
+#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
+#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+
+/* Panel power sequencing */
+#define PP_STATUS 0x61200
+#define PP_ON (1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY (1 << 30)
+#define PP_SEQUENCE_NONE (0 << 28)
+#define PP_SEQUENCE_POWER_UP (1 << 28)
+#define PP_SEQUENCE_POWER_DOWN (2 << 28)
+#define PP_SEQUENCE_MASK (3 << 28)
+#define PP_SEQUENCE_SHIFT 28
+#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
+#define PP_SEQUENCE_STATE_MASK 0x0000000f
+#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
+#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
+#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
+#define PP_SEQUENCE_STATE_RESET (0xf << 0)
+#define PP_CONTROL 0x61204
+#define POWER_TARGET_ON (1 << 0)
+#define PP_ON_DELAYS 0x61208
+#define PP_OFF_DELAYS 0x6120c
+#define PP_DIVISOR 0x61210
+
+/* Panel fitting */
+#define PFIT_CONTROL 0x61230
+#define PFIT_ENABLE (1 << 31)
+#define PFIT_PIPE_MASK (3 << 29)
+#define PFIT_PIPE_SHIFT 29
+#define VERT_INTERP_DISABLE (0 << 10)
+#define VERT_INTERP_BILINEAR (1 << 10)
+#define VERT_INTERP_MASK (3 << 10)
+#define VERT_AUTO_SCALE (1 << 9)
+#define HORIZ_INTERP_DISABLE (0 << 6)
+#define HORIZ_INTERP_BILINEAR (1 << 6)
+#define HORIZ_INTERP_MASK (3 << 6)
+#define HORIZ_AUTO_SCALE (1 << 5)
+#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+#define PFIT_FILTER_FUZZY (0 << 24)
+#define PFIT_SCALING_AUTO (0 << 26)
+#define PFIT_SCALING_PROGRAMMED (1 << 26)
+#define PFIT_SCALING_PILLAR (2 << 26)
+#define PFIT_SCALING_LETTER (3 << 26)
+#define PFIT_PGM_RATIOS 0x61234
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* Pre-965 */
+#define PFIT_VERT_SCALE_SHIFT 20
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_SHIFT 4
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* 965+ */
+#define PFIT_VERT_SCALE_SHIFT_965 16
+#define PFIT_VERT_SCALE_MASK_965 0x1fff0000
+#define PFIT_HORIZ_SCALE_SHIFT_965 0
+#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
+
+#define PFIT_AUTO_RATIOS 0x61238
+
+/* Backlight control */
+#define BLC_PWM_CTL 0x61254
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+#define BLC_PWM_CTL2 0x61250 /* 965+ only */
+#define BLM_COMBINATION_MODE (1 << 30)
+/*
+ * This field holds the most significant 15 bits of the number of backlight
+ * cycles in a complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16)
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
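+
+/*
+ * Illustrative sketch (hypothetical helper): recovering the full backlight
+ * modulation period from a BLC_PWM_CTL value; the field holds the top 15
+ * bits, so the cycle count is the field value times two per the comment
+ * above.
+ */
+static inline uint32_t
+blc_pwm_max_cycles(uint32_t blc_pwm_ctl)
+{
+	return (((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
+	    BACKLIGHT_MODULATION_FREQ_SHIFT) * 2);
+}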
+
+#define BLC_HIST_CTL 0x61260
+
+/* TV port control */
+#define TV_CTL 0x68000
+/** Enables the TV encoder */
+# define TV_ENC_ENABLE (1 << 31)
+/** Sources the TV encoder input from pipe B instead of A. */
+# define TV_ENC_PIPEB_SELECT (1 << 30)
+/** Outputs composite video (DAC A only) */
+# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
+/** Outputs SVideo video (DAC B/C) */
+# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
+/** Outputs Component video (DAC A/B/C) */
+# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
+/** Outputs Composite and SVideo (DAC A/B/C) */
+# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
+# define TV_TRILEVEL_SYNC (1 << 21)
+/** Enables slow sync generation (945GM only) */
+# define TV_SLOW_SYNC (1 << 20)
+/** Selects 4x oversampling for 480i and 576p */
+# define TV_OVERSAMPLE_4X (0 << 18)
+/** Selects 2x oversampling for 720p and 1080i */
+# define TV_OVERSAMPLE_2X (1 << 18)
+/** Selects no oversampling for 1080p */
+# define TV_OVERSAMPLE_NONE (2 << 18)
+/** Selects 8x oversampling */
+# define TV_OVERSAMPLE_8X (3 << 18)
+/** Selects progressive mode rather than interlaced */
+# define TV_PROGRESSIVE (1 << 17)
+/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */
+# define TV_PAL_BURST (1 << 16)
+/** Field for setting delay of Y compared to C */
+# define TV_YC_SKEW_MASK (7 << 12)
+/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
+# define TV_ENC_SDP_FIX (1 << 11)
+/**
+ * Enables a fix for the 915GM only.
+ *
+ * Not sure what it does.
+ */
+# define TV_ENC_C0_FIX (1 << 10)
+/** Bits that must be preserved by software */
+# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
+# define TV_FUSE_STATE_MASK (3 << 4)
+/** Read-only state that reports all features enabled */
+# define TV_FUSE_STATE_ENABLED (0 << 4)
+/** Read-only state that reports that Macrovision is disabled in hardware */
+# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
+/** Read-only state that reports that TV-out is disabled in hardware. */
+# define TV_FUSE_STATE_DISABLED (2 << 4)
+/** Normal operation */
+# define TV_TEST_MODE_NORMAL (0 << 0)
+/** Encoder test pattern 1 - combo pattern */
+# define TV_TEST_MODE_PATTERN_1 (1 << 0)
+/** Encoder test pattern 2 - full screen vertical 75% color bars */
+# define TV_TEST_MODE_PATTERN_2 (2 << 0)
+/** Encoder test pattern 3 - full screen horizontal 75% color bars */
+# define TV_TEST_MODE_PATTERN_3 (3 << 0)
+/** Encoder test pattern 4 - random noise */
+# define TV_TEST_MODE_PATTERN_4 (4 << 0)
+/** Encoder test pattern 5 - linear color ramps */
+# define TV_TEST_MODE_PATTERN_5 (5 << 0)
+/**
+ * This test mode forces the DACs to 50% of full output.
+ *
+ * This is used for load detection in combination with TVDAC_SENSE_MASK
+ */
+# define TV_TEST_MODE_MONITOR_DETECT (7 << 0)
+# define TV_TEST_MODE_MASK (7 << 0)
+
+#define TV_DAC 0x68004
+# define TV_DAC_SAVE 0x00ffff00
+/**
+ * Reports that DAC state change logic has reported change (RO).
+ *
+ * This gets cleared when TVDAC_STATE_CHG_EN is cleared.
+ */
+# define TVDAC_STATE_CHG (1 << 31)
+# define TVDAC_SENSE_MASK (7 << 28)
+/** Reports that DAC A voltage is above the detect threshold */
+# define TVDAC_A_SENSE (1 << 30)
+/** Reports that DAC B voltage is above the detect threshold */
+# define TVDAC_B_SENSE (1 << 29)
+/** Reports that DAC C voltage is above the detect threshold */
+# define TVDAC_C_SENSE (1 << 28)
+/**
+ * Enables DAC state detection logic, for load-based TV detection.
+ *
+ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
+ * to off, for load detection to work.
+ */
+# define TVDAC_STATE_CHG_EN (1 << 27)
+/** Sets the DAC A sense value to high */
+# define TVDAC_A_SENSE_CTL (1 << 26)
+/** Sets the DAC B sense value to high */
+# define TVDAC_B_SENSE_CTL (1 << 25)
+/** Sets the DAC C sense value to high */
+# define TVDAC_C_SENSE_CTL (1 << 24)
+/** Overrides the ENC_ENABLE and DAC voltage levels */
+# define DAC_CTL_OVERRIDE (1 << 7)
+/** Sets the slew rate. Must be preserved in software */
+# define ENC_TVDAC_SLEW_FAST (1 << 6)
+# define DAC_A_1_3_V (0 << 4)
+# define DAC_A_1_1_V (1 << 4)
+# define DAC_A_0_7_V (2 << 4)
+# define DAC_A_MASK (3 << 4)
+# define DAC_B_1_3_V (0 << 2)
+# define DAC_B_1_1_V (1 << 2)
+# define DAC_B_0_7_V (2 << 2)
+# define DAC_B_MASK (3 << 2)
+# define DAC_C_1_3_V (0 << 0)
+# define DAC_C_1_1_V (1 << 0)
+# define DAC_C_0_7_V (2 << 0)
+# define DAC_C_MASK (3 << 0)
+
+/**
+ * CSC coefficients are stored in a floating point format with 9 bits of
+ * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
+ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
+ * -1 (0x3) being the only legal negative value.
+ */
+#define TV_CSC_Y 0x68010
+# define TV_RY_MASK 0x07ff0000
+# define TV_RY_SHIFT 16
+# define TV_GY_MASK 0x00000fff
+# define TV_GY_SHIFT 0
+
+#define TV_CSC_Y2 0x68014
+# define TV_BY_MASK 0x07ff0000
+# define TV_BY_SHIFT 16
+/**
+ * Y attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AY_MASK 0x000003ff
+# define TV_AY_SHIFT 0
+
+#define TV_CSC_U 0x68018
+# define TV_RU_MASK 0x07ff0000
+# define TV_RU_SHIFT 16
+# define TV_GU_MASK 0x000007ff
+# define TV_GU_SHIFT 0
+
+#define TV_CSC_U2 0x6801c
+# define TV_BU_MASK 0x07ff0000
+# define TV_BU_SHIFT 16
+/**
+ * U attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AU_MASK 0x000003ff
+# define TV_AU_SHIFT 0
+
+#define TV_CSC_V 0x68020
+# define TV_RV_MASK 0x0fff0000
+# define TV_RV_SHIFT 16
+# define TV_GV_MASK 0x000007ff
+# define TV_GV_SHIFT 0
+
+#define TV_CSC_V2 0x68024
+# define TV_BV_MASK 0x07ff0000
+# define TV_BV_SHIFT 16
+/**
+ * V attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AV_MASK 0x000007ff
+# define TV_AV_SHIFT 0
+
+#define TV_CLR_KNOBS 0x68028
+/** 2s-complement brightness adjustment */
+# define TV_BRIGHTNESS_MASK 0xff000000
+# define TV_BRIGHTNESS_SHIFT 24
+/** Contrast adjustment, as a 2.6 unsigned floating point number */
+# define TV_CONTRAST_MASK 0x00ff0000
+# define TV_CONTRAST_SHIFT 16
+/** Saturation adjustment, as a 2.6 unsigned floating point number */
+# define TV_SATURATION_MASK 0x0000ff00
+# define TV_SATURATION_SHIFT 8
+/** Hue adjustment, as an integer phase angle in degrees */
+# define TV_HUE_MASK 0x000000ff
+# define TV_HUE_SHIFT 0
+
+#define TV_CLR_LEVEL 0x6802c
+/** Controls the DAC level for black */
+# define TV_BLACK_LEVEL_MASK 0x01ff0000
+# define TV_BLACK_LEVEL_SHIFT 16
+/** Controls the DAC level for blanking */
+# define TV_BLANK_LEVEL_MASK 0x000001ff
+# define TV_BLANK_LEVEL_SHIFT 0
+
+#define TV_H_CTL_1 0x68030
+/** Number of pixels in the hsync. */
+# define TV_HSYNC_END_MASK 0x1fff0000
+# define TV_HSYNC_END_SHIFT 16
+/** Total number of pixels minus one in the line (display and blanking). */
+# define TV_HTOTAL_MASK 0x00001fff
+# define TV_HTOTAL_SHIFT 0
+
+#define TV_H_CTL_2 0x68034
+/** Enables the colorburst (needed for non-component color) */
+# define TV_BURST_ENA (1 << 31)
+/** Offset of the colorburst from the start of hsync, in pixels minus one. */
+# define TV_HBURST_START_SHIFT 16
+# define TV_HBURST_START_MASK 0x1fff0000
+/** Length of the colorburst */
+# define TV_HBURST_LEN_SHIFT 0
+# define TV_HBURST_LEN_MASK 0x00001fff
+
+#define TV_H_CTL_3 0x68038
+/** End of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_END_SHIFT 16
+# define TV_HBLANK_END_MASK 0x1fff0000
+/** Start of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_START_SHIFT 0
+# define TV_HBLANK_START_MASK 0x00001fff
+
+#define TV_V_CTL_1 0x6803c
+/** XXX */
+# define TV_NBR_END_SHIFT 16
+# define TV_NBR_END_MASK 0x07ff0000
+/** XXX */
+# define TV_VI_END_F1_SHIFT 8
+# define TV_VI_END_F1_MASK 0x00003f00
+/** XXX */
+# define TV_VI_END_F2_SHIFT 0
+# define TV_VI_END_F2_MASK 0x0000003f
+
+#define TV_V_CTL_2 0x68040
+/** Length of vsync, in half lines */
+# define TV_VSYNC_LEN_MASK 0x07ff0000
+# define TV_VSYNC_LEN_SHIFT 16
+/** Offset of the start of vsync in field 1, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F1_MASK 0x00007f00
+# define TV_VSYNC_START_F1_SHIFT 8
+/**
+ * Offset of the start of vsync in field 2, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F2_MASK 0x0000007f
+# define TV_VSYNC_START_F2_SHIFT 0
+
+#define TV_V_CTL_3 0x68044
+/** Enables generation of the equalization signal */
+# define TV_EQUAL_ENA (1 << 31)
+/** Length of vsync, in half lines */
+# define TV_VEQ_LEN_MASK 0x007f0000
+# define TV_VEQ_LEN_SHIFT 16
+/** Offset of the start of equalization in field 1, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F1_MASK 0x00007f00
+# define TV_VEQ_START_F1_SHIFT 8
+/**
+ * Offset of the start of equalization in field 2, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F2_MASK 0x0000007f
+# define TV_VEQ_START_F2_SHIFT 0
+
+#define TV_V_CTL_4 0x68048
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F1_MASK 0x003f0000
+# define TV_VBURST_START_F1_SHIFT 16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F1_MASK 0x000000ff
+# define TV_VBURST_END_F1_SHIFT 0
+
+#define TV_V_CTL_5 0x6804c
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F2_MASK 0x003f0000
+# define TV_VBURST_START_F2_SHIFT 16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F2_MASK 0x000000ff
+# define TV_VBURST_END_F2_SHIFT 0
+
+#define TV_V_CTL_6 0x68050
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F3_MASK 0x003f0000
+# define TV_VBURST_START_F3_SHIFT 16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F3_MASK 0x000000ff
+# define TV_VBURST_END_F3_SHIFT 0
+
+#define TV_V_CTL_7 0x68054
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F4_MASK 0x003f0000
+# define TV_VBURST_START_F4_SHIFT 16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F4_MASK 0x000000ff
+# define TV_VBURST_END_F4_SHIFT 0
+
+#define TV_SC_CTL_1 0x68060
+/** Turns on the first subcarrier phase generation DDA */
+# define TV_SC_DDA1_EN (1 << 31)
+/** Turns on the second subcarrier phase generation DDA */
+# define TV_SC_DDA2_EN (1 << 30)
+/** Turns on the third subcarrier phase generation DDA */
+# define TV_SC_DDA3_EN (1 << 29)
+/** Sets the subcarrier DDA to reset frequency every other field */
+# define TV_SC_RESET_EVERY_2 (0 << 24)
+/** Sets the subcarrier DDA to reset frequency every fourth field */
+# define TV_SC_RESET_EVERY_4 (1 << 24)
+/** Sets the subcarrier DDA to reset frequency every eighth field */
+# define TV_SC_RESET_EVERY_8 (2 << 24)
+/** Sets the subcarrier DDA to never reset the frequency */
+# define TV_SC_RESET_NEVER (3 << 24)
+/** Sets the peak amplitude of the colorburst. */
+# define TV_BURST_LEVEL_MASK 0x00ff0000
+# define TV_BURST_LEVEL_SHIFT 16
+/** Sets the increment of the first subcarrier phase generation DDA */
+# define TV_SCDDA1_INC_MASK 0x00000fff
+# define TV_SCDDA1_INC_SHIFT 0
+
+#define TV_SC_CTL_2 0x68064
+/** Sets the rollover for the second subcarrier phase generation DDA */
+# define TV_SCDDA2_SIZE_MASK 0x7fff0000
+# define TV_SCDDA2_SIZE_SHIFT 16
+/** Sets the increment of the second subcarrier phase generation DDA */
+# define TV_SCDDA2_INC_MASK 0x00007fff
+# define TV_SCDDA2_INC_SHIFT 0
+
+#define TV_SC_CTL_3 0x68068
+/** Sets the rollover for the third subcarrier phase generation DDA */
+# define TV_SCDDA3_SIZE_MASK 0x7fff0000
+# define TV_SCDDA3_SIZE_SHIFT 16
+/** Sets the increment of the third subcarrier phase generation DDA */
+# define TV_SCDDA3_INC_MASK 0x00007fff
+# define TV_SCDDA3_INC_SHIFT 0
+
+#define TV_WIN_POS 0x68070
+/** X coordinate of the display from the start of horizontal active */
+# define TV_XPOS_MASK 0x1fff0000
+# define TV_XPOS_SHIFT 16
+/** Y coordinate of the display from the start of vertical active (NBR) */
+# define TV_YPOS_MASK 0x00000fff
+# define TV_YPOS_SHIFT 0
+
+#define TV_WIN_SIZE 0x68074
+/** Horizontal size of the display window, measured in pixels */
+# define TV_XSIZE_MASK 0x1fff0000
+# define TV_XSIZE_SHIFT 16
+/**
+ * Vertical size of the display window, measured in pixels.
+ *
+ * Must be even for interlaced modes.
+ */
+# define TV_YSIZE_MASK 0x00000fff
+# define TV_YSIZE_SHIFT 0
+
+#define TV_FILTER_CTL_1 0x68080
+/**
+ * Enables automatic scaling calculation.
+ *
+ * If set, the rest of the registers are ignored, and the calculated values can
+ * be read back from the register.
+ */
+# define TV_AUTO_SCALE (1 << 31)
+/**
+ * Disables the vertical filter.
+ *
+ * This is required on modes more than 1024 pixels wide.
+ */
+# define TV_V_FILTER_BYPASS (1 << 29)
+/** Enables adaptive vertical filtering */
+# define TV_VADAPT (1 << 28)
+# define TV_VADAPT_MODE_MASK (3 << 26)
+/** Selects the least adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_LEAST (0 << 26)
+/** Selects the moderately adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MODERATE (1 << 26)
+/** Selects the most adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MOST (3 << 26)
+/**
+ * Sets the horizontal scaling factor.
+ *
+ * This should be the fractional part of the horizontal scaling factor divided
+ * by the oversampling rate. TV_HSCALE should be less than 1, and set to:
+ *
+ * (src width - 1) / ((oversample * dest width) - 1)
+ */
+# define TV_HSCALE_FRAC_MASK 0x00003fff
+# define TV_HSCALE_FRAC_SHIFT 0
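+
+/*
+ * Illustrative sketch (hypothetical helper): the horizontal scale fraction
+ * per the formula above, computed as a 14-bit fixed-point fraction so it
+ * fits TV_HSCALE_FRAC_MASK.
+ */
+static inline uint32_t
+tv_hscale_frac(uint32_t src_w, uint32_t oversample, uint32_t dest_w)
+{
+	return ((((src_w - 1) << 14) / (oversample * dest_w - 1)) &
+	    TV_HSCALE_FRAC_MASK);
+}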
+
+#define TV_FILTER_CTL_2 0x68084
+/**
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
+ */
+# define TV_VSCALE_INT_MASK 0x00038000
+# define TV_VSCALE_INT_SHIFT 15
+/**
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * \sa TV_VSCALE_INT_MASK
+ */
+# define TV_VSCALE_FRAC_MASK 0x00007fff
+# define TV_VSCALE_FRAC_SHIFT 0
+
+#define TV_FILTER_CTL_3 0x68088
+/**
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ */
+# define TV_VSCALE_IP_INT_MASK 0x00038000
+# define TV_VSCALE_IP_INT_SHIFT 15
+/**
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ *
+ * \sa TV_VSCALE_IP_INT_MASK
+ */
+# define TV_VSCALE_IP_FRAC_MASK 0x00007fff
+# define TV_VSCALE_IP_FRAC_SHIFT 0
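+
+/*
+ * Illustrative sketch (hypothetical helper): the 3.15 fixed-point vertical
+ * scale factor per the formulas above; bits 17:15 of the result land in
+ * the INT field and bits 14:0 in the FRAC field.
+ */
+static inline uint32_t
+tv_vscale_3_15(uint32_t src_h, uint32_t interlace, uint32_t dest_h)
+{
+	return (((src_h - 1) << 15) / (interlace * dest_h - 1));
+}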
+
+#define TV_CC_CONTROL 0x68090
+# define TV_CC_ENABLE (1 << 31)
+/**
+ * Specifies which field to send the CC data in.
+ *
+ * CC data is usually sent in field 0.
+ */
+# define TV_CC_FID_MASK (1 << 27)
+# define TV_CC_FID_SHIFT 27
+/** Sets the horizontal position of the CC data. Usually 135. */
+# define TV_CC_HOFF_MASK 0x03ff0000
+# define TV_CC_HOFF_SHIFT 16
+/** Sets the vertical position of the CC data. Usually 21. */
+# define TV_CC_LINE_MASK 0x0000003f
+# define TV_CC_LINE_SHIFT 0
+
+#define TV_CC_DATA 0x68094
+# define TV_CC_RDY (1 << 31)
+/** Second word of CC data to be transmitted. */
+# define TV_CC_DATA_2_MASK 0x007f0000
+# define TV_CC_DATA_2_SHIFT 16
+/** First word of CC data to be transmitted. */
+# define TV_CC_DATA_1_MASK 0x0000007f
+# define TV_CC_DATA_1_SHIFT 0
+
+#define TV_H_LUMA_0 0x68100
+#define TV_H_LUMA_59 0x681ec
+#define TV_H_CHROMA_0 0x68200
+#define TV_H_CHROMA_59 0x682ec
+#define TV_V_LUMA_0 0x68300
+#define TV_V_LUMA_42 0x683a8
+#define TV_V_CHROMA_0 0x68400
+#define TV_V_CHROMA_42 0x684a8
+
+/* Display Port */
+#define DP_A 0x64000 /* eDP */
+#define DP_B 0x64100
+#define DP_C 0x64200
+#define DP_D 0x64300
+
+#define DP_PORT_EN (1 << 31)
+#define DP_PIPEB_SELECT (1 << 30)
+#define DP_PIPE_MASK (1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define DP_LINK_TRAIN_PAT_1 (0 << 28)
+#define DP_LINK_TRAIN_PAT_2 (1 << 28)
+#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
+#define DP_LINK_TRAIN_OFF (3 << 28)
+#define DP_LINK_TRAIN_MASK (3 << 28)
+#define DP_LINK_TRAIN_SHIFT 28
+
+/* CPT Link training mode */
+#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
+#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
+#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
+#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
+#define DP_LINK_TRAIN_SHIFT_CPT 8
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define DP_VOLTAGE_0_4 (0 << 25)
+#define DP_VOLTAGE_0_6 (1 << 25)
+#define DP_VOLTAGE_0_8 (2 << 25)
+#define DP_VOLTAGE_1_2 (3 << 25)
+#define DP_VOLTAGE_MASK (7 << 25)
+#define DP_VOLTAGE_SHIFT 25
+
+/* Signal pre-emphasis levels, like voltages, the other end tells us what
+ * they want
+ */
+#define DP_PRE_EMPHASIS_0 (0 << 22)
+#define DP_PRE_EMPHASIS_3_5 (1 << 22)
+#define DP_PRE_EMPHASIS_6 (2 << 22)
+#define DP_PRE_EMPHASIS_9_5 (3 << 22)
+#define DP_PRE_EMPHASIS_MASK (7 << 22)
+#define DP_PRE_EMPHASIS_SHIFT 22
+
+/* How many wires to use. I guess 3 was too hard */
+#define DP_PORT_WIDTH_1 (0 << 19)
+#define DP_PORT_WIDTH_2 (1 << 19)
+#define DP_PORT_WIDTH_4 (3 << 19)
+#define DP_PORT_WIDTH_MASK (7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define DP_ENHANCED_FRAMING (1 << 18)
+
+/* eDP */
+#define DP_PLL_FREQ_270MHZ (0 << 16)
+#define DP_PLL_FREQ_160MHZ (1 << 16)
+#define DP_PLL_FREQ_MASK (3 << 16)
+
+/** locked once port is enabled */
+#define DP_PORT_REVERSAL (1 << 15)
+
+/* eDP */
+#define DP_PLL_ENABLE (1 << 14)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
+
+#define DP_SCRAMBLING_DISABLE (1 << 12)
+#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
+
+/** limit RGB values to avoid confusing TVs */
+#define DP_COLOR_RANGE_16_235 (1 << 8)
+
+/** Turn on the audio link */
+#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
+
+/** vs and hs sync polarity */
+#define DP_SYNC_VS_HIGH (1 << 4)
+#define DP_SYNC_HS_HIGH (1 << 3)
+
+/** A fantasy */
+#define DP_DETECTED (1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPA_AUX_CH_CTL 0x64010
+#define DPA_AUX_CH_DATA1 0x64014
+#define DPA_AUX_CH_DATA2 0x64018
+#define DPA_AUX_CH_DATA3 0x6401c
+#define DPA_AUX_CH_DATA4 0x64020
+#define DPA_AUX_CH_DATA5 0x64024
+
+#define DPB_AUX_CH_CTL 0x64110
+#define DPB_AUX_CH_DATA1 0x64114
+#define DPB_AUX_CH_DATA2 0x64118
+#define DPB_AUX_CH_DATA3 0x6411c
+#define DPB_AUX_CH_DATA4 0x64120
+#define DPB_AUX_CH_DATA5 0x64124
+
+#define DPC_AUX_CH_CTL 0x64210
+#define DPC_AUX_CH_DATA1 0x64214
+#define DPC_AUX_CH_DATA2 0x64218
+#define DPC_AUX_CH_DATA3 0x6421c
+#define DPC_AUX_CH_DATA4 0x64220
+#define DPC_AUX_CH_DATA5 0x64224
+
+#define DPD_AUX_CH_CTL 0x64310
+#define DPD_AUX_CH_DATA1 0x64314
+#define DPD_AUX_CH_DATA2 0x64318
+#define DPD_AUX_CH_DATA3 0x6431c
+#define DPD_AUX_CH_DATA4 0x64320
+#define DPD_AUX_CH_DATA5 0x64324
+
+#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
+#define DP_AUX_CH_CTL_DONE (1 << 30)
+#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
+#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
+#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
+#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
+#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
+#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
+#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
+#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
+#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
+#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
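+
+/*
+ * Illustrative sketch (hypothetical helper): arming an AUX channel send.
+ * The DONE/TIME_OUT_ERROR/RECEIVE_ERROR status bits are written back to
+ * clear them, and the precharge value of 5 (in 2us units) is only an
+ * assumed typical setting.
+ */
+static inline uint32_t
+dp_aux_ch_send_ctl(uint32_t send_bytes, uint32_t aux_clock_divider)
+{
+	return (DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE |
+	    DP_AUX_CH_CTL_TIME_OUT_400us | DP_AUX_CH_CTL_TIME_OUT_ERROR |
+	    DP_AUX_CH_CTL_RECEIVE_ERROR |
+	    (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+	    (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+	    (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}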
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = (dot clock * bytes per pixel) / (ls_clk * # of lanes)
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+#define _PIPEA_GMCH_DATA_M 0x70050
+#define _PIPEB_GMCH_DATA_M 0x71050
+
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
+#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
+
+#define PIPE_GMCH_DATA_M_MASK (0xffffff)
+
+#define _PIPEA_GMCH_DATA_N 0x70054
+#define _PIPEB_GMCH_DATA_N 0x71054
+#define PIPE_GMCH_DATA_N_MASK (0xffffff)
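+
+/*
+ * Illustrative sketch (hypothetical helper): the GMCH data M value for the
+ * ratio above, scaled against a caller-chosen N; three bytes per pixel is
+ * assumed, matching the comment.
+ */
+static inline uint32_t
+pipe_gmch_data_m(uint32_t dotclock_khz, uint32_t ls_clk_khz,
+    uint32_t lane_count, uint32_t n)
+{
+	/* M/N = (dot clock * bytes per pixel) / (ls_clk * # of lanes) */
+	return ((uint32_t)(((uint64_t)n * dotclock_khz * 3) /
+	    ((uint64_t)ls_clk_khz * lane_count)) & PIPE_GMCH_DATA_M_MASK);
+}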
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+
+#define _PIPEA_DP_LINK_M 0x70060
+#define _PIPEB_DP_LINK_M 0x71060
+#define PIPEA_DP_LINK_M_MASK (0xffffff)
+
+#define _PIPEA_DP_LINK_N 0x70064
+#define _PIPEB_DP_LINK_N 0x71064
+#define PIPEA_DP_LINK_N_MASK (0xffffff)
+
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
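+
+/* Illustrative sketch (not part of this driver): programming a data M/N
+ * pair from the formula above.  compute_m_n() stands in for a
+ * hypothetical ratio-reduction helper; m and n must fit their 24-bit
+ * fields, and 63 encodes the default TU size of 64.
+ *
+ *	compute_m_n(dot_clock * bytes_per_pixel,
+ *	    ls_clk * lane_count, &m, &n);
+ *	I915_WRITE(PIPE_GMCH_DATA_M(pipe),
+ *	    (63 << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ *	    (m & PIPE_GMCH_DATA_M_MASK));
+ *	I915_WRITE(PIPE_GMCH_DATA_N(pipe), n & PIPE_GMCH_DATA_N_MASK);
+ */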
+
+/* Display & cursor control */
+
+/* Pipe A */
+#define _PIPEADSL 0x70000
+#define DSL_LINEMASK 0x00000fff
+#define _PIPEACONF 0x70008
+#define PIPECONF_ENABLE (1<<31)
+#define PIPECONF_DISABLE 0
+#define PIPECONF_DOUBLE_WIDE (1<<30)
+#define I965_PIPECONF_ACTIVE (1<<30)
+#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define PIPECONF_SINGLE_WIDE 0
+#define PIPECONF_PIPE_UNLOCKED 0
+#define PIPECONF_PIPE_LOCKED (1<<25)
+#define PIPECONF_PALETTE 0
+#define PIPECONF_GAMMA (1<<24)
+#define PIPECONF_FORCE_BORDER (1<<25)
+#define PIPECONF_INTERLACE_MASK (7 << 21)
+/* Note that pre-gen3 does not support interlaced display directly. Panel
+ * fitting must be disabled on pre-ilk for interlaced. */
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */
+#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */
+/* Ironlake and later have a completely new set of values for interlaced. PFIT
+ * means panel fitter required, PF means progressive fetch, DBL means power
+ * saving pixel doubling. */
+#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21)
+#define PIPECONF_INTERLACED_ILK (3 << 21)
+#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
+#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
+#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_BPP_MASK (0x000000e0)
+#define PIPECONF_BPP_8 (0<<5)
+#define PIPECONF_BPP_10 (1<<5)
+#define PIPECONF_BPP_6 (2<<5)
+#define PIPECONF_BPP_12 (3<<5)
+#define PIPECONF_DITHER_EN (1<<4)
+#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
+#define PIPECONF_DITHER_TYPE_SP (0<<2)
+#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
+#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
+#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define _PIPEASTAT 0x70024
+#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
+#define PIPE_CRC_DONE_ENABLE (1UL<<28)
+#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
+#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
+#define PIPE_DPST_EVENT_STATUS (1UL<<7)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
+#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
+#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
+#define PIPE_8BPC (0 << 5)
+#define PIPE_10BPC (1 << 5)
+#define PIPE_6BPC (2 << 5)
+#define PIPE_12BPC (3 << 5)
+
+#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
+#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
+#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
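+
+/* Usage sketch (illustrative only): the per-pipe macros above resolve
+ * to the pipe A or pipe B offset, e.g. PIPECONF(0) is _PIPEACONF
+ * (0x70008) and PIPECONF(1) is _PIPEBCONF (0x71008), so a caller can
+ * do a read-modify-write like:
+ *
+ *	I915_WRITE(PIPECONF(pipe),
+ *	    I915_READ(PIPECONF(pipe)) | PIPECONF_ENABLE);
+ */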
+
+#define DSPARB 0x70030
+#define DSPARB_CSTART_MASK (0x7f << 7)
+#define DSPARB_CSTART_SHIFT 7
+#define DSPARB_BSTART_MASK (0x7f)
+#define DSPARB_BSTART_SHIFT 0
+#define DSPARB_BEND_SHIFT 9 /* on 855 */
+#define DSPARB_AEND_SHIFT 0
+
+#define DSPFW1 0x70034
+#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff<<23)
+#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f<<16)
+#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f<<8)
+#define DSPFW_PLANEA_MASK (0x7f)
+#define DSPFW2 0x70038
+#define DSPFW_CURSORA_MASK 0x00003f00
+#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_PLANEC_MASK (0x7f)
+#define DSPFW3 0x7003c
+#define DSPFW_HPLL_SR_EN (1<<31)
+#define DSPFW_CURSOR_SR_SHIFT 24
+#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_SR_MASK (0x1ff)
+
+/* FIFO watermark sizes etc */
+#define G4X_FIFO_LINE_SIZE 64
+#define I915_FIFO_LINE_SIZE 64
+#define I830_FIFO_LINE_SIZE 32
+
+#define G4X_FIFO_SIZE 127
+#define I965_FIFO_SIZE 512
+#define I945_FIFO_SIZE 127
+#define I915_FIFO_SIZE 95
+#define I855GM_FIFO_SIZE 127 /* In cachelines */
+#define I830_FIFO_SIZE 95
+
+#define G4X_MAX_WM 0x3f
+#define I915_MAX_WM 0x3f
+
+#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
+#define PINEVIEW_FIFO_LINE_SIZE 64
+#define PINEVIEW_MAX_WM 0x1ff
+#define PINEVIEW_DFT_WM 0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM 0
+#define PINEVIEW_GUARD_WM 10
+#define PINEVIEW_CURSOR_FIFO 64
+#define PINEVIEW_CURSOR_MAX_WM 0x3f
+#define PINEVIEW_CURSOR_DFT_WM 0
+#define PINEVIEW_CURSOR_GUARD_WM 5
+
+#define I965_CURSOR_FIFO 64
+#define I965_CURSOR_MAX_WM 32
+#define I965_CURSOR_DFT_WM 8
+
+/* define the Watermark register on Ironlake */
+#define WM0_PIPEA_ILK 0x45100
+#define WM0_PIPE_PLANE_MASK (0x7f<<16)
+#define WM0_PIPE_PLANE_SHIFT 16
+#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
+#define WM0_PIPE_SPRITE_SHIFT 8
+#define WM0_PIPE_CURSOR_MASK (0x1f)
+
+#define WM0_PIPEB_ILK 0x45104
+#define WM0_PIPEC_IVB 0x45200
+#define WM1_LP_ILK 0x45108
+#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_LATENCY_SHIFT 24
+#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_FBC_MASK (0xf<<20)
+#define WM1_LP_FBC_SHIFT 20
+#define WM1_LP_SR_MASK (0x1ff<<8)
+#define WM1_LP_SR_SHIFT 8
+#define WM1_LP_CURSOR_MASK (0x3f)
+#define WM2_LP_ILK 0x4510c
+#define WM2_LP_EN (1<<31)
+#define WM3_LP_ILK 0x45110
+#define WM3_LP_EN (1<<31)
+#define WM1S_LP_ILK 0x45120
+#define WM2S_LP_IVB 0x45124
+#define WM3S_LP_IVB 0x45128
+#define WM1S_LP_EN (1<<31)
+
+/* Memory latency timer register */
+#define MLTR_ILK 0x11222
+#define MLTR_WM1_SHIFT 0
+#define MLTR_WM2_SHIFT 8
+/* memory self-refresh latency is reported in units of 0.5us */
+#define ILK_SRLT_MASK 0x3f
+#define ILK_LATENCY(shift)	((I915_READ(MLTR_ILK) >> (shift)) & ILK_SRLT_MASK)
+#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
+#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
+
+/* define the fifo size on Ironlake */
+#define ILK_DISPLAY_FIFO 128
+#define ILK_DISPLAY_MAXWM 64
+#define ILK_DISPLAY_DFTWM 8
+#define ILK_CURSOR_FIFO 32
+#define ILK_CURSOR_MAXWM 16
+#define ILK_CURSOR_DFTWM 8
+
+#define ILK_DISPLAY_SR_FIFO 512
+#define ILK_DISPLAY_MAX_SRWM 0x1ff
+#define ILK_DISPLAY_DFT_SRWM 0x3f
+#define ILK_CURSOR_SR_FIFO 64
+#define ILK_CURSOR_MAX_SRWM 0x3f
+#define ILK_CURSOR_DFT_SRWM 8
+
+#define ILK_FIFO_LINE_SIZE 64
+
+/* define the WM info on Sandybridge */
+#define SNB_DISPLAY_FIFO 128
+#define SNB_DISPLAY_MAXWM	0x7f	/* bits 22:16 */
+#define SNB_DISPLAY_DFTWM 8
+#define SNB_CURSOR_FIFO 32
+#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
+#define SNB_CURSOR_DFTWM 8
+
+#define SNB_DISPLAY_SR_FIFO 512
+#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
+#define SNB_DISPLAY_DFT_SRWM 0x3f
+#define SNB_CURSOR_SR_FIFO 64
+#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
+#define SNB_CURSOR_DFT_SRWM 8
+
+#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
+
+#define SNB_FIFO_LINE_SIZE 64
+
+
+/* the address where we get all kinds of latency values */
+#define SSKPD 0x5d10
+#define SSKPD_WM_MASK 0x3f
+#define SSKPD_WM0_SHIFT 0
+#define SSKPD_WM1_SHIFT 8
+#define SSKPD_WM2_SHIFT 16
+#define SSKPD_WM3_SHIFT 24
+
+#define SNB_LATENCY(shift)	((I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift)) & SSKPD_WM_MASK)
+#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
+#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
+#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
+#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
+
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ * do {
+ * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ * PIPE_FRAME_HIGH_SHIFT);
+ * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ * PIPE_FRAME_LOW_SHIFT);
+ * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ * PIPE_FRAME_HIGH_SHIFT);
+ * } while (high1 != high2);
+ * frame = (high1 << 8) | low1;
+ */
+#define _PIPEAFRAMEHIGH 0x70040
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+#define _PIPEAFRAMEPIXEL 0x70044
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
+/* GM45+ just has to be different */
+#define _PIPEA_FRMCOUNT_GM45 0x70040
+#define _PIPEA_FLIPCOUNT_GM45 0x70044
+#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
+
+/* Cursor A & B regs */
+#define _CURACNTR 0x70080
+/* Old style CUR*CNTR flags (desktop 8xx) */
+#define CURSOR_ENABLE 0x80000000
+#define CURSOR_GAMMA_ENABLE 0x40000000
+#define CURSOR_STRIDE_MASK 0x30000000
+#define CURSOR_FORMAT_SHIFT 24
+#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT)
+/* New style CUR*CNTR flags */
+#define CURSOR_MODE 0x27
+#define CURSOR_MODE_DISABLE 0x00
+#define CURSOR_MODE_64_32B_AX 0x07
+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_PIPE_SELECT (1 << 28)
+#define MCURSOR_PIPE_A 0x00
+#define MCURSOR_PIPE_B (1 << 28)
+#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define _CURABASE 0x70084
+#define _CURAPOS 0x70088
+#define CURSOR_POS_MASK 0x007FF
+#define CURSOR_POS_SIGN 0x8000
+#define CURSOR_X_SHIFT 0
+#define CURSOR_Y_SHIFT 16
+#define CURSIZE 0x700a0
+#define _CURBCNTR 0x700c0
+#define _CURBBASE 0x700c4
+#define _CURBPOS 0x700c8
+
+#define _CURBCNTR_IVB 0x71080
+#define _CURBBASE_IVB 0x71084
+#define _CURBPOS_IVB 0x71088
+
+#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
+
+#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
+#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
+#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
+
+/* Display A control */
+#define _DSPACNTR 0x70180
+#define DISPLAY_PLANE_ENABLE (1<<31)
+#define DISPLAY_PLANE_DISABLE 0
+#define DISPPLANE_GAMMA_ENABLE (1<<30)
+#define DISPPLANE_GAMMA_DISABLE 0
+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_8BPP (0x2<<26)
+#define DISPPLANE_15_16BPP (0x4<<26)
+#define DISPPLANE_16BPP (0x5<<26)
+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
+#define DISPPLANE_32BPP (0x7<<26)
+#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
+#define DISPPLANE_STEREO_ENABLE (1<<25)
+#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_SEL_PIPE_SHIFT 24
+#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SEL_PIPE_A 0
+#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+#define DISPPLANE_SRC_KEY_DISABLE 0
+#define DISPPLANE_LINE_DOUBLE (1<<20)
+#define DISPPLANE_NO_LINE_DOUBLE 0
+#define DISPPLANE_STEREO_POLARITY_FIRST 0
+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
+#define DISPPLANE_TILED (1<<10)
+#define _DSPAADDR 0x70184
+#define _DSPASTRIDE 0x70188
+#define _DSPAPOS 0x7018C /* reserved */
+#define _DSPASIZE 0x70190
+#define _DSPASURF 0x7019C /* 965+ only */
+#define _DSPATILEOFF 0x701A4 /* 965+ only */
+
+#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+
+/* VBIOS flags */
+#define SWF00 0x71410
+#define SWF01 0x71414
+#define SWF02 0x71418
+#define SWF03 0x7141c
+#define SWF04 0x71420
+#define SWF05 0x71424
+#define SWF06 0x71428
+#define SWF10 0x70410
+#define SWF11 0x70414
+#define SWF14 0x71420
+#define SWF30 0x72414
+#define SWF31 0x72418
+#define SWF32 0x7241c
+
+/* Pipe B */
+#define _PIPEBDSL 0x71000
+#define _PIPEBCONF 0x71008
+#define _PIPEBSTAT 0x71024
+#define _PIPEBFRAMEHIGH 0x71040
+#define _PIPEBFRAMEPIXEL 0x71044
+#define _PIPEB_FRMCOUNT_GM45 0x71040
+#define _PIPEB_FLIPCOUNT_GM45 0x71044
+
+
+/* Display B control */
+#define _DSPBCNTR 0x71180
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
+#define _DSPBADDR 0x71184
+#define _DSPBSTRIDE 0x71188
+#define _DSPBPOS 0x7118C
+#define _DSPBSIZE 0x71190
+#define _DSPBSURF 0x7119C
+#define _DSPBTILEOFF 0x711A4
+
+/* Sprite A control */
+#define _DVSACNTR 0x72180
+#define DVS_ENABLE (1<<31)
+#define DVS_GAMMA_ENABLE (1<<30)
+#define DVS_PIXFORMAT_MASK (3<<25)
+#define DVS_FORMAT_YUV422 (0<<25)
+#define DVS_FORMAT_RGBX101010 (1<<25)
+#define DVS_FORMAT_RGBX888 (2<<25)
+#define DVS_FORMAT_RGBX161616 (3<<25)
+#define DVS_SOURCE_KEY (1<<22)
+#define DVS_RGB_ORDER_XBGR (1<<20)
+#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
+#define DVS_YUV_ORDER_YUYV (0<<16)
+#define DVS_YUV_ORDER_UYVY (1<<16)
+#define DVS_YUV_ORDER_YVYU (2<<16)
+#define DVS_YUV_ORDER_VYUY (3<<16)
+#define DVS_DEST_KEY (1<<2)
+#define DVS_TRICKLE_FEED_DISABLE (1<<14)
+#define DVS_TILED (1<<10)
+#define _DVSALINOFF 0x72184
+#define _DVSASTRIDE 0x72188
+#define _DVSAPOS 0x7218c
+#define _DVSASIZE 0x72190
+#define _DVSAKEYVAL 0x72194
+#define _DVSAKEYMSK 0x72198
+#define _DVSASURF 0x7219c
+#define _DVSAKEYMAXVAL 0x721a0
+#define _DVSATILEOFF 0x721a4
+#define _DVSASURFLIVE 0x721ac
+#define _DVSASCALE 0x72204
+#define DVS_SCALE_ENABLE (1<<31)
+#define DVS_FILTER_MASK (3<<29)
+#define DVS_FILTER_MEDIUM (0<<29)
+#define DVS_FILTER_ENHANCING (1<<29)
+#define DVS_FILTER_SOFTENING (2<<29)
+#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _DVSAGAMC 0x72300
+
+#define _DVSBCNTR 0x73180
+#define _DVSBLINOFF 0x73184
+#define _DVSBSTRIDE 0x73188
+#define _DVSBPOS 0x7318c
+#define _DVSBSIZE 0x73190
+#define _DVSBKEYVAL 0x73194
+#define _DVSBKEYMSK 0x73198
+#define _DVSBSURF 0x7319c
+#define _DVSBKEYMAXVAL 0x731a0
+#define _DVSBTILEOFF 0x731a4
+#define _DVSBSURFLIVE 0x731ac
+#define _DVSBSCALE 0x73204
+#define _DVSBGAMC 0x73300
+
+#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+
+#define _SPRA_CTL 0x70280
+#define SPRITE_ENABLE (1<<31)
+#define SPRITE_GAMMA_ENABLE (1<<30)
+#define SPRITE_PIXFORMAT_MASK (7<<25)
+#define SPRITE_FORMAT_YUV422 (0<<25)
+#define SPRITE_FORMAT_RGBX101010 (1<<25)
+#define SPRITE_FORMAT_RGBX888 (2<<25)
+#define SPRITE_FORMAT_RGBX161616 (3<<25)
+#define SPRITE_FORMAT_YUV444 (4<<25)
+#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
+#define SPRITE_CSC_ENABLE (1<<24)
+#define SPRITE_SOURCE_KEY (1<<22)
+#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
+#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
+#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
+#define SPRITE_YUV_ORDER_YUYV (0<<16)
+#define SPRITE_YUV_ORDER_UYVY (1<<16)
+#define SPRITE_YUV_ORDER_YVYU (2<<16)
+#define SPRITE_YUV_ORDER_VYUY (3<<16)
+#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
+#define SPRITE_INT_GAMMA_ENABLE (1<<13)
+#define SPRITE_TILED (1<<10)
+#define SPRITE_DEST_KEY (1<<2)
+#define _SPRA_LINOFF 0x70284
+#define _SPRA_STRIDE 0x70288
+#define _SPRA_POS 0x7028c
+#define _SPRA_SIZE 0x70290
+#define _SPRA_KEYVAL 0x70294
+#define _SPRA_KEYMSK 0x70298
+#define _SPRA_SURF 0x7029c
+#define _SPRA_KEYMAX 0x702a0
+#define _SPRA_TILEOFF 0x702a4
+#define _SPRA_SCALE 0x70304
+#define SPRITE_SCALE_ENABLE (1<<31)
+#define SPRITE_FILTER_MASK (3<<29)
+#define SPRITE_FILTER_MEDIUM (0<<29)
+#define SPRITE_FILTER_ENHANCING (1<<29)
+#define SPRITE_FILTER_SOFTENING (2<<29)
+#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _SPRA_GAMC 0x70400
+
+#define _SPRB_CTL 0x71280
+#define _SPRB_LINOFF 0x71284
+#define _SPRB_STRIDE 0x71288
+#define _SPRB_POS 0x7128c
+#define _SPRB_SIZE 0x71290
+#define _SPRB_KEYVAL 0x71294
+#define _SPRB_KEYMSK 0x71298
+#define _SPRB_SURF 0x7129c
+#define _SPRB_KEYMAX 0x712a0
+#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_SCALE 0x71304
+#define _SPRB_GAMC 0x71400
+
+#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+
+/* VBIOS regs */
+#define VGACNTRL 0x71400
+# define VGA_DISP_DISABLE (1 << 31)
+# define VGA_2X_MODE (1 << 30)
+# define VGA_PIPE_B_SELECT (1 << 29)
+
+/* Ironlake */
+
+#define CPU_VGACNTRL 0x41000
+
+#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
+#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
+#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2)
+#define DIGITAL_PORTA_NO_DETECT (0 << 0)
+#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1)
+#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0)
+
+/* refresh rate hardware control */
+#define RR_HW_CTL 0x45300
+#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
+#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
+
+#define FDI_PLL_BIOS_0 0x46000
+#define FDI_PLL_FB_CLOCK_MASK 0xff
+#define FDI_PLL_BIOS_1 0x46004
+#define FDI_PLL_BIOS_2 0x46008
+#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
+#define DISPLAY_PORT_PLL_BIOS_1 0x46010
+#define DISPLAY_PORT_PLL_BIOS_2 0x46014
+
+#define PCH_DSPCLK_GATE_D 0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
+# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
+# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+
+#define PCH_3DCGDIS0 0x46020
+# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
+# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
+
+#define PCH_3DCGDIS1 0x46024
+# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
+#define FDI_PLL_FREQ_CTL 0x46030
+#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
+#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
+#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
+
+
+#define _PIPEA_DATA_M1 0x60030
+#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+#define TU_SIZE_MASK 0x7e000000
+#define PIPE_DATA_M1_OFFSET 0
+#define _PIPEA_DATA_N1 0x60034
+#define PIPE_DATA_N1_OFFSET 0
+
+#define _PIPEA_DATA_M2 0x60038
+#define PIPE_DATA_M2_OFFSET 0
+#define _PIPEA_DATA_N2 0x6003c
+#define PIPE_DATA_N2_OFFSET 0
+
+#define _PIPEA_LINK_M1 0x60040
+#define PIPE_LINK_M1_OFFSET 0
+#define _PIPEA_LINK_N1 0x60044
+#define PIPE_LINK_N1_OFFSET 0
+
+#define _PIPEA_LINK_M2 0x60048
+#define PIPE_LINK_M2_OFFSET 0
+#define _PIPEA_LINK_N2 0x6004c
+#define PIPE_LINK_N2_OFFSET 0
+
+/* PIPEB timing regs are the same, starting at 0x61000 */
+
+#define _PIPEB_DATA_M1 0x61030
+#define _PIPEB_DATA_N1 0x61034
+
+#define _PIPEB_DATA_M2 0x61038
+#define _PIPEB_DATA_N2 0x6103c
+
+#define _PIPEB_LINK_M1 0x61040
+#define _PIPEB_LINK_N1 0x61044
+
+#define _PIPEB_LINK_M2 0x61048
+#define _PIPEB_LINK_N2 0x6104c
+
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+
+/* CPU panel fitter */
+/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1 0x68080
+#define _PFB_CTL_1 0x68880
+#define PF_ENABLE (1<<31)
+#define PF_FILTER_MASK (3<<23)
+#define PF_FILTER_PROGRAMMED (0<<23)
+#define PF_FILTER_MED_3x3 (1<<23)
+#define PF_FILTER_EDGE_ENHANCE (2<<23)
+#define PF_FILTER_EDGE_SOFTEN (3<<23)
+#define _PFA_WIN_SZ 0x68074
+#define _PFB_WIN_SZ 0x68874
+#define _PFA_WIN_POS 0x68070
+#define _PFB_WIN_POS 0x68870
+#define _PFA_VSCALE 0x68084
+#define _PFB_VSCALE 0x68884
+#define _PFA_HSCALE 0x68090
+#define _PFB_HSCALE 0x68890
+
+#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
+
+/* legacy palette */
+#define _LGC_PALETTE_A 0x4a000
+#define _LGC_PALETTE_B 0x4a800
+#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
+
+/* interrupts */
+#define DE_MASTER_IRQ_CONTROL (1 << 31)
+#define DE_SPRITEB_FLIP_DONE (1 << 29)
+#define DE_SPRITEA_FLIP_DONE (1 << 28)
+#define DE_PLANEB_FLIP_DONE (1 << 27)
+#define DE_PLANEA_FLIP_DONE (1 << 26)
+#define DE_PCU_EVENT (1 << 25)
+#define DE_GTT_FAULT (1 << 24)
+#define DE_POISON (1 << 23)
+#define DE_PERFORM_COUNTER (1 << 22)
+#define DE_PCH_EVENT (1 << 21)
+#define DE_AUX_CHANNEL_A (1 << 20)
+#define DE_DP_A_HOTPLUG (1 << 19)
+#define DE_GSE (1 << 18)
+#define DE_PIPEB_VBLANK (1 << 15)
+#define DE_PIPEB_EVEN_FIELD (1 << 14)
+#define DE_PIPEB_ODD_FIELD (1 << 13)
+#define DE_PIPEB_LINE_COMPARE (1 << 12)
+#define DE_PIPEB_VSYNC (1 << 11)
+#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
+#define DE_PIPEA_VBLANK (1 << 7)
+#define DE_PIPEA_EVEN_FIELD (1 << 6)
+#define DE_PIPEA_ODD_FIELD (1 << 5)
+#define DE_PIPEA_LINE_COMPARE (1 << 4)
+#define DE_PIPEA_VSYNC (1 << 3)
+#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
+
+/* More Ivybridge lolz */
+#define DE_ERR_DEBUG_IVB (1<<30)
+#define DE_GSE_IVB (1<<29)
+#define DE_PCH_EVENT_IVB (1<<28)
+#define DE_DP_A_HOTPLUG_IVB (1<<27)
+#define DE_AUX_CHANNEL_A_IVB (1<<26)
+#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
+#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
+#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
+#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
+#define DE_PIPEB_VBLANK_IVB (1<<5)
+#define DE_PIPEA_VBLANK_IVB (1<<0)
+
+#define DEISR 0x44000
+#define DEIMR 0x44004
+#define DEIIR 0x44008
+#define DEIER 0x4400c
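+
+/* Sketch of the usual ISR/IMR/IIR/IER handling (illustrative only;
+ * i915_irq.c is the real handler): IIR latches events, and writing a
+ * bit back to IIR acknowledges it.
+ *
+ *	de_iir = I915_READ(DEIIR);
+ *	if (de_iir & DE_PIPEA_VBLANK)
+ *		drm_handle_vblank(dev, 0);
+ *	I915_WRITE(DEIIR, de_iir);
+ */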
+
+/* GT interrupt */
+#define GT_PIPE_NOTIFY (1 << 4)
+#define GT_RENDER_CS_ERROR (1 << 3)
+#define GT_SYNC_STATUS (1 << 2)
+#define GT_USER_INTERRUPT (1 << 0)
+#define GT_BSD_USER_INTERRUPT (1 << 5)
+#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
+#define GT_BLT_USER_INTERRUPT (1 << 22)
+
+#define GTISR 0x44010
+#define GTIMR 0x44014
+#define GTIIR 0x44018
+#define GTIER 0x4401c
+
+#define ILK_DISPLAY_CHICKEN2 0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define ILK_ELPIN_409_SELECT (1 << 25)
+#define ILK_DPARB_GATE (1<<22)
+#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DISPLAY_CHICKEN_FUSES 0x42014
+#define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31)
+#define ILK_INTERNAL_DISPLAY_DISABLE (1<<30)
+#define ILK_DISPLAY_DEBUG_DISABLE (1<<29)
+#define ILK_HDCP_DISABLE (1<<25)
+#define ILK_eDP_A_DISABLE (1<<24)
+#define ILK_DESKTOP (1<<23)
+#define ILK_DSPCLK_GATE 0x42020
+#define IVB_VRHUNIT_CLK_GATE (1<<28)
+#define ILK_DPARB_CLK_GATE (1<<5)
+#define ILK_DPFD_CLK_GATE (1<<7)
+
+/* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */
+#define ILK_CLK_FBC (1<<7)
+#define ILK_DPFC_DIS1 (1<<8)
+#define ILK_DPFC_DIS2 (1<<9)
+
+#define IVB_CHICKEN3 0x4200c
+# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
+# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
+
+#define DISP_ARB_CTL 0x45000
+#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
+#define DISP_FBC_WM_DIS (1<<15)
+
+/* GEN7 chicken */
+#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
+# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
+
+#define GEN7_L3CNTLREG1 0xB01C
+#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+
+#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
+#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+
+/* WaCatErrorRejectionIssue */
+#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+
+/* PCH */
+
+/* south display engine interrupt */
+#define SDE_AUDIO_POWER_D (1 << 27)
+#define SDE_AUDIO_POWER_C (1 << 26)
+#define SDE_AUDIO_POWER_B (1 << 25)
+#define SDE_AUDIO_POWER_SHIFT (25)
+#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS (1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
+#define SDE_AUDIO_HDCP_MASK (3 << 22)
+#define SDE_AUDIO_TRANSB (1 << 21)
+#define SDE_AUDIO_TRANSA (1 << 20)
+#define SDE_AUDIO_TRANS_MASK (3 << 20)
+#define SDE_POISON (1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB (1 << 17)
+#define SDE_FDI_RXA (1 << 16)
+#define SDE_FDI_MASK (3 << 16)
+#define SDE_AUXD (1 << 15)
+#define SDE_AUXC (1 << 14)
+#define SDE_AUXB (1 << 13)
+#define SDE_AUX_MASK (7 << 13)
+/* 12 reserved */
+#define SDE_CRT_HOTPLUG (1 << 11)
+#define SDE_PORTD_HOTPLUG (1 << 10)
+#define SDE_PORTC_HOTPLUG (1 << 9)
+#define SDE_PORTB_HOTPLUG (1 << 8)
+#define SDE_SDVOB_HOTPLUG (1 << 6)
+#define SDE_HOTPLUG_MASK (0xf << 8)
+#define SDE_TRANSB_CRC_DONE (1 << 5)
+#define SDE_TRANSB_CRC_ERR (1 << 4)
+#define SDE_TRANSB_FIFO_UNDER (1 << 3)
+#define SDE_TRANSA_CRC_DONE (1 << 2)
+#define SDE_TRANSA_CRC_ERR (1 << 1)
+#define SDE_TRANSA_FIFO_UNDER (1 << 0)
+#define SDE_TRANS_MASK (0x3f)
+/* CPT */
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
+
+#define SDEISR 0xc4000
+#define SDEIMR 0xc4004
+#define SDEIIR 0xc4008
+#define SDEIER 0xc400c
+
+/* digital port hotplug */
+#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
+#define PORTD_HOTPLUG_ENABLE (1 << 20)
+#define PORTD_PULSE_DURATION_2ms (0)
+#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
+#define PORTD_PULSE_DURATION_6ms (2 << 18)
+#define PORTD_PULSE_DURATION_100ms (3 << 18)
+#define PORTD_PULSE_DURATION_MASK (3 << 18)
+#define PORTD_HOTPLUG_NO_DETECT (0)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
+#define PORTC_HOTPLUG_ENABLE (1 << 12)
+#define PORTC_PULSE_DURATION_2ms (0)
+#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
+#define PORTC_PULSE_DURATION_6ms (2 << 10)
+#define PORTC_PULSE_DURATION_100ms (3 << 10)
+#define PORTC_PULSE_DURATION_MASK (3 << 10)
+#define PORTC_HOTPLUG_NO_DETECT (0)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
+#define PORTB_HOTPLUG_ENABLE (1 << 4)
+#define PORTB_PULSE_DURATION_2ms (0)
+#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
+#define PORTB_PULSE_DURATION_6ms (2 << 2)
+#define PORTB_PULSE_DURATION_100ms (3 << 2)
+#define PORTB_PULSE_DURATION_MASK (3 << 2)
+#define PORTB_HOTPLUG_NO_DETECT (0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
+
+#define PCH_GPIOA 0xc5010
+#define PCH_GPIOB 0xc5014
+#define PCH_GPIOC 0xc5018
+#define PCH_GPIOD 0xc501c
+#define PCH_GPIOE 0xc5020
+#define PCH_GPIOF 0xc5024
+
+#define PCH_GMBUS0 0xc5100
+#define PCH_GMBUS1 0xc5104
+#define PCH_GMBUS2 0xc5108
+#define PCH_GMBUS3 0xc510c
+#define PCH_GMBUS4 0xc5110
+#define PCH_GMBUS5 0xc5120
+
+#define _PCH_DPLL_A 0xc6014
+#define _PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+
+#define _PCH_FPA0 0xc6040
+#define FP_CB_TUNE (0x3<<22)
+#define _PCH_FPA1 0xc6044
+#define _PCH_FPB0 0xc6048
+#define _PCH_FPB1 0xc604c
+#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
+
+#define PCH_DPLL_TEST 0xc606c
+
+#define PCH_DREF_CONTROL 0xC6200
+#define DREF_CONTROL_MASK 0x7fc3
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
+#define DREF_SSC_SOURCE_DISABLE (0<<11)
+#define DREF_SSC_SOURCE_ENABLE (2<<11)
+#define DREF_SSC_SOURCE_MASK (3<<11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
+#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
+#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
+#define DREF_SSC4_DOWNSPREAD (0<<6)
+#define DREF_SSC4_CENTERSPREAD (1<<6)
+#define DREF_SSC1_DISABLE (0<<1)
+#define DREF_SSC1_ENABLE (1<<1)
+#define DREF_SSC4_DISABLE (0)
+#define DREF_SSC4_ENABLE (1)
+
+#define PCH_RAWCLK_FREQ 0xc6204
+#define FDL_TP1_TIMER_SHIFT 12
+#define FDL_TP1_TIMER_MASK (3<<12)
+#define FDL_TP2_TIMER_SHIFT 10
+#define FDL_TP2_TIMER_MASK (3<<10)
+#define RAWCLK_FREQ_MASK 0x3ff
+
+#define PCH_DPLL_TMR_CFG 0xc6208
+
+#define PCH_SSC4_PARMS 0xc6210
+#define PCH_SSC4_AUX_PARMS 0xc6214
+
+#define PCH_DPLL_SEL 0xc7000
+#define TRANSA_DPLL_ENABLE (1<<3)
+#define TRANSA_DPLLB_SEL (1<<0)
+#define TRANSA_DPLLA_SEL 0
+#define TRANSB_DPLL_ENABLE (1<<7)
+#define TRANSB_DPLLB_SEL (1<<4)
+#define TRANSB_DPLLA_SEL (0)
+#define TRANSC_DPLL_ENABLE (1<<11)
+#define TRANSC_DPLLB_SEL (1<<8)
+#define TRANSC_DPLLA_SEL (0)
+
+/* transcoder */
+
+#define _TRANS_HTOTAL_A 0xe0000
+#define TRANS_HTOTAL_SHIFT 16
+#define TRANS_HACTIVE_SHIFT 0
+#define _TRANS_HBLANK_A 0xe0004
+#define TRANS_HBLANK_END_SHIFT 16
+#define TRANS_HBLANK_START_SHIFT 0
+#define _TRANS_HSYNC_A 0xe0008
+#define TRANS_HSYNC_END_SHIFT 16
+#define TRANS_HSYNC_START_SHIFT 0
+#define _TRANS_VTOTAL_A 0xe000c
+#define TRANS_VTOTAL_SHIFT 16
+#define TRANS_VACTIVE_SHIFT 0
+#define _TRANS_VBLANK_A 0xe0010
+#define TRANS_VBLANK_END_SHIFT 16
+#define TRANS_VBLANK_START_SHIFT 0
+#define _TRANS_VSYNC_A 0xe0014
+#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_START_SHIFT 0
+#define _TRANS_VSYNCSHIFT_A 0xe0028
+
+#define _TRANSA_DATA_M1 0xe0030
+#define _TRANSA_DATA_N1 0xe0034
+#define _TRANSA_DATA_M2 0xe0038
+#define _TRANSA_DATA_N2 0xe003c
+#define _TRANSA_DP_LINK_M1 0xe0040
+#define _TRANSA_DP_LINK_N1 0xe0044
+#define _TRANSA_DP_LINK_M2 0xe0048
+#define _TRANSA_DP_LINK_N2 0xe004c
+
+/* Per-transcoder DIP controls */
+
+#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_GCP_A 0xe0210
+
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define _VIDEO_DIP_GCP_B 0xe1210
+
+#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+
+#define _TRANS_HTOTAL_B 0xe1000
+#define _TRANS_HBLANK_B 0xe1004
+#define _TRANS_HSYNC_B 0xe1008
+#define _TRANS_VTOTAL_B 0xe100c
+#define _TRANS_VBLANK_B 0xe1010
+#define _TRANS_VSYNC_B 0xe1014
+#define _TRANS_VSYNCSHIFT_B 0xe1028
+
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
+#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
+ _TRANS_VSYNCSHIFT_B)
+
+#define _TRANSB_DATA_M1 0xe1030
+#define _TRANSB_DATA_N1 0xe1034
+#define _TRANSB_DATA_M2 0xe1038
+#define _TRANSB_DATA_N2 0xe103c
+#define _TRANSB_DP_LINK_M1 0xe1040
+#define _TRANSB_DP_LINK_N1 0xe1044
+#define _TRANSB_DP_LINK_M2 0xe1048
+#define _TRANSB_DP_LINK_N2 0xe104c
+
+#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
+#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
+#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
+#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
+#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
+#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
+#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
+#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
+
+#define _TRANSACONF 0xf0008
+#define _TRANSBCONF 0xf1008
+#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF)
+#define TRANS_DISABLE (0<<31)
+#define TRANS_ENABLE (1<<31)
+#define TRANS_STATE_MASK (1<<30)
+#define TRANS_STATE_DISABLE (0<<30)
+#define TRANS_STATE_ENABLE (1<<30)
+#define TRANS_FSYNC_DELAY_HB1 (0<<27)
+#define TRANS_FSYNC_DELAY_HB2 (1<<27)
+#define TRANS_FSYNC_DELAY_HB3 (2<<27)
+#define TRANS_FSYNC_DELAY_HB4 (3<<27)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_VIDEO_AUDIO (0<<26)
+#define TRANS_INTERLACE_MASK (7<<21)
+#define TRANS_PROGRESSIVE (0<<21)
+#define TRANS_INTERLACED (3<<21)
+#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
+#define TRANS_8BPC (0<<5)
+#define TRANS_10BPC (1<<5)
+#define TRANS_6BPC (2<<5)
+#define TRANS_12BPC (3<<5)
+
+#define _TRANSA_CHICKEN2 0xf0064
+#define _TRANSB_CHICKEN2 0xf1064
+#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+
+#define SOUTH_CHICKEN1 0xc2000
+#define FDIA_PHASE_SYNC_SHIFT_OVR 19
+#define FDIA_PHASE_SYNC_SHIFT_EN 18
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
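+/* for reference, FDI_PHASE_SYNC_OVR(0) expands to (1<<19) and
+ * FDI_PHASE_SYNC_OVR(1) to (1<<17) */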
+#define SOUTH_CHICKEN2 0xc2004
+#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+
+#define _FDI_RXA_CHICKEN 0xc200c
+#define _FDI_RXB_CHICKEN 0xc2010
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
+
+#define SOUTH_DSPCLK_GATE_D 0xc2020
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+
+/* CPU: FDI_TX */
+#define _FDI_TXA_CTL 0x60100
+#define _FDI_TXB_CTL 0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
+#define FDI_TX_DISABLE (0<<31)
+#define FDI_TX_ENABLE (1<<31)
+#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
+#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
+#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
+#define FDI_LINK_TRAIN_NONE (3<<28)
+#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+ SNB has different settings. */
+/* SNB A-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
+#define FDI_DP_PORT_WIDTH_X1 (0<<19)
+#define FDI_DP_PORT_WIDTH_X2 (1<<19)
+#define FDI_DP_PORT_WIDTH_X3 (2<<19)
+#define FDI_DP_PORT_WIDTH_X4 (3<<19)
+#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
+/* Ironlake: hardwired to 1 */
+#define FDI_TX_PLL_ENABLE (1<<14)
+
+/* Ivybridge has different bits for lolz */
+#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8)
+#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
+
+/* both Tx and Rx */
+#define FDI_COMPOSITE_SYNC (1<<11)
+#define FDI_LINK_TRAIN_AUTO (1<<10)
+#define FDI_SCRAMBLING_ENABLE (0<<7)
+#define FDI_SCRAMBLING_DISABLE (1<<7)
+/* FDI_RX: RX channel X is hard-wired to transcoder X */
+#define _FDI_RXA_CTL 0xf000c
+#define _FDI_RXB_CTL 0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
+#define FDI_RX_ENABLE (1<<31)
+/* training pattern and DP port width bits are the same as FDI_TX */
+#define FDI_FS_ERRC_ENABLE (1<<27)
+#define FDI_FE_ERRC_ENABLE (1<<26)
+#define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_8BPC (0<<16)
+#define FDI_10BPC (1<<16)
+#define FDI_6BPC (2<<16)
+#define FDI_12BPC (3<<16)
+#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
+#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
+#define FDI_RX_PLL_ENABLE (1<<13)
+#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
+#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
+#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
+#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
+#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
+#define FDI_PCDCLK (1<<4)
+/* CPT */
+#define FDI_AUTO_TRAINING (1<<10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define _FDI_RXA_TUSIZE1 0xf0030
+#define _FDI_RXA_TUSIZE2 0xf0038
+#define _FDI_RXB_TUSIZE1 0xf1030
+#define _FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
+
+/* FDI_RX interrupt register format */
+#define FDI_RX_INTER_LANE_ALIGN (1<<10)
+#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
+#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
+#define FDI_RX_FS_CODE_ERR (1<<6)
+#define FDI_RX_FE_CODE_ERR (1<<5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
+#define FDI_RX_HDCP_LINK_FAIL (1<<3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
+
+#define _FDI_RXA_IIR 0xf0014
+#define _FDI_RXA_IMR 0xf0018
+#define _FDI_RXB_IIR 0xf1014
+#define _FDI_RXB_IMR 0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
+
+#define FDI_PLL_CTL_1 0xfe000
+#define FDI_PLL_CTL_2 0xfe004
+
+/* CRT */
+#define PCH_ADPA 0xe1100
+#define ADPA_TRANS_SELECT_MASK (1<<30)
+#define ADPA_TRANS_A_SELECT 0
+#define ADPA_TRANS_B_SELECT (1<<30)
+#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
+
+/* or SDVOB */
+#define HDMIB 0xe1140
+#define PORT_ENABLE (1 << 31)
+#define TRANSCODER(pipe) ((pipe) << 30)
+#define TRANSCODER_CPT(pipe) ((pipe) << 29)
+#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER_MASK_CPT (3 << 29)
+#define COLOR_FORMAT_8bpc (0)
+#define COLOR_FORMAT_12bpc (3 << 26)
+#define SDVOB_HOTPLUG_ENABLE (1 << 23)
+#define SDVO_ENCODING (0)
+#define TMDS_ENCODING (2 << 10)
+#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
+/* CPT */
+#define HDMI_MODE_SELECT (1 << 9)
+#define DVI_MODE_SELECT (0)
+#define SDVOB_BORDER_ENABLE (1 << 7)
+#define AUDIO_ENABLE (1 << 6)
+#define VSYNC_ACTIVE_HIGH (1 << 4)
+#define HSYNC_ACTIVE_HIGH (1 << 3)
+#define PORT_DETECTED (1 << 2)
+
+/* PCH SDVOB multiplex with HDMIB */
+#define PCH_SDVOB HDMIB
+
+#define HDMIC 0xe1150
+#define HDMID 0xe1160
+
+#define PCH_LVDS 0xe1180
+#define LVDS_DETECTED (1 << 1)
+
+#define BLC_PWM_CPU_CTL2 0x48250
+#define PWM_ENABLE (1 << 31)
+#define PWM_PIPE_A (0 << 29)
+#define PWM_PIPE_B (1 << 29)
+#define BLC_PWM_CPU_CTL 0x48254
+
+#define BLC_PWM_PCH_CTL1 0xc8250
+#define PWM_PCH_ENABLE (1 << 31)
+#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
+#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
+#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
+#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
+
+#define BLC_PWM_PCH_CTL2 0xc8254
+
+#define PCH_PP_STATUS 0xc7200
+#define PCH_PP_CONTROL 0xc7204
+#define PANEL_UNLOCK_REGS (0xabcd << 16)
+#define PANEL_UNLOCK_MASK (0xffff << 16)
+#define EDP_FORCE_VDD (1 << 3)
+#define EDP_BLC_ENABLE (1 << 2)
+#define PANEL_POWER_RESET (1 << 1)
+#define PANEL_POWER_OFF (0 << 0)
+#define PANEL_POWER_ON (1 << 0)
+#define PCH_PP_ON_DELAYS 0xc7208
+#define PANEL_PORT_SELECT_MASK (3 << 30)
+#define PANEL_PORT_SELECT_LVDS (0 << 30)
+#define PANEL_PORT_SELECT_DPA (1 << 30)
+#define EDP_PANEL (1 << 30)
+#define PANEL_PORT_SELECT_DPC (2 << 30)
+#define PANEL_PORT_SELECT_DPD (3 << 30)
+#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT 16
+#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+
+#define PCH_PP_OFF_DELAYS 0xc720c
+#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT 16
+#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+
+#define PCH_PP_DIVISOR 0xc7210
+#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
+#define PP_REFERENCE_DIVIDER_SHIFT 8
+#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
+#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
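+
+/* Decoding sketch (illustrative only), assuming the usual 100us panel
+ * power sequencing units:
+ *
+ *	pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ *	power_up_100us = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ *	    PANEL_POWER_UP_DELAY_SHIFT;
+ */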
+
+#define PCH_DP_B 0xe4100
+#define PCH_DPB_AUX_CH_CTL 0xe4110
+#define PCH_DPB_AUX_CH_DATA1 0xe4114
+#define PCH_DPB_AUX_CH_DATA2 0xe4118
+#define PCH_DPB_AUX_CH_DATA3 0xe411c
+#define PCH_DPB_AUX_CH_DATA4 0xe4120
+#define PCH_DPB_AUX_CH_DATA5 0xe4124
+
+#define PCH_DP_C 0xe4200
+#define PCH_DPC_AUX_CH_CTL 0xe4210
+#define PCH_DPC_AUX_CH_DATA1 0xe4214
+#define PCH_DPC_AUX_CH_DATA2 0xe4218
+#define PCH_DPC_AUX_CH_DATA3 0xe421c
+#define PCH_DPC_AUX_CH_DATA4 0xe4220
+#define PCH_DPC_AUX_CH_DATA5 0xe4224
+
+#define PCH_DP_D 0xe4300
+#define PCH_DPD_AUX_CH_CTL 0xe4310
+#define PCH_DPD_AUX_CH_DATA1 0xe4314
+#define PCH_DPD_AUX_CH_DATA2 0xe4318
+#define PCH_DPD_AUX_CH_DATA3 0xe431c
+#define PCH_DPD_AUX_CH_DATA4 0xe4320
+#define PCH_DPD_AUX_CH_DATA5 0xe4324
+
+/* CPT */
+#define PORT_TRANS_A_SEL_CPT 0
+#define PORT_TRANS_B_SEL_CPT (1<<29)
+#define PORT_TRANS_C_SEL_CPT (2<<29)
+#define PORT_TRANS_SEL_MASK (3<<29)
+#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
+
+#define TRANS_DP_CTL_A 0xe0300
+#define TRANS_DP_CTL_B 0xe1300
+#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
+#define TRANS_DP_OUTPUT_ENABLE (1<<31)
+#define TRANS_DP_PORT_SEL_B (0<<29)
+#define TRANS_DP_PORT_SEL_C (1<<29)
+#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_NONE (3<<29)
+#define TRANS_DP_PORT_SEL_MASK (3<<29)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_ENH_FRAMING (1<<18)
+#define TRANS_DP_8BPC (0<<9)
+#define TRANS_DP_10BPC (1<<9)
+#define TRANS_DP_6BPC (2<<9)
+#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_BPC_MASK (3<<9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_VSYNC_ACTIVE_LOW 0
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_LOW 0
+#define TRANS_DP_SYNC_MASK (3<<3)
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+
+#define FORCEWAKE 0xA18C
+#define FORCEWAKE_ACK 0x130090
+#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MT_ACK 0x130040
+#define ECOBUS 0xa180
+#define FORCEWAKE_MT_ENABLE (1<<5)
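+
+/* Wake sketch (illustrative only): the GT must be forced awake before
+ * most GEN6 register accesses; the driver's force-wake helpers are the
+ * real entry points.
+ *
+ *	I915_WRITE(FORCEWAKE, 1);
+ *	while ((I915_READ(FORCEWAKE_ACK) & 1) == 0)
+ *		DELAY(10);
+ */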
+
+#define GTFIFODBG 0x120000
+#define GT_FIFO_CPU_ERROR_MASK 7
+#define GT_FIFO_OVFERR (1<<2)
+#define GT_FIFO_IAWRERR (1<<1)
+#define GT_FIFO_IARDERR (1<<0)
+
+#define GT_FIFO_FREE_ENTRIES 0x120008
+#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+
+#define GEN6_UCGCTL1 0x9400
+# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+
+#define GEN6_UCGCTL2 0x9404
+# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
+#define GEN6_RPNSWREQ 0xA008
+#define GEN6_TURBO_DISABLE (1<<31)
+#define GEN6_FREQUENCY(x) ((x)<<25)
+#define GEN6_OFFSET(x) ((x)<<19)
+#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_RC_VIDEO_FREQ 0xA00C
+#define GEN6_RC_CONTROL 0xA090
+#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
+#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
+#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
+#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
+#define GEN6_RC_CTL_HW_ENABLE (1<<31)
+#define GEN6_RP_DOWN_TIMEOUT 0xA010
+#define GEN6_RP_INTERRUPT_LIMITS 0xA014
+#define GEN6_RPSTAT1 0xA01C
+#define GEN6_CAGF_SHIFT 8
+#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
+#define GEN6_RP_CONTROL 0xA024
+#define GEN6_RP_MEDIA_TURBO (1<<11)
+#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
+#define GEN6_RP_MEDIA_HW_MODE (1<<9)
+#define GEN6_RP_MEDIA_SW_MODE (0<<9)
+#define GEN6_RP_MEDIA_IS_GFX (1<<8)
+#define GEN6_RP_ENABLE (1<<7)
+#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
+#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
+#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
+#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
+#define GEN6_RP_UP_THRESHOLD 0xA02C
+#define GEN6_RP_DOWN_THRESHOLD 0xA030
+#define GEN6_RP_CUR_UP_EI 0xA050
+#define GEN6_CURICONT_MASK 0xffffff
+#define GEN6_RP_CUR_UP 0xA054
+#define GEN6_CURBSYTAVG_MASK 0xffffff
+#define GEN6_RP_PREV_UP 0xA058
+#define GEN6_RP_CUR_DOWN_EI 0xA05C
+#define GEN6_CURIAVG_MASK 0xffffff
+#define GEN6_RP_CUR_DOWN 0xA060
+#define GEN6_RP_PREV_DOWN 0xA064
+#define GEN6_RP_UP_EI 0xA068
+#define GEN6_RP_DOWN_EI 0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS 0xA070
+#define GEN6_RC_STATE 0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
+#define GEN6_RC_SLEEP 0xA0B0
+#define GEN6_RC1e_THRESHOLD 0xA0B4
+#define GEN6_RC6_THRESHOLD 0xA0B8
+#define GEN6_RC6p_THRESHOLD 0xA0BC
+#define GEN6_RC6pp_THRESHOLD 0xA0C0
+#define GEN6_PMINTRMSK 0xA168
+
+#define GEN6_PMISR 0x44020
+#define GEN6_PMIMR 0x44024 /* rps_lock */
+#define GEN6_PMIIR 0x44028
+#define GEN6_PMIER 0x4402C
+#define GEN6_PM_MBOX_EVENT (1<<25)
+#define GEN6_PM_THERMAL_EVENT (1<<24)
+#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
+#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
+#define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
+ GEN6_PM_RP_DOWN_THRESHOLD | \
+ GEN6_PM_RP_DOWN_TIMEOUT)
+
+#define GEN6_PCODE_MAILBOX 0x138124
+#define GEN6_PCODE_READY (1<<31)
+#define GEN6_READ_OC_PARAMS 0xc
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_DATA 0x138128
+#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
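+
+/* Mailbox sketch (illustrative only): write the payload, then the
+ * command with READY set, and wait for hardware to clear READY.
+ *
+ *	I915_WRITE(GEN6_PCODE_DATA, data);
+ *	I915_WRITE(GEN6_PCODE_MAILBOX,
+ *	    GEN6_PCODE_READY | GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ *	while (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
+ *		DELAY(10);
+ */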
+
+#define GEN6_GT_CORE_STATUS 0x138060
+#define GEN6_CORE_CPD_STATE_MASK (7<<4)
+#define GEN6_RCn_MASK 7
+#define GEN6_RC0 0
+#define GEN6_RC3 2
+#define GEN6_RC6 3
+#define GEN6_RC7 4
+
+#define G4X_AUD_VID_DID 0x62020
+#define INTEL_AUDIO_DEVCL 0x808629FB
+#define INTEL_AUDIO_DEVBLC 0x80862801
+#define INTEL_AUDIO_DEVCTG 0x80862802
+
+#define G4X_AUD_CNTL_ST 0x620B4
+#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
+#define G4X_ELDV_DEVCTG (1 << 14)
+#define G4X_ELD_ADDR (0xf << 5)
+#define G4X_ELD_ACK (1 << 4)
+#define G4X_HDMIW_HDMIEDID 0x6210C
+
+#define IBX_HDMIW_HDMIEDID_A 0xE2050
+#define IBX_AUD_CNTL_ST_A 0xE20B4
+#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
+#define IBX_ELD_ADDRESS (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
+#define IBX_AUD_CNTL_ST2 0xE20C0
+#define IBX_ELD_VALIDB (1 << 0)
+#define IBX_CP_READYB (1 << 1)
+
+#define CPT_HDMIW_HDMIEDID_A 0xE5050
+#define CPT_AUD_CNTL_ST_A 0xE50B4
+#define CPT_AUD_CNTRL_ST2 0xE50C0
+
+/* These are the four 32-bit write offset registers, one per stream
+ * output buffer.  Each determines the offset from its
+ * 3DSTATE_SO_BUFFER that the next streamed vertex output goes to.
+ */
+#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
+
+#define IBX_AUD_CONFIG_A 0xe2000
+#define CPT_AUD_CONFIG_A 0xe5000
+#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
+#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
+#define AUD_CONFIG_UPPER_N_SHIFT 20
+#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
+#define AUD_CONFIG_LOWER_N_SHIFT 4
+#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
+#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+
+#endif /* _I915_REG_H_ */
diff --git a/sys/dev/drm2/i915/i915_suspend.c b/sys/dev/drm2/i915/i915_suspend.c
new file mode 100644
index 0000000..1e219a1
--- /dev/null
+++ b/sys/dev/drm2/i915/i915_suspend.c
@@ -0,0 +1,909 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/intel_drv.h>
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll_reg;
+
+	/* On IVB, the 3rd pipe shares its PLL with another pipe */
+ if (pipe > 1)
+ return false;
+
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = PCH_DPLL(pipe);
+ else
+ dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
+
+ return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->save_palette_a;
+ else
+ array = dev_priv->save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->save_palette_a;
+ else
+ array = dev_priv->save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ I915_WRITE(reg + (i << 2), array[i]);
+}
+
+static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE8(index_port, reg);
+ return I915_READ8(data_port);
+}
+
+static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
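+	/* Reading ST01 resets the attribute controller index/data
+	 * flip-flop, so the next AR_INDEX write is taken as an index. */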
+ I915_READ8(st01);
+ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+ return I915_READ8(VGA_AR_DATA_READ);
+}
+
+static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_READ8(st01);
+ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+ I915_WRITE8(VGA_AR_DATA_WRITE, val);
+}
+
+static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE8(index_port, reg);
+ I915_WRITE8(data_port, val);
+}
+
+static void i915_save_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ u16 cr_index, cr_data, st01;
+
+ /* VGA color palette registers */
+ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
+
+ /* MSR bits */
+ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* CRT controller regs */
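+	/* Clearing bit 7 of CR11 unlocks the write protection on CRTC
+	 * registers CR0-CR7. */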
+ i915_write_indexed(dev, cr_index, cr_data, 0x11,
+ i915_read_indexed(dev, cr_index, cr_data, 0x11) &
+ (~0x80));
+ for (i = 0; i <= 0x24; i++)
+ dev_priv->saveCR[i] =
+ i915_read_indexed(dev, cr_index, cr_data, i);
+ /* Make sure we don't turn off CR group 0 writes */
+ dev_priv->saveCR[0x11] &= ~0x80;
+
+ /* Attribute controller registers */
+ I915_READ8(st01);
+ dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+ for (i = 0; i <= 0x14; i++)
+ dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
+ I915_READ8(st01);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
+ I915_READ8(st01);
+
+ /* Graphics controller registers */
+ for (i = 0; i < 9; i++)
+ dev_priv->saveGR[i] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
+
+ dev_priv->saveGR[0x10] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+ dev_priv->saveGR[0x11] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+ dev_priv->saveGR[0x18] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+ /* Sequencer registers */
+ for (i = 0; i < 8; i++)
+ dev_priv->saveSR[i] =
+ i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void i915_restore_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ u16 cr_index, cr_data, st01;
+
+ /* MSR bits */
+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* Sequencer registers, don't write SR07 */
+ for (i = 0; i < 7; i++)
+ i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
+ dev_priv->saveSR[i]);
+
+ /* CRT controller regs */
+ /* Enable CR group 0 writes */
+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+ for (i = 0; i <= 0x24; i++)
+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
+
+ /* Graphics controller regs */
+ for (i = 0; i < 9; i++)
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
+ dev_priv->saveGR[i]);
+
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+ dev_priv->saveGR[0x10]);
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+ dev_priv->saveGR[0x11]);
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+ dev_priv->saveGR[0x18]);
+
+ /* Attribute controller registers */
+ I915_READ8(st01); /* switch back to index mode */
+ for (i = 0; i <= 0x14; i++)
+ i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
+ I915_READ8(st01); /* switch back to index mode */
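+	/* Bit 5 of AR_INDEX (palette address source) must be set to
+	 * re-enable normal display output. */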
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
+ I915_READ8(st01);
+
+ /* VGA color palette registers */
+ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
+}
+
+static void i915_save_modeset_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ /* Cursor state */
+ dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ } else {
+ dev_priv->saveFPA0 = I915_READ(_FPA0);
+ dev_priv->saveFPA1 = I915_READ(_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+ dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
+
+ /* Pipe & plane B info */
+ dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ } else {
+ dev_priv->saveFPB0 = I915_READ(_FPB0);
+ dev_priv->saveFPB1 = I915_READ(_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+ dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
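+		/* FALLTHROUGH: gen3 also saves the eight gen2 fence registers. */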
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
+ return;
+}
+
+static void i915_restore_modeset_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_a_reg, fpa0_reg, fpa1_reg;
+ int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dpll_a_reg = _PCH_DPLL_A;
+ dpll_b_reg = _PCH_DPLL_B;
+ fpa0_reg = _PCH_FPA0;
+ fpb0_reg = _PCH_FPB0;
+ fpa1_reg = _PCH_FPA1;
+ fpb1_reg = _PCH_FPB1;
+ } else {
+ dpll_a_reg = _DPLL_A;
+ dpll_b_reg = _DPLL_B;
+ fpa0_reg = _FPA0;
+ fpb0_reg = _FPB0;
+ fpa1_reg = _FPA1;
+ fpb1_reg = _FPB1;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ /* Prime the clock */
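+	/* Write the DPLL with the VCO disabled first so the divisors can
+	 * be programmed before the PLL is switched on. */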
+ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_a_reg);
+ DRM_UDELAY(150);
+ }
+ I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
+ /* Actually enable it */
+ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
+ POSTING_READ(dpll_a_reg);
+ DRM_UDELAY(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ POSTING_READ(_DPLL_A_MD);
+ }
+ DRM_UDELAY(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+
+ I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+
+ I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ }
+
+ I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
+
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
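+	/* Rewriting the base address register arms the double-buffered
+	 * plane registers so the restored state is latched. */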
+ I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
+
+ /* Pipe & plane B info */
+ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_b_reg);
+ DRM_UDELAY(150);
+ }
+ I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
+ /* Actually enable it */
+ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
+ POSTING_READ(dpll_b_reg);
+ DRM_UDELAY(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ POSTING_READ(_DPLL_B_MD);
+ }
+ DRM_UDELAY(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+
+ I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+
+ I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ }
+
+ I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
+
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
+
+ /* Cursor state */
+ I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
+ return;
+}
+
+static void i915_save_display(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Display arbitration control */
+ dev_priv->saveDSPARB = I915_READ(DSPARB);
+
+	/* Modeset state is only meaningful in non-KMS mode; the save is
+	 * skipped under KMS. */
+ i915_save_modeset_reg(dev);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->saveADPA = I915_READ(PCH_ADPA);
+ } else {
+ dev_priv->saveADPA = I915_READ(ADPA);
+ }
+
+ /* LVDS state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ dev_priv->saveLVDS = I915_READ(PCH_LVDS);
+ } else {
+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ dev_priv->saveLVDS = I915_READ(LVDS);
+ }
+
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+ } else {
+ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ }
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->saveDP_B = I915_READ(DP_B);
+ dev_priv->saveDP_C = I915_READ(DP_C);
+ dev_priv->saveDP_D = I915_READ(DP_D);
+ dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+ }
+ /* FIXME: save TV & SDVO state */
+
+	/* Only save FBC state on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+ } else if (IS_GM45(dev)) {
+ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ } else {
+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ }
+ }
+
+ /* VGA state */
+ dev_priv->saveVGA0 = I915_READ(VGA0);
+ dev_priv->saveVGA1 = I915_READ(VGA1);
+ dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+ else
+ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+
+ i915_save_vga(dev);
+}
+
+static void i915_restore_display(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Display arbitration */
+ I915_WRITE(DSPARB, dev_priv->saveDSPARB);
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ }
+
+	/* Modeset state is only meaningful in non-KMS mode; the restore
+	 * is skipped under KMS. */
+ i915_restore_modeset_reg(dev);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->saveADPA);
+
+ /* LVDS state */
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
+ } else if (IS_MOBILE(dev) && !IS_I830(dev))
+ I915_WRITE(LVDS, dev_priv->saveLVDS);
+
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
+ I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+ I915_WRITE(RSTDBYCTL,
+ dev_priv->saveMCHBAR_RENDER_STANDBY);
+ } else {
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ }
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->saveDP_B);
+ I915_WRITE(DP_C, dev_priv->saveDP_C);
+ I915_WRITE(DP_D, dev_priv->saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
+
+	/* Only restore FBC state on platforms that support FBC */
+ intel_disable_fbc(dev);
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else {
+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ }
+ }
+ /* VGA state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+ else
+ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+
+ I915_WRITE(VGA0, dev_priv->saveVGA0);
+ I915_WRITE(VGA1, dev_priv->saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+ POSTING_READ(VGA_PD);
+ DRM_UDELAY(150);
+
+ i915_restore_vga(dev);
+}
+
+int i915_save_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ dev_priv->saveLBB = pci_read_config(dev->device, LBB, 1);
+
+ /* Hardware status page */
+ dev_priv->saveHWS = I915_READ(HWS_PGA);
+
+ i915_save_display(dev);
+
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->saveDEIER = I915_READ(DEIER);
+ dev_priv->saveDEIMR = I915_READ(DEIMR);
+ dev_priv->saveGTIER = I915_READ(GTIER);
+ dev_priv->saveGTIMR = I915_READ(GTIMR);
+ dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+ dev_priv->saveMCHBAR_RENDER_STANDBY =
+ I915_READ(RSTDBYCTL);
+ dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+ } else {
+ dev_priv->saveIER = I915_READ(IER);
+ dev_priv->saveIMR = I915_READ(IMR);
+ }
+
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_drps(dev);
+ if (INTEL_INFO(dev)->gen >= 6)
+ gen6_disable_rps(dev);
+
+ /* Cache mode state */
+ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+ /* Memory Arbitration state */
+ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+ /* Scratch space */
+ for (i = 0; i < 16; i++) {
+ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ }
+ for (i = 0; i < 3; i++)
+ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+ return 0;
+}
+
+int i915_restore_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1);
+
+ /* Hardware status page */
+ I915_WRITE(HWS_PGA, dev_priv->saveHWS);
+
+ i915_restore_display(dev);
+
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DEIER, dev_priv->saveDEIER);
+ I915_WRITE(DEIMR, dev_priv->saveDEIMR);
+ I915_WRITE(GTIER, dev_priv->saveGTIER);
+ I915_WRITE(GTIMR, dev_priv->saveGTIMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
+ } else {
+ I915_WRITE(IER, dev_priv->saveIER);
+ I915_WRITE(IMR, dev_priv->saveIMR);
+ }
+ DRM_UNLOCK(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_init_clock_gating(dev);
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ intel_init_emon(dev);
+ }
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
+
+ DRM_LOCK(dev);
+
+ /* Cache mode state */
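+	/* CACHE_MODE_0 and MI_ARB_STATE are masked registers: the high
+	 * 16 bits select which of the low 16 bits a write may change. */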
+ I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+
+ /* Memory arbitration state */
+ I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+
+ for (i = 0; i < 16; i++) {
+ I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+
+ intel_iic_reset(dev);
+
+ return 0;
+}
diff --git a/sys/dev/drm2/i915/intel_bios.c b/sys/dev/drm2/i915/intel_bios.c
new file mode 100644
index 0000000..8bf38a5
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_bios.c
@@ -0,0 +1,737 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ * $FreeBSD$
+ */
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_dp_helper.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_bios.h>
+
+#define SLAVE_ADDR1 0x70
+#define SLAVE_ADDR2 0x72
+
+static int panel_type;
+
+static void *
+find_section(struct bdb_header *bdb, int section_id)
+{
+ u8 *base = (u8 *)bdb;
+ int index = 0;
+ u16 total, current_size;
+ u8 current_id;
+
+ /* skip to first section */
+ index += bdb->header_size;
+ total = bdb->bdb_size;
+
+ /* walk the sections looking for section_id */
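+	/* Each section is a 1-byte ID followed by a 2-byte little-endian
+	 * size and then the payload. */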
+ while (index < total) {
+ current_id = *(base + index);
+ index++;
+ current_size = *((u16 *)(base + index));
+ index += 2;
+ if (current_id == section_id)
+ return base + index;
+ index += current_size;
+ }
+
+ return NULL;
+}
+
+static u16
+get_blocksize(void *p)
+{
+ u16 *block_ptr, block_size;
+
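+	/* find_section() returns a pointer just past the 2-byte size
+	 * field, so the block size lives immediately before it. */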
+ block_ptr = (u16 *)((char *)p - 2);
+ block_size = *block_ptr;
+ return block_size;
+}
+
+static void
+fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+ const struct lvds_dvo_timing *dvo_timing)
+{
+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+ dvo_timing->hactive_lo;
+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+ dvo_timing->hsync_pulse_width;
+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+ dvo_timing->vactive_lo;
+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+ dvo_timing->vsync_off;
+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+ dvo_timing->vsync_pulse_width;
+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+ panel_fixed_mode->clock = dvo_timing->clock * 10;
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ /* Some VBTs have bogus h/vtotal values */
+ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+ drm_mode_set_name(panel_fixed_mode);
+}
+
+static bool
+lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
+ const struct lvds_dvo_timing *b)
+{
+ if (a->hactive_hi != b->hactive_hi ||
+ a->hactive_lo != b->hactive_lo)
+ return false;
+
+ if (a->hsync_off_hi != b->hsync_off_hi ||
+ a->hsync_off_lo != b->hsync_off_lo)
+ return false;
+
+ if (a->hsync_pulse_width != b->hsync_pulse_width)
+ return false;
+
+ if (a->hblank_hi != b->hblank_hi ||
+ a->hblank_lo != b->hblank_lo)
+ return false;
+
+ if (a->vactive_hi != b->vactive_hi ||
+ a->vactive_lo != b->vactive_lo)
+ return false;
+
+ if (a->vsync_off != b->vsync_off)
+ return false;
+
+ if (a->vsync_pulse_width != b->vsync_pulse_width)
+ return false;
+
+ if (a->vblank_hi != b->vblank_hi ||
+ a->vblank_lo != b->vblank_lo)
+ return false;
+
+ return true;
+}
+
+static const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+ int index)
+{
+	/*
+	 * The size of fp_timing varies between platforms, so compute the
+	 * offset of the DVO timing within each LVDS data entry to locate
+	 * the DVO timing entry.
+	 */
+
+ int lfp_data_size =
+ lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ int dvo_timing_offset =
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+ const char *entry = (const char *)lvds_lfp_data->data +
+ lfp_data_size * index;
+
+ return (const struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+}
+
+/* Try to find integrated panel data */
+static void
+parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ const struct bdb_lvds_options *lvds_options;
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int i, downclock;
+
+ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return;
+
+ dev_priv->lvds_dither = lvds_options->pixel_dither;
+ if (lvds_options->panel_type == 0xff)
+ return;
+
+ panel_type = lvds_options->panel_type;
+
+ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!lvds_lfp_data)
+ return;
+
+ lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
+ if (!lvds_lfp_data_ptrs)
+ return;
+
+ dev_priv->lvds_vbt = 1;
+
+ panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
+
+ panel_fixed_mode = malloc(sizeof(*panel_fixed_mode), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
+
+ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+
+ /*
+ * Iterate over the LVDS panel timing info to find the lowest clock
+ * for the native resolution.
+ */
+ downclock = panel_dvo_timing->clock;
+ for (i = 0; i < 16; i++) {
+ const struct lvds_dvo_timing *dvo_timing;
+
+ dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ i);
+ if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
+ dvo_timing->clock < downclock)
+ downclock = dvo_timing->clock;
+ }
+
+ if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = downclock * 10;
+ DRM_DEBUG("LVDS downclock is found in VBT. "
+ "Normal Clock %dKHz, downclock %dKHz\n",
+ panel_fixed_mode->clock, 10 * downclock);
+ }
+}
+
+/* Try to find sdvo panel data */
+static void
+parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int index;
+
+ index = i915_vbt_sdvo_panel_type;
+ if (index == -1) {
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ index = sdvo_lvds_options->panel_type;
+ }
+
+ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+ if (!dvo_timing)
+ return;
+
+ panel_fixed_mode = malloc(sizeof(*panel_fixed_mode), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
+
+ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+ DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+static int intel_bios_ssc_frequency(struct drm_device *dev,
+ bool alternate)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ return alternate ? 66 : 48;
+ case 3:
+ case 4:
+ return alternate ? 100 : 96;
+ default:
+ return alternate ? 100 : 120;
+ }
+}
+
+static void
+parse_general_features(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct bdb_general_features *general;
+
+ general = find_section(bdb, BDB_GENERAL_FEATURES);
+ if (general) {
+ dev_priv->int_tv_support = general->int_tv_support;
+ dev_priv->int_crt_support = general->int_crt_support;
+ dev_priv->lvds_use_ssc = general->enable_ssc;
+ dev_priv->lvds_ssc_freq =
+ intel_bios_ssc_frequency(dev, general->ssc_freq);
+ dev_priv->display_clock_mode = general->display_clock_mode;
+ DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
+ dev_priv->int_tv_support,
+ dev_priv->int_crt_support,
+ dev_priv->lvds_use_ssc,
+ dev_priv->lvds_ssc_freq,
+ dev_priv->display_clock_mode);
+ }
+}
+
+static void
+parse_general_definitions(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *general;
+
+ general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (general) {
+ u16 block_size = get_blocksize(general);
+ if (block_size >= sizeof(*general)) {
+ int bus_pin = general->crt_ddc_gmbus_pin;
+ DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+ if (bus_pin >= 1 && bus_pin <= 6)
+ dev_priv->crt_ddc_pin = bus_pin;
+ } else {
+ DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
+ block_size);
+ }
+ }
+}
+
+static void
+parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct sdvo_device_mapping *p_mapping;
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
+ return;
+ }
+	/* Check whether the size of the child device entry meets
+	 * expectations.  If the child device size obtained from the
+	 * general definitions block differs from
+	 * sizeof(struct child_device_config), skip the parsing of the
+	 * SDVO device info.
+	 */
+	if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* Different child device size; ignore the block. */
+		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+		return;
+	}
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ if (p_child->slave_addr != SLAVE_ADDR1 &&
+ p_child->slave_addr != SLAVE_ADDR2) {
+ /*
+ * If the slave address is neither 0x70 nor 0x72,
+ * it is not a SDVO device. Skip it.
+ */
+ continue;
+ }
+ if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+ p_child->dvo_port != DEVICE_PORT_DVOC) {
+ /* skip the incorrect SDVO port */
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ continue;
+ }
+		DRM_DEBUG_KMS("SDVO device with slave addr %2x found on"
+			      " %s port\n",
+ p_child->slave_addr,
+ (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ if (!p_mapping->initialized) {
+ p_mapping->dvo_port = p_child->dvo_port;
+ p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
+ p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin);
+ } else {
+			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+				      "two SDVO devices.\n");
+ }
+		if (p_child->slave2_addr) {
+			/* Maybe this is an SDVO device with multiple inputs;
+			 * the mapping info for the second input is not added. */
+			DRM_DEBUG_KMS("slave2_addr is set. Maybe this is an "
+				"SDVO device with multiple inputs.\n");
+		}
+ count++;
+ }
+
+ if (!count) {
+ /* No SDVO device info is found */
+ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ }
+ return;
+}
+
+static void
+parse_driver_features(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct bdb_driver_features *driver;
+
+ driver = find_section(bdb, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
+
+ if (SUPPORTS_EDP(dev) &&
+ driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->edp.support = 1;
+
+ if (driver->dual_frequency)
+ dev_priv->render_reclock_avail = true;
+}
+
+static void
+parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_edp *edp;
+ struct edp_power_seq *edp_pps;
+ struct edp_link_params *edp_link_params;
+
+ edp = find_section(bdb, BDB_EDP);
+ if (!edp) {
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
+ "supported, assume %dbpp panel color "
+ "depth.\n",
+ dev_priv->edp.bpp);
+ }
+ return;
+ }
+
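+	/* color_depth packs a 2-bit depth code per panel; extract the
+	 * field for this panel type. */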
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+ dev_priv->edp.bpp = 18;
+ break;
+ case EDP_24BPP:
+ dev_priv->edp.bpp = 24;
+ break;
+ case EDP_30BPP:
+ dev_priv->edp.bpp = 30;
+ break;
+ }
+
+ /* Get the eDP sequencing and link info */
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->link_params[panel_type];
+
+ dev_priv->edp.pps = *edp_pps;
+
+ dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+ DP_LINK_BW_1_62;
+ switch (edp_link_params->lanes) {
+ case 0:
+ dev_priv->edp.lanes = 1;
+ break;
+ case 1:
+ dev_priv->edp.lanes = 2;
+ break;
+ case 3:
+ default:
+ dev_priv->edp.lanes = 4;
+ break;
+ }
+ switch (edp_link_params->preemphasis) {
+ case 0:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+ break;
+ case 1:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+ break;
+ case 2:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+ break;
+ case 3:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+ break;
+ }
+ switch (edp_link_params->vswing) {
+ case 0:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+ break;
+ case 1:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+ break;
+ case 2:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+ break;
+ case 3:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+ break;
+ }
+}
+
+static void
+parse_device_mapping(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ return;
+ }
+	/* Check whether the size of the child device entry meets
+	 * expectations.  If the child device size obtained from the
+	 * general definitions block differs from
+	 * sizeof(struct child_device_config), skip the parsing of the
+	 * child device info.
+	 */
+	if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* Different child device size; ignore the block. */
+		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+		return;
+	}
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ /* get the number of child device that is present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ return;
+ }
+ dev_priv->child_dev = malloc(sizeof(*p_child) * count, DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ dev_priv->child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->child_dev + count;
+ count++;
+ memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+ return;
+}
+
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
+
+ /* LFP panel data */
+ dev_priv->lvds_dither = 1;
+ dev_priv->lvds_vbt = 0;
+
+ /* SDVO panel data */
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+ /* general features */
+ dev_priv->int_tv_support = 1;
+ dev_priv->int_crt_support = 1;
+
+ /* Default to using SSC */
+ dev_priv->lvds_use_ssc = 1;
+ dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
+
+ /* eDP data */
+ dev_priv->edp.bpp = 18;
+}
+
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+ "VBIOS ROM for %s\n",
+ id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+ {
+ .callback = intel_no_opregion_vbt_callback,
+ .ident = "ThinkCentre A57",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+ },
+ },
+ { }
+};
+
+/**
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
+ * to appropriate values.
+ *
+ * Returns 0 (false) on success, nonzero on failure.
+ */
+bool
+intel_parse_bios(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct bdb_header *bdb = NULL;
+ u8 *bios;
+
+ init_vbt_defaults(dev_priv);
+
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+ DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
+ vbt->signature);
+ bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+ } else
+ dev_priv->opregion.vbt = NULL;
+ }
+ bios = NULL;
+
+#if 1
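+	/* Scanning the VBIOS ROM for the VBT is not yet implemented in
+	 * this port; the Linux code is kept below for reference. */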
+ if (bdb == NULL) {
+ KIB_NOTYET();
+ return (-1);
+ }
+#else
+ if (bdb == NULL) {
+ struct vbt_header *vbt = NULL;
+ size_t size;
+ int i;
+
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
+ }
+
+ if (!vbt) {
+ DRM_DEBUG_DRIVER("VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+ }
+#endif
+
+ /* Grab useful general definitions */
+ parse_general_features(dev_priv, bdb);
+ parse_general_definitions(dev_priv, bdb);
+ parse_lfp_panel_data(dev_priv, bdb);
+ parse_sdvo_panel_data(dev_priv, bdb);
+ parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
+ parse_driver_features(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
+
+#if 0
+ if (bios)
+ pci_unmap_rom(pdev, bios);
+#endif
+
+ return 0;
+}
+
+/* Ensure that vital registers have been initialised, even if the BIOS
+ * is absent or just failing to do its job.
+ */
+void intel_setup_bios(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Set the Panel Power On/Off timings if uninitialized. */
+ if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
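+		/* Delay fields are in units of 100 us: 0x0190 = 400
+		 * (40 ms), 0x07d0 = 2000 (200 ms), 0x015e = 350 (35 ms). */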
+ /* Set T2 to 40ms and T5 to 200ms */
+ I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+ /* Set T3 to 35ms and Tx to 200ms */
+ I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+ }
+}
diff --git a/sys/dev/drm2/i915/intel_bios.h b/sys/dev/drm2/i915/intel_bios.h
new file mode 100644
index 0000000..186409c
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_bios.h
@@ -0,0 +1,620 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include <dev/drm2/drmP.h>
+
+struct vbt_header {
+	u8 signature[20];		/**< Always starts with "$VBT" */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 vbt_size; /**< in bytes */
+ u8 vbt_checksum;
+ u8 reserved0;
+ u32 bdb_offset; /**< from beginning of VBT */
+ u32 aim_offset[4]; /**< from beginning of VBT */
+} __attribute__((packed));
+
+struct bdb_header {
+ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 bdb_size; /**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+ u8 type; /* 0 == desktop, 1 == mobile */
+ u8 relstage;
+ u8 chipset;
+ u8 lvds_present:1;
+ u8 tv_present:1;
+ u8 rsvd2:6; /* finish byte */
+ u8 rsvd3[4];
+ u8 signon[155];
+ u8 copyright[61];
+ u16 code_segment;
+ u8 dos_boot_mode;
+ u8 bandwidth_percent;
+ u8 rsvd4; /* popup memory size */
+ u8 resize_pci_bios;
+ u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES 1
+#define BDB_GENERAL_DEFINITIONS 2
+#define BDB_OLD_TOGGLE_LIST 3
+#define BDB_MODE_SUPPORT_LIST 4
+#define BDB_GENERIC_MODE_TABLE 5
+#define BDB_EXT_MMIO_REGS 6
+#define BDB_SWF_IO 7
+#define BDB_SWF_MMIO 8
+#define BDB_DOT_CLOCK_TABLE 9
+#define BDB_MODE_REMOVAL_TABLE 10
+#define BDB_CHILD_DEVICE_TABLE 11
+#define BDB_DRIVER_FEATURES 12
+#define BDB_DRIVER_PERSISTENCE 13
+#define BDB_EXT_TABLE_PTRS 14
+#define BDB_DOT_CLOCK_OVERRIDE 15
+#define BDB_DISPLAY_SELECT 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION 18
+#define BDB_DISPLAY_REMOVE 19
+#define BDB_OEM_CUSTOM 20
+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS 22
+#define BDB_SDVO_PANEL_DTDS 23
+#define BDB_SDVO_LVDS_PNP_IDS 24
+#define BDB_SDVO_LVDS_POWER_SEQ 25
+#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
+#define BDB_LVDS_OPTIONS 40
+#define BDB_LVDS_LFP_DATA_PTRS 41
+#define BDB_LVDS_LFP_DATA 42
+#define BDB_LVDS_BACKLIGHT 43
+#define BDB_LVDS_POWER 44
+#define BDB_SKIP 254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+ /* bits 1 */
+ u8 panel_fitting:2;
+ u8 flexaim:1;
+ u8 msg_enable:1;
+ u8 clear_screen:3;
+ u8 color_flip:1;
+
+ /* bits 2 */
+ u8 download_ext_vbt:1;
+ u8 enable_ssc:1;
+ u8 ssc_freq:1;
+ u8 enable_lfp_on_override:1;
+ u8 disable_ssc_ddt:1;
+ u8 rsvd7:1;
+ u8 display_clock_mode:1;
+ u8 rsvd8:1; /* finish byte */
+
+ /* bits 3 */
+ u8 disable_smooth_vision:1;
+ u8 single_dvi:1;
+ u8 rsvd9:6; /* finish byte */
+
+ /* bits 4 */
+ u8 legacy_monitor_detect;
+
+ /* bits 5 */
+ u8 int_crt_support:1;
+ u8 int_tv_support:1;
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
+} __attribute__((packed));
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+struct child_device_config {
+ u16 handle;
+ u16 device_type;
+ u8 device_id[10]; /* ascii string */
+ u16 addin_offset;
+ u8 dvo_port; /* See Device_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ u8 capabilities;
+ u8 dvo_wiring;/* See DEVICE_WIRE_* above */
+ u8 dvo2_wiring;
+ u16 extended_type;
+ u8 dvo_function;
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it could be
+	 * as few as 4 if both TV and LVDS are missing.
+	 * The device count depends on the size of the general definitions
+	 * block and is obtained with the following formula:
+ * number = (block_size - sizeof(bdb_general_definitions))/
+ * sizeof(child_device_config);
+ */
+ struct child_device_config devices[0];
+} __attribute__((packed));
+
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 rsvd1;
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10khz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width;
+ u8 vsync_pulse_width:4;
+ u8 vsync_off:4;
+ u8 rsvd0:6;
+ u8 hsync_off_hi:2;
+ u8 h_image;
+ u8 v_image;
+ u8 max_hv;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 rsvd2:1;
+} __attribute__((packed));
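+
+/*
+ * Reassembly sketch: the DVO timing splits most fields into lo/hi
+ * pieces, so a parser would rebuild them as, for example,
+ *
+ *     hactive = (dvo->hactive_hi << 8) | dvo->hactive_lo;
+ *     hblank = (dvo->hblank_hi << 8) | dvo->hblank_lo;
+ *
+ * ('dvo' being a hypothetical struct lvds_dvo_timing pointer).
+ */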
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+ struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+ char signature[16];
+ char oem_device[20];
+ u16 aimdb_version;
+ u16 aimdb_header_size;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+ u8 aimdb_id;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+ u16 fp_timing_offset;
+ u8 fp_timing_size;
+ u16 dvo_timing_offset;
+ u8 dvo_timing_size;
+ u16 text_fitting_offset;
+ u8 text_fitting_size;
+ u16 graphics_fitting_offset;
+ u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+ struct aimdb_block aimdb_block;
+ struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __attribute__((packed));
+
+
+#define BDB_DRIVER_FEATURE_NO_LVDS 0
+#define BDB_DRIVER_FEATURE_INT_LVDS 1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
+#define BDB_DRIVER_FEATURE_EDP 3
+
+struct bdb_driver_features {
+ u8 boot_dev_algorithm:1;
+ u8 block_display_switch:1;
+ u8 allow_display_switch:1;
+ u8 hotplug_dvo:1;
+ u8 dual_view_zoom:1;
+ u8 int15h_hook:1;
+ u8 sprite_in_clone:1;
+ u8 primary_lfp_id:1;
+
+ u16 boot_mode_x;
+ u16 boot_mode_y;
+ u8 boot_mode_bpp;
+ u8 boot_mode_refresh;
+
+ u16 enable_lfp_primary:1;
+ u16 selective_mode_pruning:1;
+ u16 dual_frequency:1;
+ u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+ u16 nt_clone_support:1;
+ u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+ u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+ u16 cui_aspect_scaling:1;
+ u16 preserve_aspect_ratio:1;
+ u16 sdvo_device_power_down:1;
+ u16 crt_hotplug:1;
+ u16 lvds_config:2;
+ u16 tv_hotplug:1;
+ u16 hdmi_config:2;
+
+ u8 static_display:1;
+ u8 reserved2:7;
+ u16 legacy_crt_max_x;
+ u16 legacy_crt_max_y;
+ u8 legacy_crt_max_refresh;
+
+ u8 hdmi_termination;
+ u8 custom_vbt_version;
+} __attribute__((packed));
+
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __attribute__ ((packed));
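+
+/*
+ * The T1/T3, T8, T9, T10 and T11/T12 fields above are the eDP panel
+ * power sequencing delays; VBTs typically store them in 100 us units,
+ * so a driver would scale them before programming the panel power
+ * registers.
+ */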
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __attribute__ ((packed));
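+
+/*
+ * Decoding sketch: the bitfields above index the EDP_* values defined
+ * earlier, so, e.g., an entry with rate == EDP_RATE_2_7 and
+ * lanes == EDP_LANE_4 describes a 2.7 Gbps, 4-lane link.
+ */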
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ struct edp_link_params link_params[16];
+ u32 sdrrs_msa_timing_delay;
+
+ /* Bit i indicates enabled/disabled for the (i+1)th panel */
+ u16 edp_s3d_feature;
+ u16 edp_t3_optimization;
+} __attribute__ ((packed));
+
+void intel_setup_bios(struct drm_device *dev);
+bool intel_parse_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
+#define GR18_HK_NONE (0x0<<3)
+#define GR18_HK_LFP_STRETCH (0x1<<3)
+#define GR18_HK_TOGGLE_DISP (0x2<<3)
+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define GR18_HK_POPUP_DISABLED (0x6<<3)
+#define GR18_HK_POPUP_ENABLED (0x7<<3)
+#define GR18_HK_PFIT (0x8<<3)
+#define GR18_HK_APM_CHANGE (0xa<<3)
+#define GR18_HK_MULTIPLE (0xc<<3)
+#define GR18_USER_INT_EN (1<<2)
+#define GR18_A0000_FLUSH_EN (1<<1)
+#define GR18_SMM_EN (1<<0)
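+
+/*
+ * Decoding sketch ('gr18' being a hypothetical value read back from the
+ * register): the hotkey event type lives in bits 6:3, so
+ *
+ *     event = gr18 & GR18_HOTKEY_MASK;
+ *
+ * can be compared directly against the GR18_HK_* values above, with
+ * SWF14 15:0 carrying the event's payload.
+ */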
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT 16
+#define SWF00_XRES_SHIFT 0
+#define SWF00_RES_MASK 0xffff
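+
+/*
+ * Packing sketch: a driver reporting an x-by-y resolution would write
+ *
+ *     swf00 = (y << SWF00_YRES_SHIFT) | (x << SWF00_XRES_SHIFT);
+ *
+ * with each dimension masked to SWF00_RES_MASK.
+ */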
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK 0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN (1<<28)
+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define SWF10_OLD_TOGGLE 0x0
+#define SWF10_TOGGLE_LIST_1 0x1
+#define SWF10_TOGGLE_LIST_2 0x2
+#define SWF10_TOGGLE_LIST_3 0x3
+#define SWF10_TOGGLE_LIST_4 0x4
+#define SWF10_PANNING_EN (1<<23)
+#define SWF10_DRIVER_LOADED (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE (1<<20)
+#define SWF10_OVERLAY_EN (1<<19)
+#define SWF10_PLANEB_HOLDOFF (1<<18)
+#define SWF10_PLANEA_HOLDOFF (1<<17)
+#define SWF10_VGA_HOLDOFF (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define SWF10_PIPEB_LFP2 (1<<15)
+#define SWF10_PIPEB_EFP2 (1<<14)
+#define SWF10_PIPEB_TV2 (1<<13)
+#define SWF10_PIPEB_CRT2 (1<<12)
+#define SWF10_PIPEB_LFP (1<<11)
+#define SWF10_PIPEB_EFP (1<<10)
+#define SWF10_PIPEB_TV (1<<9)
+#define SWF10_PIPEB_CRT (1<<8)
+#define SWF10_PIPEA_LFP2 (1<<7)
+#define SWF10_PIPEA_EFP2 (1<<6)
+#define SWF10_PIPEA_TV2 (1<<5)
+#define SWF10_PIPEA_CRT2 (1<<4)
+#define SWF10_PIPEA_LFP (1<<3)
+#define SWF10_PIPEA_EFP (1<<2)
+#define SWF10_PIPEA_TV (1<<1)
+#define SWF10_PIPEA_CRT (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT 16
+#define SWF11_SV_TEST_EN (1<<15)
+#define SWF11_IS_AGP (1<<14)
+#define SWF11_DISPLAY_HOLDOFF (1<<13)
+#define SWF11_DPMS_REDUCED (1<<12)
+#define SWF11_IS_VBE_MODE (1<<11)
+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK 0x07
+#define SWF11_DPMS_OFF (1<<2)
+#define SWF11_DPMS_SUSPEND (1<<1)
+#define SWF11_DPMS_STANDBY (1<<0)
+#define SWF11_DPMS_ON 0
+
+#define SWF14_GFX_PFIT_EN (1<<31)
+#define SWF14_TEXT_PFIT_EN (1<<30)
+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN (1<<28)
+#define SWF14_DISPLAY_HOLDOFF (1<<27)
+#define SWF14_DISP_DETECT_EN (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS (1<<24)
+#define SWF14_OS_TYPE_WIN9X (1<<23)
+#define SWF14_OS_TYPE_WINNT (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK 0x00070000
+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
+#define SWF14_PM_ACPI (0x3 << 16)
+#define SWF14_PM_APM_12 (0x2 << 16)
+#define SWF14_PM_APM_11 (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
+ /* if GR18 indicates a display switch */
+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
+#define SWF14_DS_PIPEB_TV_EN (1<<9)
+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
+#define SWF14_DS_PIPEA_TV_EN (1<<1)
+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
+ /* if GR18 indicates a panel fitting request */
+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
+ /* if GR18 indicates an APM change request */
+#define SWF14_APM_HIBERNATE 0x4
+#define SWF14_APM_SUSPEND 0x3
+#define SWF14_APM_STANDBY 0x1
+#define SWF14_APM_RESTORE 0x0
+
+/* Device type classes for LFP, TV, HDMI, DP and eDP */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
+#endif /* _I830_BIOS_H_ */
diff --git a/sys/dev/drm2/i915/intel_crt.c b/sys/dev/drm2/i915/intel_crt.c
new file mode 100644
index 0000000..9072553
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_crt.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+ ADPA_CRT_HOTPLUG_WARMUP_10MS | \
+ ADPA_CRT_HOTPLUG_SAMPLE_4S | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
+ ADPA_CRT_HOTPLUG_VOLREF_325MV | \
+ ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+ struct intel_encoder base;
+ bool force_hotplug_required;
+};
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_crt, base);
+}
+
+static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp, reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = PCH_ADPA;
+ else
+ reg = ADPA;
+
+ temp = I915_READ(reg);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ }
+
+ I915_WRITE(reg, temp);
+}
+
+static int intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+
+ int max_clock = 0;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+
+ if (IS_GEN2(dev))
+ max_clock = 350000;
+ else
+ max_clock = 400000;
+ if (mode->clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void intel_crt_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_md_reg;
+ u32 adpa, dpll_md;
+ u32 adpa_reg;
+
+ dpll_md_reg = DPLL_MD(intel_crtc->pipe);
+
+ if (HAS_PCH_SPLIT(dev))
+ adpa_reg = PCH_ADPA;
+ else
+ adpa_reg = ADPA;
+
+ /*
+ * Disable separate mode multiplier used when cloning SDVO to CRT
+ * XXX this needs to be adjusted when we really are cloning
+ */
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ dpll_md = I915_READ(dpll_md_reg);
+ I915_WRITE(dpll_md_reg,
+ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+ }
+
+ adpa = ADPA_HOTPLUG_BITS;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+ /* For CPT allow 3 pipe config, for others just use A or B */
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
+ else if (intel_crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+
+ I915_WRITE(adpa_reg, adpa);
+}
+
+static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 adpa;
+ bool ret;
+
+ /* The first time through, trigger an explicit detection cycle */
+ if (crt->force_hotplug_required) {
+ bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ u32 save_adpa;
+
+ crt->force_hotplug_required = 0;
+
+ save_adpa = adpa = I915_READ(PCH_ADPA);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ if (turn_off_dac)
+ adpa &= ~ADPA_DAC_ENABLE;
+
+ I915_WRITE(PCH_ADPA, adpa);
+
+ if (_intel_wait_for(dev,
+ (I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000, 1, "915crt"))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER\n");
+
+ if (turn_off_dac) {
+ I915_WRITE(PCH_ADPA, save_adpa);
+ POSTING_READ(PCH_ADPA);
+ }
+ }
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = I915_READ(PCH_ADPA);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+ DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+ return ret;
+}
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Not for i915G/i915GM
+ *
+ * \return true if CRT is connected.
+ * \return false if CRT is disconnected.
+ */
+static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 hotplug_en, orig, stat;
+ bool ret = false;
+ int i, tries = 0;
+
+ if (HAS_PCH_SPLIT(dev))
+ return intel_ironlake_crt_detect_hotplug(connector);
+
+ /*
+ * On 4-series desktop chipsets, the CRT detect sequence needs to be
+ * done twice to get a reliable result.
+ */
+
+ if (IS_G4X(dev) && !IS_GM45(dev))
+ tries = 2;
+ else
+ tries = 1;
+ hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+ for (i = 0; i < tries ; i++) {
+ /* turn on the FORCE_DETECT */
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ /* wait for FORCE_DETECT to go off */
+ if (_intel_wait_for(dev,
+ (I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT) == 0,
+ 1000, 1, "915cr2"))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+ }
+
+ stat = I915_READ(PORT_HOTPLUG_STAT);
+ if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* clear the interrupt we just generated, if any */
+ I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+ /* and put the bits back */
+ I915_WRITE(PORT_HOTPLUG_EN, orig);
+
+ return ret;
+}
+
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
+{
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+
+ /* CRT should always be at 0, but check anyway */
+ if (crt->base.type != INTEL_OUTPUT_ANALOG)
+ return false;
+
+ if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
+ struct edid *edid;
+ bool is_digital = false;
+
+ edid = drm_get_edid(connector,
+ dev_priv->gmbus[dev_priv->crt_ddc_pin]);
+ /*
+ * This may be a DVI-I connector with a shared DDC
+ * link between analog and digital outputs, so we
+ * have to check the EDID input spec of the attached device.
+ *
+ * On the other hand, what should we do if it is a broken EDID?
+ */
+ if (edid != NULL) {
+ is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+ connector->display_info.raw_edid = NULL;
+ free(edid, DRM_MEM_KMS);
+ }
+
+ if (!is_digital) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ return true;
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ }
+ }
+
+ return false;
+}
+
+static enum drm_connector_status
+intel_crt_load_detect(struct intel_crt *crt)
+{
+ struct drm_device *dev = crt->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
+ uint32_t save_bclrpat;
+ uint32_t save_vtotal;
+ uint32_t vtotal, vactive;
+ uint32_t vsample;
+ uint32_t vblank, vblank_start, vblank_end;
+ uint32_t dsl;
+ uint32_t bclrpat_reg;
+ uint32_t vtotal_reg;
+ uint32_t vblank_reg;
+ uint32_t vsync_reg;
+ uint32_t pipeconf_reg;
+ uint32_t pipe_dsl_reg;
+ uint8_t st00;
+ enum drm_connector_status status;
+
+ DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
+ bclrpat_reg = BCLRPAT(pipe);
+ vtotal_reg = VTOTAL(pipe);
+ vblank_reg = VBLANK(pipe);
+ vsync_reg = VSYNC(pipe);
+ pipeconf_reg = PIPECONF(pipe);
+ pipe_dsl_reg = PIPEDSL(pipe);
+
+ save_bclrpat = I915_READ(bclrpat_reg);
+ save_vtotal = I915_READ(vtotal_reg);
+ vblank = I915_READ(vblank_reg);
+
+ vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
+ vactive = (save_vtotal & 0x7ff) + 1;
+
+ vblank_start = (vblank & 0xfff) + 1;
+ vblank_end = ((vblank >> 16) & 0xfff) + 1;
+
+ /* Set the border color to purple. */
+ I915_WRITE(bclrpat_reg, 0x500050);
+
+ if (!IS_GEN2(dev)) {
+ uint32_t pipeconf = I915_READ(pipeconf_reg);
+ I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+ POSTING_READ(pipeconf_reg);
+ /* Wait for the next vblank to substitute
+ * the border color for the color info */
+ intel_wait_for_vblank(dev, pipe);
+ st00 = I915_READ8(VGA_MSR_WRITE);
+ status = ((st00 & (1 << 4)) != 0) ?
+ connector_status_connected :
+ connector_status_disconnected;
+
+ I915_WRITE(pipeconf_reg, pipeconf);
+ } else {
+ bool restore_vblank = false;
+ int count, detect;
+
+ /*
+ * If there isn't any border, add some.
+ * Yes, this will flicker
+ */
+ if (vblank_start <= vactive && vblank_end >= vtotal) {
+ uint32_t vsync = I915_READ(vsync_reg);
+ uint32_t vsync_start = (vsync & 0xffff) + 1;
+
+ vblank_start = vsync_start;
+ I915_WRITE(vblank_reg,
+ (vblank_start - 1) |
+ ((vblank_end - 1) << 16));
+ restore_vblank = true;
+ }
+ /* sample in the vertical border, selecting the larger one */
+ if (vblank_start - vactive >= vtotal - vblank_end)
+ vsample = (vblank_start + vactive) >> 1;
+ else
+ vsample = (vtotal + vblank_end) >> 1;
+
+ /*
+ * Wait for the border to be displayed
+ */
+ while (I915_READ(pipe_dsl_reg) >= vactive)
+ ;
+ while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
+ ;
+ /*
+ * Watch ST00 for an entire scanline
+ */
+ detect = 0;
+ count = 0;
+ do {
+ count++;
+ /* Read the ST00 VGA status register */
+ st00 = I915_READ8(VGA_MSR_WRITE);
+ if (st00 & (1 << 4))
+ detect++;
+ } while ((I915_READ(pipe_dsl_reg) == dsl));
+
+ /* restore vblank if necessary */
+ if (restore_vblank)
+ I915_WRITE(vblank_reg, vblank);
+ /*
+ * If more than 3/4 of the scanline detected a monitor,
+ * then it is assumed to be present. This works even on i830,
+ * where there isn't any way to force the border color across
+ * the screen
+ */
+ status = detect * 4 > count * 3 ?
+ connector_status_connected :
+ connector_status_disconnected;
+ }
+
+ /* Restore previous settings */
+ I915_WRITE(bclrpat_reg, save_bclrpat);
+
+ return status;
+}
+
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ enum drm_connector_status status;
+ struct intel_load_detect_pipe tmp;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ if (intel_crt_detect_hotplug(connector)) {
+ DRM_DEBUG_KMS("CRT detected via hotplug\n");
+ return connector_status_connected;
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via hotplug\n");
+ return connector_status_disconnected;
+ }
+ }
+
+ if (intel_crt_detect_ddc(connector))
+ return connector_status_connected;
+
+ if (!force)
+ return connector->status;
+
+ /* for pre-945g platforms use load detect */
+ if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
+ &tmp)) {
+ if (intel_crt_detect_ddc(connector))
+ status = connector_status_connected;
+ else
+ status = intel_crt_load_detect(crt);
+ intel_release_load_detect_pipe(&crt->base, connector,
+ &tmp);
+ } else
+ status = connector_status_unknown;
+
+ return status;
+}
+
+static void intel_crt_destroy(struct drm_connector *connector)
+{
+
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, DRM_MEM_KMS);
+}
+
+static int intel_crt_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = intel_ddc_get_modes(connector,
+ dev_priv->gmbus[dev_priv->crt_ddc_pin]);
+ if (ret || !IS_G4X(dev))
+ return ret;
+
+ /* Try to probe digital port for output in DVI-I -> VGA mode. */
+ return (intel_ddc_get_modes(connector,
+ dev_priv->gmbus[GMBUS_PORT_DPB]));
+}
+
+static int intel_crt_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ return 0;
+}
+
+static void intel_crt_reset(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
+
+ if (HAS_PCH_SPLIT(dev))
+ crt->force_hotplug_required = 1;
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
+ .dpms = intel_crt_dpms,
+ .mode_fixup = intel_crt_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .commit = intel_encoder_commit,
+ .mode_set = intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ .reset = intel_crt_reset,
+ .dpms = drm_helper_connector_dpms,
+ .detect = intel_crt_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = intel_crt_destroy,
+ .set_property = intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+ .mode_valid = intel_crt_mode_valid,
+ .get_modes = intel_crt_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_no_crt[] = {
+ {
+ .callback = intel_no_crt_dmi_callback,
+ .ident = "ACER ZGB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ { }
+};
+
+void intel_crt_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct intel_crt *crt;
+ struct intel_connector *intel_connector;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Skip machines without VGA that falsely report hotplug events */
+ if (dmi_check_system(intel_no_crt))
+ return;
+
+ crt = malloc(sizeof(struct intel_crt), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base,
+ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+
+ intel_connector_attach_encoder(intel_connector, &crt->base);
+
+ crt->base.type = INTEL_OUTPUT_ANALOG;
+ crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
+ 1 << INTEL_ANALOG_CLONE_BIT |
+ 1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ crt->base.crtc_mask = (1 << 0) | (1 << 1);
+ if (IS_GEN2(dev))
+ connector->interlace_allowed = 0;
+ else
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
+ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+#if 0
+ drm_sysfs_connector_add(connector);
+#endif
+
+ if (I915_HAS_HOTPLUG(dev))
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ /*
+ * Configure the automatic hotplug detection stuff
+ */
+ crt->force_hotplug_required = 0;
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(PCH_ADPA);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(PCH_ADPA, adpa);
+ POSTING_READ(PCH_ADPA);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ crt->force_hotplug_required = 1;
+ }
+
+ dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+}
diff --git a/sys/dev/drm2/i915/intel_display.c b/sys/dev/drm2/i915/intel_display.c
new file mode 100644
index 0000000..7f30c4f
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_display.c
@@ -0,0 +1,9532 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/drm_dp_helper.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <sys/kdb.h>
+#include <sys/limits.h>
+
+#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
+static void intel_update_watermarks(struct drm_device *dev);
+static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+
+typedef struct {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+} intel_clock_t;
+
+typedef struct {
+ int min, max;
+} intel_range_t;
+
+typedef struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+} intel_p2_t;
+
+#define INTEL_P2_NUM 2
+typedef struct intel_limit intel_limit_t;
+struct intel_limit {
+ intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ intel_p2_t p2;
+ bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
+ int, int, intel_clock_t *, intel_clock_t *);
+};
+
+/* FDI */
+#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
+
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+static bool
+intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+ if (IS_GEN5(dev)) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+ } else
+ return 27;
+}
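+
+/*
+ * With the return value in units of 100MHz, the default of 27 above
+ * corresponds to the usual 2.7GHz FDI link clock.
+ */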
+
+static const intel_limit_t intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 2 },
+ .find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_i8xx_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 14, .p2_fast = 7 },
+ .find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_i9xx_sdvo = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 10, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+ .find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_i9xx_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 10, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 7 },
+ .find_pll = intel_find_best_PLL,
+};
+
+
+static const intel_limit_t intel_limits_g4x_sdvo = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3},
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 10,
+ .p2_fast = 10
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_hdmi = {
+ .dot = { .min = 22000, .max = 400000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 16, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8},
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 10, .p2_fast = 5 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+ .dot = { .min = 20000, .max = 115000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 14, .p2_fast = 14
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+ .dot = { .min = 80000, .max = 224000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 7, .p2_fast = 7
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_display_port = {
+ .dot = { .min = 161670, .max = 227000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 97, .max = 108 },
+ .m1 = { .min = 0x10, .max = 0x12 },
+ .m2 = { .min = 0x05, .max = 0x06 },
+ .p = { .min = 10, .max = 20 },
+ .p1 = { .min = 1, .max = 2},
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 10, .p2_fast = 10 },
+ .find_pll = intel_find_pll_g4x_dp,
+};
+
+static const intel_limit_t intel_limits_pineview_sdvo = {
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
+ /* Pineview's Ncounter is a ring counter */
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ /* Pineview only has one combined m divider, which we treat as m2. */
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+ .find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_pineview_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 14 },
+ .find_pll = intel_find_best_PLL,
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const intel_limit_t intel_limits_ironlake_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 5 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 10, .p2_fast = 5 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 118 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 56 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+/* LVDS 100mhz refclk limits. */
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_display_port = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000},
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 81, .max = 90 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 10, .max = 20 },
+ .p1 = { .min = 1, .max = 2},
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 10, .p2_fast = 10 },
+ .find_pll = intel_find_pll_ironlake_dp,
+};
+
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ int refclk)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP) {
+ /* LVDS dual channel */
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_single_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_single_lvds;
+ }
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ HAS_eDP)
+ limit = &intel_limits_ironlake_display_port;
+ else
+ limit = &intel_limits_ironlake_dac;
+
+ return limit;
+}
+
+static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ /* LVDS with dual channel */
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+ /* LVDS with single channel */
+ limit = &intel_limits_g4x_single_channel_lvds;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits_g4x_hdmi;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits_g4x_sdvo;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ limit = &intel_limits_g4x_display_port;
+ } else /* The option is for other outputs */
+ limit = &intel_limits_i9xx_sdvo;
+
+ return limit;
+}
+
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
+{
+ struct drm_device *dev = crtc->dev;
+ const intel_limit_t *limit;
+
+ if (HAS_PCH_SPLIT(dev))
+ limit = intel_ironlake_limit(crtc, refclk);
+ else if (IS_G4X(dev)) {
+ limit = intel_g4x_limit(crtc);
+ } else if (IS_PINEVIEW(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_pineview_lvds;
+ else
+ limit = &intel_limits_pineview_sdvo;
+ } else if (!IS_GEN2(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i9xx_lvds;
+ else
+ limit = &intel_limits_i9xx_sdvo;
+ } else {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i8xx_lvds;
+ else
+ limit = &intel_limits_i8xx_dvo;
+ }
+ return limit;
+}
+
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+static void pineview_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / clock->n;
+ clock->dot = clock->vco / clock->p;
+}
+
+static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
+{
+ if (IS_PINEVIEW(dev)) {
+ pineview_clock(refclk, clock);
+ return;
+ }
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
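+
+/*
+ * Worked example (illustrative register values only): with a 96000 kHz
+ * refclk and m1 = 14, m2 = 9, n = 2, p1 = 2, p2 = 5, the i9xx formula
+ * above gives
+ *
+ *     m = 5 * (14 + 2) + (9 + 2) = 91
+ *     vco = 96000 * 91 / (2 + 2) = 2184000 kHz
+ *     dot = 2184000 / (2 * 5) = 218400 kHz
+ *
+ * all of which fall within the i9xx limits above.
+ */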
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+ if (encoder->base.crtc == crtc && encoder->type == type)
+ return true;
+
+ return false;
+}
+
+#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
+/**
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+
+static bool intel_PLL_is_valid(struct drm_device *dev,
+ const intel_limit_t *limit,
+ const intel_clock_t *clock)
+{
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ INTELPllInvalid("p1 out of range\n");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ INTELPllInvalid("m2 out of range\n");
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ INTELPllInvalid("m1 out of range\n");
+ if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
+ INTELPllInvalid("m1 <= m2\n");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ INTELPllInvalid("m out of range\n");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ INTELPllInvalid("n out of range\n");
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ INTELPllInvalid("vco out of range\n");
+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+ * connector, etc., rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ INTELPllInvalid("dot out of range\n");
+
+ return true;
+}
+
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_clock_t clock;
+ int err = target;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (I915_READ(LVDS)) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ /* m1 is always 0 in Pineview */
+ if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ intel_clock(dev, refclk, &clock);
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_clock_t clock;
+ int max_n;
+ bool found;
+ /* approximately equals target * 0.00585 */
+ int err_most = (target >> 8) + (target >> 9);
+ found = false;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ int lvds_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ lvds_reg = PCH_LVDS;
+ else
+ lvds_reg = LVDS;
+ if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ max_n = limit->n.max;
+ /* based on hardware requirement, prefer smaller n for precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirement, prefer larger m1, m2 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+
+ intel_clock(dev, refclk, &clock);
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
+ max_n = clock.n;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+ return found;
+}
+
+static bool
+intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+
+ if (target < 200000) {
+ clock.n = 1;
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.m1 = 12;
+ clock.m2 = 9;
+ } else {
+ clock.n = 2;
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.m1 = 14;
+ clock.m2 = 8;
+ }
+ intel_clock(dev, refclk, &clock);
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
+}
+
+/* DisplayPort has only two frequencies, 162MHz and 270MHz */
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ intel_clock_t clock;
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 2;
+ clock.m1 = 23;
+ clock.m2 = 8;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 14;
+ clock.m2 = 2;
+ }
+ clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
+ clock.p = (clock.p1 * clock.p2);
+ clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+ clock.vco = 0;
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
+}
+
+/**
+ * intel_wait_for_vblank - wait for vblank on a given pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * Wait for vblank to occur on a given pipe. Needed for various bits of
+ * mode setting code.
+ */
+void intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipestat_reg = PIPESTAT(pipe);
+
+ /* Clear existing vblank status. Note this will clear any other
+ * sticky status fields as well.
+ *
+ * This races with i915_driver_irq_handler() with the result
+ * that either function could miss a vblank event. Here it is not
+ * fatal, as we will either wait upon the next vblank interrupt or
+ * timeout. Generally speaking intel_wait_for_vblank() is only
+ * called during modeset at which time the GPU should be idle and
+ * should *not* be performing page flips and thus not waiting on
+ * vblanks...
+ * Currently, the result of us stealing a vblank from the irq
+ * handler is that a single frame will be skipped during swapbuffers.
+ */
+ I915_WRITE(pipestat_reg,
+ I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
+ /* Wait for vblank interrupt bit to set */
+ if (_intel_wait_for(dev,
+ I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS,
+ 50, 1, "915vbl"))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * After disabling a pipe, we can't wait for vblank in the usual way,
+ * spinning on the vblank interrupt status bit, since we won't actually
+ * see an interrupt when the pipe is disabled.
+ *
+ * On Gen4 and above:
+ * wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ * wait for the display line value to settle (it usually
+ * ends up stopping at the start of the next frame).
+ *
+ */
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int reg = PIPECONF(pipe);
+
+ /* Wait for the Pipe State to go off */
+ if (_intel_wait_for(dev,
+ (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
+ 1, "915pip"))
+ DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ } else {
+ u32 last_line;
+ int reg = PIPEDSL(pipe);
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ /* Wait for the display line to settle */
+ do {
+ last_line = I915_READ(reg) & DSL_LINEMASK;
+ DELAY(5000);
+ } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
+ time_after(timeout, jiffies));
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ }
+}
+
+static const char *state_string(bool enabled)
+{
+ return enabled ? "on" : "off";
+}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ if (cur_state != state)
+ printf("PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+
+/* For ILK+ */
+static void assert_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ u32 pch_dpll;
+
+ pch_dpll = I915_READ(PCH_DPLL_SEL);
+
+ /* Make sure the selected PLL is enabled to the transcoder */
+ KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0,
+ ("transcoder %d PLL not enabled\n", pipe));
+
+ /* Convert the transcoder pipe number to a pll pipe number */
+ pipe = (pch_dpll >> (4 * pipe)) & 1;
+ }
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ if (cur_state != state)
+ printf("PCH PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
+#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ if (cur_state != state)
+ printf("FDI TX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ if (cur_state != state)
+ printf("FDI RX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* ILK FDI PLL is always enabled */
+ if (dev_priv->info->gen == 5)
+ return;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ if (!(val & FDI_TX_PLL_ENABLE))
+ printf("FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ if (!(val & FDI_RX_PLL_ENABLE))
+ printf("FDI RX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int pp_reg, lvds_reg;
+ u32 val;
+ enum pipe panel_pipe = PIPE_A;
+ bool locked = true;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ pp_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ pp_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
+ val = I915_READ(pp_reg);
+ if (!(val & PANEL_POWER_ON) ||
+ ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+ locked = false;
+
+ if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
+ panel_pipe = PIPE_B;
+
+ if (panel_pipe == pipe && locked)
+ printf("panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
+}
+
+void assert_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ /* if we need the pipe A quirk it must be always on */
+ if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ state = true;
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPECONF_ENABLE);
+ if (cur_state != state)
+ printf("pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+
+static void assert_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ cur_state = !!(val & DISPLAY_PLANE_ENABLE);
+ if (cur_state != state)
+ printf("plane %c assertion failure, (expected %s, current %s)\n",
+ plane_name(plane), state_string(state), state_string(cur_state));
+}
+
+#define assert_plane_enabled(d, p) assert_plane(d, p, true)
+#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+
+static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg, i;
+ u32 val;
+ int cur_pipe;
+
+ /* Planes are fixed to pipes on ILK+ */
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ reg = DSPCNTR(pipe);
+ val = I915_READ(reg);
+ if ((val & DISPLAY_PLANE_ENABLE) != 0)
+ printf("plane %c assertion failure, should be disabled but not\n",
+ plane_name(pipe));
+ return;
+ }
+
+ /* Need to check both planes against the pipe */
+ for (i = 0; i < 2; i++) {
+ reg = DSPCNTR(i);
+ val = I915_READ(reg);
+ cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+ if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe)
+ printf("plane %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(i), pipe_name(pipe));
+ }
+}
+
+static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ bool enabled;
+
+ val = I915_READ(PCH_DREF_CONTROL);
+ enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+ DREF_SUPERSPREAD_SOURCE_MASK));
+ if (!enabled)
+ printf("PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+ bool enabled;
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ enabled = !!(val & TRANS_ENABLE);
+ if (enabled)
+ printf("transcoder assertion failed, should be off on pipe %c but is still active\n",
+ pipe_name(pipe));
+}
+
+static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & PORT_ENABLE) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & LVDS_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & ADPA_DAC_ENABLE) == 0)
+ return false;
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 port_sel, u32 val)
+{
+ if ((val & DP_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
+ u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
+ if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
+ return false;
+ } else {
+ if ((val & DP_PIPE_MASK) != (pipe << 30))
+ return false;
+ }
+ return true;
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg, u32 port_sel)
+{
+ u32 val = I915_READ(reg);
+ if (dp_pipe_enabled(dev_priv, pipe, port_sel, val))
+ printf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ if (hdmi_pipe_enabled(dev_priv, pipe, val))
+ printf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ if (adpa_pipe_enabled(dev_priv, pipe, val))
+ printf("PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ if (lvds_pipe_enabled(dev_priv, pipe, val))
+ printf("PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
+}
+
+/**
+ * intel_enable_pll - enable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
+ * make sure the PLL reg is writable first though, since the panel write
+ * protect mechanism may be enabled.
+ *
+ * Note! This is for pre-ILK only.
+ */
+static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* No really, not for ILK+ */
+ KASSERT(dev_priv->info->gen < 5, ("Wrong device gen"));
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
+ assert_panel_unlocked(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+
+ /* We do this three times for luck */
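+ /* (Each write below is posted and followed by a 150us delay to
+ * cover the DPLL warmup period before the pipe is enabled.) */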
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ DELAY(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ DELAY(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ DELAY(150); /* wait for warmup */
+}
+
+/**
+ * intel_disable_pll - disable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe, making sure the pipe is off first.
+ *
+ * Note! This is for pre-ILK only.
+ */
+static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+/**
+ * intel_enable_pch_pll - enable PCH PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ if (pipe > 1)
+ return;
+
+ /* PCH only available on ILK+ */
+ KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
+
+ /* PCH refclock must be enabled first */
+ assert_pch_refclk_enabled(dev_priv);
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ DELAY(200);
+}
+
+static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
+ pll_sel = TRANSC_DPLL_ENABLE;
+
+ if (pipe > 1)
+ return;
+
+ /* PCH only available on ILK+ */
+ KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
+
+ /* Make sure transcoder isn't still depending on us */
+ assert_transcoder_disabled(dev_priv, pipe);
+
+ if (pipe == 0)
+ pll_sel |= TRANSC_DPLLA_SEL;
+ else if (pipe == 1)
+ pll_sel |= TRANSC_DPLLB_SEL;
+
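+ /* Transcoder C has no DPLL of its own and borrows DPLL A or B, so
+ * leave the PLL running if PCH_DPLL_SEL still routes it to
+ * transcoder C. */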
+ if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+ return;
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ DELAY(200);
+}
+
+static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val, pipeconf_val;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ /* PCH only available on ILK+ */
+ KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
+
+ /* Make sure PCH DPLL is enabled */
+ assert_pch_pll_enabled(dev_priv, pipe);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, pipe);
+ assert_fdi_rx_enabled(dev_priv, pipe);
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ pipeconf_val = I915_READ(PIPECONF(pipe));
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ val &= ~PIPE_BPC_MASK;
+ val |= pipeconf_val & PIPE_BPC_MASK;
+ }
+
+ val &= ~TRANS_INTERLACE_MASK;
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
+ if (HAS_PCH_IBX(dev_priv->dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+ val |= TRANS_LEGACY_INTERLACED_ILK;
+ else
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
+ I915_WRITE(reg, val | TRANS_ENABLE);
+ if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE,
+ 100, 1, "915trc"))
+ DRM_ERROR("failed to enable transcoder %d\n", pipe);
+}
+
+static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* FDI relies on the transcoder */
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+
+ /* Ports must be off as well */
+ assert_pch_ports_disabled(dev_priv, pipe);
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(reg, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (_intel_wait_for(dev_priv->dev,
+ (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50,
+ 1, "915trd"))
+ DRM_ERROR("failed to disable transcoder %d\n", pipe);
+}
+
+/**
+ * intel_enable_pipe - enable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to enable
+ * @pch_port: on ILK+, is this pipe driving a PCH port or not
+ *
+ * Enable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe is actually running (i.e. first vblank) before
+ * returning.
+ */
+static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool pch_port)
+{
+ int reg;
+ u32 val;
+
+ /*
+ * A pipe without a PLL won't actually be able to drive bits from
+ * a plane. On ILK+ the pipe PLLs are integrated, so we don't
+ * need the check.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv->dev))
+ assert_pll_enabled(dev_priv, pipe);
+ else {
+ if (pch_port) {
+ /* if driving the PCH, we need FDI enabled */
+ assert_fdi_rx_pll_enabled(dev_priv, pipe);
+ assert_fdi_tx_pll_enabled(dev_priv, pipe);
+ }
+ /* FIXME: assert CPU port conditions for SNB+ */
+ }
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ if (val & PIPECONF_ENABLE)
+ return;
+
+ I915_WRITE(reg, val | PIPECONF_ENABLE);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_disable_pipe - disable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to disable
+ *
+ * Disable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe has shut down before returning.
+ */
+static void intel_disable_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /*
+ * Make sure planes won't keep trying to pump pixels to us,
+ * or we might hang the display.
+ */
+ assert_planes_disabled(dev_priv, pipe);
+
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ if ((val & PIPECONF_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+ intel_wait_for_pipe_off(dev_priv->dev, pipe);
+}
+
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+}
+
+/**
+ * intel_enable_plane - enable a display plane on a given pipe
+ * @dev_priv: i915 private structure
+ * @plane: plane to enable
+ * @pipe: pipe being fed
+ *
+ * Enable @plane on @pipe, making sure that @pipe is running first.
+ */
+static void intel_enable_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* If the pipe isn't enabled, we can't pump pixels and may hang */
+ assert_pipe_enabled(dev_priv, pipe);
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ if (val & DISPLAY_PLANE_ENABLE)
+ return;
+
+ I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, plane);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_disable_plane - disable a display plane
+ * @dev_priv: i915 private structure
+ * @plane: plane to disable
+ * @pipe: pipe consuming the data
+ *
+ * Disable @plane; should be an independent operation.
+ */
+static void intel_disable_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, plane);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+static void disable_pch_dp(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg, u32 port_sel)
+{
+ u32 val = I915_READ(reg);
+ if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
+ DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
+ I915_WRITE(reg, val & ~DP_PORT_EN);
+ }
+}
+
+static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
+ DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
+ reg, pipe);
+ I915_WRITE(reg, val & ~PORT_ENABLE);
+ }
+}
+
+/* Disable any ports connected to this transcoder */
+static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ u32 reg, val;
+
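+ /* The panel power control register write-protects the panel and
+ * port registers; unlock them before the LVDS fiddling below. */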
+ val = I915_READ(PCH_PP_CONTROL);
+ I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
+
+ disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ if (adpa_pipe_enabled(dev_priv, pipe, val))
+ I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ if (lvds_pipe_enabled(dev_priv, pipe, val)) {
+ DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
+ I915_WRITE(reg, val & ~LVDS_PORT_EN);
+ POSTING_READ(reg);
+ DELAY(100);
+ }
+
+ disable_pch_hdmi(dev_priv, pipe, HDMIB);
+ disable_pch_hdmi(dev_priv, pipe, HDMIC);
+ disable_pch_hdmi(dev_priv, pipe, HDMID);
+}
+
+static void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (_intel_wait_for(dev,
+ (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10,
+ 1, "915fbd")) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
+ cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 64B units */
+ cfb_pitch = (cfb_pitch / 64) - 1;
+ plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
+ cfb_pitch, crtc->y, intel_crtc->plane);
+}
+
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void g4x_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
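+ /* GEN6_BLITTER_ECOSKPD appears to guard FBC_NOTIFY with a lock
+ * bit GEN6_BLITTER_LOCK_SHIFT positions up: raise the lock, set
+ * the bit, then drop the lock again, posting at the end. */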
+ gen6_gt_force_wake_get(dev_priv);
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= DPFC_RESERVED;
+ dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ /* Set persistent mode for front-buffer rendering, ala X. */
+ dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ sandybridge_blit_fbc_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void ironlake_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+static void intel_fbc_work_fn(void *arg, int pending)
+{
+ struct intel_fbc_work *work = arg;
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_LOCK(dev);
+ if (work == dev_priv->fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc,
+ work->interval);
+
+ dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->cfb_fb = work->crtc->fb->base.id;
+ dev_priv->cfb_y = work->crtc->y;
+ }
+
+ dev_priv->fbc_work = NULL;
+ }
+ DRM_UNLOCK(dev);
+
+ free(work, DRM_MEM_KMS);
+}
+
+static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
+{
+ u_int pending;
+
+ if (dev_priv->fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
+ &pending) == 0)
+ /* tasklet was killed before being run, clean up */
+ free(dev_priv->fbc_work, DRM_MEM_KMS);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake up (because the work was already running and waiting
+ * for our mutex), it will discover that it is no longer
+ * necessary to run.
+ */
+ dev_priv->fbc_work = NULL;
+}
+
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ work = malloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ work->crtc = crtc;
+ work->fb = crtc->fb;
+ work->interval = interval;
+ TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
+ work);
+
+ dev_priv->fbc_work = work;
+
+ DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ */
+ taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
+ msecs_to_jiffies(50));
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->cfb_plane = -1;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+static void intel_update_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int enable_fbc;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!i915_powersave)
+ return;
+
+ if (!I915_HAS_FBC(dev))
+ return;
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp_crtc->enabled && tmp_crtc->fb) {
+ if (crtc) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->fb == NULL) {
+ DRM_DEBUG_KMS("no output, disabling\n");
+ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ enable_fbc = i915_enable_fbc;
+ if (enable_fbc < 0) {
+ DRM_DEBUG_KMS("fbc set to per-chip default\n");
+ enable_fbc = 1;
+ if (INTEL_INFO(dev)->gen <= 6)
+ enable_fbc = 0;
+ }
+ if (!enable_fbc) {
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ goto out_disable;
+ }
+ if (intel_fb->obj->base.size > dev_priv->cfb_size) {
+ DRM_DEBUG_KMS("framebuffer too large, disabling "
+ "compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ goto out_disable;
+ }
+ if ((crtc->mode.hdisplay > 2048) ||
+ (crtc->mode.vdisplay > 1536)) {
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ goto out_disable;
+ }
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ goto out_disable;
+ }
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+ if (kdb_active)
+ goto out_disable;
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_fb == fb->base.id &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_disable_fbc(dev);
+ }
+
+ intel_enable_fbc(crtc, 500);
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_disable_fbc(dev);
+ }
+}
+
+int
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 alignment;
+ int ret;
+
+ alignment = 0; /* shut gcc */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ alignment = 128 * 1024;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ alignment = 4 * 1024;
+ else
+ alignment = 64 * 1024;
+ break;
+ case I915_TILING_X:
+ /* pin() will align the object as required by fence */
+ alignment = 0;
+ break;
+ case I915_TILING_Y:
+ /* FIXME: Is this true? */
+ DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+ return -EINVAL;
+ default:
+ KASSERT(0, ("Wrong tiling for fb obj"));
+ }
+
+ dev_priv->mm.interruptible = false;
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+ if (ret)
+ goto err_interruptible;
+
+ /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always install
+ * a fence as the cost is not that onerous.
+ */
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ ret = i915_gem_object_get_fence(obj, pipelined);
+ if (ret)
+ goto err_unpin;
+
+ i915_gem_object_pin_fence(obj);
+ }
+
+ dev_priv->mm.interruptible = true;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_interruptible:
+ dev_priv->mm.interruptible = true;
+ return ret;
+}
+
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_fence(obj);
+ i915_gem_object_unpin(obj);
+}
+
+static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long Start, Offset;
+ u32 dspcntr;
+ u32 reg;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ return -EINVAL;
+ }
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+
+ I915_WRITE(reg, dspcntr);
+
+ Start = obj->gtt_offset;
+ Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DSPSURF(plane), Start);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPADDR(plane), Offset);
+ } else
+ I915_WRITE(DSPADDR(plane), Start + Offset);
+ POSTING_READ(reg);
+
+ return (0);
+}
+
+static int ironlake_update_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long Start, Offset;
+ u32 dspcntr;
+ u32 reg;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ case 2:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (fb->depth != 16) {
+ DRM_ERROR("bpp 16, depth %d\n", fb->depth);
+ return -EINVAL;
+ }
+
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ if (fb->depth == 24)
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ else if (fb->depth == 30)
+ dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
+ else {
+ DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel,
+ fb->depth);
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+
+ /* must disable */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ I915_WRITE(reg, dspcntr);
+
+ Start = obj->gtt_offset;
+ Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ I915_WRITE(DSPSURF(plane), Start);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPADDR(plane), Offset);
+ POSTING_READ(reg);
+
+ return 0;
+}
+
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = dev_priv->display.update_plane(crtc, fb, x, y);
+ if (ret)
+ return ret;
+
+ intel_update_fbc(dev);
+ intel_increase_pllclock(crtc);
+
+ return 0;
+}
+
+static int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool was_interruptible = dev_priv->mm.interruptible;
+ int ret;
+
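+ /* First drain any page flips still pending against this object. */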
+ mtx_lock(&dev->event_lock);
+ while (!atomic_read(&dev_priv->mm.wedged) &&
+ atomic_read(&obj->pending_flip) != 0) {
+ msleep(&obj->pending_flip, &dev->event_lock,
+ 0, "915flp", 0);
+ }
+ mtx_unlock(&dev->event_lock);
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
+ */
+ dev_priv->mm.interruptible = false;
+ ret = i915_gem_object_finish_gpu(obj);
+ dev_priv->mm.interruptible = was_interruptible;
+ return ret;
+}
+
+static int
+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+#if 0
+ struct drm_i915_master_private *master_priv;
+#else
+ drm_i915_private_t *dev_priv = dev->dev_private;
+#endif
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int ret;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_ERROR("No FB bound\n");
+ return 0;
+ }
+
+ switch (intel_crtc->plane) {
+ case 0:
+ case 1:
+ break;
+ case 2:
+ if (IS_IVYBRIDGE(dev))
+ break;
+ /* fall through otherwise */
+ default:
+ DRM_ERROR("no plane for crtc\n");
+ return -EINVAL;
+ }
+
+ DRM_LOCK(dev);
+ ret = intel_pin_and_fence_fb_obj(dev,
+ to_intel_framebuffer(crtc->fb)->obj,
+ NULL);
+ if (ret != 0) {
+ DRM_UNLOCK(dev);
+ DRM_ERROR("pin & fence failed\n");
+ return ret;
+ }
+
+ if (old_fb)
+ intel_finish_fb(old_fb);
+
+ ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
+ LEAVE_ATOMIC_MODE_SET);
+ if (ret) {
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ DRM_UNLOCK(dev);
+ DRM_ERROR("failed to update base address\n");
+ return ret;
+ }
+
+ if (old_fb) {
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ }
+
+ DRM_UNLOCK(dev);
+
+#if 0
+ if (!dev->primary->master)
+ return 0;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return 0;
+
+ if (intel_crtc->pipe) {
+ master_priv->sarea_priv->pipeB_x = x;
+ master_priv->sarea_priv->pipeB_y = y;
+ } else {
+ master_priv->sarea_priv->pipeA_x = x;
+ master_priv->sarea_priv->pipeA_y = y;
+ }
+#else
+
+ if (!dev_priv->sarea_priv)
+ return 0;
+
+ if (intel_crtc->pipe) {
+ dev_priv->sarea_priv->planeB_x = x;
+ dev_priv->sarea_priv->planeB_y = y;
+ } else {
+ dev_priv->sarea_priv->planeA_x = x;
+ dev_priv->sarea_priv->planeA_y = y;
+ }
+#endif
+
+ return 0;
+}
+
+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_FREQ_MASK;
+
+ if (clock < 200000) {
+ u32 temp;
+ dpa_ctl |= DP_PLL_FREQ_160MHZ;
+ /* workaround for 160Mhz:
+ 1) program 0x4600c bits 15:0 = 0x8124
+ 2) program 0x46010 bit 0 = 1
+ 3) program 0x46034 bit 24 = 1
+ 4) program 0x64000 bit 14 = 1
+ */
+ temp = I915_READ(0x4600c);
+ temp &= 0xffff0000;
+ I915_WRITE(0x4600c, temp | 0x8124);
+
+ temp = I915_READ(0x46010);
+ I915_WRITE(0x46010, temp | 1);
+
+ temp = I915_READ(0x46034);
+ I915_WRITE(0x46034, temp | (1 << 24));
+ } else {
+ dpa_ctl |= DP_PLL_FREQ_270MHZ;
+ }
+ I915_WRITE(DP_A, dpa_ctl);
+
+ POSTING_READ(DP_A);
+ DELAY(500);
+}
+
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (IS_IVYBRIDGE(dev)) {
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ }
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ POSTING_READ(reg);
+ DELAY(1000);
+
+ /* IVB wants error correction enabled */
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
+ FDI_FE_ERRC_ENABLE);
+}
+
+static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 flags = I915_READ(SOUTH_CHICKEN1);
+
+ flags |= FDI_PHASE_SYNC_OVR(pipe);
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
+ flags |= FDI_PHASE_SYNC_EN(pipe);
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp, tries;
+
+ /* FDI needs bits from pipe & plane first */
+ assert_pipe_enabled(dev_priv, pipe);
+ assert_plane_enabled(dev_priv, plane);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+ I915_READ(reg);
+ DELAY(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ /* Ironlake workaround, enable clock pointer after FDI enable*/
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
+ }
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+ }
+ if (tries == 5)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (tries == 5)
+ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done\n");
+
+}
+
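+/*
+ * Voltage-swing/pre-emphasis combinations stepped through in order
+ * during SNB/IVB FDI link training until bit/symbol lock is reported.
+ */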
+static const int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ if (HAS_PCH_CPT(dev))
+ cpt_phase_pointer_enable(dev, pipe);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(500);
+
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN6(dev)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(500);
+
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(500);
+
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK ||
+ (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(150);
+
+ for (i = 0; i < 4; i++ ) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(500);
+
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* Write the TU size bits so error detection works */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~((0x7 << 19) | (0x7 << 16));
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp | FDI_PCDCLK);
+
+ POSTING_READ(reg);
+ DELAY(200);
+
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(100);
+ }
+}
+
+static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 flags = I915_READ(SOUTH_CHICKEN1);
+
+ flags &= ~(FDI_PHASE_SYNC_EN(pipe));
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
+ flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static void ironlake_fdi_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe)) &
+ ~FDI_RX_PHASE_SYNC_POINTER_EN);
+ } else if (HAS_PCH_CPT(dev)) {
+ cpt_phase_pointer_disable(dev, pipe);
+ }
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ DELAY(100);
+}
+
+/*
+ * When we disable a pipe, we need to clear any pending scanline wait events
+ * to avoid hanging the ring, which we assume we are waiting on.
+ */
+static void intel_clear_scanline_wait(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ u32 tmp;
+
+ if (IS_GEN2(dev))
+ /* Can't break the hang on i8xx */
+ return;
+
+ ring = LP_RING(dev_priv);
+ tmp = I915_READ_CTL(ring);
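+ /* The WAIT bit is set while the ring is stalled on a scanline
+ * event; writing it back as set clears it and releases the
+ * ring. */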
+ if (tmp & RING_WAIT)
+ I915_WRITE_CTL(ring, tmp);
+}
+
+static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+ struct drm_i915_gem_object *obj;
+ struct drm_i915_private *dev_priv;
+ struct drm_device *dev;
+
+ if (crtc->fb == NULL)
+ return;
+
+ obj = to_intel_framebuffer(crtc->fb)->obj;
+ dev = crtc->dev;
+ dev_priv = dev->dev_private;
+ mtx_lock(&dev->event_lock);
+ while (atomic_read(&obj->pending_flip) != 0)
+ msleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0);
+ mtx_unlock(&dev->event_lock);
+}
+
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ /*
+ * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+ * must be driven by its own crtc; no sharing is possible.
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ return false;
+ continue;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Enable PCH resources required for PCH ports:
+ * - PCH PLLs
+ * - FDI training & RX/TX
+ * - update transcoder timings
+ * - DP transcoding bits
+ * - transcoder
+ */
+static void ironlake_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, transc_sel;
+
+ /* For PCH output, training FDI link */
+ dev_priv->display.fdi_link_train(crtc);
+
+ intel_enable_pch_pll(dev_priv, pipe);
+
+ if (HAS_PCH_CPT(dev)) {
+ transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
+ TRANSC_DPLLB_SEL;
+
+ /* Be sure PCH DPLL SEL is set */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (pipe == 0) {
+ temp &= ~(TRANSA_DPLLB_SEL);
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ } else if (pipe == 1) {
+ temp &= ~(TRANSB_DPLLB_SEL);
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ } else if (pipe == 2) {
+ temp &= ~(TRANSC_DPLLB_SEL);
+ temp |= (TRANSC_DPLL_ENABLE | transc_sel);
+ }
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
+
+ /* set transcoder timing, panel must allow it */
+ assert_panel_unlocked(dev_priv, pipe);
+ I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
+ I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
+ I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
+
+ I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
+ I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
+ I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
+ I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
+
+ intel_fdi_normal_train(crtc);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
+ temp |= (TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING);
+ temp |= bpc << 9; /* same format but at 11:9 */
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ temp |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ temp |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
+ }
+
+ I915_WRITE(reg, temp);
+ }
+
+ intel_enable_transcoder(dev_priv, pipe);
+}
+
+void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
+ u32 temp;
+
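+ /* PIPEDSL is the pipe's current-scanline counter; if it fails to
+ * advance after the mode set, the pipe has stalled. */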
+ temp = I915_READ(dslreg);
+ DELAY(500);
+ if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1, "915cp1")) {
+ /* Without this, mode sets may fail silently on FDI */
+ I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
+ DELAY(250);
+ I915_WRITE(tc2reg, 0);
+ if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1,
+ "915cp2"))
+ DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
+ }
+}
+
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 temp;
+ bool is_pch_port;
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0)
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ }
+
+ is_pch_port = intel_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ ironlake_fdi_pll_enable(crtc);
+ else
+ ironlake_fdi_disable(crtc);
+
+ /* Enable panel fitting for LVDS */
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+
+ intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ if (is_pch_port)
+ ironlake_pch_enable(crtc);
+
+ intel_crtc_load_lut(crtc);
+
+ DRM_LOCK(dev);
+ intel_update_fbc(dev);
+ DRM_UNLOCK(dev);
+
+ intel_crtc_update_cursor(crtc, true);
+}
+
+static void ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
+
+ if (!intel_crtc->active)
+ return;
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_update_cursor(crtc, false);
+
+ intel_disable_plane(dev_priv, plane, pipe);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ /* Disable PF */
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+ ironlake_fdi_disable(crtc);
+
+ /* This is a horrible layering violation; we should be doing this in
+ * the connector/encoder ->prepare instead, but we don't always have
+ * enough information there about the config to know whether it will
+ * actually be necessary or just cause undesired flicker.
+ */
+ intel_disable_pch_ports(dev_priv, pipe);
+
+ intel_disable_transcoder(dev_priv, pipe);
+
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ temp |= TRANS_DP_PORT_SEL_NONE;
+ I915_WRITE(reg, temp);
+
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ switch (pipe) {
+ case 0:
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ break;
+ case 1:
+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ break;
+ case 2:
+ /* C shares PLL A or B */
+ temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
+ break;
+ default:
+ KASSERT(0, ("Wrong pipe %d", pipe)); /* unreachable */
+ }
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
+
+ /* disable PCH DPLL */
+ if (!intel_crtc->no_pll)
+ intel_disable_pch_pll(dev_priv, pipe);
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ DELAY(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ DELAY(100);
+
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+
+ DRM_LOCK(dev);
+ intel_update_fbc(dev);
+ intel_clear_scanline_wait(dev);
+ DRM_UNLOCK(dev);
+}
+
+static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+ ironlake_crtc_enable(crtc);
+ break;
+
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+ ironlake_crtc_disable(crtc);
+ break;
+ }
+}
+
+static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
+ if (!enable && intel_crtc->overlay) {
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_LOCK(dev);
+ dev_priv->mm.interruptible = false;
+ (void) intel_overlay_switch_off(intel_crtc->overlay);
+ dev_priv->mm.interruptible = true;
+ DRM_UNLOCK(dev);
+ }
+
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway.
+ */
+}
+
+static void i9xx_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ intel_enable_pll(dev_priv, pipe);
+ intel_enable_pipe(dev_priv, pipe, false);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ intel_crtc_load_lut(crtc);
+ intel_update_fbc(dev);
+
+ /* Give the overlay scaler a chance to enable if it's on this pipe */
+ intel_crtc_dpms_overlay(intel_crtc, true);
+ intel_crtc_update_cursor(crtc, true);
+}
+
+static void i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ if (!intel_crtc->active)
+ return;
+
+ /* Give the overlay scaler a chance to disable if it's on this pipe */
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ intel_crtc_update_cursor(crtc, false);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
+ intel_disable_pll(dev_priv, pipe);
+
+ intel_crtc->active = false;
+ intel_update_fbc(dev);
+ intel_update_watermarks(dev);
+ intel_clear_scanline_wait(dev);
+}
+
+static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ i9xx_crtc_enable(crtc);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ i9xx_crtc_disable(crtc);
+ break;
+ }
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+#if 0
+ struct drm_i915_master_private *master_priv;
+#endif
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool enabled;
+
+ if (intel_crtc->dpms_mode == mode)
+ return;
+
+ intel_crtc->dpms_mode = mode;
+
+ dev_priv->display.dpms(crtc, mode);
+
+#if 0
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+#else
+ if (!dev_priv->sarea_priv)
+ return;
+#endif
+
+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+ switch (pipe) {
+ case 0:
+#if 0
+ master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
+ master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
+#else
+ dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
+ dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
+#endif
+ break;
+ case 1:
+#if 0
+ master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
+ master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
+#else
+ dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
+ dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
+#endif
+ break;
+ default:
+ DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
+ break;
+ }
+}
+
+static void intel_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+
+ /* Flush any pending WAITs before we disable the pipe. Note that
+ * we need to drop the struct_mutex in order to acquire it again
+ * during the lowlevel dpms routines around a couple of the
+ * operations. It does not look trivial nor desirable to move
+ * that locking higher. So instead we leave a window for the
+ * submission of further commands on the fb before we can actually
+ * disable it. This race with userspace exists anyway, and we can
+ * only rely on the pipe being disabled by userspace after it
+ * receives the hotplug notification and has flushed any pending
+ * batches.
+ */
+ if (crtc->fb) {
+ DRM_LOCK(dev);
+ intel_finish_fb(crtc->fb);
+ DRM_UNLOCK(dev);
+ }
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+ assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
+
+ if (crtc->fb) {
+ DRM_LOCK(dev);
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ DRM_UNLOCK(dev);
+ }
+}
+
+/* Prepare for a mode set.
+ *
+ * Note we could be a lot smarter here. We need to figure out which outputs
+ * will be enabled, which disabled (in short, how the config will change)
+ * and perform the minimum necessary steps to accomplish that, e.g. updating
+ * watermarks, FBC configuration, making sure PLLs are programmed correctly,
+ * panel fitting is in the proper state, etc.
+ */
+static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+{
+ i9xx_crtc_disable(crtc);
+}
+
+static void i9xx_crtc_commit(struct drm_crtc *crtc)
+{
+ i9xx_crtc_enable(crtc);
+}
+
+static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+{
+ ironlake_crtc_disable(crtc);
+}
+
+static void ironlake_crtc_commit(struct drm_crtc *crtc)
+{
+ ironlake_crtc_enable(crtc);
+}
+
+void intel_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ /* LVDS has its own version of prepare; see intel_lvds_prepare */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void intel_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ struct drm_device *dev = encoder->dev;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+ /* LVDS has its own version of commit; see intel_lvds_commit */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ if (HAS_PCH_CPT(dev))
+ intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+}
+
+void intel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ free(intel_encoder, DRM_MEM_KMS);
+}
+
+static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ /* FDI link clock is fixed at 2.7G */
+ if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
+ return false;
+ }
+
+ /* All interlaced-capable Intel hw wants timings in frames. Note though
+ * that intel_lvds_mode_fixup does some funny tricks with the crtc
+ * timings, so we need to be careful not to clobber these. */
+ if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ return true;
+}
+
+static int i945_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000;
+}
+
+static int i915_get_display_clock_speed(struct drm_device *dev)
+{
+ return 333000;
+}
+
+static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+{
+ return 200000;
+}
+
+static int i915gm_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 gcfgc = 0;
+
+ gcfgc = pci_read_config(dev->device, GCFGC, 2);
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
+ return 133000;
+ else {
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_MHZ:
+ return 333000;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ return 190000;
+ }
+ }
+}
+
+static int i865_get_display_clock_speed(struct drm_device *dev)
+{
+ return 266000;
+}
+
+static int i855_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 hpllcc = 0;
+ /* Assume that the hardware is in the high speed state. This
+ * should be the default.
+ */
+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+ case GC_CLOCK_133_200:
+ case GC_CLOCK_100_200:
+ return 200000;
+ case GC_CLOCK_166_250:
+ return 250000;
+ case GC_CLOCK_100_133:
+ return 133000;
+ }
+
+ /* Shouldn't happen */
+ return 0;
+}
+
+static int i830_get_display_clock_speed(struct drm_device *dev)
+{
+ return 133000;
+}
+
+struct fdi_m_n {
+ u32 tu;
+ u32 gmch_m;
+ u32 gmch_n;
+ u32 link_m;
+ u32 link_n;
+};
+
+static void
+fdi_reduce_ratio(u32 *num, u32 *den)
+{
+ while (*num > 0xffffff || *den > 0xffffff) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
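+
+ /*
+ * Illustrative example (values assumed for exposition, not taken from
+ * the change itself): fdi_reduce_ratio() keeps the M/N pair inside the
+ * 24-bit register fields while preserving the ratio.  With
+ * num = 0x1200000 and den = 0x900000, num exceeds 0xffffff, so both are
+ * shifted right once to num = 0x900000, den = 0x480000; the 2:1 ratio
+ * is unchanged and both now fit in 24 bits.  Low-order bits are
+ * dropped, so the reduction is approximate in general.
+ */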
+
+static void
+ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
+ int link_clock, struct fdi_m_n *m_n)
+{
+ m_n->tu = 64; /* default size */
+
+ /* BUG_ON(pixel_clock > INT_MAX / 36); */
+ m_n->gmch_m = bits_per_pixel * pixel_clock;
+ m_n->gmch_n = link_clock * nlanes * 8;
+ fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
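+
+ /*
+ * Worked example with assumed numbers (not from the change itself):
+ * a 1920x1080@60 mode has a pixel clock of 148500 kHz.  At 24 bpp over
+ * 4 FDI lanes with a 270000 kHz link clock:
+ *   gmch_m = 24 * 148500    = 3564000
+ *   gmch_n = 270000 * 4 * 8 = 8640000
+ *   link_m = 148500, link_n = 270000
+ * All values already fit in 24 bits, so fdi_reduce_ratio() leaves them
+ * untouched.
+ */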
+
+
+struct intel_watermark_params {
+ unsigned long fifo_size;
+ unsigned long max_wm;
+ unsigned long default_wm;
+ unsigned long guard_size;
+ unsigned long cacheline_size;
+};
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params g4x_wm_info = {
+ G4X_FIFO_SIZE,
+ G4X_MAX_WM,
+ G4X_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i945_wm_info = {
+ I945_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i915_wm_info = {
+ I915_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i855_wm_info = {
+ I855GM_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i830_wm_info = {
+ I830_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
+ ILK_CURSOR_FIFO,
+ ILK_CURSOR_MAXWM,
+ ILK_CURSOR_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @clock_in_khz: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the FIFO available to this plane, in FIFO entries
+ * @pixel_size: display pixel size (bytes per pixel)
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO line-sized chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
+ int pixel_size,
+ unsigned long latency_ns)
+{
+ long entries_required, wm_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * Clocks go from a few thousand to several hundred thousand kHz;
+ * latency is usually a few thousand ns.
+ */
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
+ entries_required = howmany(entries_required, wm->cacheline_size);
+
+ DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
+
+ wm_size = fifo_size - (entries_required + wm->guard_size);
+
+ DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > (long)wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+ return wm_size;
+}
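+
+ /*
+ * Worked example with illustrative values (not drawn from any table
+ * above): a 100000 kHz dot clock, 4 bytes per pixel and 5000 ns of
+ * latency drain (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes during
+ * the latency window.  With a 64-byte cacheline that is
+ * howmany(2000, 64) = 32 FIFO entries; for a 96-entry FIFO with a
+ * guard of 2, the watermark is 96 - (32 + 2) = 62.
+ */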
+
+struct cxsr_latency {
+ int is_desktop;
+ int is_ddr3;
+ unsigned long fsb_freq;
+ unsigned long mem_freq;
+ unsigned long display_sr;
+ unsigned long display_hpll_disable;
+ unsigned long cursor_sr;
+ unsigned long cursor_hpll_disable;
+};
+
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
+
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+ int is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
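+
+ /*
+ * Example lookup against the table above: a desktop part
+ * (is_desktop = 1) with DDR2 (is_ddr3 = 0), an 800 MHz FSB and 667 MHz
+ * memory matches the row {1, 0, 800, 667, ...}, giving
+ * display_sr = 3354 ns and cursor_sr = 3807 ns.  Any combination not
+ * present in the table disables CxSR.
+ */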
+
+static void pineview_disable_cxsr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* deactivate cxsr */
+ I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int latency_ns = 5000;
+
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
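+
+ /*
+ * DSPARB carves one shared FIFO between the planes: plane A's share
+ * sits at the bottom, so its size is the low 7 bits, and plane B's
+ * share is the start of the following allocation (the DSPARB_CSTART
+ * field) minus plane A's size.  Illustrative read-out with assumed
+ * values: if the low bits read 48 and the CSTART field reads 96,
+ * plane A gets 48 entries and plane B gets 96 - 48 = 48.
+ */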
+
+static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (plane)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled && crtc->fb) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pineview_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned long wm;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ int clock = crtc->mode.clock;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Display SR */
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ I915_WRITE(DSPFW3,
+ I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
+static bool g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
+ return false;
+ }
+
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = howmany(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = howmany(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
+
+ return true;
+}
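+
+ /*
+ * Worked example, all values assumed: at a 100000 kHz dot clock and
+ * 4 bytes per pixel with 5000 ns latency, the small-buffer method
+ * gives ((100000 * 4 / 1000) * 5000) / 1000 = 2000 bytes, i.e.
+ * howmany(2000, 64) = 32 cachelines, plus the guard size.  For the
+ * cursor, an htotal of 1000 yields a 10 us line time, so
+ * line_count = (5000 / 10 + 1000) / 1000 = 1 line, and
+ * 1 * 64 * 4 = 256 bytes = 4 cachelines, plus the guard size.
+ */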
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
+
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = howmany(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = howmany(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
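+/*
+ * single_plane_enabled() is true only for masks with exactly one bit
+ * set: e.g. 0x1 (pipe A alone) or 0x2 (pipe B alone) pass, while 0 and
+ * 0x3 (both pipes) fail, since the self-refresh watermark is only
+ * computed for single-plane configurations.
+ */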
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ if (g4x_compute_wm0(dev, 0,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ else
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ /* HPLL off in SR has some issues on G4x... disable it */
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i965_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+
+ /* Calc SR entries for single-plane configs */
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ int clock = crtc->mode.clock;
+ int htotal = crtc->mode.htotal;
+ int hdisplay = crtc->mode.hdisplay;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = ((htotal * 1000) / clock);
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = howmany(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * 64;
+ entries = howmany(entries, i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+ (entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ /* update cursor SR watermark */
+ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i9xx_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
+ uint32_t fwater_lo;
+ uint32_t fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
+ else if (!IS_GEN2(dev))
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i855_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (crtc->enabled && crtc->fb) {
+ planea_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (crtc->enabled && crtc->fb) {
+ planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
+
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
+ /* Calc SR entries for single-plane configs */
+ if (HAS_FW_BLC(dev) && enabled) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ int clock = enabled->mode.clock;
+ int htotal = enabled->mode.htotal;
+ int hdisplay = enabled->mode.hdisplay;
+ int pixel_size = enabled->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = (htotal * 1000) / clock;
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = howmany(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+ I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
+}
+
+static void i830_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ uint32_t fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
+
+ planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+}
+
+#define ILK_LP0_PLANE_LATENCY 700
+#define ILK_LP0_CURSOR_LATENCY 1300
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+ /* FBC has its own way to disable the FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ }
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+ display_wm, SNB_DISPLAY_MAX_SRWM, level);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+ cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Compute the watermark values for WM[1-3].
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int hdisplay, htotal, pixel_size, clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = howmany(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /*
+ * Spec says:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = howmany(*display_wm * 64, line_size) + 2;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = howmany(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return ironlake_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm,
+ display, cursor);
+}
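+
+ /*
+ * Example of the FBC WM formula above, with assumed numbers: a
+ * display_wm of 40 on a 1920-pixel-wide mode at 4 bytes per pixel
+ * (line_size = 7680) gives
+ * fbc_wm = howmany(40 * 64, 7680) + 2 = 1 + 2 = 3 FBC lines.
+ */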
+
+static void ironlake_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled))
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ ILK_READ_WM1_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ ILK_READ_WM2_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /*
+ * WM3 is unsupported on ILK, probably because we don't have latency
+ * data for that power state
+ */
+}
+
+void sandybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* register is in 0.1 us units; convert to ns */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ /* IVB has 3 pipes */
+ if (IS_IVYBRIDGE(dev) &&
+ g4x_compute_wm0(dev, 2,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEC_IVB);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEC_IVB, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << 2; /* pipe C is bit 2; a literal 3 would set bits 0 and 1 */
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB supports 3 levels of watermarks.
+ *
+ * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+ * and disabled in descending order.
+ *
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int display_latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ int clock;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *sprite_wm = display->guard_size;
+ return false;
+ }
+
+ clock = crtc->mode.clock;
+
+ /* Use the small buffer method to calculate the sprite watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size -
+ sprite_width * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = howmany(entries, display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+ if (*sprite_wm > (int)display->max_wm)
+ *sprite_wm = display->max_wm;
+
+ return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ clock = crtc->mode.clock;
+ if (!clock) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_time_us = (sprite_width * 1000) / clock;
+ if (!line_time_us) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = sprite_width * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for the sprite */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = howmany(min(small, large), display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+
+ return *sprite_wm <= 0x3ff;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* register is in 0.1 us units; convert to ns */
+ u32 val;
+ int sprite_wm, reg;
+ int ret;
+
+ switch (pipe) {
+ case 0:
+ reg = WM0_PIPEA_ILK;
+ break;
+ case 1:
+ reg = WM0_PIPEB_ILK;
+ break;
+ case 2:
+ reg = WM0_PIPEC_IVB;
+ break;
+ default:
+ return; /* bad pipe */
+ }
+
+ ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+ &sandybridge_display_wm_info,
+ latency, &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+ pipe);
+ return;
+ }
+
+ val = I915_READ(reg);
+ val &= ~WM0_PIPE_SPRITE_MASK;
+ I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+ /* Only IVB has two more LP watermarks for sprite */
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+static void intel_update_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(dev);
+}
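+
+ /*
+ * Worked instance of the SR formula above, with assumed numbers:
+ * htotal = 1000 at a 100000 kHz dot clock gives a 10 us line time;
+ * a 12000 ns latency then spans trunc(12 / 10) + 1 = 2 lines, so an
+ * hdisplay of 1280 at 4 bytes per pixel drains
+ * 2 * 1280 * 4 = 10240 bytes from the FIFO during self-refresh exit.
+ */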
+
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+ pixel_size);
+}
+
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+ if (i915_panel_use_ssc >= 0)
+ return i915_panel_use_ssc != 0;
+ return dev_priv->lvds_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
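+
+ /*
+ * Tuning note: the i915_panel_use_ssc tunable overrides the VBT here:
+ * 0 forces SSC off, a positive value forces it on, and a negative
+ * value defers to dev_priv->lvds_use_ssc as read from the VBT, unless
+ * the SSC-disable quirk is set.
+ */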
+
+/**
+ * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
+ * @crtc: CRTC structure
+ * @mode: requested mode
+ *
+ * A pipe may be connected to one or more outputs. Based on the depth of the
+ * attached framebuffer, choose a good color depth to use on the pipe.
+ *
+ * If possible, match the pipe depth to the fb depth. In some cases, this
+ * isn't ideal, because the connected output supports a lesser or restricted
+ * set of depths. Resolve that here:
+ * LVDS typically supports only 6bpc, so clamp down in that case
+ * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
+ * Displays may support a restricted set as well, check EDID and clamp as
+ * appropriate.
+ * DP may want to dither down to 6bpc to fit larger modes
+ *
+ * RETURNS:
+ * Dithering requirement (i.e. false if display bpc and pipe bpc match,
+ * true if they don't match).
+ */
+static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ unsigned int *pipe_bpp,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned int display_bpc = UINT_MAX, bpc;
+
+ /* Walk the encoders & connectors on this crtc, get min bpc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
+ unsigned int lvds_bpc;
+
+ if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
+ LVDS_A3_POWER_UP)
+ lvds_bpc = 8;
+ else
+ lvds_bpc = 6;
+
+ if (lvds_bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+ display_bpc = lvds_bpc;
+ }
+ continue;
+ }
+
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ /* Use VBT settings if we have an eDP panel */
+ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+ if (edp_bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ display_bpc = edp_bpc;
+ }
+ continue;
+ }
+
+ /* Not one of the known troublemakers, check the EDID */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ /* Don't use an invalid EDID bpc value */
+ if (connector->display_info.bpc &&
+ connector->display_info.bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+ display_bpc = connector->display_info.bpc;
+ }
+ }
+
+ /*
+ * HDMI is either 12 or 8, so if the display lets 10bpc sneak
+ * through, clamp it down. (Note: >12bpc will be caught below.)
+ */
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+ if (display_bpc > 8 && display_bpc < 12) {
+ DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
+ display_bpc = 12;
+ } else {
+ DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
+ display_bpc = 8;
+ }
+ }
+ }
+
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+ display_bpc = 6;
+ }
+
+ /*
+ * We could just drive the pipe at the highest bpc all the time and
+ * enable dithering as needed, but that costs bandwidth. So choose
+ * the minimum value that expresses the full color range of the fb but
+ * also stays within the max display bpc discovered above.
+ */
+
+ switch (crtc->fb->depth) {
+ case 8:
+ bpc = 8; /* since we go through a colormap */
+ break;
+ case 15:
+ case 16:
+ bpc = 6; /* min is 18bpp */
+ break;
+ case 24:
+ bpc = 8;
+ break;
+ case 30:
+ bpc = 10;
+ break;
+ case 48:
+ bpc = 12;
+ break;
+ default:
+ DRM_DEBUG("unsupported depth, assuming 24 bits\n");
+ bpc = min((unsigned int)8, display_bpc);
+ break;
+ }
+
+ display_bpc = min(display_bpc, bpc);
+
+ DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
+ bpc, display_bpc);
+
+ *pipe_bpp = display_bpc * 3;
+
+ return display_bpc != bpc;
+}
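+
+ /*
+ * Example walk of the logic above: with a 24-bit framebuffer the fb
+ * asks for bpc = 8, but an LVDS panel whose A3 power mode is not
+ * LVDS_A3_POWER_UP clamps display_bpc to 6.  The result is
+ * *pipe_bpp = 6 * 3 = 18 and a return value of true, i.e. the pipe
+ * must dither 8 bpc content down to the 6 bpc panel.
+ */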
+
+static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int refclk;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ refclk / 1000);
+ } else if (!IS_GEN2(dev)) {
+ refclk = 96000;
+ } else {
+ refclk = 48000;
+ }
+
+ return refclk;
+}
+
+static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock)
+{
+ /* SDVO TV has fixed PLL values that depend on its clock range;
+ this mirrors the VBIOS setting. */
+ if (adjusted_mode->clock >= 100000
+ && adjusted_mode->clock < 140500) {
+ clock->p1 = 2;
+ clock->p2 = 10;
+ clock->n = 3;
+ clock->m1 = 16;
+ clock->m2 = 8;
+ } else if (adjusted_mode->clock >= 140500
+ && adjusted_mode->clock <= 200000) {
+ clock->p1 = 1;
+ clock->p2 = 10;
+ clock->n = 6;
+ clock->m1 = 12;
+ clock->m2 = 8;
+ }
+}
+
+static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
+ intel_clock_t *clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 fp, fp2 = 0;
+
+ if (IS_PINEVIEW(dev)) {
+ fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
+ if (reduced_clock)
+ fp2 = (1 << reduced_clock->n) << 16 |
+ reduced_clock->m1 << 8 | reduced_clock->m2;
+ } else {
+ fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
+ if (reduced_clock)
+ fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
+ reduced_clock->m2;
+ }
+
+ I915_WRITE(FP0(pipe), fp);
+
+ intel_crtc->lowfreq_avail = false;
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ reduced_clock && i915_powersave) {
+ I915_WRITE(FP1(pipe), fp2);
+ intel_crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(FP1(pipe), fp);
+ }
+}
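+
+ /*
+ * Layout example for the non-Pineview FP encoding above: the SDVO TV
+ * divisors n = 6, m1 = 12, m2 = 8 from i9xx_adjust_sdvo_tv_clock()
+ * pack into fp = 6 << 16 | 12 << 8 | 8 = 0x00060c08.  On Pineview the
+ * N field is instead encoded as (1 << n).
+ */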
+
+static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int refclk, num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll, dspcntr, pipeconf, vsyncshift;
+ bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ const intel_limit_t *limit;
+ int ret;
+ u32 temp;
+ u32 lvds_sync = 0;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_DVO:
+ is_dvo = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ refclk = i9xx_get_refclk(crtc, num_connectors);
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
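+ /*
+ * Illustrative numbers only: with refclk = 96000 kHz and divisors
+ * m1 = 10, m2 = 8, n = 4, p1 = 2, p2 = 10 the equation yields
+ * 96000 * (5 * 12 + 10) / 6 / 2 / 10 = 56000 kHz, i.e. a 56 MHz
+ * dot clock; find_pll searches for the set closest to the target.
+ */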
+ limit = intel_limit(crtc, refclk);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ &clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In such case we will disable the LVDS
+ * downclock feature.
+ */
+ has_reduced_clock = limit->find_pll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk,
+ &clock,
+ &reduced_clock);
+ }
+
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+
+ i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
+ &reduced_clock : NULL);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (!IS_GEN2(dev)) {
+ if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (pixel_multiplier > 1) {
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ }
+ if (is_dp)
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev) && has_reduced_clock)
+ dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+ } else {
+ if (is_lvds) {
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock.p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock.p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+ }
+
+ if (is_sdvo && is_tv)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (is_tv)
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ /* setup pipeconf */
+ pipeconf = I915_READ(PIPECONF(pipe));
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
+ * core speed.
+ *
+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
+ * pipe == 0 check?
+ */
+ if (mode->clock >
+ dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
+ else
+ pipeconf &= ~PIPECONF_DOUBLE_WIDE;
+ }
+
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+ }
+ }
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ drm_mode_debug_printmodeline(mode);
+
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+
+ POSTING_READ(DPLL(pipe));
+ DELAY(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ temp = I915_READ(LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock.p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+ /* set the dithering flag on LVDS as needed */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ lvds_sync |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ lvds_sync |= LVDS_VSYNC_POLARITY;
+ if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
+ != lvds_sync) {
+ char flags[2] = "-+";
+ DRM_INFO("Changing LVDS panel from "
+ "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
+ flags[!(temp & LVDS_HSYNC_POLARITY)],
+ flags[!(temp & LVDS_VSYNC_POLARITY)],
+ flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
+ flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ temp |= lvds_sync;
+ }
+ I915_WRITE(LVDS, temp);
+ }
+
+ if (is_dp) {
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ }
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ DELAY(150);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+ }
+
+ if (HAS_PIPE_CXSR(dev)) {
+ if (intel_crtc->lowfreq_avail) {
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+ pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+ } else {
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ }
+ }
+
+ pipeconf &= ~PIPECONF_INTERLACE_MASK;
+ if (!IS_GEN2(dev) &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ vsyncshift = adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal/2;
+ } else {
+ pipeconf |= PIPECONF_PROGRESSIVE;
+ vsyncshift = 0;
+ }
+
+ if (!IS_GEN3(dev))
+ I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
+
+ I915_WRITE(HTOTAL(pipe),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(HBLANK(pipe),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(pipe),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ I915_WRITE(VTOTAL(pipe),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(pipe),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(pipe),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((mode->vdisplay - 1) << 16) |
+ (mode->hdisplay - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+
+ I915_WRITE(PIPECONF(pipe), pipeconf);
+ POSTING_READ(PIPECONF(pipe));
+ intel_enable_pipe(dev_priv, pipe, false);
+
+ intel_wait_for_vblank(dev, pipe);
+
+ I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ ret = intel_pipe_set_base(crtc, x, y, old_fb);
+
+ intel_update_watermarks(dev);
+
+ return ret;
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void ironlake_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ u32 temp;
+ bool has_lvds = false;
+ bool has_cpu_edp = false;
+ bool has_pch_edp = false;
+ bool has_panel = false;
+ bool has_ck505 = false;
+ bool can_ssc = false;
+
+ /* We need to take the global config into account */
+ list_for_each_entry(encoder, &mode_config->encoder_list,
+ base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ has_panel = true;
+ has_lvds = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ has_panel = true;
+ if (intel_encoder_is_pch_edp(&encoder->base))
+ has_pch_edp = true;
+ else
+ has_cpu_edp = true;
+ break;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev)) {
+ has_ck505 = dev_priv->display_clock_mode;
+ can_ssc = has_ck505;
+ } else {
+ has_ck505 = false;
+ can_ssc = true;
+ }
+
+ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
+ has_panel, has_lvds, has_pch_edp, has_cpu_edp,
+ has_ck505);
+
+ /* Ironlake: try to set up the display reference clock before
+ * enabling the DPLLs. This is only under the driver's control
+ * from PCH B stepping onward; earlier chipset steppings should
+ * ignore this setting.
+ */
+ temp = I915_READ(PCH_DREF_CONTROL);
+ /* Always enable nonspread source */
+ temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+
+ if (has_ck505)
+ temp |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ temp |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+ if (has_panel) {
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_ENABLE;
+
+ /* SSC must be turned on before enabling the CPU output */
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ DRM_DEBUG_KMS("Using SSC on panel\n");
+ temp |= DREF_SSC1_ENABLE;
+ } else
+ temp &= ~DREF_SSC1_ENABLE;
+
+ /* Get SSC going before enabling the outputs */
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ DELAY(200);
+
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ DRM_DEBUG_KMS("Using SSC on eDP\n");
+ temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ } else
+ temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else
+ temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ DELAY(200);
+ } else {
+ DRM_DEBUG_KMS("Disabling SSC entirely\n");
+
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Turn off CPU output */
+ temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ DELAY(200);
+
+ /* Turn off the SSC source */
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_DISABLE;
+
+ /* Turn off SSC1 */
+ temp &= ~ DREF_SSC1_ENABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ DELAY(200);
+ }
+}
+
+static int ironlake_get_refclk(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *edp_encoder = NULL;
+ int num_connectors = 0;
+ bool is_lvds = false;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ edp_encoder = encoder;
+ break;
+ }
+ num_connectors++;
+ }
+
+ if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ dev_priv->lvds_ssc_freq);
+ return dev_priv->lvds_ssc_freq * 1000;
+ }
+
+ return 120000;
+}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int refclk, num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ bool ok, has_reduced_clock = false, is_sdvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ struct intel_encoder *has_edp_encoder = NULL;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ const intel_limit_t *limit;
+ int ret;
+ struct fdi_m_n m_n = {0};
+ u32 temp;
+ u32 lvds_sync = 0;
+ int target_clock, pixel_multiplier, lane, link_bw, factor;
+ unsigned int pipe_bpp;
+ bool dither;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ has_edp_encoder = encoder;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ refclk = ironlake_get_refclk(crtc);
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ &clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In such case we will disable the LVDS
+ * downclock feature.
+ */
+ has_reduced_clock = limit->find_pll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk,
+ &clock,
+ &reduced_clock);
+ }
+ /* SDVO TV has fixed PLL values that depend on its clock range;
+ this mirrors the VBIOS setting. */
+ if (is_sdvo && is_tv) {
+ if (adjusted_mode->clock >= 100000
+ && adjusted_mode->clock < 140500) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 3;
+ clock.m1 = 16;
+ clock.m2 = 8;
+ } else if (adjusted_mode->clock >= 140500
+ && adjusted_mode->clock <= 200000) {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 6;
+ clock.m1 = 12;
+ clock.m2 = 8;
+ }
+ }
+
+ /* FDI link */
+ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ lane = 0;
+ /* CPU eDP doesn't require FDI link, so just set DP M/N
+ according to current link config */
+ if (has_edp_encoder &&
+ !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ target_clock = mode->clock;
+ intel_edp_link_config(has_edp_encoder,
+ &lane, &link_bw);
+ } else {
+ /* [e]DP over FDI requires target mode clock
+ instead of link clock */
+ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ target_clock = mode->clock;
+ else
+ target_clock = adjusted_mode->clock;
+
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
+ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
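+ /*
+ * Nominally intel_fdi_link_freq() returns 27 (2.7 GHz in 100 MHz
+ * units), so link_bw = 27 * 100000 / 10 = 270000: 270 MHz worth
+ * of pixel data per lane once the 8b/10b overhead is stripped.
+ */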
+ }
+
+ /* determine panel color depth */
+ temp = I915_READ(PIPECONF(pipe));
+ temp &= ~PIPE_BPC_MASK;
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
+ switch (pipe_bpp) {
+ case 18:
+ temp |= PIPE_6BPC;
+ break;
+ case 24:
+ temp |= PIPE_8BPC;
+ break;
+ case 30:
+ temp |= PIPE_10BPC;
+ break;
+ case 36:
+ temp |= PIPE_12BPC;
+ break;
+ default:
+ printf("intel_choose_pipe_bpp returned invalid value %d\n",
+ pipe_bpp);
+ temp |= PIPE_8BPC;
+ pipe_bpp = 24;
+ break;
+ }
+
+ intel_crtc->bpp = pipe_bpp;
+ I915_WRITE(PIPECONF(pipe), temp);
+
+ if (!lane) {
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
+ lane = bps / (link_bw * 8) + 1;
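+ /*
+ * Example with illustrative numbers: a 154000 kHz target at 24bpp
+ * gives bps = 154000 * 24 * 21 / 20 = 3880800; with link_bw =
+ * 270000 that is 3880800 / (270000 * 8) = 1 in integer math, so
+ * two lanes are used.
+ */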
+ }
+
+ intel_crtc->fdi_lanes = lane;
+
+ if (pixel_multiplier > 1)
+ link_bw *= pixel_multiplier;
+ ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
+ &m_n);
+
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ /* Enable autotuning of the PLL clock (if permissible) */
+ factor = 21;
+ if (is_lvds) {
+ if ((intel_panel_use_ssc(dev_priv) &&
+ dev_priv->lvds_ssc_freq == 100) ||
+ (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+ factor = 25;
+ } else if (is_sdvo && is_tv)
+ factor = 20;
+
+ if (clock.m < factor * clock.n)
+ fp |= FP_CB_TUNE;
+
+ dpll = 0;
+
+ if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (pixel_multiplier > 1) {
+ dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+ }
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ }
+ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+ switch (clock.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (is_sdvo && is_tv)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (is_tv)
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ /* setup pipeconf */
+ pipeconf = I915_READ(PIPECONF(pipe));
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+ drm_mode_debug_printmodeline(mode);
+
+ /* PCH eDP needs FDI, but CPU eDP does not */
+ if (!intel_crtc->no_pll) {
+ if (!has_edp_encoder ||
+ intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ I915_WRITE(PCH_FP0(pipe), fp);
+ I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+
+ POSTING_READ(PCH_DPLL(pipe));
+ DELAY(150);
+ }
+ } else {
+ if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
+ fp == I915_READ(PCH_FP0(0))) {
+ intel_crtc->use_pll_a = true;
+ DRM_DEBUG_KMS("using pipe a dpll\n");
+ } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
+ fp == I915_READ(PCH_FP0(1))) {
+ intel_crtc->use_pll_a = false;
+ DRM_DEBUG_KMS("using pipe b dpll\n");
+ } else {
+ DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
+ return -EINVAL;
+ }
+ }
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ temp = I915_READ(PCH_LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1)
+ temp |= LVDS_PIPEB_SELECT;
+ else
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock.p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ lvds_sync |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ lvds_sync |= LVDS_VSYNC_POLARITY;
+ if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
+ != lvds_sync) {
+ char flags[2] = "-+";
+ DRM_INFO("Changing LVDS panel from "
+ "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
+ flags[!(temp & LVDS_HSYNC_POLARITY)],
+ flags[!(temp & LVDS_VSYNC_POLARITY)],
+ flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
+ flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ temp |= lvds_sync;
+ }
+ I915_WRITE(PCH_LVDS, temp);
+ }
+
+ pipeconf &= ~PIPECONF_DITHER_EN;
+ pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
+ if ((is_lvds && dev_priv->lvds_dither) || dither) {
+ pipeconf |= PIPECONF_DITHER_EN;
+ pipeconf |= PIPECONF_DITHER_TYPE_SP;
+ }
+ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ } else {
+ /* For non-DP output, clear any trans DP clock recovery setting.*/
+ I915_WRITE(TRANSDATA_M1(pipe), 0);
+ I915_WRITE(TRANSDATA_N1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_N1(pipe), 0);
+ }
+
+ if (!intel_crtc->no_pll &&
+ (!has_edp_encoder ||
+ intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
+ I915_WRITE(PCH_DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(PCH_DPLL(pipe));
+ DELAY(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(PCH_DPLL(pipe), dpll);
+ }
+
+ intel_crtc->lowfreq_avail = false;
+ if (!intel_crtc->no_pll) {
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(PCH_FP1(pipe), fp2);
+ intel_crtc->lowfreq_avail = true;
+ if (HAS_PIPE_CXSR(dev)) {
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+ pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+ }
+ } else {
+ I915_WRITE(PCH_FP1(pipe), fp);
+ if (HAS_PIPE_CXSR(dev)) {
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ }
+ }
+ }
+
+ pipeconf &= ~PIPECONF_INTERLACE_MASK;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ pipeconf |= PIPECONF_INTERLACED_ILK;
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ I915_WRITE(VSYNCSHIFT(pipe),
+ adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal/2);
+ } else {
+ pipeconf |= PIPECONF_PROGRESSIVE;
+ I915_WRITE(VSYNCSHIFT(pipe), 0);
+ }
+
+ I915_WRITE(HTOTAL(pipe),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(HBLANK(pipe),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(pipe),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ I915_WRITE(VTOTAL(pipe),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(pipe),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(pipe),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+ /* pipesrc controls the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+
+ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+
+ if (has_edp_encoder &&
+ !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+ }
+
+ I915_WRITE(PIPECONF(pipe), pipeconf);
+ POSTING_READ(PIPECONF(pipe));
+
+ intel_wait_for_vblank(dev, pipe);
+
+ I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, old_fb);
+
+ intel_update_watermarks(dev);
+
+ return ret;
+}
+
+static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int ret;
+
+ drm_vblank_pre_modeset(dev, pipe);
+
+ ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
+ x, y, old_fb);
+ drm_vblank_post_modeset(dev, pipe);
+
+ if (ret)
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+ else
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+
+ return ret;
+}
+
+static bool intel_eld_uptodate(struct drm_connector *connector,
+ int reg_eldv, uint32_t bits_eldv,
+ int reg_elda, uint32_t bits_elda,
+ int reg_edid)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t i;
+
+ i = I915_READ(reg_eldv);
+ i &= bits_eldv;
+
+ if (!eld[0])
+ return !i;
+
+ if (!i)
+ return false;
+
+ i = I915_READ(reg_elda);
+ i &= ~bits_elda;
+ I915_WRITE(reg_elda, i);
+
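+ /* eld[2] holds the baseline ELD length in dwords; compare word by word */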
+ for (i = 0; i < eld[2]; i++)
+ if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ return false;
+
+ return true;
+}
+
+static void g4x_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t len;
+ uint32_t i;
+
+ i = I915_READ(G4X_AUD_VID_DID);
+
+ if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
+ i = I915_READ(G4X_AUD_CNTL_ST);
+ i &= ~(eldv | G4X_ELD_ADDR);
+ len = (i >> 9) & 0x1f; /* ELD buffer size */
+ I915_WRITE(G4X_AUD_CNTL_ST, i);
+
+ if (!eld[0])
+ return;
+
+ if (eld[2] < (uint8_t)len)
+ len = eld[2];
+ DRM_DEBUG_KMS("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+ i = I915_READ(G4X_AUD_CNTL_ST);
+ i |= eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, i);
+}
+
+static void ironlake_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int hdmiw_hdmiedid;
+ int aud_config;
+ int aud_cntl_st;
+ int aud_cntrl_st2;
+
+ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+ aud_config = IBX_AUD_CONFIG_A;
+ aud_cntl_st = IBX_AUD_CNTL_ST_A;
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else {
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+ aud_config = CPT_AUD_CONFIG_A;
+ aud_cntl_st = CPT_AUD_CNTL_ST_A;
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ i = to_intel_crtc(crtc)->pipe;
+ hdmiw_hdmiedid += i * 0x100;
+ aud_cntl_st += i * 0x100;
+ aud_config += i * 0x100;
+
+ DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));
+
+ i = I915_READ(aud_cntl_st);
+ i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
+ if (!i) {
+ DRM_DEBUG_KMS("Audio directed to unknown port\n");
+ /* operate blindly on all ports */
+ eldv = IBX_ELD_VALIDB;
+ eldv |= IBX_ELD_VALIDB << 4;
+ eldv |= IBX_ELD_VALIDB << 8;
+ } else {
+ DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i);
+ eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+
+ /* 84 bytes of hw ELD buffer */
+ len = 21;
+ if (eld[2] < (uint8_t)len)
+ len = eld[2];
+ DRM_DEBUG_KMS("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+}
+
+void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ connector = drm_select_eld(encoder, mode);
+ if (!connector)
+ return;
+
+ DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ connector->encoder->base.id,
+ drm_get_encoder_name(connector->encoder));
+
+ connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+ if (dev_priv->display.write_eld)
+ dev_priv->display.write_eld(connector, crtc);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int palreg = PALETTE(intel_crtc->pipe);
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled || !intel_crtc->active)
+ return;
+
+ /* use legacy palette for Ironlake */
+ if (HAS_PCH_SPLIT(dev))
+ palreg = LGC_PALETTE(intel_crtc->pipe);
+
+ for (i = 0; i < 256; i++) {
+ I915_WRITE(palreg + 4 * i,
+ (intel_crtc->lut_r[i] << 16) |
+ (intel_crtc->lut_g[i] << 8) |
+ intel_crtc->lut_b[i]);
+ }
+}
+
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool visible = base != 0;
+ u32 cntl;
+
+ if (intel_crtc->cursor_visible == visible)
+ return;
+
+ cntl = I915_READ(_CURACNTR);
+ if (visible) {
+ /* On these chipsets we can only modify the base whilst
+ * the cursor is disabled.
+ */
+ I915_WRITE(_CURABASE, base);
+
+ cntl &= ~(CURSOR_FORMAT_MASK);
+ /* XXX width must be 64, stride 256 => 0x00 << 28 */
+ cntl |= CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB;
+ } else
+ cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+ I915_WRITE(_CURACNTR, cntl);
+
+ intel_crtc->cursor_visible = visible;
+}
+
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool visible = base != 0;
+
+ if (intel_crtc->cursor_visible != visible) {
+ uint32_t cntl = I915_READ(CURCNTR(pipe));
+ if (base) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ cntl |= pipe << 28; /* Connect to correct pipe */
+ } else {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ }
+ I915_WRITE(CURCNTR(pipe), cntl);
+
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(CURBASE(pipe), base);
+}
+
+static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool visible = base != 0;
+
+ if (intel_crtc->cursor_visible != visible) {
+ uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
+ if (base) {
+ cntl &= ~CURSOR_MODE;
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ } else {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ }
+ I915_WRITE(CURCNTR_IVB(pipe), cntl);
+
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(CURBASE_IVB(pipe), base);
+}
+
+/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
+ bool on)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int x = intel_crtc->cursor_x;
+ int y = intel_crtc->cursor_y;
+ u32 base, pos;
+ bool visible;
+
+ pos = 0;
+
+ if (on && crtc->enabled && crtc->fb) {
+ base = intel_crtc->cursor_addr;
+ if (x > (int) crtc->fb->width)
+ base = 0;
+
+ if (y > (int) crtc->fb->height)
+ base = 0;
+ } else
+ base = 0;
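+ /* A base address of 0 doubles as the "cursor hidden" sentinel. */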
+
+ if (x < 0) {
+ if (x + intel_crtc->cursor_width < 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ if (y + intel_crtc->cursor_height < 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ visible = base != 0;
+ if (!visible && !intel_crtc->cursor_visible)
+ return;
+
+ if (IS_IVYBRIDGE(dev)) {
+ I915_WRITE(CURPOS_IVB(pipe), pos);
+ ivb_update_cursor(crtc, base);
+ } else {
+ I915_WRITE(CURPOS(pipe), pos);
+ if (IS_845G(dev) || IS_I865G(dev))
+ i845_update_cursor(crtc, base);
+ else
+ i9xx_update_cursor(crtc, base);
+ }
+
+ if (visible)
+ intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
+}
+
+static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj;
+ uint32_t addr;
+ int ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!handle) {
+ DRM_DEBUG_KMS("cursor off\n");
+ addr = 0;
+ obj = NULL;
+ DRM_LOCK(dev);
+ goto finish;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ DRM_ERROR("we currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
+
+ if (obj->base.size < width * height * 4) {
+ DRM_ERROR("buffer is to small\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* we only need to pin inside GTT if cursor is non-phy */
+ DRM_LOCK(dev);
+ if (!dev_priv->info->cursor_needs_physical) {
+ if (obj->tiling_mode) {
+ DRM_ERROR("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ goto fail_locked;
+ }
+
+ ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_locked;
+ }
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret) {
+ DRM_ERROR("failed to release fence for cursor\n");
+ goto fail_unpin;
+ }
+
+ addr = obj->gtt_offset;
+ } else {
+ int align = IS_I830(dev) ? 16 * 1024 : 256;
+ ret = i915_gem_attach_phys_object(dev, obj,
+ (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
+ align);
+ if (ret) {
+ DRM_ERROR("failed to attach phys object\n");
+ goto fail_locked;
+ }
+ addr = obj->phys_obj->handle->busaddr;
+ }
+
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, (height << 12) | width);
+
+ finish:
+ if (intel_crtc->cursor_bo) {
+ if (dev_priv->info->cursor_needs_physical) {
+ if (intel_crtc->cursor_bo != obj)
+ i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
+ } else
+ i915_gem_object_unpin(intel_crtc->cursor_bo);
+ drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
+ }
+
+ DRM_UNLOCK(dev);
+
+ intel_crtc->cursor_addr = addr;
+ intel_crtc->cursor_bo = obj;
+ intel_crtc->cursor_width = width;
+ intel_crtc->cursor_height = height;
+
+ intel_crtc_update_cursor(crtc, true);
+
+ return 0;
+fail_unpin:
+ i915_gem_object_unpin(obj);
+fail_locked:
+ DRM_UNLOCK(dev);
+fail:
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return ret;
+}
+
+static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->cursor_x = x;
+ intel_crtc->cursor_y = y;
+
+ intel_crtc_update_cursor(crtc, true);
+
+ return 0;
+}
+
+/** Sets the color ramps on behalf of RandR */
+void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->lut_r[regno] = red >> 8;
+ intel_crtc->lut_g[regno] = green >> 8;
+ intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ *red = intel_crtc->lut_r[regno] << 8;
+ *green = intel_crtc->lut_g[regno] << 8;
+ *blue = intel_crtc->lut_b[regno] << 8;
+}
+
+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ int end = (start + size > 256) ? 256 : start + size, i;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ for (i = start; i < end; i++) {
+ intel_crtc->lut_r[i] = red[i] >> 8;
+ intel_crtc->lut_g[i] = green[i] >> 8;
+ intel_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ intel_crtc_load_lut(crtc);
+}
+
+/**
+ * Get a pipe with a simple mode set on it for doing load-based monitor
+ * detection.
+ *
+ * It will be up to the load-detect code to adjust the pipe as appropriate for
+ * its requirements. The pipe will be connected to no other encoders.
+ *
+ * Currently this code will only succeed if there is a pipe with no encoders
+ * configured for it. In the future, it could choose to temporarily disable
+ * some outputs to free up a pipe for its use.
+ *
+ * \return true if a pipe was found and set up, false if no pipes are available.
+ */
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static struct drm_display_mode load_detect_mode = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+static int
+intel_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj,
+ struct drm_framebuffer **res)
+{
+ struct intel_framebuffer *intel_fb;
+ int ret;
+
+ intel_fb = malloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ free(intel_fb, DRM_MEM_KMS);
+ return (ret);
+ }
+
+ *res = &intel_fb->base;
+ return (0);
+}
+
+static u32
+intel_framebuffer_pitch_for_width(int width, int bpp)
+{
+ u32 pitch = howmany(width * bpp, 8);
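+ /* e.g. width = 1920 at bpp = 32: howmany(1920 * 32, 8) = 7680 bytes,
+ * already a multiple of 64, so the pitch stays 7680. */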
+ return roundup2(pitch, 64);
+}
+
+static u32
+intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
+{
+ u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
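+ /* e.g. 1200 lines at pitch 7680: 9216000 bytes, which is already
+ * a multiple of PAGE_SIZE on a 4K-page system. */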
+ return roundup2(pitch * mode->vdisplay, PAGE_SIZE);
+}
+
+static int
+intel_framebuffer_create_for_mode(struct drm_device *dev,
+ struct drm_display_mode *mode, int depth, int bpp,
+ struct drm_framebuffer **res)
+{
+ struct drm_i915_gem_object *obj;
+ struct drm_mode_fb_cmd2 mode_cmd;
+
+ obj = i915_gem_alloc_object(dev,
+ intel_framebuffer_size_for_mode(mode, bpp));
+ if (obj == NULL)
+ return (-ENOMEM);
+
+ mode_cmd.width = mode->hdisplay;
+ mode_cmd.height = mode->vdisplay;
+ mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+ bpp);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+ return (intel_framebuffer_create(dev, &mode_cmd, obj, res));
+}
+
+static int
+mode_fits_in_fbdev(struct drm_device *dev,
+ struct drm_display_mode *mode, struct drm_framebuffer **res)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_framebuffer *fb;
+
+ if (dev_priv->fbdev == NULL) {
+ *res = NULL;
+ return (0);
+ }
+
+ obj = dev_priv->fbdev->ifb.obj;
+ if (obj == NULL) {
+ *res = NULL;
+ return (0);
+ }
+
+ fb = &dev_priv->fbdev->ifb.base;
+ if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+ fb->bits_per_pixel)) {
+ *res = NULL;
+ return (0);
+ }
+
+ if (obj->base.size < mode->vdisplay * fb->pitches[0]) {
+ *res = NULL;
+ return (0);
+ }
+
+ *res = fb;
+ return (0);
+}
+
+bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old)
+{
+ struct intel_crtc *intel_crtc;
+ struct drm_crtc *possible_crtc;
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = NULL;
+ struct drm_device *dev = encoder->dev;
+ struct drm_framebuffer *old_fb;
+ int i = -1, r;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ encoder->base.id, drm_get_encoder_name(encoder));
+
+ /*
+ * Algorithm gets a little messy:
+ *
+ * - if the connector already has an assigned crtc, use it (but make
+ * sure it's on first)
+ *
+ * - try to find the first unused crtc that can drive this connector,
+ * and use that if we find one
+ */
+
+ /* See if we already have a CRTC for this connector */
+ if (encoder->crtc) {
+ crtc = encoder->crtc;
+
+ intel_crtc = to_intel_crtc(crtc);
+ old->dpms_mode = intel_crtc->dpms_mode;
+ old->load_detect_temp = false;
+
+ /* Make sure the crtc and connector are running */
+ if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+
+ crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+ }
+
+ return true;
+ }
+
+ /* Find an unused one (if possible) */
+ list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
+ i++;
+ if (!(encoder->possible_crtcs & (1 << i)))
+ continue;
+ if (!possible_crtc->enabled) {
+ crtc = possible_crtc;
+ break;
+ }
+ }
+
+ /*
+ * If we didn't find an unused CRTC, don't use any.
+ */
+ if (!crtc) {
+ DRM_DEBUG_KMS("no pipe available for load-detect\n");
+ return false;
+ }
+
+ encoder->crtc = crtc;
+ connector->encoder = encoder;
+
+ intel_crtc = to_intel_crtc(crtc);
+ old->dpms_mode = intel_crtc->dpms_mode;
+ old->load_detect_temp = true;
+ old->release_fb = NULL;
+
+ if (!mode)
+ mode = &load_detect_mode;
+
+ old_fb = crtc->fb;
+
+ /* We need a framebuffer large enough to accommodate all accesses
+ * that the plane may generate whilst we perform load detection.
+ * We cannot rely on the fbcon being present (we get called during
+ * its initialisation to detect all boot displays, and it may not
+ * even exist) or on it being large enough to satisfy the
+ * requested mode.
+ */
+ r = mode_fits_in_fbdev(dev, mode, &crtc->fb);
+ if (crtc->fb == NULL) {
+ DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
+ r = intel_framebuffer_create_for_mode(dev, mode, 24, 32,
+ &crtc->fb);
+ old->release_fb = crtc->fb;
+ } else
+ DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
+ if (r != 0) {
+ DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+ crtc->fb = old_fb;
+ return false;
+ }
+
+ if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
+ DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+ if (old->release_fb)
+ old->release_fb->funcs->destroy(old->release_fb);
+ crtc->fb = old_fb;
+ return false;
+ }
+
+ /* let the connector get through one full cycle before testing */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ return true;
+}
+
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
+ struct intel_load_detect_pipe *old)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ encoder->base.id, drm_get_encoder_name(encoder));
+
+ if (old->load_detect_temp) {
+ connector->encoder = NULL;
+ drm_helper_disable_unused_functions(dev);
+
+ if (old->release_fb)
+ old->release_fb->funcs->destroy(old->release_fb);
+
+ return;
+ }
+
+ /* Switch crtc and encoder back off if necessary */
+ if (old->dpms_mode != DRM_MODE_DPMS_ON) {
+ encoder_funcs->dpms(encoder, old->dpms_mode);
+ crtc_funcs->dpms(crtc, old->dpms_mode);
+ }
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll = I915_READ(DPLL(pipe));
+ u32 fp;
+ intel_clock_t clock;
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = I915_READ(FP0(pipe));
+ else
+ fp = I915_READ(FP1(pipe));
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ if (IS_PINEVIEW(dev)) {
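+ /* Pineview stores N as (1 << n), so ffs() recovers the exponent. */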
+ clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ } else {
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ }
+
+ if (!IS_GEN2(dev)) {
+ if (IS_PINEVIEW(dev))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+ else
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ switch (dpll & DPLL_MODE_MASK) {
+ case DPLLB_MODE_DAC_SERIAL:
+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+ 5 : 10;
+ break;
+ case DPLLB_MODE_LVDS:
+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+ 7 : 14;
+ break;
+ default:
+ DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ return 0;
+ }
+
+ /* XXX: Handle the 100Mhz refclk */
+ intel_clock(dev, 96000, &clock);
+ } else {
+ bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+
+ if (is_lvds) {
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ intel_clock(dev, 66000, &clock);
+ } else
+ intel_clock(dev, 48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ intel_clock(dev, 48000, &clock);
+ }
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ struct drm_display_mode *mode;
+ int htot = I915_READ(HTOTAL(pipe));
+ int hsync = I915_READ(HSYNC(pipe));
+ int vtot = I915_READ(VTOTAL(pipe));
+ int vsync = I915_READ(VSYNC(pipe));
+
+ mode = malloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ mode->clock = intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ return mode;
+}
+
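+/* 500ms of idle time, converted to callout ticks (ticks = ms * hz / 1000) */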
+#define GPU_IDLE_TIMEOUT (500 /* ms */ * hz / 1000)
+
+/* When this timer fires, we've been idle for a while */
+static void intel_gpu_idle_timer(void *arg)
+{
+ struct drm_device *dev = arg;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!list_empty(&dev_priv->mm.active_list)) {
+ /* Still processing requests, so just re-arm the timer. */
+ callout_schedule(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT);
+ return;
+ }
+
+ dev_priv->busy = false;
+ taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
+}
+
+#define CRTC_IDLE_TIMEOUT (1000 /* ms */ * hz / 1000)
+
+static void intel_crtc_idle_timer(void *arg)
+{
+ struct intel_crtc *intel_crtc = arg;
+ struct drm_crtc *crtc = &intel_crtc->base;
+ drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+ struct intel_framebuffer *intel_fb;
+
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ if (intel_fb && intel_fb->obj->active) {
+ /* The framebuffer is still being accessed by the GPU. */
+ callout_schedule(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT);
+ return;
+ }
+
+ intel_crtc->busy = false;
+ taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
+}
+
+static void intel_increase_pllclock(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = DPLL(pipe);
+ int dpll;
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ dpll = I915_READ(dpll_reg);
+ if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+ DRM_DEBUG_DRIVER("upclocking LVDS\n");
+
+ assert_panel_unlocked(dev_priv, pipe);
+
+ dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ intel_wait_for_vblank(dev, pipe);
+
+ dpll = I915_READ(dpll_reg);
+ if (dpll & DISPLAY_RATE_SELECT_FPA1)
+ DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+ }
+
+ /* Schedule downclock */
+ callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT,
+ intel_crtc_idle_timer, intel_crtc);
+}
+
+static void intel_decrease_pllclock(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ /*
+ * Since this is called by a timer, we should never get here in
+ * the manual case.
+ */
+ if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = DPLL(pipe);
+ u32 dpll;
+
+ DRM_DEBUG_DRIVER("downclocking LVDS\n");
+
+ assert_panel_unlocked(dev_priv, pipe);
+
+ dpll = I915_READ(dpll_reg);
+ dpll |= DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ intel_wait_for_vblank(dev, pipe);
+ dpll = I915_READ(dpll_reg);
+ if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
+ DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
+ }
+}
+
+/**
+ * intel_idle_update - adjust clocks for idleness
+ * @work: work struct
+ *
+ * Either the GPU or display (or both) went idle. Check the busy status
+ * here and adjust the CRTC and GPU clocks as necessary.
+ */
+static void intel_idle_update(void *arg, int pending)
+{
+ drm_i915_private_t *dev_priv = arg;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+
+ if (!i915_powersave)
+ return;
+
+ DRM_LOCK(dev);
+
+ i915_update_gfx_val(dev_priv);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ /* Skip inactive CRTCs */
+ if (!crtc->fb)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+ if (!intel_crtc->busy)
+ intel_decrease_pllclock(crtc);
+ }
+
+ DRM_UNLOCK(dev);
+}
+
+/**
+ * intel_mark_busy - mark the GPU and possibly the display busy
+ * @dev: drm device
+ * @obj: object we're operating on
+ *
+ * Callers can use this function to indicate that the GPU is busy processing
+ * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
+ * buffer), we'll also mark the display as busy, so we know to increase its
+ * clock frequency.
+ */
+void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL;
+ struct intel_framebuffer *intel_fb;
+ struct intel_crtc *intel_crtc;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ if (!dev_priv->busy)
+ dev_priv->busy = true;
+ else
+ callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
+ intel_gpu_idle_timer, dev);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (!crtc->fb)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ if (intel_fb->obj == obj) {
+ if (!intel_crtc->busy) {
+ /* Non-busy -> busy, upclock */
+ intel_increase_pllclock(crtc);
+ intel_crtc->busy = true;
+ } else {
+ /* Busy -> busy, put off timer */
+ callout_reset(&intel_crtc->idle_callout,
+ CRTC_IDLE_TIMEOUT, intel_crtc_idle_timer,
+ intel_crtc);
+ }
+ }
+ }
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_unpin_work *work;
+
+ mtx_lock(&dev->event_lock);
+ work = intel_crtc->unpin_work;
+ intel_crtc->unpin_work = NULL;
+ mtx_unlock(&dev->event_lock);
+
+ if (work) {
+ taskqueue_cancel(dev_priv->tq, &work->task, NULL);
+ taskqueue_drain(dev_priv->tq, &work->task);
+ free(work, DRM_MEM_KMS);
+ }
+
+ drm_crtc_cleanup(crtc);
+
+ free(intel_crtc, DRM_MEM_KMS);
+}
+
+static void intel_unpin_work_fn(void *arg, int pending)
+{
+ struct intel_unpin_work *work = arg;
+ struct drm_device *dev;
+
+ dev = work->dev;
+ DRM_LOCK(dev);
+ intel_unpin_fb_obj(work->old_fb_obj);
+ drm_gem_object_unreference(&work->pending_flip_obj->base);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+
+ intel_update_fbc(work->dev);
+ DRM_UNLOCK(dev);
+ free(work, DRM_MEM_KMS);
+}
+
+static void do_intel_finish_page_flip(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ struct drm_i915_gem_object *obj;
+ struct drm_pending_vblank_event *e;
+ struct timeval tnow, tvbl;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ microtime(&tnow);
+
+ mtx_lock(&dev->event_lock);
+ work = intel_crtc->unpin_work;
+ if (work == NULL || !work->pending) {
+ mtx_unlock(&dev->event_lock);
+ return;
+ }
+
+ intel_crtc->unpin_work = NULL;
+
+ if (work->event) {
+ e = work->event;
+ e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+
+		/* Called before vblank count and timestamps have
+		 * been updated for the vblank interval of flip
+		 * completion? Need to increment vblank count and
+		 * add one video refresh duration to the returned
+		 * timestamp to account for this. We assume this
+		 * happened if we get called over 0.9 frame durations
+		 * after the last timestamped vblank.
+		 *
+		 * This calculation cannot be used with vrefresh rates
+		 * below 5Hz (10Hz to be on the safe side) without
+		 * promoting to 64-bit integers.
+		 */
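+		/* Worked example (illustrative): for a 60Hz mode,
+		 * framedur_ns is ~16,666,667ns, and
+		 * 10 * delta > 9 * framedur_ns is just
+		 * delta > 0.9 * framedur_ns, so the fixup triggers once
+		 * tnow trails the timestamped vblank by more than ~15ms.
+		 */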
+ if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
+ 9 * crtc->framedur_ns) {
+ e->event.sequence++;
+ tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
+ crtc->framedur_ns);
+ }
+
+ e->event.tv_sec = tvbl.tv_sec;
+ e->event.tv_usec = tvbl.tv_usec;
+
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ drm_event_wakeup(&e->base);
+ }
+
+ drm_vblank_put(dev, intel_crtc->pipe);
+
+ obj = work->old_fb_obj;
+
+ atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane);
+ if (atomic_read(&obj->pending_flip) == 0)
+ wakeup(&obj->pending_flip);
+ mtx_unlock(&dev->event_lock);
+
+ taskqueue_enqueue(dev_priv->tq, &work->task);
+
+ CTR2(KTR_DRM, "i915_flip_complete %d %p", intel_crtc->plane,
+ work->pending_flip_obj);
+}
+
+void intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ do_intel_finish_page_flip(dev, crtc);
+}
+
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
+
+ do_intel_finish_page_flip(dev, crtc);
+}
+
+void intel_prepare_page_flip(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+
+ mtx_lock(&dev->event_lock);
+ if (intel_crtc->unpin_work) {
+ if ((++intel_crtc->unpin_work->pending) > 1)
+ DRM_ERROR("Prepared flip multiple times\n");
+ } else {
+ DRM_DEBUG("preparing flip with no unpin work?\n");
+ }
+ mtx_unlock(&dev->event_lock);
+}
+
+static int intel_gen2_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long offset;
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
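+	/* Illustrative arithmetic: for an XRGB8888 fb (32bpp) with a
+	 * 7680-byte pitch shared at crtc->x = 960, crtc->y = 0, this is
+	 * 0 * 7680 + 960 * 32 / 8 = 3840 bytes into the buffer.
+	 */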
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ goto out;
+
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitches[0]);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(0); /* aux display base address, unused */
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long offset;
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ goto out;
+
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitches[0]);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto out;
+
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitches[0]);
+ OUT_RING(obj->gtt_offset | obj->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto out;
+
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitches[0] | obj->tiling_mode);
+ OUT_RING(obj->gtt_offset);
+
+ /* Contrary to the suggestions in the documentation,
+ * "Enable Panel Fitter" does not seem to be required when page
+ * flipping with a non-native mode, and worse causes a normal
+ * modeset to fail.
+ * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued. Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto out;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto out;
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+ intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
+ intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, (MI_NOOP));
+ intel_ring_advance(ring);
+out:
+ return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ return -ENODEV;
+}
+
+static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ int ret;
+
+ work = malloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ work->event = event;
+ work->dev = crtc->dev;
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ work->old_fb_obj = intel_fb->obj;
+ TASK_INIT(&work->task, 0, intel_unpin_work_fn, work);
+
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto free_work;
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ mtx_lock(&dev->event_lock);
+ if (intel_crtc->unpin_work) {
+ mtx_unlock(&dev->event_lock);
+ free(work, DRM_MEM_KMS);
+ drm_vblank_put(dev, intel_crtc->pipe);
+
+ DRM_DEBUG("flip queue: crtc already busy\n");
+ return -EBUSY;
+ }
+ intel_crtc->unpin_work = work;
+ mtx_unlock(&dev->event_lock);
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ DRM_LOCK(dev);
+
+ /* Reference the objects for the scheduled work. */
+ drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_gem_object_reference(&obj->base);
+
+ crtc->fb = fb;
+
+ work->pending_flip_obj = obj;
+
+ work->enable_stall_check = true;
+
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+ atomic_set_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
+
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ if (ret)
+ goto cleanup_pending;
+ intel_disable_fbc(dev);
+ DRM_UNLOCK(dev);
+
+ CTR2(KTR_DRM, "i915_flip_request %d %p", intel_crtc->plane, obj);
+
+ return 0;
+
+cleanup_pending:
+ atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
+ DRM_UNLOCK(dev);
+
+ mtx_lock(&dev->event_lock);
+ intel_crtc->unpin_work = NULL;
+ mtx_unlock(&dev->event_lock);
+
+ drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
+ free(work, DRM_MEM_KMS);
+
+ return ret;
+}
+
+static void intel_sanitize_modesetting(struct drm_device *dev,
+ int pipe, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg, val;
+	int i;
+
+	/* Clear any frame start delays used for debugging left by the BIOS.
+	 * Iterate with a local index so the pipe argument is not clobbered
+	 * before the plane/pipe checks further down. */
+	for_each_pipe(i) {
+		reg = PIPECONF(i);
+		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+	}
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ /* Who knows what state these registers were left in by the BIOS or
+ * grub?
+ *
+ * If we leave the registers in a conflicting state (e.g. with the
+ * display plane reading from the other pipe than the one we intend
+ * to use) then when we attempt to teardown the active mode, we will
+ * not disable the pipes and planes in the correct order -- leaving
+ * a plane reading from a disabled pipe and possibly leading to
+ * undefined behaviour.
+ */
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+ if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
+ return;
+
+ /* This display plane is active and attached to the other CPU pipe. */
+ pipe = !pipe;
+
+ /* Disable the plane and wait for it to stop reading from the pipe. */
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
+}
+
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Reset flags back to the 'unknown' status so that they
+ * will be correctly set on the initial modeset.
+ */
+ intel_crtc->dpms_mode = -1;
+
+ /* We need to fix up any BIOS configuration that conflicts with
+ * our expectations.
+ */
+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+}
+
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .dpms = intel_crtc_dpms,
+ .mode_fixup = intel_crtc_mode_fixup,
+ .mode_set = intel_crtc_mode_set,
+ .mode_set_base = intel_pipe_set_base,
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_disable,
+};
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+ .reset = intel_crtc_reset,
+ .cursor_set = intel_crtc_cursor_set,
+ .cursor_move = intel_crtc_cursor_move,
+ .gamma_set = intel_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = intel_crtc_destroy,
+ .page_flip = intel_crtc_page_flip,
+};
+
+static void intel_crtc_init(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc;
+ int i;
+
+ intel_crtc = malloc(sizeof(struct intel_crtc) +
+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
+ for (i = 0; i < 256; i++) {
+ intel_crtc->lut_r[i] = i;
+ intel_crtc->lut_g[i] = i;
+ intel_crtc->lut_b[i] = i;
+ }
+
+ /* Swap pipes & planes for FBC on pre-965 */
+ intel_crtc->pipe = pipe;
+ intel_crtc->plane = pipe;
+ if (IS_MOBILE(dev) && IS_GEN3(dev)) {
+ DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
+ intel_crtc->plane = !pipe;
+ }
+
+ KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) &&
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL,
+ ("plane_to_crtc is already initialized"));
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+
+ intel_crtc_reset(&intel_crtc->base);
+ intel_crtc->active = true; /* force the pipe off on setup_init_config */
+ intel_crtc->bpp = 24; /* default for pre-Ironlake */
+
+ if (HAS_PCH_SPLIT(dev)) {
+ if (pipe == 2 && IS_IVYBRIDGE(dev))
+ intel_crtc->no_pll = true;
+ intel_helper_funcs.prepare = ironlake_crtc_prepare;
+ intel_helper_funcs.commit = ironlake_crtc_commit;
+ } else {
+ intel_helper_funcs.prepare = i9xx_crtc_prepare;
+ intel_helper_funcs.commit = i9xx_crtc_commit;
+ }
+
+ drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+
+ intel_crtc->busy = false;
+
+ callout_init(&intel_crtc->idle_callout, CALLOUT_MPSAFE);
+}
+
+int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
+ struct drm_mode_object *drmmode_obj;
+ struct intel_crtc *crtc;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+
+ if (!drmmode_obj) {
+ DRM_ERROR("no such CRTC id\n");
+ return -EINVAL;
+ }
+
+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ pipe_from_crtc_id->pipe = crtc->pipe;
+
+ return 0;
+}
+
+static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+{
+ struct intel_encoder *encoder;
+ int index_mask = 0;
+ int entry = 0;
+
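+	/* Bit N of the result is set when the Nth encoder in list order
+	 * has a clone_mask intersecting type_mask; this is the index-based
+	 * format drm expects in possible_clones (see intel_setup_outputs).
+	 */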
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ if (type_mask & encoder->clone_mask)
+ index_mask |= (1 << entry);
+ entry++;
+ }
+
+ return index_mask;
+}
+
+static bool has_edp_a(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_MOBILE(dev))
+ return false;
+
+ if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+ return false;
+
+ if (IS_GEN5(dev) &&
+ (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
+ return false;
+
+ return true;
+}
+
+static void intel_setup_outputs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ bool dpd_is_edp = false;
+ bool has_lvds;
+
+ has_lvds = intel_lvds_init(dev);
+ if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+ /* disable the panel fitter on everything but LVDS */
+ I915_WRITE(PFIT_CONTROL, 0);
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dpd_is_edp = intel_dpd_is_edp(dev);
+
+ if (has_edp_a(dev))
+ intel_dp_init(dev, DP_A);
+
+ if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+ intel_dp_init(dev, PCH_DP_D);
+ }
+
+ intel_crt_init(dev);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ int found;
+
+ DRM_DEBUG_KMS(
+"HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n",
+ (I915_READ(HDMIB) & PORT_DETECTED) != 0,
+ (I915_READ(PCH_DP_B) & DP_DETECTED) != 0,
+ (I915_READ(HDMIC) & PORT_DETECTED) != 0,
+ (I915_READ(HDMID) & PORT_DETECTED) != 0,
+ (I915_READ(PCH_DP_C) & DP_DETECTED) != 0,
+ (I915_READ(PCH_DP_D) & DP_DETECTED) != 0,
+ (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0);
+
+ if (I915_READ(HDMIB) & PORT_DETECTED) {
+ /* PCH SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, PCH_SDVOB);
+ if (!found)
+ intel_hdmi_init(dev, HDMIB);
+ if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
+ intel_dp_init(dev, PCH_DP_B);
+ }
+
+ if (I915_READ(HDMIC) & PORT_DETECTED)
+ intel_hdmi_init(dev, HDMIC);
+
+ if (I915_READ(HDMID) & PORT_DETECTED)
+ intel_hdmi_init(dev, HDMID);
+
+ if (I915_READ(PCH_DP_C) & DP_DETECTED)
+ intel_dp_init(dev, PCH_DP_C);
+
+ if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+ intel_dp_init(dev, PCH_DP_D);
+
+ } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+ bool found = false;
+
+ if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOB\n");
+ found = intel_sdvo_init(dev, SDVOB);
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+ intel_hdmi_init(dev, SDVOB);
+ }
+
+ if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_B\n");
+ intel_dp_init(dev, DP_B);
+ }
+ }
+
+ /* Before G4X SDVOC doesn't have its own detect register */
+
+ if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOC\n");
+ found = intel_sdvo_init(dev, SDVOC);
+ }
+
+ if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
+ if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+ intel_hdmi_init(dev, SDVOC);
+ }
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_C\n");
+ intel_dp_init(dev, DP_C);
+ }
+ }
+
+ if (SUPPORTS_INTEGRATED_DP(dev) &&
+ (I915_READ(DP_D) & DP_DETECTED)) {
+ DRM_DEBUG_KMS("probing DP_D\n");
+ intel_dp_init(dev, DP_D);
+ }
+ } else if (IS_GEN2(dev)) {
+#if 1
+ KIB_NOTYET();
+#else
+ intel_dvo_init(dev);
+#endif
+ }
+
+ if (SUPPORTS_TV(dev))
+ intel_tv_init(dev);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_clones =
+ intel_encoder_clones(dev, encoder->clone_mask);
+ }
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_init_pch_refclk(dev);
+}
+
+static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ drm_framebuffer_cleanup(fb);
+ drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
+
+ free(intel_fb, DRM_MEM_KMS);
+}
+
+static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int *handle)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+
+ return drm_gem_handle_create(file, &obj->base, handle);
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+ .destroy = intel_user_framebuffer_destroy,
+ .create_handle = intel_user_framebuffer_create_handle,
+};
+
+int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->tiling_mode == I915_TILING_Y)
+ return -EINVAL;
+
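+	/* These display engines scan out linear or X-tiled surfaces only,
+	 * hence the Y-tiling rejection above, and require a 64-byte-aligned
+	 * stride, hence the pitch check below.
+	 */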
+ if (mode_cmd->pitches[0] & 63)
+ return -EINVAL;
+
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ /* RGB formats are common across chipsets */
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_VYUY:
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format %u\n",
+ mode_cmd->pixel_format);
+ return -EINVAL;
+ }
+
+ ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+
+ drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+ intel_fb->obj = obj;
+ return 0;
+}
+
+static int
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp, struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_framebuffer **res)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+ mode_cmd->handles[0]));
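+	/* to_intel_bo() is a container_of()-style cast, so on a failed
+	 * lookup &obj->base re-adds the member offset and compares equal
+	 * to NULL; the check below detects the failure without
+	 * dereferencing obj.
+	 */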
+ if (&obj->base == NULL)
+ return (-ENOENT);
+
+ return (intel_framebuffer_create(dev, mode_cmd, obj, res));
+}
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+ .fb_create = intel_user_framebuffer_create,
+ .output_poll_changed = intel_fb_output_poll_changed,
+};
+
+static struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+ struct drm_i915_gem_object *ctx;
+ int ret;
+
+ DRM_LOCK_ASSERT(dev);
+
+ ctx = i915_gem_alloc_object(dev, 4096);
+ if (!ctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ ret = i915_gem_object_pin(ctx, 4096, true);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+
+ return ctx;
+
+err_unpin:
+ i915_gem_object_unpin(ctx);
+err_unref:
+	drm_gem_object_unreference(&ctx->base);
+	/* The caller holds the DRM lock (asserted above) and is
+	 * responsible for dropping it, so do not unlock here. */
+	return NULL;
+}
+
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+void ironlake_enable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u8 fmax, fmin, fstart, vstart;
+
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->fmax = fmax; /* IPS callback will increase this */
+ dev_priv->fstart = fstart;
+
+ dev_priv->max_delay = fstart;
+ dev_priv->min_delay = fmin;
+ dev_priv->cur_delay = fstart;
+
+ DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+ if (_intel_wait_for(dev,
+ (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10,
+ 1, "915per"))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ pause("915dsp", 1);
+
+ ironlake_set_drps(dev, fstart);
+
+ dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->last_count2 = I915_READ(0x112f4);
+ nanotime(&dev_priv->last_time2);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ ironlake_set_drps(dev, dev_priv->fstart);
+ pause("915dsp", 1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ pause("915dsp", 1);
+}
+
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 swreq;
+
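+	/* The request field sits at bit 25 (GEN6_FREQUENCY() elsewhere in
+	 * this file) and val appears to be in 50MHz units, matching the
+	 * pcu_mbox * 50 conversion in gen6_enable_rps() below.
+	 */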
+ swreq = (val & 0x3ff) << 25;
+ I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
+
+void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+ /* Complete PM interrupt masking here doesn't race with the rps work
+ * item again unmasking PM interrupts because that is using a different
+ * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+ * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+
+ mtx_lock(&dev_priv->rps_lock);
+ dev_priv->pm_iir = 0;
+ mtx_unlock(&dev_priv->rps_lock);
+
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ freq = ((div * 133333) / ((1<<post) * pre));
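+	/* Illustrative: div = 16, post = 1, pre = 1 gives
+	 * 16 * 133333 / (2 * 1) = 1066664; if the 133333 constant is the
+	 * 133.33MHz reference expressed in kHz, that is ~1.07GHz.
+	 */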
+
+ return freq;
+}
+
+void intel_init_emon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
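+		/* Weight model: val = vid^2 * (freq / 1000) * 255 /
+		 * (127*127*900). Illustrative numbers, assuming freq is in
+		 * kHz: vid = 100 and freq = 400000 give
+		 * 100*100 * 400 * 255 / 14516100 ~= 70, well inside the
+		 * u8 range checked below.
+		 */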
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
+static int intel_enable_rc6(struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /*
+ * Enable rc6 on Sandybridge if DMA remapping is disabled
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER(
+ "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
+ intel_iommu_enabled ? "true" : "false",
+ !intel_iommu_enabled ? "en" : "dis");
+ return (intel_iommu_enabled ? 0 : INTEL_RC6_ENABLE);
+ }
+ DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+}
+
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 pcu_mbox, rc6_mask = 0;
+ u32 gtfifodbg;
+ int cur_freq, min_freq, max_freq;
+ int rc6_mode;
+ int i;
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+ DRM_LOCK(dev);
+
+ /* Clear the DBG now so we don't confuse earlier errors */
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ I915_WRITE(RING_MAX_IDLE(dev_priv->rings[i].mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ rc6_mode = intel_enable_rc6(dev_priv->dev);
+ if (rc6_mode & INTEL_RC6_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ 18 << 24 |
+ 6 << 16);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
+ I915_WRITE(GEN6_RP_UP_EI, 100000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
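+	/* Pcode mailbox handshake used below: wait for GEN6_PCODE_READY to
+	 * clear, load GEN6_PCODE_DATA, write the command with
+	 * GEN6_PCODE_READY set, then wait for READY to clear again, which
+	 * signals that the firmware has consumed the request.
+	 */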
+ if (_intel_wait_for(dev,
+ (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
+ 1, "915pr1"))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE(GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (_intel_wait_for(dev,
+ (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
+ 1, "915pr2"))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+ min_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq = rp_state_cap & 0xff;
+ cur_freq = (gt_perf_status & 0xff00) >> 8;
+
+ /* Check for overclock support */
+ if (_intel_wait_for(dev,
+ (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
+ 1, "915pr3"))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+ pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+ if (_intel_wait_for(dev,
+ (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
+ 1, "915pr4"))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+ if (pcu_mbox & (1<<31)) { /* OC supported */
+ max_freq = pcu_mbox & 0xff;
+ DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ }
+
+ /* In units of 100MHz */
+ dev_priv->max_delay = max_freq;
+ dev_priv->min_delay = min_freq;
+ dev_priv->cur_delay = cur_freq;
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER,
+ GEN6_PM_MBOX_EVENT |
+ GEN6_PM_THERMAL_EVENT |
+ GEN6_PM_RP_DOWN_TIMEOUT |
+ GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_EI_EXPIRED);
+ mtx_lock(&dev_priv->rps_lock);
+ if (dev_priv->pm_iir != 0)
+ printf("pm_iir %x\n", dev_priv->pm_iir);
+ I915_WRITE(GEN6_PMIMR, 0);
+ mtx_unlock(&dev_priv->rps_lock);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ gen6_gt_force_wake_put(dev_priv);
+ DRM_UNLOCK(dev);
+}
+
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev;
+ int min_freq = 15;
+ int gpu_freq, ia_freq, max_ia_freq;
+ int scaling_factor = 180;
+	uint64_t tsc_hz;
+
+ dev = dev_priv->dev;
+#if 0
+ max_ia_freq = cpufreq_quick_get_max(0);
+ /*
+ * Default to measured freq if none found, PCU will ensure we don't go
+ * over
+ */
+ if (!max_ia_freq)
+ max_ia_freq = tsc_freq;
+
+ /* Convert from Hz to MHz */
+ max_ia_freq /= 1000;
+#else
+	/* Derive the IA max from the kernel's global tsc_freq (Hz -> MHz);
+	 * a distinct local name avoids shadowing the global. */
+	tsc_hz = atomic_load_acq_64(&tsc_freq);
+	max_ia_freq = tsc_hz / 1000 / 1000;
+#endif
+
+ DRM_LOCK(dev);
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ gpu_freq--) {
+ int diff = dev_priv->max_delay - gpu_freq;
+ int d;
+
+ /*
+ * For GPU frequencies less than 750MHz, just use the lowest
+ * ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+ d = 100;
+ ia_freq = (ia_freq + d / 2) / d;
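+		/* Illustrative: max_ia_freq = 3400 (MHz) and diff = 0 yield
+		 * (3400 + 50) / 100 = 34, i.e. the IA reference rounded to
+		 * the 100MHz units implied by d = 100 for the ratio field.
+		 */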
+
+ I915_WRITE(GEN6_PCODE_DATA,
+ (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+ gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (_intel_wait_for(dev,
+ (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 10, 1, "915frq")) {
+ DRM_ERROR("pcode write of freq table timed out\n");
+ continue;
+ }
+ }
+
+ DRM_UNLOCK(dev);
+}
+
+static void ironlake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ /* Required for FBC */
+ dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+ DPFCRUNIT_CLOCK_GATE_DISABLE |
+ DPFDUNIT_CLOCK_GATE_DISABLE;
+ /* Required for CxSR */
+ dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_3DCGDIS0,
+ MARIUNIT_CLOCK_GATE_DISABLE |
+ SVSMUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(PCH_3DCGDIS1,
+ VFMUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ /*
+ * According to the spec the following bits should be set in
+ * order to enable memory self-refresh
+ * The bit 22/21 of 0x42004
+ * The bit 5 of 0x42020
+ * The bit 15 of 0x45000
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /*
+ * Based on the document from hardware guys the following bits
+ * should be set unconditionally in order to enable FBC.
+ * The bit 22 of 0x42000
+ * The bit 22 of 0x42004
+ * The bit 7,8,9 of 0x42020.
+ */
+ if (IS_IRONLAKE_M(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPFC_DIS1 |
+ ILK_DPFC_DIS2 |
+ ILK_CLK_FBC);
+ }
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+}
+
+static void gen6_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(GEN6_UCGCTL1,
+ I915_READ(GEN6_UCGCTL1) |
+ GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * According to the spec the following bits should be
+ * set in order to enable memory self-refresh and fbc:
+ * The bit21 and bit22 of 0x42000
+ * The bit21 and bit22 of 0x42004
+ * The bit5 and bit7 of 0x42020
+ * The bit14 of 0x70180
+ * The bit14 of 0x71180
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE |
+ ILK_DPFD_CLK_GATE);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+}
+
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+}
+
+static void g4x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate;
+
+ I915_WRITE(RENCLK_GATE_D1, 0);
+ I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ GS_UNIT_CLOCK_GATE_DISABLE |
+ CL_UNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+ OVRUNIT_CLOCK_GATE_DISABLE |
+ OVCUNIT_CLOCK_GATE_DISABLE;
+ if (IS_GM45(dev))
+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+}
+
+static void crestline_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ I915_WRITE16(DEUC, 0);
+}
+
+static void broadwater_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ I965_RCC_CLOCK_GATE_DISABLE |
+ I965_RCPB_CLOCK_GATE_DISABLE |
+ I965_ISC_CLOCK_GATE_DISABLE |
+ I965_FBC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+}
+
+static void gen3_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dstate = I915_READ(D_STATE);
+
+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+ DSTATE_DOT_CLOCK_GATING;
+ I915_WRITE(D_STATE, dstate);
+}
+
+static void i85x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+}
+
+static void i830_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
+}
+
+static void ironlake_teardown_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx) {
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(&dev_priv->renderctx->base);
+ dev_priv->renderctx = NULL;
+ }
+
+ if (dev_priv->pwrctx) {
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(&dev_priv->pwrctx->base);
+ dev_priv->pwrctx = NULL;
+ }
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_READ(PWRCTXA)) {
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ _intel_wait_for(dev,
+ ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+ 50, 1, "915pro");
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
+ }
+
+ ironlake_teardown_rc6(dev);
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx == NULL)
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->renderctx)
+ return -ENOMEM;
+
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->pwrctx) {
+ ironlake_teardown_rc6(dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+ if (!intel_enable_rc6(dev))
+ return;
+
+ DRM_LOCK(dev);
+ ret = ironlake_setup_rc6(dev);
+ if (ret) {
+ DRM_UNLOCK(dev);
+ return;
+ }
+
+ /*
+ * GPU can automatically power down the render unit if given a page
+ * to save state.
+ */
+ ret = BEGIN_LP_RING(6);
+ if (ret) {
+ ironlake_teardown_rc6(dev);
+ DRM_UNLOCK(dev);
+ return;
+ }
+
+ OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+ OUT_RING(MI_SET_CONTEXT);
+ OUT_RING(dev_priv->renderctx->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ OUT_RING(MI_SUSPEND_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_FLUSH);
+ ADVANCE_LP_RING();
+
+	/*
+	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+	 * does an implicit flush; combined with the MI_FLUSH above, it should
+	 * be safe to assume that renderctx is now valid.
+	 */
+ ret = intel_wait_ring_idle(LP_RING(dev_priv));
+ if (ret) {
+ DRM_ERROR("failed to enable ironlake power power savings\n");
+ ironlake_teardown_rc6(dev);
+ DRM_UNLOCK(dev);
+ return;
+ }
+
+ I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ DRM_UNLOCK(dev);
+}
+
+void intel_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.init_clock_gating(dev);
+
+ if (dev_priv->display.init_pch_clock_gating)
+ dev_priv->display.init_pch_clock_gating(dev);
+}
+
+/* Set up chip specific display functions */
+static void intel_init_display(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* We always want a DPMS function */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.dpms = ironlake_crtc_dpms;
+ dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.update_plane = ironlake_update_plane;
+ } else {
+ dev_priv->display.dpms = i9xx_crtc_dpms;
+ dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.update_plane = i9xx_update_plane;
+ }
+
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else if (IS_CRESTLINE(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+ }
+ /* 855GM needs testing */
+ }
+
+ /* Returns the core display clock speed */
+ if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ dev_priv->display.get_display_clock_speed =
+ i945_get_display_clock_speed;
+ else if (IS_I915G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915_get_display_clock_speed;
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+ dev_priv->display.get_display_clock_speed =
+ i9xx_misc_get_display_clock_speed;
+ else if (IS_I915GM(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915gm_get_display_clock_speed;
+ else if (IS_I865G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i865_get_display_clock_speed;
+ else if (IS_I85X(dev))
+ dev_priv->display.get_display_clock_speed =
+ i855_get_display_clock_speed;
+ else /* 852, 830 */
+ dev_priv->display.get_display_clock_speed =
+ i830_get_display_clock_speed;
+
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* A small trick here - if the bios hasn't configured MT forcewake,
+ * and if the device is in RC6, then force_wake_mt_get will not wake
+ * the device and the ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT forcewake being
+ * disabled.
+ */
+ DRM_LOCK(dev);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ DRM_UNLOCK(dev);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev))
+ dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
+ else if (HAS_PCH_CPT(dev))
+ dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
+
+ if (IS_GEN5(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+ dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.write_eld = g4x_write_eld;
+ dev_priv->display.update_wm = g4x_update_wm;
+ dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.update_wm = i965_update_wm;
+ if (IS_CRESTLINE(dev))
+ dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ else if (IS_BROADWATER(dev))
+ dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ } else if (IS_GEN3(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_I865G(dev)) {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ } else {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ if (IS_845G(dev))
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ else
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ }
+
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ dev_priv->display.queue_flip = intel_gen2_queue_flip;
+ break;
+
+ case 3:
+ dev_priv->display.queue_flip = intel_gen3_queue_flip;
+ break;
+
+ case 4:
+ case 5:
+ dev_priv->display.queue_flip = intel_gen4_queue_flip;
+ break;
+
+ case 6:
+ dev_priv->display.queue_flip = intel_gen6_queue_flip;
+ break;
+ case 7:
+ dev_priv->display.queue_flip = intel_gen7_queue_flip;
+ break;
+ }
+}
+
+/*
+ * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
+ * resume, or other times. This quirk makes sure that's the case for
+ * affected systems.
+ */
+static void quirk_pipea_force(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+ DRM_DEBUG("applying pipe a force quirk\n");
+}
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+}
+
+struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_device *dev);
+};
+
+#define PCI_ANY_ID (~0u)
+
+struct intel_quirk intel_quirks[] = {
+ /* HP Mini needs pipe A force quirk (LP: #322104) */
+ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+
+ /* Thinkpad R31 needs pipe A force quirk */
+ { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
+ /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+ { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+ /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
+ { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
+ /* ThinkPad X40 needs pipe A force quirk */
+
+ /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+ /* 855 & before need to leave pipe A & dpll A up */
+ { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+};
+
+static void intel_init_quirks(struct drm_device *dev)
+{
+ struct intel_quirk *q;
+ device_t d;
+ int i;
+
+ d = dev->device;
+ for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) {
+ q = &intel_quirks[i];
+ if (pci_get_device(d) == q->device &&
+ (pci_get_subvendor(d) == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (pci_get_subdevice(d) == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(dev);
+ }
+}
+
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 sr1;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+#if 0
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+#endif
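+	/* VGA sequencer register 0x01 (Clocking Mode): setting bit 5
+	 * ("Screen Off") blanks the legacy VGA output before the plane is
+	 * disabled below.
+	 */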
+ outb(VGA_SR_INDEX, 1);
+ sr1 = inb(VGA_SR_DATA);
+ outb(VGA_SR_DATA, sr1 | 1 << 5);
+#if 0
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+#endif
+ DELAY(300);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+}
+
+void intel_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i, ret;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *,
+ &intel_mode_funcs);
+
+ intel_init_quirks(dev);
+
+ intel_init_display(dev);
+
+ if (IS_GEN2(dev)) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else if (IS_GEN3(dev)) {
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ } else {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ }
+ dev->mode_config.fb_base = dev->agp->base;
+
+ DRM_DEBUG_KMS("%d display pipe%s available.\n",
+ dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
+
+ for (i = 0; i < dev_priv->num_pipe; i++) {
+ intel_crtc_init(dev, i);
+ ret = intel_plane_init(dev, i);
+ if (ret)
+ DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
+ }
+
+ /* Just disable it once at startup */
+ i915_disable_vga(dev);
+ intel_setup_outputs(dev);
+
+ intel_init_clock_gating(dev);
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ intel_init_emon(dev);
+ }
+
+ if (IS_GEN6(dev)) {
+ gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
+
+ TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
+ callout_init(&dev_priv->idle_callout, CALLOUT_MPSAFE);
+}
+
+void intel_modeset_gem_init(struct drm_device *dev)
+{
+ if (IS_IRONLAKE_M(dev))
+ ironlake_enable_rc6(dev);
+
+ intel_setup_overlay(dev);
+}
+
+void intel_modeset_cleanup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+
+ drm_kms_helper_poll_fini(dev);
+ DRM_LOCK(dev);
+
+#if 0
+ intel_unregister_dsm_handler();
+#endif
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ /* Skip inactive CRTCs */
+ if (!crtc->fb)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+ intel_increase_pllclock(crtc);
+ }
+
+ intel_disable_fbc(dev);
+
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_drps(dev);
+ if (IS_GEN6(dev))
+ gen6_disable_rps(dev);
+
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_rc6(dev);
+
+ /* Disable the irq before mode object teardown, because the irq might
+  * enqueue unpin/hotplug work. */
+ drm_irq_uninstall(dev);
+ DRM_UNLOCK(dev);
+
+ if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
+ taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
+ if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL))
+ taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
+
+ /* Shut off idle work before the crtcs get freed. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+ callout_drain(&intel_crtc->idle_callout);
+ }
+ callout_drain(&dev_priv->idle_callout);
+ if (taskqueue_cancel(dev_priv->tq, &dev_priv->idle_task, NULL))
+ taskqueue_drain(dev_priv->tq, &dev_priv->idle_task);
+
+ drm_mode_config_cleanup(dev);
+}
+
+/*
+ * Return the encoder currently attached to the connector.
+ */
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+{
+ return &intel_attached_encoder(connector)->base;
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
+}
+
+/*
+ * set vga decode state - true == enable VGA decode
+ */
+int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+{
+ struct drm_i915_private *dev_priv;
+ device_t bridge_dev;
+ u16 gmch_ctrl;
+
+ dev_priv = dev->dev_private;
+ bridge_dev = intel_gtt_get_bridge_device();
+ gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2);
+ if (state)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+ pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2);
+ return (0);
+}
+
+struct intel_display_error_state {
+ struct intel_cursor_error_state {
+ u32 control;
+ u32 position;
+ u32 base;
+ u32 size;
+ } cursor[2];
+
+ struct intel_pipe_error_state {
+ u32 conf;
+ u32 source;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } pipe[2];
+
+ struct intel_plane_error_state {
+ u32 control;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 addr;
+ u32 surface;
+ u32 tile_offset;
+ } plane[2];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_display_error_state *error;
+ int i;
+
+ error = malloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT);
+ if (error == NULL)
+ return NULL;
+
+ for (i = 0; i < 2; i++) {
+ error->cursor[i].control = I915_READ(CURCNTR(i));
+ error->cursor[i].position = I915_READ(CURPOS(i));
+ error->cursor[i].base = I915_READ(CURBASE(i));
+
+ error->plane[i].control = I915_READ(DSPCNTR(i));
+ error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+ error->plane[i].size = I915_READ(DSPSIZE(i));
+ error->plane[i].pos = I915_READ(DSPPOS(i));
+ error->plane[i].addr = I915_READ(DSPADDR(i));
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->plane[i].surface = I915_READ(DSPSURF(i));
+ error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+ }
+
+ error->pipe[i].conf = I915_READ(PIPECONF(i));
+ error->pipe[i].source = I915_READ(PIPESRC(i));
+ error->pipe[i].htotal = I915_READ(HTOTAL(i));
+ error->pipe[i].hblank = I915_READ(HBLANK(i));
+ error->pipe[i].hsync = I915_READ(HSYNC(i));
+ error->pipe[i].vtotal = I915_READ(VTOTAL(i));
+ error->pipe[i].vblank = I915_READ(VBLANK(i));
+ error->pipe[i].vsync = I915_READ(VSYNC(i));
+ }
+
+ return error;
+}
+
+void
+intel_display_print_error_state(struct sbuf *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ sbuf_printf(m, "Pipe [%d]:\n", i);
+ sbuf_printf(m, " CONF: %08x\n", error->pipe[i].conf);
+ sbuf_printf(m, " SRC: %08x\n", error->pipe[i].source);
+ sbuf_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
+ sbuf_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
+ sbuf_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
+ sbuf_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
+ sbuf_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
+ sbuf_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
+
+ sbuf_printf(m, "Plane [%d]:\n", i);
+ sbuf_printf(m, " CNTR: %08x\n", error->plane[i].control);
+ sbuf_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
+ sbuf_printf(m, " SIZE: %08x\n", error->plane[i].size);
+ sbuf_printf(m, " POS: %08x\n", error->plane[i].pos);
+ sbuf_printf(m, " ADDR: %08x\n", error->plane[i].addr);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ sbuf_printf(m, " SURF: %08x\n", error->plane[i].surface);
+ sbuf_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
+ }
+
+ sbuf_printf(m, "Cursor [%d]:\n", i);
+ sbuf_printf(m, " CNTR: %08x\n", error->cursor[i].control);
+ sbuf_printf(m, " POS: %08x\n", error->cursor[i].position);
+ sbuf_printf(m, " BASE: %08x\n", error->cursor[i].base);
+ }
+}
diff --git a/sys/dev/drm2/i915/intel_dp.c b/sys/dev/drm2/i915/intel_dp.c
new file mode 100644
index 0000000..4820e45
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_dp.c
@@ -0,0 +1,2562 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/drm_dp_helper.h>
+
+#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_LINK_STATUS_SIZE 6
+#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+/* XXXKIB what is the right errno code for FreeBSD? */
+#define EREMOTEIO ENXIO
+
+struct intel_dp {
+ struct intel_encoder base;
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ uint32_t color_range;
+ int dpms_mode;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ device_t dp_iic_bus;
+ device_t adapter;
+ bool is_pch_edp;
+ uint8_t train_set[4];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct drm_display_mode *panel_fixed_mode; /* for eDP */
+ struct timeout_task panel_vdd_task;
+ bool want_panel_vdd;
+};
+
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * Returns true if a CPU or PCH DP output is attached to an eDP panel,
+ * false otherwise.
+ */
+static bool is_edp(struct intel_dp *intel_dp)
+{
+ return intel_dp->base.type == INTEL_OUTPUT_EDP;
+}
+
+/**
+ * is_pch_edp - is the port on the PCH and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a PCH DP port attached
+ * to an eDP panel, false otherwise. Helpful for determining whether we
+ * may need FDI resources for a given DP output.
+ */
+static bool is_pch_edp(struct intel_dp *intel_dp)
+{
+ return intel_dp->is_pch_edp;
+}
+
+/**
+ * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a CPU eDP port.
+ */
+static bool is_cpu_edp(struct intel_dp *intel_dp)
+{
+ return is_edp(intel_dp) && !is_pch_edp(intel_dp);
+}
+
+static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dp, base.base);
+}
+
+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dp, base);
+}
+
+/**
+ * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
+ * @encoder: DRM encoder
+ *
+ * Return true if @encoder corresponds to a PCH attached eDP panel. Needed
+ * by intel_display.c.
+ */
+bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp;
+
+ if (!encoder)
+ return false;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ return is_pch_edp(intel_dp);
+}
+
+static void intel_dp_start_link_train(struct intel_dp *intel_dp);
+static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+static void intel_dp_link_down(struct intel_dp *intel_dp);
+
+void
+intel_edp_link_config(struct intel_encoder *intel_encoder,
+ int *lane_num, int *link_bw)
+{
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+
+ *lane_num = intel_dp->lane_count;
+ if (intel_dp->link_bw == DP_LINK_BW_1_62)
+ *link_bw = 162000;
+ else if (intel_dp->link_bw == DP_LINK_BW_2_7)
+ *link_bw = 270000;
+}
+
+static int
+intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+ int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
+ }
+ return max_lane_count;
+}
+
+static int
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
+{
+ int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+ default:
+ max_link_bw = DP_LINK_BW_1_62;
+ break;
+ }
+ return max_link_bw;
+}
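+
+/*
+ * Worked example for the two DPCD helpers above, assuming the standard
+ * DPCD encodings from drm_dp_helper.h: a sink reporting
+ * dpcd[DP_MAX_LINK_RATE] = 0x0a and dpcd[DP_MAX_LANE_COUNT] = 0x84
+ * (enhanced-framing bit plus four lanes) yields
+ * intel_dp_max_link_bw() == DP_LINK_BW_2_7 (0x0a) and
+ * intel_dp_max_lane_count() == 4 (0x84 & 0x1f).
+ */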
+
+static int
+intel_dp_link_clock(uint8_t link_bw)
+{
+ if (link_bw == DP_LINK_BW_2_7)
+ return 270000;
+ else
+ return 162000;
+}
+
+/*
+ * The units on the numbers in the next two are... bizarre. Examples will
+ * make it clearer; this one parallels an example in the eDP spec.
+ *
+ * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
+ *
+ * 270000 * 1 * 8 / 10 == 216000
+ *
+ * The actual data capacity of that configuration is 2.16Gbit/s, so the
+ * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
+ * or equivalently, kilopixels per second - so for 1680x1050R it'd be
+ * 119000. At 18bpp that's 2142000 kilobits per second.
+ *
+ * Thus the strange-looking division by 10 in intel_dp_link_required, to
+ * get the result in decakilobits instead of kilobits.
+ */
+
+static int
+intel_dp_link_required(int pixel_clock, int bpp)
+{
+ return (pixel_clock * bpp + 9) / 10;
+}
+
+static int
+intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+ return (max_link_clock * max_lanes * 8) / 10;
+}
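+
+/*
+ * Worked example in the decakilobit units described above: a 1680x1050
+ * reduced-blanking mode has ->clock == 119000, so at 24bpp
+ * intel_dp_link_required() gives (119000 * 24 + 9) / 10 == 285600.
+ * Two 2.7GHz lanes supply intel_dp_max_data_rate() ==
+ * (270000 * 2 * 8) / 10 == 432000, so the mode fits; a single lane
+ * (216000) would not suffice.
+ */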
+
+static bool
+intel_dp_adjust_dithering(struct intel_dp *intel_dp,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+ int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_rate, mode_rate;
+
+ mode_rate = intel_dp_link_required(mode->clock, 24);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+ if (mode_rate > max_rate) {
+ mode_rate = intel_dp_link_required(mode->clock, 18);
+ if (mode_rate > max_rate)
+ return false;
+
+ if (adjusted_mode)
+ adjusted_mode->private_flags
+ |= INTEL_MODE_DP_FORCE_6BPC;
+
+ return true;
+ }
+
+ return true;
+}
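+
+/*
+ * Example of the 6bpc fallback above: on a two-lane 2.7GHz link
+ * (max_rate == 432000), a 1920x1200@60 CVT mode (->clock == 193250)
+ * needs 463800 at 24bpp, which does not fit, but only 347850 at 18bpp,
+ * which does; the mode is then accepted with INTEL_MODE_DP_FORCE_6BPC
+ * set in adjusted_mode->private_flags.
+ */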
+
+static int
+intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
+
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+ int i;
+ uint32_t v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+ int i;
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3-i) * 8);
+}
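+
+/*
+ * The AUX channel data registers hold message bytes big-endian (most
+ * significant byte first), which is what the two helpers above
+ * implement: pack_aux() of {0x12, 0x34} yields 0x12340000, and
+ * unpack_aux(0x12340000, dst, 2) restores {0x12, 0x34}.
+ */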
+
+/* hrawclock is 1/4 the FSB frequency */
+static int
+intel_hrawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t clkcfg;
+
+ clkcfg = I915_READ(CLKCFG);
+ switch (clkcfg & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_400:
+ return 100;
+ case CLKCFG_FSB_533:
+ return 133;
+ case CLKCFG_FSB_667:
+ return 166;
+ case CLKCFG_FSB_800:
+ return 200;
+ case CLKCFG_FSB_1067:
+ return 266;
+ case CLKCFG_FSB_1333:
+ return 333;
+ /* these two are just a guess; one of them might be right */
+ case CLKCFG_FSB_1600:
+ case CLKCFG_FSB_1600_ALT:
+ return 400;
+ default:
+ return 133;
+ }
+}
+
+static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
+}
+
+static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
+}
+
+static void
+intel_dp_check_edp(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!is_edp(intel_dp))
+ return;
+ if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
+ printf("eDP powered off while attempting aux channel communication.\n");
+ DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
+ }
+}
+
+static int
+intel_dp_aux_ch(struct intel_dp *intel_dp,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ uint32_t output_reg = intel_dp->output_reg;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_data = ch_ctl + 4;
+ int i;
+ int recv_bytes;
+ uint32_t status;
+ uint32_t aux_clock_divider;
+ int try, precharge = 5;
+
+ intel_dp_check_edp(intel_dp);
+ /* The clock divider is derived from the hrawclk, and the AUX channel
+  * clock should run at 2MHz, so take the hrawclk value and divide
+  * by 2 and use that.
+  *
+  * Note that PCH attached eDP panels should use a 125MHz input
+  * clock divider.
+  */
+ if (is_cpu_edp(intel_dp)) {
+  if (IS_GEN6(dev) || IS_GEN7(dev))
+   aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400MHz */
+  else
+   aux_clock_divider = 225; /* eDP input clock at 450MHz */
+ } else if (HAS_PCH_SPLIT(dev))
+  aux_clock_divider = 63; /* ILK input clock fixed at 125MHz */
+ else
+ aux_clock_divider = intel_hrawclk(dev) / 2;
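+
+ /*
+  * For example, with an 800MHz FSB intel_hrawclk() returns 200 (MHz),
+  * so the divider is 100 and the AUX bit clock is 200MHz / 100 = 2MHz.
+  */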
+
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ drm_msleep(1, "915ach");
+ }
+
+ if (try == 3) {
+ printf("dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
+ return -EBUSY;
+ }
+
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ I915_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ for (;;) {
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ DELAY(100);
+ }
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR))
+ continue;
+ if (status & DP_AUX_CH_CTL_DONE)
+ break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Check for timeout or receive error.
+ * Timeouts occur when the sink is not connected
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Timeouts occur when the device isn't connected, so they're
+ * "normal" -- don't fill the kernel log with these */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+ return -ETIMEDOUT;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ unpack_aux(I915_READ(ch_data + i),
+ recv + i, recv_bytes - i);
+
+ return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+intel_dp_aux_native_write(struct intel_dp *intel_dp,
+ uint16_t address, uint8_t *send, int send_bytes)
+{
+ int ret;
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
+
+ intel_dp_check_edp(intel_dp);
+ if (send_bytes > 16)
+ return -1;
+ msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ break;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ DELAY(100);
+ else
+ return -EIO;
+ }
+ return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
+ uint16_t address, uint8_t byte)
+{
+ return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+intel_dp_aux_native_read(struct intel_dp *intel_dp,
+ uint16_t address, uint8_t *recv, int recv_bytes)
+{
+ uint8_t msg[4];
+ int msg_bytes;
+ uint8_t reply[20];
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
+
+ intel_dp_check_edp(intel_dp);
+ msg[0] = AUX_NATIVE_READ << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = recv_bytes - 1;
+
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+ return -EPROTO;
+ if (ret < 0)
+ return ret;
+ ack = reply[0];
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ memcpy(recv, reply + 1, ret - 1);
+ return ret - 1;
+ }
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ DELAY(100);
+ else
+ return -EIO;
+ }
+}
+
+static int
+intel_dp_i2c_aux_ch(device_t idev, int mode, uint8_t write_byte,
+ uint8_t *read_byte)
+{
+ struct iic_dp_aux_data *data;
+ struct intel_dp *intel_dp;
+ uint16_t address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ unsigned retry;
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ data = device_get_softc(idev);
+ intel_dp = data->priv;
+ address = data->address;
+
+ intel_dp_check_edp(intel_dp);
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
+
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
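+
+ /*
+  * For example, assuming the AUX_I2C_* encodings from drm_dp_helper.h
+  * (AUX_I2C_READ = 0x1, AUX_I2C_MOT = 0x4), a MODE_I2C_READ without
+  * MODE_I2C_STOP at i2c address 0x50 (an EDID read) builds
+  * msg = { 0x50, 0x00, 0x50, 0x00 }: command 0x5 (read, middle of
+  * transaction) in the high nibble of msg[0], the 16-bit address,
+  * then a length byte of 0 (one data byte).
+  */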
+
+ for (retry = 0; retry < 5; retry++) {
+ ret = intel_dp_aux_ch(intel_dp,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return (-ret);
+ }
+
+ switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+ case AUX_NATIVE_REPLY_ACK:
+ /* I2C-over-AUX Reply field is only valid
+ * when paired with AUX ACK.
+ */
+ break;
+ case AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return (EREMOTEIO);
+ case AUX_NATIVE_REPLY_DEFER:
+ DELAY(100);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+ reply[0]);
+ return (EREMOTEIO);
+ }
+
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return (0/*reply_bytes - 1*/);
+ case AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_i2c nack\n");
+ return (EREMOTEIO);
+ case AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_i2c defer\n");
+ DELAY(100);
+ break;
+ default:
+ DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+ return (EREMOTEIO);
+ }
+ }
+
+ DRM_ERROR("too many retries, giving up\n");
+ return (EREMOTEIO);
+}
+
+static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+
+static int
+intel_dp_i2c_init(struct intel_dp *intel_dp,
+ struct intel_connector *intel_connector, const char *name)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("i2c_init %s\n", name);
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ret = iic_dp_aux_add_bus(intel_connector->base.dev->device, name,
+ intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
+ &intel_dp->adapter);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ return (ret);
+}
+
+static bool
+intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int lane_count, clock;
+ int max_lane_count = intel_dp_max_lane_count(intel_dp);
+ int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int bpp;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+
+ if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
+ intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
+ intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+ mode, adjusted_mode);
+ /*
+   * mode->clock is used to calculate the pipe's Data & Link M/N
+   * values. For eDP the fixed panel clock should be used.
+ */
+ mode->clock = intel_dp->panel_fixed_mode->clock;
+ }
+
+ if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
+ return false;
+
+ bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = 0; clock <= max_clock; clock++) {
+ int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+
+ if (intel_dp_link_required(mode->clock, bpp)
+ <= link_avail) {
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = lane_count;
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+struct intel_dp_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+static void
+intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+ while (*num > 0xffffff || *den > 0xffffff) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
+
+static void
+intel_dp_compute_m_n(int bpp,
+ int nlanes,
+ int pixel_clock,
+ int link_clock,
+ struct intel_dp_m_n *m_n)
+{
+ m_n->tu = 64;
+ m_n->gmch_m = (pixel_clock * bpp) >> 3;
+ m_n->gmch_n = link_clock * nlanes;
+ intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
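+
+/*
+ * For example, a 148500kHz pixel clock at 24bpp over four 2.7GHz lanes
+ * gives gmch_m = 148500 * 24 / 8 == 445500 and gmch_n = 270000 * 4 ==
+ * 1080000; both already fit in 24 bits, so intel_reduce_ratio() leaves
+ * them untouched, and link_m/link_n are simply 148500/270000.
+ */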
+
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int lane_count = 4;
+ struct intel_dp_m_n m_n;
+ int pipe = intel_crtc->pipe;
+
+ /*
+ * Find the lane count in the intel_encoder private
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_dp *intel_dp;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_dp->base.type == INTEL_OUTPUT_EDP)
+ {
+ lane_count = intel_dp->lane_count;
+ break;
+ }
+ }
+
+ /*
+ * Compute the GMCH and Link ratios. The '3' here is
+ * the number of bytes_per_pixel post-LUT, which we always
+ * set up for 8-bits of R/G/B, or 3 bytes total.
+ */
+ intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(TRANSDATA_M1(pipe),
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
+ I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
+ } else {
+ I915_WRITE(PIPE_GMCH_DATA_M(pipe),
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+ I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+ }
+}
+
+static void ironlake_edp_pll_on(struct drm_encoder *encoder);
+static void ironlake_edp_pll_off(struct drm_encoder *encoder);
+
+static void
+intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Turn on the eDP PLL if needed */
+ if (is_edp(intel_dp)) {
+ if (!is_pch_edp(intel_dp))
+ ironlake_edp_pll_on(encoder);
+ else
+ ironlake_edp_pll_off(encoder);
+ }
+
+ /*
+ * There are four kinds of DP registers:
+ *
+ * IBX PCH
+ * SNB CPU
+ * IVB CPU
+ * CPT PCH
+ *
+ * IBX PCH and CPU are the same for almost everything,
+ * except that the CPU DP PLL is configured in this
+ * register
+ *
+ * CPT PCH is quite different, having many bits moved
+ * to the TRANS_DP_CTL register instead. That
+ * configuration happens (oddly) in ironlake_pch_enable
+ */
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+
+ /* Handle DP bits in common between all three register formats */
+
+ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+
+ switch (intel_dp->lane_count) {
+ case 1:
+ intel_dp->DP |= DP_PORT_WIDTH_1;
+ break;
+ case 2:
+ intel_dp->DP |= DP_PORT_WIDTH_2;
+ break;
+ case 4:
+ intel_dp->DP |= DP_PORT_WIDTH_4;
+ break;
+ }
+ if (intel_dp->has_audio) {
+ DRM_DEBUG_KMS("Enabling DP audio on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ intel_write_eld(encoder, adjusted_mode);
+ }
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+ /*
+ * Check for DPCD version > 1.1 and enhanced framing support
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+
+ /* Split out the IBX/CPU vs CPT settings */
+
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= intel_crtc->pipe << 29;
+
+  /* don't leave out the PLL settings required for eDP */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ intel_dp->DP |= intel_dp->color_range;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ if (intel_crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
+
+ if (is_cpu_edp(intel_dp)) {
+   /* don't leave out the PLL settings required for eDP */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ }
+ } else {
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+ }
+}
+
+#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
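+
+/*
+ * Each MASK/VALUE pair above picks out the PCH_PP_STATUS bits that
+ * matter and the pattern they must match; ironlake_wait_panel_status()
+ * below polls until (status & mask) == value. IDLE_ON_VALUE, for
+ * instance, requires PP_ON set with the power sequencer idle in the
+ * on state.
+ */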
+
+static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
+
+ if (_intel_wait_for(dev,
+ (I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10, "915iwp")) {
+ DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
+ }
+}
+
+static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power on\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power off time\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power cycle\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
+{
+ u32 control = I915_READ(PCH_PP_CONTROL);
+
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ return control;
+}
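+
+/*
+ * PCH_PP_CONTROL ignores writes unless they carry the unlock key
+ * (PANEL_UNLOCK_REGS, in the PANEL_UNLOCK_MASK field), which is why
+ * every PCH_PP_CONTROL write in this file goes through the value
+ * returned above.
+ */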
+
+static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!is_edp(intel_dp))
+ return;
+ DRM_DEBUG_KMS("Turn eDP VDD on\n");
+
+ if (intel_dp->want_panel_vdd)
+ printf("eDP VDD already requested on\n");
+
+ intel_dp->want_panel_vdd = true;
+
+ if (ironlake_edp_have_panel_vdd(intel_dp)) {
+ DRM_DEBUG_KMS("eDP VDD already on\n");
+ return;
+ }
+
+ if (!ironlake_edp_have_panel_power(intel_dp))
+ ironlake_wait_panel_power_cycle(intel_dp);
+
+ pp = ironlake_get_pp_control(dev_priv);
+ pp |= EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
+
+ /*
+ * If the panel wasn't on, delay before accessing aux channel
+ */
+ if (!ironlake_edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP was not running\n");
+ drm_msleep(intel_dp->panel_power_up_delay, "915edpon");
+ }
+}
+
+static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+ pp = ironlake_get_pp_control(dev_priv);
+ pp &= ~EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
+
+ drm_msleep(intel_dp->panel_power_down_delay, "915vddo");
+ }
+}
+
+static void ironlake_panel_vdd_work(void *arg, int pending __unused)
+{
+ struct intel_dp *intel_dp = arg;
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ sx_xlock(&dev->mode_config.mutex);
+ ironlake_panel_vdd_off_sync(intel_dp);
+ sx_xunlock(&dev->mode_config.mutex);
+}
+
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+{
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
+ if (!intel_dp->want_panel_vdd)
+ printf("eDP VDD not forced on\n");
+
+ intel_dp->want_panel_vdd = false;
+
+ if (sync) {
+ ironlake_panel_vdd_off_sync(intel_dp);
+ } else {
+  /*
+   * Queue the VDD-off work to run well past the power-down delay,
+   * so panel power stays up across a sequence of operations.
+   */
+ struct drm_i915_private *dev_priv = intel_dp->base.base.dev->dev_private;
+ taskqueue_enqueue_timeout(dev_priv->tq,
+ &intel_dp->panel_vdd_task,
+ msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+ }
+}
+
+static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP power on\n");
+
+ if (ironlake_edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP power already on\n");
+ return;
+ }
+
+ ironlake_wait_panel_power_cycle(intel_dp);
+
+ pp = ironlake_get_pp_control(dev_priv);
+ if (IS_GEN5(dev)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ }
+
+ pp |= POWER_TARGET_ON;
+ if (!IS_GEN5(dev))
+ pp |= PANEL_POWER_RESET;
+
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ ironlake_wait_panel_on(intel_dp);
+
+ if (IS_GEN5(dev)) {
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ }
+}
+
+static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP power off\n");
+
+ if (intel_dp->want_panel_vdd)
+ printf("Cannot turn power off while VDD is on\n");
+
+ pp = ironlake_get_pp_control(dev_priv);
+ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ ironlake_wait_panel_off(intel_dp);
+}
+
+static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("\n");
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ drm_msleep(intel_dp->backlight_on_delay, "915ebo");
+ pp = ironlake_get_pp_control(dev_priv);
+ pp |= EDP_BLC_ENABLE;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+}
+
+static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("\n");
+ pp = ironlake_get_pp_control(dev_priv);
+ pp &= ~EDP_BLC_ENABLE;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ drm_msleep(intel_dp->backlight_off_delay, "915bo1");
+}
+
+static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("\n");
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
+ DELAY(200);
+}
+
+static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
+ DELAY(200);
+}
+
+/* If the sink supports it, try to set the power state appropriately */
+static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ if (ret != 1)
+ DRM_DEBUG("failed to write sink power state\n");
+ } else {
+ /*
+   * When turning on, retry a few times, sleeping 1ms between
+   * attempts, to give the sink time to wake up.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_write_1(intel_dp,
+ DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ drm_msleep(1, "915dps");
+ }
+ }
+}
+
+static void intel_dp_prepare(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_off(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+
+ /* Wake up the sink first */
+ ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ /* Make sure the panel is off before trying to
+ * change the mode
+ */
+}
+
+static void intel_dp_commit(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ intel_dp_complete_link_train(intel_dp);
+ ironlake_edp_backlight_on(intel_dp);
+
+ intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
+
+ if (HAS_PCH_CPT(dev))
+ intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+}
+
+static void
+intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ ironlake_edp_backlight_off(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, mode);
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_off(encoder);
+ } else {
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_on(encoder);
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, mode);
+ if (!(dp_reg & DP_PORT_EN)) {
+ intel_dp_start_link_train(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ intel_dp_complete_link_train(intel_dp);
+ } else
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ ironlake_edp_backlight_on(intel_dp);
+ }
+ intel_dp->dpms_mode = mode;
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+ uint8_t *recv, int recv_bytes)
+{
+ int ret, i;
+
+ /*
+ * Sinks are *supposed* to come up within 1ms from an off state,
+ * but we're also supposed to retry 3 times per the spec.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_read(intel_dp, address, recv,
+ recv_bytes);
+ if (ret == recv_bytes)
+ return true;
+ drm_msleep(1, "915dpl");
+ }
+
+ return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ return intel_dp_aux_native_read_retry(intel_dp,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE);
+}
+
+static uint8_t
+intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static uint8_t
+intel_get_adjust_request_voltage(uint8_t adjust_request[2],
+ int lane)
+{
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ uint8_t l = adjust_request[lane>>1];
+
+ return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
+ int lane)
+{
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ uint8_t l = adjust_request[lane>>1];
+
+ return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
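+
+/*
+ * The two helpers above unpack DPCD ADJUST_REQUEST_LANE0_1/LANE2_3
+ * bytes, which carry two lanes per byte: voltage in bits 1:0 and
+ * pre-emphasis in bits 3:2 for the even lane, and the same fields in
+ * bits 5:4 and 7:6 for the odd lane. A byte of 0x26, say, requests
+ * level-2 voltage swing with 3.5dB pre-emphasis on the even lane and
+ * level-2 swing with no pre-emphasis on the odd lane.
+ */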
+
+#if 0
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char *link_train_names[] = {
+ "pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+/*
+ * These are source-specific values; current Intel hardware supports
+ * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB,
+ * though CPT PCH ports accept a 1200mV swing (see intel_dp_voltage_max())
+ */
+
+static uint8_t
+intel_dp_voltage_max(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ }
+}
+
+static void
+intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+ uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
+ uint8_t voltage_max;
+ uint8_t preemph_max;
+
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ voltage_max = intel_dp_voltage_max(intel_dp);
+ if (v >= voltage_max)
+ v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+ intel_dp->train_set[lane] = v | p;
+}
+
+static uint32_t
+intel_dp_signal_levels(uint8_t train_set)
+{
+ uint32_t signal_levels = 0;
+
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ default:
+ signal_levels |= DP_VOLTAGE_0_4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ signal_levels |= DP_VOLTAGE_0_6;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ signal_levels |= DP_VOLTAGE_0_8;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ signal_levels |= DP_VOLTAGE_1_2;
+ break;
+ }
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ default:
+ signal_levels |= DP_PRE_EMPHASIS_0;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ signal_levels |= DP_PRE_EMPHASIS_3_5;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ signal_levels |= DP_PRE_EMPHASIS_6;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ signal_levels |= DP_PRE_EMPHASIS_9_5;
+ break;
+ }
+ return signal_levels;
+}
+
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
+ default:
+  DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
+    "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ }
+}
+
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+  DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
+    "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
+static uint8_t
+intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int s = (lane & 1) * 4;
+ uint8_t l = link_status[lane>>1];
+
+ return (l >> s) & 0xf;
+}
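+
+/*
+ * LANE0_1_STATUS and LANE2_3_STATUS pack two lanes per byte, four bits
+ * each, with CR_DONE, CHANNEL_EQ_DONE and SYMBOL_LOCKED in bits 0-2.
+ * A status byte of 0x77 therefore means both lanes are fully trained,
+ * which is what the checks below test for.
+ */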
+
+/* Check for clock recovery is done on all channels */
+static bool
+intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ int lane;
+ uint8_t lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+ DP_LANE_CHANNEL_EQ_DONE|\
+ DP_LANE_SYMBOL_LOCKED)
+static bool
+intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ uint8_t lane_align;
+ uint8_t lane_status;
+ int lane;
+
+ lane_align = intel_dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+static bool
+intel_dp_set_link_train(struct intel_dp *intel_dp,
+ uint32_t dp_reg_value,
+ uint8_t dp_train_pat)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ I915_WRITE(intel_dp->output_reg, dp_reg_value);
+ POSTING_READ(intel_dp->output_reg);
+
+ intel_dp_aux_native_write_1(intel_dp,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ ret = intel_dp_aux_native_write(intel_dp,
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+ if (ret != intel_dp->lane_count)
+ return false;
+
+ return true;
+}
+
+/* Enable corresponding port and start training pattern 1 */
+static void
+intel_dp_start_link_train(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ int i;
+ uint8_t voltage;
+ bool clock_recovery = false;
+ int voltage_tries, loop_tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
+ /* Enable output, wait for it to become active */
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ DP_LINK_CONFIGURATION_SIZE);
+
+ DP |= DP_PORT_EN;
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ else
+ DP &= ~DP_LINK_TRAIN_MASK;
+ memset(intel_dp->train_set, 0, 4);
+ voltage = 0xff;
+ voltage_tries = 0;
+ loop_tries = 0;
+ clock_recovery = false;
+ for (;;) {
+  /* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+ uint32_t signal_levels;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+ DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+ reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_1;
+
+  /* Set training pattern 1 */
+  if (!intel_dp_set_link_train(intel_dp, reg,
+     DP_TRAINING_PATTERN_1))
+   break;
+
+ DELAY(100);
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("failed to get link status\n");
+ break;
+ }
+
+ if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("clock recovery OK\n");
+ clock_recovery = true;
+ break;
+ }
+
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == intel_dp->lane_count) {
+ ++loop_tries;
+ if (loop_tries == 5) {
+ DRM_DEBUG_KMS("too many full retries, give up\n");
+ break;
+ }
+ memset(intel_dp->train_set, 0, 4);
+ voltage_tries = 0;
+ continue;
+ }
+
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++voltage_tries;
+ if (voltage_tries == 5) {
+ DRM_DEBUG_KMS("too many voltage retries, give up\n");
+ break;
+ }
+ } else
+ voltage_tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp, link_status);
+ }
+
+ intel_dp->DP = DP;
+}
+
+static void
+intel_dp_complete_link_train(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool channel_eq = false;
+ int tries, cr_tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
+ /* channel equalization */
+ tries = 0;
+ cr_tries = 0;
+ channel_eq = false;
+ for (;;) {
+  /* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */
+ uint32_t signal_levels;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ if (cr_tries > 5) {
+ DRM_ERROR("failed to train DP, aborting\n");
+ intel_dp_link_down(intel_dp);
+ break;
+ }
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+ reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_2;
+
+ /* channel eq pattern */
+ if (!intel_dp_set_link_train(intel_dp, reg,
+ DP_TRAINING_PATTERN_2))
+ break;
+
+ DELAY(400);
+ if (!intel_dp_get_link_status(intel_dp, link_status))
+ break;
+
+ /* Make sure clock is still ok */
+ if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ intel_dp_start_link_train(intel_dp);
+ cr_tries++;
+ continue;
+ }
+
+ if (intel_channel_eq_ok(intel_dp, link_status)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times, then try clock recovery if that fails */
+ if (tries > 5) {
+ intel_dp_link_down(intel_dp);
+ intel_dp_start_link_train(intel_dp);
+ tries = 0;
+ cr_tries++;
+ continue;
+ }
+
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp, link_status);
+ ++tries;
+ }
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+ reg = DP | DP_LINK_TRAIN_OFF_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_OFF;
+
+ I915_WRITE(intel_dp->output_reg, reg);
+ POSTING_READ(intel_dp->output_reg);
+ intel_dp_aux_native_write_1(intel_dp,
+ DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+intel_dp_link_down(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t DP = intel_dp->DP;
+
+ if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (is_edp(intel_dp)) {
+ DP &= ~DP_PLL_ENABLE;
+ I915_WRITE(intel_dp->output_reg, DP);
+ POSTING_READ(intel_dp->output_reg);
+ DELAY(100);
+ }
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ }
+ POSTING_READ(intel_dp->output_reg);
+
+ drm_msleep(17, "915dlo");
+
+ if (is_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+ DP |= DP_LINK_TRAIN_OFF_CPT;
+ else
+ DP |= DP_LINK_TRAIN_OFF;
+ }
+
+ if (!HAS_PCH_CPT(dev) &&
+ I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
+
+ /* Hardware workaround: leaving our transcoder select
+ * set to transcoder B while it's off will prevent the
+ * corresponding HDMI output on transcoder A.
+ *
+ * Combine this with another hardware workaround:
+ * transcoder select bit can only be cleared while the
+ * port is enabled.
+ */
+ DP &= ~DP_PIPEB_SELECT;
+ I915_WRITE(intel_dp->output_reg, DP);
+
+ /* Changes to enable or select take place the vblank
+ * after being written.
+ */
+ if (crtc == NULL) {
+ /* We can arrive here never having been attached
+ * to a CRTC, for instance, due to inheriting
+ * random state from the BIOS.
+ *
+ * If the pipe is not running, play safe and
+ * wait for the clocks to stabilise before
+ * continuing.
+ */
+ POSTING_READ(intel_dp->output_reg);
+ drm_msleep(50, "915dla");
+ } else
+ intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ }
+
+ DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+ drm_msleep(intel_dp->panel_power_down_delay, "915ldo");
+}
+
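+/*
+ * Read the start of the sink's DPCD over the AUX channel; a non-zero
+ * DPCD revision means a DisplayPort receiver actually answered.
+ */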
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+ if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
+ sizeof(intel_dp->dpcd)) &&
+ (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+ int ret;
+
+ ret = intel_dp_aux_native_read_retry(intel_dp,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector, 1);
+ if (!ret)
+ return false;
+
+ return true;
+}
+
+static void
+intel_dp_handle_test_request(struct intel_dp *intel_dp)
+{
+ /* We don't implement any test handling yet; just ACK the request */
+ intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
+}
+
+/*
+ * According to DP spec
+ * 5.1.2:
+ * 1. Read DPCD
+ * 2. Configure link according to Receiver Capabilities
+ * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
+ * 4. Check link status on receipt of hot-plug interrupt
+ */
+
+static void
+intel_dp_check_link_status(struct intel_dp *intel_dp)
+{
+ u8 sink_irq_vector;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
+ return;
+
+ if (!intel_dp->base.base.crtc)
+ return;
+
+ /* Try to read receiver status if the link appears to be up */
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ intel_dp_link_down(intel_dp);
+ return;
+ }
+
+ /* Now read the DPCD to see if it's actually running */
+ if (!intel_dp_get_dpcd(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ return;
+ }
+
+ /* Try to read the source of the interrupt */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+ /* Clear interrupt source */
+ intel_dp_aux_native_write_1(intel_dp,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector);
+
+ if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+ if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
+ DRM_DEBUG_KMS("CP or sink specific irq unhandled\n");
+ }
+
+ if (!intel_channel_eq_ok(intel_dp, link_status)) {
+ DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+ drm_get_encoder_name(&intel_dp->base.base));
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ }
+}
+
+static enum drm_connector_status
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+{
+ if (intel_dp_get_dpcd(intel_dp))
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+ironlake_dp_detect(struct intel_dp *intel_dp)
+{
+ enum drm_connector_status status;
+
+ /* Can't disconnect eDP, but you can close the lid... */
+ if (is_edp(intel_dp)) {
+ status = intel_panel_detect(intel_dp->base.base.dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
+ }
+
+ return intel_dp_detect_dpcd(intel_dp);
+}
+
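+/*
+ * On G4x the live hot-plug status bits in PORT_HOTPLUG_STAT tell us
+ * whether anything is attached to the port before we go and probe the
+ * sink over the AUX channel.
+ */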
+static enum drm_connector_status
+g4x_dp_detect(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t temp, bit;
+
+ switch (intel_dp->output_reg) {
+ case DP_B:
+ bit = DPB_HOTPLUG_INT_STATUS;
+ break;
+ case DP_C:
+ bit = DPC_HOTPLUG_INT_STATUS;
+ break;
+ case DP_D:
+ bit = DPD_HOTPLUG_INT_STATUS;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+
+ temp = I915_READ(PORT_HOTPLUG_STAT);
+
+ if ((temp & bit) == 0)
+ return connector_status_disconnected;
+
+ return intel_dp_detect_dpcd(intel_dp);
+}
+
+static struct edid *
+intel_dp_get_edid(struct drm_connector *connector, device_t adapter)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct edid *edid;
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ edid = drm_get_edid(connector, adapter);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ return edid;
+}
+
+static int
+intel_dp_get_edid_modes(struct drm_connector *connector, device_t adapter)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ int ret;
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ret = intel_ddc_get_modes(connector, adapter);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ return ret;
+}
+
+/**
+ * Detect whether a DP sink is attached to the port.
+ *
+ * \return connector_status_connected if a sink responds.
+ * \return connector_status_disconnected otherwise.
+ */
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = intel_dp->base.base.dev;
+ enum drm_connector_status status;
+ struct edid *edid = NULL;
+
+ intel_dp->has_audio = false;
+
+ if (HAS_PCH_SPLIT(dev))
+ status = ironlake_dp_detect(intel_dp);
+ else
+ status = g4x_dp_detect(intel_dp);
+ if (status != connector_status_connected)
+ return status;
+
+ if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
+ intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
+ } else {
+ edid = intel_dp_get_edid(connector, intel_dp->adapter);
+ if (edid) {
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ connector->display_info.raw_edid = NULL;
+ free(edid, DRM_MEM_KMS);
+ }
+ }
+
+ return connector_status_connected;
+}
+
+static int intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* We should parse the EDID data and find out if it has an audio sink */
+
+ ret = intel_dp_get_edid_modes(connector, intel_dp->adapter);
+ if (ret) {
+ if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
+ struct drm_display_mode *newmode;
+ list_for_each_entry(newmode, &connector->probed_modes,
+ head) {
+ if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, newmode);
+ break;
+ }
+ }
+ }
+ return ret;
+ }
+
+ /* If the eDP panel provides no EDID, fall back to the fixed mode from the VBT */
+ if (is_edp(intel_dp)) {
+ if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (intel_dp->panel_fixed_mode) {
+ intel_dp->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ }
+ }
+ if (intel_dp->panel_fixed_mode) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ edid = intel_dp_get_edid(connector, intel_dp->adapter);
+ if (edid) {
+ has_audio = drm_detect_monitor_audio(edid);
+
+ connector->display_info.raw_edid = NULL;
+ free(edid, DRM_MEM_KMS);
+ }
+
+ return has_audio;
+}
+
+static int
+intel_dp_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_dp->force_audio)
+ return 0;
+
+ intel_dp->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_dp_detect_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (has_audio == intel_dp->has_audio)
+ return 0;
+
+ intel_dp->has_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_dp->color_range)
+ return 0;
+
+ intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_dp->base.base.crtc) {
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->fb);
+ }
+
+ return 0;
+}
+
+static void
+intel_dp_destroy(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ if (intel_dpd_is_edp(dev))
+ intel_panel_destroy_backlight(dev);
+
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, DRM_MEM_KMS);
+}
+
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct drm_device *dev;
+ struct intel_dp *intel_dp;
+
+ intel_dp = enc_to_intel_dp(encoder);
+ dev = encoder->dev;
+
+ if (intel_dp->dp_iic_bus != NULL) {
+ if (intel_dp->adapter != NULL) {
+ device_delete_child(intel_dp->dp_iic_bus,
+ intel_dp->adapter);
+ }
+ device_delete_child(dev->device, intel_dp->dp_iic_bus);
+ }
+ drm_encoder_cleanup(encoder);
+ if (is_edp(intel_dp)) {
+ struct drm_i915_private *dev_priv = intel_dp->base.base.dev->dev_private;
+
+ taskqueue_cancel_timeout(dev_priv->tq,
+ &intel_dp->panel_vdd_task, NULL);
+ taskqueue_drain_timeout(dev_priv->tq,
+ &intel_dp->panel_vdd_task);
+ ironlake_panel_vdd_off_sync(intel_dp);
+ }
+ free(intel_dp, DRM_MEM_KMS);
+}
+
+static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
+ .dpms = intel_dp_dpms,
+ .mode_fixup = intel_dp_mode_fixup,
+ .prepare = intel_dp_prepare,
+ .mode_set = intel_dp_mode_set,
+ .commit = intel_dp_commit,
+};
+
+static const struct drm_connector_funcs intel_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dp_set_property,
+ .destroy = intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ .get_modes = intel_dp_get_modes,
+ .mode_valid = intel_dp_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .destroy = intel_dp_encoder_destroy,
+};
+
+static void
+intel_dp_hot_plug(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+
+ intel_dp_check_link_status(intel_dp);
+}
+
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_dp *intel_dp;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_dp->base.type == INTEL_OUTPUT_EDP)
+ return intel_dp->output_reg;
+ }
+
+ return -1;
+}
+
+/* check the VBT to see whether the eDP is on DP-D port */
+bool intel_dpd_is_edp(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+
+ if (p_child->dvo_port == PORT_IDPD &&
+ p_child->device_type == DEVICE_TYPE_eDP)
+ return true;
+ }
+ return false;
+}
+
+static void
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+{
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_dp *intel_dp;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ const char *name = NULL;
+ int type;
+
+ intel_dp = malloc(sizeof(struct intel_dp), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ intel_dp->output_reg = output_reg;
+ intel_dp->dpms_mode = -1;
+
+ intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+ intel_encoder = &intel_dp->base;
+
+ if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+ if (intel_dpd_is_edp(dev))
+ intel_dp->is_pch_edp = true;
+
+ if (output_reg == DP_A || is_pch_edp(intel_dp)) {
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+ } else {
+ type = DRM_MODE_CONNECTOR_DisplayPort;
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ }
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
+ drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ if (output_reg == DP_B || output_reg == PCH_DP_B)
+ intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
+ else if (output_reg == DP_C || output_reg == PCH_DP_C)
+ intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
+ else if (output_reg == DP_D || output_reg == PCH_DP_D)
+ intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+
+ if (is_edp(intel_dp)) {
+ intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+ TIMEOUT_TASK_INIT(dev_priv->tq, &intel_dp->panel_vdd_task, 0,
+ ironlake_panel_vdd_work, intel_dp);
+ }
+
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = 0;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+#if 0
+ drm_sysfs_connector_add(connector);
+#endif
+
+ /* Set up the DDC bus. */
+ switch (output_reg) {
+ case DP_A:
+ name = "DPDDC-A";
+ break;
+ case DP_B:
+ case PCH_DP_B:
+ dev_priv->hotplug_supported_mask |=
+ HDMIB_HOTPLUG_INT_STATUS;
+ name = "DPDDC-B";
+ break;
+ case DP_C:
+ case PCH_DP_C:
+ dev_priv->hotplug_supported_mask |=
+ HDMIC_HOTPLUG_INT_STATUS;
+ name = "DPDDC-C";
+ break;
+ case DP_D:
+ case PCH_DP_D:
+ dev_priv->hotplug_supported_mask |=
+ HDMID_HOTPLUG_INT_STATUS;
+ name = "DPDDC-D";
+ break;
+ }
+
+ /* Cache some DPCD data in the eDP case */
+ if (is_edp(intel_dp)) {
+ bool ret;
+ struct edp_power_seq cur, vbt;
+ u32 pp_on, pp_off, pp_div;
+
+ pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_off = I915_READ(PCH_PP_OFF_DELAYS);
+ pp_div = I915_READ(PCH_PP_DIVISOR);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+ vbt = dev_priv->edp.pps;
+
+ DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
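+ /* Both sets of delays should be in units of 100us here; take the
+ * stricter of the hardware and VBT values and round up to whole
+ * milliseconds.
+ */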
+#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
+
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ret = intel_dp_get_dpcd(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ if (ret) {
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+ dev_priv->no_aux_handshake =
+ intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+ DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+ } else {
+ /* if this fails, presume the device is a ghost */
+ DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ intel_dp_encoder_destroy(&intel_dp->base.base);
+ intel_dp_destroy(&intel_connector->base);
+ return;
+ }
+ }
+
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
+
+ intel_encoder->hot_plug = intel_dp_hot_plug;
+
+ if (is_edp(intel_dp)) {
+ dev_priv->int_edp_connector = connector;
+ intel_panel_setup_backlight(dev);
+ }
+
+ intel_dp_add_properties(intel_dp, connector);
+
+ /* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
+ * written with 0xd. Failing to do so results in spurious interrupts
+ * being generated on the port when a cable is not attached.
+ */
+ if (IS_G4X(dev) && !IS_GM45(dev)) {
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+}
diff --git a/sys/dev/drm2/i915/intel_drv.h b/sys/dev/drm2/i915/intel_drv.h
new file mode 100644
index 0000000..8039662
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_drv.h
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef DRM_INTEL_DRV_H
+#define DRM_INTEL_DRV_H
+
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/drm_fb_helper.h>
+
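+/*
+ * Poll COND for up to MS milliseconds. If W is non-zero the loop sleeps
+ * for a tick between checks (using WMSG as the wait message), otherwise
+ * it busy-waits in 1ms steps. Evaluates to 0 once COND becomes true, or
+ * to -ETIMEDOUT if it never does.
+ */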
+#define _intel_wait_for(DEV, COND, MS, W, WMSG) \
+({ \
+ int end, ret; \
+ \
+ end = ticks + (MS) * hz / 1000; \
+ ret = 0; \
+ \
+ while (!(COND)) { \
+ if (time_after(ticks, end)) { \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W) \
+ pause((WMSG), 1); \
+ else \
+ DELAY(1000); \
+ } \
+ \
+ ret; \
+})
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+/*
+ * Store information about an Ixxx DVO.
+ * The i830->i865 use multiple DVOs with multiple i2c buses, while the
+ * i915 and i945 have a single sDVO i2c bus, which is different.
+ */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtc in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* These are the outputs integrated into the chip;
+ * external chips are driven via DVO or SDVO. */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_DISPLAYPORT 7
+#define INTEL_OUTPUT_EDP 8
+
+/* Intel Pipe Clone Bit */
+#define INTEL_HDMIB_CLONE_BIT 1
+#define INTEL_HDMIC_CLONE_BIT 2
+#define INTEL_HDMID_CLONE_BIT 3
+#define INTEL_HDMIE_CLONE_BIT 4
+#define INTEL_HDMIF_CLONE_BIT 5
+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
+#define INTEL_SDVO_TV_CLONE_BIT 7
+#define INTEL_SDVO_LVDS_CLONE_BIT 8
+#define INTEL_ANALOG_CLONE_BIT 9
+#define INTEL_TV_CLONE_BIT 10
+#define INTEL_DP_B_CLONE_BIT 11
+#define INTEL_DP_C_CLONE_BIT 12
+#define INTEL_DP_D_CLONE_BIT 13
+#define INTEL_LVDS_CLONE_BIT 14
+#define INTEL_DVO_TMDS_CLONE_BIT 15
+#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+/* drm_display_mode->private_flags */
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
+/* This flag must be set by the encoder's mode_fixup if it changes the crtc
+ * timings in the mode to prevent the crtc fixup from overwriting them.
+ * Currently only lvds needs that. */
+#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
+
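+/*
+ * The pixel multiplier lives in the low nibble of private_flags (the
+ * shift is zero), and the mode clock is scaled up to match.
+ */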
+static inline void
+intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+ int multiplier)
+{
+ mode->clock *= multiplier;
+ mode->private_flags |= multiplier;
+}
+
+static inline int
+intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
+
+struct intel_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_i915_gem_object *obj;
+};
+
+struct intel_fbdev {
+ struct drm_fb_helper helper;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
+ struct drm_display_mode *our_mode;
+};
+
+struct intel_encoder {
+ struct drm_encoder base;
+ int type;
+ bool needs_tv_clock;
+ void (*hot_plug)(struct intel_encoder *);
+ int crtc_mask;
+ int clone_mask;
+};
+
+struct intel_connector {
+ struct drm_connector base;
+ struct intel_encoder *encoder;
+};
+
+struct intel_crtc {
+ struct drm_crtc base;
+ enum pipe pipe;
+ enum plane plane;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int dpms_mode;
+ bool active; /* is the crtc on? independent of the dpms mode */
+ bool busy; /* is scanout buffer being updated frequently? */
+ struct callout idle_callout;
+ bool lowfreq_avail;
+ struct intel_overlay *overlay;
+ struct intel_unpin_work *unpin_work;
+ int fdi_lanes;
+
+ struct drm_i915_gem_object *cursor_bo;
+ uint32_t cursor_addr;
+ int16_t cursor_x, cursor_y;
+ int16_t cursor_width, cursor_height;
+ bool cursor_visible;
+ unsigned int bpp;
+
+ bool no_pll; /* tertiary pipe for IVB */
+ bool use_pll_a;
+};
+
+struct intel_plane {
+ struct drm_plane base;
+ enum pipe pipe;
+ struct drm_i915_gem_object *obj;
+ bool primary_disabled;
+ int max_downscale;
+ u32 lut_r[1024], lut_g[1024], lut_b[1024];
+ void (*update_plane)(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h);
+ void (*disable_plane)(struct drm_plane *plane);
+ int (*update_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+ void (*get_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+};
+
+#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+
+#define DIP_HEADER_SIZE 5
+
+#define DIP_TYPE_AVI 0x82
+#define DIP_VERSION_AVI 0x2
+#define DIP_LEN_AVI 13
+
+#define DIP_TYPE_SPD 0x83
+#define DIP_VERSION_SPD 0x1
+#define DIP_LEN_SPD 25
+#define DIP_SPD_UNKNOWN 0
+#define DIP_SPD_DSTB 0x1
+#define DIP_SPD_DVDP 0x2
+#define DIP_SPD_DVHS 0x3
+#define DIP_SPD_HDDVR 0x4
+#define DIP_SPD_DVC 0x5
+#define DIP_SPD_DSC 0x6
+#define DIP_SPD_VCD 0x7
+#define DIP_SPD_GAME 0x8
+#define DIP_SPD_PC 0x9
+#define DIP_SPD_BD 0xa
+#define DIP_SPD_SCD 0xb
+
+struct dip_infoframe {
+ uint8_t type; /* HB0 */
+ uint8_t ver; /* HB1 */
+ uint8_t len; /* HB2 - body len, not including checksum */
+ uint8_t ecc; /* Header ECC */
+ uint8_t checksum; /* PB0 */
+ union {
+ struct {
+ /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
+ uint8_t Y_A_B_S;
+ /* PB2 - C 7:6, M 5:4, R 3:0 */
+ uint8_t C_M_R;
+ /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
+ uint8_t ITC_EC_Q_SC;
+ /* PB4 - VIC 6:0 */
+ uint8_t VIC;
+ /* PB5 - PR 3:0 */
+ uint8_t PR;
+ /* PB6 to PB13 */
+ uint16_t top_bar_end;
+ uint16_t bottom_bar_start;
+ uint16_t left_bar_end;
+ uint16_t right_bar_start;
+ } avi;
+ struct {
+ uint8_t vn[8];
+ uint8_t pd[16];
+ uint8_t sdi;
+ } spd;
+ uint8_t payload[27];
+ } __attribute__ ((packed)) body;
+} __attribute__((packed));
+
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
+static inline struct drm_crtc *
+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->plane_to_crtc_mapping[plane];
+}
+
+struct intel_unpin_work {
+ struct task task;
+ struct drm_device *dev;
+ struct drm_i915_gem_object *old_fb_obj;
+ struct drm_i915_gem_object *pending_flip_obj;
+ struct drm_pending_vblank_event *event;
+ int pending;
+ bool enable_stall_check;
+};
+
+struct intel_fbc_work {
+ struct timeout_task task;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int interval;
+};
+
+int intel_ddc_get_modes(struct drm_connector *c, device_t adapter);
+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
+
+extern void intel_attach_force_audio_property(struct drm_connector *connector);
+extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern void intel_crt_init(struct drm_device *dev);
+extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void intel_dvo_init(struct drm_device *dev);
+extern void intel_tv_init(struct drm_device *dev);
+extern void intel_mark_busy(struct drm_device *dev,
+ struct drm_i915_gem_object *obj);
+extern bool intel_lvds_init(struct drm_device *dev);
+extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
+extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+
+/* intel_panel.c */
+extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_pch_panel_fitting(struct drm_device *dev,
+ int fitting_mode,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
+extern u32 intel_panel_get_backlight(struct drm_device *dev);
+extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+extern int intel_panel_setup_backlight(struct drm_device *dev);
+extern void intel_panel_enable_backlight(struct drm_device *dev);
+extern void intel_panel_disable_backlight(struct drm_device *dev);
+extern void intel_panel_destroy_backlight(struct drm_device *dev);
+extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+
+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void intel_encoder_prepare(struct drm_encoder *encoder);
+extern void intel_encoder_commit(struct drm_encoder *encoder);
+extern void intel_encoder_destroy(struct drm_encoder *encoder);
+
+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+{
+ return to_intel_connector(connector)->encoder;
+}
+
+extern void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+
+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+
+struct intel_load_detect_pipe {
+ struct drm_framebuffer *release_fb;
+ bool load_detect_temp;
+ int dpms_mode;
+};
+extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old);
+extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
+ struct intel_load_detect_pipe *old);
+
+extern void intelfb_restore(void);
+extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void ironlake_enable_drps(struct drm_device *dev);
+extern void ironlake_disable_drps(struct drm_device *dev);
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
+
+extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
+extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+
+extern int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
+
+extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+extern void intel_setup_overlay(struct drm_device *dev);
+extern void intel_cleanup_overlay(struct drm_device *dev);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+extern int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern void intel_fb_output_poll_changed(struct drm_device *dev);
+extern void intel_fb_restore_mode(struct drm_device *dev);
+
+extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+
+/* For use by IVB LP watermark workaround in intel_sprite.c */
+extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width,
+ int pixel_size);
+extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#endif
diff --git a/sys/dev/drm2/i915/intel_fb.c b/sys/dev/drm2/i915/intel_fb.c
new file mode 100644
index 0000000..3cb3b78
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_fb.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_fb_helper.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+
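+/*
+ * Allocate a GEM object for the fbdev console, pin it into the GTT and
+ * wrap it in an intel_framebuffer for the fb helper to use.
+ */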
+static int intelfb_create(struct intel_fbdev *ifbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = ifbdev->helper.dev;
+#if 0
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct fb_info *info;
+#endif
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_i915_gem_object *obj;
+ int size, ret;
+
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
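+ /* The display engine expects the stride to be a 64-byte multiple */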
+ mode_cmd.pitches[0] = roundup2(mode_cmd.width * ((sizes->surface_bpp + 7) /
+ 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = roundup2(size, PAGE_SIZE);
+ obj = i915_gem_alloc_object(dev, size);
+ if (!obj) {
+ DRM_ERROR("failed to allocate framebuffer\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ DRM_LOCK(dev);
+
+ /* Flush everything out, we'll be doing GTT only from now on */
+ ret = intel_pin_and_fence_fb_obj(dev, obj, false);
+ if (ret) {
+ DRM_ERROR("failed to pin fb: %d\n", ret);
+ goto out_unref;
+ }
+
+#if 0
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+
+ info->par = ifbdev;
+#endif
+
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
+ if (ret)
+ goto out_unpin;
+
+ fb = &ifbdev->ifb.base;
+
+ ifbdev->helper.fb = fb;
+#if 0
+ ifbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "inteldrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &intelfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].size =
+ dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+ info->fix.smem_len = size;
+
+ info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
+ if (!info->screen_base) {
+ ret = -ENOSPC;
+ goto out_unpin;
+ }
+ info->screen_size = size;
+
+// memset(info->screen_base, 0, size);
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+#endif
+
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+ fb->width, fb->height,
+ obj->gtt_offset, obj);
+
+ DRM_UNLOCK(dev);
+#if 1
+ KIB_NOTYET();
+#else
+ vga_switcheroo_client_fb_set(dev->pdev, info);
+#endif
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(obj);
+out_unref:
+ drm_gem_object_unreference(&obj->base);
+ DRM_UNLOCK(dev);
+out:
+ return ret;
+}
+
+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = intelfb_create(ifbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
+ .fb_probe = intel_fb_find_or_create_single,
+};
+
+static void intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
+{
+#if 0
+ struct fb_info *info;
+#endif
+ struct intel_framebuffer *ifb = &ifbdev->ifb;
+
+#if 0
+ if (ifbdev->helper.fbdev) {
+ info = ifbdev->helper.fbdev;
+ unregister_framebuffer(info);
+ iounmap(info->screen_base);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+#endif
+
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ drm_framebuffer_cleanup(&ifb->base);
+ if (ifb->obj) {
+ drm_gem_object_unreference_unlocked(&ifb->obj->base);
+ ifb->obj = NULL;
+ }
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ifbdev = malloc(sizeof(struct intel_fbdev), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ dev_priv->fbdev = ifbdev;
+ ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, &ifbdev->helper,
+ dev_priv->num_pipe,
+ INTELFB_CONN_LIMIT);
+ if (ret) {
+ free(ifbdev, DRM_MEM_KMS);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+ drm_fb_helper_initial_config(&ifbdev->helper, 32);
+ return 0;
+}
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ intel_fbdev_destroy(dev, dev_priv->fbdev);
+ free(dev_priv->fbdev, DRM_MEM_KMS);
+ dev_priv->fbdev = NULL;
+}
+
+void intel_fb_output_poll_changed(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
+
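+/*
+ * Drop back to the fbdev console configuration and shut off any sprite
+ * planes a DRM client may have left enabled.
+ */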
+void intel_fb_restore_mode(struct drm_device *dev)
+{
+ int ret;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_plane *plane;
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
+ if (ret)
+ DRM_DEBUG("failed to restore crtc mode\n");
+
+ /* Be sure to shut off any planes that may be active */
+ list_for_each_entry(plane, &config->plane_list, head)
+ plane->funcs->disable_plane(plane);
+
+ sx_xunlock(&dev->mode_config.mutex);
+}
diff --git a/sys/dev/drm2/i915/intel_hdmi.c b/sys/dev/drm2/i915/intel_hdmi.c
new file mode 100644
index 0000000..ccb9dce
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_hdmi.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+
+struct intel_hdmi {
+ struct intel_encoder base;
+ u32 sdvox_reg;
+ int ddc_bus;
+ uint32_t color_range;
+ bool has_hdmi_sink;
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ struct dip_infoframe *frame);
+};
+
+static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_hdmi, base.base);
+}
+
+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_hdmi, base);
+}
+
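+/*
+ * The infoframe checksum byte is chosen so that the header and payload
+ * bytes sum to zero modulo 256.
+ */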
+void intel_dip_infoframe_csum(struct dip_infoframe *frame)
+{
+ uint8_t *data = (uint8_t *)frame;
+ uint8_t sum = 0;
+ unsigned i;
+
+ frame->checksum = 0;
+ frame->ecc = 0;
+
+ for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
+ sum += data[i];
+
+ frame->checksum = 0x100 - sum;
+}
+
+static u32 intel_infoframe_index(struct dip_infoframe *frame)
+{
+ u32 flags = 0;
+
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ flags |= VIDEO_DIP_SELECT_AVI;
+ break;
+ case DIP_TYPE_SPD:
+ flags |= VIDEO_DIP_SELECT_SPD;
+ break;
+ default:
+ DRM_DEBUG("unknown info frame type %d\n", frame->type);
+ break;
+ }
+
+ return flags;
+}
+
+static u32 intel_infoframe_flags(struct dip_infoframe *frame)
+{
+ u32 flags = 0;
+
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
+ break;
+ case DIP_TYPE_SPD:
+ flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
+ break;
+ default:
+ DRM_DEBUG("unknown info frame type %d\n", frame->type);
+ break;
+ }
+
+ return flags;
+}
+
+static void i9xx_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+
+ /* XXX first guess at handling video port, is this correct? */
+ if (intel_hdmi->sdvox_reg == SDVOB)
+ port = VIDEO_DIP_PORT_B;
+ else if (intel_hdmi->sdvox_reg == SDVOC)
+ port = VIDEO_DIP_PORT_C;
+ else
+ return;
+
+ flags = intel_infoframe_index(frame);
+
+ val &= ~VIDEO_DIP_SELECT_MASK;
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VIDEO_DIP_DATA, *data);
+ data++;
+ }
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+}
+
+static void ironlake_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 flags, val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ flags = intel_infoframe_index(frame);
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+}
+
+static void intel_set_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (!intel_hdmi->has_hdmi_sink)
+ return;
+
+ intel_dip_infoframe_csum(frame);
+ intel_hdmi->write_infoframe(encoder, frame);
+}
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+
+ intel_set_infoframe(encoder, &avi_if);
+}
+
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe spd_if;
+
+ memset(&spd_if, 0, sizeof(spd_if));
+ spd_if.type = DIP_TYPE_SPD;
+ spd_if.ver = DIP_VERSION_SPD;
+ spd_if.len = DIP_LEN_SPD;
+ strcpy(spd_if.body.spd.vn, "Intel");
+ strcpy(spd_if.body.spd.pd, "Integrated gfx");
+ spd_if.body.spd.sdi = DIP_SPD_PC;
+
+ intel_set_infoframe(encoder, &spd_if);
+}
+
+static void intel_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 sdvox;
+
+ sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+ if (!HAS_PCH_SPLIT(dev))
+ sdvox |= intel_hdmi->color_range;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+
+ if (intel_crtc->bpp > 24)
+ sdvox |= COLOR_FORMAT_12bpc;
+ else
+ sdvox |= COLOR_FORMAT_8bpc;
+
+ /* Required on CPT */
+ if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+ sdvox |= HDMI_MODE_SELECT;
+
+ if (intel_hdmi->has_audio) {
+ DRM_DEBUG_KMS("Enabling HDMI audio on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ sdvox |= SDVO_AUDIO_ENABLE;
+ sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ if (HAS_PCH_CPT(dev))
+ sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
+ else if (intel_crtc->pipe == 1)
+ sdvox |= SDVO_PIPE_B_SELECT;
+
+ I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ intel_hdmi_set_avi_infoframe(encoder);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE;
+
+ if (intel_hdmi->has_audio)
+ enable_bits |= SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->sdvox_reg);
+
+ /* HW workaround: the enable bit has to be toggled off and on for 12bpc,
+ * but we do it unconditionally since that proved more stable in testing.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+ }
+
+ if (mode != DRM_MODE_DPMS_ON)
+ temp &= ~enable_bits;
+ else
+ temp |= enable_bits;
+
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* HW workaround: write the register twice, since the first write can
+ * otherwise be masked.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+ }
+}
+
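+/*
+ * Single-link TMDS tops out at a 165MHz dot clock, and the hardware
+ * cannot generate very low dot clocks either, so reject modes outside
+ * that range.
+ */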
+static int intel_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ return MODE_OK;
+}
+
+static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static enum drm_connector_status
+intel_hdmi_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct edid *edid;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ intel_hdmi->has_hdmi_sink = false;
+ intel_hdmi->has_audio = false;
+ edid = drm_get_edid(connector, dev_priv->gmbus[intel_hdmi->ddc_bus]);
+
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink =
+ drm_detect_hdmi_monitor(edid);
+ intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ }
+ connector->display_info.raw_edid = NULL;
+ free(edid, DRM_MEM_KMS);
+ } else {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] got no edid, ddc port %d\n",
+ connector->base.id, drm_get_connector_name(connector),
+ intel_hdmi->ddc_bus);
+ }
+
+ if (status == connector_status_connected) {
+ if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
+ intel_hdmi->has_audio =
+ (intel_hdmi->force_audio == HDMI_AUDIO_ON);
+ }
+
+ return status;
+}
+
+static int intel_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ /* We should parse the EDID data and find out if it's an HDMI sink so
+ * we can send audio to it.
+ */
+
+ return intel_ddc_get_modes(connector,
+ dev_priv->gmbus[intel_hdmi->ddc_bus]);
+}
+
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct edid *edid;
+ bool has_audio = false;
+
+ edid = drm_get_edid(connector, dev_priv->gmbus[intel_hdmi->ddc_bus]);
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+
+ connector->display_info.raw_edid = NULL;
+ free(edid, DRM_MEM_KMS);
+ }
+
+ return has_audio;
+}
+
+static int
+intel_hdmi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ enum hdmi_force_audio i = val;
+ bool has_audio;
+
+ if (i == intel_hdmi->force_audio)
+ return 0;
+
+ intel_hdmi->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_hdmi_detect_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (i == HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink = 0;
+
+ intel_hdmi->has_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_hdmi->color_range)
+ return 0;
+
+ intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_hdmi->base.base.crtc) {
+ struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->fb);
+ }
+
+ return 0;
+}
+
+static void intel_hdmi_destroy(struct drm_connector *connector)
+{
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, DRM_MEM_KMS);
+}
+
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
+ .dpms = intel_hdmi_dpms,
+ .mode_fixup = intel_hdmi_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_hdmi_mode_set,
+ .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = intel_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_hdmi_set_property,
+ .destroy = intel_hdmi_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
+ .get_modes = intel_hdmi_get_modes,
+ .mode_valid = intel_hdmi_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+{
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+}
+
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ struct intel_hdmi *intel_hdmi;
+ int i;
+
+ intel_hdmi = malloc(sizeof(struct intel_hdmi), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+ intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ intel_encoder = &intel_hdmi->base;
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
+
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+ /* Set up the DDC bus. */
+ if (sdvox_reg == SDVOB) {
+ intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == SDVOC) {
+ intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == HDMIB) {
+ intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == HDMIC) {
+ intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == HDMID) {
+ intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ }
+
+ intel_hdmi->sdvox_reg = sdvox_reg;
+
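+	/*
+	 * Pre-PCH (GMCH) parts have a single DIP engine; Ironlake and
+	 * later have one per transcoder, so each is cleared here.
+	 */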
+ if (!HAS_PCH_SPLIT(dev)) {
+ intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ I915_WRITE(VIDEO_DIP_CTL, 0);
+ } else {
+ intel_hdmi->write_infoframe = ironlake_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(TVIDEO_DIP_CTL(i), 0);
+ }
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+ intel_hdmi_add_properties(intel_hdmi, connector);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+#if 0
+ drm_sysfs_connector_add(connector);
+#endif
+
+ /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ if (IS_G4X(dev) && !IS_GM45(dev)) {
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+}
diff --git a/sys/dev/drm2/i915/intel_iic.c b/sys/dev/drm2/i915/intel_iic.c
new file mode 100644
index 0000000..40e6eca
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_iic.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/iicbus/iic.h>
+#include <dev/iicbus/iiconf.h>
+#include <dev/iicbus/iicbus.h>
+#include "iicbus_if.h"
+#include "iicbb_if.h"
+
+static int intel_iic_quirk_xfer(device_t idev, struct iic_msg *msgs, int nmsgs);
+static void intel_teardown_gmbus_m(struct drm_device *dev, int m);
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 10
+
+struct intel_iic_softc {
+ struct drm_device *drm_dev;
+ device_t iic_dev;
+ bool force_bit_dev;
+ char name[32];
+ uint32_t reg;
+ uint32_t reg0;
+};
+
+static void
+intel_iic_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+	/* When bit banging I2C on Pineview, DPCUNIT clock gating must be disabled */
+ if (!IS_PINEVIEW(dev_priv->dev))
+ return;
+
+ val = I915_READ(DSPCLK_GATE_D);
+ if (enable)
+ val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
+}
+
+static u32
+intel_iic_get_reserved(device_t idev)
+{
+ struct intel_iic_softc *sc;
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ u32 reserved;
+
+ sc = device_get_softc(idev);
+ dev = sc->drm_dev;
+ dev_priv = dev->dev_private;
+
+ if (!IS_I830(dev) && !IS_845G(dev)) {
+ reserved = I915_READ_NOTRACE(sc->reg) &
+ (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
+ } else {
+ reserved = 0;
+ }
+
+ return (reserved);
+}
+
+void
+intel_iic_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+
+ dev_priv = dev->dev_private;
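+	/*
+	 * Writing 0 to GMBUS0 selects the "disabled" pin pair,
+	 * resetting the controller.
+	 */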
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_GMBUS0, 0);
+ else
+ I915_WRITE(GMBUS0, 0);
+}
+
+static int
+intel_iicbus_reset(device_t idev, u_char speed, u_char addr, u_char *oldaddr)
+{
+ struct intel_iic_softc *sc;
+ struct drm_device *dev;
+
+ sc = device_get_softc(idev);
+ dev = sc->drm_dev;
+
+ intel_iic_reset(dev);
+ return (0);
+}
+
+static void
+intel_iicbb_setsda(device_t idev, int val)
+{
+ struct intel_iic_softc *sc;
+ struct drm_i915_private *dev_priv;
+ u32 reserved;
+ u32 data_bits;
+
+ sc = device_get_softc(idev);
+ dev_priv = sc->drm_dev->dev_private;
+
+ reserved = intel_iic_get_reserved(idev);
+ if (val)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ I915_WRITE_NOTRACE(sc->reg, reserved | data_bits);
+ POSTING_READ(sc->reg);
+}
+
+static void
+intel_iicbb_setscl(device_t idev, int val)
+{
+ struct intel_iic_softc *sc;
+ struct drm_i915_private *dev_priv;
+ u32 clock_bits, reserved;
+
+ sc = device_get_softc(idev);
+ dev_priv = sc->drm_dev->dev_private;
+
+ reserved = intel_iic_get_reserved(idev);
+ if (val)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+
+ I915_WRITE_NOTRACE(sc->reg, reserved | clock_bits);
+ POSTING_READ(sc->reg);
+}
+
+static int
+intel_iicbb_getsda(device_t idev)
+{
+ struct intel_iic_softc *sc;
+ struct drm_i915_private *dev_priv;
+ u32 reserved;
+
+ sc = device_get_softc(idev);
+ dev_priv = sc->drm_dev->dev_private;
+
+ reserved = intel_iic_get_reserved(idev);
+
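+	/*
+	 * Writing the direction mask with the direction value clear
+	 * turns the pin into an input, releasing the line; the second
+	 * write clears the mask bits before the value is sampled.
+	 */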
+ I915_WRITE_NOTRACE(sc->reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(sc->reg, reserved);
+ return ((I915_READ_NOTRACE(sc->reg) & GPIO_DATA_VAL_IN) != 0);
+}
+
+static int
+intel_iicbb_getscl(device_t idev)
+{
+ struct intel_iic_softc *sc;
+ struct drm_i915_private *dev_priv;
+ u32 reserved;
+
+ sc = device_get_softc(idev);
+ dev_priv = sc->drm_dev->dev_private;
+
+ reserved = intel_iic_get_reserved(idev);
+
+ I915_WRITE_NOTRACE(sc->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(sc->reg, reserved);
+ return ((I915_READ_NOTRACE(sc->reg) & GPIO_CLOCK_VAL_IN) != 0);
+}
+
+static int
+intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
+{
+ struct intel_iic_softc *sc;
+ struct drm_i915_private *dev_priv;
+ u8 *buf;
+ int error, i, reg_offset, unit;
+ u32 val, loop;
+ u16 len;
+
+ sc = device_get_softc(idev);
+ dev_priv = sc->drm_dev->dev_private;
+ unit = device_get_unit(idev);
+
+ sx_xlock(&dev_priv->gmbus_sx);
+ if (sc->force_bit_dev) {
+ error = intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs, nmsgs);
+ goto out;
+ }
+
+ reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+
+ I915_WRITE(GMBUS0 + reg_offset, sc->reg0);
+
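+	/*
+	 * GMBUS protocol: GMBUS0 selects the pin pair and clock rate,
+	 * GMBUS1 issues a command for each message, data is streamed
+	 * through GMBUS3 four bytes at a time, and GMBUS2 is polled
+	 * for HW_RDY/SATOER between words.
+	 */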
+ for (i = 0; i < nmsgs; i++) {
+ len = msgs[i].len;
+ buf = msgs[i].buf;
+
+ if ((msgs[i].flags & IIC_M_RD) != 0) {
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT |
+ (i + 1 == nmsgs ? GMBUS_CYCLE_STOP : 0) |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].slave << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ POSTING_READ(GMBUS2 + reg_offset);
+ do {
+ loop = 0;
+
+ if (_intel_wait_for(sc->drm_dev,
+ (I915_READ(GMBUS2 + reg_offset) &
+ (GMBUS_SATOER | GMBUS_HW_RDY)) != 0,
+ 50, 1, "915gbr"))
+ goto timeout;
+ if ((I915_READ(GMBUS2 + reg_offset) &
+ GMBUS_SATOER) != 0)
+ goto clear_err;
+
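+			/* GMBUS3 returns up to four message bytes per read, LSB first */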
+ val = I915_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len != 0 && ++loop < 4);
+ } while (len != 0);
+ } else {
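+			/* Pack up to four bytes, LSB first, into GMBUS3 before starting the write cycle */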
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len != 0 && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT |
+ (i + 1 == nmsgs ? GMBUS_CYCLE_STOP : 0) |
+ (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].slave << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+			POSTING_READ(GMBUS2 + reg_offset);
+
+ while (len != 0) {
+ if (_intel_wait_for(sc->drm_dev,
+ (I915_READ(GMBUS2 + reg_offset) &
+ (GMBUS_SATOER | GMBUS_HW_RDY)) != 0,
+ 50, 1, "915gbw"))
+ goto timeout;
+ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len != 0 && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ POSTING_READ(GMBUS2 + reg_offset);
+ }
+ }
+
+ if (i + 1 < nmsgs && _intel_wait_for(sc->drm_dev,
+ (I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER |
+ GMBUS_HW_WAIT_PHASE)) != 0,
+ 50, 1, "915gbh"))
+ goto timeout;
+ if ((I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) != 0)
+ goto clear_err;
+ }
+
+ error = 0;
+done:
+ /* Mark the GMBUS interface as disabled after waiting for idle.
+ * We will re-enable it at the start of the next xfer,
+ * till then let it sleep.
+ */
+	if (_intel_wait_for(sc->drm_dev,
+ (I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10, 1, "915gbu"))
+ DRM_INFO("GMBUS timed out waiting for idle\n");
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+out:
+ sx_xunlock(&dev_priv->gmbus_sx);
+ return (error);
+
+clear_err:
+ /* Toggle the Software Clear Interrupt bit. This has the effect
+ * of resetting the GMBUS controller and so clearing the
+ * BUS_ERROR raised by the slave's NAK.
+ */
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+ I915_WRITE(GMBUS1 + reg_offset, 0);
+ error = EIO;
+ goto done;
+
+timeout:
+ DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+ sc->reg0 & 0xff, sc->name);
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+
+ /*
+	 * The hardware may not support GMBUS over these pins;
+	 * try GPIO bit banging instead.
+ */
+ sc->force_bit_dev = true;
+
+ error = intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs, nmsgs);
+ goto out;
+}
+
+void
+intel_gmbus_set_speed(device_t idev, int speed)
+{
+ struct intel_iic_softc *sc;
+
+ sc = device_get_softc(device_get_parent(idev));
+
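+	/*
+	 * Bits 9:8 of GMBUS0 select the clock rate; speed should be
+	 * one of the GMBUS_RATE_* constants.
+	 */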
+ sc->reg0 = (sc->reg0 & ~(0x3 << 8)) | speed;
+}
+
+void
+intel_gmbus_force_bit(device_t idev, bool force_bit)
+{
+ struct intel_iic_softc *sc;
+
+ sc = device_get_softc(device_get_parent(idev));
+ sc->force_bit_dev = force_bit;
+}
+
+static int
+intel_iic_quirk_xfer(device_t idev, struct iic_msg *msgs, int nmsgs)
+{
+ device_t bridge_dev;
+ struct intel_iic_softc *sc;
+ struct drm_i915_private *dev_priv;
+ int ret;
+ int i;
+
+ bridge_dev = device_get_parent(device_get_parent(idev));
+ sc = device_get_softc(bridge_dev);
+ dev_priv = sc->drm_dev->dev_private;
+
+ intel_iic_reset(sc->drm_dev);
+ intel_iic_quirk_set(dev_priv, true);
+ IICBB_SETSDA(bridge_dev, 1);
+ IICBB_SETSCL(bridge_dev, 1);
+ DELAY(I2C_RISEFALL_TIME);
+
+ /* convert slave addresses to format expected by iicbb */
+ for (i = 0; i < nmsgs; i++) {
+ msgs[i].slave <<= 1;
+ /* force use of repeated start instead of default stop+start */
+ if (i != (nmsgs - 1))
+ msgs[i].flags |= IIC_M_NOSTOP;
+ }
+ ret = iicbus_transfer(idev, msgs, nmsgs);
+ /* restore the addresses */
+ for (i = 0; i < nmsgs; i++)
+ msgs[i].slave >>= 1;
+ IICBB_SETSDA(bridge_dev, 1);
+ IICBB_SETSCL(bridge_dev, 1);
+ intel_iic_quirk_set(dev_priv, false);
+
+ return (ret);
+}
+
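+/* Indexed by GMBUS pin pair number, as programmed into GMBUS0. */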
+static const char *gpio_names[GMBUS_NUM_PORTS] = {
+ "disabled",
+ "ssc",
+ "vga",
+ "panel",
+ "dpc",
+ "dpb",
+ "reserved",
+ "dpd",
+};
+
+static int
+intel_gmbus_probe(device_t dev)
+{
+
+ return (BUS_PROBE_SPECIFIC);
+}
+
+static int
+intel_gmbus_attach(device_t idev)
+{
+ struct drm_i915_private *dev_priv;
+ struct intel_iic_softc *sc;
+ int pin;
+
+ sc = device_get_softc(idev);
+ sc->drm_dev = device_get_softc(device_get_parent(idev));
+ dev_priv = sc->drm_dev->dev_private;
+ pin = device_get_unit(idev);
+
+ snprintf(sc->name, sizeof(sc->name), "gmbus bus %s", gpio_names[pin]);
+ device_set_desc(idev, sc->name);
+
+ /* By default use a conservative clock rate */
+ sc->reg0 = pin | GMBUS_RATE_100KHZ;
+
+ /* XXX force bit banging until GMBUS is fully debugged */
+ if (IS_GEN2(sc->drm_dev)) {
+ sc->force_bit_dev = true;
+ }
+
+ /* add bus interface device */
+ sc->iic_dev = device_add_child(idev, "iicbus", -1);
+ if (sc->iic_dev == NULL)
+ return (ENXIO);
+ device_quiet(sc->iic_dev);
+ bus_generic_attach(idev);
+
+ return (0);
+}
+
+static int
+intel_gmbus_detach(device_t idev)
+{
+ struct intel_iic_softc *sc;
+ struct drm_i915_private *dev_priv;
+ device_t child;
+ int u;
+
+ sc = device_get_softc(idev);
+ u = device_get_unit(idev);
+ dev_priv = sc->drm_dev->dev_private;
+
+ child = sc->iic_dev;
+ bus_generic_detach(idev);
+ if (child != NULL)
+ device_delete_child(idev, child);
+
+ return (0);
+}
+
+static int
+intel_iicbb_probe(device_t dev)
+{
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+intel_iicbb_attach(device_t idev)
+{
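+	/*
+	 * GPIO control register for each GMBUS pin; slots 0 and 6
+	 * have no GPIO pair.
+	 */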
+ static const int map_pin_to_reg[] = {
+ 0,
+ GPIOB,
+ GPIOA,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ 0,
+ GPIOF
+ };
+
+ struct intel_iic_softc *sc;
+ struct drm_i915_private *dev_priv;
+ int pin;
+
+ sc = device_get_softc(idev);
+ sc->drm_dev = device_get_softc(device_get_parent(idev));
+ dev_priv = sc->drm_dev->dev_private;
+ pin = device_get_unit(idev);
+
+ snprintf(sc->name, sizeof(sc->name), "i915 iicbb %s", gpio_names[pin]);
+ device_set_desc(idev, sc->name);
+
+ sc->reg0 = pin | GMBUS_RATE_100KHZ;
+ sc->reg = map_pin_to_reg[pin];
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ sc->reg += PCH_GPIOA - GPIOA;
+
+ /* add generic bit-banging code */
+ sc->iic_dev = device_add_child(idev, "iicbb", -1);
+ if (sc->iic_dev == NULL)
+ return (ENXIO);
+ device_quiet(sc->iic_dev);
+ bus_generic_attach(idev);
+
+ return (0);
+}
+
+static int
+intel_iicbb_detach(device_t idev)
+{
+ struct intel_iic_softc *sc;
+ device_t child;
+
+ sc = device_get_softc(idev);
+ child = sc->iic_dev;
+ bus_generic_detach(idev);
+ if (child)
+ device_delete_child(idev, child);
+ return (0);
+}
+
+static device_method_t intel_gmbus_methods[] = {
+ DEVMETHOD(device_probe, intel_gmbus_probe),
+ DEVMETHOD(device_attach, intel_gmbus_attach),
+ DEVMETHOD(device_detach, intel_gmbus_detach),
+ DEVMETHOD(iicbus_reset, intel_iicbus_reset),
+ DEVMETHOD(iicbus_transfer, intel_gmbus_transfer),
+ DEVMETHOD_END
+};
+static driver_t intel_gmbus_driver = {
+ "intel_gmbus",
+ intel_gmbus_methods,
+ sizeof(struct intel_iic_softc)
+};
+static devclass_t intel_gmbus_devclass;
+DRIVER_MODULE_ORDERED(intel_gmbus, drmn, intel_gmbus_driver,
+ intel_gmbus_devclass, 0, 0, SI_ORDER_FIRST);
+DRIVER_MODULE(iicbus, intel_gmbus, iicbus_driver, iicbus_devclass, 0, 0);
+
+static device_method_t intel_iicbb_methods[] = {
+ DEVMETHOD(device_probe, intel_iicbb_probe),
+ DEVMETHOD(device_attach, intel_iicbb_attach),
+ DEVMETHOD(device_detach, intel_iicbb_detach),
+
+ DEVMETHOD(bus_add_child, bus_generic_add_child),
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+
+ DEVMETHOD(iicbb_callback, iicbus_null_callback),
+ DEVMETHOD(iicbb_reset, intel_iicbus_reset),
+ DEVMETHOD(iicbb_setsda, intel_iicbb_setsda),
+ DEVMETHOD(iicbb_setscl, intel_iicbb_setscl),
+ DEVMETHOD(iicbb_getsda, intel_iicbb_getsda),
+ DEVMETHOD(iicbb_getscl, intel_iicbb_getscl),
+ DEVMETHOD_END
+};
+static driver_t intel_iicbb_driver = {
+ "intel_iicbb",
+ intel_iicbb_methods,
+ sizeof(struct intel_iic_softc)
+};
+static devclass_t intel_iicbb_devclass;
+DRIVER_MODULE_ORDERED(intel_iicbb, drmn, intel_iicbb_driver,
+ intel_iicbb_devclass, 0, 0, SI_ORDER_FIRST);
+DRIVER_MODULE(iicbb, intel_iicbb, iicbb_driver, iicbb_devclass, 0, 0);
+
+int
+intel_setup_gmbus(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+ device_t iic_dev;
+ int i, ret;
+
+ dev_priv = dev->dev_private;
+ sx_init(&dev_priv->gmbus_sx, "gmbus");
+ dev_priv->gmbus_bridge = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
+ DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+ dev_priv->bbbus_bridge = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
+ DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+ dev_priv->gmbus = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
+ DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+ dev_priv->bbbus = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
+ DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
+
+ /*
+	 * Giant is most likely already held and recursed here, since
+	 * intel_setup_gmbus() is normally called from the attach
+	 * method of the driver.
+ */
+ mtx_lock(&Giant);
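+	/*
+	 * Two bridges are attached for each port: intel_iicbb provides
+	 * the GPIO bit-banging fallback (its iicbb child carries an
+	 * iicbus grandchild), while intel_gmbus drives the hardware
+	 * engine and gets an iicbus child directly.
+	 */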
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ /*
+		 * Initialize bbbus_bridge before gmbus_bridge, since
+		 * gmbus may decide to force a quirk transfer in the
+		 * attachment code.
+ */
+ dev_priv->bbbus_bridge[i] = device_add_child(dev->device,
+ "intel_iicbb", i);
+ if (dev_priv->bbbus_bridge[i] == NULL) {
+ DRM_ERROR("bbbus bridge %d creation failed\n", i);
+ ret = ENXIO;
+ goto err;
+ }
+ device_quiet(dev_priv->bbbus_bridge[i]);
+ ret = device_probe_and_attach(dev_priv->bbbus_bridge[i]);
+ if (ret != 0) {
+ DRM_ERROR("bbbus bridge %d attach failed, %d\n", i,
+ ret);
+ goto err;
+ }
+
+ iic_dev = device_find_child(dev_priv->bbbus_bridge[i], "iicbb",
+ -1);
+ if (iic_dev == NULL) {
+ DRM_ERROR("bbbus bridge doesn't have iicbb child\n");
+ goto err;
+ }
+ iic_dev = device_find_child(iic_dev, "iicbus", -1);
+ if (iic_dev == NULL) {
+ DRM_ERROR(
+ "bbbus bridge doesn't have iicbus grandchild\n");
+ goto err;
+ }
+
+ dev_priv->bbbus[i] = iic_dev;
+
+ dev_priv->gmbus_bridge[i] = device_add_child(dev->device,
+ "intel_gmbus", i);
+ if (dev_priv->gmbus_bridge[i] == NULL) {
+ DRM_ERROR("gmbus bridge %d creation failed\n", i);
+ ret = ENXIO;
+ goto err;
+ }
+ device_quiet(dev_priv->gmbus_bridge[i]);
+ ret = device_probe_and_attach(dev_priv->gmbus_bridge[i]);
+ if (ret != 0) {
+ DRM_ERROR("gmbus bridge %d attach failed, %d\n", i,
+ ret);
+ ret = ENXIO;
+ goto err;
+ }
+
+ iic_dev = device_find_child(dev_priv->gmbus_bridge[i],
+ "iicbus", -1);
+ if (iic_dev == NULL) {
+ DRM_ERROR("gmbus bridge doesn't have iicbus child\n");
+ goto err;
+ }
+ dev_priv->gmbus[i] = iic_dev;
+
+ intel_iic_reset(dev);
+ }
+
+ mtx_unlock(&Giant);
+ return (0);
+
+err:
+ intel_teardown_gmbus_m(dev, i);
+ mtx_unlock(&Giant);
+ return (ret);
+}
+
+static void
+intel_teardown_gmbus_m(struct drm_device *dev, int m)
+{
+ struct drm_i915_private *dev_priv;
+
+ dev_priv = dev->dev_private;
+
+ free(dev_priv->gmbus, DRM_MEM_DRIVER);
+ dev_priv->gmbus = NULL;
+ free(dev_priv->bbbus, DRM_MEM_DRIVER);
+ dev_priv->bbbus = NULL;
+ free(dev_priv->gmbus_bridge, DRM_MEM_DRIVER);
+ dev_priv->gmbus_bridge = NULL;
+ free(dev_priv->bbbus_bridge, DRM_MEM_DRIVER);
+ dev_priv->bbbus_bridge = NULL;
+ sx_destroy(&dev_priv->gmbus_sx);
+}
+
+void
+intel_teardown_gmbus(struct drm_device *dev)
+{
+
+ mtx_lock(&Giant);
+ intel_teardown_gmbus_m(dev, GMBUS_NUM_PORTS);
+ mtx_unlock(&Giant);
+}
diff --git a/sys/dev/drm2/i915/intel_lvds.c b/sys/dev/drm2/i915/intel_lvds.c
new file mode 100644
index 0000000..4e29b91
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_lvds.c
@@ -0,0 +1,1125 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+
+/* Private structure for the integrated LVDS support */
+struct intel_lvds {
+ struct intel_encoder base;
+
+ struct edid *edid;
+
+ int fitting_mode;
+ u32 pfit_control;
+ u32 pfit_pgm_ratios;
+ bool pfit_dirty;
+
+ struct drm_display_mode *fixed_mode;
+};
+
+static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_lvds, base.base);
+}
+
+static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_lvds, base);
+}
+
+/**
+ * Power on the LVDS panel.
+ */
+static void intel_lvds_enable(struct intel_lvds *intel_lvds)
+{
+ struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ctl_reg, lvds_reg, stat_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ stat_reg = PCH_PP_STATUS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ stat_reg = PP_STATUS;
+ }
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
+
+ if (intel_lvds->pfit_dirty) {
+ /*
+ * Enable automatic panel scaling so that non-native modes
+ * fill the screen. The panel fitter should only be
+ * adjusted whilst the pipe is disabled, according to
+ * register description and PRM.
+ */
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ intel_lvds->pfit_control,
+ intel_lvds->pfit_pgm_ratios);
+
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+ intel_lvds->pfit_dirty = false;
+ }
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ POSTING_READ(lvds_reg);
+	if (_intel_wait_for(dev,
+	    (I915_READ(stat_reg) & PP_ON) != 0, 1000,
+	    1, "915lvds"))
+		DRM_ERROR("timed out waiting for panel to power on\n");
+
+ intel_panel_enable_backlight(dev);
+}
+
+static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+{
+ struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ctl_reg, lvds_reg, stat_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ stat_reg = PCH_PP_STATUS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ stat_reg = PP_STATUS;
+ }
+
+ intel_panel_disable_backlight(dev);
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+ if (_intel_wait_for(dev,
+ (I915_READ(stat_reg) & PP_ON) == 0, 1000,
+ 1, "915lvo"))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+
+ if (intel_lvds->pfit_control) {
+ I915_WRITE(PFIT_CONTROL, 0);
+ intel_lvds->pfit_dirty = true;
+ }
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_reg);
+}
+
+static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ intel_lvds_enable(intel_lvds);
+ else
+ intel_lvds_disable(intel_lvds);
+
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static int intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+
+ return MODE_OK;
+}
+
+static void
+centre_horizontally(struct drm_display_mode *mode,
+ int width)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the hsync and hblank widths constant */
+ sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->hdisplay - width + 1) / 2;
+ border += border & 1; /* make the border even */
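+	/*
+	 * e.g. a 1024-wide image on a 1280-wide panel gives a
+	 * 128-pixel border on each side.
+	 */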
+
+ mode->crtc_hdisplay = width;
+ mode->crtc_hblank_start = width + border;
+ mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
+
+ mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
+ mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
+
+ mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
+}
+
+static void
+centre_vertically(struct drm_display_mode *mode,
+ int height)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the vsync and vblank widths constant */
+ sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->vdisplay - height + 1) / 2;
+
+ mode->crtc_vdisplay = height;
+ mode->crtc_vblank_start = height + border;
+ mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
+
+ mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
+ mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
+
+ mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
+}
+
+static inline u32 panel_fitter_scaling(u32 source, u32 target)
+{
+ /*
+	 * Floating point operations are not available here, so a
+	 * fixed-point FACTOR is defined to avoid floating point
+	 * computation when calculating the panel scaling ratio.
+ */
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
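+	/*
+	 * e.g. source = 1024, target = 1280 gives
+	 * ratio = 1024 * 4096 / 1280 = 3276, i.e. 0.8 in 12-bit fixed point.
+	 */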
+ u32 ratio = source * FACTOR / target;
+ return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
+
+static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+ struct drm_encoder *tmp_encoder;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ int pipe;
+
+ /* Should never happen!! */
+ if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+ DRM_ERROR("Can't support LVDS on pipe A\n");
+ return false;
+ }
+
+ /* Should never happen!! */
+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
+ if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
+ DRM_ERROR("Can't enable LVDS and another "
+ "encoder on the same pipe\n");
+ return false;
+ }
+ }
+
+ /*
+	 * We have timings from the BIOS for the panel, put them into
+	 * the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+ mode, adjusted_mode);
+ return true;
+ }
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay)
+ goto out;
+
+ /* 965+ wants fuzzy fitting */
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY);
+
+ /*
+ * Enable automatic panel scaling for non-native modes so that they fill
+ * the screen. Should be enabled before the pipe is enabled, according
+ * to register description and PRM.
+ * Change the value here to see the borders for debugging
+ */
+ for_each_pipe(pipe)
+ I915_WRITE(BCLRPAT(pipe), 0);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ switch (intel_lvds->fitting_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ /*
+ * For centered modes, we have to calculate border widths &
+ * heights and modify the values programmed into the CRTC.
+ */
+ centre_horizontally(adjusted_mode, mode->hdisplay);
+ centre_vertically(adjusted_mode, mode->vdisplay);
+ border = LVDS_BORDER_ENABLE;
+ break;
+
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+
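+			/*
+			 * Cross-multiplying compares the horizontal and
+			 * vertical scale factors without division: a larger
+			 * scaled_width means the image must be pillarboxed.
+			 */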
+ /* 965+ is easy, it does everything in hw */
+ if (scaled_width > scaled_height)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
+ else if (scaled_width < scaled_height)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+ else if (adjusted_mode->hdisplay != mode->hdisplay)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+ } else {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ if (scaled_width > scaled_height) { /* pillar */
+ centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
+
+ border = LVDS_BORDER_ENABLE;
+ if (mode->vdisplay != adjusted_mode->vdisplay) {
+ u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
+ pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else if (scaled_width < scaled_height) { /* letter */
+ centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
+
+ border = LVDS_BORDER_ENABLE;
+ if (mode->hdisplay != adjusted_mode->hdisplay) {
+ u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
+ pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else
+				/* Aspect ratios match, let the hw scale in both directions */
+ pfit_control |= (PFIT_ENABLE |
+ VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ break;
+
+ case DRM_MODE_SCALE_FULLSCREEN:
+ /*
+ * Full scaling, even if it changes the aspect ratio.
+ * Fortunately this is all done for us in hw.
+ */
+ if (mode->vdisplay != adjusted_mode->vdisplay ||
+ mode->hdisplay != adjusted_mode->hdisplay) {
+ pfit_control |= PFIT_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_AUTO_SCALE |
+ HORIZ_INTERP_BILINEAR);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ /* If not enabling scaling, be consistent and always use 0. */
+ if ((pfit_control & PFIT_ENABLE) == 0) {
+ pfit_control = 0;
+ pfit_pgm_ratios = 0;
+ }
+
+ /* Make sure pre-965 set dither correctly */
+ if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ if (pfit_control != intel_lvds->pfit_control ||
+ pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
+ intel_lvds->pfit_control = pfit_control;
+ intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ intel_lvds->pfit_dirty = true;
+ }
+ dev_priv->lvds_border_bits = border;
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+static void intel_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+ /*
+ * Prior to Ironlake, we must disable the pipe if we want to adjust
+ * the panel fitter. However at all other times we can just reset
+ * the registers regardless.
+ */
+ if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
+ intel_lvds_disable(intel_lvds);
+}
+
+static void intel_lvds_commit(struct drm_encoder *encoder)
+{
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+ /* Always do a full power on as we do not know what state
+ * we were left in.
+ */
+ intel_lvds_enable(intel_lvds);
+}
+
+static void intel_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
+ * connected and closed means disconnected. We also send hotplug events as
+ * needed, using lid status notification from the input layer.
+ */
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status status;
+
+ status = intel_panel_detect(dev);
+ if (status != connector_status_unknown)
+ return status;
+
+ return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+
+ if (intel_lvds->edid)
+ return drm_add_edid_modes(connector, intel_lvds->edid);
+
+ mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+ if (mode == NULL)
+ return 0;
+
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+ return 1;
+}
+
+/* The GPU hangs up on these systems if modeset is performed on LID open */
+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+ {
+ .callback = intel_no_modeset_on_lid_dmi_callback,
+ .ident = "Toshiba Tecra A11",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
+#ifdef NOTYET
+/*
+ * Lid events. Note the use of 'modeset_on_lid':
+ * - we set it on lid close, and reset it on open
+ * - we use it as a "only once" bit (ie we ignore
+ * duplicate events where it was already properly
+ * set/reset)
+ * - the suspend/resume paths will also set it to
+ * zero, since they restore the mode ("lid open").
+ */
+static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+ void *unused)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, lid_notifier);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector = dev_priv->int_lvds_connector;
+
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return NOTIFY_OK;
+
+ /*
+ * check and update the status of LVDS connector after receiving
+	 * the LID notification event.
+ */
+ if (connector)
+ connector->status = connector->funcs->detect(connector,
+ false);
+
+ /* Don't force modeset on machines where it causes a GPU lockup */
+ if (dmi_check_system(intel_no_modeset_on_lid))
+ return NOTIFY_OK;
+ if (!acpi_lid_open()) {
+ dev_priv->modeset_on_lid = 1;
+ return NOTIFY_OK;
+ }
+
+ if (!dev_priv->modeset_on_lid)
+ return NOTIFY_OK;
+
+ dev_priv->modeset_on_lid = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return NOTIFY_OK;
+}
+#endif
+
+/**
+ * intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+static void intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+#if 0
+ struct drm_i915_private *dev_priv = dev->dev_private;
+#endif
+
+ intel_panel_destroy_backlight(dev);
+
+#if 0
+ if (dev_priv->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
+#endif
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, DRM_MEM_KMS);
+}
+
+static int intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct drm_device *dev = connector->dev;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+
+ if (value == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_lvds->fitting_mode == value) {
+ /* the LVDS scaling property is not changed */
+ return 0;
+ }
+ intel_lvds->fitting_mode = value;
+ if (crtc && crtc->enabled) {
+ /*
+ * If the CRTC is enabled, the display will be changed
+ * according to the new panel fitting mode.
+ */
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+ }
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
+ .dpms = intel_lvds_dpms,
+ .mode_fixup = intel_lvds_mode_fixup,
+ .prepare = intel_lvds_prepare,
+ .mode_set = intel_lvds_mode_set,
+ .commit = intel_lvds_commit,
+};
+
+static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
+ .get_modes = intel_lvds_get_modes,
+ .mode_valid = intel_lvds_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_lvds_set_property,
+ .destroy = intel_lvds_destroy,
+};
+
+static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
+ return 1;
+}
+
+/* These systems claim to have LVDS, but really don't */
+static const struct dmi_system_id intel_no_lvds[] = {
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Apple Mac Mini (Core series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Apple Mac Mini (Core 2 series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI IM-945GSE-A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell Studio Hybrid",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell OptiPlex FX170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen Mini PC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen Mini PC MP915",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i915GMm-HFS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Aopen i945GTt-VFA",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron U800",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron E830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus EeeBox PC EB1007",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard t5745",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard st5747",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI Wind Box DC500",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
+/**
+ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @connector: LVDS connector
+ *
+ * Find the reduced downclock for LVDS in EDID.
+ */
+static void intel_find_lvds_downclock(struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *scan;
+ int temp_downclock;
+
+ temp_downclock = fixed_mode->clock;
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ /*
+		 * If a probed mode has the same resolution as the fixed panel
+		 * mode but a different refresh rate, a reduced downclock has
+		 * been found for the LVDS. In that case we can program
+		 * different FPx0/1 values to dynamically select between the
+		 * low and high frequencies.
+ */
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+ /*
+				 * A downclock was already found, but keep
+				 * looking for an even lower one.
+ */
+ temp_downclock = scan->clock;
+ }
+ }
+ }
+ if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
+ /* We found the downclock for LVDS. */
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = temp_downclock;
+ DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+ "Normal clock %dKhz, downclock %dKhz\n",
+ fixed_mode->clock, temp_downclock);
+ }
+}
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return true.
+ * If it is not present, return false.
+ * If no child dev is parsed from VBT, assume that the LVDS is present.
+ */
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ struct child_device_config *child = dev_priv->child_dev + i;
+
+ /* If the device type is not LFP, continue.
+		 * We have to check both the new identifiers and the old
+		 * ones for compatibility with some BIOSes.
+ */
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ if (child->i2c_pin)
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_lvds_supported(struct drm_device *dev)
+{
+ /* With the introduction of the PCH we gained a dedicated
+ * LVDS presence pin, use it. */
+ if (HAS_PCH_SPLIT(dev))
+ return true;
+
+ /* Otherwise LVDS was only attached to mobile products,
+ * except for the inglorious 830gm */
+ return IS_MOBILE(dev) && !IS_I830(dev);
+}
+
+/**
+ * intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+bool intel_lvds_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds *intel_lvds;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+	struct drm_display_mode *scan;
+ struct drm_crtc *crtc;
+ u32 lvds;
+ int pipe;
+ u8 pin;
+
+ if (!intel_lvds_supported(dev))
+ return false;
+
+ /* Skip init on machines we know falsely report LVDS */
+ if (dmi_check_system(intel_no_lvds))
+ return false;
+
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return false;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
+ return false;
+ if (dev_priv->edp.support) {
+ DRM_DEBUG_KMS("disable LVDS for eDP support\n");
+ return false;
+ }
+ }
+
+ intel_lvds = malloc(sizeof(struct intel_lvds), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+ intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ if (!HAS_PCH_SPLIT(dev)) {
+ intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+ }
+
+ intel_encoder = &intel_lvds->base;
+ encoder = &intel_encoder->base;
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+ intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+ if (HAS_PCH_SPLIT(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ else
+ intel_encoder->crtc_mask = (1 << 1);
+
+ drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /* create the scaling mode property */
+ drm_mode_create_scaling_mode_property(dev);
+ /*
+	 * The initial panel fitting mode is DRM_MODE_SCALE_ASPECT.
+ */
+
+ drm_connector_attach_property(&intel_connector->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ intel_lvds->edid = drm_get_edid(connector, dev_priv->gmbus[pin]);
+ if (intel_lvds->edid) {
+ if (drm_add_edid_modes(connector,
+ intel_lvds->edid)) {
+ drm_mode_connector_update_edid_property(connector,
+ intel_lvds->edid);
+ } else {
+ free(intel_lvds->edid, DRM_MEM_KMS);
+ intel_lvds->edid = NULL;
+ }
+ }
+ if (!intel_lvds->edid) {
+		/*
+		 * Didn't get an EDID, so set wide sync ranges to get all
+		 * modes handed to valid_mode for checking.
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ }
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_lvds->fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ intel_find_lvds_downclock(dev,
+ intel_lvds->fixed_mode,
+ connector);
+ goto out;
+ }
+ }
+
+ /* Failed to get EDID, what about VBT? */
+ if (dev_priv->lfp_lvds_vbt_mode) {
+ intel_lvds->fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (intel_lvds->fixed_mode) {
+ intel_lvds->fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out;
+ }
+ }
+
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+
+	/* Ironlake: FIXME if this still fails, don't try reading the pipe mode for now */
+ if (HAS_PCH_SPLIT(dev))
+ goto failed;
+
+ lvds = I915_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (intel_lvds->fixed_mode) {
+ intel_lvds->fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out;
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!intel_lvds->fixed_mode)
+ goto failed;
+
+out:
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 pwm;
+
+ pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+
+ /* make sure PWM is enabled and locked to the LVDS pipe */
+ pwm = I915_READ(BLC_PWM_CPU_CTL2);
+ if (pipe == 0 && (pwm & PWM_PIPE_B))
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
+ if (pipe)
+ pwm |= PWM_PIPE_B;
+ else
+ pwm &= ~PWM_PIPE_B;
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
+
+ pwm = I915_READ(BLC_PWM_PCH_CTL1);
+ pwm |= PWM_PCH_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else {
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+ }
+#ifdef NOTYET
+ dev_priv->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+ DRM_DEBUG_KMS("lid notifier registration failed\n");
+ dev_priv->lid_notifier.notifier_call = NULL;
+ }
+#endif
+ /* keep the LVDS connector */
+ dev_priv->int_lvds_connector = connector;
+#if 0
+ drm_sysfs_connector_add(connector);
+#endif
+ intel_panel_setup_backlight(dev);
+ return true;
+
+failed:
+ DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+ drm_connector_cleanup(connector);
+ drm_encoder_cleanup(encoder);
+ free(intel_lvds, DRM_MEM_KMS);
+ free(intel_connector, DRM_MEM_KMS);
+ return false;
+}
diff --git a/sys/dev/drm2/i915/intel_modes.c b/sys/dev/drm2/i915/intel_modes.c
new file mode 100644
index 0000000..8bf1d59
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_modes.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007, 2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/iicbus/iiconf.h>
+
+/**
+ * intel_ddc_probe - check whether a monitor answers on the given DDC bus
+ *
+ */
+bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
+{
+ struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
+ u8 out_buf[] = { 0x0, 0x0};
+ u8 buf[2];
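+	/*
+	 * Write a zero offset to DDC_ADDR, then read one byte back; a
+	 * successful transfer means a monitor is answering on the bus.
+	 */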
+ struct iic_msg msgs[] = {
+ {
+ .slave = DDC_ADDR,
+ .flags = IIC_M_WR,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .slave = DDC_ADDR,
+ .flags = IIC_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ return (iicbus_transfer(dev_priv->gmbus[ddc_bus], msgs, 2)
+ == 0/* XXXKIB 2*/);
+}
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: i2c adapter
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int
+intel_ddc_get_modes(struct drm_connector *connector, device_t adapter)
+{
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ free(edid, DRM_MEM_KMS);
+ }
+
+ return ret;
+}
+
+static const struct drm_prop_enum_list force_audio_names[] = {
+ { HDMI_AUDIO_OFF_DVI, "force-dvi" },
+ { HDMI_AUDIO_OFF, "off" },
+ { HDMI_AUDIO_AUTO, "auto" },
+ { HDMI_AUDIO_ON, "on" },
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ prop = dev_priv->force_audio_property;
+ if (prop == NULL) {
+ prop = drm_property_create_enum(dev, 0,
+ "audio",
+ force_audio_names,
+ DRM_ARRAY_SIZE(force_audio_names));
+ if (prop == NULL)
+ return;
+
+ dev_priv->force_audio_property = prop;
+ }
+ drm_connector_attach_property(connector, prop, 0);
+}
+
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+ { 0, "Full" },
+ { 1, "Limited 16:235" },
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ prop = dev_priv->broadcast_rgb_property;
+ if (prop == NULL) {
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ DRM_ARRAY_SIZE(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ dev_priv->broadcast_rgb_property = prop;
+ }
+
+ drm_connector_attach_property(connector, prop, 0);
+}
diff --git a/sys/dev/drm2/i915/intel_opregion.c b/sys/dev/drm2/i915/intel_opregion.c
new file mode 100644
index 0000000..8229c30
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_opregion.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+ * Copyright 2008 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET 0x200
+#define OPREGION_ASLE_OFFSET 0x300
+#define OPREGION_VBT_OFFSET 0x400
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI (1<<0)
+#define MBOX_SWSCI (1<<1)
+#define MBOX_ASLE (1<<2)
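+
+/*
+ * Layout of the (assumed 8 KB) OpRegion implied by the offsets above:
+ *
+ *	0x000	header		("IntelGraphicsMem" signature, 'mboxes' bits)
+ *	0x100	mailbox #1	public ACPI methods (MBOX_ACPI)
+ *	0x200	mailbox #2	SWSCI (MBOX_SWSCI)
+ *	0x300	mailbox #3	ASLE (MBOX_ASLE)
+ *	0x400	mailbox #4	VBT, the Video BIOS Tables
+ *
+ * intel_opregion_setup() below validates the signature and only publishes
+ * the mailboxes advertised in the header.
+ */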
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
+} __attribute__((packed));
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+ u32 drdy; /* driver readiness */
+ u32 csts; /* notification status */
+ u32 cevt; /* current event */
+ u8 rsvd1[20];
+ u32 didl[8]; /* supported display devices ID list */
+ u32 cpdl[8]; /* currently presented display list */
+ u32 cadl[8]; /* currently active display list */
+ u32 nadl[8]; /* next active devices list */
+ u32 aslp; /* ASL sleep time-out */
+ u32 tidx; /* toggle table index */
+ u32 chpd; /* current hotplug enable indicator */
+ u32 clid; /* current lid state*/
+ u32 cdck; /* current docking state */
+ u32 sxsw; /* Sx state resume */
+ u32 evts; /* ASL supported events */
+ u32 cnot; /* current OS notification */
+ u32 nrdy; /* driver status */
+ u8 rsvd2[60];
+} __attribute__((packed));
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+ u32 scic; /* SWSCI command|status|data */
+ u32 parm; /* command parameters */
+ u32 dslp; /* driver sleep time-out */
+ u8 rsvd[244];
+} __attribute__((packed));
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+ u32 ardy; /* driver readiness */
+ u32 aslc; /* ASLE interrupt command */
+ u32 tche; /* technology enabled indicator */
+ u32 alsi; /* current ALS illuminance reading */
+ u32 bclp; /* backlight brightness to set */
+ u32 pfit; /* panel fitting state */
+ u32 cblv; /* current brightness level */
+ u16 bclm[20]; /* backlight level duty cycle mapping table */
+ u32 cpfm; /* current panel fitting mode */
+ u32 epfm; /* enabled panel fitting modes */
+ u8 plut[74]; /* panel LUT and identifier */
+ u32 pfmb; /* PWM freq and min brightness */
+ u8 rsvd[102];
+} __attribute__((packed));
+
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM (1 << 0)
+#define ASLE_SET_BACKLIGHT (1 << 1)
+#define ASLE_SET_PFIT (1 << 2)
+#define ASLE_SET_PWM_FREQ (1 << 3)
+#define ASLE_REQ_MSK 0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAILED (1<<10)
+#define ASLE_BACKLIGHT_FAILED (1<<12)
+#define ASLE_PFIT_FAILED (1<<14)
+#define ASLE_PWM_FREQ_FAILED (1<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID (1<<31)
+#define ASLE_BCLP_MSK (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID (1<<31)
+
+#define ACPI_OTHER_OUTPUT (0<<8)
+#define ACPI_VGA_OUTPUT (1<<8)
+#define ACPI_TV_OUTPUT (2<<8)
+#define ACPI_DIGITAL_OUTPUT (3<<8)
+#define ACPI_LVDS_OUTPUT (4<<8)
+
+#ifdef CONFIG_ACPI
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 max;
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLE_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+ if (bclp > 255)
+ return ASLE_BACKLIGHT_FAILED;
+
+ max = intel_panel_get_max_backlight(dev);
+ intel_panel_set_backlight(dev, bclp * max / 255);
+ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+ return 0;
+}
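+
+/*
+ * Worked example of the cblv write-back above: the firmware requests
+ * brightness on a 0-255 scale and cblv reports it back as a percentage,
+ * so for bclp = 128, (128 * 0x64) / 0xff = 12800 / 255 = 50, and the
+ * value stored is 50 | ASLE_CBLV_VALID ("50%, valid").
+ */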
+
+static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+{
+ /* alsi is the current ALS reading in lux. 0 indicates below sensor
+ range, 0xffff indicates above sensor range. 1-0xfffe are valid */
+ return 0;
+}
+
+static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (pfmb & ASLE_PFMB_PWM_VALID) {
+ u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
+ u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
+ blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
+ pwm = pwm >> 9;
+ /* FIXME - what do we do with the PWM? */
+ }
+ return 0;
+}
+
+static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+{
+ /* Panel fitting is currently controlled by the X code, so this is a
+ noop until modesetting support works fully */
+ if (!(pfit & ASLE_PFIT_VALID))
+ return ASLE_PFIT_FAILED;
+ return 0;
+}
+
+void intel_opregion_asle_intr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+
+ if (!asle_req) {
+ DRM_DEBUG("non asle set request??\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_ALS_ILLUM)
+ asle_stat |= asle_set_als_illum(dev, asle->alsi);
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+ if (asle_req & ASLE_SET_PFIT)
+ asle_stat |= asle_set_pfit(dev, asle->pfit);
+
+ if (asle_req & ASLE_SET_PWM_FREQ)
+ asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+
+ asle->aslc = asle_stat;
+}
+
+void intel_opregion_gse_intr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+
+ if (!asle_req) {
+ DRM_DEBUG("non asle set request??\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_ALS_ILLUM) {
+ DRM_DEBUG("Illum is not supported\n");
+ asle_stat |= ASLE_ALS_ILLUM_FAILED;
+ }
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+ if (asle_req & ASLE_SET_PFIT) {
+ DRM_DEBUG("Pfit is not supported\n");
+ asle_stat |= ASLE_PFIT_FAILED;
+ }
+
+ if (asle_req & ASLE_SET_PWM_FREQ) {
+ DRM_DEBUG("PWM freq is not supported\n");
+ asle_stat |= ASLE_PWM_FREQ_FAILED;
+ }
+
+ asle->aslc = asle_stat;
+}
+#define ASLE_ALS_EN (1<<0)
+#define ASLE_BLC_EN (1<<1)
+#define ASLE_PFIT_EN (1<<2)
+#define ASLE_PFMB_EN (1<<3)
+
+void intel_opregion_enable_asle(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+
+ if (asle) {
+ if (IS_MOBILE(dev))
+ intel_enable_asle(dev);
+
+ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+ ASLE_PFMB_EN;
+ asle->ardy = 1;
+ }
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID (1<<1)
+#define ACPI_EV_DOCK (1<<2)
+
+static struct intel_opregion *system_opregion;
+
+static int intel_opregion_video_event(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ /* The only video events relevant to opregion are 0x80. These indicate
+ either a docking event, lid switch or display switch request. In
+ Linux, these are handled by the dock, button and video drivers.
+ */
+ struct opregion_acpi *acpi;
+ struct acpi_bus_event *event = data;
+ int ret = NOTIFY_OK;
+
+ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+ return NOTIFY_DONE;
+
+ if (!system_opregion)
+ return NOTIFY_DONE;
+
+ acpi = system_opregion->acpi;
+
+ if (event->type == 0x80 && !(acpi->cevt & 0x1))
+ ret = NOTIFY_BAD;
+
+ acpi->csts = 0;
+
+ return ret;
+}
+
+static struct notifier_block intel_opregion_notifier = {
+ .notifier_call = intel_opregion_video_event,
+};
+
+/*
+ * Initialise the DIDL field in opregion. This passes a list of devices to
+ * the firmware. Values are defined by section B.4.2 of the ACPI specification
+ * (version 3)
+ */
+
+static void intel_didl_outputs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ struct drm_connector *connector;
+ acpi_handle handle;
+ struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+ unsigned long long device_id;
+ acpi_status status;
+ int i = 0;
+
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+ return;
+
+ if (acpi_is_video_device(acpi_dev))
+ acpi_video_bus = acpi_dev;
+ else {
+ list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+ if (acpi_is_video_device(acpi_cdev)) {
+ acpi_video_bus = acpi_cdev;
+ break;
+ }
+ }
+ }
+
+ if (!acpi_video_bus) {
+ printk(KERN_WARNING "No ACPI video bus found\n");
+ return;
+ }
+
+ list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+ if (i >= 8) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+ "More than 8 outputs detected\n");
+ return;
+ }
+ status =
+ acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+ NULL, &device_id);
+ if (ACPI_SUCCESS(status)) {
+ if (!device_id)
+ goto blind_set;
+ opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ i++;
+ }
+ }
+
+end:
+ /* If fewer than 8 outputs, the list must be null terminated */
+ if (i < 8)
+ opregion->acpi->didl[i] = 0;
+ return;
+
+blind_set:
+ i = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ int output_type = ACPI_OTHER_OUTPUT;
+ if (i >= 8) {
+ device_printf(dev->device,
+ "More than 8 outputs detected\n");
+ return;
+ }
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ output_type = ACPI_VGA_OUTPUT;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ output_type = ACPI_TV_OUTPUT;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ output_type = ACPI_DIGITAL_OUTPUT;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ output_type = ACPI_LVDS_OUTPUT;
+ break;
+ }
+ opregion->acpi->didl[i] |= (1<<31) | output_type | i;
+ i++;
+ }
+ goto end;
+}
+
+void intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_didl_outputs(dev);
+
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video module.
+ * We don't actually need to do anything with them. */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+
+ system_opregion = opregion;
+ register_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ if (opregion->asle)
+ intel_opregion_enable_asle(dev);
+}
+
+void intel_opregion_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi->drdy = 0;
+
+ system_opregion = NULL;
+ unregister_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ /* just clear all opregion memory pointers now */
+ iounmap(opregion->header);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+#else
+void
+intel_opregion_init(struct drm_device *dev)
+{
+}
+
+void
+intel_opregion_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv;
+ struct intel_opregion *opregion;
+
+ dev_priv = dev->dev_private;
+ opregion = &dev_priv->opregion;
+
+ if (opregion->header == NULL)
+ return;
+
+ pmap_unmapdev((vm_offset_t)opregion->header, OPREGION_SIZE);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+#endif
+
+int intel_opregion_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ char *base;
+ u32 asls, mboxes;
+ int err = 0;
+
+ asls = pci_read_config(dev->device, PCI_ASLS, 4);
+ DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+ if (asls == 0) {
+ DRM_DEBUG("ACPI OpRegion not supported!\n");
+ return -ENOTSUP;
+ }
+
+ base = (void *)pmap_mapbios(asls, OPREGION_SIZE);
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ DRM_DEBUG("opregion signature mismatch\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+ opregion->header = (struct opregion_header *)base;
+ opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+ opregion->lid_state = (u32 *)(base + ACPI_CLID);
+
+ mboxes = opregion->header->mboxes;
+ if (mboxes & MBOX_ACPI) {
+ DRM_DEBUG("Public ACPI methods supported\n");
+ opregion->acpi = (struct opregion_acpi *)(base +
+ OPREGION_ACPI_OFFSET);
+ }
+
+ if (mboxes & MBOX_SWSCI) {
+ DRM_DEBUG("SWSCI supported\n");
+ opregion->swsci = (struct opregion_swsci *)(base +
+ OPREGION_SWSCI_OFFSET);
+ }
+ if (mboxes & MBOX_ASLE) {
+ DRM_DEBUG("ASLE supported\n");
+ opregion->asle = (struct opregion_asle *)(base +
+ OPREGION_ASLE_OFFSET);
+ }
+
+ return 0;
+
+err_out:
+ pmap_unmapdev((vm_offset_t)base, OPREGION_SIZE);
+ return err;
+}
diff --git a/sys/dev/drm2/i915/intel_overlay.c b/sys/dev/drm2/i915/intel_overlay.c
new file mode 100644
index 0000000..34f2c39
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_overlay.c
@@ -0,0 +1,1582 @@
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/i915_reg.h>
+#include <dev/drm2/i915/intel_drv.h>
+
+/* Limits for overlay size. According to the Intel docs, the real limits are:
+ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But the Xorg driver assumes 2048 for both height
+ * and width. Use the minimum of both. */
+#define IMAGE_MAX_WIDTH 2048
+#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY 1024
+#define IMAGE_MAX_HEIGHT_LEGACY 1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE (0x1<<19)
+#define OCMD_MIRROR_MASK (0x3<<17)
+#define OCMD_MIRROR_MODE (0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
+#define OCMD_MIRROR_VERTICAL (0x2<<17)
+#define OCMD_MIRROR_BOTH (0x3<<17)
+#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED (0x8<<10)
+#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR (0xc<<10)
+#define OCMD_YUV_422_PLANAR (0xd<<10)
+#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
+#define OCMD_BUF_TYPE_MASK (0x1<<5)
+#define OCMD_BUF_TYPE_FRAME (0x0<<5)
+#define OCMD_BUF_TYPE_FIELD (0x1<<5)
+#define OCMD_TEST_MODE (0x1<<4)
+#define OCMD_BUFFER_SELECT (0x3<<2)
+#define OCMD_BUFFER0 (0x0<<2)
+#define OCMD_BUFFER1 (0x1<<2)
+#define OCMD_FIELD_SELECT (0x1<<2)
+#define OCMD_FIELD0 (0x0<<1)
+#define OCMD_FIELD1 (0x1<<1)
+#define OCMD_ENABLE (0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK (0x1<<18)
+#define OCONF_PIPE_A (0x0<<18)
+#define OCONF_PIPE_B (0x1<<18)
+#define OCONF_GAMMA2_ENABLE (0x1<<16)
+#define OCONF_CSC_MODE_BT601 (0x0<<5)
+#define OCONF_CSC_MODE_BT709 (0x1<<5)
+#define OCONF_CSC_BYPASS (0x1<<4)
+#define OCONF_CC_OUT_8BIT (0x1<<3)
+#define OCONF_TEST_MODE (0x1<<2)
+#define OCONF_THREE_LINE_BUFFER (0x1<<0)
+#define OCONF_TWO_LINE_BUFFER (0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE (0x1<<31)
+#define CLK_RGB24_MASK 0x0
+#define CLK_RGB16_MASK 0x070307
+#define CLK_RGB15_MASK 0x070707
+#define CLK_RGB8I_MASK 0xffffff
+
+#define RGB16_TO_COLORKEY(c) \
+ (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+ (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
+
+/* overlay flip addr flag */
+#define OFC_UPDATE 0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS 5
+#define N_VERT_Y_TAPS 3
+#define N_HORIZ_UV_TAPS 3
+#define N_VERT_UV_TAPS 3
+#define N_PHASES 17
+#define MAX_TAPS 5
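+
+/*
+ * These sizes match the coefficient tables in struct overlay_registers
+ * below: the Y horizontal filter takes N_HORIZ_Y_TAPS * N_PHASES =
+ * 5 * 17 = 85 u16 coefficients, which is exactly the number of entries
+ * in y_static_hcoeffs further down (17 phases of 5 taps each).
+ */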
+
+/* memory-buffered overlay registers */
+struct overlay_registers {
+ u32 OBUF_0Y;
+ u32 OBUF_1Y;
+ u32 OBUF_0U;
+ u32 OBUF_0V;
+ u32 OBUF_1U;
+ u32 OBUF_1V;
+ u32 OSTRIDE;
+ u32 YRGB_VPH;
+ u32 UV_VPH;
+ u32 HORZ_PH;
+ u32 INIT_PHS;
+ u32 DWINPOS;
+ u32 DWINSZ;
+ u32 SWIDTH;
+ u32 SWIDTHSW;
+ u32 SHEIGHT;
+ u32 YRGBSCALE;
+ u32 UVSCALE;
+ u32 OCLRC0;
+ u32 OCLRC1;
+ u32 DCLRKV;
+ u32 DCLRKM;
+ u32 SCLRKVH;
+ u32 SCLRKVL;
+ u32 SCLRKEN;
+ u32 OCONFIG;
+ u32 OCMD;
+ u32 RESERVED1; /* 0x6C */
+ u32 OSTART_0Y;
+ u32 OSTART_1Y;
+ u32 OSTART_0U;
+ u32 OSTART_0V;
+ u32 OSTART_1U;
+ u32 OSTART_1V;
+ u32 OTILEOFF_0Y;
+ u32 OTILEOFF_1Y;
+ u32 OTILEOFF_0U;
+ u32 OTILEOFF_0V;
+ u32 OTILEOFF_1U;
+ u32 OTILEOFF_1V;
+ u32 FASTHSCALE; /* 0xA0 */
+ u32 UVSCALEV; /* 0xA4 */
+ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+ u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+ u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+ u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
+
+struct intel_overlay {
+ struct drm_device *dev;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *vid_bo;
+ struct drm_i915_gem_object *old_vid_bo;
+ int active;
+ int pfit_active;
+ u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+ u32 color_key;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ u32 flip_addr;
+ struct drm_i915_gem_object *reg_bo;
+ /* flip handling */
+ uint32_t last_flip_req;
+ void (*flip_tail)(struct intel_overlay *);
+};
+
+static struct overlay_registers *
+intel_overlay_map_regs(struct intel_overlay *overlay)
+{
+ struct overlay_registers *regs;
+
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) {
+ regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ } else {
+ regs = pmap_mapdev_attr(overlay->dev->agp->base +
+ overlay->reg_bo->gtt_offset, PAGE_SIZE,
+ PAT_WRITE_COMBINING);
+ }
+ return (regs);
+}
+
+static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
+}
+
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ struct drm_i915_gem_request *request,
+ void (*tail)(struct intel_overlay *))
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ KASSERT(!overlay->last_flip_req, ("Overlay already has flip req"));
+ ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+ if (ret) {
+ free(request, DRM_I915_GEM);
+ return ret;
+ }
+ overlay->last_flip_req = request->seqno;
+ overlay->flip_tail = tail;
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+ true);
+ if (ret)
+ return ret;
+
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* Workaround for an i830 bug where pipe A must be enabled to change control regs */
+static int
+i830_activate_pipe_a(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_display_mode vesa_640x480 = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+ }, *mode;
+
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
+ if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
+ return 0;
+
+ /* most i8xx have pipe a forced on, so don't trust dpms mode */
+ if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
+ return 0;
+
+ crtc_funcs = crtc->base.helper_private;
+ if (crtc_funcs->dpms == NULL)
+ return 0;
+
+ DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
+
+ mode = drm_mode_duplicate(dev, &vesa_640x480);
+ drm_mode_set_crtcinfo(mode, 0);
+ if (!drm_crtc_helper_set_mode(&crtc->base, mode,
+ crtc->base.x, crtc->base.y,
+ crtc->base.fb))
+ return 0;
+
+ crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
+ return 1;
+}
+
+static void
+i830_deactivate_pipe_a(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+/* overlay needs to be disabled in the OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ int pipe_a_quirk = 0;
+ int ret;
+
+ KASSERT(!overlay->active, ("Overlay is active"));
+ overlay->active = 1;
+
+ if (IS_I830(dev)) {
+ pipe_a_quirk = i830_activate_pipe_a(dev);
+ if (pipe_a_quirk < 0)
+ return pipe_a_quirk;
+ }
+
+ request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
+
+ ret = BEGIN_LP_RING(4);
+ if (ret) {
+ free(request, DRM_I915_GEM);
+ goto out;
+ }
+
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ OUT_RING(overlay->flip_addr | OFC_UPDATE);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ ret = intel_overlay_do_wait_request(overlay, request, NULL);
+out:
+ if (pipe_a_quirk)
+ i830_deactivate_pipe_a(dev);
+
+ return ret;
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static int intel_overlay_continue(struct intel_overlay *overlay,
+ bool load_polyphase_filter)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ u32 flip_addr = overlay->flip_addr;
+ u32 tmp;
+ int ret;
+
+ KASSERT(overlay->active, ("Overlay not active"));
+
+ request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
+
+ if (load_polyphase_filter)
+ flip_addr |= OFC_UPDATE;
+
+ /* check for underruns */
+ tmp = I915_READ(DOVSTA);
+ if (tmp & (1 << 17))
+ DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+ ret = BEGIN_LP_RING(2);
+ if (ret) {
+ free(request, DRM_I915_GEM);
+ return ret;
+ }
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ OUT_RING(flip_addr);
+ ADVANCE_LP_RING();
+
+ ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+ if (ret) {
+ free(request, DRM_I915_GEM);
+ return ret;
+ }
+
+ overlay->last_flip_req = request->seqno;
+ return 0;
+}
+
+static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+{
+ struct drm_i915_gem_object *obj = overlay->old_vid_bo;
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ overlay->old_vid_bo = NULL;
+}
+
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_i915_gem_object *obj = overlay->vid_bo;
+
+ /* never have the overlay hw on without showing a frame */
+ KASSERT(overlay->vid_bo != NULL, ("No vid_bo"));
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ overlay->vid_bo = NULL;
+
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 flip_addr = overlay->flip_addr;
+ struct drm_i915_gem_request *request;
+ int ret;
+
+ KASSERT(overlay->active, ("Overlay is not active"));
+
+ request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
+
+ /* According to intel docs the overlay hw may hang (when switching
+ * off) without loading the filter coeffs. It is however unclear whether
+ * this applies to the disabling of the overlay or to the switching off
+ * of the hw. Do it in both cases */
+ flip_addr |= OFC_UPDATE;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret) {
+ free(request, DRM_I915_GEM);
+ return ret;
+ }
+ /* wait for overlay to go idle */
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ /* turn overlay off */
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ ADVANCE_LP_RING();
+
+ return intel_overlay_do_wait_request(overlay, request,
+ intel_overlay_off_tail);
+}
+
+/* Recover from an interruption due to a signal.
+ * We have to be careful not to repeat work forever and to make forward
+ * progress. */
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (overlay->last_flip_req == 0)
+ return 0;
+
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+ true);
+ if (ret)
+ return ret;
+
+ if (overlay->flip_tail)
+ overlay->flip_tail(overlay);
+
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* Wait for a pending overlay flip and release the old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs.
+ */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ /* Only wait if there is actually an old frame to release to
+ * guarantee forward progress.
+ */
+ if (!overlay->old_vid_bo)
+ return 0;
+
+ if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+ struct drm_i915_gem_request *request;
+
+ /* synchronous slowpath */
+ request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
+
+ ret = BEGIN_LP_RING(2);
+ if (ret) {
+ free(request, DRM_I915_GEM);
+ return ret;
+ }
+
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ ret = intel_overlay_do_wait_request(overlay, request,
+ intel_overlay_release_old_vid_tail);
+ if (ret)
+ return ret;
+ }
+
+ intel_overlay_release_old_vid_tail(overlay);
+ return 0;
+}
+
+struct put_image_params {
+ int format;
+ short dst_x;
+ short dst_y;
+ short dst_w;
+ short dst_h;
+ short src_w;
+ short src_scan_h;
+ short src_scan_w;
+ short src_h;
+ short stride_Y;
+ short stride_UV;
+ int offset_Y;
+ int offset_U;
+ int offset_V;
+};
+
+static int packed_depth_bytes(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
+ }
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
+ }
+}
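+
+/*
+ * E.g. packed YUV422 (YUYV) stores 2 pixels in every 4 bytes, i.e.
+ * 2 bytes per pixel, hence the width << 1 above: a 720-pixel-wide
+ * source line occupies 1440 bytes.
+ */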
+
+static int uv_hsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uv_vsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
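+
+/*
+ * Together the two helpers above give the chroma plane geometry. For
+ * YUV420 both subsampling factors are 2, so a 640x480 Y plane is paired
+ * with 320x240 U and V planes.
+ */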
+
+static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+{
+ u32 mask, shift, ret;
+ if (IS_GEN2(dev)) {
+ mask = 0x1f;
+ shift = 5;
+ } else {
+ mask = 0x3f;
+ shift = 6;
+ }
+ ret = ((offset + width + mask) >> shift) - (offset >> shift);
+ if (!IS_GEN2(dev))
+ ret <<= 1;
+ ret -= 1;
+ return ret << 2;
+}
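+
+/*
+ * Worked example for gen3+ (mask = 0x3f, shift = 6, i.e. 64-byte units):
+ * with offset = 0 and width = 1024,
+ *
+ *	((0 + 1024 + 63) >> 6) - (0 >> 6) = 16 units,
+ *
+ * doubled to 32, minus 1 is 31, and the final << 2 yields 124 as the
+ * SWIDTHSW value.
+ */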
+
+static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+ 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
+ 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
+ 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
+ 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
+ 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
+ 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
+ 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
+ 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
+ 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
+ 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
+ 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
+ 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
+ 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
+ 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+ 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+ 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+};
+
+static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+ 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+ 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+ 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
+ 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
+ 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
+ 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+ 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+ 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+ 0x3000, 0x0800, 0x3000
+};
+
+static void update_polyphase_filter(struct overlay_registers *regs)
+{
+ memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+}
+
+static bool update_scaling_factors(struct intel_overlay *overlay,
+ struct overlay_registers *regs,
+ struct put_image_params *params)
+{
+ /* fixed point with a 12 bit shift */
+ u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+ bool scale_changed = false;
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+
+ if (params->dst_w > 1)
+ xscale = ((params->src_scan_w - 1) << FP_SHIFT)
+ /(params->dst_w);
+ else
+ xscale = 1 << FP_SHIFT;
+
+ if (params->dst_h > 1)
+ yscale = ((params->src_scan_h - 1) << FP_SHIFT)
+ /(params->dst_h);
+ else
+ yscale = 1 << FP_SHIFT;
+
+ /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+ /* make the Y scale to UV scale ratio an exact multiply */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
+ /*} else {
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
+
+ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+ scale_changed = true;
+ overlay->old_xscale = xscale;
+ overlay->old_yscale = yscale;
+
+ regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3));
+
+ regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3));
+
+ regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)));
+
+ if (scale_changed)
+ update_polyphase_filter(regs);
+
+ return scale_changed;
+}
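+
+/*
+ * Example of the 4.12 fixed-point math above: scaling a 1280-wide scan
+ * to a 640-wide destination gives
+ *
+ *	xscale = ((1280 - 1) << 12) / 640 = 5238784 / 640 = 8185,
+ *
+ * i.e. 8185 / 4096 ~= 1.998, just under the exact 2.0 ratio because of
+ * the "- 1" in the numerator.
+ */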
+
+static void update_colorkey(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ u32 key = overlay->color_key;
+
+ switch (overlay->crtc->base.fb->bits_per_pixel) {
+ case 8:
+ regs->DCLRKV = 0;
+ regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ break;
+
+ case 16:
+ if (overlay->crtc->base.fb->depth == 15) {
+ regs->DCLRKV = RGB15_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ } else {
+ regs->DCLRKV = RGB16_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ }
+ break;
+
+ case 24:
+ case 32:
+ regs->DCLRKV = key;
+ regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ break;
+ }
+}
+
+static u32 overlay_cmd_reg(struct put_image_params *params)
+{
+ u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
+ }
+ } else { /* YUV packed */
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
+ }
+
+ switch (params->format & I915_OVERLAY_SWAP_MASK) {
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
+ }
+ }
+
+ return cmd;
+}
+
+static u32
+max_u32(u32 a, u32 b)
+{
+
+ return (a > b ? a : b);
+}
+
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_i915_gem_object *new_bo,
+ struct put_image_params *params)
+{
+ int ret, tmp_width;
+ struct overlay_registers *regs;
+ bool scale_changed = false;
+
+ KASSERT(overlay != NULL, ("No overlay ?"));
+ DRM_LOCK_ASSERT(overlay->dev);
+ DRM_MODE_CONFIG_ASSERT_LOCKED(overlay->dev);
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+ if (ret != 0)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(new_bo);
+ if (ret)
+ goto out_unpin;
+
+ if (!overlay->active) {
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ regs->OCONFIG = OCONF_CC_OUT_8BIT;
+ if (IS_GEN4(overlay->dev))
+ regs->OCONFIG |= OCONF_CSC_MODE_BT709;
+ regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ OCONF_PIPE_A : OCONF_PIPE_B;
+ intel_overlay_unmap_regs(overlay, regs);
+
+ ret = intel_overlay_on(overlay);
+ if (ret != 0)
+ goto out_unpin;
+ }
+
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+
+ regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
+ regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+
+ if (params->format & I915_OVERLAY_YUV_PACKED)
+ tmp_width = packed_width_bytes(params->format, params->src_w);
+ else
+ tmp_width = params->src_w;
+
+ regs->SWIDTH = params->src_w;
+ regs->SWIDTHSW = calc_swidthsw(overlay->dev,
+ params->offset_Y, tmp_width);
+ regs->SHEIGHT = params->src_h;
+ regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
+ regs->OSTRIDE = params->stride_Y;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+ u32 tmp_U, tmp_V;
+ regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+ params->src_w/uv_hscale);
+ tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+ params->src_w/uv_hscale);
+ regs->SWIDTHSW |= max_u32(tmp_U, tmp_V) << 16;
+ regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
+ regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+ regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
+ regs->OSTRIDE |= params->stride_UV << 16;
+ }
+
+ scale_changed = update_scaling_factors(overlay, regs, params);
+
+ update_colorkey(overlay, regs);
+
+ regs->OCMD = overlay_cmd_reg(params);
+
+ intel_overlay_unmap_regs(overlay, regs);
+
+ ret = intel_overlay_continue(overlay, scale_changed);
+ if (ret)
+ goto out_unpin;
+
+ overlay->old_vid_bo = overlay->vid_bo;
+ overlay->vid_bo = new_bo;
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(new_bo);
+ return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+ struct overlay_registers *regs;
+ int ret;
+
+ DRM_LOCK_ASSERT(overlay->dev);
+ DRM_MODE_CONFIG_ASSERT_LOCKED(overlay->dev);
+
+ ret = intel_overlay_recover_from_interrupt(overlay);
+ if (ret != 0)
+ return ret;
+
+ if (!overlay->active)
+ return 0;
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ regs = intel_overlay_map_regs(overlay);
+ regs->OCMD = 0;
+ intel_overlay_unmap_regs(overlay, regs);
+
+ ret = intel_overlay_off(overlay);
+ if (ret != 0)
+ return ret;
+
+ intel_overlay_off_tail(overlay);
+ return 0;
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ struct intel_crtc *crtc)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+
+ if (!crtc->active)
+ return -EINVAL;
+
+ /* can't use the overlay with double wide pipe */
+ if (INTEL_INFO(overlay->dev)->gen < 4 &&
+ (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 pfit_control = I915_READ(PFIT_CONTROL);
+ u32 ratio;
+
+ /* XXX: This is not the same logic as in the xorg driver, but more in
+ * line with the intel documentation for the i965
+ */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ } else {
+ if (pfit_control & VERT_AUTO_SCALE)
+ ratio = I915_READ(PFIT_AUTO_RATIOS);
+ else
+ ratio = I915_READ(PFIT_PGM_RATIOS);
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
+ }
+
+ overlay->pfit_vscale_ratio = ratio;
+}
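+
+/*
+ * pfit_vscale_ratio is a 4.12 fixed-point factor: a value of 1 << 12
+ * means 1.0 (no vertical scaling). intel_overlay_put_image() below
+ * rescales the requested dst_y/dst_height by this factor to compensate
+ * for the panel fitter's vertical scaling.
+ */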
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+ struct drm_intel_overlay_put_image *rec)
+{
+ struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+ if (rec->dst_x < mode->hdisplay &&
+ rec->dst_x + rec->dst_width <= mode->hdisplay &&
+ rec->dst_y < mode->vdisplay &&
+ rec->dst_y + rec->dst_height <= mode->vdisplay)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int check_overlay_scaling(struct put_image_params *rec)
+{
+ u32 tmp;
+
+ /* downscaling limit is 8.0 */
+ tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+ tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+
+ return 0;
+}
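+
+/*
+ * The shifts above extract the integer part of a 16.16 fixed-point
+ * ratio, so "tmp > 7" rejects downscaling by 8.0x or more. E.g. with
+ * src_scan_h = 4096, dst_h = 512 gives ((4096 << 16) / 512) >> 16 = 8
+ * and fails, while dst_h = 513 gives 7 and passes.
+ */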
+
+static int check_overlay_src(struct drm_device *dev,
+ struct drm_intel_overlay_put_image *rec,
+ struct drm_i915_gem_object *new_bo)
+{
+ int uv_hscale = uv_hsubsampling(rec->flags);
+ int uv_vscale = uv_vsubsampling(rec->flags);
+ u32 stride_mask;
+ int depth;
+ u32 tmp;
+
+ /* check src dimensions */
+ if (IS_845G(dev) || IS_I830(dev)) {
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+ rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ return -EINVAL;
+ } else {
+ if (rec->src_height > IMAGE_MAX_HEIGHT ||
+ rec->src_width > IMAGE_MAX_WIDTH)
+ return -EINVAL;
+ }
+
+ /* better safe than sorry, use 4 as the maximal subsampling ratio */
+ if (rec->src_height < N_VERT_Y_TAPS*4 ||
+ rec->src_width < N_HORIZ_Y_TAPS*4)
+ return -EINVAL;
+
+ /* check alignment constraints */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+
+ case I915_OVERLAY_YUV_PACKED:
+ if (uv_vscale != 1)
+ return -EINVAL;
+
+ depth = packed_depth_bytes(rec->flags);
+ if (depth < 0)
+ return depth;
+
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
+ return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (rec->src_width % uv_hscale)
+ return -EINVAL;
+
+ /* stride checking */
+ if (IS_I830(dev) || IS_845G(dev))
+ stride_mask = 255;
+ else
+ stride_mask = 63;
+
+ if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+ return -EINVAL;
+ if (IS_GEN4(dev) && rec->stride_Y < 512)
+ return -EINVAL;
+
+ tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+ 4096 : 8192;
+ if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
+ return -EINVAL;
+
+ /* check buffer dimensions */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+ /* packed formats store 4 bytes per 2 pixels; the row must fit stride_Y */
+ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->base.size)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y * rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->base.size)
+ return -EINVAL;
+
+ tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+ if (rec->offset_U + tmp > new_bo->base.size ||
+ rec->offset_V + tmp > new_bo->base.size)
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /* i830 doesn't have a panel fitter */
+ if (IS_I830(dev))
+ return -1;
+
+ pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+
+ /* 965 can place panel fitter on either pipe */
+ if (IS_GEN4(dev))
+ return (pfit_control >> 29) & 0x3;
+
+ /* older chips can only use pipe 1 */
+ return 1;
+}
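+
+/*
+ * On gen4 the pipe binding lives in bits 30:29 of PFIT_CONTROL, hence
+ * the (pfit_control >> 29) & 0x3 above; older chips hardwire the panel
+ * fitter to pipe B, hence the constant 1.
+ */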
+
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_put_image *put_image_rec = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_mode_object *drmmode_obj;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *new_bo;
+ struct put_image_params *params;
+ int ret;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+ sx_xlock(&dev->mode_config.mutex);
+ DRM_LOCK(dev);
+
+ ret = intel_overlay_switch_off(overlay);
+
+ DRM_UNLOCK(dev);
+ sx_xunlock(&dev->mode_config.mutex);
+
+ return ret;
+ }
+
+ params = malloc(sizeof(struct put_image_params), DRM_I915_GEM,
+ M_WAITOK | M_ZERO);
+
+ drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!drmmode_obj) {
+ ret = -ENOENT;
+ goto out_free;
+ }
+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+
+ new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+ put_image_rec->bo_handle));
+ if (&new_bo->base == NULL) {
+ ret = -ENOENT;
+ goto out_free;
+ }
+
+ sx_xlock(&dev->mode_config.mutex);
+ DRM_LOCK(dev);
+
+ if (new_bo->tiling_mode) {
+ DRM_ERROR("buffer used for overlay image can not be tiled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = intel_overlay_recover_from_interrupt(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ if (overlay->crtc != crtc) {
+ struct drm_display_mode *mode = &crtc->base.mode;
+ ret = intel_overlay_switch_off(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = check_overlay_possible_on_crtc(overlay, crtc);
+ if (ret != 0)
+ goto out_unlock;
+
+ overlay->crtc = crtc;
+ crtc->overlay = overlay;
+
+ /* line too wide, i.e. one-line-mode */
+ if (mode->hdisplay > 1024 &&
+ intel_panel_fitter_pipe(dev) == crtc->pipe) {
+ overlay->pfit_active = 1;
+ update_pfit_vscale_ratio(overlay);
+ } else
+ overlay->pfit_active = 0;
+ }
+
+ ret = check_overlay_dst(overlay, put_image_rec);
+ if (ret != 0)
+ goto out_unlock;
+
+ if (overlay->pfit_active) {
+ params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+ overlay->pfit_vscale_ratio);
+ /* shifting right rounds downwards, so add 1 */
+ params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+ overlay->pfit_vscale_ratio) + 1;
+ } else {
+ params->dst_y = put_image_rec->dst_y;
+ params->dst_h = put_image_rec->dst_height;
+ }
+ params->dst_x = put_image_rec->dst_x;
+ params->dst_w = put_image_rec->dst_width;
+
+ params->src_w = put_image_rec->src_width;
+ params->src_h = put_image_rec->src_height;
+ params->src_scan_w = put_image_rec->src_scan_width;
+ params->src_scan_h = put_image_rec->src_scan_height;
+ if (params->src_scan_h > params->src_h ||
+ params->src_scan_w > params->src_w) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = check_overlay_src(dev, put_image_rec, new_bo);
+ if (ret != 0)
+ goto out_unlock;
+ params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
+ params->stride_Y = put_image_rec->stride_Y;
+ params->stride_UV = put_image_rec->stride_UV;
+ params->offset_Y = put_image_rec->offset_Y;
+ params->offset_U = put_image_rec->offset_U;
+ params->offset_V = put_image_rec->offset_V;
+
+ /* Check scaling after src size to prevent a divide-by-zero. */
+ ret = check_overlay_scaling(params);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = intel_overlay_do_put_image(overlay, new_bo, params);
+ if (ret != 0)
+ goto out_unlock;
+
+ DRM_UNLOCK(dev);
+ sx_xunlock(&dev->mode_config.mutex);
+
+ free(params, DRM_I915_GEM);
+
+ return 0;
+
+out_unlock:
+ DRM_UNLOCK(dev);
+ sx_xunlock(&dev->mode_config.mutex);
+ drm_gem_object_unreference_unlocked(&new_bo->base);
+out_free:
+ free(params, DRM_I915_GEM);
+
+ return ret;
+}
+
+static void update_reg_attrs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
+ regs->OCLRC1 = overlay->saturation;
+}
+
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+ int i;
+
+ if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+ return false;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma5 >> i*8) & 0xff) == 0x80)
+ return false;
+ }
+
+ return true;
+}
+
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+ if (!check_gamma_bounds(0, attrs->gamma0) ||
+ !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+ !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+ !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+ !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+ !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+ !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ return -EINVAL;
+
+ if (!check_gamma5_errata(attrs->gamma5))
+ return -EINVAL;
+
+ return 0;
+}
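+
+/*
+ * In other words, the six gamma points must form a strictly increasing
+ * per-channel ramp between 0 and 0xffffff. A valid example is
+ * gamma0..gamma5 = 0x101010, 0x202020, 0x303030, 0x404040, 0x505050,
+ * 0x606060; repeating a channel value, or a 0x80 channel byte in gamma5
+ * (the errata check), yields -EINVAL.
+ */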
+
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_attrs *attrs = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct overlay_registers *regs;
+ int ret;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ sx_xlock(&dev->mode_config.mutex);
+ DRM_LOCK(dev);
+
+ ret = -EINVAL;
+ if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+ attrs->color_key = overlay->color_key;
+ attrs->brightness = overlay->brightness;
+ attrs->contrast = overlay->contrast;
+ attrs->saturation = overlay->saturation;
+
+ if (!IS_GEN2(dev)) {
+ attrs->gamma0 = I915_READ(OGAMC0);
+ attrs->gamma1 = I915_READ(OGAMC1);
+ attrs->gamma2 = I915_READ(OGAMC2);
+ attrs->gamma3 = I915_READ(OGAMC3);
+ attrs->gamma4 = I915_READ(OGAMC4);
+ attrs->gamma5 = I915_READ(OGAMC5);
+ }
+ } else {
+ if (attrs->brightness < -128 || attrs->brightness > 127)
+ goto out_unlock;
+ if (attrs->contrast > 255)
+ goto out_unlock;
+ if (attrs->saturation > 1023)
+ goto out_unlock;
+
+ overlay->color_key = attrs->color_key;
+ overlay->brightness = attrs->brightness;
+ overlay->contrast = attrs->contrast;
+ overlay->saturation = attrs->saturation;
+
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs(overlay, regs);
+
+ if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+ if (IS_GEN2(dev))
+ goto out_unlock;
+
+ if (overlay->active) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = check_gamma(attrs);
+ if (ret)
+ goto out_unlock;
+
+ I915_WRITE(OGAMC0, attrs->gamma0);
+ I915_WRITE(OGAMC1, attrs->gamma1);
+ I915_WRITE(OGAMC2, attrs->gamma2);
+ I915_WRITE(OGAMC3, attrs->gamma3);
+ I915_WRITE(OGAMC4, attrs->gamma4);
+ I915_WRITE(OGAMC5, attrs->gamma5);
+ }
+ }
+
+ ret = 0;
+out_unlock:
+ DRM_UNLOCK(dev);
+ sx_xunlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void intel_setup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_i915_gem_object *reg_bo;
+ struct overlay_registers *regs;
+ int ret;
+
+ if (!HAS_OVERLAY(dev))
+ return;
+
+ overlay = malloc(sizeof(struct intel_overlay), DRM_I915_GEM,
+ M_WAITOK | M_ZERO);
+ DRM_LOCK(dev);
+ if (dev_priv->overlay != NULL)
+ goto out_free;
+ overlay->dev = dev;
+
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+ if (!reg_bo)
+ goto out_free;
+ overlay->reg_bo = reg_bo;
+
+ if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+ ret = i915_gem_attach_phys_object(dev, reg_bo,
+ I915_GEM_PHYS_OVERLAY_REGS,
+ PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to attach phys overlay regs\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+ } else {
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
+ if (ret) {
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = reg_bo->gtt_offset;
+
+ ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+ if (ret) {
+ DRM_ERROR("failed to move overlay register bo into the GTT\n");
+ goto out_unpin_bo;
+ }
+ }
+
+ /* init all values */
+ overlay->color_key = 0x0101fe;
+ overlay->brightness = -19;
+ overlay->contrast = 75;
+ overlay->saturation = 146;
+
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs)
+ goto out_unpin_bo;
+
+ memset(regs, 0, sizeof(struct overlay_registers));
+ update_polyphase_filter(regs);
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs(overlay, regs);
+
+ dev_priv->overlay = overlay;
+ DRM_INFO("initialized overlay support\n");
+ DRM_UNLOCK(dev);
+ return;
+
+out_unpin_bo:
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ i915_gem_object_unpin(reg_bo);
+out_free_bo:
+ drm_gem_object_unreference(&reg_bo->base);
+out_free:
+ DRM_UNLOCK(dev);
+ free(overlay, DRM_I915_GEM);
+ return;
+}
+
+void intel_cleanup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv->overlay)
+ return;
+
+ /* The BOs should already be freed by the generic code.
+ * Furthermore, modesetting teardown happens beforehand, so the
+ * hardware should already be off. */
+ KASSERT(!dev_priv->overlay->active, ("Overlay still active"));
+
+ drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+ free(dev_priv->overlay, DRM_I915_GEM);
+}
+
+struct intel_overlay_error_state {
+ struct overlay_registers regs;
+ unsigned long base;
+ u32 dovsta;
+ u32 isr;
+};
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay_error_state *error;
+ struct overlay_registers __iomem *regs;
+
+ if (!overlay || !overlay->active)
+ return NULL;
+
+ error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT);
+ if (error == NULL)
+ return NULL;
+
+ error->dovsta = I915_READ(DOVSTA);
+ error->isr = I915_READ(ISR);
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ error->base = (long) overlay->reg_bo->gtt_offset;
+
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs)
+ goto err;
+
+ memcpy(&error->regs, regs, sizeof(struct overlay_registers));
+ intel_overlay_unmap_regs(overlay, regs);
+
+ return (error);
+
+err:
+ free(error, DRM_I915_GEM);
+ return (NULL);
+}
+
+void
+intel_overlay_print_error_state(struct sbuf *m,
+ struct intel_overlay_error_state *error)
+{
+ sbuf_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+ error->dovsta, error->isr);
+ sbuf_printf(m, " Register file at 0x%08lx:\n",
+ error->base);
+
+#define P(x) sbuf_printf(m, " " #x ": 0x%08x\n", error->regs.x)
+ P(OBUF_0Y);
+ P(OBUF_1Y);
+ P(OBUF_0U);
+ P(OBUF_0V);
+ P(OBUF_1U);
+ P(OBUF_1V);
+ P(OSTRIDE);
+ P(YRGB_VPH);
+ P(UV_VPH);
+ P(HORZ_PH);
+ P(INIT_PHS);
+ P(DWINPOS);
+ P(DWINSZ);
+ P(SWIDTH);
+ P(SWIDTHSW);
+ P(SHEIGHT);
+ P(YRGBSCALE);
+ P(UVSCALE);
+ P(OCLRC0);
+ P(OCLRC1);
+ P(DCLRKV);
+ P(DCLRKM);
+ P(SCLRKVH);
+ P(SCLRKVL);
+ P(SCLRKEN);
+ P(OCONFIG);
+ P(OCMD);
+ P(OSTART_0Y);
+ P(OSTART_1Y);
+ P(OSTART_0U);
+ P(OSTART_0V);
+ P(OSTART_1U);
+ P(OSTART_1V);
+ P(OTILEOFF_0Y);
+ P(OTILEOFF_1Y);
+ P(OTILEOFF_0U);
+ P(OTILEOFF_0V);
+ P(OTILEOFF_1U);
+ P(OTILEOFF_1V);
+ P(FASTHSCALE);
+ P(UVSCALEV);
+#undef P
+}
diff --git a/sys/dev/drm2/i915/intel_panel.c b/sys/dev/drm2/i915/intel_panel.c
new file mode 100644
index 0000000..815848b
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_panel.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright © 2006-2010 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/intel_drv.h>
+
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
+void
+intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+}
+
+/* adjusted_mode has been preset to be the panel's fixed mode */
+void
+intel_pch_panel_fitting(struct drm_device *dev,
+ int fitting_mode,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int x, y, width, height;
+
+ x = y = width = height = 0;
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay)
+ goto done;
+
+ switch (fitting_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ width = mode->hdisplay;
+ height = mode->vdisplay;
+ x = (adjusted_mode->hdisplay - width + 1)/2;
+ y = (adjusted_mode->vdisplay - height + 1)/2;
+ break;
+
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+ if (scaled_width > scaled_height) { /* pillar */
+ width = scaled_height / mode->vdisplay;
+ if (width & 1)
+ width++;
+ x = (adjusted_mode->hdisplay - width + 1) / 2;
+ y = 0;
+ height = adjusted_mode->vdisplay;
+ } else if (scaled_width < scaled_height) { /* letter */
+ height = scaled_width / mode->hdisplay;
+ if (height & 1)
+ height++;
+ y = (adjusted_mode->vdisplay - height + 1) / 2;
+ x = 0;
+ width = adjusted_mode->hdisplay;
+ } else {
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ }
+ }
+ break;
+
+ default:
+ case DRM_MODE_SCALE_FULLSCREEN:
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ break;
+ }
+
+done:
+ dev_priv->pch_pf_pos = (x << 16) | y;
+ dev_priv->pch_pf_size = (width << 16) | height;
+}
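+
+/*
+ * Worked example of the aspect-preserving branch above (illustrative
+ * numbers, not from any particular panel): adjusted_mode is the panel's
+ * 1920x1200 fixed mode, the requested mode is 1920x1080.  Then
+ * scaled_width = 1920*1080 = 2073600 < scaled_height = 1920*1200 =
+ * 2304000, so the letterbox branch runs: height = 2073600/1920 = 1080,
+ * y = (1200 - 1080 + 1)/2 = 60, x = 0, width = 1920.  The panel fitter
+ * is programmed with pos = (0 << 16) | 60, size = (1920 << 16) | 1080.
+ */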
+
+static int is_backlight_combination_mode(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+ if (IS_GEN2(dev))
+ return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+ return 0;
+}
+
+static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+	/* Restore the CTL value if it was lost, e.g. across a GPU reset */
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ val = I915_READ(BLC_PWM_PCH_CTL2);
+ if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+ dev_priv->saveBLC_PWM_CTL2 = val;
+ } else if (val == 0) {
+ I915_WRITE(BLC_PWM_PCH_CTL2,
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL2;
+ }
+ } else {
+ val = I915_READ(BLC_PWM_CTL);
+ if (dev_priv->saveBLC_PWM_CTL == 0) {
+ dev_priv->saveBLC_PWM_CTL = val;
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ } else if (val == 0) {
+ I915_WRITE(BLC_PWM_CTL,
+ dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL;
+ }
+ }
+
+ return val;
+}
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 max;
+
+ max = i915_read_blc_pwm_ctl(dev_priv);
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+#if 0
+ printf("fixme: max PWM is zero.\n");
+#endif
+ return 1;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ max >>= 16;
+ } else {
+ if (INTEL_INFO(dev)->gen < 4)
+ max >>= 17;
+ else
+ max >>= 16;
+
+ if (is_backlight_combination_mode(dev))
+ max *= 0xff;
+ }
+
+ DRM_DEBUG("max backlight PWM = %d\n", max);
+ return max;
+}
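+
+/*
+ * Example (assumed raw value): on a non-PCH gen3 part with BLC_PWM_CTL
+ * = 0x12340000, the max field occupies bits 31:17, so max = 0x12340000
+ * >> 17 = 0x91a.  In combination mode it is further multiplied by 0xff
+ * so that the PWM duty cycle and the LBPC byte together cover the full
+ * brightness range.
+ */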
+
+u32 intel_panel_get_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
+ val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (INTEL_INFO(dev)->gen < 4)
+ val >>= 1;
+
+ if (is_backlight_combination_mode(dev)) {
+ u8 lbpc;
+
+ lbpc = pci_read_config(dev->device, PCI_LBPC, 1);
+ val *= lbpc;
+ }
+ }
+
+ DRM_DEBUG("get backlight PWM = %d\n", val);
+ return val;
+}
+
+static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+}
+
+static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG("set backlight PWM = %d\n", level);
+
+ if (HAS_PCH_SPLIT(dev))
+ return intel_pch_panel_set_backlight(dev, level);
+
+ if (is_backlight_combination_mode(dev)) {
+ u32 max = intel_panel_get_max_backlight(dev);
+ u8 lbpc;
+
+ lbpc = level * 0xfe / max + 1;
+ level /= lbpc;
+		/* PCI_LBPC is a one-byte register; match the 1-byte read above. */
+		pci_write_config(dev->device, PCI_LBPC, lbpc, 1);
+ }
+
+ tmp = I915_READ(BLC_PWM_CTL);
+ if (INTEL_INFO(dev)->gen < 4)
+ level <<= 1;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
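+
+/*
+ * Illustrative example of the combination-mode split above (assumed
+ * numbers): with level = 30000 and max = 40000, lbpc = 30000 * 0xfe /
+ * 40000 + 1 = 191 and level becomes 30000 / 191 = 157.  The effective
+ * brightness seen by intel_panel_get_backlight() is the product
+ * 157 * 191 = 29987, i.e. the requested level up to rounding error.
+ */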
+
+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->backlight_level = level;
+ if (dev_priv->backlight_enabled)
+ intel_panel_actually_set_backlight(dev, level);
+}
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->backlight_enabled = false;
+ intel_panel_actually_set_backlight(dev, 0);
+}
+
+void intel_panel_enable_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->backlight_level == 0)
+ dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+ dev_priv->backlight_enabled = true;
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+}
+
+static void intel_panel_init_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+ dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
+}
+
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+#if 0
+ struct drm_i915_private *dev_priv = dev->dev_private;
+#endif
+
+ if (i915_panel_ignore_lid)
+ return i915_panel_ignore_lid > 0 ?
+ connector_status_connected :
+ connector_status_disconnected;
+
+ /* opregion lid state on HP 2540p is wrong at boot up,
+ * appears to be either the BIOS or Linux ACPI fault */
+#if 0
+ /* Assume that the BIOS does not lie through the OpRegion... */
+ if (dev_priv->opregion.lid_state)
+ return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+ connector_status_connected :
+ connector_status_disconnected;
+#endif
+
+ return connector_status_unknown;
+}
+
+int intel_panel_setup_backlight(struct drm_device *dev)
+{
+ intel_panel_init_backlight(dev);
+ return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_device *dev)
+{
+ return;
+}
diff --git a/sys/dev/drm2/i915/intel_ringbuffer.c b/sys/dev/drm2/i915/intel_ringbuffer.c
new file mode 100644
index 0000000..c0b752b
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_ringbuffer.c
@@ -0,0 +1,1623 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ *    Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/i915/intel_ringbuffer.h>
+#include <sys/sched.h>
+#include <sys/sf_buf.h>
+
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+ u32 gtt_offset;
+};
+
+void
+i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno)
+{
+
+ if (ring->trace_irq_seqno == 0) {
+ mtx_lock(&ring->irq_lock);
+ if (ring->irq_get(ring))
+ ring->trace_irq_seqno = seqno;
+ mtx_unlock(&ring->irq_lock);
+ }
+}
+
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+ if (space < 0)
+ space += ring->size;
+ return space;
+}
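+
+/*
+ * Example (illustrative values): with a 16384-byte ring, head = 512 and
+ * tail = 16000, space = 512 - 16008 = -15496, which wraps to -15496 +
+ * 16384 = 888 free bytes.  The 8-byte slack keeps tail from ever
+ * catching up to head, which would read back as an empty ring.
+ */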
+
+static int
+render_ring_flush(struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ struct drm_device *dev = ring->dev;
+ uint32_t cmd;
+ int ret;
+
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (INTEL_INFO(dev)->gen < 4) {
+ /*
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
+ */
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_G4X(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
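+
+/*
+ * Worked example (illustrative): invalidate_domains = RENDER |
+ * INSTRUCTION on a 965 starts with cmd = MI_FLUSH | MI_NO_WRITE_FLUSH,
+ * clears MI_NO_WRITE_FLUSH because RENDER is present, skips
+ * MI_READ_FLUSH (gen >= 4) and ORs in MI_EXE_FLUSH, so the ring
+ * receives MI_FLUSH | MI_EXE_FLUSH followed by an MI_NOOP pad.
+ */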
+
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+static int
+intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0); /* low dword */
+ intel_ring_emit(ring, 0); /* high dword */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen6_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+	/* Force SNB workarounds for PIPE_CONTROL flushes */
+	ret = intel_emit_post_sync_nonzero_flush(ring);
+	if (ret)
+		return ret;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0); /* lower dword */
+	intel_ring_emit(ring, 0); /* upper dword */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static void ring_write_tail(struct intel_ring_buffer *ring,
+ uint32_t value)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ I915_WRITE_TAIL(ring, value);
+}
+
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ uint32_t acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
+ RING_ACTHD(ring->mmio_base) : ACTHD;
+
+ return I915_READ(acthd_reg);
+}
+
+static int init_ring_common(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
+ uint32_t head;
+
+ /* Stop the ring if it's running. */
+ I915_WRITE_CTL(ring, 0);
+ I915_WRITE_HEAD(ring, 0);
+ ring->write_tail(ring, 0);
+
+ /* Initialize the ring. */
+ I915_WRITE_START(ring, obj->gtt_offset);
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+
+ /* G45 ring initialization fails to reset head to zero */
+ if (head != 0) {
+ DRM_DEBUG("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+
+ I915_WRITE_HEAD(ring, 0);
+
+ if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ DRM_ERROR("failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ }
+ }
+
+ I915_WRITE_CTL(ring,
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+ | RING_VALID);
+
+ /* If the head is still not zero, the ring is dead */
+ if (_intel_wait_for(ring->dev,
+ (I915_READ_CTL(ring) & RING_VALID) != 0 &&
+ I915_READ_START(ring) == obj->gtt_offset &&
+ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0,
+ 50, 1, "915rii")) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ return -EIO;
+ }
+
+ if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+ i915_kernel_lost_context(ring->dev);
+ else {
+ ring->head = I915_READ_HEAD(ring);
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ring->space = ring_space(ring);
+ }
+
+ return 0;
+}
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (ring->private)
+ return 0;
+
+ pc = malloc(sizeof(*pc), DRM_I915_GEM, M_WAITOK);
+ if (!pc)
+ return -ENOMEM;
+
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret)
+ goto err_unref;
+
+ pc->gtt_offset = obj->gtt_offset;
+ pc->cpu_page = (uint32_t *)kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+	if (pc->cpu_page == NULL) {
+		/* Avoid returning the stale 0 left over from the pin above. */
+		ret = -ENOMEM;
+		goto err_unpin;
+	}
+ pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
+ pmap_invalidate_range(kernel_pmap, (vm_offset_t)pc->cpu_page,
+ (vm_offset_t)pc->cpu_page + PAGE_SIZE);
+ pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
+ (vm_offset_t)pc->cpu_page + PAGE_SIZE);
+
+ pc->obj = obj;
+ ring->private = pc;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ free(pc, DRM_I915_GEM);
+ return ret;
+}
+
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ struct drm_i915_gem_object *obj;
+
+ if (!ring->private)
+ return;
+
+ obj = pc->obj;
+ pmap_qremove((vm_offset_t)pc->cpu_page, 1);
+ pmap_invalidate_range(kernel_pmap, (vm_offset_t)pc->cpu_page,
+ (vm_offset_t)pc->cpu_page + PAGE_SIZE);
+ kmem_free(kernel_map, (uintptr_t)pc->cpu_page, PAGE_SIZE);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ free(pc, DRM_I915_GEM);
+ ring->private = NULL;
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = init_ring_common(ring);
+
+ if (INTEL_INFO(dev)->gen > 3) {
+ int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ I915_WRITE(MI_MODE, mode);
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+ }
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_GEN6(dev)) {
+ /* From the Sandybridge PRM, volume 1 part 3, page 24:
+ * "If this bit is set, STCunit will have LRA as replacement
+ * policy. [...] This bit must be reset. LRA replacement
+ * policy is not supported."
+ */
+ I915_WRITE(CACHE_MODE_0,
+ CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+ }
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ I915_WRITE(INSTPM,
+ INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+ }
+
+ return ret;
+}
+
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ if (!ring->private)
+ return;
+
+ cleanup_pipe_control(ring);
+}
+
+static void
+update_mboxes(struct intel_ring_buffer *ring,
+ u32 seqno,
+ u32 mmio_offset)
+{
+ intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_REGISTER |
+ MI_SEMAPHORE_UPDATE);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, mmio_offset);
+}
+
+/**
+ * gen6_add_request - Update the semaphore mailbox registers
+ *
+ * @ring - ring that is adding a request
+ * @seqno - return seqno stuck into the ring
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+ u32 *seqno)
+{
+ u32 mbox1_reg;
+ u32 mbox2_reg;
+ int ret;
+
+ ret = intel_ring_begin(ring, 10);
+ if (ret)
+ return ret;
+
+ mbox1_reg = ring->signal_mbox[0];
+ mbox2_reg = ring->signal_mbox[1];
+
+ *seqno = i915_gem_next_request_seqno(ring);
+
+ update_mboxes(ring, *seqno, mbox1_reg);
+ update_mboxes(ring, *seqno, mbox2_reg);
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, *seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/**
+ * intel_ring_sync - sync the waiter to the signaller on seqno
+ *
+ * @waiter - ring that is waiting
+ * @signaller - ring which has, or will signal
+ * @seqno - seqno which the waiter will block on
+ */
+static int
+intel_ring_sync(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ int ring,
+ u32 seqno)
+{
+ int ret;
+ u32 dw1 = MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER;
+
+ ret = intel_ring_begin(waiter, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter, 0);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_advance(waiter);
+
+ return 0;
+}
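+
+/*
+ * Example flow (sketch): to make the blitter wait for render work,
+ * gen6_blt_ring_sync_to() emits this MI_SEMAPHORE_MBOX poll on the
+ * blitter ring; the poll completes once update_mboxes(), run as part
+ * of gen6_add_request() on the render ring, has pushed a seqno at
+ * least as new as the one emitted here into the mailbox register.
+ */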
+
+int render_ring_sync_to(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller, u32 seqno);
+int gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller, u32 seqno);
+int gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller, u32 seqno);
+
+/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
+int
+render_ring_sync_to(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
+{
+ KASSERT(signaller->semaphore_register[RCS] != MI_SEMAPHORE_SYNC_INVALID,
+ ("valid RCS semaphore"));
+ return intel_ring_sync(waiter,
+ signaller,
+ RCS,
+ seqno);
+}
+
+/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
+int
+gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
+{
+ KASSERT(signaller->semaphore_register[VCS] != MI_SEMAPHORE_SYNC_INVALID,
+ ("Valid VCS semaphore"));
+ return intel_ring_sync(waiter,
+ signaller,
+ VCS,
+ seqno);
+}
+
+/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
+int
+gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
+{
+ KASSERT(signaller->semaphore_register[BCS] != MI_SEMAPHORE_SYNC_INVALID,
+ ("Valid BCS semaphore"));
+ return intel_ring_sync(waiter,
+ signaller,
+ BCS,
+ seqno);
+}
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__) \
+do { \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
+ intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
+ intel_ring_emit(ring__, 0); \
+ intel_ring_emit(ring__, 0); \
+} while (0)
+
+static int
+pc_render_add_request(struct intel_ring_buffer *ring,
+ uint32_t *result)
+{
+ u32 seqno = i915_gem_next_request_seqno(ring);
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+ * incoherent with writes to memory, i.e. completely fubar,
+ * so we need to use PIPE_NOTIFY instead.
+ *
+ * However, we also need to workaround the qword write
+ * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+ * memory before requesting an interrupt.
+ */
+ ret = intel_ring_begin(ring, 32);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_NOTIFY);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
+
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+ uint32_t *result)
+{
+ u32 seqno = i915_gem_next_request_seqno(ring);
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
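+
+/*
+ * The sequence above is the classic breadcrumb pattern: the GPU stores
+ * the request's seqno into dword I915_GEM_HWS_INDEX of the hardware
+ * status page and then raises MI_USER_INTERRUPT, so the CPU can later
+ * read the last-completed seqno via ring_get_seqno() without touching
+ * MMIO registers.
+ */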
+
+static u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ /* Workaround to force correct ordering between irq and seqno writes on
+ * ivb (and maybe also on snb) by reading from a CS register (like
+ * ACTHD) before reading the status page. */
+ if (/* IS_GEN6(dev) || */IS_GEN7(dev))
+ intel_ring_get_active_head(ring);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static uint32_t
+ring_get_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->status_page.page_addr == NULL)
+ return (-1);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static uint32_t
+pc_render_get_seqno(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ if (pc != NULL)
+ return pc->cpu_page[0];
+ else
+ return (-1);
+}
+
+static void
+ironlake_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+{
+ dev_priv->gt_irq_mask &= ~mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+}
+
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+{
+ dev_priv->gt_irq_mask |= mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+{
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+{
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+}
+
+static bool
+render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ mtx_assert(&ring->irq_lock, MA_OWNED);
+ if (ring->irq_refcount++ == 0) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_enable_irq(dev_priv,
+ GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
+ else
+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
+
+ return true;
+}
+
+static void
+render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ mtx_assert(&ring->irq_lock, MA_OWNED);
+ if (--ring->irq_refcount == 0) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_disable_irq(dev_priv,
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY);
+ else
+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
+}
+
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t mmio = 0;
+
+ /* The ring status page addresses are no longer next to the rest of
+ * the ring registers as of gen7.
+ */
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
+}
+
+static int
+bsd_ring_flush(struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+static int
+ring_add_request(struct intel_ring_buffer *ring,
+ uint32_t *result)
+{
+ uint32_t seqno;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ seqno = i915_gem_next_request_seqno(ring);
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
+
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ mtx_assert(&ring->irq_lock, MA_OWNED);
+ if (ring->irq_refcount++ == 0) {
+ ring->irq_mask &= ~rflag;
+ I915_WRITE_IMR(ring, ring->irq_mask);
+ ironlake_enable_irq(dev_priv, gflag);
+ }
+
+ return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ mtx_assert(&ring->irq_lock, MA_OWNED);
+ if (--ring->irq_refcount == 0) {
+ ring->irq_mask |= rflag;
+ I915_WRITE_IMR(ring, ring->irq_mask);
+ ironlake_disable_irq(dev_priv, gflag);
+ }
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+static bool
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ mtx_assert(&ring->irq_lock, MA_OWNED);
+ if (ring->irq_refcount++ == 0) {
+ if (IS_G4X(dev))
+ i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+ else
+ ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
+ }
+
+ return true;
+}
+
+static void
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ mtx_assert(&ring->irq_lock, MA_OWNED);
+ if (--ring->irq_refcount == 0) {
+ if (IS_G4X(dev))
+ i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+ else
+ ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
+ }
+}
+
+static int
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, uint32_t offset,
+ uint32_t length)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ uint32_t offset, uint32_t len)
+{
+ struct drm_device *dev = ring->dev;
+ int ret;
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, 0);
+ } else {
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
+ } else {
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6));
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ }
+ }
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static void cleanup_status_page(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ obj = ring->status_page.obj;
+ if (obj == NULL)
+ return;
+
+ pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1);
+ pmap_invalidate_range(kernel_pmap,
+ (vm_offset_t)ring->status_page.page_addr,
+ (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
+ kmem_free(kernel_map, (vm_offset_t)ring->status_page.page_addr,
+ PAGE_SIZE);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ ring->status_page.obj = NULL;
+
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+}
+
+static int init_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret != 0) {
+ goto err_unref;
+ }
+
+ ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.page_addr = (void *)kmem_alloc_nofault(kernel_map,
+ PAGE_SIZE);
+	if (ring->status_page.page_addr == NULL) {
+		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		/* Avoid returning the stale 0 left over from the pin above. */
+		ret = -ENOMEM;
+		goto err_unpin;
+	}
+ pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
+ 1);
+ pmap_invalidate_range(kernel_pmap,
+ (vm_offset_t)ring->status_page.page_addr,
+ (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
+ pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
+ (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
+ ring->status_page.obj = obj;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ intel_ring_setup_status_page(ring);
+ DRM_DEBUG("i915: init_status_page %s hws offset: 0x%08x\n",
+ ring->name, ring->status_page.gfx_addr);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ return ret;
+}
+
+static int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
+
+ mtx_init(&ring->irq_lock, "ringb", NULL, MTX_DEF);
+ ring->irq_mask = ~0;
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ ret = init_status_page(ring);
+ if (ret)
+ return ret;
+ }
+
+ obj = i915_gem_alloc_object(dev, ring->size);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ ret = -ENOMEM;
+ goto err_hws;
+ }
+
+ ring->obj = obj;
+
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+ if (ret)
+ goto err_unref;
+
+ ring->map.size = ring->size;
+ ring->map.offset = dev->agp->base + obj->gtt_offset;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
+
+ drm_core_ioremap_wc(&ring->map, dev);
+ if (ring->map.virtual == NULL) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ ring->virtual_start = ring->map.virtual;
+ ret = ring->init(ring);
+ if (ret)
+ goto err_unmap;
+
+ /* Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
+ ring->effective_size -= 128;
+
+ return 0;
+
+err_unmap:
+ drm_core_ioremapfree(&ring->map, dev);
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+ ring->obj = NULL;
+err_hws:
+ cleanup_status_page(ring);
+ return ret;
+}
+
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ if (ring->obj == NULL)
+ return;
+
+ /* Disable the ring buffer. The ring must be idle at this point */
+ dev_priv = ring->dev->dev_private;
+ ret = intel_wait_ring_idle(ring);
+ I915_WRITE_CTL(ring, 0);
+
+ drm_core_ioremapfree(&ring->map, ring->dev);
+
+ i915_gem_object_unpin(ring->obj);
+ drm_gem_object_unreference(&ring->obj->base);
+ ring->obj = NULL;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
+ cleanup_status_page(ring);
+}
+
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+ unsigned int *virt;
+ int rem = ring->size - ring->tail;
+
+ if (ring->space < rem) {
+ int ret = intel_wait_ring_buffer(ring, rem);
+ if (ret)
+ return ret;
+ }
+
+ virt = (unsigned int *)((char *)ring->virtual_start + ring->tail);
+ rem /= 8;
+ while (rem--) {
+ *virt++ = MI_NOOP;
+ *virt++ = MI_NOOP;
+ }
+
+ ring->tail = 0;
+ ring->space = ring_space(ring);
+
+ return 0;
+}
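+
+/*
+ * Example (illustrative values): size = effective_size = 16384 and
+ * tail = 16000 leaves rem = 384 bytes before the physical end.  After
+ * waiting for 384 bytes of space, the loop writes 384/8 = 48 pairs of
+ * MI_NOOP dwords (8 bytes per iteration) and restarts at tail = 0.
+ */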
+
+static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ bool was_interruptible;
+ int ret;
+
+ /* XXX As we have not yet audited all the paths to check that
+ * they are ready for ERESTARTSYS from intel_ring_begin, do not
+ * allow us to be interruptible by a signal.
+ */
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ ret = i915_wait_request(ring, seqno, true);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ return ret;
+}
+
+static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+{
+ struct drm_i915_gem_request *request;
+ u32 seqno = 0;
+ int ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ if (ring->last_retired_head != -1) {
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
+ ring->space = ring_space(ring);
+ if (ring->space >= n)
+ return 0;
+ }
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ int space;
+
+ if (request->tail == -1)
+ continue;
+
+ space = request->tail - (ring->tail + 8);
+ if (space < 0)
+ space += ring->size;
+ if (space >= n) {
+ seqno = request->seqno;
+ break;
+ }
+
+ /* Consume this request in case we need more space than
+ * is available and so need to prevent a race between
+ * updating last_retired_head and direct reads of
+ * I915_RING_HEAD. It also provides a nice sanity check.
+ */
+ request->tail = -1;
+ }
+
+ if (seqno == 0)
+ return -ENOSPC;
+
+ ret = intel_ring_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ if (ring->last_retired_head == -1)
+ return -ENOSPC;
+
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
+ ring->space = ring_space(ring);
+ if (ring->space < n)
+ return -ENOSPC;
+
+ return 0;
+}
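+
+/*
+ * Note: request->tail - (ring->tail + 8) above is the same free-space
+ * computation as ring_space(); once the chosen request retires,
+ * last_retired_head lets the ring reclaim exactly that much space
+ * without reading I915_RING_HEAD from the hardware.
+ */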
+
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int end;
+ int ret;
+
+ ret = intel_ring_wait_request(ring, n);
+ if (ret != -ENOSPC)
+ return ret;
+
+ CTR1(KTR_DRM, "ring_wait_begin %s", ring->name);
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ /* With GEM the hangcheck timer should kick us out of the loop,
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = ticks + hz * 60;
+ else
+ end = ticks + hz * 3;
+ do {
+ ring->head = I915_READ_HEAD(ring);
+ ring->space = ring_space(ring);
+ if (ring->space >= n) {
+ CTR1(KTR_DRM, "ring_wait_end %s", ring->name);
+ return 0;
+ }
+
+#if 0
+ if (dev->primary->master) {
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+#else
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+#endif
+
+ pause("915rng", 1);
+ if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) {
+ CTR1(KTR_DRM, "ring_wait_end %s wedged", ring->name);
+ return -EAGAIN;
+ }
+ } while (!time_after(ticks, end));
+ CTR1(KTR_DRM, "ring_wait_end %s busy", ring->name);
+ return -EBUSY;
+}
+
+int intel_ring_begin(struct intel_ring_buffer *ring,
+ int num_dwords)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ int n = 4*num_dwords;
+ int ret;
+
+ if (atomic_load_acq_int(&dev_priv->mm.wedged))
+ return -EIO;
+
+ if (ring->tail + n > ring->effective_size) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (ring->space < n) {
+ ret = intel_wait_ring_buffer(ring, n);
+ if (ret != 0)
+ return ret;
+ }
+
+ ring->space -= n;
+ return 0;
+}
+
+void intel_ring_advance(struct intel_ring_buffer *ring)
+{
+ ring->tail &= ring->size - 1;
+ ring->write_tail(ring, ring->tail);
+}
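+
+/*
+ * Canonical emit sequence for callers (sketch, mirroring
+ * bsd_ring_flush() below): reserve space first, write exactly the
+ * reserved number of dwords, then advance:
+ *
+ *	ret = intel_ring_begin(ring, 2);
+ *	if (ret)
+ *		return ret;
+ *	intel_ring_emit(ring, MI_FLUSH);
+ *	intel_ring_emit(ring, MI_NOOP);
+ *	intel_ring_advance(ring);
+ *
+ * intel_ring_begin() accounts for the space; emitting more dwords than
+ * reserved corrupts the ring bookkeeping.
+ */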
+
+static const struct intel_ring_buffer render_ring = {
+ .name = "render ring",
+ .id = RCS,
+ .mmio_base = RENDER_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_render_ring,
+ .write_tail = ring_write_tail,
+ .flush = render_ring_flush,
+ .add_request = render_ring_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = render_ring_get_irq,
+ .irq_put = render_ring_put_irq,
+ .dispatch_execbuffer = render_ring_dispatch_execbuffer,
+ .cleanup = render_ring_cleanup,
+ .sync_to = render_ring_sync_to,
+ .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
+ MI_SEMAPHORE_SYNC_RV,
+ MI_SEMAPHORE_SYNC_RB},
+ .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
+};
+
+/* ring buffer for bit-stream decoder */
+
+static const struct intel_ring_buffer bsd_ring = {
+ .name = "bsd ring",
+ .id = VCS,
+ .mmio_base = BSD_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_ring_common,
+ .write_tail = ring_write_tail,
+ .flush = bsd_ring_flush,
+ .add_request = ring_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = bsd_ring_get_irq,
+ .irq_put = bsd_ring_put_irq,
+ .dispatch_execbuffer = ring_dispatch_execbuffer,
+};
+
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
+ uint32_t value)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+
+ /* Every tail move must follow the sequence below */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+ I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+ if (_intel_wait_for(ring->dev,
+ (I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, 50,
+ true, "915g6i") != 0)
+ DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+ I915_WRITE_TAIL(ring, value);
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+}
+
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+ uint32_t invalidate, uint32_t flush)
+{
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ if (invalidate & I915_GEM_GPU_DOMAINS)
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+static int
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ uint32_t offset, uint32_t len)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ return gen6_ring_get_irq(ring,
+ GT_USER_INTERRUPT,
+ GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ return gen6_ring_put_irq(ring,
+ GT_USER_INTERRUPT,
+ GEN6_RENDER_USER_INTERRUPT);
+}
+
+static bool
+gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ return gen6_ring_get_irq(ring,
+ GT_GEN6_BSD_USER_INTERRUPT,
+ GEN6_BSD_USER_INTERRUPT);
+}
+
+static void
+gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ return gen6_ring_put_irq(ring,
+ GT_GEN6_BSD_USER_INTERRUPT,
+ GEN6_BSD_USER_INTERRUPT);
+}
+
+/* ring buffer for Video Codec for Gen6+ */
+static const struct intel_ring_buffer gen6_bsd_ring = {
+ .name = "gen6 bsd ring",
+ .id = VCS,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_ring_common,
+ .write_tail = gen6_bsd_ring_write_tail,
+ .flush = gen6_ring_flush,
+ .add_request = gen6_add_request,
+ .get_seqno = gen6_ring_get_seqno,
+ .irq_get = gen6_bsd_ring_get_irq,
+ .irq_put = gen6_bsd_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
+ .sync_to = gen6_bsd_ring_sync_to,
+ .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
+ MI_SEMAPHORE_SYNC_INVALID,
+ MI_SEMAPHORE_SYNC_VB},
+ .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
+};
+
+/* Blitter support (SandyBridge+) */
+
+static bool
+blt_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ return gen6_ring_get_irq(ring,
+ GT_BLT_USER_INTERRUPT,
+ GEN6_BLITTER_USER_INTERRUPT);
+}
+
+static void
+blt_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ gen6_ring_put_irq(ring,
+ GT_BLT_USER_INTERRUPT,
+ GEN6_BLITTER_USER_INTERRUPT);
+}
+
+static int blt_ring_flush(struct intel_ring_buffer *ring,
+ uint32_t invalidate, uint32_t flush)
+{
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ if (invalidate & I915_GEM_DOMAIN_RENDER)
+ cmd |= MI_INVALIDATE_TLB;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+static const struct intel_ring_buffer gen6_blt_ring = {
+ .name = "blt ring",
+ .id = BCS,
+ .mmio_base = BLT_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_ring_common,
+ .write_tail = ring_write_tail,
+ .flush = blt_ring_flush,
+ .add_request = gen6_add_request,
+ .get_seqno = gen6_ring_get_seqno,
+ .irq_get = blt_ring_get_irq,
+ .irq_put = blt_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
+ .sync_to = gen6_blt_ring_sync_to,
+ .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
+ MI_SEMAPHORE_SYNC_BV,
+ MI_SEMAPHORE_SYNC_INVALID},
+ .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
+};
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+
+ *ring = render_ring;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ ring->flush = gen6_render_ring_flush;
+ ring->irq_get = gen6_render_ring_get_irq;
+ ring->irq_put = gen6_render_ring_put_irq;
+ ring->get_seqno = gen6_ring_get_seqno;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->get_seqno = pc_render_get_seqno;
+ }
+
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ }
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start,
+ uint32_t size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+
+ *ring = render_ring;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ ring->irq_get = gen6_render_ring_get_irq;
+ ring->irq_put = gen6_render_ring_put_irq;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->get_seqno = pc_render_get_seqno;
+ }
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
+
+ ring->size = size;
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev))
+ ring->effective_size -= 128;
+
+ ring->map.offset = start;
+ ring->map.size = size;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
+
+ drm_core_ioremap_wc(&ring->map, dev);
+ if (ring->map.virtual == NULL) {
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ ring->virtual_start = (void *)ring->map.virtual;
+ return 0;
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[VCS];
+
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ *ring = gen6_bsd_ring;
+ else
+ *ring = bsd_ring;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
+
+ *ring = gen6_blt_ring;
+
+ return intel_init_ring_buffer(dev, ring);
+}
diff --git a/sys/dev/drm2/i915/intel_ringbuffer.h b/sys/dev/drm2/i915/intel_ringbuffer.h
new file mode 100644
index 0000000..c20777f
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_ringbuffer.h
@@ -0,0 +1,203 @@
+/*
+ * $FreeBSD$
+ */
+
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+struct intel_hw_status_page {
+ uint32_t *page_addr;
+ unsigned int gfx_addr;
+ struct drm_i915_gem_object *obj;
+};
+
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+
+#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+
+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+
+#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+
+#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+
+#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
+
+struct intel_ring_buffer {
+ const char *name;
+ enum intel_ring_id {
+ RCS = 0x0,
+ VCS,
+ BCS,
+ } id;
+#define I915_NUM_RINGS 3
+ uint32_t mmio_base;
+ void *virtual_start;
+ struct drm_device *dev;
+ struct drm_i915_gem_object *obj;
+
+ uint32_t head;
+ uint32_t tail;
+ int space;
+ int size;
+ int effective_size;
+ struct intel_hw_status_page status_page;
+
+ /** We track the position of the requests in the ring buffer, and
+ * when each is retired we increment last_retired_head as the GPU
+ * must have finished processing the request and so we know we
+ * can advance the ringbuffer up to that position.
+ *
+ * last_retired_head is set to -1 after the value is consumed so
+ * we can detect new retirements.
+ */
+ u32 last_retired_head;
+
+ struct mtx irq_lock;
+ uint32_t irq_refcount;
+ uint32_t irq_mask;
+	uint32_t irq_seqno;		/* last seqno seen at irq time */
+ uint32_t trace_irq_seqno;
+ uint32_t waiting_seqno;
+ uint32_t sync_seqno[I915_NUM_RINGS-1];
+ bool (*irq_get)(struct intel_ring_buffer *ring);
+ void (*irq_put)(struct intel_ring_buffer *ring);
+
+ int (*init)(struct intel_ring_buffer *ring);
+
+ void (*write_tail)(struct intel_ring_buffer *ring,
+ uint32_t value);
+ int (*flush)(struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains);
+ int (*add_request)(struct intel_ring_buffer *ring,
+ uint32_t *seqno);
+ uint32_t (*get_seqno)(struct intel_ring_buffer *ring);
+ int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+ uint32_t offset, uint32_t length);
+ void (*cleanup)(struct intel_ring_buffer *ring);
+ int (*sync_to)(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno);
+
+	u32 semaphore_register[3]; /* our mbox written by others */
+ u32 signal_mbox[2]; /* mboxes this ring signals to */
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * List of objects currently pending a GPU write flush.
+ *
+ * All elements on this list will belong to either the
+ * active_list or flushing_list, last_rendering_seqno can
+ * be used to differentiate between the two elements.
+ */
+ struct list_head gpu_write_list;
+
+ /**
+ * Do we have some not yet emitted requests outstanding?
+ */
+ uint32_t outstanding_lazy_request;
+
+ drm_local_map_t map;
+
+ void *private;
+};
+
+static inline unsigned
+intel_ring_flag(struct intel_ring_buffer *ring)
+{
+ return 1 << ring->id;
+}
+
+static inline uint32_t
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *other)
+{
+ int idx;
+
+ /*
+ * cs -> 0 = vcs, 1 = bcs
+ * vcs -> 0 = bcs, 1 = cs,
+ * bcs -> 0 = cs, 1 = vcs.
+ */
+
+ idx = (other - ring) - 1;
+ if (idx < 0)
+ idx += I915_NUM_RINGS;
+
+ return idx;
+}
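+
+/*
+ * Example: the render ring (array index 0) syncing against the blitter
+ * (index 2) yields idx = (2 - 0) - 1 = 1, matching the
+ * "cs -> ... 1 = bcs" table above.
+ */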
+
+static inline uint32_t
+intel_read_status_page(struct intel_ring_buffer *ring, int reg)
+{
+
+ return (atomic_load_acq_32(ring->status_page.page_addr + reg));
+}
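+
+/*
+ * The acquire load above is intended to pair with the GPU's write of
+ * the status page (e.g. the MI_STORE_DWORD_INDEX breadcrumb emitted by
+ * the add_request hooks), so a seqno observed here is safe to act on
+ * without additional barriers on the CPU side.
+ */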
+
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
+{
+
+ return (intel_wait_ring_buffer(ring, ring->size - 8));
+}
+
+int intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+ uint32_t data)
+{
+ *(volatile uint32_t *)((char *)ring->virtual_start +
+ ring->tail) = data;
+ ring->tail += 4;
+}
+
+void intel_ring_advance(struct intel_ring_buffer *ring);
+
+uint32_t intel_ring_get_seqno(struct intel_ring_buffer *ring);
+
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+
+static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+{
+ return ring->tail;
+}
+
+void i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno);
+
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start,
+ uint32_t size);
+
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/sys/dev/drm2/i915/intel_sdvo.c b/sys/dev/drm2/i915/intel_sdvo.c
new file mode 100644
index 0000000..0800d38
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_sdvo.c
@@ -0,0 +1,2680 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_sdvo_regs.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/iicbus/iic.h>
+#include <dev/iicbus/iiconf.h>
+#include "iicbus_if.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
+ "NTSC_M" , "NTSC_J" , "NTSC_443",
+ "PAL_B" , "PAL_D" , "PAL_G" ,
+ "PAL_H" , "PAL_I" , "PAL_M" ,
+ "PAL_N" , "PAL_NC" , "PAL_60" ,
+ "SECAM_B" , "SECAM_D" , "SECAM_G" ,
+ "SECAM_K" , "SECAM_K1", "SECAM_L" ,
+ "SECAM_60"
+};
+
+#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
+
+struct intel_sdvo {
+ struct intel_encoder base;
+
+ device_t i2c;
+ u8 slave_addr;
+
+ device_t ddc_iic_bus, ddc;
+
+ /* Register for the SDVO device: SDVOB or SDVOC */
+ int sdvo_reg;
+
+ /* Active outputs controlled by this SDVO output */
+ uint16_t controlled_output;
+
+ /*
+ * Capabilities of the SDVO device returned by
+ * intel_sdvo_get_capabilities()
+ */
+ struct intel_sdvo_caps caps;
+
+ /* Pixel clock limitations reported by the SDVO device, in kHz */
+ int pixel_clock_min, pixel_clock_max;
+
+ /*
+ * For a multi-function SDVO device, this tracks the
+ * currently attached outputs.
+ */
+ uint16_t attached_output;
+
+ /*
+ * Hotplug activation bits for this device
+ */
+ uint8_t hotplug_active[2];
+
+ /**
+ * This is used to select the color range of RGB outputs in HDMI mode.
+ * It is only valid when using TMDS encoding and 8 bits per color.
+ */
+ uint32_t color_range;
+
+ /**
+ * This is set if we're going to treat the device as TV-out.
+ *
+ * While we have these nice friendly flags for output types that ought
+ * to decide this for us, the S-Video output on our HDMI+S-Video card
+ * shows up as RGB1 (VGA).
+ */
+ bool is_tv;
+
+ /* Index of the current TV format */
+ int tv_format_index;
+
+ /**
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
+
+ /**
+ * This is set if we detect the output of the SDVO device as LVDS
+ * and have a valid fixed mode to use with the panel.
+ */
+ bool is_lvds;
+
+ /**
+ * Fixed panel mode for the LVDS output.
+ */
+ struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+ /* DDC bus used by this SDVO encoder */
+ uint8_t ddc_bus;
+
+ /* Input timings for adjusted_mode */
+ struct intel_sdvo_dtd input_dtd;
+};
+
+struct intel_sdvo_connector {
+ struct intel_connector base;
+
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ enum hdmi_force_audio force_audio;
+
+ /* This contains all currently supported TV formats */
+ u8 tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format;
+
+ /* Properties for SDVO-TV */
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
+
+ /* Property shared by SDVO-TV and LVDS */
+ struct drm_property *brightness;
+
+ /* Current settings for the properties above */
+ u32 left_margin, right_margin, top_margin, bottom_margin;
+
+ /* Ranges of the margin settings. */
+ u32 max_hscan, max_vscan;
+ u32 max_hpos, cur_hpos;
+ u32 max_vpos, cur_vpos;
+ u32 cur_brightness, max_brightness;
+ u32 cur_contrast, max_contrast;
+ u32 cur_saturation, max_saturation;
+ u32 cur_hue, max_hue;
+ u32 cur_sharpness, max_sharpness;
+ u32 cur_flicker_filter, max_flicker_filter;
+ u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
+ u32 cur_flicker_filter_2d, max_flicker_filter_2d;
+ u32 cur_tv_chroma_filter, max_tv_chroma_filter;
+ u32 cur_tv_luma_filter, max_tv_luma_filter;
+ u32 cur_dot_crawl, max_dot_crawl;
+};
+
+static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_sdvo, base.base);
+}
+
+static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_sdvo, base);
+}
+
+static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
+static bool
+intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type);
+static bool
+intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 bval = val, cval = val;
+ int i;
+
+ if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(intel_sdvo->sdvo_reg, val);
+ I915_READ(intel_sdvo->sdvo_reg);
+ return;
+ }
+
+ if (intel_sdvo->sdvo_reg == SDVOB) {
+ cval = I915_READ(SDVOC);
+ } else {
+ bval = I915_READ(SDVOB);
+ }
+ /*
+ * Write the registers twice for luck. Sometimes,
+ * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic.
+ */
+ for (i = 0; i < 2; i++) {
+ I915_WRITE(SDVOB, bval);
+ I915_READ(SDVOB);
+ I915_WRITE(SDVOC, cval);
+ I915_READ(SDVOC);
+ }
+}
+
+static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
+{
+ struct iic_msg msgs[] = {
+ {
+ .slave = intel_sdvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .slave = intel_sdvo->slave_addr,
+ .flags = IIC_M_RD,
+ .len = 1,
+ .buf = ch,
+ }
+ };
+ int ret;
+
+ if ((ret = iicbus_transfer(intel_sdvo->i2c, msgs, 2)) == 0)
+ return true;
+
+ DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+ return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+ u8 cmd;
+ const char *name;
+} sdvo_cmd_names[] = {
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
+#define SDVO_NAME(sdvo) (IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+
+static void
+intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ int i;
+
+ if ((drm_debug_flag & DRM_DEBUGBITS_KMS) == 0)
+ return;
+ DRM_DEBUG_KMS("%s: W: %02X ", SDVO_NAME(intel_sdvo), cmd);
+ for (i = 0; i < args_len; i++)
+ printf("%02X ", ((const u8 *)args)[i]);
+ for (; i < 8; i++)
+ printf(" ");
+ for (i = 0; i < DRM_ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd) {
+ printf("(%s)", sdvo_cmd_names[i].name);
+ break;
+ }
+ }
+ if (i == DRM_ARRAY_SIZE(sdvo_cmd_names))
+ printf("(%02X)", cmd);
+ printf("\n");
+}
+
+static const char *cmd_status_names[] = {
+ "Power on",
+ "Success",
+ "Not supported",
+ "Invalid arg",
+ "Pending",
+ "Target not specified",
+ "Scaling not supported"
+};
+
+static bool
+intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args,
+ int args_len)
+{
+ u8 buf[args_len*2 + 2], status;
+ struct iic_msg msgs[args_len + 3];
+ int i, ret;
+
+ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
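+ /*
+ * Each argument becomes a two-byte register write: argument i lands
+ * at register SDVO_I2C_ARG_0 - i (the argument registers are laid
+ * out downwards), and the opcode is written to SDVO_I2C_OPCODE last.
+ */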
+ for (i = 0; i < args_len; i++) {
+ msgs[i].slave = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2 * i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((const u8*)args)[i];
+ }
+ msgs[i].slave = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* The following two messages read back the command status. */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].slave = intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].slave = intel_sdvo->slave_addr;
+ msgs[i+2].flags = IIC_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ ret = iicbus_transfer(intel_sdvo->i2c, msgs, i+3);
+ if (ret != 0) {
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ return (false);
+ }
+#if 0
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+static bool
+intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response,
+ int response_len)
+{
+ u8 retry = 5;
+ u8 status;
+ int i;
+
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ */
+ if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status))
+ goto log_fail;
+
+ while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+ DELAY(15);
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS, &status))
+ goto log_fail;
+ }
+
+ if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) {
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ printf("(%s)", cmd_status_names[status]);
+ else
+ printf("(??? %d)", status);
+ }
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)
+ printf(" %02X", ((u8 *)response)[i]);
+ }
+ if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)
+ printf("\n");
+ return (true);
+
+log_fail:
+ if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)
+ printf("... failed\n");
+ return (false);
+}
+
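+/*
+ * The pixel multiplier keeps the dot clock handed to the encoder
+ * roughly within the 100-200 MHz range the SDVO link expects: e.g. a
+ * 40 MHz mode is sent at 4x (160 MHz) and the encoder drops the
+ * repeated pixels again.
+ */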
+static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+ if (mode->clock >= 100000)
+ return 1;
+ else if (mode->clock >= 50000)
+ return 2;
+ else
+ return 4;
+}
+
+static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+ u8 ddc_bus)
+{
+ /* This must be the immediately preceding write before the i2c xfer */
+ return intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
+}
+
+static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+ return false;
+
+ return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+}
+
+static bool
+intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
+ return false;
+
+ return intel_sdvo_read_response(intel_sdvo, value, len);
+}
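+
+/*
+ * Both helpers above follow the same wire sequence: write the opcode
+ * and arguments, poll the status byte until the device leaves the
+ * PENDING state, then read the return registers.  intel_sdvo_set_value()
+ * discards the return payload (response_len == 0), while
+ * intel_sdvo_get_value() copies it out of SDVO_I2C_RETURN_0 onwards.
+ */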
+
+static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_set_target_input_args targets = {0};
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
+{
+ struct intel_sdvo_get_trained_inputs_response response;
+
+ CTASSERT(sizeof(response) == 1);
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
+ return false;
+
+ *input_1 = response.input0_trained;
+ *input_2 = response.input1_trained;
+ return true;
+}
+
+static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 outputs)
+{
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
+}
+
+static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
+ int mode)
+{
+ u8 state = SDVO_ENCODER_STATE_ON;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ state = SDVO_ENCODER_STATE_ON;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ state = SDVO_ENCODER_STATE_STANDBY;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ state = SDVO_ENCODER_STATE_SUSPEND;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ state = SDVO_ENCODER_STATE_OFF;
+ break;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
+ int *clock_min,
+ int *clock_max)
+{
+ struct intel_sdvo_pixel_clock_range clocks;
+
+ CTASSERT(sizeof(clocks) == 4);
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
+ return false;
+
+ /* Convert the values from units of 10 kHz to kHz. */
+ *clock_min = clocks.min * 10;
+ *clock_max = clocks.max * 10;
+ return true;
+}
+
+static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
+ u16 outputs)
+{
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
+}
+
+static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_timing(intel_sdvo,
+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_timing(intel_sdvo,
+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+ uint16_t clock,
+ uint16_t width,
+ uint16_t height)
+{
+ struct intel_sdvo_preferred_input_timing_args args;
+
+ memset(&args, 0, sizeof(args));
+ args.clock = clock;
+ args.width = width;
+ args.height = height;
+ args.interlace = 0;
+
+ if (intel_sdvo->is_lvds &&
+ (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+ intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+ args.scaled = 1;
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+}
+
+static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ CTASSERT(sizeof(dtd->part1) == 8);
+ CTASSERT(sizeof(dtd->part2) == 8);
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
+static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+ const struct drm_display_mode *mode)
+{
+ uint16_t width, height;
+ uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ uint16_t h_sync_offset, v_sync_offset;
+ int mode_clock;
+
+ width = mode->crtc_hdisplay;
+ height = mode->crtc_vdisplay;
+
+ /* do some mode translations */
+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+ mode_clock = mode->clock;
+ mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
+ mode_clock /= 10;
+ dtd->part1.clock = mode_clock;
+
+ dtd->part1.h_active = width & 0xff;
+ dtd->part1.h_blank = h_blank_len & 0xff;
+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+ ((h_blank_len >> 8) & 0xf);
+ dtd->part1.v_active = height & 0xff;
+ dtd->part1.v_blank = v_blank_len & 0xff;
+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+ ((v_blank_len >> 8) & 0xf);
+
+ dtd->part2.h_sync_off = h_sync_offset & 0xff;
+ dtd->part2.h_sync_width = h_sync_len & 0xff;
+ dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+ (v_sync_len & 0xf);
+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+ ((v_sync_len & 0x30) >> 4);
+
+ dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dtd->part2.dtd_flags |= 0x2;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dtd->part2.dtd_flags |= 0x4;
+
+ dtd->part2.sdvo_flags = 0;
+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+ dtd->part2.reserved = 0;
+}
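+
+/*
+ * Worked example (illustrative): for a 1280-pixel-wide mode,
+ * part1.h_active = 1280 & 0xff = 0x00 and the upper nibble of
+ * part1.h_high holds (1280 >> 8) & 0xf = 0x5; the 12-bit width is
+ * split 8/4 across the two fields, and intel_sdvo_get_mode_from_dtd()
+ * below performs the inverse unpacking.
+ */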
+
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *mode,
+ const struct intel_sdvo_dtd *dtd)
+{
+ mode->hdisplay = dtd->part1.h_active;
+ mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+ mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+ mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+ mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode->vdisplay = dtd->part1.v_active;
+ mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode->vsync_start = mode->vdisplay;
+ mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode->vsync_end = mode->vsync_start +
+ (dtd->part2.v_sync_off_width & 0xf);
+ mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+ mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+ mode->clock = dtd->part1.clock * 10;
+
+ mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ if (dtd->part2.dtd_flags & 0x2)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dtd->part2.dtd_flags & 0x4)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
+
+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_encode encode;
+
+ CTASSERT(sizeof(encode) == 2);
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
+}
+
+static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+ uint8_t mode)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
+ uint8_t mode)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+{
+ int i, j;
+ uint8_t set_buf_index[2];
+ uint8_t av_split;
+ uint8_t buf_size;
+ uint8_t buf[48];
+ uint8_t *pos;
+
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+ for (i = 0; i <= av_split; i++) {
+ set_buf_index[0] = i; set_buf_index[1] = 0;
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, &buf_size, 1);
+
+ pos = buf;
+ for (j = 0; j <= buf_size; j += 8) {
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA,
+ NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, pos, 8);
+ pos += 8;
+ }
+ }
+}
+#endif
+
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+ uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+ uint8_t set_buf_index[2] = { 1, 0 };
+ uint64_t *data = (uint64_t *)&avi_if;
+ unsigned i;
+
+ intel_dip_infoframe_csum(&avi_if);
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ for (i = 0; i < sizeof(avi_if); i += 8) {
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ data, 8))
+ return false;
+ data++;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+}
+
+static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_tv_format format;
+ uint32_t format_map;
+
+ format_map = 1 << intel_sdvo->tv_format_index;
+ memset(&format, 0, sizeof(format));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+ CTASSERT(sizeof(format) == 6);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
+}
+
+static bool
+intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
+ struct drm_display_mode *mode)
+{
+ struct intel_sdvo_dtd output_dtd;
+
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return false;
+
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ return false;
+
+ return true;
+}
+
+static bool
+intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Reset the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return false;
+
+ if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay))
+ return false;
+
+ if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+ &intel_sdvo->input_dtd))
+ return false;
+
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
+
+ return true;
+}
+
+static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ int multiplier;
+
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (intel_sdvo->is_tv) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
+ return false;
+
+ (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
+ } else if (intel_sdvo->is_lvds) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+ intel_sdvo->sdvo_lvds_fixed_mode))
+ return false;
+
+ (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
+ }
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will factor out the multiplier during mode_set.
+ */
+ multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+
+ return true;
+}
+
+static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ u32 sdvox;
+ struct intel_sdvo_in_out_map in_out;
+ struct intel_sdvo_dtd input_dtd, output_dtd;
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ int rate;
+
+ if (!mode)
+ return;
+
+ /* First, set the input mapping for the first input to our controlled
+ * output. This is only correct if we're a single-input device, in
+ * which case the first input is the output from the appropriate SDVO
+ * channel on the motherboard. In a two-input device, the first input
+ * will be SDVOB and the second SDVOC.
+ */
+ in_out.in0 = intel_sdvo->attached_output;
+ in_out.in1 = 0;
+
+ intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+
+ /* Set the output timings to the screen */
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
+
+ /* LVDS has a special fixed output timing. */
+ if (intel_sdvo->is_lvds)
+ intel_sdvo_get_dtd_from_mode(&output_dtd,
+ intel_sdvo->sdvo_lvds_fixed_mode);
+ else
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return;
+
+ if (intel_sdvo->has_hdmi_monitor) {
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_sdvo,
+ SDVO_COLORIMETRY_RGB256);
+ intel_sdvo_set_avi_infoframe(intel_sdvo);
+ } else
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
+
+ if (intel_sdvo->is_tv &&
+ !intel_sdvo_set_tv_format(intel_sdvo))
+ return;
+
+ /* We already tried to get the input timing in mode_fixup and
+ * filled it into adjusted_mode.
+ */
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
+
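+ /*
+ * Tell the encoder which multiplier the CRTC applied, so that it
+ * can factor the repeated pixels back out (see the comment in
+ * intel_sdvo_mode_fixup()).
+ */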
+ switch (pixel_multiplier) {
+ default:
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+ }
+ if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
+ return;
+
+ /* Set the SDVO control regs. */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* The real mode polarity is set by the SDVO commands, using
+ * struct intel_sdvo_dtd. */
+ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
+ if (intel_sdvo->is_hdmi)
+ sdvox |= intel_sdvo->color_range;
+ if (INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_BORDER_ENABLE;
+ } else {
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ switch (intel_sdvo->sdvo_reg) {
+ case SDVOB:
+ sdvox &= SDVOB_PRESERVE_MASK;
+ break;
+ case SDVOC:
+ sdvox &= SDVOC_PRESERVE_MASK;
+ break;
+ }
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+ }
+
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+ else
+ sdvox |= TRANSCODER(intel_crtc->pipe);
+
+ if (intel_sdvo->has_hdmi_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* done in crtc_mode_set as the dpll_md reg must be written early */
+ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ /* done in crtc_mode_set as it lives inside the dpll register */
+ } else {
+ sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ }
+
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+ INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_STALL_SELECT;
+ intel_sdvo_write_sdvox(intel_sdvo, sdvox);
+}
+
+static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 temp;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+
+ if (mode == DRM_MODE_DPMS_OFF) {
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+ }
+ } else {
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0)
+ intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ for (i = 0; i < 2; i++)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+ * A lot of SDVO devices fail to notify of sync, but it's
+ * a given that if the status is a success, we succeeded.
+ */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
+ }
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+ }
+ return;
+}
+
+static int intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (intel_sdvo->pixel_clock_min > mode->clock)
+ return MODE_CLOCK_LOW;
+
+ if (intel_sdvo->pixel_clock_max < mode->clock)
+ return MODE_CLOCK_HIGH;
+
+ if (intel_sdvo->is_lvds) {
+ if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return MODE_OK;
+}
+
+static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
+{
+ CTASSERT(sizeof(*caps) == 8);
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
+}
+
+static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ u8 response[2];
+
+ /* HW Erratum: SDVO Hotplug is broken on all i945G chips; there's
+ * noise on the line. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ return false;
+
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &response, 2) && response[0];
+}
+
+static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
+{
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
+}
+
+static bool
+intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
+{
+ /* Is there more than one type of output? */
+ return bitcount16(intel_sdvo->caps.output_flags) > 1;
+}
+
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ return drm_get_edid(connector,
+ dev_priv->gmbus[dev_priv->crt_ddc_pin]);
+}
+
+static enum drm_connector_status
+intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
+
+ edid = intel_sdvo_get_edid(connector);
+
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+ u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
+ /*
+ * Don't use 1 as the argument of the DDC bus switch to get
+ * the EDID; that bus is used for the SDVO SPD ROM.
+ */
+ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ intel_sdvo->ddc_bus = ddc;
+ edid = intel_sdvo_get_edid(connector);
+ if (edid)
+ break;
+ }
+ /*
+ * If we found the EDID on the other bus,
+ * assume that is the correct DDC bus.
+ */
+ if (edid == NULL)
+ intel_sdvo->ddc_bus = saved_ddc;
+ }
+
+ /*
+ * When there is no EDID and no monitor is connected to the VGA
+ * port, try to use the CRT DDC to read the EDID for the DVI
+ * connector.
+ */
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ status = connector_status_unknown;
+ if (edid != NULL) {
+ /* DDC bus is shared, match EDID to connector type */
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (intel_sdvo->is_hdmi) {
+ intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
+ } else
+ status = connector_status_disconnected;
+ connector->display_info.raw_edid = NULL;
+ free(edid, DRM_MEM_KMS);
+ }
+
+ if (status == connector_status_connected) {
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
+ intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
+ }
+
+ return status;
+}
+
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+ uint16_t response;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ enum drm_connector_status ret;
+
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ return connector_status_unknown;
+
+ /* add 30ms delay when the output type might be TV */
+ if (intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+ drm_msleep(30, "915svo");
+
+ if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+ return connector_status_unknown;
+
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ intel_sdvo_connector->output_flag);
+
+ if (response == 0)
+ return connector_status_disconnected;
+
+ intel_sdvo->attached_output = response;
+
+ intel_sdvo->has_hdmi_monitor = false;
+ intel_sdvo->has_hdmi_audio = false;
+
+ if ((intel_sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (IS_TMDS(intel_sdvo_connector))
+ ret = intel_sdvo_tmds_sink_detect(connector);
+ else {
+ struct edid *edid;
+
+ /* if we have an edid check it matches the connection */
+ edid = intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
+ connector->display_info.raw_edid = NULL;
+ free(edid, DRM_MEM_KMS);
+ } else
+ ret = connector_status_connected;
+ }
+
+ /* May update encoder flags, e.g. needs_tv_clock for SDVO TV, etc. */
+ if (ret == connector_status_connected) {
+ intel_sdvo->is_tv = false;
+ intel_sdvo->is_lvds = false;
+ intel_sdvo->base.needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+ }
+
+ return ret;
+}
+
+static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+ struct edid *edid;
+
+ /* set the bus switch and get the modes */
+ edid = intel_sdvo_get_edid(connector);
+
+ /*
+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
+ * link between analog and digital outputs. So, if the regular SDVO
+ * DDC fails, check to see if the analog output is disconnected, in
+ * which case we'll look there for the digital DDC data.
+ */
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ if (edid != NULL) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
+
+ connector->display_info.raw_edid = NULL;
+ free(edid, DRM_MEM_KMS);
+ }
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note! This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+ 416, 0, 200, 201, 232, 233, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+ 416, 0, 240, 241, 272, 273, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+ 496, 0, 300, 301, 332, 333, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+ 736, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+ 736, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+ 736, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+ 800, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+ 800, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+ 816, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+ 816, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+ 816, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+ 816, 0, 540, 541, 572, 573, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+ 816, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+ 864, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+ 896, 0, 600, 601, 632, 633, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+ 928, 0, 624, 625, 656, 657, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+ 1016, 0, 766, 767, 798, 799, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+ 1120, 0, 768, 769, 800, 801, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+ 1376, 0, 1024, 1025, 1056, 1057, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_sdtv_resolution_request tv_res;
+ uint32_t reply = 0, format_map = 0;
+ int i;
+
+ /* Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+ format_map = 1 << intel_sdvo->tv_format_index;
+ memcpy(&tv_res, &format_map,
+ min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
+
+ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
+ return;
+
+ CTASSERT(sizeof(tv_res) == 3);
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
+ if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
+ return;
+
+ for (i = 0; i < DRM_ARRAY_SIZE(sdvo_tv_modes); i++)
+ if (reply & (1 << i)) {
+ struct drm_display_mode *nmode;
+ nmode = drm_mode_duplicate(connector->dev,
+ &sdvo_tv_modes[i]);
+ if (nmode)
+ drm_mode_probed_add(connector, nmode);
+ }
+}
+
+static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_display_mode *newmode;
+
+ /*
+ * Attempt to get the mode list from DDC.
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+ intel_ddc_get_modes(connector, intel_sdvo->i2c);
+ if (!list_empty(&connector->probed_modes))
+ goto end;
+
+ /* Fetch modes from VBT */
+ if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+ newmode = drm_mode_duplicate(connector->dev,
+ dev_priv->sdvo_lvds_vbt_mode);
+ if (newmode != NULL) {
+ /* Guarantee the mode is preferred */
+ newmode->type = (DRM_MODE_TYPE_PREFERRED |
+ DRM_MODE_TYPE_DRIVER);
+ drm_mode_probed_add(connector, newmode);
+ }
+ }
+
+end:
+ list_for_each_entry(newmode, &connector->probed_modes, head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_sdvo->sdvo_lvds_fixed_mode =
+ drm_mode_duplicate(connector->dev, newmode);
+
+ drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
+ 0);
+
+ intel_sdvo->is_lvds = true;
+ break;
+ }
+ }
+}
+
+static int intel_sdvo_get_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+ if (IS_TV(intel_sdvo_connector))
+ intel_sdvo_get_tv_modes(connector);
+ else if (IS_LVDS(intel_sdvo_connector))
+ intel_sdvo_get_lvds_modes(connector);
+ else
+ intel_sdvo_get_ddc_modes(connector);
+
+ return !list_empty(&connector->probed_modes);
+}
+
+static void
+intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct drm_device *dev = connector->dev;
+
+ if (intel_sdvo_connector->left)
+ drm_property_destroy(dev, intel_sdvo_connector->left);
+ if (intel_sdvo_connector->right)
+ drm_property_destroy(dev, intel_sdvo_connector->right);
+ if (intel_sdvo_connector->top)
+ drm_property_destroy(dev, intel_sdvo_connector->top);
+ if (intel_sdvo_connector->bottom)
+ drm_property_destroy(dev, intel_sdvo_connector->bottom);
+ if (intel_sdvo_connector->hpos)
+ drm_property_destroy(dev, intel_sdvo_connector->hpos);
+ if (intel_sdvo_connector->vpos)
+ drm_property_destroy(dev, intel_sdvo_connector->vpos);
+ if (intel_sdvo_connector->saturation)
+ drm_property_destroy(dev, intel_sdvo_connector->saturation);
+ if (intel_sdvo_connector->contrast)
+ drm_property_destroy(dev, intel_sdvo_connector->contrast);
+ if (intel_sdvo_connector->hue)
+ drm_property_destroy(dev, intel_sdvo_connector->hue);
+ if (intel_sdvo_connector->sharpness)
+ drm_property_destroy(dev, intel_sdvo_connector->sharpness);
+ if (intel_sdvo_connector->flicker_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
+ if (intel_sdvo_connector->flicker_filter_2d)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
+ if (intel_sdvo_connector->flicker_filter_adaptive)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
+ if (intel_sdvo_connector->tv_luma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
+ if (intel_sdvo_connector->tv_chroma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
+ if (intel_sdvo_connector->dot_crawl)
+ drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
+ if (intel_sdvo_connector->brightness)
+ drm_property_destroy(dev, intel_sdvo_connector->brightness);
+}
+
+static void intel_sdvo_destroy(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+ if (intel_sdvo_connector->tv_format)
+ drm_property_destroy(connector->dev,
+ intel_sdvo_connector->tv_format);
+
+ intel_sdvo_destroy_enhance_property(connector);
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, DRM_MEM_KMS);
+}
+
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ if (!intel_sdvo->is_hdmi)
+ return false;
+
+ edid = intel_sdvo_get_edid(connector);
+ if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+
+ return has_audio;
+}
+
+static int
+intel_sdvo_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint16_t temp_value;
+ uint8_t cmd;
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_sdvo_connector->force_audio)
+ return 0;
+
+ intel_sdvo_connector->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_sdvo_detect_hdmi_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (has_audio == intel_sdvo->has_hdmi_audio)
+ return 0;
+
+ intel_sdvo->has_hdmi_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_sdvo->color_range)
+ return 0;
+
+ intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+#define CHECK_PROPERTY(name, NAME) \
+ if (intel_sdvo_connector->name == property) { \
+ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+ if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+ cmd = SDVO_CMD_SET_##NAME; \
+ intel_sdvo_connector->cur_##name = temp_value; \
+ goto set_value; \
+ }
+
+ if (property == intel_sdvo_connector->tv_format) {
+ if (val >= TV_FORMAT_NUM)
+ return -EINVAL;
+
+ if (intel_sdvo->tv_format_index ==
+ intel_sdvo_connector->tv_format_supported[val])
+ return 0;
+
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
+ goto done;
+ } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
+ temp_value = val;
+ if (intel_sdvo_connector->left == property) {
+ drm_connector_property_set_value(connector,
+ intel_sdvo_connector->right, val);
+ if (intel_sdvo_connector->left_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (intel_sdvo_connector->right == property) {
+ drm_connector_property_set_value(connector,
+ intel_sdvo_connector->left, val);
+ if (intel_sdvo_connector->right_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (intel_sdvo_connector->top == property) {
+ drm_connector_property_set_value(connector,
+ intel_sdvo_connector->bottom, val);
+ if (intel_sdvo_connector->top_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ } else if (intel_sdvo_connector->bottom == property) {
+ drm_connector_property_set_value(connector,
+ intel_sdvo_connector->top, val);
+ if (intel_sdvo_connector->bottom_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ }
+ CHECK_PROPERTY(hpos, HPOS)
+ CHECK_PROPERTY(vpos, VPOS)
+ CHECK_PROPERTY(saturation, SATURATION)
+ CHECK_PROPERTY(contrast, CONTRAST)
+ CHECK_PROPERTY(hue, HUE)
+ CHECK_PROPERTY(brightness, BRIGHTNESS)
+ CHECK_PROPERTY(sharpness, SHARPNESS)
+ CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+ CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+ CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+ CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+ CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+ CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+ }
+
+ return -EINVAL; /* unknown property */
+
+set_value:
+ if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
+ return -EIO;
+
+done:
+ if (intel_sdvo->base.base.crtc) {
+ struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+ crtc->y, crtc->fb);
+ }
+
+ return 0;
+#undef CHECK_PROPERTY
+}
+
+static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
+ .dpms = intel_sdvo_dpms,
+ .mode_fixup = intel_sdvo_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_sdvo_mode_set,
+ .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = intel_sdvo_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_sdvo_set_property,
+ .destroy = intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
+ .get_modes = intel_sdvo_get_modes,
+ .mode_valid = intel_sdvo_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+
+ if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ intel_sdvo->sdvo_lvds_fixed_mode);
+
+ device_delete_child(intel_sdvo->base.base.dev->device,
+ intel_sdvo->ddc_iic_bus);
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
+ .destroy = intel_sdvo_enc_destroy,
+};
+
+static void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
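+	/* Deliberate fall-through: each case ORs in every lower-priority output. */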
+ switch (sdvo->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = bitcount16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+}
+
+/**
+ * Choose the appropriate DDC bus for the control bus switch command for this
+ * SDVO output, based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+
+ if (IS_SDVOB(reg))
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
+
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ intel_sdvo_guess_ddc_bus(sdvo);
+}
+
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+ u8 pin;
+
+ if (IS_SDVOB(reg))
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
+
+ pin = GMBUS_PORT_DPB;
+ if (mapping->initialized)
+ pin = mapping->i2c_pin;
+
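+	/* Prefer the BIOS-mapped GMBUS pin, driven in forced bit-bang mode at
+	 * 1MHz; unknown pins fall back to the DPB port. */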
+ if (pin < GMBUS_NUM_PORTS) {
+ sdvo->i2c = dev_priv->gmbus[pin];
+ intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
+ intel_gmbus_force_bit(sdvo->i2c, true);
+ } else {
+ sdvo->i2c = dev_priv->gmbus[GMBUS_PORT_DPB];
+ }
+}
+
+static bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
+{
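+	/* HDMI capability is reported per encoder, so 'device' is not consulted. */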
+ return intel_sdvo_check_supp_encode(intel_sdvo);
+}
+
+static u8
+intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+ if (IS_SDVOB(sdvo_reg)) {
+ my_mapping = &dev_priv->sdvo_mappings[0];
+ other_mapping = &dev_priv->sdvo_mappings[1];
+ } else {
+ my_mapping = &dev_priv->sdvo_mappings[1];
+ other_mapping = &dev_priv->sdvo_mappings[0];
+ }
+
+ /* If the BIOS described our SDVO device, take advantage of it. */
+ if (my_mapping->slave_addr)
+ return my_mapping->slave_addr;
+
+ /* If the BIOS only described a different SDVO device, use the
+ * address that it isn't using.
+ */
+ if (other_mapping->slave_addr) {
+ if (other_mapping->slave_addr == 0x70)
+ return 0x72;
+ else
+ return 0x70;
+ }
+
+	/* No SDVO device info was found for either DVO port, so fall back
+	 * to the mapping assumptions used before BIOS parsing.
+	 */
+ if (IS_SDVOB(sdvo_reg))
+ return 0x70;
+ else
+ return 0x72;
+}
+
+static void
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+ struct intel_sdvo *encoder)
+{
+ drm_connector_init(encoder->base.base.dev,
+ &connector->base.base,
+ &intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+
+ drm_connector_helper_add(&connector->base.base,
+ &intel_sdvo_connector_helper_funcs);
+
+ connector->base.base.interlace_allowed = 1;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+
+ intel_connector_attach_encoder(&connector->base, &encoder->base);
+#if 0
+ drm_sysfs_connector_add(&connector->base.base);
+#endif
+}
+
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+{
+ struct drm_device *dev = connector->base.base.dev;
+
+ intel_attach_force_audio_property(&connector->base.base);
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ intel_attach_broadcast_rgb_property(&connector->base.base);
+}
+
+static bool
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
+ DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_sdvo->hotplug_active[0] |= 1 << device;
+ /* Some SDVO devices have one-shot hotplug interrupts.
+ * Ensure that they get re-enabled when an interrupt happens.
+ */
+ intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+ intel_sdvo_enable_hotplug(intel_encoder);
+	} else
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ intel_sdvo->is_hdmi = true;
+ }
+ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (intel_sdvo->is_hdmi)
+ intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
+ DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+ intel_sdvo->controlled_output |= type;
+ intel_sdvo_connector->output_flag = type;
+
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
+ intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+
+ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ goto err;
+
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
+ DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+ intel_sdvo_connector_init(intel_sdvo_connector,
+ intel_sdvo);
+ return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
+ DRM_MEM_KMS, M_WAITOK | M_ZERO);
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+{
+ intel_sdvo->is_tv = false;
+ intel_sdvo->base.needs_tv_clock = false;
+ intel_sdvo->is_lvds = false;
+
+	/* An SDVO device may only have an XXX1 function block if it also has
+	 * the corresponding XXX0 block. */
+
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!intel_sdvo_analog_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!intel_sdvo_analog_init(intel_sdvo, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
+ unsigned char bytes[2];
+
+ intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(intel_sdvo),
+ bytes[0], bytes[1]);
+ return false;
+ }
+ intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+ return true;
+}
+
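+/*
+ * Build the TV "mode" enum property from the bitmask of formats the device
+ * reports, mapping each enum index to a tv_format_names[] entry.
+ */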
+static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct intel_sdvo_tv_format format;
+ uint32_t format_map, i;
+
+ if (!intel_sdvo_set_target_output(intel_sdvo, type))
+ return false;
+
+ CTASSERT(sizeof(format) == 6);
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
+
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+ if (format_map == 0)
+ return false;
+
+ intel_sdvo_connector->format_supported_num = 0;
+ for (i = 0 ; i < TV_FORMAT_NUM; i++)
+ if (format_map & (1 << i))
+ intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
+
+ intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", intel_sdvo_connector->format_supported_num);
+ if (!intel_sdvo_connector->tv_format)
+ return false;
+
+ for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+ drm_property_add_enum(
+ intel_sdvo_connector->tv_format, i,
+ i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
+ drm_connector_attach_property(&intel_sdvo_connector->base.base,
+ intel_sdvo_connector->tv_format, 0);
+ return true;
+}
+
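+/*
+ * For each enhancement the device reports, query its maximum and current
+ * values, cache them, and expose a matching DRM range property.
+ */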
+#define ENHANCEMENT(name, NAME) do { \
+ if (enhancements.name) { \
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ intel_sdvo_connector->max_##name = data_value[0]; \
+ intel_sdvo_connector->cur_##name = response; \
+ intel_sdvo_connector->name = \
+ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+ if (!intel_sdvo_connector->name) return false; \
+ drm_connector_attach_property(connector, \
+ intel_sdvo_connector->name, \
+ intel_sdvo_connector->cur_##name); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while (0)
+
+static bool
+intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+	/* When horizontal overscan is supported, add the left/right margin properties. */
+ if (enhancements.overscan_h) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_hscan = data_value[0];
+ intel_sdvo_connector->left_margin = data_value[0] - response;
+ intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
+ intel_sdvo_connector->left =
+ drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->left)
+ return false;
+
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->left,
+ intel_sdvo_connector->left_margin);
+
+ intel_sdvo_connector->right =
+ drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->right)
+ return false;
+
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->right,
+ intel_sdvo_connector->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ if (enhancements.overscan_v) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_vscan = data_value[0];
+ intel_sdvo_connector->top_margin = data_value[0] - response;
+ intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
+ intel_sdvo_connector->top =
+ drm_property_create_range(dev, 0,
+ "top_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->top)
+ return false;
+
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->top,
+ intel_sdvo_connector->top_margin);
+
+ intel_sdvo_connector->bottom =
+ drm_property_create_range(dev, 0,
+ "bottom_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->bottom)
+ return false;
+
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->bottom,
+ intel_sdvo_connector->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ ENHANCEMENT(hpos, HPOS);
+ ENHANCEMENT(vpos, VPOS);
+ ENHANCEMENT(saturation, SATURATION);
+ ENHANCEMENT(contrast, CONTRAST);
+ ENHANCEMENT(hue, HUE);
+ ENHANCEMENT(sharpness, SHARPNESS);
+ ENHANCEMENT(brightness, BRIGHTNESS);
+ ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+ ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+ ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_dot_crawl = 1;
+ intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ intel_sdvo_connector->dot_crawl =
+ drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+ if (!intel_sdvo_connector->dot_crawl)
+ return false;
+
+ drm_connector_attach_property(connector,
+ intel_sdvo_connector->dot_crawl,
+ intel_sdvo_connector->cur_dot_crawl);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ }
+
+ return true;
+}
+
+static bool
+intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ ENHANCEMENT(brightness, BRIGHTNESS);
+
+ return true;
+}
+#undef ENHANCEMENT
+
+static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector)
+{
+ union {
+ struct intel_sdvo_enhancements_reply reply;
+ uint16_t response;
+ } enhancements;
+
+ CTASSERT(sizeof(enhancements) == 2);
+
+ enhancements.response = 0;
+ intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
+ if (enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
+ }
+
+ if (IS_TV(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else if (IS_LVDS(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+}
+
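+/*
+ * Softc for the DDC proxy: a newbus child that fronts an iicbus and routes
+ * DDC transactions through the owning SDVO encoder.
+ */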
+struct intel_sdvo_ddc_proxy_sc {
+ struct intel_sdvo *intel_sdvo;
+ device_t port;
+};
+
+static int
+intel_sdvo_ddc_proxy_probe(device_t idev)
+{
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+intel_sdvo_ddc_proxy_attach(device_t idev)
+{
+ struct intel_sdvo_ddc_proxy_sc *sc;
+
+ sc = device_get_softc(idev);
+ sc->port = device_add_child(idev, "iicbus", -1);
+ if (sc->port == NULL)
+ return (ENXIO);
+ device_quiet(sc->port);
+ bus_generic_attach(idev);
+ return (0);
+}
+
+static int
+intel_sdvo_ddc_proxy_detach(device_t idev)
+{
+ struct intel_sdvo_ddc_proxy_sc *sc;
+ device_t port;
+
+ sc = device_get_softc(idev);
+ port = sc->port;
+ bus_generic_detach(idev);
+ if (port != NULL)
+ device_delete_child(idev, port);
+ return (0);
+}
+
+static int
+intel_sdvo_ddc_proxy_reset(device_t idev, u_char speed, u_char addr,
+ u_char *oldaddr)
+{
+ struct intel_sdvo_ddc_proxy_sc *sc;
+ struct intel_sdvo *sdvo;
+
+ sc = device_get_softc(idev);
+ sdvo = sc->intel_sdvo;
+
+ return (IICBUS_RESET(device_get_parent(sdvo->i2c), speed, addr,
+ oldaddr));
+}
+
+static int
+intel_sdvo_ddc_proxy_transfer(device_t idev, struct iic_msg *msgs, uint32_t num)
+{
+ struct intel_sdvo_ddc_proxy_sc *sc;
+ struct intel_sdvo *sdvo;
+
+ sc = device_get_softc(idev);
+ sdvo = sc->intel_sdvo;
+
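+	/* The SDVO chip multiplexes several DDC buses; select the one chosen
+	 * at init time before forwarding the transfer. */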
+ if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return (EIO);
+
+ return (iicbus_transfer(sdvo->i2c, msgs, num));
+}
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, struct drm_device *dev,
+ int sdvo_reg)
+{
+ struct intel_sdvo_ddc_proxy_sc *sc;
+ int ret;
+
+ sdvo->ddc_iic_bus = device_add_child(dev->device,
+ "intel_sdvo_ddc_proxy", sdvo_reg);
+ if (sdvo->ddc_iic_bus == NULL) {
+ DRM_ERROR("cannot create ddc proxy bus %d\n", sdvo_reg);
+ return (false);
+ }
+ device_quiet(sdvo->ddc_iic_bus);
+ ret = device_probe_and_attach(sdvo->ddc_iic_bus);
+ if (ret != 0) {
+ DRM_ERROR("cannot attach proxy bus %d error %d\n",
+ sdvo_reg, ret);
+ device_delete_child(dev->device, sdvo->ddc_iic_bus);
+ return (false);
+ }
+ sc = device_get_softc(sdvo->ddc_iic_bus);
+ sc->intel_sdvo = sdvo;
+
+ sdvo->ddc = sc->port;
+ return (true);
+}
+
+static device_method_t intel_sdvo_ddc_proxy_methods[] = {
+ DEVMETHOD(device_probe, intel_sdvo_ddc_proxy_probe),
+ DEVMETHOD(device_attach, intel_sdvo_ddc_proxy_attach),
+ DEVMETHOD(device_detach, intel_sdvo_ddc_proxy_detach),
+ DEVMETHOD(iicbus_reset, intel_sdvo_ddc_proxy_reset),
+ DEVMETHOD(iicbus_transfer, intel_sdvo_ddc_proxy_transfer),
+ DEVMETHOD_END
+};
+static driver_t intel_sdvo_ddc_proxy_driver = {
+ "intel_sdvo_ddc_proxy",
+ intel_sdvo_ddc_proxy_methods,
+ sizeof(struct intel_sdvo_ddc_proxy_sc)
+};
+static devclass_t intel_sdvo_devclass;
+DRIVER_MODULE_ORDERED(intel_sdvo_ddc_proxy, drmn, intel_sdvo_ddc_proxy_driver,
+ intel_sdvo_devclass, 0, 0, SI_ORDER_FIRST);
+
+bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
+ int i;
+
+ intel_sdvo = malloc(sizeof(struct intel_sdvo), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ intel_sdvo->sdvo_reg = sdvo_reg;
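+	/* get_slave_addr() returns the 8-bit bus address (0x70/0x72);
+	 * shift to the 7-bit form used by the rest of the driver. */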
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev, sdvo_reg)) {
+ free(intel_sdvo, DRM_MEM_KMS);
+ return false;
+ }
+
+ /* encoder type will be decided later */
+ intel_encoder = &intel_sdvo->base;
+ intel_encoder->type = INTEL_OUTPUT_SDVO;
+ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
+
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+ u8 byte;
+
+ if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
+ DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ goto err;
+ }
+ }
+
+ if (IS_SDVOB(sdvo_reg))
+ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+ else
+ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+
+	/* is_lvds defaults to false; intel_sdvo_output_setup() may set it. */
+ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+ goto err;
+
+ /* Set up hotplug command - note paranoia about contents of reply.
+ * We assume that the hardware is in a sane state, and only touch
+ * the bits we think we understand.
+ */
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
+ intel_sdvo->hotplug_active[0] &= ~0x3;
+
+ if (!intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags)) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ goto err;
+ }
+
+ intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+
+	/* Set the input timing for the screen; always assume input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ goto err;
+
+ if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ &intel_sdvo->pixel_clock_min,
+ &intel_sdvo->pixel_clock_max))
+ goto err;
+
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+ "input 1: %c, input 2: %c, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(intel_sdvo),
+ intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+ intel_sdvo->caps.device_rev_id,
+ intel_sdvo->pixel_clock_min / 1000,
+ intel_sdvo->pixel_clock_max / 1000,
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ /* check currently supported outputs */
+ intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+ intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ free(intel_sdvo, DRM_MEM_KMS);
+
+ return false;
+}
diff --git a/sys/dev/drm2/i915/intel_sdvo_regs.h b/sys/dev/drm2/i915/intel_sdvo_regs.h
new file mode 100644
index 0000000..0937197
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_sdvo_regs.h
@@ -0,0 +1,725 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST (0)
+#define SDVO_OUTPUT_TMDS0 (1 << 0)
+#define SDVO_OUTPUT_RGB0 (1 << 1)
+#define SDVO_OUTPUT_CVBS0 (1 << 2)
+#define SDVO_OUTPUT_SVID0 (1 << 3)
+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
+#define SDVO_OUTPUT_SCART0 (1 << 5)
+#define SDVO_OUTPUT_LVDS0 (1 << 6)
+#define SDVO_OUTPUT_TMDS1 (1 << 8)
+#define SDVO_OUTPUT_RGB1 (1 << 9)
+#define SDVO_OUTPUT_CVBS1 (1 << 10)
+#define SDVO_OUTPUT_SVID1 (1 << 11)
+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
+#define SDVO_OUTPUT_SCART1 (1 << 13)
+#define SDVO_OUTPUT_LVDS1 (1 << 14)
+#define SDVO_OUTPUT_LAST (14)
+
+struct intel_sdvo_caps {
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
+} __attribute__((packed));
+
+/** This matches the EDID DTD structure, more or less */
+struct intel_sdvo_dtd {
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+ /** bits 6-7 of vsync offset at bits 6-7 */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
+} __attribute__((packed));
+
+struct intel_sdvo_pixel_clock_range {
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct intel_sdvo_preferred_input_timing_args {
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0 0x07
+#define SDVO_I2C_ARG_1 0x06
+#define SDVO_I2C_ARG_2 0x05
+#define SDVO_I2C_ARG_3 0x04
+#define SDVO_I2C_ARG_4 0x03
+#define SDVO_I2C_ARG_5 0x02
+#define SDVO_I2C_ARG_6 0x01
+#define SDVO_I2C_ARG_7 0x00
+#define SDVO_I2C_OPCODE 0x08
+#define SDVO_I2C_CMD_STATUS 0x09
+#define SDVO_I2C_RETURN_0 0x0a
+#define SDVO_I2C_RETURN_1 0x0b
+#define SDVO_I2C_RETURN_2 0x0c
+#define SDVO_I2C_RETURN_3 0x0d
+#define SDVO_I2C_RETURN_4 0x0e
+#define SDVO_I2C_RETURN_5 0x0f
+#define SDVO_I2C_RETURN_6 0x10
+#define SDVO_I2C_RETURN_7 0x11
+#define SDVO_I2C_VENDOR_BEGIN 0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON 0x0
+#define SDVO_CMD_STATUS_SUCCESS 0x1
+#define SDVO_CMD_STATUS_NOTSUPP 0x2
+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
+#define SDVO_CMD_STATUS_PENDING 0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
+struct intel_sdvo_get_trained_inputs_response {
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+struct intel_sdvo_in_out_map {
+ u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT 0x10
+struct intel_sdvo_set_target_input_args {
+ unsigned int target_1:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
+# define SDVO_DTD_SDVO_FLAGS			SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct intel_sdvo_tv_format {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_TV_FORMAT 0x28
+
+#define SDVO_CMD_SET_TV_FORMAT 0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
+struct intel_sdvo_sdtv_resolution_request {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+struct intel_sdvo_sdtv_resolution_reply {
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get the supported resolutions with square pixel aspect ratio that can be
+   scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
+
+struct intel_sdvo_hdtv_resolution_request {
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+struct intel_sdvo_hdtv_resolution_reply {
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power states; returns info for encoder and monitor, relying
+   on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+/* Get power state; returns info for encoder and monitor, relying on the
+   last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE 0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
+# define SDVO_ENCODER_STATE_ON (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
+# define SDVO_ENCODER_STATE_OFF (1 << 3)
+# define SDVO_MONITOR_STATE_ON (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
+# define SDVO_MONITOR_STATE_OFF (1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct sdvo_panel_power_sequencing {
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
+struct sdvo_max_backlight_reply {
+ u8 max_value;
+ u8 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
+struct sdvo_get_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
+struct sdvo_set_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
+# define SDVO_DISPLAY_STATE_ON (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
+struct intel_sdvo_enhancements_reply {
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+
+/* Picture enhancement limits below are dependent on the current TV format,
+ * and thus need to be queried and set after it.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
+#define SDVO_CMD_GET_MAX_SATURATION 0x55
+#define SDVO_CMD_GET_MAX_HUE 0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
+struct intel_sdvo_enhancement_limits_reply {
+ u16 max_value;
+ u16 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
+# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
+#define SDVO_CMD_GET_SATURATION 0x56
+#define SDVO_CMD_SET_SATURATION 0x57
+#define SDVO_CMD_GET_HUE 0x59
+#define SDVO_CMD_SET_HUE 0x5a
+#define SDVO_CMD_GET_BRIGHTNESS 0x5c
+#define SDVO_CMD_SET_BRIGHTNESS 0x5d
+#define SDVO_CMD_GET_CONTRAST 0x5f
+#define SDVO_CMD_SET_CONTRAST 0x60
+#define SDVO_CMD_GET_OVERSCAN_H 0x62
+#define SDVO_CMD_SET_OVERSCAN_H 0x63
+#define SDVO_CMD_GET_OVERSCAN_V 0x65
+#define SDVO_CMD_SET_OVERSCAN_V 0x66
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
+#define SDVO_CMD_GET_SHARPNESS 0x6e
+#define SDVO_CMD_SET_SHARPNESS 0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
+struct intel_sdvo_enhancements_arg {
+ u16 value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL 0x70
+#define SDVO_CMD_SET_DOT_CRAWL 0x71
+# define SDVO_DOT_CRAWL_ON (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_GET_DITHER 0x72
+#define SDVO_CMD_SET_DITHER 0x73
+# define SDVO_DITHER_ON (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
+# define SDVO_CONTROL_BUS_PROM (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
+#define SDVO_CMD_GET_ENCODE 0x9e
+#define SDVO_CMD_SET_ENCODE 0x9f
+ #define SDVO_ENCODE_DVI 0x0
+ #define SDVO_ENCODE_HDMI 0x1
+#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
+#define SDVO_CMD_SET_COLORIMETRY 0x8e
+ #define SDVO_COLORIMETRY_RGB256 0x0
+ #define SDVO_COLORIMETRY_RGB220 0x1
+ #define SDVO_COLORIMETRY_YCrCb422 0x3
+ #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY 0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT 0x91
+#define SDVO_CMD_GET_AUDIO_STAT 0x92
+#define SDVO_CMD_SET_HBUF_INDEX 0x93
+#define SDVO_CMD_GET_HBUF_INDEX 0x94
+#define SDVO_CMD_GET_HBUF_INFO 0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
+#define SDVO_CMD_SET_HBUF_DATA 0x98
+#define SDVO_CMD_GET_HBUF_DATA 0x99
+#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
+ #define SDVO_HBUF_TX_DISABLED (0 << 6)
+ #define SDVO_HBUF_TX_ONCE (2 << 6)
+ #define SDVO_HBUF_TX_VSYNC (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
+#define SDVO_NEED_TO_STALL (1 << 7)
+
+struct intel_sdvo_encode {
+ u8 dvi_rev;
+ u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/sys/dev/drm2/i915/intel_sprite.c b/sys/dev/drm2/i915/intel_sprite.c
new file mode 100644
index 0000000..0737a5c
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_sprite.c
@@ -0,0 +1,669 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/drm_fourcc.h>
+
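+/*
+ * Program the IvyBridge sprite (SPR) registers for the given framebuffer,
+ * enabling the hardware scaler when the source and CRTC sizes differ.
+ */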
+static void
+ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+ u32 sprctl, sprscale = 0;
+ int pixel_size;
+
+ sprctl = I915_READ(SPRCTL(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ sprctl &= ~SPRITE_PIXFORMAT_MASK;
+ sprctl &= ~SPRITE_RGB_ORDER_RGBX;
+ sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SPRITE_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_YUYV:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+ pixel_size = 2;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+		sprctl |= SPRITE_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ sprctl |= SPRITE_TILED;
+
+ /* must disable */
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+ sprctl |= SPRITE_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ */
+ if (crtc_w != src_w || crtc_h != src_h) {
+ dev_priv->sprite_scaling_enabled = true;
+ sandybridge_update_wm(dev);
+ intel_wait_for_vblank(dev, pipe);
+ sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+ } else {
+ dev_priv->sprite_scaling_enabled = false;
+ /* potentially re-enable LP watermarks */
+ sandybridge_update_wm(dev);
+ }
+
+ I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+ } else {
+ unsigned long offset;
+
+ offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ I915_WRITE(SPRLINOFF(pipe), offset);
+ }
+ I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(SPRSCALE(pipe), sprscale);
+ I915_WRITE(SPRCTL(pipe), sprctl);
+ I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+ POSTING_READ(SPRSURF(pipe));
+}
+
+static void
+ivb_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
+ /* Can't leave the scaler enabled... */
+ I915_WRITE(SPRSCALE(pipe), 0);
+ /* Activate double buffered register update */
+ I915_WRITE(SPRSURF(pipe), 0);
+ POSTING_READ(SPRSURF(pipe));
+}
+
+static int
+ivb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+ sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+ I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
+
+ POSTING_READ(SPRKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+
+ if (sprctl & SPRITE_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (sprctl & SPRITE_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe, pixel_size;
+ u32 dvscntr, dvsscale = 0;
+
+ dvscntr = I915_READ(DVSCNTR(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ dvscntr &= ~DVS_PIXFORMAT_MASK;
+ dvscntr &= ~DVS_RGB_ORDER_XBGR;
+ dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dvscntr |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_YUYV:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_YVYU:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_UYVY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_VYUY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+ pixel_size = 2;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+ dvscntr |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dvscntr |= DVS_TILED;
+
+ /* must disable */
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+ dvscntr |= DVS_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ if (crtc_w != src_w || crtc_h != src_h)
+ dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+ I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+ } else {
+ unsigned long offset;
+
+ offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ I915_WRITE(DVSLINOFF(pipe), offset);
+ }
+ I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(DVSSCALE(pipe), dvsscale);
+ I915_WRITE(DVSCNTR(pipe), dvscntr);
+ I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+snb_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ /* Disable the scaler */
+ I915_WRITE(DVSSCALE(pipe), 0);
+ /* Flush double buffered register updates */
+ I915_WRITE(DVSSURF(pipe), 0);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+intel_enable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+}
+
+static void
+intel_disable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+}
+
+static int
+snb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+ dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+ I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
+
+ POSTING_READ(DVSKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+
+ if (dvscntr & DVS_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (dvscntr & DVS_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj, *old_obj;
+ int pipe = intel_plane->pipe;
+ int ret = 0;
+ int x = src_x >> 16, y = src_y >> 16;
+ int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
+ bool disable_primary = false;
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ old_obj = intel_plane->obj;
+
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ /* Pipe must be running... */
+ if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+ return -EINVAL;
+
+ if (crtc_x >= primary_w || crtc_y >= primary_h)
+ return -EINVAL;
+
+ /* Don't modify another pipe's plane */
+ if (intel_plane->pipe != intel_crtc->pipe)
+ return -EINVAL;
+
+ /*
+ * Clamp the width & height into the visible area. Note we don't
+ * try to scale the source if part of the visible region is offscreen.
+ * The caller must handle that by adjusting source offset and size.
+ */
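+	/* e.g. crtc_x = -8, crtc_w = 64 is clamped below to crtc_x = 0, crtc_w = 56 */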
+ if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
+ crtc_w += crtc_x;
+ crtc_x = 0;
+ }
+ if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
+ goto out;
+ if ((crtc_x + crtc_w) > primary_w)
+ crtc_w = primary_w - crtc_x;
+
+ if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
+ crtc_h += crtc_y;
+ crtc_y = 0;
+ }
+ if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
+ goto out;
+ if (crtc_y + crtc_h > primary_h)
+ crtc_h = primary_h - crtc_y;
+
+ if (!crtc_w || !crtc_h) /* Again, nothing to display */
+ goto out;
+
+	/*
+	 * We can take a larger source and scale it down, but
+	 * only so much... the max downscale is 16x on SNB and
+	 * 2x on IVB (see max_downscale in intel_plane_init()).
+	 */
+ if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
+ return -EINVAL;
+
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ if ((crtc_x == 0) && (crtc_y == 0) &&
+ (crtc_w == primary_w) && (crtc_h == primary_h))
+ disable_primary = true;
+
+ DRM_LOCK(dev);
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret)
+ goto out_unlock;
+
+ intel_plane->obj = obj;
+
+ /*
+ * Be sure to re-enable the primary before the sprite is no longer
+ * covering it fully.
+ */
+ if (!disable_primary && intel_plane->primary_disabled) {
+ intel_enable_primary(crtc);
+ intel_plane->primary_disabled = false;
+ }
+
+ intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
+ crtc_w, crtc_h, x, y, src_w, src_h);
+
+ if (disable_primary) {
+ intel_disable_primary(crtc);
+ intel_plane->primary_disabled = true;
+ }
+
+ /* Unpin old obj after new one is active to avoid ugliness */
+ if (old_obj) {
+ /*
+ * It's fairly common to simply update the position of
+ * an existing object. In that case, we don't need to
+ * wait for vblank to avoid ugliness, we only need to
+ * do the pin & ref bookkeeping.
+ */
+ if (old_obj != obj) {
+ DRM_UNLOCK(dev);
+ intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ DRM_LOCK(dev);
+ }
+ intel_unpin_fb_obj(old_obj);
+ }
+
+out_unlock:
+ DRM_UNLOCK(dev);
+out:
+ return ret;
+}
+
+static int
+intel_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int ret = 0;
+
+ if (intel_plane->primary_disabled) {
+ intel_enable_primary(plane->crtc);
+ intel_plane->primary_disabled = false;
+ }
+
+ intel_plane->disable_plane(plane);
+
+ if (!intel_plane->obj)
+ goto out;
+
+ DRM_LOCK(dev);
+ intel_unpin_fb_obj(intel_plane->obj);
+ intel_plane->obj = NULL;
+ DRM_UNLOCK(dev);
+out:
+
+ return ret;
+}
+
+static void intel_destroy_plane(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ intel_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ free(intel_plane, DRM_MEM_KMS);
+}
+
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+	if ((set->flags & (I915_SET_COLORKEY_DESTINATION |
+	    I915_SET_COLORKEY_SOURCE)) ==
+	    (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ ret = intel_plane->update_colorkey(plane, set);
+
+out_unlock:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *get = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ sx_xlock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ intel_plane->get_colorkey(plane, get);
+
+out_unlock:
+ sx_xunlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
+ .update_plane = intel_update_plane,
+ .disable_plane = intel_disable_plane,
+ .destroy = intel_destroy_plane,
+};
+
+static uint32_t snb_plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_plane *intel_plane;
+ unsigned long possible_crtcs;
+ int ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ intel_plane = malloc(sizeof(struct intel_plane), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ if (IS_GEN6(dev)) {
+ intel_plane->max_downscale = 16;
+ intel_plane->update_plane = snb_update_plane;
+ intel_plane->disable_plane = snb_disable_plane;
+ intel_plane->update_colorkey = snb_update_colorkey;
+ intel_plane->get_colorkey = snb_get_colorkey;
+ } else if (IS_GEN7(dev)) {
+ intel_plane->max_downscale = 2;
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->update_colorkey = ivb_update_colorkey;
+ intel_plane->get_colorkey = ivb_get_colorkey;
+ }
+
+ intel_plane->pipe = pipe;
+ possible_crtcs = (1 << pipe);
+ ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs, snb_plane_formats,
+ DRM_ARRAY_SIZE(snb_plane_formats), false);
+ if (ret)
+ free(intel_plane, DRM_MEM_KMS);
+
+ return ret;
+}
+
diff --git a/sys/dev/drm2/i915/intel_tv.c b/sys/dev/drm2/i915/intel_tv.c
new file mode 100644
index 0000000..93fcf0a
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_tv.c
@@ -0,0 +1,1609 @@
+/*
+ * Copyright © 2006-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file
+ * Integrated TV-out support for the 915GM and 945GM.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/intel_drv.h>
+
+enum tv_margin {
+ TV_MARGIN_LEFT, TV_MARGIN_TOP,
+ TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
+};
+
+/** Private structure for the integrated TV support */
+struct intel_tv {
+ struct intel_encoder base;
+
+ int type;
+ const char *tv_format;
+ int margin[4];
+ u32 save_TV_H_CTL_1;
+ u32 save_TV_H_CTL_2;
+ u32 save_TV_H_CTL_3;
+ u32 save_TV_V_CTL_1;
+ u32 save_TV_V_CTL_2;
+ u32 save_TV_V_CTL_3;
+ u32 save_TV_V_CTL_4;
+ u32 save_TV_V_CTL_5;
+ u32 save_TV_V_CTL_6;
+ u32 save_TV_V_CTL_7;
+ u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3;
+
+ u32 save_TV_CSC_Y;
+ u32 save_TV_CSC_Y2;
+ u32 save_TV_CSC_U;
+ u32 save_TV_CSC_U2;
+ u32 save_TV_CSC_V;
+ u32 save_TV_CSC_V2;
+ u32 save_TV_CLR_KNOBS;
+ u32 save_TV_CLR_LEVEL;
+ u32 save_TV_WIN_POS;
+ u32 save_TV_WIN_SIZE;
+ u32 save_TV_FILTER_CTL_1;
+ u32 save_TV_FILTER_CTL_2;
+ u32 save_TV_FILTER_CTL_3;
+
+ u32 save_TV_H_LUMA[60];
+ u32 save_TV_H_CHROMA[60];
+ u32 save_TV_V_LUMA[43];
+ u32 save_TV_V_CHROMA[43];
+
+ u32 save_TV_DAC;
+ u32 save_TV_CTL;
+};
+
+struct video_levels {
+ int blank, black, burst;
+};
+
+struct color_conversion {
+ u16 ry, gy, by, ay;
+ u16 ru, gu, bu, au;
+ u16 rv, gv, bv, av;
+};
+
+static const u32 filter_table[] = {
+ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+ 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0,
+ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+ 0x28003100, 0x28002F00, 0x00003100, 0x36403000,
+ 0x2D002CC0, 0x30003640, 0x2D0036C0,
+ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+ 0x28003100, 0x28002F00, 0x00003100,
+};
+
+/*
+ * Color conversion values have 3 separate fixed point formats:
+ *
+ * 10 bit fields (ay, au)
+ * 1.9 fixed point (b.bbbbbbbbb)
+ * 11 bit fields (ry, by, ru, gu, gv)
+ * exp.mantissa (ee.mmmmmmmmm)
+ * ee = 00 = 10^-1 (0.mmmmmmmmm)
+ * ee = 01 = 10^-2 (0.0mmmmmmmmm)
+ * ee = 10 = 10^-3 (0.00mmmmmmmmm)
+ * ee = 11 = 10^-4 (0.000mmmmmmmmm)
+ * 12 bit fields (gy, rv, bu)
+ * exp.mantissa (eee.mmmmmmmmm)
+ * eee = 000 = 10^-1 (0.mmmmmmmmm)
+ * eee = 001 = 10^-2 (0.0mmmmmmmmm)
+ * eee = 010 = 10^-3 (0.00mmmmmmmmm)
+ * eee = 011 = 10^-4 (0.000mmmmmmmmm)
+ * eee = 100 = reserved
+ * eee = 101 = reserved
+ * eee = 110 = reserved
+ * eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation)
+ *
+ * Saturation and contrast are 8 bits, with their own representation:
+ * 8 bit field (saturation, contrast)
+ * exp.mantissa (ee.mmmmmm)
+ * ee = 00 = 10^-1 (0.mmmmmm)
+ * ee = 01 = 10^0 (m.mmmmm)
+ * ee = 10 = 10^1 (mm.mmmm)
+ * ee = 11 = 10^2 (mmm.mmm)
+ *
+ * Simple conversion function:
+ *
+ * static u32
+ * float_to_csc_11(float f)
+ * {
+ * u32 exp;
+ * u32 mant;
+ * u32 ret;
+ *
+ * if (f < 0)
+ * f = -f;
+ *
+ * if (f >= 1) {
+ * exp = 0x7;
+ * mant = 1 << 8;
+ * } else {
+ * for (exp = 0; exp < 3 && f < 0.5; exp++)
+ * f *= 2.0;
+ * mant = (f * (1 << 9) + 0.5);
+ * if (mant >= (1 << 9))
+ * mant = (1 << 9) - 1;
+ * }
+ * ret = (exp << 9) | mant;
+ * return ret;
+ * }
+ */
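+
+/*
+ * For illustration only (not part of the driver): an inverse of
+ * float_to_csc_11() above, assuming the exp.mantissa layouts described
+ * here and ignoring the reserved and 10^0 encodings:
+ *
+ * static float
+ * csc_11_to_float(u32 v)
+ * {
+ *	u32 exp = (v >> 9) & 0x3;
+ *	u32 mant = v & 0x1ff;
+ *
+ *	return ((float)mant) / (1 << (9 + exp));
+ * }
+ *
+ * Decoding the NTSC-M luma row below this way yields the familiar BT.601
+ * coefficients: .ry = 0x0332 -> 306/2^10 ~ 0.299, .gy = 0x012d ->
+ * 301/2^9 ~ 0.587, .by = 0x07d3 -> 467/2^12 ~ 0.114.
+ */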
+
+/*
+ * Behold, magic numbers! If we plant them they might grow a big
+ * s-video cable to the sky... or something.
+ *
+ * Pre-converted to appropriate hex value.
+ */
+
+/*
+ * PAL & NTSC values for composite & s-video connections
+ */
+static const struct color_conversion ntsc_m_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_composite = {
+ .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion ntsc_m_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion ntsc_j_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
+ .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
+ .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_composite = {
+ .blank = 225, .black = 225, .burst = 113,
+};
+
+static const struct color_conversion ntsc_j_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
+ .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
+ .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_svideo = {
+ .blank = 266, .black = 266, .burst = 133,
+};
+
+static const struct color_conversion pal_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
+ .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
+ .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_composite = {
+ .blank = 237, .black = 237, .burst = 118,
+};
+
+static const struct color_conversion pal_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
+ .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_svideo = {
+ .blank = 280, .black = 280, .burst = 139,
+};
+
+static const struct color_conversion pal_m_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_composite = {
+ .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion pal_m_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion pal_n_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_composite = {
+ .blank = 225, .black = 267, .burst = 118,
+};
+
+static const struct color_conversion pal_n_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 139,
+};
+
+/*
+ * Component connections
+ */
+static const struct color_conversion sdtv_csc_yprpb = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
+};
+
+static const struct color_conversion sdtv_csc_rgb = {
+ .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+ .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+ .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct color_conversion hdtv_csc_yprpb = {
+ .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
+ .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
+};
+
+static const struct color_conversion hdtv_csc_rgb = {
+ .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+ .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+ .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct video_levels component_levels = {
+ .blank = 279, .black = 279, .burst = 0,
+};
+
+
+struct tv_mode {
+ const char *name;
+ int clock;
+ int refresh; /* in millihertz (for precision) */
+ u32 oversample;
+ int hsync_end, hblank_start, hblank_end, htotal;
+ bool progressive, trilevel_sync, component_only;
+ int vsync_start_f1, vsync_start_f2, vsync_len;
+ bool veq_ena;
+ int veq_start_f1, veq_start_f2, veq_len;
+ int vi_end_f1, vi_end_f2, nbr_end;
+ bool burst_ena;
+ int hburst_start, hburst_len;
+ int vburst_start_f1, vburst_end_f1;
+ int vburst_start_f2, vburst_end_f2;
+ int vburst_start_f3, vburst_end_f3;
+ int vburst_start_f4, vburst_end_f4;
+ /*
+ * subcarrier programming
+ */
+ int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+ u32 sc_reset;
+ bool pal_burst;
+ /*
+ * blank/black levels
+ */
+ const struct video_levels *composite_levels, *svideo_levels;
+ const struct color_conversion *composite_color, *svideo_color;
+ const u32 *filter_table;
+ int max_srcw;
+};
+
+
+/*
+ * Sub carrier DDA
+ *
+ * I think this works as follows:
+ *
+ * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096
+ *
+ * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value
+ *
+ * So,
+ * dda1_ideal = subcarrier/pixel * 4096
+ * dda1_inc = floor (dda1_ideal)
+ * dda2 = dda1_ideal - dda1_inc
+ *
+ * then pick a ratio for dda2 that gives the closest approximation. If
+ * you can't get close enough, you can play with dda3 as well. This
+ * seems likely to happen when dda2 is small as the jumps would be larger
+ *
+ * To invert this,
+ *
+ * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size)
+ *
+ * The constants below were all computed using a 107.520MHz clock
+ */
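+
+/*
+ * Worked example, assuming the formula above holds: NTSC-M below has
+ * dda1_inc = 135, dda2_inc = 20800, dda2_size = 27456, so with a 108 MHz
+ * pixel clock the subcarrier comes out as
+ * 108e6 * (135 + 20800/27456) / 4096 ~ 3.579545 MHz, the NTSC color
+ * subcarrier.
+ */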
+
+/**
+ * Register programming values for TV modes.
+ *
+ * These values account for the -1 adjustments the hardware requires.
+ */
+
+static const struct tv_mode tv_modes[] = {
+ {
+ .name = "NTSC-M",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_m_levels_composite,
+ .composite_color = &ntsc_m_csc_composite,
+ .svideo_levels = &ntsc_m_levels_svideo,
+ .svideo_color = &ntsc_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "NTSC-443",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 4093, .dda2_size = 27456,
+ .dda3_inc = 310, .dda3_size = 525,
+ .sc_reset = TV_SC_RESET_NEVER,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_m_levels_composite,
+ .composite_color = &ntsc_m_csc_composite,
+ .svideo_levels = &ntsc_m_levels_svideo,
+ .svideo_color = &ntsc_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "NTSC-J",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_j_levels_composite,
+ .composite_color = &ntsc_j_csc_composite,
+ .svideo_levels = &ntsc_j_levels_svideo,
+ .svideo_color = &ntsc_j_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "PAL-M",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 16704, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_m_levels_composite,
+ .composite_color = &pal_m_csc_composite,
+ .svideo_levels = &pal_m_levels_svideo,
+ .svideo_color = &pal_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+ .name = "PAL-N",
+ .clock = 108000,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ .hsync_end = 64, .hblank_end = 128,
+ .hblank_start = 844, .htotal = 863,
+
+ .progressive = false, .trilevel_sync = false,
+
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 24, .vi_end_f2 = 25,
+ .nbr_end = 286,
+
+ .burst_ena = true,
+ .hburst_start = 73, .hburst_len = 34,
+ .vburst_start_f1 = 8, .vburst_end_f1 = 285,
+ .vburst_start_f2 = 8, .vburst_end_f2 = 286,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 286,
+ .vburst_start_f4 = 9, .vburst_end_f4 = 285,
+
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 23578, .dda2_size = 27648,
+ .dda3_inc = 134, .dda3_size = 625,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_n_levels_composite,
+ .composite_color = &pal_n_csc_composite,
+ .svideo_levels = &pal_n_levels_svideo,
+ .svideo_color = &pal_n_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+ .name = "PAL",
+ .clock = 108000,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ .hsync_end = 64, .hblank_end = 142,
+ .hblank_start = 844, .htotal = 863,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 5, .vsync_start_f2 = 6,
+ .vsync_len = 5,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 15,
+
+ .vi_end_f1 = 24, .vi_end_f2 = 25,
+ .nbr_end = 286,
+
+ .burst_ena = true,
+ .hburst_start = 73, .hburst_len = 32,
+ .vburst_start_f1 = 8, .vburst_end_f1 = 285,
+ .vburst_start_f2 = 8, .vburst_end_f2 = 286,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 286,
+ .vburst_start_f4 = 9, .vburst_end_f4 = 285,
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 4122, .dda2_size = 27648,
+ .dda3_inc = 67, .dda3_size = 625,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_levels_composite,
+ .composite_color = &pal_csc_composite,
+ .svideo_levels = &pal_levels_svideo,
+ .svideo_color = &pal_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@60Hz",
+ .clock = 148800,
+ .refresh = 60000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1649,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@50Hz",
+ .clock = 148800,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1979,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ .max_srcw = 800
+ },
+ {
+ .name = "1080i@50Hz",
+ .clock = 148800,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2639,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "1080i@60Hz",
+ .clock = 148800,
+ .refresh = 60000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2199,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+};
+
+static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_tv, base.base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_tv,
+ base);
+}
+
+static void
+intel_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+ break;
+ }
+}
+
+static const struct tv_mode *
+intel_tv_mode_lookup(const char *tv_format)
+{
+ int i;
+
+ for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
+ const struct tv_mode *tv_mode = &tv_modes[i];
+
+ if (!strcmp(tv_format, tv_mode->name))
+ return tv_mode;
+ }
+ return NULL;
+}
+
+static const struct tv_mode *
+intel_tv_mode_find(struct intel_tv *intel_tv)
+{
+ return intel_tv_mode_lookup(intel_tv->tv_format);
+}
+
+static enum drm_mode_status
+intel_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+	/* Ensure the TV refresh is close (within 1Hz) to the desired refresh */
+ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
+ < 1000)
+ return MODE_OK;
+
+ return MODE_CLOCK_RANGE;
+}
+
+
+static bool
+intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_mode_config *drm_config = &dev->mode_config;
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct drm_encoder *other_encoder;
+
+ if (!tv_mode)
+ return false;
+
+ /* FIXME: lock encoder list */
+ list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
+ if (other_encoder != encoder &&
+ other_encoder->crtc == encoder->crtc)
+ return false;
+ }
+
+ adjusted_mode->clock = tv_mode->clock;
+ return true;
+}
+
+static void
+intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ u32 tv_ctl;
+ u32 hctl1, hctl2, hctl3;
+ u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
+ u32 scctl1, scctl2, scctl3;
+ int i, j;
+ const struct video_levels *video_levels;
+ const struct color_conversion *color_conversion;
+ bool burst_ena;
+ int pipe = intel_crtc->pipe;
+
+ if (!tv_mode)
+ return; /* can't happen (mode_prepare prevents this) */
+
+ tv_ctl = I915_READ(TV_CTL);
+ tv_ctl &= TV_CTL_SAVE;
+
+ switch (intel_tv->type) {
+ default:
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_Composite:
+ tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
+ video_levels = tv_mode->composite_levels;
+ color_conversion = tv_mode->composite_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ case DRM_MODE_CONNECTOR_Component:
+ tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
+ video_levels = &component_levels;
+ if (tv_mode->burst_ena)
+ color_conversion = &sdtv_csc_yprpb;
+ else
+ color_conversion = &hdtv_csc_yprpb;
+ burst_ena = false;
+ break;
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
+ video_levels = tv_mode->svideo_levels;
+ color_conversion = tv_mode->svideo_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ }
+ hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
+ (tv_mode->htotal << TV_HTOTAL_SHIFT);
+
+ hctl2 = (tv_mode->hburst_start << 16) |
+ (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
+
+ if (burst_ena)
+ hctl2 |= TV_BURST_ENA;
+
+ hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
+ (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
+
+ vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
+ (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
+ (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
+
+ vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
+ (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
+ (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
+
+ vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
+ (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
+ (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
+
+ if (tv_mode->veq_ena)
+ vctl3 |= TV_EQUAL_ENA;
+
+ vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
+ (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
+
+ vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
+ (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
+
+ vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
+ (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
+
+ vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
+ (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
+
+ if (intel_crtc->pipe == 1)
+ tv_ctl |= TV_ENC_PIPEB_SELECT;
+ tv_ctl |= tv_mode->oversample;
+
+ if (tv_mode->progressive)
+ tv_ctl |= TV_PROGRESSIVE;
+ if (tv_mode->trilevel_sync)
+ tv_ctl |= TV_TRILEVEL_SYNC;
+ if (tv_mode->pal_burst)
+ tv_ctl |= TV_PAL_BURST;
+
+ scctl1 = 0;
+ if (tv_mode->dda1_inc)
+ scctl1 |= TV_SC_DDA1_EN;
+ if (tv_mode->dda2_inc)
+ scctl1 |= TV_SC_DDA2_EN;
+ if (tv_mode->dda3_inc)
+ scctl1 |= TV_SC_DDA3_EN;
+ scctl1 |= tv_mode->sc_reset;
+ if (video_levels)
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+ scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
+
+ scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
+ tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT;
+
+ scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT |
+ tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
+
+ /* Enable two fixes for the chips that need them. */
+ if (dev->pci_device < 0x2772)
+ tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
+
+ I915_WRITE(TV_H_CTL_1, hctl1);
+ I915_WRITE(TV_H_CTL_2, hctl2);
+ I915_WRITE(TV_H_CTL_3, hctl3);
+ I915_WRITE(TV_V_CTL_1, vctl1);
+ I915_WRITE(TV_V_CTL_2, vctl2);
+ I915_WRITE(TV_V_CTL_3, vctl3);
+ I915_WRITE(TV_V_CTL_4, vctl4);
+ I915_WRITE(TV_V_CTL_5, vctl5);
+ I915_WRITE(TV_V_CTL_6, vctl6);
+ I915_WRITE(TV_V_CTL_7, vctl7);
+ I915_WRITE(TV_SC_CTL_1, scctl1);
+ I915_WRITE(TV_SC_CTL_2, scctl2);
+ I915_WRITE(TV_SC_CTL_3, scctl3);
+
+ if (color_conversion) {
+ I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
+ color_conversion->gy);
+ I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+ color_conversion->ay);
+ I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
+ color_conversion->gu);
+ I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
+ color_conversion->au);
+ I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
+ color_conversion->gv);
+ I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
+ color_conversion->av);
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+ else
+ I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+
+ if (video_levels)
+ I915_WRITE(TV_CLR_LEVEL,
+ ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
+ (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+ {
+ int pipeconf_reg = PIPECONF(pipe);
+ int dspcntr_reg = DSPCNTR(intel_crtc->plane);
+ int pipeconf = I915_READ(pipeconf_reg);
+ int dspcntr = I915_READ(dspcntr_reg);
+ int dspbase_reg = DSPADDR(intel_crtc->plane);
+ int xpos = 0x0, ypos = 0x0;
+ unsigned int xsize, ysize;
+ /* Pipe must be off here */
+ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+
+ /* Wait for vblank for the disable to take effect */
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
+ /* Wait for vblank for the disable to take effect. */
+ intel_wait_for_pipe_off(dev, intel_crtc->pipe);
+
+ /* Filter ctl must be set before TV_WIN_SIZE */
+ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
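+		/*
+		 * Active picture size: width from the horizontal blanking
+		 * interval, height from the last active line (two fields
+		 * when interlaced).
+		 */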
+ xsize = tv_mode->hblank_start - tv_mode->hblank_end;
+ if (tv_mode->progressive)
+ ysize = tv_mode->nbr_end + 1;
+ else
+ ysize = 2*tv_mode->nbr_end + 1;
+
+ xpos += intel_tv->margin[TV_MARGIN_LEFT];
+ ypos += intel_tv->margin[TV_MARGIN_TOP];
+ xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
+ I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
+ I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
+
+ I915_WRITE(pipeconf_reg, pipeconf);
+ I915_WRITE(dspcntr_reg, dspcntr);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ }
+
+ j = 0;
+ for (i = 0; i < 60; i++)
+ I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 60; i++)
+ I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
+ I915_WRITE(TV_CTL, tv_ctl);
+}
+
+static const struct drm_display_mode reported_modes[] = {
+ {
+ .name = "NTSC 480i",
+ .clock = 107520,
+ .hdisplay = 1280,
+ .hsync_start = 1368,
+ .hsync_end = 1496,
+ .htotal = 1712,
+
+ .vdisplay = 1024,
+ .vsync_start = 1027,
+ .vsync_end = 1034,
+ .vtotal = 1104,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+};
+
+/**
+ * Detects TV presence by checking for load.
+ *
+ * Requires that the current pipe's DPLL is active.
+ *
+ * \return the detected connector type (DRM_MODE_CONNECTOR_*), or -1 if
+ * no TV is detected.
+ */
+static int
+intel_tv_detect_type(struct intel_tv *intel_tv,
+ struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = &intel_tv->base.base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tv_ctl, save_tv_ctl;
+ u32 tv_dac, save_tv_dac;
+ int type;
+
+ /* Disable TV interrupts around load detect or we'll recurse */
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ mtx_lock(&dev_priv->irq_lock);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ mtx_unlock(&dev_priv->irq_lock);
+ }
+
+ save_tv_dac = tv_dac = I915_READ(TV_DAC);
+ save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+ /* Poll for TV detection */
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ if (intel_crtc->pipe == 1)
+ tv_ctl |= TV_ENC_PIPEB_SELECT;
+ else
+ tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+
+ tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
+ tv_dac |= (TVDAC_STATE_CHG_EN |
+ TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL |
+ TVDAC_C_SENSE_CTL |
+ DAC_CTL_OVERRIDE |
+ DAC_A_0_7_V |
+ DAC_B_0_7_V |
+ DAC_C_0_7_V);
+
+ I915_WRITE(TV_CTL, tv_ctl);
+ I915_WRITE(TV_DAC, tv_dac);
+ POSTING_READ(TV_DAC);
+
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+ type = -1;
+ tv_dac = I915_READ(TV_DAC);
+ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ type = -1;
+ }
+
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+
+ /* Restore interrupt config */
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ mtx_lock(&dev_priv->irq_lock);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ mtx_unlock(&dev_priv->irq_lock);
+ }
+
+ return type;
+}
+
+/*
+ * Set an accurate TV format according to the connector type, i.e. a
+ * Component connection should not be assigned an NTSC or PAL format.
+ */
+static void intel_tv_find_better_format(struct drm_connector *connector)
+{
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ int i;
+
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+ tv_mode->component_only)
+ return;
+
+
+ for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
+ tv_mode = tv_modes + i;
+
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+ tv_mode->component_only)
+ break;
+ }
+
+ intel_tv->tv_format = tv_mode->name;
+ drm_connector_property_set_value(connector,
+ connector->dev->mode_config.tv_mode_property, i);
+}
+
+/**
+ * Detect the TV connection.
+ *
+ * We need a pipe programmed in order to probe the TV; if none is active
+ * and force is not set, the cached connector status is returned.
+ */
+static enum drm_connector_status
+intel_tv_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_display_mode mode;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ int type;
+
+ mode = reported_modes[0];
+ drm_mode_set_crtcinfo(&mode, 0);
+
+ if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
+ type = intel_tv_detect_type(intel_tv, connector);
+ } else if (force) {
+ struct intel_load_detect_pipe tmp;
+
+ if (intel_get_load_detect_pipe(&intel_tv->base, connector,
+ &mode, &tmp)) {
+ type = intel_tv_detect_type(intel_tv, connector);
+ intel_release_load_detect_pipe(&intel_tv->base,
+ connector,
+ &tmp);
+ } else
+ return connector_status_unknown;
+ } else
+ return connector->status;
+
+ if (type < 0)
+ return connector_status_disconnected;
+
+ intel_tv->type = type;
+ intel_tv_find_better_format(connector);
+
+ return connector_status_connected;
+}
+
+static const struct input_res {
+ const char *name;
+ int w, h;
+} input_res_table[] = {
+ {"640x480", 640, 480},
+ {"800x600", 800, 600},
+ {"1024x768", 1024, 768},
+ {"1280x1024", 1280, 1024},
+ {"848x480", 848, 480},
+ {"1280x720", 1280, 720},
+ {"1920x1080", 1920, 1080},
+};
+
+/*
+ * Choose the preferred mode according to the line count of the TV format.
+ */
+static void
+intel_tv_chose_preferred_modes(struct drm_connector *connector,
+ struct drm_display_mode *mode_ptr)
+{
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+ if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
+ mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+ else if (tv_mode->nbr_end > 480) {
+ if (tv_mode->progressive == true && tv_mode->nbr_end < 720) {
+ if (mode_ptr->vdisplay == 720)
+ mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+ } else if (mode_ptr->vdisplay == 1080)
+ mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+}
+
+/**
+ * Return a set of fixed modes built from input_res_table, since we cannot
+ * probe modes off of TV connections.
+ */
+
+static int
+intel_tv_get_modes(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode_ptr;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ int j, count = 0;
+ u64 tmp;
+
+ for (j = 0; j < DRM_ARRAY_SIZE(input_res_table);
+ j++) {
+ const struct input_res *input = &input_res_table[j];
+ unsigned int hactive_s = input->w;
+ unsigned int vactive_s = input->h;
+
+ if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
+ continue;
+
+ if (input->w > 1024 && (!tv_mode->progressive
+ && !tv_mode->component_only))
+ continue;
+
+ mode_ptr = drm_mode_create(connector->dev);
+ if (!mode_ptr)
+ continue;
+ strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
+
+ mode_ptr->hdisplay = hactive_s;
+ mode_ptr->hsync_start = hactive_s + 1;
+ mode_ptr->hsync_end = hactive_s + 64;
+ if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
+ mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
+ mode_ptr->htotal = hactive_s + 96;
+
+ mode_ptr->vdisplay = vactive_s;
+ mode_ptr->vsync_start = vactive_s + 1;
+ mode_ptr->vsync_end = vactive_s + 32;
+ if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
+ mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
+ mode_ptr->vtotal = vactive_s + 33;
+
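+		/* refresh is in millihertz, so this computes the pixel clock in kHz */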
+ tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+ tmp *= mode_ptr->htotal;
+ tmp = tmp / 1000000;
+ mode_ptr->clock = (int) tmp;
+
+ mode_ptr->type = DRM_MODE_TYPE_DRIVER;
+ intel_tv_chose_preferred_modes(connector, mode_ptr);
+ drm_mode_probed_add(connector, mode_ptr);
+ count++;
+ }
+
+ return count;
+}
+
+static void
+intel_tv_destroy(struct drm_connector *connector)
+{
+#if 0
+ drm_sysfs_connector_remove(connector);
+#endif
+ drm_connector_cleanup(connector);
+ free(connector, DRM_MEM_KMS);
+}
+
+
+static int
+intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct drm_crtc *crtc = intel_tv->base.base.crtc;
+ int ret = 0;
+ bool changed = false;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret < 0)
+ goto out;
+
+ if (property == dev->mode_config.tv_left_margin_property &&
+ intel_tv->margin[TV_MARGIN_LEFT] != val) {
+ intel_tv->margin[TV_MARGIN_LEFT] = val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_right_margin_property &&
+ intel_tv->margin[TV_MARGIN_RIGHT] != val) {
+ intel_tv->margin[TV_MARGIN_RIGHT] = val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_top_margin_property &&
+ intel_tv->margin[TV_MARGIN_TOP] != val) {
+ intel_tv->margin[TV_MARGIN_TOP] = val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_bottom_margin_property &&
+ intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
+ intel_tv->margin[TV_MARGIN_BOTTOM] = val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_mode_property) {
+ if (val >= DRM_ARRAY_SIZE(tv_modes)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
+ goto out;
+
+ intel_tv->tv_format = tv_modes[val].name;
+ changed = true;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (changed && crtc)
+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+ crtc->y, crtc->fb);
+out:
+ return ret;
+}
+
+static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
+ .dpms = intel_tv_dpms,
+ .mode_fixup = intel_tv_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_tv_mode_set,
+ .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_tv_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = intel_tv_detect,
+ .destroy = intel_tv_destroy,
+ .set_property = intel_tv_set_property,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+ .mode_valid = intel_tv_mode_valid,
+ .get_modes = intel_tv_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+/*
+ * Enumerate the child dev array parsed from the VBT to check whether
+ * the integrated TV is present.
+ * If it is present, return 1; if it is not, return 0.
+ * If no child dev is parsed from the VBT, assume that the TV is present.
+ */
+static int tv_is_present_in_vbt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not TV, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+ p_child->device_type != DEVICE_TYPE_TV)
+ continue;
+		/* The device is regarded as present only when its
+		 * addin_offset is non-zero.
+		 */
+ if (p_child->addin_offset) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+void
+intel_tv_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_tv *intel_tv;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ u32 tv_dac_on, tv_dac_off, save_tv_dac;
+ char *tv_format_names[DRM_ARRAY_SIZE(tv_modes)];
+ int i, initial_mode = 0;
+
+ if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ return;
+
+ if (!tv_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("Integrated TV is not present.\n");
+ return;
+ }
+ /* Even if we have an encoder we may not have a connector */
+ if (!dev_priv->int_tv_support)
+ return;
+
+ /*
+ * Sanity check the TV output by checking to see if the
+ * DAC register holds a value
+ */
+ save_tv_dac = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+ tv_dac_on = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ tv_dac_off = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac);
+
+ /*
+ * If the register does not hold the state change enable
+ * bit, (either as a 0 or a 1), assume it doesn't really
+ * exist
+ */
+ if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 ||
+ (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
+ return;
+
+ intel_tv = malloc(sizeof(struct intel_tv), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+ intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
+ M_WAITOK | M_ZERO);
+
+ intel_encoder = &intel_tv->base;
+ connector = &intel_connector->base;
+
+	/* The documentation, for the older chipsets at least, recommends
+	 * using a polling method rather than hotplug detection for TVs.
+	 * This is because in order to perform the hotplug detection, the PLLs
+	 * for the TV must be kept alive, increasing power drain and starving
+	 * bandwidth from other encoders. Notably, for instance, it causes
+	 * pipe underruns on Crestline when this encoder is supposedly idle.
+ *
+ * More recent chipsets favour HDMI rather than integrated S-Video.
+ */
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+ DRM_MODE_CONNECTOR_SVIDEO);
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
+ DRM_MODE_ENCODER_TVDAC);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+ intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+ intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
+
+ /* BIOS margin values */
+ intel_tv->margin[TV_MARGIN_LEFT] = 54;
+ intel_tv->margin[TV_MARGIN_TOP] = 36;
+ intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+ intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
+
+ intel_tv->tv_format = tv_modes[initial_mode].name;
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
+ drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /* Create TV properties then attach current values */
+ for (i = 0; i < DRM_ARRAY_SIZE(tv_modes); i++)
+ tv_format_names[i] = __DECONST(char *, tv_modes[i].name);
+ drm_mode_create_tv_properties(dev,
+ DRM_ARRAY_SIZE(tv_modes),
+ tv_format_names);
+
+ drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+ initial_mode);
+ drm_connector_attach_property(connector,
+ dev->mode_config.tv_left_margin_property,
+ intel_tv->margin[TV_MARGIN_LEFT]);
+ drm_connector_attach_property(connector,
+ dev->mode_config.tv_top_margin_property,
+ intel_tv->margin[TV_MARGIN_TOP]);
+ drm_connector_attach_property(connector,
+ dev->mode_config.tv_right_margin_property,
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ drm_connector_attach_property(connector,
+ dev->mode_config.tv_bottom_margin_property,
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
+#if 0
+ drm_sysfs_connector_add(connector);
+#endif
+}