summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authordumbbell <dumbbell@FreeBSD.org>2016-03-08 20:33:02 +0000
committerdumbbell <dumbbell@FreeBSD.org>2016-03-08 20:33:02 +0000
commitca453b4fd51db2a41c104a93586ffb49834a681e (patch)
tree28a71ac77183e006bdc64bffca2daf00e060887b
parentca1f165017958711d7a25127b3807240b661ea41 (diff)
downloadFreeBSD-src-ca453b4fd51db2a41c104a93586ffb49834a681e.zip
FreeBSD-src-ca453b4fd51db2a41c104a93586ffb49834a681e.tar.gz
drm/i915: Update to match Linux 3.8.13
This update brings initial support for Haswell GPUs. Tested by: Many users of FreeBSD, PC-BSD and HardenedBSD Relnotes: yes Sponsored by: The FreeBSD Foundation Differential Revision: https://reviews.freebsd.org/D5554
-rw-r--r--sys/dev/agp/agp_i810.c220
-rw-r--r--sys/dev/agp/agp_i810.h34
-rw-r--r--sys/dev/drm2/drmP.h13
-rw-r--r--sys/dev/drm2/drm_atomic.h7
-rw-r--r--sys/dev/drm2/drm_dp_iic_helper.c4
-rw-r--r--sys/dev/drm2/drm_drv.c8
-rw-r--r--sys/dev/drm2/drm_linux_list.h120
-rw-r--r--sys/dev/drm2/drm_mem_util.h59
-rw-r--r--sys/dev/drm2/drm_os_freebsd.c114
-rw-r--r--sys/dev/drm2/drm_os_freebsd.h332
-rw-r--r--sys/dev/drm2/drm_pciids.h134
-rw-r--r--sys/dev/drm2/i915/dvo.h155
-rw-r--r--sys/dev/drm2/i915/dvo_ch7017.c418
-rw-r--r--sys/dev/drm2/i915/dvo_ch7xxx.c347
-rw-r--r--sys/dev/drm2/i915/dvo_ivch.c439
-rw-r--r--sys/dev/drm2/i915/dvo_ns2501.c601
-rw-r--r--sys/dev/drm2/i915/dvo_sil164.c282
-rw-r--r--sys/dev/drm2/i915/dvo_tfp410.c321
-rw-r--r--sys/dev/drm2/i915/i915_debug.c891
-rw-r--r--sys/dev/drm2/i915/i915_dma.c881
-rw-r--r--sys/dev/drm2/i915/i915_drm.h251
-rw-r--r--sys/dev/drm2/i915/i915_drv.c994
-rw-r--r--sys/dev/drm2/i915/i915_drv.h1456
-rw-r--r--sys/dev/drm2/i915/i915_gem.c2058
-rw-r--r--sys/dev/drm2/i915/i915_gem_context.c25
-rw-r--r--sys/dev/drm2/i915/i915_gem_evict.c55
-rw-r--r--sys/dev/drm2/i915/i915_gem_execbuffer.c798
-rw-r--r--sys/dev/drm2/i915/i915_gem_gtt.c605
-rw-r--r--sys/dev/drm2/i915/i915_gem_stolen.c92
-rw-r--r--sys/dev/drm2/i915/i915_gem_tiling.c34
-rw-r--r--sys/dev/drm2/i915/i915_irq.c743
-rw-r--r--sys/dev/drm2/i915/i915_reg.h103
-rw-r--r--sys/dev/drm2/i915/i915_suspend.c775
-rw-r--r--sys/dev/drm2/i915/intel_acpi.c256
-rw-r--r--sys/dev/drm2/i915/intel_bios.c94
-rw-r--r--sys/dev/drm2/i915/intel_bios.h7
-rw-r--r--sys/dev/drm2/i915/intel_crt.c378
-rw-r--r--sys/dev/drm2/i915/intel_ddi.c1140
-rw-r--r--sys/dev/drm2/i915/intel_display.c5003
-rw-r--r--sys/dev/drm2/i915/intel_dp.c1579
-rw-r--r--sys/dev/drm2/i915/intel_drv.h323
-rw-r--r--sys/dev/drm2/i915/intel_dvo.c534
-rw-r--r--sys/dev/drm2/i915/intel_fb.c88
-rw-r--r--sys/dev/drm2/i915/intel_hdmi.c651
-rw-r--r--sys/dev/drm2/i915/intel_iic.c446
-rw-r--r--sys/dev/drm2/i915/intel_lvds.c426
-rw-r--r--sys/dev/drm2/i915/intel_modes.c64
-rw-r--r--sys/dev/drm2/i915/intel_opregion.c179
-rw-r--r--sys/dev/drm2/i915/intel_overlay.c332
-rw-r--r--sys/dev/drm2/i915/intel_panel.c245
-rw-r--r--sys/dev/drm2/i915/intel_pm.c1481
-rw-r--r--sys/dev/drm2/i915/intel_ringbuffer.c694
-rw-r--r--sys/dev/drm2/i915/intel_ringbuffer.h95
-rw-r--r--sys/dev/drm2/i915/intel_sdvo.c714
-rw-r--r--sys/dev/drm2/i915/intel_sprite.c151
-rw-r--r--sys/dev/drm2/i915/intel_tv.c165
-rw-r--r--sys/modules/drm2/i915kms/Makefile10
57 files changed, 19751 insertions, 8673 deletions
diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c
index 0db332b..c9ebcc5 100644
--- a/sys/dev/agp/agp_i810.c
+++ b/sys/dev/agp/agp_i810.c
@@ -250,6 +250,10 @@ struct agp_i810_driver {
void (*chipset_flush)(device_t);
};
+static struct {
+ struct intel_gtt base;
+} intel_private;
+
static const struct agp_i810_driver agp_i810_i810_driver = {
.chiptype = CHIP_I810,
.gen = 1,
@@ -526,6 +530,29 @@ static const struct agp_i810_driver agp_i810_hsw_driver = {
.chipset_flush = agp_i810_chipset_flush,
};
+static const struct agp_i810_driver agp_i810_valleyview_driver = {
+ .chiptype = CHIP_SB,
+ .gen = 7,
+ .busdma_addr_mask_sz = 40,
+ .res_spec = agp_g4x_res_spec,
+ .check_active = agp_sb_check_active,
+ .set_desc = agp_i810_set_desc,
+ .dump_regs = agp_sb_dump_regs,
+ .get_stolen_size = agp_sb_get_stolen_size,
+ .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries,
+ .get_gtt_total_entries = agp_sb_get_gtt_total_entries,
+ .install_gatt = agp_g4x_install_gatt,
+ .deinstall_gatt = agp_i830_deinstall_gatt,
+ .write_gtt = agp_sb_write_gtt,
+ .install_gtt_pte = agp_sb_install_gtt_pte,
+ .read_gtt_pte = agp_g4x_read_gtt_pte,
+ .read_gtt_pte_paddr = agp_sb_read_gtt_pte_paddr,
+ .set_aperture = agp_i915_set_aperture,
+ .chipset_flush_setup = agp_i810_chipset_flush_setup,
+ .chipset_flush_teardown = agp_i810_chipset_flush_teardown,
+ .chipset_flush = agp_i810_chipset_flush,
+};
+
/* For adding new devices, devid is the id of the graphics controller
* (pci:0:2:0, for example). The placeholder (usually at pci:0:2:1) for the
* second head should never be added. The bridge_offset is the offset to
@@ -763,40 +790,200 @@ static const struct agp_i810_match {
},
{
.devid = 0x04028086,
- .name = "Haswell desktop GT1",
+ .name = "Haswell GT1 desktop",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x04068086,
+ .name = "Haswell GT1 mobile",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x040A8086,
+ .name = "Haswell GT1 server",
.driver = &agp_i810_hsw_driver
},
{
.devid = 0x04128086,
- .name = "Haswell desktop GT2",
+ .name = "Haswell GT2 desktop",
.driver = &agp_i810_hsw_driver
},
{
- .devid = 0x040a8086,
- .name = "Haswell server GT1",
+ .devid = 0x04168086,
+ .name = "Haswell GT2 mobile",
.driver = &agp_i810_hsw_driver
},
{
- .devid = 0x041a8086,
- .name = "Haswell server GT2",
+ .devid = 0x041A8086,
+ .name = "Haswell GT2 server",
.driver = &agp_i810_hsw_driver
},
{
- .devid = 0x04068086,
- .name = "Haswell mobile GT1",
+ .devid = 0x04228086,
+ .name = "Haswell GT2 desktop",
.driver = &agp_i810_hsw_driver
},
{
- .devid = 0x04168086,
- .name = "Haswell mobile GT2",
+ .devid = 0x04268086,
+ .name = "Haswell GT2 mobile",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x042A8086,
+ .name = "Haswell GT2 server",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0A028086,
+ .name = "Haswell ULT GT1 desktop",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0A068086,
+ .name = "Haswell ULT GT1 mobile",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0A0A8086,
+ .name = "Haswell ULT GT1 server",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0A128086,
+ .name = "Haswell ULT GT2 desktop",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0A168086,
+ .name = "Haswell ULT GT2 mobile",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0A1A8086,
+ .name = "Haswell ULT GT2 server",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0A228086,
+ .name = "Haswell ULT GT2 desktop",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0A268086,
+ .name = "Haswell ULT GT2 mobile",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0A2A8086,
+ .name = "Haswell ULT GT2 server",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0C028086,
+ .name = "Haswell SDV GT1 desktop",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0C068086,
+ .name = "Haswell SDV GT1 mobile",
.driver = &agp_i810_hsw_driver
},
{
- .devid = 0x0c168086,
- .name = "Haswell SDV",
+ .devid = 0x0C0A8086,
+ .name = "Haswell SDV GT1 server",
.driver = &agp_i810_hsw_driver
},
{
+ .devid = 0x0C128086,
+ .name = "Haswell SDV GT2 desktop",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0C168086,
+ .name = "Haswell SDV GT2 mobile",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0C1A8086,
+ .name = "Haswell SDV GT2 server",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0C228086,
+ .name = "Haswell SDV GT2 desktop",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0C268086,
+ .name = "Haswell SDV GT2 mobile",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0C2A8086,
+ .name = "Haswell SDV GT2 server",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0D028086,
+ .name = "Haswell CRW GT1 desktop",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0D068086,
+ .name = "Haswell CRW GT1 mobile",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0D0A8086,
+ .name = "Haswell CRW GT1 server",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0D128086,
+ .name = "Haswell CRW GT2 desktop",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0D168086,
+ .name = "Haswell CRW GT2 mobile",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0D1A8086,
+ .name = "Haswell CRW GT2 server",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0D228086,
+ .name = "Haswell CRW GT2 desktop",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0D268086,
+ .name = "Haswell CRW GT2 mobile",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x0D2A8086,
+ .name = "Haswell CRW GT2 server",
+ .driver = &agp_i810_hsw_driver
+ },
+ {
+ .devid = 0x01558086,
+ .name = "Valleyview (desktop)",
+ .driver = &agp_i810_valleyview_driver
+ },
+ {
+ .devid = 0x01578086,
+ .name = "Valleyview (mobile)",
+ .driver = &agp_i810_valleyview_driver
+ },
+ {
+ .devid = 0x0F308086,
+ .name = "Valleyview (mobile)",
+ .driver = &agp_i810_valleyview_driver
+ },
+ {
.devid = 0,
}
};
@@ -2285,6 +2472,10 @@ agp_intel_gtt_get(device_t dev)
res.gtt_mappable_entries = sc->gtt_mappable_entries;
res.do_idle_maps = 0;
res.scratch_page_dma = VM_PAGE_TO_PHYS(bogus_page);
+ if (sc->agp.as_aperture != NULL)
+ res.gma_bus_addr = rman_get_start(sc->agp.as_aperture);
+ else
+ res.gma_bus_addr = 0;
return (res);
}
@@ -2588,11 +2779,12 @@ intel_gtt_insert_pages(u_int first_entry, u_int num_entries, vm_page_t *pages,
pages, flags);
}
-struct intel_gtt
+struct intel_gtt *
intel_gtt_get(void)
{
- return (agp_intel_gtt_get(intel_agp));
+ intel_private.base = agp_intel_gtt_get(intel_agp);
+ return (&intel_private.base);
}
int
diff --git a/sys/dev/agp/agp_i810.h b/sys/dev/agp/agp_i810.h
index 68cad87..2cb71eb 100644
--- a/sys/dev/agp/agp_i810.h
+++ b/sys/dev/agp/agp_i810.h
@@ -33,6 +33,7 @@
#define AGP_AGP_I810_H
#include <sys/param.h>
+#include <sys/rman.h>
#include <sys/sglist.h>
#include <vm/vm.h>
@@ -51,24 +52,23 @@
struct intel_gtt {
/* Size of memory reserved for graphics by the BIOS */
- u_int stolen_size;
+ unsigned int stolen_size;
/* Total number of gtt entries. */
- u_int gtt_total_entries;
- /*
- * Part of the gtt that is mappable by the cpu, for those
- * chips where this is not the full gtt.
- */
- u_int gtt_mappable_entries;
-
- /*
- * Always false.
- */
- u_int do_idle_maps;
-
- /*
- * Share the scratch page dma with ppgtts.
- */
+ unsigned int gtt_total_entries;
+ /* Part of the gtt that is mappable by the cpu, for those chips where
+ * this is not the full gtt. */
+ unsigned int gtt_mappable_entries;
+ /* Whether i915 needs to use the dmar apis or not. */
+ unsigned int needs_dmar : 1;
+ /* Whether we idle the gpu before mapping/unmapping */
+ unsigned int do_idle_maps : 1;
+ /* Share the scratch page dma with ppgtts. */
vm_paddr_t scratch_page_dma;
+ vm_page_t scratch_page;
+ /* for ppgtt PDE access */
+ uint32_t *gtt;
+ /* needed for ioremap in drm/i915 */
+ bus_addr_t gma_bus_addr;
};
struct intel_gtt agp_intel_gtt_get(device_t dev);
@@ -83,7 +83,7 @@ void agp_intel_gtt_insert_sg_entries(device_t dev, struct sglist *sg_list,
void agp_intel_gtt_insert_pages(device_t dev, u_int first_entry,
u_int num_entries, vm_page_t *pages, u_int flags);
-struct intel_gtt intel_gtt_get(void);
+struct intel_gtt *intel_gtt_get(void);
int intel_gtt_chipset_flush(void);
void intel_gtt_unmap_memory(struct sglist *sg_list);
void intel_gtt_clear_range(u_int first_entry, u_int num_entries);
diff --git a/sys/dev/drm2/drmP.h b/sys/dev/drm2/drmP.h
index ae12144..2853678 100644
--- a/sys/dev/drm2/drmP.h
+++ b/sys/dev/drm2/drmP.h
@@ -238,7 +238,6 @@ struct drm_device;
__func__ , ##__VA_ARGS__); \
} while (0)
-
/*@}*/
/***********************************************************************/
@@ -700,6 +699,8 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
void (*lastclose) (struct drm_device *);
int (*unload) (struct drm_device *);
+ int (*suspend) (struct drm_device *, pm_message_t state);
+ int (*resume) (struct drm_device *);
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
int (*dma_quiescent) (struct drm_device *);
int (*context_dtor) (struct drm_device *dev, int context);
@@ -1118,7 +1119,7 @@ struct drm_device {
char busid_str[128];
int modesetting;
- drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */
+ const drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */
};
#define DRM_SWITCH_POWER_ON 0
@@ -1581,6 +1582,8 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
}
+#include <dev/drm2/drm_mem_util.h>
+
extern int drm_fill_in_dev(struct drm_device *dev,
struct drm_driver *driver);
extern void drm_cancel_fill_in_dev(struct drm_device *dev);
@@ -1758,9 +1761,11 @@ struct dmi_system_id {
bool dmi_check_system(const struct dmi_system_id *);
/* Device setup support (drm_drv.c) */
-int drm_probe_helper(device_t kdev, drm_pci_id_list_t *idlist);
-int drm_attach_helper(device_t kdev, drm_pci_id_list_t *idlist,
+int drm_probe_helper(device_t kdev, const drm_pci_id_list_t *idlist);
+int drm_attach_helper(device_t kdev, const drm_pci_id_list_t *idlist,
struct drm_driver *driver);
+int drm_generic_suspend(device_t kdev);
+int drm_generic_resume(device_t kdev);
int drm_generic_detach(device_t kdev);
void drm_event_wakeup(struct drm_pending_event *e);
diff --git a/sys/dev/drm2/drm_atomic.h b/sys/dev/drm2/drm_atomic.h
index fd84922..53155c7 100644
--- a/sys/dev/drm2/drm_atomic.h
+++ b/sys/dev/drm2/drm_atomic.h
@@ -39,8 +39,8 @@ typedef uint64_t atomic64_t;
#define NB_BITS_PER_LONG (sizeof(long) * NBBY)
#define BITS_TO_LONGS(x) howmany(x, NB_BITS_PER_LONG)
-#define atomic_read(p) (*(volatile u_int *)(p))
-#define atomic_set(p, v) do { *(u_int *)(p) = (v); } while (0)
+#define atomic_read(p) atomic_load_acq_int(p)
+#define atomic_set(p, v) atomic_store_rel_int(p, v)
#define atomic64_read(p) atomic_load_acq_64(p)
#define atomic64_set(p, v) atomic_store_rel_64(p, v)
@@ -78,6 +78,9 @@ typedef uint64_t atomic64_t;
#define cmpxchg(ptr, old, new) \
(atomic_cmpset_int((volatile u_int *)(ptr),(old),(new)) ? (old) : (0))
+#define atomic_inc_not_zero(p) atomic_inc(p)
+#define atomic_clear_mask(b, p) atomic_clear_int((p), (b))
+
static __inline u_long
find_first_zero_bit(const u_long *p, u_long max)
{
diff --git a/sys/dev/drm2/drm_dp_iic_helper.c b/sys/dev/drm2/drm_dp_iic_helper.c
index 492e2f9..35318c1 100644
--- a/sys/dev/drm2/drm_dp_iic_helper.c
+++ b/sys/dev/drm2/drm_dp_iic_helper.c
@@ -149,7 +149,7 @@ iic_dp_aux_xfer(device_t idev, struct iic_msg *msgs, uint32_t num)
buf = msgs[m].buf;
reading = (msgs[m].flags & IIC_M_RD) != 0;
ret = iic_dp_aux_address(idev, msgs[m].slave >> 1, reading);
- if (ret != 0)
+ if (ret < 0)
break;
if (reading) {
for (b = 0; b < len; b++) {
@@ -160,7 +160,7 @@ iic_dp_aux_xfer(device_t idev, struct iic_msg *msgs, uint32_t num)
} else {
for (b = 0; b < len; b++) {
ret = iic_dp_aux_put_byte(idev, buf[b]);
- if (ret != 0)
+ if (ret < 0)
break;
}
}
diff --git a/sys/dev/drm2/drm_drv.c b/sys/dev/drm2/drm_drv.c
index f4431a7..c3e6869 100644
--- a/sys/dev/drm2/drm_drv.c
+++ b/sys/dev/drm2/drm_drv.c
@@ -470,6 +470,14 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
err_i1:
atomic_dec(&dev->ioctl_count);
+ if (retcode == -ERESTARTSYS) {
+ /*
+ * FIXME: Find where in i915 ERESTARTSYS should be
+ * converted to EINTR.
+ */
+ DRM_DEBUG("ret = %d -> %d\n", retcode, -EINTR);
+ retcode = -EINTR;
+ }
if (retcode)
DRM_DEBUG("ret = %d\n", retcode);
if (retcode != 0 &&
diff --git a/sys/dev/drm2/drm_linux_list.h b/sys/dev/drm2/drm_linux_list.h
index f2161439..b24ac96 100644
--- a/sys/dev/drm2/drm_linux_list.h
+++ b/sys/dev/drm2/drm_linux_list.h
@@ -40,7 +40,6 @@ struct list_head {
};
#define list_entry(ptr, type, member) container_of(ptr,type,member)
-#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
static __inline__ void
INIT_LIST_HEAD(struct list_head *head) {
@@ -179,4 +178,123 @@ list_splice(const struct list_head *list, struct list_head *head)
void drm_list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
struct list_head *a, struct list_head *b));
+/* hlist, copied from sys/dev/ofed/linux/list.h */
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+#define HLIST_HEAD_INIT { }
+#define HLIST_HEAD(name) struct hlist_head name = HLIST_HEAD_INIT
+#define INIT_HLIST_HEAD(head) (head)->first = NULL
+#define INIT_HLIST_NODE(node) \
+do { \
+ (node)->next = NULL; \
+ (node)->pprev = NULL; \
+} while (0)
+
+static inline int
+hlist_unhashed(const struct hlist_node *h)
+{
+
+ return !h->pprev;
+}
+
+static inline int
+hlist_empty(const struct hlist_head *h)
+{
+
+ return !h->first;
+}
+
+static inline void
+hlist_del(struct hlist_node *n)
+{
+
+ if (n->next)
+ n->next->pprev = n->pprev;
+ *n->pprev = n->next;
+}
+
+static inline void
+hlist_del_init(struct hlist_node *n)
+{
+
+ if (hlist_unhashed(n))
+ return;
+ hlist_del(n);
+ INIT_HLIST_NODE(n);
+}
+
+static inline void
+hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+
+ n->next = h->first;
+ if (h->first)
+ h->first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+static inline void
+hlist_add_before(struct hlist_node *n, struct hlist_node *next)
+{
+
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ *(n->pprev) = n;
+}
+
+static inline void
+hlist_add_after(struct hlist_node *n, struct hlist_node *next)
+{
+
+ next->next = n->next;
+ n->next = next;
+ next->pprev = &n->next;
+ if (next->next)
+ next->next->pprev = &next->next;
+}
+
+static inline void
+hlist_move_list(struct hlist_head *old, struct hlist_head *new)
+{
+
+ new->first = old->first;
+ if (new->first)
+ new->first->pprev = &new->first;
+ old->first = NULL;
+}
+
+#define hlist_entry(ptr, type, field) container_of(ptr, type, field)
+
+#define hlist_for_each(p, head) \
+ for (p = (head)->first; p; p = p->next)
+
+#define hlist_for_each_safe(p, n, head) \
+ for (p = (head)->first; p && ({ n = p->next; 1; }); p = n)
+
+#define hlist_for_each_entry(tp, p, head, field) \
+ for (p = (head)->first; \
+ p ? (tp = hlist_entry(p, typeof(*tp), field)): NULL; p = p->next)
+
+#define hlist_for_each_entry_continue(tp, p, field) \
+ for (p = (p)->next; \
+ p ? (tp = hlist_entry(p, typeof(*tp), field)): NULL; p = p->next)
+
+#define hlist_for_each_entry_from(tp, p, field) \
+ for (; p ? (tp = hlist_entry(p, typeof(*tp), field)): NULL; p = p->next)
+
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+ for (pos = (head)->first; \
+ (pos) != 0 && ({ n = (pos)->next; \
+ tpos = hlist_entry((pos), typeof(*(tpos)), member); 1;}); \
+ pos = (n))
+
#endif /* _DRM_LINUX_LIST_H_ */
diff --git a/sys/dev/drm2/drm_mem_util.h b/sys/dev/drm2/drm_mem_util.h
new file mode 100644
index 0000000..01ca720
--- /dev/null
+++ b/sys/dev/drm2/drm_mem_util.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _DRM_MEM_UTIL_H_
+#define _DRM_MEM_UTIL_H_
+
+#include <sys/types.h>
+#include <sys/malloc.h>
+
+static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+{
+ if (size != 0 && nmemb > SIZE_MAX / size)
+ return NULL;
+
+ return malloc(nmemb * size, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
+}
+
+/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+{
+ if (size != 0 && nmemb > SIZE_MAX / size)
+ return NULL;
+
+ return malloc(nmemb * size, DRM_MEM_DRIVER, M_NOWAIT);
+}
+
+static __inline void drm_free_large(void *ptr)
+{
+ free(ptr, DRM_MEM_DRIVER);
+}
+
+#endif
diff --git a/sys/dev/drm2/drm_os_freebsd.c b/sys/dev/drm2/drm_os_freebsd.c
index 5baa01a..9b147a0 100644
--- a/sys/dev/drm2/drm_os_freebsd.c
+++ b/sys/dev/drm2/drm_os_freebsd.c
@@ -67,8 +67,27 @@ ns_to_timeval(const int64_t nsec)
return (tv);
}
-static drm_pci_id_list_t *
-drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist)
+/* Copied from OFED. */
+unsigned long drm_linux_timer_hz_mask;
+
+static void
+drm_linux_timer_init(void *arg)
+{
+
+ /*
+ * Compute an internal HZ value which can divide 2**32 to
+ * avoid timer rounding problems when the tick value wraps
+ * around 2**32:
+ */
+ drm_linux_timer_hz_mask = 1;
+ while (drm_linux_timer_hz_mask < (unsigned long)hz)
+ drm_linux_timer_hz_mask *= 2;
+ drm_linux_timer_hz_mask--;
+}
+SYSINIT(drm_linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, drm_linux_timer_init, NULL);
+
+static const drm_pci_id_list_t *
+drm_find_description(int vendor, int device, const drm_pci_id_list_t *idlist)
{
int i = 0;
@@ -87,9 +106,9 @@ drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist)
* method.
*/
int
-drm_probe_helper(device_t kdev, drm_pci_id_list_t *idlist)
+drm_probe_helper(device_t kdev, const drm_pci_id_list_t *idlist)
{
- drm_pci_id_list_t *id_entry;
+ const drm_pci_id_list_t *id_entry;
int vendor, device;
vendor = pci_get_vendor(kdev);
@@ -118,7 +137,7 @@ drm_probe_helper(device_t kdev, drm_pci_id_list_t *idlist)
* method.
*/
int
-drm_attach_helper(device_t kdev, drm_pci_id_list_t *idlist,
+drm_attach_helper(device_t kdev, const drm_pci_id_list_t *idlist,
struct drm_driver *driver)
{
struct drm_device *dev;
@@ -137,6 +156,55 @@ drm_attach_helper(device_t kdev, drm_pci_id_list_t *idlist,
}
int
+drm_generic_suspend(device_t kdev)
+{
+ struct drm_device *dev;
+ int error;
+
+ DRM_DEBUG_KMS("Starting suspend\n");
+
+ dev = device_get_softc(kdev);
+ if (dev->driver->suspend) {
+ pm_message_t state;
+
+ state.event = PM_EVENT_SUSPEND;
+ error = -dev->driver->suspend(dev, state);
+ if (error)
+ goto out;
+ }
+
+ error = bus_generic_suspend(kdev);
+
+out:
+ DRM_DEBUG_KMS("Finished suspend: %d\n", error);
+
+ return error;
+}
+
+int
+drm_generic_resume(device_t kdev)
+{
+ struct drm_device *dev;
+ int error;
+
+ DRM_DEBUG_KMS("Starting resume\n");
+
+ dev = device_get_softc(kdev);
+ if (dev->driver->resume) {
+ error = -dev->driver->resume(dev);
+ if (error)
+ goto out;
+ }
+
+ error = bus_generic_resume(kdev);
+
+out:
+ DRM_DEBUG_KMS("Finished resume: %d\n", error);
+
+ return error;
+}
+
+int
drm_generic_detach(device_t kdev)
{
struct drm_device *dev;
@@ -331,6 +399,42 @@ drm_clflush_virt_range(char *addr, unsigned long length)
#endif
}
+void
+hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
+ char *linebuf, size_t linebuflen, bool ascii __unused)
+{
+ int i, j, c;
+
+ i = j = 0;
+
+ while (i < len && j <= linebuflen) {
+ c = ((const char *)buf)[i];
+
+ if (i != 0) {
+ if (i % rowsize == 0) {
+ /* Newline required. */
+ sprintf(linebuf + j, "\n");
+ ++j;
+ } else if (i % groupsize == 0) {
+ /* Space required. */
+ sprintf(linebuf + j, " ");
+ ++j;
+ }
+ }
+
+ if (j > linebuflen - 1)
+ break;
+
+ sprintf(linebuf + j, "%02X", c);
+ j += 2;
+
+ ++i;
+ }
+
+ if (j <= linebuflen)
+ sprintf(linebuf + j, "\n");
+}
+
#if DRM_LINUX
#include <sys/sysproto.h>
diff --git a/sys/dev/drm2/drm_os_freebsd.h b/sys/dev/drm2/drm_os_freebsd.h
index 8aa660a..f590493 100644
--- a/sys/dev/drm2/drm_os_freebsd.h
+++ b/sys/dev/drm2/drm_os_freebsd.h
@@ -10,6 +10,7 @@ __FBSDID("$FreeBSD$");
#define _DRM_OS_FREEBSD_H_
#include <sys/fbio.h>
+#include <sys/smp.h>
#if _BYTE_ORDER == _BIG_ENDIAN
#define __BIG_ENDIAN 4321
@@ -24,10 +25,22 @@ __FBSDID("$FreeBSD$");
#endif
#ifndef __user
-#define __user
+#define __user
#endif
#ifndef __iomem
-#define __iomem
+#define __iomem
+#endif
+#ifndef __always_unused
+#define __always_unused
+#endif
+#ifndef __must_check
+#define __must_check
+#endif
+#ifndef __force
+#define __force
+#endif
+#ifndef uninitialized_var
+#define uninitialized_var(x) x
#endif
#define cpu_to_le16(x) htole16(x)
@@ -69,9 +82,23 @@ typedef void irqreturn_t;
#define __exit
#define __read_mostly
-#define WARN_ON(cond) KASSERT(!(cond), ("WARN ON: " #cond))
+#define BUILD_BUG_ON(x) CTASSERT(!(x))
+#define BUILD_BUG_ON_NOT_POWER_OF_2(x)
+
+#ifndef WARN
+#define WARN(condition, format, ...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ DRM_ERROR(format, ##__VA_ARGS__); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+#define WARN_ONCE(condition, format, ...) \
+ WARN(condition, format, ##__VA_ARGS__)
+#define WARN_ON(cond) WARN(cond, "WARN ON: " #cond)
#define WARN_ON_SMP(cond) WARN_ON(cond)
-#define BUG_ON(cond) KASSERT(!(cond), ("BUG ON: " #cond))
+#define BUG() panic("BUG")
+#define BUG_ON(cond) KASSERT(!(cond), ("BUG ON: " #cond " -> 0x%jx", (uintmax_t)(cond)))
#define unlikely(x) __builtin_expect(!!(x), 0)
#define likely(x) __builtin_expect(!!(x), 1)
#define container_of(ptr, type, member) ({ \
@@ -93,6 +120,15 @@ typedef void irqreturn_t;
#define DRM_UDELAY(udelay) DELAY(udelay)
#define drm_msleep(x, msg) pause((msg), ((int64_t)(x)) * hz / 1000)
#define DRM_MSLEEP(msecs) drm_msleep((msecs), "drm_msleep")
+#define get_seconds() time_second
+
+#define ioread8(addr) *(volatile uint8_t *)((char *)addr)
+#define ioread16(addr) *(volatile uint16_t *)((char *)addr)
+#define ioread32(addr) *(volatile uint32_t *)((char *)addr)
+
+#define iowrite8(data, addr) *(volatile uint8_t *)((char *)addr) = data;
+#define iowrite16(data, addr) *(volatile uint16_t *)((char *)addr) = data;
+#define iowrite32(data, addr) *(volatile uint32_t *)((char *)addr) = data;
#define DRM_READ8(map, offset) \
*(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \
@@ -127,12 +163,18 @@ typedef void irqreturn_t;
#define DRM_WRITEMEMORYBARRIER() wmb()
#define DRM_MEMORYBARRIER() mb()
#define smp_rmb() rmb()
+#define smp_wmb() wmb()
#define smp_mb__before_atomic_inc() mb()
#define smp_mb__after_atomic_inc() mb()
+#define barrier() __compiler_membar()
#define do_div(a, b) ((a) /= (b))
#define div64_u64(a, b) ((a) / (b))
#define lower_32_bits(n) ((u32)(n))
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+
+#define __set_bit(n, s) set_bit((n), (s))
+#define __clear_bit(n, s) clear_bit((n), (s))
#define min_t(type, x, y) ({ \
type __min1 = (x); \
@@ -148,6 +190,10 @@ typedef void irqreturn_t;
#define memcpy_fromio(a, b, c) memcpy((a), (b), (c))
#define memcpy_toio(a, b, c) memcpy((a), (b), (c))
+#define VERIFY_READ VM_PROT_READ
+#define VERIFY_WRITE VM_PROT_WRITE
+#define access_ok(prot, p, l) useracc((p), (l), (prot))
+
/* XXXKIB what is the right code for the FreeBSD ? */
/* kib@ used ENXIO here -- dumbbell@ */
#define EREMOTEIO EIO
@@ -170,8 +216,10 @@ typedef void irqreturn_t;
#define PCI_VENDOR_ID_SONY 0x104d
#define PCI_VENDOR_ID_VIA 0x1106
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#define hweight32(i) bitcount32(i)
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_CLOSEST(n,d) (((n) + (d) / 2) / (d))
+#define div_u64(n, d) ((n) / (d))
+#define hweight32(i) bitcount32(i)
static inline unsigned long
roundup_pow_of_two(unsigned long x)
@@ -195,6 +243,8 @@ ror32(uint32_t word, unsigned int shift)
}
#define IS_ALIGNED(x, y) (((x) & ((y) - 1)) == 0)
+#define round_down(x, y) rounddown2((x), (y))
+#define round_up(x, y) roundup2((x), (y))
#define get_unaligned(ptr) \
({ __typeof__(*(ptr)) __tmp; \
memcpy(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
@@ -251,7 +301,9 @@ abs64(int64_t x)
int64_t timeval_to_ns(const struct timeval *tv);
struct timeval ns_to_timeval(const int64_t nsec);
-#define PAGE_ALIGN(addr) round_page(addr)
+#define PAGE_ALIGN(addr) round_page(addr)
+#define page_to_phys(x) VM_PAGE_TO_PHYS(x)
+#define offset_in_page(x) ((x) & PAGE_MASK)
#define drm_get_device_from_kdev(_kdev) (((struct drm_minor *)(_kdev)->si_drv1)->dev)
@@ -295,20 +347,193 @@ __get_user(size_t size, const void *ptr, void *x)
}
#define get_user(x, ptr) __get_user(sizeof(*ptr), (ptr), &(x))
+static inline int
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned n)
+{
+
+ return (copyout_nofault(from, to, n) != 0 ? n : 0);
+}
+#define __copy_to_user_inatomic_nocache(to, from, n) \
+ __copy_to_user_inatomic((to), (from), (n))
+
+static inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from,
+ unsigned long n)
+{
+
+ /*
+ * XXXKIB. Equivalent Linux function is implemented using
+ * MOVNTI for aligned moves. For unaligned head and tail,
+ * normal move is performed. As such, it is not incorrect, if
+ * only somewhat slower, to use normal copyin. All uses
+ * except shmem_pwrite_fast() have the destination mapped WC.
+ */
+ return ((copyin_nofault(__DECONST(void *, from), to, n) != 0 ? n : 0));
+}
+#define __copy_from_user_inatomic_nocache(to, from, n) \
+ __copy_from_user_inatomic((to), (from), (n))
+
+static inline int
+fault_in_multipages_readable(const char __user *uaddr, int size)
+{
+ char c;
+ int ret = 0;
+ const char __user *end = uaddr + size - 1;
+
+ if (unlikely(size == 0))
+ return ret;
+
+ while (uaddr <= end) {
+ ret = -copyin(uaddr, &c, 1);
+ if (ret != 0)
+ return -EFAULT;
+ uaddr += PAGE_SIZE;
+ }
+
+ /* Check whether the range spilled into the next page. */
+ if (((unsigned long)uaddr & ~PAGE_MASK) ==
+ ((unsigned long)end & ~PAGE_MASK)) {
+ ret = -copyin(end, &c, 1);
+ }
+
+ return ret;
+}
+
+static inline int
+fault_in_multipages_writeable(char __user *uaddr, int size)
+{
+ int ret = 0;
+ char __user *end = uaddr + size - 1;
+
+ if (unlikely(size == 0))
+ return ret;
+
+ /*
+ * Writing zeroes into userspace here is OK, because we know that if
+ * the zero gets there, we'll be overwriting it.
+ */
+ while (uaddr <= end) {
+ ret = subyte(uaddr, 0);
+ if (ret != 0)
+ return -EFAULT;
+ uaddr += PAGE_SIZE;
+ }
+
+ /* Check whether the range spilled into the next page. */
+ if (((unsigned long)uaddr & ~PAGE_MASK) ==
+ ((unsigned long)end & ~PAGE_MASK))
+ ret = subyte(end, 0);
+
+ return ret;
+}
+
+enum __drm_capabilities {
+ CAP_SYS_ADMIN
+};
+
+static inline bool
+capable(enum __drm_capabilities cap)
+{
+
+ switch (cap) {
+ case CAP_SYS_ADMIN:
+ return DRM_SUSER(curthread);
+ }
+}
+
+#define to_user_ptr(x) ((void *)(uintptr_t)(x))
#define sigemptyset(set) SIGEMPTYSET(set)
#define sigaddset(set, sig) SIGADDSET(set, sig)
#define DRM_LOCK(dev) sx_xlock(&(dev)->dev_struct_lock)
#define DRM_UNLOCK(dev) sx_xunlock(&(dev)->dev_struct_lock)
+extern unsigned long drm_linux_timer_hz_mask;
#define jiffies ticks
#define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz)
#define msecs_to_jiffies(x) (((int64_t)(x)) * hz / 1000)
+#define timespec_to_jiffies(x) (((x)->tv_sec * 1000000 + (x)->tv_nsec) * hz / 1000000)
#define time_after(a,b) ((long)(b) - (long)(a) < 0)
#define time_after_eq(a,b) ((long)(b) - (long)(a) <= 0)
+#define round_jiffies(j) ((unsigned long)(((j) + drm_linux_timer_hz_mask) & ~drm_linux_timer_hz_mask))
+#define round_jiffies_up(j) round_jiffies(j) /* TODO */
+#define round_jiffies_up_relative(j) round_jiffies_up(j) /* TODO */
+
+#define getrawmonotonic(ts) getnanouptime(ts)
+
+#define wake_up(queue) wakeup_one((void *)queue)
+#define wake_up_interruptible(queue) wakeup_one((void *)queue)
+#define wake_up_all(queue) wakeup((void *)queue)
+#define wake_up_interruptible_all(queue) wakeup((void *)queue)
+
+struct completion {
+ unsigned int done;
+ struct mtx lock;
+};
+
+#define INIT_COMPLETION(c) ((c).done = 0);
+
+static inline void
+init_completion(struct completion *c)
+{
+
+ mtx_init(&c->lock, "drmcompl", NULL, MTX_DEF);
+ c->done = 0;
+}
+
+static inline void
+free_completion(struct completion *c)
+{
+
+ mtx_destroy(&c->lock);
+}
+
+static inline void
+complete_all(struct completion *c)
+{
-#define wake_up(queue) wakeup((void *)queue)
-#define wake_up_interruptible(queue) wakeup((void *)queue)
+ mtx_lock(&c->lock);
+ c->done++;
+ mtx_unlock(&c->lock);
+ wakeup(c);
+}
+
+static inline long
+wait_for_completion_interruptible_timeout(struct completion *c,
+ unsigned long timeout)
+{
+ unsigned long start_jiffies, elapsed_jiffies;
+ bool timeout_expired = false, awakened = false;
+ long ret = timeout;
+
+ start_jiffies = ticks;
+
+ mtx_lock(&c->lock);
+ while (c->done == 0 && !timeout_expired) {
+ ret = -msleep(c, &c->lock, PCATCH, "drmwco", timeout);
+ switch(ret) {
+ case -EWOULDBLOCK:
+ timeout_expired = true;
+ ret = 0;
+ break;
+ case -EINTR:
+ case -ERESTART:
+ ret = -ERESTARTSYS;
+ break;
+ case 0:
+ awakened = true;
+ break;
+ }
+ }
+ mtx_unlock(&c->lock);
+
+ if (awakened) {
+ elapsed_jiffies = ticks - start_jiffies;
+ ret = timeout > elapsed_jiffies ? timeout - elapsed_jiffies : 1;
+ }
+
+ return (ret);
+}
MALLOC_DECLARE(DRM_MEM_DMA);
MALLOC_DECLARE(DRM_MEM_SAREA);
@@ -356,6 +581,12 @@ typedef struct drm_pci_id_list
#if defined(__i386__) || defined(__amd64__)
#define CONFIG_ACPI
+#define CONFIG_DRM_I915_KMS
+#undef CONFIG_INTEL_IOMMU
+#endif
+
+#ifdef COMPAT_FREEBSD32
+#define CONFIG_COMPAT
#endif
#define CONFIG_AGP 1
@@ -364,19 +595,102 @@ typedef struct drm_pci_id_list
#define CONFIG_FB 1
extern const char *fb_mode_option;
+#undef CONFIG_DEBUG_FS
+#undef CONFIG_VGA_CONSOLE
+
#define EXPORT_SYMBOL(x)
+#define EXPORT_SYMBOL_GPL(x)
#define MODULE_AUTHOR(author)
#define MODULE_DESCRIPTION(desc)
#define MODULE_LICENSE(license)
#define MODULE_PARM_DESC(name, desc)
+#define MODULE_DEVICE_TABLE(name, list)
#define module_param_named(name, var, type, perm)
#define printk printf
+#define pr_err DRM_ERROR
+#define pr_warn DRM_WARNING
+#define pr_warn_once DRM_WARNING
#define KERN_DEBUG ""
+/* I2C compatibility. */
+#define I2C_M_RD IIC_M_RD
+#define I2C_M_WR IIC_M_WR
+#define I2C_M_NOSTART IIC_M_NOSTART
+
struct fb_info * framebuffer_alloc(void);
void framebuffer_release(struct fb_info *info);
+#define console_lock()
+#define console_unlock()
+#define console_trylock() true
+
+#define PM_EVENT_SUSPEND 0x0002
+#define PM_EVENT_QUIESCE 0x0008
+#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE
+
+typedef struct pm_message {
+ int event;
+} pm_message_t;
+
+static inline int
+pci_read_config_byte(device_t kdev, int where, u8 *val)
+{
+
+ *val = (u8)pci_read_config(kdev, where, 1);
+ return (0);
+}
+
+static inline int
+pci_write_config_byte(device_t kdev, int where, u8 val)
+{
+
+ pci_write_config(kdev, where, val, 1);
+ return (0);
+}
+
+static inline int
+pci_read_config_word(device_t kdev, int where, uint16_t *val)
+{
+
+ *val = (uint16_t)pci_read_config(kdev, where, 2);
+ return (0);
+}
+
+static inline int
+pci_write_config_word(device_t kdev, int where, uint16_t val)
+{
+
+ pci_write_config(kdev, where, val, 2);
+ return (0);
+}
+
+static inline int
+pci_read_config_dword(device_t kdev, int where, uint32_t *val)
+{
+
+ *val = (uint32_t)pci_read_config(kdev, where, 4);
+ return (0);
+}
+
+static inline int
+pci_write_config_dword(device_t kdev, int where, uint32_t val)
+{
+
+ pci_write_config(kdev, where, val, 4);
+ return (0);
+}
+
+static inline void
+on_each_cpu(void callback(void *data), void *data, int wait)
+{
+
+ smp_rendezvous(NULL, callback, NULL, data);
+}
+
+void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+ int groupsize, char *linebuf, size_t linebuflen, bool ascii);
+
#define KIB_NOTYET() \
do { \
if (drm_debug && drm_notyet) \
diff --git a/sys/dev/drm2/drm_pciids.h b/sys/dev/drm2/drm_pciids.h
index 92591d2..b1a95b6 100644
--- a/sys/dev/drm2/drm_pciids.h
+++ b/sys/dev/drm2/drm_pciids.h
@@ -33,57 +33,89 @@
{0, 0, 0, NULL}
#define i915_PCI_IDS \
- {0x8086, 0x0042, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
- {0x8086, 0x0046, CHIP_I9XX|CHIP_I915, "Intel IronLake"}, \
- {0x8086, 0x0102, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
- {0x8086, 0x0106, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
- {0x8086, 0x010A, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
- {0x8086, 0x0112, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
- {0x8086, 0x0116, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
- {0x8086, 0x0122, CHIP_I9XX|CHIP_I915, "Intel SandyBridge"}, \
- {0x8086, 0x0126, CHIP_I9XX|CHIP_I915, "Intel SandyBridge (M)"}, \
- {0x8086, 0x0152, CHIP_I9XX|CHIP_I915, "Intel IvyBridge"}, \
- {0x8086, 0x0156, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (M)"}, \
- {0x8086, 0x015A, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (S)"}, \
- {0x8086, 0x0162, CHIP_I9XX|CHIP_I915, "Intel IvyBridge"}, \
- {0x8086, 0x0166, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (M)"}, \
- {0x8086, 0x016A, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (S)"}, \
- {0x8086, 0x0402, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \
- {0x8086, 0x0406, CHIP_I9XX|CHIP_I915, "Intel Haswell (M)"}, \
- {0x8086, 0x040A, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \
- {0x8086, 0x0412, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \
- {0x8086, 0x0416, CHIP_I9XX|CHIP_I915, "Intel Haswell (M)"}, \
- {0x8086, 0x041A, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \
- {0x8086, 0x0C16, CHIP_I9XX|CHIP_I915, "Intel Haswell (SDV)"}, \
- {0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
- {0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
- {0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \
- {0x8086, 0x258A, CHIP_I9XX|CHIP_I915, "Intel E7221 (i915)"}, \
- {0x8086, 0x2592, CHIP_I9XX|CHIP_I915, "Intel i915GM"}, \
- {0x8086, 0x2772, CHIP_I9XX|CHIP_I915, "Intel i945G"}, \
- {0x8086, 0x27A2, CHIP_I9XX|CHIP_I915, "Intel i945GM"}, \
- {0x8086, 0x27AE, CHIP_I9XX|CHIP_I915, "Intel i945GME"}, \
- {0x8086, 0x2972, CHIP_I9XX|CHIP_I965, "Intel i946GZ"}, \
- {0x8086, 0x2982, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
- {0x8086, 0x2992, CHIP_I9XX|CHIP_I965, "Intel i965Q"}, \
- {0x8086, 0x29A2, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
- {0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \
- {0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
- {0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \
- {0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \
- {0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \
- {0x8086, 0x2A42, CHIP_I9XX|CHIP_I965, "Mobile Intel® GM45 Express Chipset"}, \
- {0x8086, 0x2E02, CHIP_I9XX|CHIP_I965, "Intel Eaglelake"}, \
- {0x8086, 0x2E12, CHIP_I9XX|CHIP_I965, "Intel Q45/Q43"}, \
- {0x8086, 0x2E22, CHIP_I9XX|CHIP_I965, "Intel G45/G43"}, \
- {0x8086, 0x2E32, CHIP_I9XX|CHIP_I965, "Intel G41"}, \
- {0x8086, 0x2E42, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
- {0x8086, 0x2E92, CHIP_I9XX|CHIP_I915, "Intel G43 ?"}, \
- {0x8086, 0x3577, CHIP_I8XX, "Intel i830M GMCH"}, \
- {0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
- {0x8086, 0x358E, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
- {0x8086, 0xA001, CHIP_I9XX|CHIP_I965, "Intel Pineview"}, \
- {0x8086, 0xA011, CHIP_I9XX|CHIP_I965, "Intel Pineview (M)"}, \
+ {0x8086, 0x0042, 0, "Intel IronLake"}, \
+ {0x8086, 0x0046, 0, "Intel IronLake"}, \
+ {0x8086, 0x0102, 0, "Intel SandyBridge"}, \
+ {0x8086, 0x0106, 0, "Intel SandyBridge (M)"}, \
+ {0x8086, 0x010A, 0, "Intel SandyBridge (M)"}, \
+ {0x8086, 0x0112, 0, "Intel SandyBridge"}, \
+ {0x8086, 0x0116, 0, "Intel SandyBridge (M)"}, \
+ {0x8086, 0x0122, 0, "Intel SandyBridge"}, \
+ {0x8086, 0x0126, 0, "Intel SandyBridge (M)"}, \
+ {0x8086, 0x0152, 0, "Intel IvyBridge"}, \
+ {0x8086, 0x0156, 0, "Intel IvyBridge (M)"}, \
+ {0x8086, 0x015A, 0, "Intel IvyBridge (S)"}, \
+ {0x8086, 0x0162, 0, "Intel IvyBridge"}, \
+ {0x8086, 0x0166, 0, "Intel IvyBridge (M)"}, \
+ {0x8086, 0x016A, 0, "Intel IvyBridge (S)"}, \
+ {0x8086, 0x0402, 0, "Intel Haswell (GT1 desktop)"}, \
+ {0x8086, 0x0406, 0, "Intel Haswell (GT1 mobile)"}, \
+ {0x8086, 0x040A, 0, "Intel Haswell (GT1 server)"}, \
+ {0x8086, 0x0412, 0, "Intel Haswell (GT2 desktop)"}, \
+ {0x8086, 0x0416, 0, "Intel Haswell (GT2 mobile)"}, \
+ {0x8086, 0x041A, 0, "Intel Haswell (GT2 server)"}, \
+ {0x8086, 0x0422, 0, "Intel Haswell (GT2 desktop)"}, \
+ {0x8086, 0x0426, 0, "Intel Haswell (GT2 mobile)"}, \
+ {0x8086, 0x042A, 0, "Intel Haswell (GT2 server)"}, \
+ {0x8086, 0x0A02, 0, "Intel Haswell (ULT GT1 desktop)"}, \
+ {0x8086, 0x0A06, 0, "Intel Haswell (ULT GT1 mobile)"}, \
+ {0x8086, 0x0A0A, 0, "Intel Haswell (ULT GT1 server)"}, \
+ {0x8086, 0x0A12, 0, "Intel Haswell (ULT GT2 desktop)"}, \
+ {0x8086, 0x0A16, 0, "Intel Haswell (ULT GT2 mobile)"}, \
+ {0x8086, 0x0A1A, 0, "Intel Haswell (ULT GT2 server)"}, \
+ {0x8086, 0x0A22, 0, "Intel Haswell (ULT GT2 desktop)"}, \
+ {0x8086, 0x0A26, 0, "Intel Haswell (ULT GT2 mobile)"}, \
+ {0x8086, 0x0A2A, 0, "Intel Haswell (ULT GT2 server)"}, \
+ {0x8086, 0x0C02, 0, "Intel Haswell (SDV GT1 desktop)"}, \
+ {0x8086, 0x0C06, 0, "Intel Haswell (SDV GT1 mobile)"}, \
+ {0x8086, 0x0C0A, 0, "Intel Haswell (SDV GT1 server)"}, \
+ {0x8086, 0x0C12, 0, "Intel Haswell (SDV GT2 desktop)"}, \
+ {0x8086, 0x0C16, 0, "Intel Haswell (SDV GT2 mobile)"}, \
+ {0x8086, 0x0C1A, 0, "Intel Haswell (SDV GT2 server)"}, \
+ {0x8086, 0x0C22, 0, "Intel Haswell (SDV GT2 desktop)"}, \
+ {0x8086, 0x0C26, 0, "Intel Haswell (SDV GT2 mobile)"}, \
+ {0x8086, 0x0C2A, 0, "Intel Haswell (SDV GT2 server)"}, \
+ {0x8086, 0x0D02, 0, "Intel Haswell (CRW GT1 desktop)"}, \
+ {0x8086, 0x0D06, 0, "Intel Haswell (CRW GT1 mobile)"}, \
+ {0x8086, 0x0D0A, 0, "Intel Haswell (CRW GT1 server)"}, \
+ {0x8086, 0x0D12, 0, "Intel Haswell (CRW GT2 desktop)"}, \
+ {0x8086, 0x0D16, 0, "Intel Haswell (CRW GT2 mobile)"}, \
+ {0x8086, 0x0D1A, 0, "Intel Haswell (CRW GT2 server)"}, \
+ {0x8086, 0x0D22, 0, "Intel Haswell (CRW GT2 desktop)"}, \
+ {0x8086, 0x0D26, 0, "Intel Haswell (CRW GT2 mobile)"}, \
+ {0x8086, 0x0D2A, 0, "Intel Haswell (CRW GT2 server)"}, \
+ {0x8086, 0x0155, 0, "Intel Valleyview (desktop)"}, \
+ {0x8086, 0x0157, 0, "Intel Valleyview (mobile)"}, \
+ {0x8086, 0x0F30, 0, "Intel Valleyview (mobile)"}, \
+ {0x8086, 0x2562, 0, "Intel i845G GMCH"}, \
+ {0x8086, 0x2572, 0, "Intel i865G GMCH"}, \
+ {0x8086, 0x2582, 0, "Intel i915G"}, \
+ {0x8086, 0x258A, 0, "Intel E7221 (i915)"}, \
+ {0x8086, 0x2592, 0, "Intel i915GM"}, \
+ {0x8086, 0x2772, 0, "Intel i945G"}, \
+ {0x8086, 0x27A2, 0, "Intel i945GM"}, \
+ {0x8086, 0x27AE, 0, "Intel i945GME"}, \
+ {0x8086, 0x2972, 0, "Intel i946GZ"}, \
+ {0x8086, 0x2982, 0, "Intel i965G"}, \
+ {0x8086, 0x2992, 0, "Intel i965Q"}, \
+ {0x8086, 0x29A2, 0, "Intel i965G"}, \
+ {0x8086, 0x29B2, 0, "Intel Q35"}, \
+ {0x8086, 0x29C2, 0, "Intel G33"}, \
+ {0x8086, 0x29D2, 0, "Intel Q33"}, \
+ {0x8086, 0x2A02, 0, "Intel i965GM"}, \
+ {0x8086, 0x2A12, 0, "Intel i965GME/GLE"}, \
+ {0x8086, 0x2A42, 0, "Mobile Intel® GM45 Express Chipset"}, \
+ {0x8086, 0x2E02, 0, "Intel Eaglelake"}, \
+ {0x8086, 0x2E12, 0, "Intel Q45/Q43"}, \
+ {0x8086, 0x2E22, 0, "Intel G45/G43"}, \
+ {0x8086, 0x2E32, 0, "Intel G41"}, \
+ {0x8086, 0x2E42, 0, "Intel G43 ?"}, \
+ {0x8086, 0x2E92, 0, "Intel G43 ?"}, \
+ {0x8086, 0x3577, 0, "Intel i830M GMCH"}, \
+ {0x8086, 0x3582, 0, "Intel i852GM/i855GM GMCH"}, \
+ {0x8086, 0x358E, 0, "Intel i852GM/i855GM GMCH"}, \
+ {0x8086, 0xA001, 0, "Intel Pineview"}, \
+ {0x8086, 0xA011, 0, "Intel Pineview (M)"}, \
{0, 0, 0, NULL}
#define imagine_PCI_IDS \
diff --git a/sys/dev/drm2/i915/dvo.h b/sys/dev/drm2/i915/dvo.h
new file mode 100644
index 0000000..9b486ce
--- /dev/null
+++ b/sys/dev/drm2/i915/dvo.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright © 2006 Eric Anholt
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DVO_H
+#define _INTEL_DVO_H
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <dev/iicbus/iic.h>
+#include <dev/iicbus/iiconf.h>
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dvo_device {
+ const char *name;
+ int type;
+ /* DVOA/B/C output register */
+ u32 dvo_reg;
+ /* GPIO register used for i2c bus to control this device */
+ u32 gpio;
+ int slave_addr;
+
+ const struct intel_dvo_dev_ops *dev_ops;
+ void *dev_priv;
+ device_t i2c_bus;
+};
+
+struct intel_dvo_dev_ops {
+ /*
+ * Initialize the device at startup time.
+ * Returns NULL if the device does not exist.
+ */
+ bool (*init)(struct intel_dvo_device *dvo,
+ device_t i2cbus);
+
+ /*
+ * Called to allow the output a chance to create properties after the
+ * RandR objects have been created.
+ */
+ void (*create_resources)(struct intel_dvo_device *dvo);
+
+ /*
+ * Turn on/off output.
+ *
+ * Because none of our dvo drivers support an intermediate power levels,
+ * we don't expose this in the interfac.
+ */
+ void (*dpms)(struct intel_dvo_device *dvo, bool enable);
+
+ /*
+ * Callback for testing a video mode for a given output.
+ *
+ * This function should only check for cases where a mode can't
+ * be supported on the output specifically, and not represent
+ * generic CRTC limitations.
+ *
+ * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
+ */
+ int (*mode_valid)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode);
+
+ /*
+ * Callback to adjust the mode to be set in the CRTC.
+ *
+ * This allows an output to adjust the clock or even the entire set of
+ * timings, which is used for panels with fixed timings or for
+ * buses with clock limitations.
+ */
+ bool (*mode_fixup)(struct intel_dvo_device *dvo,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Callback for preparing mode changes on an output
+ */
+ void (*prepare)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for committing mode changes on an output
+ */
+ void (*commit)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for setting up a video mode after fixups have been made.
+ *
+ * This is only called while the output is disabled. The dpms callback
+ * must be all that's necessary for the output, to turn the output on
+ * after this function is called.
+ */
+ void (*mode_set)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Probe for a connected output, and return detect_status.
+ */
+ enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+
+ /*
+ * Probe the current hw status, returning true if the connected output
+ * is active.
+ */
+ bool (*get_hw_state)(struct intel_dvo_device *dev);
+
+ /**
+ * Query the device for the modes it provides.
+ *
+ * This function may also update MonInfo, mm_width, and mm_height.
+ *
+ * \return singly-linked list of modes or NULL if no modes found.
+ */
+ struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
+
+ /**
+ * Clean up driver-specific bits of the output
+ */
+ void (*destroy) (struct intel_dvo_device *dvo);
+
+ /**
+ * Debugging hook to dump device registers to log file
+ */
+ void (*dump_regs)(struct intel_dvo_device *dvo);
+};
+
+extern struct intel_dvo_dev_ops sil164_ops;
+extern struct intel_dvo_dev_ops ch7xxx_ops;
+extern struct intel_dvo_dev_ops ivch_ops;
+extern struct intel_dvo_dev_ops tfp410_ops;
+extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
+
+#endif /* _INTEL_DVO_H */
diff --git a/sys/dev/drm2/i915/dvo_ch7017.c b/sys/dev/drm2/i915/dvo_ch7017.c
new file mode 100644
index 0000000..44f2c6f9
--- /dev/null
+++ b/sys/dev/drm2/i915/dvo_ch7017.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "dvo.h"
+
+#define CH7017_TV_DISPLAY_MODE 0x00
+#define CH7017_FLICKER_FILTER 0x01
+#define CH7017_VIDEO_BANDWIDTH 0x02
+#define CH7017_TEXT_ENHANCEMENT 0x03
+#define CH7017_START_ACTIVE_VIDEO 0x04
+#define CH7017_HORIZONTAL_POSITION 0x05
+#define CH7017_VERTICAL_POSITION 0x06
+#define CH7017_BLACK_LEVEL 0x07
+#define CH7017_CONTRAST_ENHANCEMENT 0x08
+#define CH7017_TV_PLL 0x09
+#define CH7017_TV_PLL_M 0x0a
+#define CH7017_TV_PLL_N 0x0b
+#define CH7017_SUB_CARRIER_0 0x0c
+#define CH7017_CIV_CONTROL 0x10
+#define CH7017_CIV_0 0x11
+#define CH7017_CHROMA_BOOST 0x14
+#define CH7017_CLOCK_MODE 0x1c
+#define CH7017_INPUT_CLOCK 0x1d
+#define CH7017_GPIO_CONTROL 0x1e
+#define CH7017_INPUT_DATA_FORMAT 0x1f
+#define CH7017_CONNECTION_DETECT 0x20
+#define CH7017_DAC_CONTROL 0x21
+#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22
+#define CH7017_DEFEAT_VSYNC 0x47
+#define CH7017_TEST_PATTERN 0x48
+
+#define CH7017_POWER_MANAGEMENT 0x49
+/** Enables the TV output path. */
+#define CH7017_TV_EN (1 << 0)
+#define CH7017_DAC0_POWER_DOWN (1 << 1)
+#define CH7017_DAC1_POWER_DOWN (1 << 2)
+#define CH7017_DAC2_POWER_DOWN (1 << 3)
+#define CH7017_DAC3_POWER_DOWN (1 << 4)
+/** Powers down the TV out block, and DAC0-3 */
+#define CH7017_TV_POWER_DOWN_EN (1 << 5)
+
+#define CH7017_VERSION_ID 0x4a
+
+#define CH7017_DEVICE_ID 0x4b
+#define CH7017_DEVICE_ID_VALUE 0x1b
+#define CH7018_DEVICE_ID_VALUE 0x1a
+#define CH7019_DEVICE_ID_VALUE 0x19
+
+#define CH7017_XCLK_D2_ADJUST 0x53
+#define CH7017_UP_SCALER_COEFF_0 0x55
+#define CH7017_UP_SCALER_COEFF_1 0x56
+#define CH7017_UP_SCALER_COEFF_2 0x57
+#define CH7017_UP_SCALER_COEFF_3 0x58
+#define CH7017_UP_SCALER_COEFF_4 0x59
+#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a
+#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b
+#define CH7017_GPIO_INVERT 0x5c
+#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d
+#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f
+/**< Low bits of horizontal active pixel input */
+
+#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60
+/** High bits of horizontal active pixel input */
+#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0)
+/** High bits of vertical active line output */
+#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3)
+
+#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61
+/**< Low bits of vertical active line output */
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62
+/**< Low bits of horizontal active pixel output */
+
+#define CH7017_LVDS_POWER_DOWN 0x63
+/** High bits of horizontal active pixel output */
+#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0)
+/** Enables the LVDS power down state transition */
+#define CH7017_LVDS_POWER_DOWN_EN (1 << 6)
+/** Enables the LVDS upscaler */
+#define CH7017_LVDS_UPSCALER_EN (1 << 7)
+#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
+
+#define CH7017_LVDS_ENCODING 0x64
+#define CH7017_LVDS_DITHER_2D (1 << 2)
+#define CH7017_LVDS_DITHER_DIS (1 << 3)
+#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4)
+#define CH7017_LVDS_24_BIT (1 << 5)
+
+#define CH7017_LVDS_ENCODING_2 0x65
+
+#define CH7017_LVDS_PLL_CONTROL 0x66
+/** Enables the LVDS panel output path */
+#define CH7017_LVDS_PANEN (1 << 0)
+/** Enables the LVDS panel backlight */
+#define CH7017_LVDS_BKLEN (1 << 3)
+
+#define CH7017_POWER_SEQUENCING_T1 0x67
+#define CH7017_POWER_SEQUENCING_T2 0x68
+#define CH7017_POWER_SEQUENCING_T3 0x69
+#define CH7017_POWER_SEQUENCING_T4 0x6a
+#define CH7017_POWER_SEQUENCING_T5 0x6b
+#define CH7017_GPIO_DRIVER_TYPE 0x6c
+#define CH7017_GPIO_DATA 0x6d
+#define CH7017_GPIO_DIRECTION_CONTROL 0x6e
+
+#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71
+# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
+# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
+# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
+
+#define CH7017_LVDS_PLL_VCO_CONTROL 0x72
+# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
+# define CH7017_LVDS_PLL_VCO_SHIFT 4
+# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
+
+#define CH7017_OUTPUTS_ENABLE 0x73
+# define CH7017_CHARGE_PUMP_LOW 0x0
+# define CH7017_CHARGE_PUMP_HIGH 0x3
+# define CH7017_LVDS_CHANNEL_A (1 << 3)
+# define CH7017_LVDS_CHANNEL_B (1 << 4)
+# define CH7017_TV_DAC_A (1 << 5)
+# define CH7017_TV_DAC_B (1 << 6)
+# define CH7017_DDC_SELECT_DC2 (1 << 7)
+
+#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74
+#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75
+#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76
+
+#define CH7017_LVDS_CONTROL_2 0x78
+# define CH7017_LOOP_FILTER_SHIFT 5
+# define CH7017_PHASE_DETECTOR_SHIFT 0
+
+#define CH7017_BANG_LIMIT_CONTROL 0x7f
+
+struct ch7017_priv {
+ uint8_t dummy;
+};
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
+
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
+{
+ struct iic_msg msgs[] = {
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = val,
+ }
+ };
+ return -iicbus_transfer(dvo->i2c_bus, msgs, 2) == 0;
+}
+
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
+{
+ uint8_t buf[2] = { addr, val };
+ struct iic_msg msg = {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 2,
+ .buf = buf,
+ };
+ return -iicbus_transfer(dvo->i2c_bus, &msg, 1) == 0;
+}
+
+/** Probes for a CH7017 on the given bus and slave address. */
+static bool ch7017_init(struct intel_dvo_device *dvo,
+ device_t adapter)
+{
+ struct ch7017_priv *priv;
+ const char *str;
+ u8 val;
+
+ priv = malloc(sizeof(struct ch7017_priv), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (priv == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = priv;
+
+ if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
+ goto fail;
+
+ switch (val) {
+ case CH7017_DEVICE_ID_VALUE:
+ str = "ch7017";
+ break;
+ case CH7018_DEVICE_ID_VALUE:
+ str = "ch7018";
+ break;
+ case CH7019_DEVICE_ID_VALUE:
+ str = "ch7019";
+ break;
+ default:
+ DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+ "slave %d.\n",
+ val, device_get_nameunit(adapter),
+ dvo->slave_addr);
+ goto fail;
+ }
+
+ DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+ str, device_get_nameunit(adapter), dvo->slave_addr);
+ return true;
+
+fail:
+ free(priv, DRM_MEM_KMS);
+ return false;
+}
+
+static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
+{
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 160000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void ch7017_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
+ uint8_t outputs_enable, lvds_control_2, lvds_power_down;
+ uint8_t horizontal_active_pixel_input;
+ uint8_t horizontal_active_pixel_output, vertical_active_line_output;
+ uint8_t active_input_line_output;
+
+ DRM_DEBUG_KMS("Registers before mode setting\n");
+ ch7017_dump_regs(dvo);
+
+ /* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
+ if (mode->clock < 100000) {
+ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
+ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+ (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
+ (0 << CH7017_PHASE_DETECTOR_SHIFT);
+ } else {
+ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
+ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+ (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+ lvds_pll_feedback_div = 35;
+ lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
+ (0 << CH7017_PHASE_DETECTOR_SHIFT);
+ if (1) { /* XXX: dual channel panel detection. Assume yes for now. */
+ outputs_enable |= CH7017_LVDS_CHANNEL_B;
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ } else {
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (1 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ }
+ }
+
+ horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
+
+ vertical_active_line_output = mode->vdisplay & 0x00ff;
+ horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
+
+ active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
+ (((mode->vdisplay & 0x0700) >> 8) << 3);
+
+ lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
+ (mode->hdisplay & 0x0700) >> 8;
+
+ ch7017_dpms(dvo, false);
+ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
+ horizontal_active_pixel_input);
+ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
+ horizontal_active_pixel_output);
+ ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
+ vertical_active_line_output);
+ ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
+ active_input_line_output);
+ ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
+ ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
+ ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
+ ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
+
+ /* Turn the LVDS back on with new settings. */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
+
+ DRM_DEBUG_KMS("Registers after mode setting\n");
+ ch7017_dump_regs(dvo);
+}
+
+/* set the CH7017 power state */
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ uint8_t val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ /* Turn off TV/VGA, and never turn it on since we don't support it. */
+ ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
+ CH7017_DAC0_POWER_DOWN |
+ CH7017_DAC1_POWER_DOWN |
+ CH7017_DAC2_POWER_DOWN |
+ CH7017_DAC3_POWER_DOWN |
+ CH7017_TV_POWER_DOWN_EN);
+
+ if (enable) {
+ /* Turn on the LVDS */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ val & ~CH7017_LVDS_POWER_DOWN_EN);
+ } else {
+ /* Turn off the LVDS */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ val | CH7017_LVDS_POWER_DOWN_EN);
+ }
+
+ /* XXX: Should actually wait for update power status somehow */
+ drm_msleep(20, "ch7017");
+}
+
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ if (val & CH7017_LVDS_POWER_DOWN_EN)
+ return false;
+ else
+ return true;
+}
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+#define DUMP(reg) \
+do { \
+ ch7017_read(dvo, reg, &val); \
+ DRM_DEBUG_KMS(#reg ": %02x\n", val); \
+} while (0)
+
+ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
+ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
+ DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
+ DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
+ DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
+ DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
+ DUMP(CH7017_LVDS_CONTROL_2);
+ DUMP(CH7017_OUTPUTS_ENABLE);
+ DUMP(CH7017_LVDS_POWER_DOWN);
+}
+
+static void ch7017_destroy(struct intel_dvo_device *dvo)
+{
+ struct ch7017_priv *priv = dvo->dev_priv;
+
+ if (priv) {
+ free(priv, DRM_MEM_KMS);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ch7017_ops = {
+ .init = ch7017_init,
+ .detect = ch7017_detect,
+ .mode_valid = ch7017_mode_valid,
+ .mode_set = ch7017_mode_set,
+ .dpms = ch7017_dpms,
+ .get_hw_state = ch7017_get_hw_state,
+ .dump_regs = ch7017_dump_regs,
+ .destroy = ch7017_destroy,
+};
diff --git a/sys/dev/drm2/i915/dvo_ch7xxx.c b/sys/dev/drm2/i915/dvo_ch7xxx.c
new file mode 100644
index 0000000..8700b47
--- /dev/null
+++ b/sys/dev/drm2/i915/dvo_ch7xxx.c
@@ -0,0 +1,347 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "dvo.h"
+
+#define CH7xxx_REG_VID 0x4a
+#define CH7xxx_REG_DID 0x4b
+
+#define CH7011_VID 0x83 /* 7010 as well */
+#define CH7009A_VID 0x84
+#define CH7009B_VID 0x85
+#define CH7301_VID 0x95
+
+#define CH7xxx_VID 0x84
+#define CH7xxx_DID 0x17
+
+#define CH7xxx_NUM_REGS 0x4c
+
+#define CH7xxx_CM 0x1c
+#define CH7xxx_CM_XCM (1<<0)
+#define CH7xxx_CM_MCP (1<<2)
+#define CH7xxx_INPUT_CLOCK 0x1d
+#define CH7xxx_GPIO 0x1e
+#define CH7xxx_GPIO_HPIR (1<<3)
+#define CH7xxx_IDF 0x1f
+
+#define CH7xxx_IDF_HSP (1<<3)
+#define CH7xxx_IDF_VSP (1<<4)
+
+#define CH7xxx_CONNECTION_DETECT 0x20
+#define CH7xxx_CDET_DVI (1<<5)
+
+#define CH7301_DAC_CNTL 0x21
+#define CH7301_HOTPLUG 0x23
+#define CH7xxx_TCTL 0x31
+#define CH7xxx_TVCO 0x32
+#define CH7xxx_TPCP 0x33
+#define CH7xxx_TPD 0x34
+#define CH7xxx_TPVT 0x35
+#define CH7xxx_TLPF 0x36
+#define CH7xxx_TCT 0x37
+#define CH7301_TEST_PATTERN 0x48
+
+#define CH7xxx_PM 0x49
+#define CH7xxx_PM_FPD (1<<0)
+#define CH7301_PM_DACPD0 (1<<1)
+#define CH7301_PM_DACPD1 (1<<2)
+#define CH7301_PM_DACPD2 (1<<3)
+#define CH7xxx_PM_DVIL (1<<6)
+#define CH7xxx_PM_DVIP (1<<7)
+
+#define CH7301_SYNC_POLARITY 0x56
+#define CH7301_SYNC_RGB_YUV (1<<0)
+#define CH7301_SYNC_POL_DVI (1<<5)
+
+/** @file
+ * driver for the Chrontel 7xxx DVI chip over DVO.
+ */
+
+static struct ch7xxx_id_struct {
+ uint8_t vid;
+ char *name;
+} ch7xxx_ids[] = {
+ { CH7011_VID, "CH7011" },
+ { CH7009A_VID, "CH7009A" },
+ { CH7009B_VID, "CH7009B" },
+ { CH7301_VID, "CH7301" },
+};
+
+struct ch7xxx_priv {
+ bool quiet;
+};
+
+static char *ch7xxx_get_id(uint8_t vid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
+ if (ch7xxx_ids[i].vid == vid)
+ return ch7xxx_ids[i].name;
+ }
+
+ return NULL;
+}
+
+/** Reads an 8 bit register */
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ device_t adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct iic_msg msgs[] = {
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (-iicbus_transfer(adapter, msgs, 2) == 0) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, device_get_nameunit(adapter), dvo->slave_addr);
+ }
+ return false;
+}
+
+/** Writes an 8 bit register */
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ device_t adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ struct iic_msg msg = {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (-iicbus_transfer(adapter, &msg, 1) == 0)
+ return true;
+
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, device_get_nameunit(adapter), dvo->slave_addr);
+ }
+
+ return false;
+}
+
+static bool ch7xxx_init(struct intel_dvo_device *dvo,
+ device_t adapter)
+{
+ /* this will detect the CH7xxx chip on the specified i2c bus */
+ struct ch7xxx_priv *ch7xxx;
+ uint8_t vendor, device;
+ char *name;
+
+ ch7xxx = malloc(sizeof(struct ch7xxx_priv), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (ch7xxx == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ch7xxx;
+ ch7xxx->quiet = true;
+
+ if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
+ goto out;
+
+ name = ch7xxx_get_id(vendor);
+ if (!name) {
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+ "slave %d.\n",
+ vendor, device_get_nameunit(adapter), dvo->slave_addr);
+ goto out;
+ }
+
+
+ if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
+ goto out;
+
+ if (device != CH7xxx_DID) {
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+ "slave %d.\n",
+ vendor, device_get_nameunit(adapter), dvo->slave_addr);
+ goto out;
+ }
+
+ ch7xxx->quiet = false;
+ DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+ name, vendor, device);
+ return true;
+out:
+ free(ch7xxx, DRM_MEM_KMS);
+ return false;
+}
+
+static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
+{
+ uint8_t cdet, orig_pm, pm;
+
+ ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
+
+ pm = orig_pm;
+ pm &= ~CH7xxx_PM_FPD;
+ pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
+
+ ch7xxx_writeb(dvo, CH7xxx_PM, pm);
+
+ ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
+
+ ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
+
+ if (cdet & CH7xxx_CDET_DVI)
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ uint8_t tvco, tpcp, tpd, tlpf, idf;
+
+ if (mode->clock <= 65000) {
+ tvco = 0x23;
+ tpcp = 0x08;
+ tpd = 0x16;
+ tlpf = 0x60;
+ } else {
+ tvco = 0x2d;
+ tpcp = 0x06;
+ tpd = 0x26;
+ tlpf = 0xa0;
+ }
+
+ ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
+ ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
+ ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
+ ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
+ ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
+ ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
+ ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
+
+ ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
+
+ idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ idf |= CH7xxx_IDF_HSP;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ idf |= CH7xxx_IDF_HSP;
+
+ ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
+}
+
+/* set the CH7xxx power state */
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ if (enable)
+ ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
+ else
+ ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
+}
+
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+ u8 val;
+
+ ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+ if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
+ return true;
+ else
+ return false;
+}
+
+static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
+{
+ int i;
+
+ for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ uint8_t val;
+ if ((i % 8) == 0)
+ DRM_LOG_KMS("\n %02X: ", i);
+ ch7xxx_readb(dvo, i, &val);
+ DRM_LOG_KMS("%02X ", val);
+ }
+}
+
+static void ch7xxx_destroy(struct intel_dvo_device *dvo)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+ if (ch7xxx) {
+ free(ch7xxx, DRM_MEM_KMS);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ch7xxx_ops = {
+ .init = ch7xxx_init,
+ .detect = ch7xxx_detect,
+ .mode_valid = ch7xxx_mode_valid,
+ .mode_set = ch7xxx_mode_set,
+ .dpms = ch7xxx_dpms,
+ .get_hw_state = ch7xxx_get_hw_state,
+ .dump_regs = ch7xxx_dump_regs,
+ .destroy = ch7xxx_destroy,
+};
diff --git a/sys/dev/drm2/i915/dvo_ivch.c b/sys/dev/drm2/i915/dvo_ivch.c
new file mode 100644
index 0000000..551f379
--- /dev/null
+++ b/sys/dev/drm2/i915/dvo_ivch.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "dvo.h"
+
+/*
+ * register definitions for the i82807aa.
+ *
+ * Documentation on this chipset can be found in datasheet #29069001 at
+ * intel.com.
+ */
+
+/*
+ * VCH Revision & GMBus Base Addr
+ */
+#define VR00 0x00
+# define VR00_BASE_ADDRESS_MASK 0x007f
+
+/*
+ * Functionality Enable
+ */
+#define VR01 0x01
+
+/*
+ * Enable the panel fitter
+ */
+# define VR01_PANEL_FIT_ENABLE (1 << 3)
+/*
+ * Enables the LCD display.
+ *
+ * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
+ */
+# define VR01_LCD_ENABLE (1 << 2)
+/** Enables the DVO repeater. */
+# define VR01_DVO_BYPASS_ENABLE (1 << 1)
+/** Enables the DVO clock */
+# define VR01_DVO_ENABLE (1 << 0)
+
+/*
+ * LCD Interface Format
+ */
+#define VR10 0x10
+/** Enables LVDS output instead of CMOS */
+# define VR10_LVDS_ENABLE (1 << 4)
+/** Enables 18-bit LVDS output. */
+# define VR10_INTERFACE_1X18 (0 << 2)
+/** Enables 24-bit LVDS or CMOS output */
+# define VR10_INTERFACE_1X24 (1 << 2)
+/** Enables 2x18-bit LVDS or CMOS output. */
+# define VR10_INTERFACE_2X18 (2 << 2)
+/** Enables 2x24-bit LVDS output */
+# define VR10_INTERFACE_2X24 (3 << 2)
+
+/*
+ * VR20 LCD Horizontal Display Size
+ */
+#define VR20 0x20
+
+/*
+ * LCD Vertical Display Size
+ */
+#define VR21 0x20
+
+/*
+ * Panel power down status
+ */
+#define VR30 0x30
+/** Read only bit indicating that the panel is not in a safe poweroff state. */
+# define VR30_PANEL_ON (1 << 15)
+
+#define VR40 0x40
+# define VR40_STALL_ENABLE (1 << 13)
+# define VR40_VERTICAL_INTERP_ENABLE (1 << 12)
+# define VR40_ENHANCED_PANEL_FITTING (1 << 11)
+# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10)
+# define VR40_AUTO_RATIO_ENABLE (1 << 9)
+# define VR40_CLOCK_GATING_ENABLE (1 << 8)
+
+/*
+ * Panel Fitting Vertical Ratio
+ * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
+ */
+#define VR41 0x41
+
+/*
+ * Panel Fitting Horizontal Ratio
+ * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
+ */
+#define VR42 0x42
+
+/*
+ * Horizontal Image Size
+ */
+#define VR43 0x43
+
+/* VR80 GPIO 0
+ */
+#define VR80 0x80
+#define VR81 0x81
+#define VR82 0x82
+#define VR83 0x83
+#define VR84 0x84
+#define VR85 0x85
+#define VR86 0x86
+#define VR87 0x87
+
+/* VR88 GPIO 8
+ */
+#define VR88 0x88
+
+/* Graphics BIOS scratch 0
+ */
+#define VR8E 0x8E
+# define VR8E_PANEL_TYPE_MASK (0xf << 0)
+# define VR8E_PANEL_INTERFACE_CMOS (0 << 4)
+# define VR8E_PANEL_INTERFACE_LVDS (1 << 4)
+# define VR8E_FORCE_DEFAULT_PANEL (1 << 5)
+
+/* Graphics BIOS scratch 1
+ */
+#define VR8F 0x8F
+# define VR8F_VCH_PRESENT (1 << 0)
+# define VR8F_DISPLAY_CONN (1 << 1)
+# define VR8F_POWER_MASK (0x3c)
+# define VR8F_POWER_POS (2)
+
+
+struct ivch_priv {
+ bool quiet;
+
+ uint16_t width, height;
+};
+
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo);
+
+/**
+ * Reads a register on the ivch.
+ *
+ * Each of the 256 registers are 16 bits long.
+ */
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ device_t adapter = dvo->i2c_bus;
+ u8 out_buf[1];
+ u8 in_buf[2];
+
+ struct iic_msg msgs[] = {
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = I2C_M_RD,
+ .len = 0,
+ },
+ {
+ .slave = 0 << 1,
+ .flags = I2C_M_NOSTART,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = I2C_M_RD | I2C_M_NOSTART,
+ .len = 2,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+
+ if (-iicbus_transfer(adapter, msgs, 3) == 0) {
+ *data = (in_buf[1] << 8) | in_buf[0];
+ return true;
+ }
+
+ if (!priv->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+ "%s:%02x.\n",
+ addr, device_get_nameunit(adapter), dvo->slave_addr);
+ }
+ return false;
+}
+
+/** Writes a 16-bit register on the ivch */
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ device_t adapter = dvo->i2c_bus;
+ u8 out_buf[3];
+ struct iic_msg msg = {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 3,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = data & 0xff;
+ out_buf[2] = data >> 8;
+
+ if (-iicbus_transfer(adapter, &msg, 1) == 0)
+ return true;
+
+ if (!priv->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, device_get_nameunit(adapter), dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/** Probes the given bus and slave address for an ivch */
+static bool ivch_init(struct intel_dvo_device *dvo,
+ device_t adapter)
+{
+ struct ivch_priv *priv;
+ uint16_t temp;
+
+ priv = malloc(sizeof(struct ivch_priv), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (priv == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = priv;
+ priv->quiet = true;
+
+ if (!ivch_read(dvo, VR00, &temp))
+ goto out;
+ priv->quiet = false;
+
+ /* Since the identification bits are probably zeroes, which doesn't seem
+ * very unique, check that the value in the base address field matches
+ * the address it's responding on.
+ */
+ if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+ DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
+ "(%d vs %d)\n",
+ (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+ goto out;
+ }
+
+ ivch_read(dvo, VR20, &priv->width);
+ ivch_read(dvo, VR21, &priv->height);
+
+ return true;
+
+out:
+ free(priv, DRM_MEM_KMS);
+ return false;
+}
+
+static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
+{
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 112000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+/** Sets the power state of the panel connected to the ivch */
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ int i;
+ uint16_t vr01, vr30, backlight;
+
+ /* Set the new power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return;
+
+ if (enable)
+ backlight = 1;
+ else
+ backlight = 0;
+ ivch_write(dvo, VR80, backlight);
+
+ if (enable)
+ vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
+ else
+ vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
+
+ ivch_write(dvo, VR01, vr01);
+
+ /* Wait for the panel to make its state transition */
+ for (i = 0; i < 100; i++) {
+ if (!ivch_read(dvo, VR30, &vr30))
+ break;
+
+ if (((vr30 & VR30_PANEL_ON) != 0) == enable)
+ break;
+ udelay(1000);
+ }
+ /* wait some more; vch may fail to resync sometimes without this */
+ udelay(16 * 1000);
+}
+
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint16_t vr01;
+
+ /* Set the new power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return false;
+
+ if (vr01 & VR01_LCD_ENABLE)
+ return true;
+ else
+ return false;
+}
+
+static void ivch_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ uint16_t vr40 = 0;
+ uint16_t vr01;
+
+ vr01 = 0;
+ vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
+ VR40_HORIZONTAL_INTERP_ENABLE);
+
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay) {
+ uint16_t x_ratio, y_ratio;
+
+ vr01 |= VR01_PANEL_FIT_ENABLE;
+ vr40 |= VR40_CLOCK_GATING_ENABLE;
+ x_ratio = (((mode->hdisplay - 1) << 16) /
+ (adjusted_mode->hdisplay - 1)) >> 2;
+ y_ratio = (((mode->vdisplay - 1) << 16) /
+ (adjusted_mode->vdisplay - 1)) >> 2;
+ ivch_write(dvo, VR42, x_ratio);
+ ivch_write(dvo, VR41, y_ratio);
+ } else {
+ vr01 &= ~VR01_PANEL_FIT_ENABLE;
+ vr40 &= ~VR40_CLOCK_GATING_ENABLE;
+ }
+ vr40 &= ~VR40_AUTO_RATIO_ENABLE;
+
+ ivch_write(dvo, VR01, vr01);
+ ivch_write(dvo, VR40, vr40);
+
+ ivch_dump_regs(dvo);
+}
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint16_t val;
+
+ ivch_read(dvo, VR00, &val);
+ DRM_LOG_KMS("VR00: 0x%04x\n", val);
+ ivch_read(dvo, VR01, &val);
+ DRM_LOG_KMS("VR01: 0x%04x\n", val);
+ ivch_read(dvo, VR30, &val);
+ DRM_LOG_KMS("VR30: 0x%04x\n", val);
+ ivch_read(dvo, VR40, &val);
+ DRM_LOG_KMS("VR40: 0x%04x\n", val);
+
+ /* GPIO registers */
+ ivch_read(dvo, VR80, &val);
+ DRM_LOG_KMS("VR80: 0x%04x\n", val);
+ ivch_read(dvo, VR81, &val);
+ DRM_LOG_KMS("VR81: 0x%04x\n", val);
+ ivch_read(dvo, VR82, &val);
+ DRM_LOG_KMS("VR82: 0x%04x\n", val);
+ ivch_read(dvo, VR83, &val);
+ DRM_LOG_KMS("VR83: 0x%04x\n", val);
+ ivch_read(dvo, VR84, &val);
+ DRM_LOG_KMS("VR84: 0x%04x\n", val);
+ ivch_read(dvo, VR85, &val);
+ DRM_LOG_KMS("VR85: 0x%04x\n", val);
+ ivch_read(dvo, VR86, &val);
+ DRM_LOG_KMS("VR86: 0x%04x\n", val);
+ ivch_read(dvo, VR87, &val);
+ DRM_LOG_KMS("VR87: 0x%04x\n", val);
+ ivch_read(dvo, VR88, &val);
+ DRM_LOG_KMS("VR88: 0x%04x\n", val);
+
+ /* Scratch register 0 - AIM Panel type */
+ ivch_read(dvo, VR8E, &val);
+ DRM_LOG_KMS("VR8E: 0x%04x\n", val);
+
+ /* Scratch register 1 - Status register */
+ ivch_read(dvo, VR8F, &val);
+ DRM_LOG_KMS("VR8F: 0x%04x\n", val);
+}
+
+static void ivch_destroy(struct intel_dvo_device *dvo)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+
+ if (priv) {
+ free(priv, DRM_MEM_KMS);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ivch_ops = {
+ .init = ivch_init,
+ .dpms = ivch_dpms,
+ .get_hw_state = ivch_get_hw_state,
+ .mode_valid = ivch_mode_valid,
+ .mode_set = ivch_mode_set,
+ .detect = ivch_detect,
+ .dump_regs = ivch_dump_regs,
+ .destroy = ivch_destroy,
+};
diff --git a/sys/dev/drm2/i915/dvo_ns2501.c b/sys/dev/drm2/i915/dvo_ns2501.c
new file mode 100644
index 0000000..655cb7d
--- /dev/null
+++ b/sys/dev/drm2/i915/dvo_ns2501.c
@@ -0,0 +1,601 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+ //I2CDevRec d;
+ bool quiet;
+ int reg_8_shadow;
+ int reg_8_set;
+ // Shadow registers for i915
+ int dvoc;
+ int pll_a;
+ int srcdim;
+ int fw_blc;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens
+ * laptops does not react on the i2c bus unless
+ * both the PLL is running and the display is configured in its native
+ * resolution.
+ * This function forces the DVO on, and stores the registers it touches.
+ * Afterwards, registers are restored to regular values.
+ *
+ * This is pretty much a hack, though it works.
+ * Without that, ns2501_readb and ns2501_writeb fail
+ * when switching the resolution.
+ */
+
+static void enable_dvo(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ device_t adapter = dvo->i2c_bus;
+ /*
+ * FIXME Linux<->FreeBSD: device_get_softc() returns a struct
+ * intel_iic_softc in reality, where struct intel_gmbus is
+ * the first member. struct intel_iic_softc is defined in
+ * intel_iic.c.
+ */
+ struct intel_gmbus *bus =
+ (struct intel_gmbus *)device_get_softc(adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
+
+ ns->dvoc = I915_READ(DVO_C);
+ ns->pll_a = I915_READ(_DPLL_A);
+ ns->srcdim = I915_READ(DVOC_SRCDIM);
+ ns->fw_blc = I915_READ(FW_BLC);
+
+ I915_WRITE(DVOC, 0x10004084);
+ I915_WRITE(_DPLL_A, 0xd0820000);
+ I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
+ I915_WRITE(FW_BLC, 0x1080304);
+
+ I915_WRITE(DVOC, 0x90004084);
+}
+
+/*
+ * Restore the I915 registers modified by the above
+ * trigger function.
+ */
+static void restore_dvo(struct intel_dvo_device *dvo)
+{
+ device_t adapter = dvo->i2c_bus;
+ /*
+ * FIXME Linux<->FreeBSD: device_get_softc() returns a struct
+ * intel_iic_softc in reality, where struct intel_gmbus is
+ * the first member. struct intel_iic_softc is defined in
+ * intel_iic.c.
+ */
+ struct intel_gmbus *bus =
+ (struct intel_gmbus *)device_get_softc(adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ I915_WRITE(DVOC, ns->dvoc);
+ I915_WRITE(_DPLL_A, ns->pll_a);
+ I915_WRITE(DVOC_SRCDIM, ns->srcdim);
+ I915_WRITE(FW_BLC, ns->fw_blc);
+}
+
+/*
+** Read a register from the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ device_t adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct iic_msg msgs[] = {
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (-iicbus_transfer(adapter, msgs, 2) == 0) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS
+ ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
+ device_get_nameunit(adapter), dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/*
+** Write a register to the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ device_t adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+
+ struct iic_msg msg = {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (-iicbus_transfer(adapter, &msg, 1) == 0) {
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+ addr, device_get_nameunit(adapter), dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* National Semiconductor 2501 driver for chip on i2c bus
+ * scan for the chip on the bus.
+ * Hope the VBIOS initialized the PLL correctly so we can
+ * talk to it. If not, it will not be seen and not detected.
+ * Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+ device_t adapter)
+{
+ /* this will detect the NS2501 chip on the specified i2c bus */
+ struct ns2501_priv *ns;
+ unsigned char ch;
+
+ ns = malloc(sizeof(struct ns2501_priv), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (ns == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ns;
+ ns->quiet = true;
+
+ if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_VID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, device_get_nameunit(adapter), dvo->slave_addr);
+ goto out;
+ }
+
+ if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_DID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, device_get_nameunit(adapter), dvo->slave_addr);
+ goto out;
+ }
+ ns->quiet = false;
+ ns->reg_8_set = 0;
+ ns->reg_8_shadow =
+ NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+ DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+ return true;
+
+out:
+ free(ns, DRM_MEM_KMS);
+ return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+ /*
+ * This is a Laptop display, it doesn't have hotplugging.
+ * Even if not, the detection bit of the 2501 is unreliable as
+ * it only works for some display types.
+ * It is even more unreliable as the PLL must be active for
+ * allowing reading from the chiop.
+ */
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS
+ ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+ __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Currently, these are all the modes I have data from.
+ * More might exist. Unclear how to find the native resolution
+ * of the panel in here so we could always accept it
+ * by disabling the scaler.
+ */
+ if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+ (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+ return MODE_OK;
+ } else {
+ return MODE_ONE_SIZE; /* Is this a reasonable error? */
+ }
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ DRM_DEBUG_KMS
+ ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+ __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Where do I find the native resolution for which scaling is not required???
+ *
+ * First trigger the DVO on as otherwise the chip does not appear on the i2c
+ * bus.
+ */
+ do {
+ ok = true;
+
+ if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+ /* mode 277 */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+ DRM_DEBUG_KMS("%s: switching to 800x600\n",
+ __FUNCTION__);
+
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+ * I'm just copying it here over.
+ * This also means that I cannot support any other modes
+ * except the ones supported by the bios.
+ */
+ ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
+ ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
+ ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0x27);
+ ok &= ns2501_writeb(dvo, 0x81, 0x03);
+ ok &= ns2501_writeb(dvo, 0x82, 0x41);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+ /* mode 274 */
+ DRM_DEBUG_KMS("%s: switching to 640x480\n",
+ __FUNCTION__);
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+ * I'm just copying it here over.
+ * This also means that I cannot support any other modes
+ * except the ones supported by the bios.
+ */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+ ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+ ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+ ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0xff);
+ ok &= ns2501_writeb(dvo, 0x81, 0x07);
+ ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+ /* mode 280 */
+ DRM_DEBUG_KMS("%s: switching to 1024x768\n",
+ __FUNCTION__);
+ /*
+ * This might or might not work, actually. I'm silently
+ * assuming here that the native panel resolution is
+ * 1024x768. If not, then this leaves the scaler disabled
+ * generating a picture that is likely not the expected.
+ *
+ * Problem is that I do not know where to take the panel
+ * dimensions from.
+ *
+ * Enable the bypass, scaling not required.
+ *
+ * The scaler registers are irrelevant here....
+ *
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ ok &= ns2501_writeb(dvo, 0x37, 0x44);
+ } else {
+ /*
+ * Data not known. Bummer!
+ * Hopefully, the code should not go here
+ * as mode_OK delivered no other modes.
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ }
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+ /*
+ * Restore the old i915 registers before
+ * forcing the ns2501 on.
+ */
+ if (restore)
+ restore_dvo(dvo);
+}
+
+/* set the NS2501 power state */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+ unsigned char ch;
+
+ if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+ return false;
+
+ if (ch & NS2501_8_PD)
+ return true;
+ else
+ return false;
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ unsigned char ch;
+
+ DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
+ __FUNCTION__, enable);
+
+ ch = ns->reg_8_shadow;
+
+ if (enable)
+ ch |= NS2501_8_PD;
+ else
+ ch &= ~NS2501_8_PD;
+
+ if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+ ns->reg_8_set = 1;
+ ns->reg_8_shadow = ch;
+
+ do {
+ ok = true;
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+ ok &=
+ ns2501_writeb(dvo, 0x34,
+ enable ? 0x03 : 0x00);
+ ok &=
+ ns2501_writeb(dvo, 0x35,
+ enable ? 0xff : 0x00);
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+
+ if (restore)
+ restore_dvo(dvo);
+ }
+}
+
+static void ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+ DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+ DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG8, &val);
+ DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG9, &val);
+ DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REGC, &val);
+ DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+
+ if (ns) {
+ free(ns, DRM_MEM_KMS);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+ .init = ns2501_init,
+ .detect = ns2501_detect,
+ .mode_valid = ns2501_mode_valid,
+ .mode_set = ns2501_mode_set,
+ .dpms = ns2501_dpms,
+ .get_hw_state = ns2501_get_hw_state,
+ .dump_regs = ns2501_dump_regs,
+ .destroy = ns2501_destroy,
+};
diff --git a/sys/dev/drm2/i915/dvo_sil164.c b/sys/dev/drm2/i915/dvo_sil164.c
new file mode 100644
index 0000000..1976313
--- /dev/null
+++ b/sys/dev/drm2/i915/dvo_sil164.c
@@ -0,0 +1,282 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "dvo.h"
+
+#define SIL164_VID 0x0001
+#define SIL164_DID 0x0006
+
+#define SIL164_VID_LO 0x00
+#define SIL164_VID_HI 0x01
+#define SIL164_DID_LO 0x02
+#define SIL164_DID_HI 0x03
+#define SIL164_REV 0x04
+#define SIL164_RSVD 0x05
+#define SIL164_FREQ_LO 0x06
+#define SIL164_FREQ_HI 0x07
+
+#define SIL164_REG8 0x08
+#define SIL164_8_VEN (1<<5)
+#define SIL164_8_HEN (1<<4)
+#define SIL164_8_DSEL (1<<3)
+#define SIL164_8_BSEL (1<<2)
+#define SIL164_8_EDGE (1<<1)
+#define SIL164_8_PD (1<<0)
+
+#define SIL164_REG9 0x09
+#define SIL164_9_VLOW (1<<7)
+#define SIL164_9_MSEL_MASK (0x7<<4)
+#define SIL164_9_TSEL (1<<3)
+#define SIL164_9_RSEN (1<<2)
+#define SIL164_9_HTPLG (1<<1)
+#define SIL164_9_MDI (1<<0)
+
+#define SIL164_REGC 0x0c
+
+struct sil164_priv {
+ //I2CDevRec d;
+ bool quiet;
+};
+
+#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
+
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+ device_t adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct iic_msg msgs[] = {
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (-iicbus_transfer(adapter, msgs, 2) == 0) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!sil->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, device_get_nameunit(adapter), dvo->slave_addr);
+ }
+ return false;
+}
+
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+ device_t adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ struct iic_msg msg = {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (-iicbus_transfer(adapter, &msg, 1) == 0)
+ return true;
+
+ if (!sil->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, device_get_nameunit(adapter), dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* Silicon Image 164 driver for chip on i2c bus */
+static bool sil164_init(struct intel_dvo_device *dvo,
+ device_t adapter)
+{
+ /* this will detect the SIL164 chip on the specified i2c bus */
+ struct sil164_priv *sil;
+ unsigned char ch;
+
+ sil = malloc(sizeof(struct sil164_priv), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (sil == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = sil;
+ sil->quiet = true;
+
+ if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
+ goto out;
+
+ if (ch != (SIL164_VID & 0xff)) {
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
+ ch, device_get_nameunit(adapter), dvo->slave_addr);
+ goto out;
+ }
+
+ if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
+ goto out;
+
+ if (ch != (SIL164_DID & 0xff)) {
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
+ ch, device_get_nameunit(adapter), dvo->slave_addr);
+ goto out;
+ }
+ sil->quiet = false;
+
+ DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
+ return true;
+
+out:
+ free(sil, DRM_MEM_KMS);
+ return false;
+}
+
+static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
+{
+ uint8_t reg9;
+
+ sil164_readb(dvo, SIL164_REG9, &reg9);
+
+ if (reg9 & SIL164_9_HTPLG)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void sil164_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* As long as the basics are set up, since we don't have clock
+ * dependencies in the mode setup, we can just leave the
+ * registers alone and everything will work fine.
+ */
+ /* recommended programming sequence from doc */
+ /*sil164_writeb(sil, 0x08, 0x30);
+ sil164_writeb(sil, 0x09, 0x00);
+ sil164_writeb(sil, 0x0a, 0x90);
+ sil164_writeb(sil, 0x0c, 0x89);
+ sil164_writeb(sil, 0x08, 0x31);*/
+ /* don't do much */
+ return;
+}
+
+/* set the SIL164 power state */
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ int ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (ret == false)
+ return;
+
+ if (enable)
+ ch |= SIL164_8_PD;
+ else
+ ch &= ~SIL164_8_PD;
+
+ sil164_writeb(dvo, SIL164_REG8, ch);
+ return;
+}
+
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+ int ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (ret == false)
+ return false;
+
+ if (ch & SIL164_8_PD)
+ return true;
+ else
+ return false;
+}
+
+static void sil164_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ sil164_readb(dvo, SIL164_FREQ_LO, &val);
+ DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_FREQ_HI, &val);
+ DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REG8, &val);
+ DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REG9, &val);
+ DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REGC, &val);
+ DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
+}
+
+static void sil164_destroy(struct intel_dvo_device *dvo)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+
+ if (sil) {
+ free(sil, DRM_MEM_KMS);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops sil164_ops = {
+ .init = sil164_init,
+ .detect = sil164_detect,
+ .mode_valid = sil164_mode_valid,
+ .mode_set = sil164_mode_set,
+ .dpms = sil164_dpms,
+ .get_hw_state = sil164_get_hw_state,
+ .dump_regs = sil164_dump_regs,
+ .destroy = sil164_destroy,
+};
diff --git a/sys/dev/drm2/i915/dvo_tfp410.c b/sys/dev/drm2/i915/dvo_tfp410.c
new file mode 100644
index 0000000..1bd1bff
--- /dev/null
+++ b/sys/dev/drm2/i915/dvo_tfp410.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright © 2007 Dave Mueller
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Mueller <dave.mueller@gmx.ch>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "dvo.h"
+
+/* register definitions according to the TFP410 data sheet */
+#define TFP410_VID 0x014C
+#define TFP410_DID 0x0410
+
+#define TFP410_VID_LO 0x00
+#define TFP410_VID_HI 0x01
+#define TFP410_DID_LO 0x02
+#define TFP410_DID_HI 0x03
+#define TFP410_REV 0x04
+
+#define TFP410_CTL_1 0x08
+#define TFP410_CTL_1_TDIS (1<<6)
+#define TFP410_CTL_1_VEN (1<<5)
+#define TFP410_CTL_1_HEN (1<<4)
+#define TFP410_CTL_1_DSEL (1<<3)
+#define TFP410_CTL_1_BSEL (1<<2)
+#define TFP410_CTL_1_EDGE (1<<1)
+#define TFP410_CTL_1_PD (1<<0)
+
+#define TFP410_CTL_2 0x09
+#define TFP410_CTL_2_VLOW (1<<7)
+#define TFP410_CTL_2_MSEL_MASK (0x7<<4)
+#define TFP410_CTL_2_MSEL (1<<4)
+#define TFP410_CTL_2_TSEL (1<<3)
+#define TFP410_CTL_2_RSEN (1<<2)
+#define TFP410_CTL_2_HTPLG (1<<1)
+#define TFP410_CTL_2_MDI (1<<0)
+
+#define TFP410_CTL_3 0x0A
+#define TFP410_CTL_3_DK_MASK (0x7<<5)
+#define TFP410_CTL_3_DK (1<<5)
+#define TFP410_CTL_3_DKEN (1<<4)
+#define TFP410_CTL_3_CTL_MASK (0x7<<1)
+#define TFP410_CTL_3_CTL (1<<1)
+
+#define TFP410_USERCFG 0x0B
+
+#define TFP410_DE_DLY 0x32
+
+#define TFP410_DE_CTL 0x33
+#define TFP410_DE_CTL_DEGEN (1<<6)
+#define TFP410_DE_CTL_VSPOL (1<<5)
+#define TFP410_DE_CTL_HSPOL (1<<4)
+#define TFP410_DE_CTL_DEDLY8 (1<<0)
+
+#define TFP410_DE_TOP 0x34
+
+#define TFP410_DE_CNT_LO 0x36
+#define TFP410_DE_CNT_HI 0x37
+
+#define TFP410_DE_LIN_LO 0x38
+#define TFP410_DE_LIN_HI 0x39
+
+#define TFP410_H_RES_LO 0x3A
+#define TFP410_H_RES_HI 0x3B
+
+#define TFP410_V_RES_LO 0x3C
+#define TFP410_V_RES_HI 0x3D
+
+struct tfp410_priv {
+ bool quiet;
+};
+
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ device_t adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct iic_msg msgs[] = {
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .slave = dvo->slave_addr << 1,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (-iicbus_transfer(adapter, msgs, 2) == 0) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!tfp->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, device_get_nameunit(adapter), dvo->slave_addr);
+ }
+ return false;
+}
+
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ device_t adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ struct iic_msg msg = {
+ .slave = dvo->slave_addr << 1,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (-iicbus_transfer(adapter, &msg, 1) == 0)
+ return true;
+
+ if (!tfp->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, device_get_nameunit(adapter), dvo->slave_addr);
+ }
+
+ return false;
+}
+
+static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
+{
+ uint8_t ch1, ch2;
+
+ if (tfp410_readb(dvo, addr+0, &ch1) &&
+ tfp410_readb(dvo, addr+1, &ch2))
+ return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
+
+ return -1;
+}
+
+/* Ti TFP410 driver for chip on i2c bus */
+static bool tfp410_init(struct intel_dvo_device *dvo,
+ device_t adapter)
+{
+ /* this will detect the tfp410 chip on the specified i2c bus */
+ struct tfp410_priv *tfp;
+ int id;
+
+ tfp = malloc(sizeof(struct tfp410_priv), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (tfp == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = tfp;
+ tfp->quiet = true;
+
+ if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
+ DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+ "Slave %d.\n",
+ id, device_get_nameunit(adapter), dvo->slave_addr);
+ goto out;
+ }
+
+ if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
+ DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+ "Slave %d.\n",
+ id, device_get_nameunit(adapter), dvo->slave_addr);
+ goto out;
+ }
+ tfp->quiet = false;
+ return true;
+out:
+ free(tfp, DRM_MEM_KMS);
+ return false;
+}
+
+static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
+{
+ enum drm_connector_status ret = connector_status_disconnected;
+ uint8_t ctl2;
+
+ if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
+ if (ctl2 & TFP410_CTL_2_RSEN)
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+ }
+
+ return ret;
+}
+
+static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void tfp410_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* As long as the basics are set up, since we don't have clock dependencies
+ * in the mode setup, we can just leave the registers alone and everything
+ * will work fine.
+ */
+ /* don't do much */
+ return;
+}
+
+/* set the tfp410 power state */
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return;
+
+ if (enable)
+ ctl1 |= TFP410_CTL_1_PD;
+ else
+ ctl1 &= ~TFP410_CTL_1_PD;
+
+ tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
+}
+
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return false;
+
+ if (ctl1 & TFP410_CTL_1_PD)
+ return true;
+ else
+ return false;
+}
+
+static void tfp410_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val, val2;
+
+ tfp410_readb(dvo, TFP410_REV, &val);
+ DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_1, &val);
+ DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_2, &val);
+ DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_3, &val);
+ DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_USERCFG, &val);
+ DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_DLY, &val);
+ DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_CTL, &val);
+ DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_TOP, &val);
+ DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
+ tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
+ DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
+ tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
+ DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_H_RES_LO, &val);
+ tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
+ DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_V_RES_LO, &val);
+ tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
+ DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+}
+
+static void tfp410_destroy(struct intel_dvo_device *dvo)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+
+ if (tfp) {
+ free(tfp, DRM_MEM_KMS);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops tfp410_ops = {
+ .init = tfp410_init,
+ .detect = tfp410_detect,
+ .mode_valid = tfp410_mode_valid,
+ .mode_set = tfp410_mode_set,
+ .dpms = tfp410_dpms,
+ .get_hw_state = tfp410_get_hw_state,
+ .dump_regs = tfp410_dump_regs,
+ .destroy = tfp410_destroy,
+};
diff --git a/sys/dev/drm2/i915/i915_debug.c b/sys/dev/drm2/i915/i915_debug.c
index 121daec..29693af 100644
--- a/sys/dev/drm2/i915/i915_debug.c
+++ b/sys/dev/drm2/i915/i915_debug.c
@@ -38,9 +38,12 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
+#define seq_printf(m, fmt, ...) sbuf_printf((m), (fmt), ##__VA_ARGS__)
+
+//#if defined(CONFIG_DEBUG_FS)
+
enum {
ACTIVE_LIST,
- FLUSHING_LIST,
INACTIVE_LIST,
PINNED_LIST,
};
@@ -54,29 +57,13 @@ static int i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)
{
const struct intel_device_info *info = INTEL_INFO(dev);
- sbuf_printf(m, "gen: %d\n", info->gen);
- if (HAS_PCH_SPLIT(dev))
- sbuf_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
-#define B(x) sbuf_printf(m, #x ": %s\n", yesno(info->x))
- B(is_mobile);
- B(is_i85x);
- B(is_i915g);
- B(is_i945gm);
- B(is_g33);
- B(need_gfx_hws);
- B(is_g4x);
- B(is_pineview);
- B(has_fbc);
- B(has_pipe_cxsr);
- B(has_hotplug);
- B(cursor_needs_physical);
- B(has_overlay);
- B(overlay_needs_physical);
- B(supports_tv);
- B(has_bsd_ring);
- B(has_blt_ring);
- B(has_llc);
-#undef B
+ seq_printf(m, "gen: %d\n", info->gen);
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define DEV_INFO_SEP ;
+ DEV_INFO_FLAGS;
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
return 0;
}
@@ -114,27 +101,29 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj)
{
-
- sbuf_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+ seq_printf(m, "%pK: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
- obj->last_rendering_seqno,
+ obj->last_read_seqno,
+ obj->last_write_seqno,
obj->last_fenced_seqno,
cache_level_str(obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
- sbuf_printf(m, " (name: %d)", obj->base.name);
+ seq_printf(m, " (name: %d)", obj->base.name);
+ if (obj->pin_count)
+ seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->pin_display)
- sbuf_printf(m, " (display)");
+ seq_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
- sbuf_printf(m, " (fence: %d)", obj->fence_reg);
+ seq_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL)
- sbuf_printf(m, " (gtt offset: %08x, size: %08x)",
+ seq_printf(m, " (gtt offset: %08x, size: %08x)",
obj->gtt_offset, (unsigned int)obj->gtt_space->size);
if (obj->pin_mappable || obj->fault_mappable) {
char s[3], *t = s;
@@ -143,10 +132,10 @@ describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj)
if (obj->fault_mappable)
*t++ = 'f';
*t = '\0';
- sbuf_printf(m, " (%s mappable)", s);
+ seq_printf(m, " (%s mappable)", s);
}
if (obj->ring != NULL)
- sbuf_printf(m, " (%s)", obj->ring->name);
+ seq_printf(m, " (%s)", obj->ring->name);
}
static int i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
@@ -163,17 +152,13 @@ static int i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, voi
switch (list) {
case ACTIVE_LIST:
- sbuf_printf(m, "Active:\n");
+ seq_printf(m, "Active:\n");
head = &dev_priv->mm.active_list;
break;
case INACTIVE_LIST:
- sbuf_printf(m, "Inactive:\n");
+ seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
- case FLUSHING_LIST:
- sbuf_printf(m, "Flushing:\n");
- head = &dev_priv->mm.flushing_list;
- break;
default:
DRM_UNLOCK(dev);
return -EINVAL;
@@ -181,16 +166,16 @@ static int i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, voi
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, head, mm_list) {
- sbuf_printf(m, " ");
+ seq_printf(m, " ");
describe_obj(m, obj);
- sbuf_printf(m, "\n");
+ seq_printf(m, "\n");
total_obj_size += obj->base.size;
total_gtt_size += obj->gtt_space->size;
count++;
}
DRM_UNLOCK(dev);
- sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
}
@@ -209,34 +194,42 @@ static int i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, voi
static int i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 count, mappable_count;
- size_t size, mappable_size;
+ u32 count, mappable_count, purgeable_count;
+ size_t size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
if (sx_xlock_sig(&dev->dev_struct_lock))
return -EINTR;
- sbuf_printf(m, "%u objects, %zu bytes\n",
+
+ seq_printf(m, "%u objects, %zu bytes\n",
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.gtt_list, gtt_list);
- sbuf_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ count_objects(&dev_priv->mm.bound_list, gtt_list);
+ seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.active_list, mm_list);
- count_objects(&dev_priv->mm.flushing_list, mm_list);
- sbuf_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.inactive_list, mm_list);
- sbuf_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
+ size = count = purgeable_size = purgeable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
+ size += obj->base.size, ++count;
+ if (obj->madv == I915_MADV_DONTNEED)
+ purgeable_size += obj->base.size, ++purgeable_count;
+ }
+ seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+
size = count = mappable_size = mappable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if (obj->fault_mappable) {
size += obj->gtt_space->size;
++count;
@@ -245,13 +238,19 @@ static int i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *da
mappable_size += obj->gtt_space->size;
++mappable_count;
}
+ if (obj->madv == I915_MADV_DONTNEED) {
+ purgeable_size += obj->base.size;
+ ++purgeable_count;
+ }
}
- sbuf_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ seq_printf(m, "%u purgeable objects, %zu bytes\n",
+ purgeable_count, purgeable_size);
+ seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
mappable_count, mappable_size);
- sbuf_printf(m, "%u fault mappable objects, %zu bytes\n",
+ seq_printf(m, "%u fault mappable objects, %zu bytes\n",
count, size);
- sbuf_printf(m, "%zu [%zu] gtt total\n",
+ seq_printf(m, "%zu [%zu] gtt total\n",
dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
DRM_UNLOCK(dev);
@@ -271,13 +270,13 @@ static int i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data)
return -EINTR;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if (list == PINNED_LIST && obj->pin_count == 0)
continue;
- sbuf_printf(m, " ");
+ seq_printf(m, " ");
describe_obj(m, obj);
- sbuf_printf(m, "\n");
+ seq_printf(m, "\n");
total_obj_size += obj->base.size;
total_gtt_size += obj->gtt_space->size;
count++;
@@ -285,7 +284,7 @@ static int i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data)
DRM_UNLOCK(dev);
- sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
@@ -303,31 +302,31 @@ static int i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *
mtx_lock(&dev->event_lock);
work = crtc->unpin_work;
if (work == NULL) {
- sbuf_printf(m, "No flip due on pipe %c (plane %c)\n",
+ seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
- if (!work->pending) {
- sbuf_printf(m, "Flip queued on pipe %c (plane %c)\n",
+ if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+ seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
- sbuf_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
+ seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
if (work->enable_stall_check)
- sbuf_printf(m, "Stall check enabled, ");
+ seq_printf(m, "Stall check enabled, ");
else
- sbuf_printf(m, "Stall check waiting for page flip ioctl, ");
- sbuf_printf(m, "%d prepares\n", work->pending);
+ seq_printf(m, "Stall check waiting for page flip ioctl, ");
+ seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
if (obj)
- sbuf_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
if (work->pending_flip_obj) {
struct drm_i915_gem_object *obj = work->pending_flip_obj;
if (obj)
- sbuf_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
}
mtx_unlock(&dev->event_lock);
@@ -339,41 +338,23 @@ static int i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *
static int i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
struct drm_i915_gem_request *gem_request;
- int count;
+ int count, i;
if (sx_xlock_sig(&dev->dev_struct_lock))
return -EINTR;
count = 0;
- if (!list_empty(&dev_priv->rings[RCS].request_list)) {
- sbuf_printf(m, "Render requests:\n");
- list_for_each_entry(gem_request,
- &dev_priv->rings[RCS].request_list,
- list) {
- sbuf_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
- }
- count++;
- }
- if (!list_empty(&dev_priv->rings[VCS].request_list)) {
- sbuf_printf(m, "BSD requests:\n");
- list_for_each_entry(gem_request,
- &dev_priv->rings[VCS].request_list,
- list) {
- sbuf_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
- }
- count++;
- }
- if (!list_empty(&dev_priv->rings[BCS].request_list)) {
- sbuf_printf(m, "BLT requests:\n");
+ for_each_ring(ring, dev_priv, i) {
+ if (list_empty(&ring->request_list))
+ continue;
+
+ seq_printf(m, "%s requests:\n", ring->name);
list_for_each_entry(gem_request,
- &dev_priv->rings[BCS].request_list,
+ &ring->request_list,
list) {
- sbuf_printf(m, " %d @ %d\n",
+ seq_printf(m, " %d @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
}
@@ -382,7 +363,7 @@ static int i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *d
DRM_UNLOCK(dev);
if (count == 0)
- sbuf_printf(m, "No requests\n");
+ seq_printf(m, "No requests\n");
return 0;
}
@@ -391,21 +372,22 @@ static void i915_ring_seqno_info(struct sbuf *m,
struct intel_ring_buffer *ring)
{
if (ring->get_seqno) {
- sbuf_printf(m, "Current sequence (%s): %d\n",
- ring->name, ring->get_seqno(ring));
+ seq_printf(m, "Current sequence (%s): %d\n",
+ ring->name, ring->get_seqno(ring, false));
}
}
static int i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int i;
if (sx_xlock_sig(&dev->dev_struct_lock))
return -EINTR;
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_ring_seqno_info(m, &dev_priv->rings[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_seqno_info(m, ring);
DRM_UNLOCK(dev);
@@ -416,89 +398,90 @@ static int i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *dat
static int i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int i, pipe;
if (sx_xlock_sig(&dev->dev_struct_lock))
return -EINTR;
if (IS_VALLEYVIEW(dev)) {
- sbuf_printf(m, "Display IER:\t%08x\n",
+ seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
- sbuf_printf(m, "Display IIR:\t%08x\n",
+ seq_printf(m, "Display IIR:\t%08x\n",
I915_READ(VLV_IIR));
- sbuf_printf(m, "Display IIR_RW:\t%08x\n",
+ seq_printf(m, "Display IIR_RW:\t%08x\n",
I915_READ(VLV_IIR_RW));
- sbuf_printf(m, "Display IMR:\t%08x\n",
+ seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
for_each_pipe(pipe)
- sbuf_printf(m, "Pipe %c stat:\t%08x\n",
+ seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
- sbuf_printf(m, "Master IER:\t%08x\n",
+ seq_printf(m, "Master IER:\t%08x\n",
I915_READ(VLV_MASTER_IER));
- sbuf_printf(m, "Render IER:\t%08x\n",
+ seq_printf(m, "Render IER:\t%08x\n",
I915_READ(GTIER));
- sbuf_printf(m, "Render IIR:\t%08x\n",
+ seq_printf(m, "Render IIR:\t%08x\n",
I915_READ(GTIIR));
- sbuf_printf(m, "Render IMR:\t%08x\n",
+ seq_printf(m, "Render IMR:\t%08x\n",
I915_READ(GTIMR));
- sbuf_printf(m, "PM IER:\t\t%08x\n",
+ seq_printf(m, "PM IER:\t\t%08x\n",
I915_READ(GEN6_PMIER));
- sbuf_printf(m, "PM IIR:\t\t%08x\n",
+ seq_printf(m, "PM IIR:\t\t%08x\n",
I915_READ(GEN6_PMIIR));
- sbuf_printf(m, "PM IMR:\t\t%08x\n",
+ seq_printf(m, "PM IMR:\t\t%08x\n",
I915_READ(GEN6_PMIMR));
- sbuf_printf(m, "Port hotplug:\t%08x\n",
+ seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
- sbuf_printf(m, "DPFLIPSTAT:\t%08x\n",
+ seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
- sbuf_printf(m, "DPINVGTT:\t%08x\n",
+ seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
} else if (!HAS_PCH_SPLIT(dev)) {
- sbuf_printf(m, "Interrupt enable: %08x\n",
+ seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
- sbuf_printf(m, "Interrupt identity: %08x\n",
+ seq_printf(m, "Interrupt identity: %08x\n",
I915_READ(IIR));
- sbuf_printf(m, "Interrupt mask: %08x\n",
+ seq_printf(m, "Interrupt mask: %08x\n",
I915_READ(IMR));
for_each_pipe(pipe)
- sbuf_printf(m, "Pipe %c stat: %08x\n",
+ seq_printf(m, "Pipe %c stat: %08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
} else {
- sbuf_printf(m, "North Display Interrupt enable: %08x\n",
+ seq_printf(m, "North Display Interrupt enable: %08x\n",
I915_READ(DEIER));
- sbuf_printf(m, "North Display Interrupt identity: %08x\n",
+ seq_printf(m, "North Display Interrupt identity: %08x\n",
I915_READ(DEIIR));
- sbuf_printf(m, "North Display Interrupt mask: %08x\n",
+ seq_printf(m, "North Display Interrupt mask: %08x\n",
I915_READ(DEIMR));
- sbuf_printf(m, "South Display Interrupt enable: %08x\n",
+ seq_printf(m, "South Display Interrupt enable: %08x\n",
I915_READ(SDEIER));
- sbuf_printf(m, "South Display Interrupt identity: %08x\n",
+ seq_printf(m, "South Display Interrupt identity: %08x\n",
I915_READ(SDEIIR));
- sbuf_printf(m, "South Display Interrupt mask: %08x\n",
+ seq_printf(m, "South Display Interrupt mask: %08x\n",
I915_READ(SDEIMR));
- sbuf_printf(m, "Graphics Interrupt enable: %08x\n",
+ seq_printf(m, "Graphics Interrupt enable: %08x\n",
I915_READ(GTIER));
- sbuf_printf(m, "Graphics Interrupt identity: %08x\n",
+ seq_printf(m, "Graphics Interrupt identity: %08x\n",
I915_READ(GTIIR));
- sbuf_printf(m, "Graphics Interrupt mask: %08x\n",
+ seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- sbuf_printf(m, "Interrupts received: %d\n",
+ seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for_each_ring(ring, dev_priv, i) {
if (IS_GEN6(dev) || IS_GEN7(dev)) {
- sbuf_printf(m,
+ seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
- dev_priv->rings[i].name, I915_READ_IMR(&dev_priv->rings[i]));
+ ring->name, I915_READ_IMR(ring));
}
- i915_ring_seqno_info(m, &dev_priv->rings[i]);
+ i915_ring_seqno_info(m, ring);
}
DRM_UNLOCK(dev);
@@ -513,17 +496,18 @@ static int i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void
if (sx_xlock_sig(&dev->dev_struct_lock))
return -EINTR;
- sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
- sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
+ seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
+ seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
- sbuf_printf(m, "Fenced object[%2d] = ", i);
+ seq_printf(m, "Fence %d, pin count = %d, object = ",
+ i, dev_priv->fence_regs[i].pin_count);
if (obj == NULL)
- sbuf_printf(m, "unused");
+ seq_printf(m, "unused");
else
describe_obj(m, obj);
- sbuf_printf(m, "\n");
+ seq_printf(m, "\n");
}
DRM_UNLOCK(dev);
@@ -537,13 +521,13 @@ static int i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)
const volatile u32 __iomem *hws;
int i;
- ring = &dev_priv->rings[(uintptr_t)data];
- hws = (volatile u32 *)ring->status_page.page_addr;
+ ring = &dev_priv->ring[(uintptr_t)data];
+ hws = (volatile u32 __iomem *)ring->status_page.page_addr;
if (hws == NULL)
return 0;
for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
- sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
i * 4,
hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
}
@@ -553,9 +537,9 @@ static int i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)
static const char *ring_str(int ring)
{
switch (ring) {
- case RCS: return " render";
- case VCS: return " bsd";
- case BCS: return " blt";
+ case RCS: return "render";
+ case VCS: return "bsd";
+ case BCS: return "blt";
default: return "";
}
}
@@ -596,15 +580,15 @@ static void print_error_buffers(struct sbuf *m,
int count)
{
- sbuf_printf(m, "%s [%d]:\n", name, count);
+ seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- sbuf_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
err->write_domain,
- err->seqno,
+ err->rseqno, err->wseqno,
pin_flag(err->pinned),
tiling_flag(err->tiling),
dirty_flag(err->dirty),
@@ -614,11 +598,11 @@ static void print_error_buffers(struct sbuf *m,
cache_level_str(err->cache_level));
if (err->name)
- sbuf_printf(m, " (name: %d)", err->name);
+ seq_printf(m, " (name: %d)", err->name);
if (err->fence_reg != I915_FENCE_REG_NONE)
- sbuf_printf(m, " (fence: %d)", err->fence_reg);
+ seq_printf(m, " (fence: %d)", err->fence_reg);
- sbuf_printf(m, "\n");
+ seq_printf(m, "\n");
err++;
}
}
@@ -628,34 +612,36 @@ static void i915_ring_error_state(struct sbuf *m,
struct drm_i915_error_state *error,
unsigned ring)
{
-
MPASS((ring < I915_NUM_RINGS)); /* shut up confused gcc */
- sbuf_printf(m, "%s command stream:\n", ring_str(ring));
- sbuf_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
- sbuf_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
- sbuf_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
- sbuf_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
- sbuf_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
- sbuf_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
- if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
- sbuf_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
- sbuf_printf(m, " BBADDR: 0x%08jx\n", (uintmax_t)error->bbaddr);
- }
+ seq_printf(m, "%s command stream:\n", ring_str(ring));
+ seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
+ seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
+ seq_printf(m, " BBADDR: 0x%08jx\n", error->bbaddr);
+
if (INTEL_INFO(dev)->gen >= 4)
- sbuf_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
- sbuf_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
- sbuf_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
- sbuf_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
- sbuf_printf(m, " SYNC_0: 0x%08x\n",
- error->semaphore_mboxes[ring][0]);
- sbuf_printf(m, " SYNC_1: 0x%08x\n",
- error->semaphore_mboxes[ring][1]);
+ seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
+ seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+ seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][0],
+ error->semaphore_seqno[ring][0]);
+ seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][1],
+ error->semaphore_seqno[ring][1]);
}
- sbuf_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
- sbuf_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
- sbuf_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
- sbuf_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+ seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
+ seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+ seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
static int i915_error_state(struct drm_device *dev, struct sbuf *m,
@@ -671,27 +657,38 @@ static int i915_error_state(struct drm_device *dev, struct sbuf *m,
if (error != NULL)
refcount_acquire(&error->ref);
mtx_unlock(&dev_priv->error_lock);
+
if (!error) {
- sbuf_printf(m, "no error state collected\n");
+ seq_printf(m, "no error state collected\n");
return 0;
}
- sbuf_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec,
+ seq_printf(m, "Time: %jd s %jd us\n", (intmax_t)error->time.tv_sec,
(intmax_t)error->time.tv_usec);
- sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
- sbuf_printf(m, "EIR: 0x%08x\n", error->eir);
- sbuf_printf(m, "IER: 0x%08x\n", error->ier);
- sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ seq_printf(m, "Kernel: %s\n", version);
+ seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ seq_printf(m, "IER: 0x%08x\n", error->ier);
+ seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+ seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+ seq_printf(m, "CCID: 0x%08x\n", error->ccid);
for (i = 0; i < dev_priv->num_fence_regs; i++)
- sbuf_printf(m, " fence[%d] = %08jx\n", i,
+ seq_printf(m, " fence[%d] = %08jx\n", i,
(uintmax_t)error->fence[i]);
+ for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+ seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
+
if (INTEL_INFO(dev)->gen >= 6) {
- sbuf_printf(m, "ERROR: 0x%08x\n", error->error);
- sbuf_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+ seq_printf(m, "ERROR: 0x%08x\n", error->error);
+ seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
+ if (INTEL_INFO(dev)->gen == 7)
+ seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
for_each_ring(ring, dev_priv, i)
i915_ring_error_state(m, dev, error, i);
@@ -709,25 +706,24 @@ static int i915_error_state(struct drm_device *dev, struct sbuf *m,
struct drm_i915_error_object *obj;
if ((obj = error->ring[i].batchbuffer)) {
- sbuf_printf(m, "%s --- gtt_offset = 0x%08x\n",
- dev_priv->rings[i].name,
+ seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+ dev_priv->ring[i].name,
obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- sbuf_printf(m, "%08x : %08x\n",
- offset, obj->pages[page][elt]);
+ seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
offset += 4;
}
}
}
if (error->ring[i].num_requests) {
- sbuf_printf(m, "%s --- %d requests\n",
- dev_priv->rings[i].name,
+ seq_printf(m, "%s --- %d requests\n",
+ dev_priv->ring[i].name,
error->ring[i].num_requests);
for (j = 0; j < error->ring[i].num_requests; j++) {
- sbuf_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+ seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
error->ring[i].requests[j].seqno,
error->ring[i].requests[j].jiffies,
error->ring[i].requests[j].tail);
@@ -735,13 +731,13 @@ static int i915_error_state(struct drm_device *dev, struct sbuf *m,
}
if ((obj = error->ring[i].ringbuffer)) {
- sbuf_printf(m, "%s --- ringbuffer = 0x%08x\n",
- dev_priv->rings[i].name,
+ seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+ dev_priv->ring[i].name,
obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- sbuf_printf(m, "%08x : %08x\n",
+ seq_printf(m, "%08x : %08x\n",
offset,
obj->pages[page][elt]);
offset += 4;
@@ -765,16 +761,13 @@ static int i915_error_state(struct drm_device *dev, struct sbuf *m,
static int
i915_error_state_write(struct drm_device *dev, const char *str, void *unused)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
DRM_DEBUG_DRIVER("Resetting error state\n");
- mtx_lock(&dev_priv->error_lock);
- error = dev_priv->first_error;
- dev_priv->first_error = NULL;
- mtx_unlock(&dev_priv->error_lock);
- if (error != NULL && refcount_release(&error->ref))
- i915_error_state_free(error);
+
+ DRM_LOCK(dev);
+ i915_destroy_error_state(dev);
+ DRM_UNLOCK(dev);
+
return (0);
}
@@ -790,7 +783,7 @@ static int i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unus
DRM_UNLOCK(dev);
- sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+ seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
return 0;
}
@@ -803,17 +796,17 @@ static int i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unus
u16 rgvswctl = I915_READ16(MEMSWCTL);
u16 rgvstat = I915_READ16(MEMSTAT_ILK);
- sbuf_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
- sbuf_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
- sbuf_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
MEMSTAT_VID_SHIFT);
- sbuf_printf(m, "Current P-state: %d\n",
+ seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if (IS_GEN6(dev)) {
+ } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 rpstat;
+ u32 rpstat, cagf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
@@ -830,46 +823,50 @@ static int i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unus
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+ if (IS_HASWELL(dev))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+ cagf *= GT_FREQUENCY_MULTIPLIER;
gen6_gt_force_wake_put(dev_priv);
DRM_UNLOCK(dev);
- sbuf_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
- sbuf_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
- sbuf_printf(m, "Render p-state ratio: %d\n",
+ seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+ seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & 0xff00) >> 8);
- sbuf_printf(m, "Render p-state VID: %d\n",
+ seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
- sbuf_printf(m, "Render p-state limit: %d\n",
+ seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
- sbuf_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
- GEN6_CAGF_SHIFT) * 50);
- sbuf_printf(m, "RP CUR UP EI: %dus\n", rpupei &
+ seq_printf(m, "CAGF: %dMHz\n", cagf);
+ seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
- sbuf_printf(m, "RP CUR UP: %dus\n", rpcurup &
+ seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
GEN6_CURBSYTAVG_MASK);
- sbuf_printf(m, "RP PREV UP: %dus\n", rpprevup &
+ seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
GEN6_CURBSYTAVG_MASK);
- sbuf_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
+ seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
GEN6_CURIAVG_MASK);
- sbuf_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
+ seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
GEN6_CURBSYTAVG_MASK);
- sbuf_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
+ seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
GEN6_CURBSYTAVG_MASK);
max_freq = (rp_state_cap & 0xff0000) >> 16;
- sbuf_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * 50);
+ seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = (rp_state_cap & 0xff00) >> 8;
- sbuf_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * 50);
+ seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = rp_state_cap & 0xff;
- sbuf_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * 50);
+ seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ max_freq * GT_FREQUENCY_MULTIPLIER);
} else {
- sbuf_printf(m, "no P-state info available\n");
+ seq_printf(m, "no P-state info available\n");
}
return 0;
@@ -886,7 +883,7 @@ static int i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *un
for (i = 0; i < 16; i++) {
delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
- sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+ seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
(delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
}
@@ -911,7 +908,7 @@ static int i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unu
for (i = 1; i <= 32; i++) {
inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
- sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
+ seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
}
DRM_UNLOCK(dev);
@@ -934,48 +931,48 @@ static int ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
DRM_UNLOCK(dev);
- sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+ seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
"yes" : "no");
- sbuf_printf(m, "Boost freq: %d\n",
+ seq_printf(m, "Boost freq: %d\n",
(rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
MEMMODE_BOOST_FREQ_SHIFT);
- sbuf_printf(m, "HW control enabled: %s\n",
+ seq_printf(m, "HW control enabled: %s\n",
rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
- sbuf_printf(m, "SW control enabled: %s\n",
+ seq_printf(m, "SW control enabled: %s\n",
rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
- sbuf_printf(m, "Gated voltage change: %s\n",
+ seq_printf(m, "Gated voltage change: %s\n",
rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
- sbuf_printf(m, "Starting frequency: P%d\n",
+ seq_printf(m, "Starting frequency: P%d\n",
(rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
- sbuf_printf(m, "Max P-state: P%d\n",
+ seq_printf(m, "Max P-state: P%d\n",
(rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
- sbuf_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
- sbuf_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
- sbuf_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
- sbuf_printf(m, "Render standby enabled: %s\n",
+ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+ seq_printf(m, "Render standby enabled: %s\n",
(rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
- sbuf_printf(m, "Current RS state: ");
+ seq_printf(m, "Current RS state: ");
switch (rstdbyctl & RSX_STATUS_MASK) {
case RSX_STATUS_ON:
- sbuf_printf(m, "on\n");
+ seq_printf(m, "on\n");
break;
case RSX_STATUS_RC1:
- sbuf_printf(m, "RC1\n");
+ seq_printf(m, "RC1\n");
break;
case RSX_STATUS_RC1E:
- sbuf_printf(m, "RC1E\n");
+ seq_printf(m, "RC1E\n");
break;
case RSX_STATUS_RS1:
- sbuf_printf(m, "RS1\n");
+ seq_printf(m, "RS1\n");
break;
case RSX_STATUS_RS2:
- sbuf_printf(m, "RS2 (RC6)\n");
+ seq_printf(m, "RS2 (RC6)\n");
break;
case RSX_STATUS_RS3:
- sbuf_printf(m, "RC3 (RC6+)\n");
+ seq_printf(m, "RC3 (RC6+)\n");
break;
default:
- sbuf_printf(m, "unknown\n");
+ seq_printf(m, "unknown\n");
break;
}
@@ -984,8 +981,8 @@ static int ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
static int gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 rpmodectl1, gt_core_status, rcctl1;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
int count=0;
@@ -998,13 +995,13 @@ static int gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
mtx_unlock(&dev_priv->gt_lock);
if (forcewake_count) {
- sbuf_printf(m, "RC information inaccurate because userspace "
- "holds a reference \n");
+ seq_printf(m, "RC information inaccurate because somebody "
+ "holds a forcewake reference \n");
} else {
/* NB: we cannot use forcewake, else we read the wrong values */
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
udelay(10);
- sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51));
+ seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
}
gt_core_status = DRM_READ32(dev_priv->mmio_map, GEN6_GT_CORE_STATUS);
@@ -1013,57 +1010,66 @@ static int gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
DRM_UNLOCK(dev);
+ sx_xlock(&dev_priv->rps.hw_lock);
+ sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ sx_xunlock(&dev_priv->rps.hw_lock);
- sbuf_printf(m, "Video Turbo Mode: %s\n",
+ seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
- sbuf_printf(m, "HW control enabled: %s\n",
+ seq_printf(m, "HW control enabled: %s\n",
yesno(rpmodectl1 & GEN6_RP_ENABLE));
- sbuf_printf(m, "SW control enabled: %s\n",
+ seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
- sbuf_printf(m, "RC1e Enabled: %s\n",
+ seq_printf(m, "RC1e Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
- sbuf_printf(m, "RC6 Enabled: %s\n",
+ seq_printf(m, "RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
- sbuf_printf(m, "Deep RC6 Enabled: %s\n",
+ seq_printf(m, "Deep RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
- sbuf_printf(m, "Deepest RC6 Enabled: %s\n",
+ seq_printf(m, "Deepest RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
- sbuf_printf(m, "Current RC state: ");
+ seq_printf(m, "Current RC state: ");
switch (gt_core_status & GEN6_RCn_MASK) {
case GEN6_RC0:
if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
- sbuf_printf(m, "Core Power Down\n");
+ seq_printf(m, "Core Power Down\n");
else
- sbuf_printf(m, "on\n");
+ seq_printf(m, "on\n");
break;
case GEN6_RC3:
- sbuf_printf(m, "RC3\n");
+ seq_printf(m, "RC3\n");
break;
case GEN6_RC6:
- sbuf_printf(m, "RC6\n");
+ seq_printf(m, "RC6\n");
break;
case GEN6_RC7:
- sbuf_printf(m, "RC7\n");
+ seq_printf(m, "RC7\n");
break;
default:
- sbuf_printf(m, "Unknown\n");
+ seq_printf(m, "Unknown\n");
break;
}
- sbuf_printf(m, "Core Power Down: %s\n",
+ seq_printf(m, "Core Power Down: %s\n",
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
/* Not exactly sure what this is */
- sbuf_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
+ seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6_LOCKED));
- sbuf_printf(m, "RC6 residency since boot: %u\n",
+ seq_printf(m, "RC6 residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6));
- sbuf_printf(m, "RC6+ residency since boot: %u\n",
+ seq_printf(m, "RC6+ residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6p));
- sbuf_printf(m, "RC6++ residency since boot: %u\n",
+ seq_printf(m, "RC6++ residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6pp));
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
return 0;
}
@@ -1081,39 +1087,43 @@ static int i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
if (!I915_HAS_FBC(dev)) {
- sbuf_printf(m, "FBC unsupported on this chipset");
+ seq_printf(m, "FBC unsupported on this chipset\n");
return 0;
}
if (intel_fbc_enabled(dev)) {
- sbuf_printf(m, "FBC enabled");
+ seq_printf(m, "FBC enabled\n");
} else {
- sbuf_printf(m, "FBC disabled: ");
+ seq_printf(m, "FBC disabled: ");
switch (dev_priv->no_fbc_reason) {
case FBC_NO_OUTPUT:
- sbuf_printf(m, "no outputs");
+ seq_printf(m, "no outputs");
break;
case FBC_STOLEN_TOO_SMALL:
- sbuf_printf(m, "not enough stolen memory");
+ seq_printf(m, "not enough stolen memory");
break;
case FBC_UNSUPPORTED_MODE:
- sbuf_printf(m, "mode not supported");
+ seq_printf(m, "mode not supported");
break;
case FBC_MODE_TOO_LARGE:
- sbuf_printf(m, "mode too large");
+ seq_printf(m, "mode too large");
break;
case FBC_BAD_PLANE:
- sbuf_printf(m, "FBC unsupported on plane");
+ seq_printf(m, "FBC unsupported on plane");
break;
case FBC_NOT_TILED:
- sbuf_printf(m, "scanout buffer not tiled");
+ seq_printf(m, "scanout buffer not tiled");
break;
case FBC_MULTIPLE_PIPES:
- sbuf_printf(m, "multiple pipes are enabled");
+ seq_printf(m, "multiple pipes are enabled");
+ break;
+ case FBC_MODULE_PARAM:
+ seq_printf(m, "disabled per module param (default off)");
break;
default:
- sbuf_printf(m, "unknown reason");
+ seq_printf(m, "unknown reason");
}
+ seq_printf(m, "\n");
}
return 0;
}
@@ -1132,7 +1142,7 @@ static int i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused)
else if (IS_PINEVIEW(dev))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- sbuf_printf(m, "self-refresh: %s",
+ seq_printf(m, "self-refresh: %s\n",
sr_enabled ? "enabled" : "disabled");
return 0;
@@ -1154,10 +1164,10 @@ static int i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused
gfx = i915_gfx_val(dev_priv);
DRM_UNLOCK(dev);
- sbuf_printf(m, "GMCH temp: %ld\n", temp);
- sbuf_printf(m, "Chipset power: %ld\n", chipset);
- sbuf_printf(m, "GFX power: %ld\n", gfx);
- sbuf_printf(m, "Total power: %ld\n", chipset + gfx);
+ seq_printf(m, "GMCH temp: %ld\n", temp);
+ seq_printf(m, "Chipset power: %ld\n", chipset);
+ seq_printf(m, "GFX power: %ld\n", gfx);
+ seq_printf(m, "Total power: %ld\n", chipset + gfx);
return 0;
}
@@ -1169,31 +1179,25 @@ static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
int gpu_freq, ia_freq;
if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
- sbuf_printf(m, "unsupported on this chipset");
+ seq_printf(m, "unsupported on this chipset\n");
return 0;
}
- if (sx_xlock_sig(&dev->dev_struct_lock))
- return -EINTR;
+ sx_xlock(&dev_priv->rps.hw_lock);
- sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
+ seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
- for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+ for (gpu_freq = dev_priv->rps.min_delay;
+ gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
- I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_READ_MIN_FREQ_TABLE);
- if (_intel_wait_for(dev,
- (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 10, 1, "915frq")) {
- DRM_ERROR("pcode read of freq table timed out\n");
- continue;
- }
- ia_freq = I915_READ(GEN6_PCODE_DATA);
- sbuf_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+ ia_freq = gpu_freq;
+ sandybridge_pcode_read(dev_priv,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq);
+ seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
- DRM_UNLOCK(dev);
+ sx_xunlock(&dev_priv->rps.hw_lock);
return 0;
}
@@ -1205,7 +1209,7 @@ static int i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused)
if (sx_xlock_sig(&dev->dev_struct_lock))
return -EINTR;
- sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+ seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
DRM_UNLOCK(dev);
@@ -1246,25 +1250,25 @@ static int i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, voi
}
fb = to_intel_framebuffer(ifbdev->helper.fb);
- sbuf_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
describe_obj(m, fb->obj);
- sbuf_printf(m, "\n");
+ seq_printf(m, "\n");
list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
if (&fb->base == ifbdev->helper.fb)
continue;
- sbuf_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
fb->base.width,
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
describe_obj(m, fb->obj);
- sbuf_printf(m, "\n");
+ seq_printf(m, "\n");
}
DRM_UNLOCK(dev);
@@ -1274,24 +1278,23 @@ static int i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, voi
static int i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)
{
- drm_i915_private_t *dev_priv;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- dev_priv = dev->dev_private;
ret = sx_xlock_sig(&dev->mode_config.mutex);
if (ret != 0)
return -EINTR;
- if (dev_priv->pwrctx != NULL) {
- sbuf_printf(m, "power context ");
- describe_obj(m, dev_priv->pwrctx);
- sbuf_printf(m, "\n");
+ if (dev_priv->ips.pwrctx) {
+ seq_printf(m, "power context ");
+ describe_obj(m, dev_priv->ips.pwrctx);
+ seq_printf(m, "\n");
}
- if (dev_priv->renderctx != NULL) {
- sbuf_printf(m, "render context ");
- describe_obj(m, dev_priv->renderctx);
- sbuf_printf(m, "\n");
+ if (dev_priv->ips.renderctx) {
+ seq_printf(m, "render context ");
+ describe_obj(m, dev_priv->ips.renderctx);
+ seq_printf(m, "\n");
}
sx_xunlock(&dev->mode_config.mutex);
@@ -1309,7 +1312,7 @@ static int i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m
forcewake_count = dev_priv->forcewake_count;
mtx_unlock(&dev_priv->gt_lock);
- sbuf_printf(m, "forcewake count = %u\n", forcewake_count);
+ seq_printf(m, "forcewake count = %u\n", forcewake_count);
return 0;
}
@@ -1348,30 +1351,30 @@ static int i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)
if (ret)
return -EINTR;
- sbuf_printf(m, "bit6 swizzle for X-tiling = %s\n",
+ seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
- sbuf_printf(m, "bit6 swizzle for Y-tiling = %s\n",
+ seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_y));
if (IS_GEN3(dev) || IS_GEN4(dev)) {
- sbuf_printf(m, "DDC = 0x%08x\n",
+ seq_printf(m, "DDC = 0x%08x\n",
I915_READ(DCC));
- sbuf_printf(m, "C0DRB3 = 0x%04x\n",
+ seq_printf(m, "C0DRB3 = 0x%04x\n",
I915_READ16(C0DRB3));
- sbuf_printf(m, "C1DRB3 = 0x%04x\n",
+ seq_printf(m, "C1DRB3 = 0x%04x\n",
I915_READ16(C1DRB3));
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
- sbuf_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
+ seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
I915_READ(MAD_DIMM_C0));
- sbuf_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
+ seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
I915_READ(MAD_DIMM_C1));
- sbuf_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
+ seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
I915_READ(MAD_DIMM_C2));
- sbuf_printf(m, "TILECTL = 0x%08x\n",
+ seq_printf(m, "TILECTL = 0x%08x\n",
I915_READ(TILECTL));
- sbuf_printf(m, "ARB_MODE = 0x%08x\n",
+ seq_printf(m, "ARB_MODE = 0x%08x\n",
I915_READ(ARB_MODE));
- sbuf_printf(m, "DISP_ARB_CTL = 0x%08x\n",
+ seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
I915_READ(DISP_ARB_CTL));
}
DRM_UNLOCK(dev);
@@ -1390,25 +1393,23 @@ static int i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
if (ret)
return -EINTR;
if (INTEL_INFO(dev)->gen == 6)
- sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
-
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ring = &dev_priv->rings[i];
+ seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- sbuf_printf(m, "%s\n", ring->name);
+ for_each_ring(ring, dev_priv, i) {
+ seq_printf(m, "%s\n", ring->name);
if (INTEL_INFO(dev)->gen == 7)
- sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
- sbuf_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
- sbuf_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
- sbuf_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+ seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
+ seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
+ seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
+ seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
}
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- sbuf_printf(m, "aliasing PPGTT:\n");
- sbuf_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+ seq_printf(m, "aliasing PPGTT:\n");
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
}
- sbuf_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+ seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
DRM_UNLOCK(dev);
return 0;
@@ -1421,7 +1422,7 @@ static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data)
if (!IS_VALLEYVIEW(dev)) {
- sbuf_printf(m, "unsupported\n");
+ seq_printf(m, "unsupported\n");
return 0;
}
@@ -1429,29 +1430,29 @@ static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data)
if (ret)
return -EINTR;
- sbuf_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
+ seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
- sbuf_printf(m, "DPIO_DIV_A: 0x%08x\n",
+ seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_DIV_A));
- sbuf_printf(m, "DPIO_DIV_B: 0x%08x\n",
+ seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_DIV_B));
- sbuf_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
+ seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
- sbuf_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
+ seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
- sbuf_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
+ seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
- sbuf_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
+ seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
- sbuf_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
+ seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
- sbuf_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
+ seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
- sbuf_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
+ seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
sx_xunlock(&dev->mode_config.mutex);
@@ -1460,21 +1461,48 @@ static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data)
}
static int
-i915_debug_set_wedged(SYSCTL_HANDLER_ARGS)
+i915_wedged(SYSCTL_HANDLER_ARGS)
{
struct drm_device *dev = arg1;
drm_i915_private_t *dev_priv = dev->dev_private;
- int error, wedged;
+ int val = 1, ret;
if (dev_priv == NULL)
return (EBUSY);
- wedged = dev_priv->mm.wedged;
- error = sysctl_handle_int(oidp, &wedged, 0, req);
- if (error || !req->newptr)
- return (error);
- DRM_INFO("Manually setting wedged to %d\n", wedged);
- i915_handle_error(dev, wedged);
- return (error);
+
+ val = atomic_read(&dev_priv->mm.wedged);
+ ret = sysctl_handle_int(oidp, &val, 0, req);
+ if (ret != 0 || !req->newptr)
+ return (ret);
+
+ DRM_INFO("Manually setting wedged to %d\n", val);
+ i915_handle_error(dev, val);
+
+ return (ret);
+}
+
+static int
+i915_ring_stop(SYSCTL_HANDLER_ARGS)
+{
+ struct drm_device *dev = arg1;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int val = 0, ret;
+
+ if (dev_priv == NULL)
+ return (EBUSY);
+
+ val = dev_priv->stop_rings;
+ ret = sysctl_handle_int(oidp, &val, 0, req);
+ if (ret != 0 || !req->newptr)
+ return (ret);
+
+ DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
+
+ sx_xlock(&dev_priv->rps.hw_lock);
+ dev_priv->stop_rings = val;
+ sx_xunlock(&dev_priv->rps.hw_lock);
+
+ return (0);
}
static int
@@ -1482,73 +1510,102 @@ i915_max_freq(SYSCTL_HANDLER_ARGS)
{
struct drm_device *dev = arg1;
drm_i915_private_t *dev_priv = dev->dev_private;
- int error, max_freq;
+ int val = 1, ret;
if (dev_priv == NULL)
return (EBUSY);
- max_freq = dev_priv->max_delay * 50;
- error = sysctl_handle_int(oidp, &max_freq, 0, req);
- if (error || !req->newptr)
- return (error);
- DRM_DEBUG("Manually setting max freq to %d\n", max_freq);
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return (ENODEV);
+
+ sx_xlock(&dev_priv->rps.hw_lock);
+
+ val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+ ret = sysctl_handle_int(oidp, &val, 0, req);
+ if (ret != 0 || !req->newptr) {
+ sx_xunlock(&dev_priv->rps.hw_lock);
+ return (ret);
+ }
+
+ DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
+
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- dev_priv->max_delay = max_freq / 50;
- gen6_set_rps(dev, max_freq / 50);
- return (error);
+ dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
+
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+ sx_xunlock(&dev_priv->rps.hw_lock);
+
+ return (ret);
}
static int
-i915_cache_sharing(SYSCTL_HANDLER_ARGS)
+i915_min_freq(SYSCTL_HANDLER_ARGS)
{
struct drm_device *dev = arg1;
drm_i915_private_t *dev_priv = dev->dev_private;
- int error, snpcr, cache_sharing;
+ int val = 1, ret;
if (dev_priv == NULL)
return (EBUSY);
- DRM_LOCK(dev);
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- DRM_UNLOCK(dev);
- cache_sharing = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
- error = sysctl_handle_int(oidp, &cache_sharing, 0, req);
- if (error || !req->newptr)
- return (error);
- if (cache_sharing < 0 || cache_sharing > 3)
- return (EINVAL);
- DRM_DEBUG("Manually setting uncore sharing to %d\n", cache_sharing);
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return (ENODEV);
- DRM_LOCK(dev);
- /* Update the cache sharing policy here as well */
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- snpcr &= ~GEN6_MBC_SNPCR_MASK;
- snpcr |= (cache_sharing << GEN6_MBC_SNPCR_SHIFT);
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- DRM_UNLOCK(dev);
- return (0);
+ sx_xlock(&dev_priv->rps.hw_lock);
+
+ val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+ ret = sysctl_handle_int(oidp, &val, 0, req);
+ if (ret != 0 || !req->newptr) {
+ sx_xunlock(&dev_priv->rps.hw_lock);
+ return (ret);
+ }
+
+ DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
+
+ /*
+ * Turbo will still be enabled, but won't go above the set value.
+ */
+ dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
+
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+ sx_xunlock(&dev_priv->rps.hw_lock);
+
+ return (ret);
}
static int
-i915_stop_rings(SYSCTL_HANDLER_ARGS)
+i915_cache_sharing(SYSCTL_HANDLER_ARGS)
{
struct drm_device *dev = arg1;
drm_i915_private_t *dev_priv = dev->dev_private;
- int error, val;
+ u32 snpcr;
+ int val = 1, ret;
if (dev_priv == NULL)
return (EBUSY);
- DRM_LOCK(dev);
- val = dev_priv->stop_rings;
- DRM_UNLOCK(dev);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return (error);
- DRM_DEBUG("Stopping rings 0x%08x\n", val);
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return (ENODEV);
+
+ sx_xlock(&dev_priv->rps.hw_lock);
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ sx_xunlock(&dev_priv->rps.hw_lock);
+
+ val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
+ ret = sysctl_handle_int(oidp, &val, 0, req);
+ if (ret != 0 || !req->newptr)
+ return (ret);
+
+ if (val < 0 || val > 3)
+ return (EINVAL);
+
+ DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
+
+ /* Update the cache sharing policy here as well */
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- DRM_LOCK(dev);
- dev_priv->stop_rings = val;
- DRM_UNLOCK(dev);
return (0);
}
@@ -1564,7 +1621,6 @@ static struct i915_info_sysctl_list {
{"i915_gem_gtt", i915_gem_gtt_info, NULL, 0},
{"i915_gem_pinned", i915_gem_gtt_info, NULL, 0, (void *)PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, NULL, 0, (void *)ACTIVE_LIST},
- {"i915_gem_flushing", i915_gem_object_list_info, NULL, 0, (void *)FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, NULL, 0, (void *)INACTIVE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, NULL, 0},
{"i915_gem_request", i915_gem_request_info, NULL, 0},
@@ -1643,8 +1699,6 @@ out:
return (error);
}
-extern int i915_gem_sync_exec_requests;
-extern int i915_fix_mi_batchbuffer_end;
extern int i915_intr_pf;
extern long i915_gem_wired_pages_cnt;
@@ -1682,7 +1736,7 @@ i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
NULL);
oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "wedged",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0,
- i915_debug_set_wedged, "I", NULL);
+ i915_wedged, "I", NULL);
if (oid == NULL)
return (-ENOMEM);
oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq",
@@ -1690,22 +1744,19 @@ i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
"I", NULL);
if (oid == NULL)
return (-ENOMEM);
+ oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "min_freq",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_min_freq,
+ "I", NULL);
+ if (oid == NULL)
+ return (-ENOMEM);
oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
"cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
0, i915_cache_sharing, "I", NULL);
if (oid == NULL)
return (-ENOMEM);
oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
- "stop_rings", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
- 0, i915_stop_rings, "I", NULL);
- if (oid == NULL)
- return (-ENOMEM);
- oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec",
- CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL);
- if (oid == NULL)
- return (-ENOMEM);
- oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi",
- CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL);
+ "ring_stop", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
+ 0, i915_ring_stop, "I", NULL);
if (oid == NULL)
return (-ENOMEM);
oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf",
@@ -1726,3 +1777,5 @@ i915_sysctl_cleanup(struct drm_device *dev)
free(dev->sysctl_private, DRM_MEM_DRIVER);
}
+
+//#endif /* CONFIG_DEBUG_FS */
diff --git a/sys/dev/drm2/i915/i915_dma.c b/sys/dev/drm2/i915/i915_dma.c
index ad2c147..e2d8e4c 100644
--- a/sys/dev/drm2/i915/i915_dma.c
+++ b/sys/dev/drm2/i915/i915_dma.c
@@ -29,14 +29,16 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/drm_fb_helper.h>
+#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
-#include <dev/drm2/i915/intel_drv.h>
-#include <dev/drm2/i915/intel_ringbuffer.h>
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
#define BEGIN_LP_RING(n) \
intel_ring_begin(LP_RING(dev_priv), (n))
@@ -62,7 +64,7 @@ static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
if (I915_NEED_GFX_HWS(dev_priv->dev))
- return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
+ return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
else
return intel_read_status_page(LP_RING(dev_priv), reg);
}
@@ -96,39 +98,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
}
/**
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
- */
-static int i915_init_phys_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
- /*
- * Program Hardware Status Page
- * XXXKIB Keep 4GB limit for allocation for now. This method
- * of allocation is used on <= 965 hardware, that has several
- * erratas regarding the use of physical memory > 4 GB.
- */
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
- if (!dev_priv->status_page_dmah) {
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- ring->status_page.page_addr = dev_priv->hw_status_page =
- dev_priv->status_page_dmah->vaddr;
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-
- i915_write_hws_pga(dev);
- DRM_DEBUG("Enabled hardware status page, phys %jx\n",
- (uintmax_t)dev_priv->dma_status_page);
- return 0;
-}
-
-/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
@@ -142,8 +111,7 @@ static void i915_free_hws(struct drm_device *dev)
dev_priv->status_page_dmah = NULL;
}
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
+ if (ring->status_page.gfx_addr) {
ring->status_page.gfx_addr = 0;
pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
PAGE_SIZE);
@@ -168,7 +136,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
+ ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
if (ring->space < 0)
ring->space += ring->size;
@@ -194,7 +162,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
DRM_LOCK(dev);
for (i = 0; i < I915_NUM_RINGS; i++)
- intel_cleanup_ring_buffer(&dev_priv->rings[i]);
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
DRM_UNLOCK(dev);
/* Clear the HWS virtual address at teardown */
@@ -235,10 +203,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
}
- dev_priv->cpp = init->cpp;
- dev_priv->back_offset = init->back_offset;
- dev_priv->front_offset = init->front_offset;
- dev_priv->current_page = 0;
+ dev_priv->dri1.cpp = init->cpp;
+ dev_priv->dri1.back_offset = init->back_offset;
+ dev_priv->dri1.front_offset = init->front_offset;
+ dev_priv->dri1.current_page = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->pf_current_page = 0;
@@ -376,33 +344,24 @@ static int validate_cmd(int cmd)
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
return -EINVAL;
- BEGIN_LP_RING((dwords+1)&~1);
-
for (i = 0; i < dwords;) {
- int cmd, sz;
-
- if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
+ int sz = validate_cmd(buffer[i]);
+ if (sz == 0 || i + sz > dwords)
return -EINVAL;
-
- if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
- return -EINVAL;
-
- OUT_RING(cmd);
-
- while (++i, --sz) {
- if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
- sizeof(cmd))) {
- return -EINVAL;
- }
- OUT_RING(cmd);
- }
+ i += sz;
}
+ ret = BEGIN_LP_RING((dwords+1)&~1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dwords; i++)
+ OUT_RING(buffer[i]);
if (dwords & 1)
OUT_RING(0);
@@ -461,15 +420,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- if (++dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 0;
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 0;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -484,7 +444,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
int i = 0, count, ret;
if (cmd->sz & 0x3) {
- DRM_ERROR("alignment\n");
+ DRM_ERROR("alignment");
return -EINVAL;
}
@@ -517,11 +477,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
int nbox = batch->num_cliprects;
int i, count, ret;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
if ((batch->start | batch->used) & 0x7) {
- DRM_ERROR("alignment\n");
+ DRM_ERROR("alignment");
return -EINVAL;
}
@@ -561,6 +518,15 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
ADVANCE_LP_RING();
}
+
+ if (IS_G4X(dev) || IS_GEN5(dev)) {
+ if (BEGIN_LP_RING(2) == 0) {
+ OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
+ }
+
i915_emit_breadcrumb(dev);
return 0;
}
@@ -577,7 +543,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
__func__,
- dev_priv->current_page,
+ dev_priv->dri1.current_page,
master_priv->sarea_priv->pf_current_page);
i915_kernel_lost_context(dev);
@@ -591,12 +557,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
+ if (dev_priv->dri1.current_page == 0) {
+ OUT_RING(dev_priv->dri1.back_offset);
+ dev_priv->dri1.current_page = 1;
} else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
+ OUT_RING(dev_priv->dri1.front_offset);
+ dev_priv->dri1.current_page = 0;
}
OUT_RING(0);
@@ -605,24 +571,24 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
- master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
- master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+ master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
return 0;
}
static int i915_quiescent(struct drm_device *dev)
{
i915_kernel_lost_context(dev);
- return intel_wait_ring_idle(LP_RING(dev->dev_private));
+ return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -650,10 +616,12 @@ int i915_batchbuffer(struct drm_device *dev, void *data,
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
drm_i915_batchbuffer_t *batch = data;
- size_t cliplen;
int ret;
struct drm_clip_rect *cliprects = NULL;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
if (!dev_priv->dri1.allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
@@ -662,24 +630,28 @@ int i915_batchbuffer(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
- cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
if (batch->num_cliprects < 0)
- return -EFAULT;
- if (batch->num_cliprects != 0) {
+ return -EINVAL;
+
+ if (batch->num_cliprects) {
cliprects = malloc(batch->num_cliprects *
sizeof(struct drm_clip_rect),
DRM_MEM_DMA, M_WAITOK | M_ZERO);
+ if (cliprects == NULL)
+ return -ENOMEM;
- ret = -copyin(batch->cliprects, cliprects,
+ ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_free;
- } else
- cliprects = NULL;
+ }
+ }
DRM_LOCK(dev);
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
DRM_UNLOCK(dev);
@@ -710,27 +682,39 @@ int i915_cmdbuffer(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
if (cmdbuf->num_cliprects < 0)
return -EINVAL;
batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
+ if (batch_data == NULL)
+ return -ENOMEM;
- ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
- if (ret != 0)
+ ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_batch_free;
+ }
if (cmdbuf->num_cliprects) {
cliprects = malloc(cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect), DRM_MEM_DMA, M_WAITOK | M_ZERO);
- ret = -copyin(cmdbuf->cliprects, cliprects,
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto fail_batch_free;
+ }
+
+ ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_clip_free;
+ }
}
DRM_LOCK(dev);
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
DRM_UNLOCK(dev);
if (ret) {
@@ -758,21 +742,21 @@ static int i915_emit_irq(struct drm_device * dev)
DRM_DEBUG_DRIVER("\n");
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 1;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
- return dev_priv->counter;
+ return dev_priv->dri1.counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -794,27 +778,22 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- ret = 0;
- mtx_lock(&dev_priv->irq_lock);
if (ring->irq_get(ring)) {
+ mtx_lock(&dev_priv->irq_lock);
while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
- ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
- "915wtq", 3 * hz);
+ ret = -msleep(&ring->irq_queue, &dev_priv->irq_lock,
+ PCATCH, "915wtq", 3 * DRM_HZ);
if (ret == -ERESTART)
ret = -ERESTARTSYS;
}
- ring->irq_put(ring);
- mtx_unlock(&dev_priv->irq_lock);
- } else {
mtx_unlock(&dev_priv->irq_lock);
- if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
- 3000, 1, "915wir"))
- ret = -EBUSY;
- }
+ ring->irq_put(ring);
+ } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ ret = -EBUSY;
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
}
return ret;
@@ -969,13 +948,14 @@ int i915_getparam(struct drm_device *dev, void *data,
value = 1;
break;
case I915_PARAM_HAS_EXECBUF2:
+ /* depends on GEM */
value = 1;
break;
case I915_PARAM_HAS_BSD:
- value = intel_ring_initialized(&dev_priv->rings[VCS]);
+ value = intel_ring_initialized(&dev_priv->ring[VCS]);
break;
case I915_PARAM_HAS_BLT:
- value = intel_ring_initialized(&dev_priv->rings[BCS]);
+ value = intel_ring_initialized(&dev_priv->ring[BCS]);
break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
@@ -998,6 +978,23 @@ int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_ALIASING_PPGTT:
value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
break;
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = i915_semaphore_is_enabled(dev);
+ break;
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ /* FIXME Linux<->FreeBSD: Is there a better choice than
+ * curthread? */
+ value = DRM_SUSER(curthread);
+ break;
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1066,34 +1063,33 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- DRM_ERROR("tried to set status page when mode setting active\n");
+ WARN(1, "tried to set status page when mode setting active\n");
return 0;
}
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
ring = LP_RING(dev_priv);
- ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
- hws->addr & (0x1ffff<<12);
+ ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
- pmap_mapdev_attr(dev->agp->base + hws->addr, PAGE_SIZE,
+ pmap_mapdev_attr(dev_priv->mm.gtt_base_addr + hws->addr, PAGE_SIZE,
VM_MEMATTR_WRITE_COMBINING);
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
- ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
+ ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
+ ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws at %p\n",
- dev_priv->hw_status_page);
+ ring->status_page.page_addr);
return 0;
}
@@ -1101,7 +1097,7 @@ static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->bridge_dev = intel_gtt_get_bridge_device();
+ dev_priv->bridge_dev = pci_find_dbsf(0, 0, 0, 0);
if (!dev_priv->bridge_dev) {
DRM_ERROR("bridge device not found\n");
return -1;
@@ -1123,15 +1119,15 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
- u64 mchbar_addr, temp;
+ u64 mchbar_addr;
if (INTEL_INFO(dev)->gen >= 4)
- temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
- temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef XXX_CONFIG_PNP
+#ifdef CONFIG_PNP
if (mchbar_addr &&
pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
return 0;
@@ -1149,16 +1145,16 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
return -ENOMEM;
}
- if (INTEL_INFO(dev)->gen >= 4) {
- temp = rman_get_start(dev_priv->mch_res);
- temp >>= 32;
- pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
- }
- pci_write_config(dev_priv->bridge_dev, reg,
- rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
+ if (INTEL_INFO(dev)->gen >= 4)
+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+ upper_32_bits(rman_get_start(dev_priv->mch_res)));
+
+ pci_write_config_dword(dev_priv->bridge_dev, reg,
+ lower_32_bits(rman_get_start(dev_priv->mch_res)));
return 0;
}
+/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
@@ -1170,18 +1166,16 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = false;
if (IS_I915G(dev) || IS_I915GM(dev)) {
- temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
- enabled = (temp & DEVEN_MCHBAR_EN) != 0;
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
- temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
enabled = temp & 1;
}
/* If it's already enabled, don't have to do anything */
- if (enabled) {
- DRM_DEBUG("mchbar already enabled\n");
+ if (enabled)
return;
- }
if (intel_alloc_mchbar_resource(dev))
return;
@@ -1190,11 +1184,11 @@ intel_setup_mchbar(struct drm_device *dev)
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
- temp | DEVEN_MCHBAR_EN, 4);
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+ temp | DEVEN_MCHBAR_EN);
} else {
- temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
- pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
}
}
@@ -1207,17 +1201,13 @@ intel_teardown_mchbar(struct drm_device *dev)
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev) || IS_I915GM(dev)) {
- temp = pci_read_config(dev_priv->bridge_dev,
- DEVEN_REG, 4);
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
temp &= ~DEVEN_MCHBAR_EN;
- pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
- temp, 4);
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
} else {
- temp = pci_read_config(dev_priv->bridge_dev,
- mchbar_reg, 4);
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
temp &= ~1;
- pci_write_config(dev_priv->bridge_dev, mchbar_reg,
- temp, 4);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
}
}
@@ -1232,6 +1222,57 @@ intel_teardown_mchbar(struct drm_device *dev)
}
}
+#ifdef __linux__
+/* true = enable decode, false = disable decoder */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+ struct drm_device *dev = cookie;
+
+ intel_modeset_vga_set_state(dev, state);
+ if (state)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ pr_info("switched on\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(dev->pdev, PCI_D0);
+ i915_resume(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ } else {
+ pr_err("switched off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915_suspend(dev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+ }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
+#endif
+
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1241,8 +1282,23 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
-#if 0
+#ifdef __linux__
+ /* If we have > 1 VGA cards, then we need to arbitrate access
+ * to the common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+ * then we do not take part in VGA arbitration and the
+ * vga_client_register() fails with -ENODEV.
+ */
+ ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ goto out;
+
intel_register_dsm_handler();
+
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
+ if (ret)
+ goto cleanup_vga_client;
#endif
/* Initialise stolen first so that we may reserve preallocated
@@ -1260,15 +1316,20 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
+ TASK_INIT(&dev_priv->console_resume_work, 0, intel_console_resume,
+ dev->dev_private);
+
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
+ /* Always safe in the mode setting case. */
+ /* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
ret = intel_fbdev_init(dev);
if (ret)
- goto cleanup_gem;
+ goto cleanup_irq;
drm_kms_helper_poll_init(dev);
@@ -1277,6 +1338,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
+cleanup_irq:
+ drm_irq_uninstall(dev);
cleanup_gem:
DRM_LOCK(dev);
i915_gem_cleanup_ringbuffer(dev);
@@ -1285,6 +1348,13 @@ cleanup_gem:
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
+#ifdef __linux__
+ vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+out:
+#endif
+ intel_free_parsed_bios_data(dev);
return ret;
}
@@ -1292,7 +1362,7 @@ int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
struct drm_i915_master_private *master_priv;
- master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA, M_NOWAIT | M_ZERO);
+ master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA, M_WAITOK | M_ZERO);
if (!master_priv)
return -ENOMEM;
@@ -1312,6 +1382,67 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
+static void
+i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
+ unsigned long size)
+{
+ dev_priv->mm.gtt_mtrr = -1;
+
+#if defined(CONFIG_X86_PAT)
+ if (cpu_has_pat)
+ return;
+#endif
+
+ /* Set up a WC MTRR for non-PAT systems. This is more common than
+ * one would think, because the kernel disables PAT on first
+ * generation Core chips because WC PAT gets overridden by a UC
+ * MTRR if present. Even if a UC MTRR isn't present.
+ */
+ dev_priv->mm.gtt_mtrr = drm_mtrr_add(base, size, DRM_MTRR_WC);
+ if (dev_priv->mm.gtt_mtrr < 0) {
+ DRM_INFO("MTRR allocation failed. Graphics "
+ "performance may suffer.\n");
+ }
+}
+
+#ifdef __linux__
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+ struct apertures_struct *ap;
+ struct pci_dev *pdev = dev_priv->dev->pdev;
+ bool primary;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
+ ap->ranges[0].size =
+ dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ primary =
+ pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+ remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+ kfree(ap);
+}
+#endif
+
+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+ const struct intel_device_info *info = dev_priv->info;
+
+#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
+#define DEV_INFO_SEP ,
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ info->gen,
+ dev_priv->dev->pci_device,
+ DEV_INFO_FLAGS);
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1327,8 +1458,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
const struct intel_device_info *info;
- unsigned long base, size;
- int ret = 0, mmio_bar;
+ int ret = 0, mmio_bar, mmio_size;
+ uint32_t aperture_size;
info = i915_get_device_id(dev->pci_device);
@@ -1345,42 +1476,113 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
+ if (dev_priv == NULL)
+ return -ENOMEM;
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
dev_priv->info = info;
+ i915_dump_device_info(dev_priv);
+
if (i915_get_bridge_dev(dev)) {
- free(dev_priv, DRM_MEM_DRIVER);
- return -EIO;
+ ret = -EIO;
+ goto free_priv;
}
- dev_priv->mm.gtt = intel_gtt_get();
- /* Add register map (needed for suspend/resume) */
+ ret = i915_gem_gtt_init(dev);
+ if (ret)
+ goto put_bridge;
+
+#ifdef __linux__
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kick_out_firmware_fb(dev_priv);
+
+ pci_set_master(dev->pdev);
+
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN2(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
+ /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+#endif
+
mmio_bar = IS_GEN2(dev) ? 1 : 0;
- base = drm_get_resource_start(dev, mmio_bar);
- size = drm_get_resource_len(dev, mmio_bar);
+ /* Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap from
+ * clobbering the GTT which we want ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (info->gen < 5)
+ mmio_size = 512*1024;
+ else
+ mmio_size = 2*1024*1024;
ret = drm_addmap(dev,
- base, size,
+ drm_get_resource_start(dev, mmio_bar), mmio_size,
_DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
if (ret != 0) {
- DRM_ERROR("Failed to allocate mmio_map: %d\n", ret);
- free(dev_priv, DRM_MEM_DRIVER);
- return ret;
+ DRM_ERROR("failed to map registers\n");
+ ret = -EIO;
+ goto put_gmch;
}
- dev_priv->tq = taskqueue_create("915", M_WAITOK,
- taskqueue_thread_enqueue, &dev_priv->tq);
- taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
- mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
- mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
- mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
- mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
- mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);
+ aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
+
+#ifdef __linux__
+ dev_priv->mm.gtt_mapping =
+ io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
+ aperture_size);
+ if (dev_priv->mm.gtt_mapping == NULL) {
+ ret = -EIO;
+ goto out_rmmap;
+ }
+#endif
+
+ i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+ aperture_size);
+
+ /* The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_gem_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and recording error state.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time. Use an ordered one.
+ */
+ dev_priv->wq = taskqueue_create("915", M_WAITOK,
+ taskqueue_thread_enqueue, &dev_priv->wq);
+ if (dev_priv->wq == NULL) {
+ DRM_ERROR("Failed to create our workqueue.\n");
+ ret = -ENOMEM;
+ goto out_mtrrfree;
+ }
+ taskqueue_start_threads(&dev_priv->wq, 1, PWAIT, "i915 taskq");
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev);
intel_irq_init(dev);
+ intel_gt_init(dev);
+ /* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
@@ -1403,17 +1605,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
drm_pci_enable_msi(dev);
- /* Init HWS */
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = i915_init_phys_hws(dev);
- if (ret != 0) {
- drm_rmmap(dev, dev_priv->mmio_map);
- free(dev_priv, DRM_MEM_DRIVER);
- return ret;
- }
- }
-
mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
+ mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
+ mtx_init(&dev_priv->rps.lock, "915rps", NULL, MTX_DEF);
+ sx_init(&dev_priv->dpio_lock, "915dpi");
+
+ sx_init(&dev_priv->rps.hw_lock, "915rpshw");
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
@@ -1429,8 +1626,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Start out suspended */
dev_priv->mm.suspended = 1;
- intel_detect_pch(dev);
-
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
@@ -1441,7 +1636,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
pci_enable_busmaster(dev->dev);
+#ifdef __linux__
+ i915_setup_sysfs(dev);
+#endif
+
+ /* Must be done after probing outputs */
intel_opregion_init(dev);
+#ifdef __linux__
+ acpi_video_register();
+#endif
callout_init(&dev_priv->hangcheck_timer, 1);
callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
@@ -1453,9 +1656,48 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_gem_unload:
- /* XXXKIB */
- (void) i915_driver_unload(dev);
- return (ret);
+ EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.inactive_shrinker);
+
+ free_completion(&dev_priv->error_completion);
+ mtx_destroy(&dev_priv->irq_lock);
+ mtx_destroy(&dev_priv->error_lock);
+ mtx_destroy(&dev_priv->rps.lock);
+ sx_destroy(&dev_priv->dpio_lock);
+
+ sx_destroy(&dev_priv->rps.hw_lock);
+
+ if (dev->msi_enabled)
+ drm_pci_disable_msi(dev);
+
+ intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
+ if (dev_priv->wq != NULL) {
+ taskqueue_free(dev_priv->wq);
+ dev_priv->wq = NULL;
+ }
+out_mtrrfree:
+ if (dev_priv->mm.gtt_mtrr >= 0) {
+ drm_mtrr_del(dev_priv->mm.gtt_mtrr,
+ dev_priv->mm.gtt_base_addr,
+ aperture_size,
+ DRM_MTRR_WC);
+ dev_priv->mm.gtt_mtrr = -1;
+ }
+#ifdef __linux__
+ io_mapping_free(dev_priv->mm.gtt_mapping);
+out_rmmap:
+#endif
+ if (dev_priv->mmio_map != NULL)
+ drm_rmmap(dev, dev_priv->mmio_map);
+put_gmch:
+ i915_gem_gtt_fini(dev);
+put_bridge:
+#ifdef __linux__
+ pci_dev_put(dev_priv->bridge_dev);
+#endif
+free_priv:
+ free(dev_priv, DRM_MEM_DRIVER);
+ return ret;
}
int i915_driver_unload(struct drm_device *dev)
@@ -1463,6 +1705,17 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ intel_gpu_ips_teardown();
+
+#ifdef __linux__
+ i915_teardown_sysfs(dev);
+
+ if (dev_priv->mm.inactive_shrinker.shrink)
+ unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+#endif
+
+ intel_free_parsed_bios_data(dev);
+
DRM_LOCK(dev);
ret = i915_gpu_idle(dev);
if (ret)
@@ -1470,19 +1723,56 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_retire_requests(dev);
DRM_UNLOCK(dev);
- i915_free_hws(dev);
+ /* Cancel the retire work handler, which should be idle now. */
+ while (taskqueue_cancel_timeout(dev_priv->wq,
+ &dev_priv->mm.retire_work, NULL) != 0)
+ taskqueue_drain_timeout(dev_priv->wq,
+ &dev_priv->mm.retire_work);
- intel_teardown_mchbar(dev);
+#ifdef __linux__
+ io_mapping_free(dev_priv->mm.gtt_mapping);
+#endif
+ if (dev_priv->mm.gtt_mtrr >= 0) {
+ drm_mtrr_del(dev_priv->mm.gtt_mtrr,
+ dev_priv->mm.gtt_base_addr,
+ dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE,
+ DRM_MTRR_WC);
+ dev_priv->mm.gtt_mtrr = -1;
+ }
+
+#ifdef __linux__
+ acpi_video_unregister();
+#endif
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
+ while (taskqueue_cancel(dev_priv->wq,
+ &dev_priv->console_resume_work, NULL) != 0)
+ taskqueue_drain(dev_priv->wq,
+ &dev_priv->console_resume_work);
+
+ /*
+ * free the memory space allocated for the child device
+ * config parsed from VBT
+ */
+ if (dev_priv->child_dev && dev_priv->child_dev_num) {
+ free(dev_priv->child_dev, DRM_MEM_DRIVER);
+ dev_priv->child_dev = NULL;
+ dev_priv->child_dev_num = 0;
+ }
+
+#ifdef __linux__
+ vga_switcheroo_unregister_client(dev->pdev);
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+#endif
}
/* Free error state after interrupts are fully disabled. */
callout_stop(&dev_priv->hangcheck_timer);
callout_drain(&dev_priv->hangcheck_timer);
-
+ while (taskqueue_cancel(dev_priv->wq, &dev_priv->error_work, NULL) != 0)
+ taskqueue_drain(dev_priv->wq, &dev_priv->error_work);
i915_destroy_error_state(dev);
if (dev->msi_enabled)
@@ -1491,18 +1781,16 @@ int i915_driver_unload(struct drm_device *dev)
intel_opregion_fini(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Flush any outstanding unpin_work. */
+ taskqueue_drain_all(dev_priv->wq);
+
DRM_LOCK(dev);
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
DRM_UNLOCK(dev);
i915_gem_cleanup_aliasing_ppgtt(dev);
-#if 1
- KIB_NOTYET();
-#else
- if (I915_HAS_FBC(dev) && i915_powersave)
- i915_cleanup_compression(dev);
-#endif
+ i915_gem_cleanup_stolen(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
intel_cleanup_overlay(dev);
@@ -1511,21 +1799,31 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
- i915_gem_unload(dev);
-
- mtx_destroy(&dev_priv->irq_lock);
+ intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
- if (dev_priv->tq != NULL)
- taskqueue_free(dev_priv->tq);
+ /*
+ * NOTE Linux<->FreeBSD: Free mmio_map after
+ * intel_teardown_gmbus(), because, on FreeBSD,
+ * intel_i2c_reset() is called during iicbus_detach().
+ */
+ if (dev_priv->mmio_map != NULL)
+ drm_rmmap(dev, dev_priv->mmio_map);
- bus_generic_detach(dev->dev);
- drm_rmmap(dev, dev_priv->mmio_map);
- intel_teardown_gmbus(dev);
+ if (dev_priv->wq != NULL)
+ taskqueue_free(dev_priv->wq);
- mtx_destroy(&dev_priv->dpio_lock);
+ free_completion(&dev_priv->error_completion);
+ mtx_destroy(&dev_priv->irq_lock);
mtx_destroy(&dev_priv->error_lock);
- mtx_destroy(&dev_priv->error_completion_lock);
- mtx_destroy(&dev_priv->rps_lock);
+ mtx_destroy(&dev_priv->rps.lock);
+ sx_destroy(&dev_priv->dpio_lock);
+
+ sx_destroy(&dev_priv->rps.hw_lock);
+
+#ifdef __linux__
+ pci_dev_put(dev_priv->bridge_dev);
+#endif
free(dev->dev_private, DRM_MEM_DRIVER);
return 0;
@@ -1535,11 +1833,14 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
+ DRM_DEBUG_DRIVER("\n");
file_priv = malloc(sizeof(*file_priv), DRM_MEM_FILES, M_WAITOK | M_ZERO);
+ if (!file_priv)
+ return -ENOMEM;
file->driver_priv = file_priv;
- mtx_init(&file_priv->mm.lck, "915fp", NULL, MTX_DEF);
+ mtx_init(&file_priv->mm.lock, "915fp", NULL, MTX_DEF);
INIT_LIST_HEAD(&file_priv->mm.request_list);
drm_gem_names_init(&file_priv->context_idr);
@@ -1570,10 +1871,8 @@ void i915_driver_lastclose(struct drm_device * dev)
return;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-#if 1
- KIB_NOTYET();
-#else
- drm_fb_helper_restore();
+ intel_fb_restore_mode(dev);
+#ifdef __linux__
vga_switcheroo_process_delayed_switch();
#endif
return;
@@ -1594,104 +1893,62 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mtx_destroy(&file_priv->mm.lck);
+ mtx_destroy(&file_priv->mm.lock);
free(file_priv, DRM_MEM_FILES);
}
struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
- DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
- DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
- DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};
-#ifdef COMPAT_FREEBSD32
-extern struct drm_ioctl_desc i915_compat_ioctls[];
-extern int i915_compat_ioctls_nr;
-#endif
-
-struct drm_driver i915_driver_info = {
- /*
- * FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on
- * Linux.
- */
- .driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
-
- .buf_priv_size = sizeof(drm_i915_private_t),
- .load = i915_driver_load,
- .open = i915_driver_open,
- .unload = i915_driver_unload,
- .preclose = i915_driver_preclose,
- .lastclose = i915_driver_lastclose,
- .postclose = i915_driver_postclose,
- .device_is_agp = i915_driver_device_is_agp,
- .master_create = i915_master_create,
- .master_destroy = i915_master_destroy,
- .gem_init_object = i915_gem_init_object,
- .gem_free_object = i915_gem_free_object,
- .gem_pager_ops = &i915_gem_pager_ops,
- .dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_mmap_gtt,
- .dumb_destroy = i915_gem_dumb_destroy,
- .sysctl_init = i915_sysctl_init,
- .sysctl_cleanup = i915_sysctl_cleanup,
-
- .ioctls = i915_ioctls,
-#ifdef COMPAT_FREEBSD32
- .compat_ioctls = i915_compat_ioctls,
- .num_compat_ioctls = &i915_compat_ioctls_nr,
-#endif
- .num_ioctls = ARRAY_SIZE(i915_ioctls),
-
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
+int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/*
* This is really ugly: Because old userspace abused the linux agp interface to
diff --git a/sys/dev/drm2/i915/i915_drm.h b/sys/dev/drm2/i915/i915_drm.h
index deae206..c6f0be3 100644
--- a/sys/dev/drm2/i915/i915_drm.h
+++ b/sys/dev/drm2/i915/i915_drm.h
@@ -1,4 +1,4 @@
-/*-
+/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
@@ -24,17 +24,18 @@
*
*/
+#ifndef _UAPI_I915_DRM_H_
+#define _UAPI_I915_DRM_H_
+
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#ifndef _I915_DRM_H_
-#define _I915_DRM_H_
+#include <dev/drm2/drm.h>
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
-#include <dev/drm2/drm.h>
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
@@ -46,12 +47,7 @@ typedef struct _drm_i915_init {
enum {
I915_INIT_DMA = 0x01,
I915_CLEANUP_DMA = 0x02,
- I915_RESUME_DMA = 0x03,
-
- /* Since this struct isn't versioned, just used a new
- * 'func' code to indicate the presence of dri2 sarea
- * info. */
- I915_INIT_DMA2 = 0x04
+ I915_RESUME_DMA = 0x03
} func;
unsigned int mmio_offset;
int sarea_priv_offset;
@@ -69,7 +65,6 @@ typedef struct _drm_i915_init {
unsigned int depth_pitch;
unsigned int cpp;
unsigned int chipset;
- unsigned int sarea_handle;
} drm_i915_init_t;
typedef struct _drm_i915_sarea {
@@ -123,20 +118,18 @@ typedef struct _drm_i915_sarea {
int pipeB_w;
int pipeB_h;
- /* Triple buffering */
- drm_handle_t third_handle;
- int third_offset;
- int third_size;
- unsigned int third_tiled;
+ /* fill out some space for old userspace triple buffer */
+ drm_handle_t unused_handle;
+ __u32 unused1, unused2, unused3;
- /* buffer object handles for the static buffers. May change
- * over the lifetime of the client, though it doesn't in our current
- * implementation.
+ /* buffer object handles for static buffers. May change
+ * over the lifetime of the client.
*/
__u32 front_bo_handle;
__u32 back_bo_handle;
- __u32 third_bo_handle;
+ __u32 unused_bo_handle;
__u32 depth_bo_handle;
+
} drm_i915_sarea_t;
/* due to userspace building against these headers we need some compat here */
@@ -149,16 +142,6 @@ typedef struct _drm_i915_sarea {
#define planeB_w pipeB_w
#define planeB_h pipeB_h
-/* Driver specific fence types and classes.
- */
-
-/* The only fence class we support */
-#define DRM_I915_FENCE_CLASS_ACCEL 0
-/* Fence type that guarantees read-write flush */
-#define DRM_I915_FENCE_TYPE_RW 2
-/* MI_FLUSH programmed just before the fence */
-#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
-
/* Flags for perf_boxes
*/
#define I915_BOX_RING_EMPTY 0x1
@@ -186,9 +169,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
-#define DRM_I915_MMIO 0x10
#define DRM_I915_HWS_ADDR 0x11
-#define DRM_I915_EXECBUFFER 0x12
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
@@ -212,14 +193,18 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
-#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
-#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
+#define DRM_I915_GEM_SET_CACHING 0x2f
+#define DRM_I915_GEM_GET_CACHING 0x30
+#define DRM_I915_REG_READ 0x31
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
-#define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t)
+#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
@@ -233,13 +218,15 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
-#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
+#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
@@ -255,24 +242,14 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
-#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
-
-/* Asynchronous page flipping:
- */
-typedef struct drm_i915_flip {
- /*
- * This is really talking about planes, and we could rename it
- * except for the fact that some of the duplicated i915_drm.h files
- * out there check for HAVE_I915_FLIP and so might pick up this
- * version.
- */
- int pipes;
-} drm_i915_flip_t;
+#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -310,15 +287,15 @@ typedef struct drm_i915_irq_wait {
/* Ioctl to query kernel params:
*/
-#define I915_PARAM_IRQ_ACTIVE 1
-#define I915_PARAM_ALLOW_BATCHBUFFER 2
-#define I915_PARAM_LAST_DISPATCH 3
-#define I915_PARAM_CHIPSET_ID 4
-#define I915_PARAM_HAS_GEM 5
-#define I915_PARAM_NUM_FENCES_AVAIL 6
-#define I915_PARAM_HAS_OVERLAY 7
+#define I915_PARAM_IRQ_ACTIVE 1
+#define I915_PARAM_ALLOW_BATCHBUFFER 2
+#define I915_PARAM_LAST_DISPATCH 3
+#define I915_PARAM_CHIPSET_ID 4
+#define I915_PARAM_HAS_GEM 5
+#define I915_PARAM_NUM_FENCES_AVAIL 6
+#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
-#define I915_PARAM_HAS_EXECBUF2 9
+#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
@@ -326,8 +303,14 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
-#define I915_PARAM_HAS_LLC 17
+#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
+#define I915_PARAM_HAS_WAIT_TIMEOUT 19
+#define I915_PARAM_HAS_SEMAPHORES 20
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
+#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
+#define I915_PARAM_HAS_SECURE_BATCHES 23
+#define I915_PARAM_HAS_PINNED_BATCHES 24
typedef struct drm_i915_getparam {
int param;
@@ -392,70 +375,10 @@ typedef struct drm_i915_vblank_swap {
unsigned int sequence;
} drm_i915_vblank_swap_t;
-#define I915_MMIO_READ 0
-#define I915_MMIO_WRITE 1
-
-#define I915_MMIO_MAY_READ 0x1
-#define I915_MMIO_MAY_WRITE 0x2
-
-#define MMIO_REGS_IA_PRIMATIVES_COUNT 0
-#define MMIO_REGS_IA_VERTICES_COUNT 1
-#define MMIO_REGS_VS_INVOCATION_COUNT 2
-#define MMIO_REGS_GS_PRIMITIVES_COUNT 3
-#define MMIO_REGS_GS_INVOCATION_COUNT 4
-#define MMIO_REGS_CL_PRIMITIVES_COUNT 5
-#define MMIO_REGS_CL_INVOCATION_COUNT 6
-#define MMIO_REGS_PS_INVOCATION_COUNT 7
-#define MMIO_REGS_PS_DEPTH_COUNT 8
-
-typedef struct drm_i915_mmio_entry {
- unsigned int flag;
- unsigned int offset;
- unsigned int size;
-} drm_i915_mmio_entry_t;
-
-typedef struct drm_i915_mmio {
- unsigned int read_write:1;
- unsigned int reg:31;
- void __user *data;
-} drm_i915_mmio_t;
-
typedef struct drm_i915_hws_addr {
__u64 addr;
} drm_i915_hws_addr_t;
-/*
- * Relocation header is 4 uint32_ts
- * 0 - 32 bit reloc count
- * 1 - 32-bit relocation type
- * 2-3 - 64-bit user buffer handle ptr for another list of relocs.
- */
-#define I915_RELOC_HEADER 4
-
-/*
- * type 0 relocation has 4-uint32_t stride
- * 0 - offset into buffer
- * 1 - delta to add in
- * 2 - buffer handle
- * 3 - reserved (for optimisations later).
- */
-/*
- * type 1 relocation has 4-uint32_t stride.
- * Hangs off the first item in the op list.
- * Performed after all valiations are done.
- * Try to group relocs into the same relocatee together for
- * performance reasons.
- * 0 - offset into buffer
- * 1 - delta to add in
- * 2 - buffer index in op list.
- * 3 - relocatee index in op list.
- */
-#define I915_RELOC_TYPE_0 0
-#define I915_RELOC0_STRIDE 4
-#define I915_RELOC_TYPE_1 1
-#define I915_RELOC1_STRIDE 4
-
-
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
@@ -493,8 +416,12 @@ struct drm_i915_gem_pread {
__u64 offset;
/** Length of data to read */
__u64 size;
- /** Pointer to write the data into. */
- __u64 data_ptr; /* void *, but pointers are not 32/64 compatible */
+ /**
+ * Pointer to write the data into.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 data_ptr;
};
struct drm_i915_gem_pwrite {
@@ -505,8 +432,12 @@ struct drm_i915_gem_pwrite {
__u64 offset;
/** Length of data to write */
__u64 size;
- /** Pointer to read the data from. */
- __u64 data_ptr; /* void *, but pointers are not 32/64 compatible */
+ /**
+ * Pointer to read the data from.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 data_ptr;
};
struct drm_i915_gem_mmap {
@@ -521,8 +452,12 @@ struct drm_i915_gem_mmap {
* The value will be page-aligned.
*/
__u64 size;
- /** Returned pointer the data was mapped at */
- __u64 addr_ptr; /* void *, but pointers are not 32/64 compatible */
+ /**
+ * Returned pointer the data was mapped at.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 addr_ptr;
};
struct drm_i915_gem_mmap_gtt {
@@ -667,7 +602,8 @@ struct drm_i915_gem_execbuffer {
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
- __u64 cliprects_ptr; /* struct drm_clip_rect *cliprects */
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
};
struct drm_i915_gem_exec_object2 {
@@ -696,7 +632,7 @@ struct drm_i915_gem_exec_object2 {
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
__u64 flags;
- __u64 rsvd1; /* now used for context info */
+ __u64 rsvd1;
__u64 rsvd2;
};
@@ -733,13 +669,27 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
__u64 flags;
- __u64 rsvd1;
+ __u64 rsvd1; /* now used for context info */
__u64 rsvd2;
};
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE (1<<9)
+
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED (1<<10)
+
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
@@ -768,10 +718,31 @@ struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
__u32 handle;
- /** Return busy status (1 if busy, 0 if idle) */
+ /** Return busy status (1 if busy, 0 if idle).
+ * The high word is used to indicate on which rings the object
+ * currently resides:
+ * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+ */
__u32 busy;
};
+#define I915_CACHING_NONE 0
+#define I915_CACHING_CACHED 1
+
+struct drm_i915_gem_caching {
+ /**
+ * Handle of the buffer to set/get the caching level of. */
+ __u32 handle;
+
+ /**
+ * Cacheing level to apply or return value
+ *
+ * bits0-15 are for generic caching control (i.e. the above defined
+ * values). bits16-31 are reserved for platform-specific variations
+ * (e.g. l3$ caching on gen7). */
+ __u32 caching;
+};
+
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
@@ -856,7 +827,7 @@ struct drm_i915_get_pipe_from_crtc_id {
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
-#define I915_MADV_PURGED_INTERNAL 2 /* internal state */
+#define __I915_MADV_PURGED 2 /* internal state */
struct drm_i915_gem_madvise {
/** Handle of the buffer to change the backing store advice */
@@ -871,6 +842,7 @@ struct drm_i915_gem_madvise {
__u32 retained;
};
+/* flags */
#define I915_OVERLAY_TYPE_MASK 0xff
#define I915_OVERLAY_YUV_PLANAR 0x01
#define I915_OVERLAY_YUV_PACKED 0x02
@@ -968,6 +940,14 @@ struct drm_intel_sprite_colorkey {
__u32 flags;
};
+struct drm_i915_gem_wait {
+ /** Handle of BO we shall wait on */
+ __u32 bo_handle;
+ __u32 flags;
+ /** Number of nanoseconds to wait, Returns time remaining. */
+ __s64 timeout_ns;
+};
+
struct drm_i915_gem_context_create {
/* output: id of new context*/
__u32 ctx_id;
@@ -979,4 +959,15 @@ struct drm_i915_gem_context_destroy {
__u32 pad;
};
-#endif /* _I915_DRM_H_ */
+struct drm_i915_reg_read {
+ __u64 offset;
+ __u64 val; /* Return value */
+};
+
+/* For use by IPS driver */
+extern unsigned long i915_read_mch_val(void);
+extern bool i915_gpu_raise(void);
+extern bool i915_gpu_lower(void);
+extern bool i915_gpu_busy(void);
+extern bool i915_gpu_turbo_disable(void);
+#endif /* _UAPI_I915_DRM_H_ */
diff --git a/sys/dev/drm2/i915/i915_drv.c b/sys/dev/drm2/i915/i915_drv.c
index e4bf06e..0e54e9d 100644
--- a/sys/dev/drm2/i915/i915_drv.c
+++ b/sys/dev/drm2/i915/i915_drv.c
@@ -1,31 +1,29 @@
-/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
- * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
+/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
*/
-/*-
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
*
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
@@ -33,57 +31,126 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
-#include <dev/drm2/drm_mm.h>
-#include <dev/drm2/i915/i915_drm.h>
-#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/drm_pciids.h>
-#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include "dev/drm2/i915/i915_drv.h"
+#ifdef __linux__
+#include "dev/drm2/i915/i915_trace.h"
+#endif
+#include "dev/drm2/i915/intel_drv.h"
+
+#include <dev/drm2/drm_crtc_helper.h>
#include "fb_if.h"
-int intel_iommu_enabled = 0;
-TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
-int intel_iommu_gfx_mapped = 0;
-TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped);
+static int i915_modeset __read_mostly = 1;
+TUNABLE_INT("drm.i915.modeset", &i915_modeset);
+module_param_named(modeset, i915_modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+ "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+ "1=on, -1=force vga console preference [default])");
+
+#ifdef __linux__
+unsigned int i915_fbpercrtc __always_unused = 0;
+module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+#endif
-int i915_prefault_disable;
-TUNABLE_INT("drm.i915.prefault_disable", &i915_prefault_disable);
-int i915_semaphores = -1;
-TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
-static int i915_try_reset = 1;
-TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
-unsigned int i915_lvds_downclock = 0;
-TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
-int i915_vbt_sdvo_panel_type = -1;
-TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
-unsigned int i915_powersave = 1;
+int i915_panel_ignore_lid __read_mostly = 1;
+TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
+module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+ "Override lid status (0=autodetect, 1=autodetect disabled [default], "
+ "-1=force lid closed, -2=force lid open)");
+
+unsigned int i915_powersave __read_mostly = 1;
TUNABLE_INT("drm.i915.powersave", &i915_powersave);
-int i915_enable_fbc = 0;
-TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
-int i915_enable_rc6 = 0;
+module_param_named(powersave, i915_powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+ "Enable powersavings, fbc, downclocking, etc. (default: true)");
+
+int i915_semaphores __read_mostly = -1;
+TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
+module_param_named(semaphores, i915_semaphores, int, 0600);
+MODULE_PARM_DESC(semaphores,
+ "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
+
+int i915_enable_rc6 __read_mostly = -1;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
-int i915_lvds_channel_mode;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
+MODULE_PARM_DESC(i915_enable_rc6,
+ "Enable power-saving render C-state 6. "
+ "Different stages can be selected via bitmask values "
+ "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+ "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+ "default: -1 (use per-chip default)");
+
+int i915_enable_fbc __read_mostly = -1;
+TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
+module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
+MODULE_PARM_DESC(i915_enable_fbc,
+ "Enable frame buffer compression for power savings "
+ "(default: -1 (use per-chip default))");
+
+unsigned int i915_lvds_downclock __read_mostly = 0;
+TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock);
+module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+ "Use panel (LVDS/eDP) downclocking for power savings "
+ "(default: false)");
+
+int i915_lvds_channel_mode __read_mostly;
TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);
-int i915_panel_use_ssc = -1;
+module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+int i915_panel_use_ssc __read_mostly = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
-int i915_panel_ignore_lid = 0;
-TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
-int i915_panel_invert_brightness;
-TUNABLE_INT("drm.i915.panel_invert_brightness", &i915_panel_invert_brightness);
-int i915_modeset = 1;
-TUNABLE_INT("drm.i915.modeset", &i915_modeset);
-int i915_enable_ppgtt = -1;
-TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
-int i915_enable_hangcheck = 1;
+module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+ "Use Spread Spectrum Clock with panels [LVDS/eDP] "
+ "(default: auto from VBT)");
+
+int i915_vbt_sdvo_panel_type __read_mostly = -1;
+TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type);
+module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+static int i915_try_reset __read_mostly = true;
+TUNABLE_INT("drm.i915.try_reset", &i915_try_reset);
+module_param_named(reset, i915_try_reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+int i915_enable_hangcheck __read_mostly = true;
TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
-static int i915_enable_unsupported = 0;
-TUNABLE_INT("drm.i915.enable_unsupported", &i915_enable_unsupported);
+module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+ "Periodically check GPU activity for detecting hangs. "
+ "WARNING: Disabling this can cause system wide hangs. "
+ "(default: true)");
-/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
-static drm_pci_id_list_t i915_pciidlist[] = {
- i915_PCI_IDS
-};
+int i915_enable_ppgtt __read_mostly = -1;
+TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt);
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
+MODULE_PARM_DESC(i915_enable_ppgtt,
+ "Enable PPGTT (default: true)");
+
+unsigned int i915_preliminary_hw_support __read_mostly = 0;
+TUNABLE_INT("drm.i915.enable_unsupported", &i915_preliminary_hw_support);
+module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
+MODULE_PARM_DESC(preliminary_hw_support,
+ "Enable preliminary hardware support. "
+ "Enable Haswell and ValleyView Support. "
+ "(default: false)");
+
+int intel_iommu_gfx_mapped = 0;
+TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped);
+
+static struct drm_driver driver;
+int intel_agp_enabled = 1; /* On FreeBSD, agp is a required dependency. */
#define INTEL_VGA_DEVICE(id, info_) { \
.device = id, \
@@ -175,15 +242,13 @@ static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
- .has_pch_split = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 0, /* disabled due to buggy hardware */
+ .has_fbc = 1,
.has_bsd_ring = 1,
- .has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -192,7 +257,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -202,7 +267,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@@ -211,7 +276,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -221,7 +286,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_valleyview_m_info = {
@@ -231,7 +296,6 @@ static const struct intel_device_info intel_valleyview_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
- .not_supported = 1,
};
static const struct intel_device_info intel_valleyview_d_info = {
@@ -241,7 +305,6 @@ static const struct intel_device_info intel_valleyview_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
- .not_supported = 1,
};
static const struct intel_device_info intel_haswell_d_info = {
@@ -250,8 +313,7 @@ static const struct intel_device_info intel_haswell_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
- .not_supported = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_haswell_m_info = {
@@ -260,8 +322,12 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
- .not_supported = 1,
+ .has_force_wake = 1,
+};
+
+/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
+static const drm_pci_id_list_t pciidlist[] = {
+ i915_PCI_IDS
};
static const struct intel_gfx_device_id {
@@ -354,45 +420,58 @@ static const struct intel_gfx_device_id {
{0, 0}
};
-#define PCI_VENDOR_INTEL 0x8086
-#define INTEL_PCH_DEVICE_ID_MASK 0xff00
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#if defined(CONFIG_DRM_I915_KMS)
+MODULE_DEVICE_TABLE(pci, pciidlist);
+#endif
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
device_t pch;
- uint32_t id;
+ /*
+ * The reason to probe ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough work easy for VMM, that only
+ * need to expose ISA bridge to let driver know the real hardware
+ * underneath. This is a requirement from virtualization team.
+ */
pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
- if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
- id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
- if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_IBX;
- dev_priv->num_pch_pll = 2;
- DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_CPT;
- dev_priv->num_pch_pll = 2;
- DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
- /* PantherPoint is CPT compatible */
- dev_priv->pch_type = PCH_CPT;
- dev_priv->num_pch_pll = 2;
- DRM_DEBUG_KMS("Found PatherPoint PCH\n");
- } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_LPT;
- dev_priv->num_pch_pll = 0;
- DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- } else
- DRM_DEBUG_KMS("No PCH detected\n");
- KASSERT(dev_priv->num_pch_pll <= I915_NUM_PLLS,
- ("num_pch_pll %d\n", dev_priv->num_pch_pll));
- } else
- DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
+ if (pch) {
+ if (pci_get_vendor(pch) == PCI_VENDOR_ID_INTEL) {
+ unsigned short id;
+ id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
+ dev_priv->pch_id = id;
+
+ if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_IBX;
+ dev_priv->num_pch_pll = 2;
+ DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN5(dev));
+ } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+ /* PantherPoint is CPT compatible */
+ dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
+ DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ }
+ BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
+ }
+ }
}
bool i915_semaphore_is_enabled(struct drm_device *dev)
@@ -403,9 +482,11 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915_semaphores >= 0)
return i915_semaphores;
+#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
return false;
+#endif
return 1;
}
@@ -416,7 +497,7 @@ static int i915_drm_freeze(struct drm_device *dev)
drm_kms_helper_poll_disable(dev);
-#if 0
+#ifdef __linux__
pci_save_state(dev->pdev);
#endif
@@ -424,10 +505,16 @@ static int i915_drm_freeze(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
int error = i915_gem_idle(dev);
if (error) {
- device_printf(dev->dev,
+ dev_err(dev->dev,
"GEM idle failed, resume might fail\n");
return error;
}
+
+ taskqueue_cancel_timeout(dev_priv->wq,
+ &dev_priv->rps.delayed_resume_work, NULL);
+
+ intel_modeset_disable(dev);
+
drm_irq_uninstall(dev);
}
@@ -438,48 +525,67 @@ static int i915_drm_freeze(struct drm_device *dev)
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
+ console_lock();
+ intel_fbdev_set_suspend(dev, 1);
+ console_unlock();
+
return 0;
}
-static int i915_suspend(device_t kdev)
+int i915_suspend(struct drm_device *dev, pm_message_t state)
{
- struct drm_device *dev;
int error;
- dev = device_get_softc(kdev);
if (!dev || !dev->dev_private) {
+ DRM_ERROR("dev: %p\n", dev);
DRM_ERROR("DRM not initialized, aborting suspend.\n");
- return ENODEV;
+ return -ENODEV;
}
- DRM_DEBUG_KMS("starting suspend\n");
+ if (state.event == PM_EVENT_PRETHAW)
+ return 0;
+
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
error = i915_drm_freeze(dev);
if (error)
- return -error;
+ return error;
+
+ if (state.event == PM_EVENT_SUSPEND) {
+#ifdef __linux__
+ /* Shut down the device */
+ pci_disable_device(dev->pdev);
+ pci_set_power_state(dev->pdev, PCI_D3hot);
+#endif
+ }
+
+ return 0;
+}
- error = bus_generic_suspend(kdev);
- DRM_DEBUG_KMS("finished suspend %d\n", error);
- return (error);
+void intel_console_resume(void *arg, int pending)
+{
+ struct drm_i915_private *dev_priv =
+ arg;
+ struct drm_device *dev = dev_priv->dev;
+
+ console_lock();
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
}
-static int i915_drm_thaw(struct drm_device *dev)
+static int __i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- DRM_LOCK(dev);
- i915_gem_restore_gtt_mappings(dev);
- DRM_UNLOCK(dev);
- }
-
i915_restore_state(dev);
intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev);
DRM_LOCK(dev);
dev_priv->mm.suspended = 0;
@@ -488,46 +594,83 @@ static int i915_drm_thaw(struct drm_device *dev)
DRM_UNLOCK(dev);
intel_modeset_init_hw(dev);
- sx_xlock(&dev->mode_config.mutex);
- drm_mode_config_reset(dev);
- sx_xunlock(&dev->mode_config.mutex);
+ intel_modeset_setup_hw_state(dev, false);
drm_irq_install(dev);
-
- sx_xlock(&dev->mode_config.mutex);
- /* Resume the modeset for every activated CRTC */
- drm_helper_resume_force_mode(dev);
- sx_xunlock(&dev->mode_config.mutex);
}
intel_opregion_init(dev);
dev_priv->modeset_on_lid = 0;
+ /*
+ * The console lock can be pretty contented on resume due
+ * to all the printk activity. Try to keep it out of the hot
+ * path of resume if possible.
+ */
+ if (console_trylock()) {
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
+ } else {
+ taskqueue_enqueue(dev_priv->wq,
+ &dev_priv->console_resume_work);
+ }
+
return error;
}
-static int i915_resume(device_t kdev)
+#ifdef __linux__
+static int i915_drm_thaw(struct drm_device *dev)
{
- struct drm_device *dev;
+ int error = 0;
+
+ intel_gt_reset(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_LOCK(dev);
+ i915_gem_restore_gtt_mappings(dev);
+ DRM_UNLOCK(dev);
+ }
+
+ __i915_drm_thaw(dev);
+
+ return error;
+}
+#endif
+
+int i915_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- dev = device_get_softc(kdev);
- DRM_DEBUG_KMS("starting resume\n");
-#if 0
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+#ifdef __linux__
if (pci_enable_device(dev->pdev))
return -EIO;
pci_set_master(dev->pdev);
#endif
- ret = i915_drm_thaw(dev);
+ intel_gt_reset(dev);
+
+ /*
+ * Platforms with opregion should have sane BIOS, older ones (gen3 and
+ * earlier) need this since the BIOS might clear all our scratch PTEs.
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ !dev_priv->opregion.header) {
+ DRM_LOCK(dev);
+ i915_gem_restore_gtt_mappings(dev);
+ DRM_UNLOCK(dev);
+ }
+
+ ret = __i915_drm_thaw(dev);
if (ret)
- return -ret;
+ return ret;
drm_kms_helper_poll_enable(dev);
- ret = bus_generic_resume(kdev);
- DRM_DEBUG_KMS("finished resume %d\n", ret);
- return (ret);
+ return 0;
}
static int i8xx_do_reset(struct drm_device *dev)
@@ -568,8 +711,7 @@ static int i8xx_do_reset(struct drm_device *dev)
static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
-
- gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
+ pci_read_config_byte(dev->dev, I965_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
@@ -583,19 +725,19 @@ static int i965_do_reset(struct drm_device *dev)
* well as the reset bit (GR/bit 0). Setting the GR bit
* triggers the reset; when done, the hardware will clear it.
*/
- gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
- pci_write_config(dev->dev, I965_GDRST,
+ pci_read_config_byte(dev->dev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->dev, I965_GDRST,
gdrst | GRDOM_RENDER |
- GRDOM_RESET_ENABLE, 1);
+ GRDOM_RESET_ENABLE);
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
/* We can't reset render&media without also resetting display ... */
- gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
- pci_write_config(dev->dev, I965_GDRST,
+ pci_read_config_byte(dev->dev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->dev, I965_GDRST,
gdrst | GRDOM_MEDIA |
- GRDOM_RESET_ENABLE, 1);
+ GRDOM_RESET_ENABLE);
return wait_for(i965_reset_complete(dev), 500);
}
@@ -639,15 +781,21 @@ static int gen6_do_reset(struct drm_device *dev)
I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
/* Spin waiting for the device to ack the reset request */
+ /*
+ * NOTE Linux<->FreeBSD: We use _intel_wait_for() instead of
+ * wait_for(), because we want to set the 4th argument to 0.
+ * This allows us to use a struct mtx for dev_priv->gt_lock and
+ * avoid a LOR.
+ */
ret = _intel_wait_for(dev,
(I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
500, 0, "915rst");
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->forcewake_count)
- dev_priv->display.force_wake_get(dev_priv);
+ dev_priv->gt.force_wake_get(dev_priv);
else
- dev_priv->display.force_wake_put(dev_priv);
+ dev_priv->gt.force_wake_put(dev_priv);
/* Restore fifo count */
dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
@@ -714,20 +862,17 @@ int i915_reset(struct drm_device *dev)
if (!i915_try_reset)
return 0;
- if (!sx_try_xlock(&dev->dev_struct_lock))
- return (-EBUSY);
-
- dev_priv->stop_rings = 0;
+ DRM_LOCK(dev);
i915_gem_reset(dev);
ret = -ENODEV;
- if (time_second - dev_priv->last_gpu_reset < 5)
+ if (get_seconds() - dev_priv->last_gpu_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
else
ret = intel_gpu_reset(dev);
- dev_priv->last_gpu_reset = time_second;
+ dev_priv->last_gpu_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
DRM_UNLOCK(dev);
@@ -771,9 +916,6 @@ int i915_reset(struct drm_device *dev)
DRM_UNLOCK(dev);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_modeset_init_hw(dev);
-
drm_irq_uninstall(dev);
drm_irq_install(dev);
} else {
@@ -791,8 +933,6 @@ i915_get_device_id(int device)
for (did = &i915_infolist[0]; did->device != 0; did++) {
if (did->device != device)
continue;
- if (did->info->not_supported && !i915_enable_unsupported)
- return (NULL);
return (did->info);
}
return (NULL);
@@ -805,16 +945,243 @@ static int i915_probe(device_t kdev)
if (intel_info == NULL)
return (ENXIO);
+ if (intel_info->is_valleyview)
+ if(!i915_preliminary_hw_support) {
+ DRM_ERROR("Preliminary hardware support disabled\n");
+ return (ENXIO);
+ }
+
+ /* Only bind to function 0 of the device. Early generations
+ * used function 1 as a placeholder for multi-head. This causes
+ * us confusion instead, especially on the systems where both
+ * functions have the same PCI-ID!
+ */
+ if (pci_get_function(kdev))
+ return (ENXIO);
+
+ /* We've managed to ship a kms-enabled ddx that shipped with an XvMC
+ * implementation for gen3 (and only gen3) that used legacy drm maps
+ * (gasp!) to share buffers between X and the client. Hence we need to
+ * keep around the fake agp stuff for gen3, even when kms is enabled. */
+ if (intel_info->gen != 3) {
+ driver.driver_features &=
+ ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
+ } else if (!intel_agp_enabled) {
+ DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
+ return (ENXIO);
+ }
- return -drm_probe_helper(kdev, i915_pciidlist);
+ return -drm_probe_helper(kdev, pciidlist);
}
-static int i915_attach(device_t kdev)
+#ifdef __linux__
+static void
+i915_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int i915_pm_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int error;
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ error = i915_drm_freeze(drm_dev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int i915_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_resume(drm_dev);
+}
+
+static int i915_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ return i915_drm_freeze(drm_dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_thaw(drm_dev);
+}
+
+static int i915_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_freeze(drm_dev);
+}
+
+static const struct dev_pm_ops i915_pm_ops = {
+ .suspend = i915_pm_suspend,
+ .resume = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_poweroff,
+ .restore = i915_pm_resume,
+};
+
+static const struct vm_operations_struct i915_gem_vm_ops = {
+ .fault = i915_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations i915_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = i915_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+#endif /* __linux__ */
+
+#ifdef COMPAT_FREEBSD32
+extern struct drm_ioctl_desc i915_compat_ioctls[];
+extern int i915_compat_ioctls_nr;
+#endif
+
+static struct drm_driver driver = {
+ /* Don't use MTRRs here; the Xserver or userspace app should
+ * deal with them for Intel hardware.
+ */
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
+ .load = i915_driver_load,
+ .unload = i915_driver_unload,
+ .open = i915_driver_open,
+ .lastclose = i915_driver_lastclose,
+ .preclose = i915_driver_preclose,
+ .postclose = i915_driver_postclose,
+
+ /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
+ .suspend = i915_suspend,
+ .resume = i915_resume,
+
+ .device_is_agp = i915_driver_device_is_agp,
+ .master_create = i915_master_create,
+ .master_destroy = i915_master_destroy,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = i915_debugfs_init,
+ .debugfs_cleanup = i915_debugfs_cleanup,
+#endif
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
+#if defined(__linux__)
+ .gem_vm_ops = &i915_gem_vm_ops,
+#elif defined(__FreeBSD__)
+ .gem_pager_ops = &i915_gem_pager_ops,
+#endif
+
+#ifdef FREEBSD_WIP
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = i915_gem_prime_export,
+ .gem_prime_import = i915_gem_prime_import,
+#endif /* FREEBSD_WIP */
+
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_destroy = i915_gem_dumb_destroy,
+ .ioctls = i915_ioctls,
+#ifdef COMPAT_FREEBSD32
+ .compat_ioctls = i915_compat_ioctls,
+ .num_compat_ioctls = &i915_compat_ioctls_nr,
+#endif
+#ifdef __linux__
+ .fops = &i915_driver_fops,
+#endif
+#ifdef __FreeBSD__
+ .sysctl_init = i915_sysctl_init,
+ .sysctl_cleanup = i915_sysctl_cleanup,
+#endif
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+#ifdef __linux__
+static struct pci_driver i915_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = i915_pci_probe,
+ .remove = i915_pci_remove,
+ .driver.pm = &i915_pm_ops,
+};
+#endif
+
+static int __init i915_attach(device_t kdev)
+{
+ driver.num_ioctls = i915_max_ioctl;
+
+ /*
+ * If CONFIG_DRM_I915_KMS is set, default to KMS unless
+ * explicitly disabled with the module pararmeter.
+ *
+ * Otherwise, just follow the parameter (defaulting to off).
+ *
+ * Allow optional vga_text_mode_force boot option to override
+ * the default behavior.
+ */
+#if defined(CONFIG_DRM_I915_KMS)
+ if (i915_modeset != 0)
+ driver.driver_features |= DRIVER_MODESET;
+#endif
if (i915_modeset == 1)
- i915_driver_info.driver_features |= DRIVER_MODESET;
- return (-drm_attach_helper(kdev, i915_pciidlist, &i915_driver_info));
+ driver.driver_features |= DRIVER_MODESET;
+
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && i915_modeset == -1)
+ driver.driver_features &= ~DRIVER_MODESET;
+#endif
+
+ if (!(driver.driver_features & DRIVER_MODESET))
+ driver.get_vblank_timestamp = NULL;
+
+ return (-drm_attach_helper(kdev, pciidlist, &driver));
}
static struct fb_info *
@@ -840,8 +1207,8 @@ static device_method_t i915_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, i915_probe),
DEVMETHOD(device_attach, i915_attach),
- DEVMETHOD(device_suspend, i915_suspend),
- DEVMETHOD(device_resume, i915_resume),
+ DEVMETHOD(device_suspend, drm_generic_suspend),
+ DEVMETHOD(device_resume, drm_generic_resume),
DEVMETHOD(device_detach, drm_generic_detach),
/* Framebuffer service methods */
@@ -856,6 +1223,10 @@ static driver_t i915_driver = {
sizeof(struct drm_device)
};
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
extern devclass_t drm_devclass;
DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0,
SI_ORDER_ANY);
@@ -867,154 +1238,129 @@ MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- (((dev_priv)->info->gen >= 6) && \
+ ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
((reg) < 0x40000) && \
- ((reg) != FORCEWAKE)) && \
- (!IS_VALLEYVIEW((dev_priv)->dev))
+ ((reg) != FORCEWAKE))
-void
-__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+static bool IS_DISPLAYREG(u32 reg)
{
- int count;
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
- DELAY(10);
+ /*
+ * This should make it easier to transition modules over to the
+ * new register block scheme, since we can do it incrementally.
+ */
+ if (reg >= VLV_DISPLAY_BASE)
+ return false;
- I915_WRITE_NOTRACE(FORCEWAKE, 1);
- POSTING_READ(FORCEWAKE);
+ if (reg >= RENDER_RING_BASE &&
+ reg < RENDER_RING_BASE + 0xff)
+ return false;
+ if (reg >= GEN6_BSD_RING_BASE &&
+ reg < GEN6_BSD_RING_BASE + 0xff)
+ return false;
+ if (reg >= BLT_RING_BASE &&
+ reg < BLT_RING_BASE + 0xff)
+ return false;
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
- DELAY(10);
-}
+ if (reg == PGTBL_ER)
+ return false;
-void
-__gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
-{
- int count;
+ if (reg >= IPEIR_I965 &&
+ reg < HWSTAM)
+ return false;
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
- DELAY(10);
+ if (reg == MI_MODE)
+ return false;
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
- POSTING_READ(FORCEWAKE_MT);
+ if (reg == GFX_MODE_GEN7)
+ return false;
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
- DELAY(10);
-}
+ if (reg == RENDER_HWS_PGA_GEN7 ||
+ reg == BSD_HWS_PGA_GEN7 ||
+ reg == BLT_HWS_PGA_GEN7)
+ return false;
-void
-gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
+ if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
+ reg == GEN6_BSD_RNCID)
+ return false;
- mtx_lock(&dev_priv->gt_lock);
- if (dev_priv->forcewake_count++ == 0)
- dev_priv->display.force_wake_get(dev_priv);
- mtx_unlock(&dev_priv->gt_lock);
-}
+ if (reg == GEN6_BLITTER_ECOSKPD)
+ return false;
-static void
-gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
-{
- u32 gtfifodbg;
+ if (reg >= 0x4000c &&
+ reg <= 0x4002c)
+ return false;
- gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
- if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) {
- printf("MMIO read or write has been dropped %x\n", gtfifodbg);
- I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
- }
-}
+ if (reg >= 0x4f000 &&
+ reg <= 0x4f08f)
+ return false;
-void
-__gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
+ if (reg >= 0x4f100 &&
+ reg <= 0x4f11f)
+ return false;
- I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-}
+ if (reg >= VLV_MASTER_IER &&
+ reg <= GEN6_PMIER)
+ return false;
-void
-__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
-{
+ if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
+ reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
+ return false;
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-}
+ if (reg >= VLV_IIR_RW &&
+ reg <= VLV_ISR)
+ return false;
-void
-gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
+ if (reg == FORCEWAKE_VLV ||
+ reg == FORCEWAKE_ACK_VLV)
+ return false;
- mtx_lock(&dev_priv->gt_lock);
- if (--dev_priv->forcewake_count == 0)
- dev_priv->display.force_wake_put(dev_priv);
- mtx_unlock(&dev_priv->gt_lock);
-}
+ if (reg == GEN6_GDRST)
+ return false;
-int
-__gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
-{
- int ret = 0;
-
- if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
- int loop = 500;
- u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
- DELAY(10);
- fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- }
- if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
- printf("%s loop\n", __func__);
- ++ret;
- }
- dev_priv->gt_fifo_count = fifo;
+ switch (reg) {
+ case _3D_CHICKEN3:
+ case IVB_CHICKEN3:
+ case GEN7_COMMON_SLICE_CHICKEN1:
+ case GEN7_L3CNTLREG1:
+ case GEN7_L3_CHICKEN_MODE_REGISTER:
+ case GEN7_ROW_CHICKEN2:
+ case GEN7_L3SQCREG4:
+ case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
+ case GEN7_HALF_SLICE_CHICKEN1:
+ case GEN6_MBCTL:
+ case GEN6_UCGCTL2:
+ return false;
+ default:
+ break;
}
- dev_priv->gt_fifo_count--;
- return (ret);
+ return true;
}
-void vlv_force_wake_get(struct drm_i915_private *dev_priv)
-{
- int count;
-
- count = 0;
-
- /* Already awake? */
- if ((I915_READ(0x130094) & 0xa1) == 0xa1)
- return;
-
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
- POSTING_READ(FORCEWAKE_VLV);
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
- DELAY(10);
-}
-
-void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
- /* FIXME: confirm VLV behavior with Punit folks */
- POSTING_READ(FORCEWAKE_VLV);
+ /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
+ * chip from rc6 before touching it for real. MI_MODE is masked, hence
+ * harmless to write 0 into. */
+ I915_WRITE_NOTRACE(MI_MODE, 0);
}
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
+ if (IS_GEN5(dev_priv->dev)) \
+ ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
mtx_lock(&dev_priv->gt_lock); \
if (dev_priv->forcewake_count == 0) \
- dev_priv->display.force_wake_get(dev_priv); \
+ dev_priv->gt.force_wake_get(dev_priv); \
val = DRM_READ##x(dev_priv->mmio_map, reg); \
if (dev_priv->forcewake_count == 0) \
- dev_priv->display.force_wake_put(dev_priv); \
+ dev_priv->gt.force_wake_put(dev_priv); \
mtx_unlock(&dev_priv->gt_lock); \
+ } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
+ val = DRM_READ##x(dev_priv->mmio_map, reg + 0x180000); \
} else { \
val = DRM_READ##x(dev_priv->mmio_map, reg); \
} \
@@ -1022,10 +1368,10 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
return val; \
}
-__i915_read(8, 8)
-__i915_read(16, 16)
-__i915_read(32, 32)
-__i915_read(64, 64)
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
#undef __i915_read
#define __i915_write(x, y) \
@@ -1035,13 +1381,73 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- DRM_WRITE##x(dev_priv->mmio_map, reg, val); \
- if (__predict_false(__fifo_ret)) { \
+ if (IS_GEN5(dev_priv->dev)) \
+ ilk_dummy_write(dev_priv); \
+ if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+ DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
+ I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
+ } \
+ if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
+ DRM_WRITE##x(dev_priv->mmio_map, reg + 0x180000, val); \
+ } else { \
+ DRM_WRITE##x(dev_priv->mmio_map, reg, val); \
+ } \
+ if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
+ if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+ DRM_ERROR("Unclaimed write to %x\n", reg); \
+ DRM_WRITE32(dev_priv->mmio_map, GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
+ } \
}
-__i915_write(8, 8)
-__i915_write(16, 16)
-__i915_write(32, 32)
-__i915_write(64, 64)
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
#undef __i915_write
+
+static const struct register_whitelist {
+ uint64_t offset;
+ uint32_t size;
+ uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+} whitelist[] = {
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reg_read *reg = data;
+ struct register_whitelist const *entry = whitelist;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+ if (entry->offset == reg->offset &&
+ (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(whitelist))
+ return -EINVAL;
+
+ switch (entry->size) {
+ case 8:
+ reg->val = I915_READ64(reg->offset);
+ break;
+ case 4:
+ reg->val = I915_READ(reg->offset);
+ break;
+ case 2:
+ reg->val = I915_READ16(reg->offset);
+ break;
+ case 1:
+ reg->val = I915_READ8(reg->offset);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/sys/dev/drm2/i915/i915_drv.h b/sys/dev/drm2/i915/i915_drv.h
index ffdbc18..2dff53c 100644
--- a/sys/dev/drm2/i915/i915_drv.h
+++ b/sys/dev/drm2/i915/i915_drv.h
@@ -57,7 +57,14 @@ enum pipe {
I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')
-#define I915_NUM_PIPE 2
+
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
enum plane {
PLANE_A = 0,
@@ -78,9 +85,12 @@ enum port {
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ if ((intel_encoder)->base.crtc == (__crtc))
+
struct intel_pch_pll {
int refcount; /* count of number of CRTCs sharing this PLL */
int active; /* count of number of active CRTCs (i.e. DPMS on) */
@@ -91,6 +101,12 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
+struct intel_ddi_plls {
+ int spll_refcount;
+ int wrpll1_refcount;
+ int wrpll2_refcount;
+};
+
/* Interface history:
*
* 1.1: Original.
@@ -106,12 +122,8 @@ struct intel_pch_pll {
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
-#define WATCH_BUF 0
-#define WATCH_EXEC 0
-#define WATCH_LRU 0
-#define WATCH_RELOC 0
-#define WATCH_INACTIVE 0
-#define WATCH_PWRITE 0
+#define WATCH_LISTS 0
+#define WATCH_GTT 0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
@@ -124,10 +136,120 @@ struct drm_i915_gem_phys_object {
struct drm_i915_gem_object *cur_obj;
};
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
struct drm_i915_private;
+struct intel_opregion {
+ struct opregion_header __iomem *header;
+ struct opregion_acpi __iomem *acpi;
+ struct opregion_swsci __iomem *swsci;
+ struct opregion_asle __iomem *asle;
+ void __iomem *vbt;
+ u32 __iomem *lid_state;
+};
+#define OPREGION_SIZE (8*1024)
+
+struct intel_overlay;
+struct intel_overlay_error_state;
+
+struct drm_i915_master_private {
+ drm_local_map_t *sarea;
+ struct _drm_i915_sarea *sarea_priv;
+};
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+/* 16 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 5
+
+struct drm_i915_fence_reg {
+ struct list_head lru_list;
+ struct drm_i915_gem_object *obj;
+ int pin_count;
+};
+
+struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 ddc_pin;
+};
+
+struct intel_display_error_state;
+
+struct drm_i915_error_state {
+ u_int ref;
+ u32 eir;
+ u32 pgtbl_er;
+ u32 ier;
+ u32 ccid;
+ u32 derrmr;
+ u32 forcewake;
+ bool waiting[I915_NUM_RINGS];
+ u32 pipestat[I915_MAX_PIPES];
+ u32 tail[I915_NUM_RINGS];
+ u32 head[I915_NUM_RINGS];
+ u32 ctl[I915_NUM_RINGS];
+ u32 ipeir[I915_NUM_RINGS];
+ u32 ipehr[I915_NUM_RINGS];
+ u32 instdone[I915_NUM_RINGS];
+ u32 acthd[I915_NUM_RINGS];
+ u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head[I915_NUM_RINGS];
+ u32 cpu_ring_tail[I915_NUM_RINGS];
+ u32 error; /* gen6+ */
+ u32 err_int; /* gen7 */
+ u32 instpm[I915_NUM_RINGS];
+ u32 instps[I915_NUM_RINGS];
+ u32 extra_instdone[I915_NUM_INSTDONE_REG];
+ u32 seqno[I915_NUM_RINGS];
+ u64 bbaddr;
+ u32 fault_reg[I915_NUM_RINGS];
+ u32 done_reg;
+ u32 faddr[I915_NUM_RINGS];
+ u64 fence[I915_MAX_NUM_FENCES];
+ struct timeval time;
+ struct drm_i915_error_ring {
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer;
+ struct drm_i915_error_request {
+ long jiffies;
+ u32 seqno;
+ u32 tail;
+ } *requests;
+ int num_requests;
+ } ring[I915_NUM_RINGS];
+ struct drm_i915_error_buffer {
+ u32 size;
+ u32 name;
+ u32 rseqno, wseqno;
+ u32 gtt_offset;
+ u32 read_domains;
+ u32 write_domain;
+ s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+ s32 pinned:2;
+ u32 tiling:2;
+ u32 dirty:1;
+ u32 purgeable:1;
+ s32 ring:4;
+ u32 cache_level:2;
+ } *active_bo, *pinned_bo;
+ u32 active_bo_count, pinned_bo_count;
+ struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
+};
+
struct drm_i915_display_funcs {
- void (*dpms)(struct drm_crtc *crtc, int mode);
bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
@@ -136,25 +258,24 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
- void (*sanitize_pm)(struct drm_device *dev);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
+ void (*modeset_global_resources)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
+ void (*crtc_enable)(struct drm_crtc *crtc);
+ void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
- void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
- void (*force_wake_get)(struct drm_i915_private *dev_priv);
- void (*force_wake_put)(struct drm_i915_private *dev_priv);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
/* clock updates for mode set */
@@ -164,9 +285,39 @@ struct drm_i915_display_funcs {
/* pll clock increase/decrease */
};
+struct drm_i915_gt_funcs {
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
+};
+
+#define DEV_INFO_FLAGS \
+ DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+ DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+ DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+ DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+ DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_llc)
+
struct intel_device_info {
u8 gen;
- u8 not_supported:1;
u8 is_mobile:1;
u8 is_i85x:1;
u8 is_i915g:1;
@@ -179,7 +330,7 @@ struct intel_device_info {
u8 is_crestline:1;
u8 is_ivybridge:1;
u8 is_valleyview:1;
- u8 has_pch_split:1;
+ u8 has_force_wake:1;
u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
@@ -196,6 +347,7 @@ struct intel_device_info {
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
+ struct drm_device *dev;
unsigned num_pd_entries;
vm_page_t *pt_pages;
uint32_t pd_offset;
@@ -225,59 +377,18 @@ enum no_fbc_reason {
FBC_MODULE_PARAM,
};
-struct mem_block {
- struct mem_block *next;
- struct mem_block *prev;
- int start;
- int size;
- struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
-struct opregion_header;
-struct opregion_acpi;
-struct opregion_swsci;
-struct opregion_asle;
-
-struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- struct opregion_asle *asle;
- void *vbt;
- u32 *lid_state;
-};
-#define OPREGION_SIZE (8*1024)
-
-struct drm_i915_master_private {
- drm_local_map_t *sarea;
- struct _drm_i915_sarea *sarea_priv;
-};
-#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 16
-/* 16 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 5
-
-struct drm_i915_fence_reg {
- struct list_head lru_list;
- struct drm_i915_gem_object *obj;
- int pin_count;
-};
-
-struct sdvo_device_mapping {
- u8 initialized;
- u8 dvo_port;
- u8 slave_addr;
- u8 dvo_wiring;
- u8 i2c_pin;
- u8 ddc_pin;
-};
-
enum intel_pch {
+ PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
};
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -285,133 +396,22 @@ enum intel_pch {
struct intel_fbdev;
struct intel_fbc_work;
-typedef struct drm_i915_private {
- struct drm_device *dev;
-
- device_t gmbus_bridge[GMBUS_NUM_PORTS + 1];
- device_t bbbus_bridge[GMBUS_NUM_PORTS + 1];
- device_t gmbus[GMBUS_NUM_PORTS + 1];
- device_t bbbus[GMBUS_NUM_PORTS + 1];
- /** gmbus_sx protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
- struct sx gmbus_sx;
- uint32_t gpio_mmio_base;
-
- int relative_constants_mode;
-
- drm_local_map_t *mmio_map;
-
- /** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
- unsigned gt_fifo_count;
- /** forcewake_count is protected by gt_lock */
- unsigned forcewake_count;
- /** gt_lock is also taken in irq contexts. */
- struct mtx gt_lock;
-
- /* drm_i915_ring_buffer_t ring; */
- struct intel_ring_buffer rings[I915_NUM_RINGS];
- uint32_t next_seqno;
-
- drm_dma_handle_t *status_page_dmah;
- void *hw_status_page;
- dma_addr_t dma_status_page;
- uint32_t counter;
- unsigned int status_gfx_addr;
- struct drm_gem_object *hws_obj;
-
- struct drm_i915_gem_object *pwrctx;
- struct drm_i915_gem_object *renderctx;
-
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
-
- atomic_t irq_received;
- u32 trace_irq_seqno;
-
- /** Cached value of IER to avoid reads in updating the bitfield */
- u32 pipestat[2];
- u32 irq_mask;
- u32 gt_irq_mask;
- u32 pch_irq_mask;
- struct mtx irq_lock;
-
- struct mtx dpio_lock;
-
- u32 hotplug_supported_mask;
-
- unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
- int num_pipe;
- int num_pch_pll;
-
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
- int hangcheck_count;
- uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t last_instdone;
- uint32_t last_instdone1;
-
- unsigned int stop_rings;
-
- struct intel_opregion opregion;
-
-
- /* overlay */
- struct intel_overlay *overlay;
- bool sprite_scaling_enabled;
-
- /* LVDS info */
- int backlight_level; /* restore backlight to this value */
- bool backlight_enabled;
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
- /* Feature bits from the VBIOS */
- unsigned int int_tv_support:1;
- unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
- unsigned int int_crt_support:1;
- unsigned int lvds_use_ssc:1;
- unsigned int display_clock_mode:1;
- int lvds_ssc_freq;
- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
- unsigned int lvds_val; /* used for checking LVDS channel mode */
- struct {
- int rate;
- int lanes;
- int preemphasis;
- int vswing;
-
- bool initialized;
- bool support;
- int bpp;
- struct edp_power_seq pps;
- } edp;
- bool no_aux_handshake;
-
- int crt_ddc_pin;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
- int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
- /* PCH chipset type */
- enum intel_pch pch_type;
-
- /* Display functions */
- struct drm_i915_display_funcs display;
-
- unsigned long quirks;
+struct intel_gmbus {
+ device_t gmbus_bridge;
+ device_t gmbus;
+ device_t bbbus_bridge;
+ device_t bbbus;
+ u32 force_bit;
+ u32 reg0;
+ u32 gpio_reg;
+ struct drm_i915_private *dev_priv;
+};
- /* Register state */
- bool modeset_on_lid;
+struct i915_suspend_saved_registers {
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -557,27 +557,243 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
+};
+
+struct intel_gen6_power_mgmt {
+ struct task work;
+ u32 pm_iir;
+ /* lock - irqsave spinlock that protectects the work_struct and
+ * pm_iir. */
+ struct mtx lock;
+
+ /* The below variables an all the rps hw state are protected by
+ * dev->struct mutext. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+
+ struct timeout_task delayed_resume_work;
+
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested.
+ */
+ struct sx hw_lock;
+};
+
+struct intel_ilk_power_mgmt {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
+};
+
+struct i915_dri1_state {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
+
+ uint32_t counter;
+};
+
+struct intel_l3_parity {
+ u32 *remap_info;
+ struct task error_work;
+};
+
+typedef struct drm_i915_private {
+ struct drm_device *dev;
+
+ const struct intel_device_info *info;
+
+ int relative_constants_mode;
+
+ /* FIXME Linux<->FreeBSD: "void *regs" on Linux. */
+ drm_local_map_t *mmio_map;
+
+ struct drm_i915_gt_funcs gt;
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ struct mtx gt_lock;
+
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+ /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct sx gmbus_mutex;
+
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
+ device_t bridge_dev;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
+ uint32_t next_seqno;
+
+ drm_dma_handle_t *status_page_dmah;
+ int mch_res_rid;
+ struct resource *mch_res;
+
+ atomic_t irq_received;
+
+ /* protects the irq masks */
+ struct mtx irq_lock;
+
+ /* DPIO indirect register protection */
+ struct sx dpio_lock;
+
+ /** Cached value of IMR to avoid reads in updating the bitfield */
+ u32 pipestat[2];
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
+
+ u32 hotplug_supported_mask;
+ struct task hotplug_work;
+
+ int num_pipe;
+ int num_pch_pll;
+
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ struct callout hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd[I915_NUM_RINGS];
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+ unsigned int stop_rings;
+
+ unsigned long cfb_size;
+ unsigned int cfb_fb;
+ enum plane cfb_plane;
+ int cfb_y;
+ struct intel_fbc_work *fbc_work;
+
+ struct intel_opregion opregion;
+
+ /* overlay */
+ struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
+
+ /* LVDS info */
+ int backlight_level; /* restore backlight to this value */
+ bool backlight_enabled;
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
+ int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ bool no_aux_handshake;
+
+ int crt_ddc_pin;
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+
+ struct mtx error_lock;
+ /* Protected by dev->error_lock. */
+ struct drm_i915_error_state *first_error;
+ struct task error_work;
+ struct completion error_completion;
+ struct taskqueue *wq;
+
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+ unsigned short pch_id;
+
+ unsigned long quirks;
+
+ /* Register state */
+ bool modeset_on_lid;
+
+ struct {
+ /** Bridge to intel-gtt-ko */
+ struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
- struct list_head gtt_list;
+ struct list_head bound_list;
+ /**
+ * List of objects which are not bound to the GTT (thus
+ * are idle and not used by the GPU) but still have
+ * (presumably uncached) pages still attached.
+ */
+ struct list_head unbound_list;
/** Usable portion of the GTT for GEM */
unsigned long gtt_start;
unsigned long gtt_mappable_end;
unsigned long gtt_end;
+ unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+#ifdef __linux__
+ struct io_mapping *gtt_mapping;
+#endif
+ vm_paddr_t gtt_base_addr;
+ int gtt_mtrr;
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
+ eventhandler_tag inactive_shrinker;
+ bool shrinker_no_lock_stealing;
+
/**
- * List of objects currently involved in rendering from the
- * ringbuffer.
+ * List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
@@ -588,15 +804,6 @@ typedef struct drm_i915_private {
struct list_head active_list;
/**
- * List of objects which are not in the ringbuffer but which
- * still have a write_domain which needs to be flushed before
- * unbinding.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head flushing_list;
-
- /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -618,26 +825,14 @@ typedef struct drm_i915_private {
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
- struct timeout_task retire_task;
+ struct timeout_task retire_work;
- /**
+ /**
* Are we in a non-interruptible section of code like
* modesetting?
*/
bool interruptible;
- uint32_t next_gem_seqno;
-
- /**
- * Waiting sequence number, if any
- */
- uint32_t waiting_gem_seqno;
-
- /**
- * Last seq seen at irq time
- */
- uint32_t irq_gem_seqno;
-
/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
@@ -652,10 +847,10 @@ typedef struct drm_i915_private {
* Flag if the hardware appears to be wedged.
*
* This is set when attempts to idle the device timeout.
- * It prevents command submission from occuring and makes
+ * It prevents command submission from occurring and makes
* every pending request fail
*/
- int wedged;
+ atomic_t wedged;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
@@ -670,20 +865,8 @@ typedef struct drm_i915_private {
size_t mappable_gtt_total;
size_t object_memory;
u32 object_count;
-
- struct intel_gtt gtt;
- eventhandler_tag i915_lowmem;
} mm;
- const struct intel_device_info *info;
-
- /* Old dri1 support infrastructure, beware the dragons ya fools entering
- * here! */
- struct {
- unsigned allow_batchbuffer : 1;
- u32 *gfx_hws_cpu_addr;
- } dri1;
-
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
@@ -694,88 +877,68 @@ typedef struct drm_i915_private {
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
- /* wait_queue_head_t pending_flip_queue; XXXKIB */
+ wait_queue_head_t pending_flip_queue;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+ struct intel_ddi_plls ddi_plls;
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
- struct task idle_task;
- struct callout idle_callout;
- bool busy;
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
- struct drm_connector *int_lvds_connector;
- struct drm_connector *int_edp_connector;
- device_t bridge_dev;
bool mchbar_need_disable;
- int mch_res_rid;
- struct resource *mch_res;
- struct mtx rps_lock;
- u32 pm_iir;
- struct task rps_task;
+ struct intel_l3_parity l3_parity;
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
+ /* gen6+ rps state */
+ struct intel_gen6_power_mgmt rps;
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- int c_m;
- int r_t;
- u8 corr;
- struct mtx *mchdev_lock;
+ /* ilk-only ips/rps state. Everything in here is protected by the global
+ * mchdev_lock in intel_pm.c */
+ struct intel_ilk_power_mgmt ips;
enum no_fbc_reason no_fbc_reason;
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
- unsigned long cfb_size;
- unsigned int cfb_fb;
- int cfb_plane;
- int cfb_y;
- struct intel_fbc_work *fbc_work;
-
- unsigned int fsb_freq, mem_freq, is_ddr3;
-
- struct taskqueue *tq;
- struct task error_task;
- struct task hotplug_task;
- int error_completion;
- struct mtx error_completion_lock;
- /* Protected by dev->error_lock. */
- struct drm_i915_error_state *first_error;
- struct mtx error_lock;
- struct callout hangcheck_timer;
-
unsigned long last_gpu_reset;
+ /* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+ /*
+ * The console may be contended at resume, but we don't
+ * want it to block on it.
+ */
+ struct task console_resume_work;
+
+ struct backlight_device *backlight;
+
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
bool hw_contexts_disabled;
uint32_t hw_context_size;
+
+ u32 fdi_rx_config;
+
+ struct i915_suspend_saved_registers regfile;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct i915_dri1_state dri1;
} drm_i915_private_t;
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
- if (((ring__) = &(dev_priv__)->rings[(i__)]), intel_ring_initialized((ring__)))
+ if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@@ -785,37 +948,48 @@ enum hdmi_force_audio {
};
enum i915_cache_level {
- I915_CACHE_NONE,
+ I915_CACHE_NONE = 0,
I915_CACHE_LLC,
- I915_CACHE_LLC_MLC, /* gen6+ */
+ I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
-enum intel_chip_family {
- CHIP_I8XX = 0x01,
- CHIP_I9XX = 0x02,
- CHIP_I915 = 0x04,
- CHIP_I965 = 0x08,
+struct drm_i915_gem_object_ops {
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages before to binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *);
};
-/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
struct drm_gem_object base;
+ const struct drm_i915_gem_object_ops *ops;
+
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
struct list_head gtt_list;
- /** This object's place on the active/flushing/inactive lists */
+
+ /** This object's place on the active/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
- /** This object's place on GPU write list */
- struct list_head gpu_write_list;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
- * This is set if the object is on the active or flushing lists
- * (has pending rendering), and is not set if it's on inactive (ready
- * to be unbound).
+ * This is set if the object is on the active lists (has pending
+ * rendering and so a non-zero seqno), and is not set if it i s on
+ * inactive (ready to be unbound) list.
*/
unsigned int active:1;
@@ -826,12 +1000,6 @@ struct drm_i915_gem_object {
unsigned int dirty:1;
/**
- * This is set if the object has been written to since the last
- * GPU flush.
- */
- unsigned int pending_gpu_write:1;
-
- /**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
@@ -893,19 +1061,15 @@ struct drm_i915_gem_object {
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
+ unsigned int has_dma_mapping:1;
vm_page_t *pages;
int pages_pin_count;
/**
- * DMAR support
- */
- struct sglist *sg_list;
-
- /**
* Used for performing relocations during execbuffer insertion.
*/
- LIST_ENTRY(drm_i915_gem_object) exec_node;
+ struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
@@ -919,7 +1083,8 @@ struct drm_i915_gem_object {
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
+ uint32_t last_read_seqno;
+ uint32_t last_write_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
@@ -941,10 +1106,11 @@ struct drm_i915_gem_object {
* will be page flipped away on the next vblank. When it
* reaches 0, dev_priv->pending_flip_queue will be woken up.
*/
- int pending_flip;
+ atomic_t pending_flip;
};
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
-#define to_intel_bo(x) __containerof(x, struct drm_i915_gem_object, base)
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/**
* Request queue structure.
@@ -979,72 +1145,110 @@ struct drm_i915_gem_request {
struct drm_i915_file_private {
struct {
+ struct mtx lock;
struct list_head request_list;
- struct mtx lck;
} mm;
struct drm_gem_names context_idr;
};
-struct drm_i915_error_state {
- u_int ref;
- u32 eir;
- u32 pgtbl_er;
- u32 ier;
- bool waiting[I915_NUM_RINGS];
- u32 pipestat[I915_MAX_PIPES];
- u32 tail[I915_NUM_RINGS];
- u32 head[I915_NUM_RINGS];
- u32 ipeir[I915_NUM_RINGS];
- u32 ipehr[I915_NUM_RINGS];
- u32 instdone[I915_NUM_RINGS];
- u32 acthd[I915_NUM_RINGS];
- u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
- /* our own tracking of ring head and tail */
- u32 cpu_ring_head[I915_NUM_RINGS];
- u32 cpu_ring_tail[I915_NUM_RINGS];
- u32 error; /* gen6+ */
- u32 instpm[I915_NUM_RINGS];
- u32 instps[I915_NUM_RINGS];
- u32 instdone1;
- u32 seqno[I915_NUM_RINGS];
- u64 bbaddr;
- u32 fault_reg[I915_NUM_RINGS];
- u32 done_reg;
- u32 faddr[I915_NUM_RINGS];
- u64 fence[I915_MAX_NUM_FENCES];
- struct timeval time;
- struct drm_i915_error_ring {
- struct drm_i915_error_object {
- int page_count;
- u32 gtt_offset;
- u32 *pages[0];
- } *ringbuffer, *batchbuffer;
- struct drm_i915_error_request {
- long jiffies;
- u32 seqno;
- u32 tail;
- } *requests;
- int num_requests;
- } ring[I915_NUM_RINGS];
- struct drm_i915_error_buffer {
- u32 size;
- u32 name;
- u32 seqno;
- u32 gtt_offset;
- u32 read_domains;
- u32 write_domain;
- s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
- s32 pinned:2;
- u32 tiling:2;
- u32 dirty:1;
- u32 purgeable:1;
- s32 ring:4;
- u32 cache_level:2;
- } *active_bo, *pinned_bo;
- u32 active_bo_count, pinned_bo_count;
- struct intel_overlay_error_state *overlay;
- struct intel_display_error_state *display;
-};
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
+ (dev)->pci_device == 0x0152 || \
+ (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
+ (dev)->pci_device == 0x0106 || \
+ (dev)->pci_device == 0x010A)
+#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev) (IS_HASWELL(dev) && \
+ ((dev)->pci_device & 0xFF00) == 0x0A00)
+
+/*
+ * The genX designation typically refers to the render engine, so render
+ * capability related checks should use IS_GEN, while display and other checks
+ * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
+ * chips, etc.).
+ */
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
+
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
+
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
/**
* RC6 is a special power stage which allows the GPU to enter an very
@@ -1067,45 +1271,39 @@ struct drm_i915_error_state {
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-extern int intel_iommu_enabled;
extern struct drm_ioctl_desc i915_ioctls[];
+extern int i915_max_ioctl;
+extern unsigned int i915_fbpercrtc __always_unused;
+extern int i915_panel_ignore_lid __read_mostly;
+extern unsigned int i915_powersave __read_mostly;
+extern int i915_semaphores __read_mostly;
+extern unsigned int i915_lvds_downclock __read_mostly;
+extern int i915_lvds_channel_mode __read_mostly;
+extern int i915_panel_use_ssc __read_mostly;
+extern int i915_vbt_sdvo_panel_type __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
+extern int i915_enable_fbc __read_mostly;
+extern int i915_enable_hangcheck __read_mostly;
+extern int i915_enable_ppgtt __read_mostly;
+extern unsigned int i915_preliminary_hw_support __read_mostly;
+
extern struct drm_driver i915_driver_info;
extern struct cdev_pager_ops i915_gem_pager_ops;
-extern unsigned int i915_fbpercrtc;
-extern int i915_panel_ignore_lid;
-extern int i915_panel_invert_brightness;
-extern unsigned int i915_powersave;
-extern int i915_prefault_disable;
-extern int i915_semaphores;
-extern unsigned int i915_lvds_downclock;
-extern int i915_lvds_channel_mode;
-extern int i915_panel_use_ssc;
-extern int i915_vbt_sdvo_panel_type;
-extern int i915_enable_rc6;
-extern int i915_enable_fbc;
-extern int i915_enable_ppgtt;
-extern int i915_enable_hangcheck;
+extern int intel_iommu_gfx_mapped;
const struct intel_device_info *i915_get_device_id(int device);
-int i915_reset(struct drm_device *dev);
-extern int intel_gpu_reset(struct drm_device *dev);
-
/* i915_debug.c */
int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
struct sysctl_oid *top);
void i915_sysctl_cleanup(struct drm_device *dev);
+extern int i915_suspend(struct drm_device *dev, pm_message_t state);
+extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_dma.c */
-int i915_batchbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_cmdbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
@@ -1117,35 +1315,54 @@ extern void i915_driver_preclose(struct drm_device *dev,
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#endif
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-unsigned long i915_read_mch_val(void);
-bool i915_gpu_raise(void);
-bool i915_gpu_lower(void);
-bool i915_gpu_busy(void);
-bool i915_gpu_turbo_disable(void);
+extern int intel_gpu_reset(struct drm_device *dev);
+extern int i915_reset(struct drm_device *dev);
+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-/* i915_irq.c */
+extern int i915_batchbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_cmdbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void intel_irq_init(struct drm_device *dev);
+extern int i915_getparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
-void intel_enable_asle(struct drm_device *dev);
-void i915_hangcheck_elapsed(void *context);
+extern void intel_console_resume(void *context, int pending);
+
+/* i915_irq.c */
+void i915_hangcheck_elapsed(void *data);
void i915_handle_error(struct drm_device *dev, bool wedged);
+
+extern void intel_irq_init(struct drm_device *dev);
+extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_reset(struct drm_device *dev);
+
void i915_error_state_free(struct drm_i915_error_state *error);
-void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
-void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
-void i915_destroy_error_state(struct drm_device *dev);
+void intel_enable_asle(struct drm_device *dev);
+
+//#ifdef CONFIG_DEBUG_FS
+extern void i915_destroy_error_state(struct drm_device *dev);
+//#else
+//#define i915_destroy_error_state(x)
+//#endif
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -1167,13 +1384,17 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+ struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1188,16 +1409,64 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
-void i915_gem_unload(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
- bool map_and_fenceable);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable,
+ bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ /* KASSERT(obj->pages != NULL, ("pin and NULL pages")); */
+ obj->pages_pin_count++;
+}
+
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ KASSERT(obj->pages_pin_count != 0, ("zero pages_pin_count"));
+ obj->pages_pin_count--;
+}
+
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring);
+
+int i915_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t)(seq1 - seq2) >= 0;
+}
+
+extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
@@ -1221,46 +1490,66 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ bool interruptible);
+
+void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
- size_t size);
-uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
- uint32_t size, int tiling_mode);
-int i915_mutex_lock_interruptible(struct drm_device *dev);
-int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
- bool write);
-int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
- bool write);
-int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
- u32 alignment, struct intel_ring_buffer *pipelined);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
-int i915_gem_flush_ring(struct intel_ring_buffer *ring,
- uint32_t invalidate_domains, uint32_t flush_domains);
-void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-int i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to);
-int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-int i915_gem_idle(struct drm_device *dev);
-int i915_gem_init(struct drm_device *dev);
-int i915_gem_init_hw(struct drm_device *dev);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
+int __must_check i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gpu_idle(struct drm_device *dev);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring, uint32_t seqno);
-int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
- struct drm_i915_gem_request *request);
-int i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-void i915_gem_reset(struct drm_device *dev);
-int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno);
-int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ u32 *seqno);
+int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+ uint32_t seqno);
int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
uint64_t *phys);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+ bool write);
+int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ struct intel_ring_buffer *pipelined);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ int id,
+ int align);
+void i915_gem_detach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj);
+void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode);
+
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
+ enum i915_cache_level cache_level);
+
+#ifdef FREEBSD_WIP
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags);
+#endif /* FREEBSD_WIP */
+
+int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
@@ -1273,18 +1562,44 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_i915_gem_object *obj, int id, int align);
+/* i915_gem_gtt.c */
+int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj);
-int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
-int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_gtt_fini(struct drm_device *dev);
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gtt_chipset_flush();
+}
+
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment,
+ unsigned cache_level,
+ bool mappable,
+ bool nonblock);
+int i915_gem_evict_everything(struct drm_device *dev);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1293,56 +1608,62 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj,
struct vm_page *m);
-/* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
-
-/* i915_gem_stolen.c */
-int i915_gem_init_stolen(struct drm_device *dev);
-void i915_gem_cleanup_stolen(struct drm_device *dev);
+/* i915_gem_debug.c */
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
+ const char *where, uint32_t mark);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
+#else
+#define i915_verify_lists(dev) 0
+#endif
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+ int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
+ const char *where, uint32_t mark);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
-/* intel_iic.c */
+/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
-extern void intel_gmbus_set_speed(device_t idev, int speed);
-extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
-extern void intel_iic_reset(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}
-extern device_t intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
- unsigned port);
+
+extern device_t intel_gmbus_get_adapter(
+ struct drm_i915_private *dev_priv, unsigned port);
+extern void intel_gmbus_set_speed(device_t idev, int speed);
+extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
+extern bool intel_gmbus_is_forced_bit(device_t adapter);
+extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
-int intel_opregion_setup(struct drm_device *dev);
+extern int intel_opregion_setup(struct drm_device *dev);
+#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern void intel_opregion_gse_intr(struct drm_device *dev);
extern void intel_opregion_enable_asle(struct drm_device *dev);
+#else
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
+#endif
-/* i915_gem_gtt.c */
-int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj, enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj);
-
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-int i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
- unsigned long mappable_end, unsigned long end);
+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
@@ -1350,34 +1671,31 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore);
+extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
-extern void ironlake_enable_rc6(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
-/* IPS */
-extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-extern void intel_gpu_ips_teardown(void);
+extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
-extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
-
-extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
-extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
-
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
- struct drm_device *dev);
-extern void intel_overlay_print_error_state(struct sbuf *m,
- struct intel_overlay_error_state *error);
-extern struct intel_display_error_state *intel_display_capture_error_state(
- struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+/* overlay */
+//#ifdef CONFIG_DEBUG_FS
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct sbuf *m, struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct sbuf *m,
- struct drm_device *dev, struct intel_display_error_state *error);
+ struct drm_device *dev,
+ struct intel_display_error_state *error);
+//#endif
static inline void
trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz)
@@ -1394,6 +1712,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
@@ -1431,143 +1752,6 @@ __i915_write(64, 64)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
-#define I915_VERBOSE 0
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 0x04: ring 0 head pointer
- * 0x05: ring 1 head pointer (915-class)
- * 0x06: ring 2 head pointer (915-class)
- * 0x10-0x1b: Context status DWords (GM45)
- * 0x1f: Last written status offset. (GM45)
- *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
- */
-#define I915_GEM_HWS_INDEX 0x20
-
-#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
-
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
-#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
-#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-
-/* XXXKIB LEGACY */
-#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
- (dev)->pci_device == 0x2982 || \
- (dev)->pci_device == 0x2992 || \
- (dev)->pci_device == 0x29A2 || \
- (dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12 || \
- (dev)->pci_device == 0x2A42 || \
- (dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32)
-
-#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
-
-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
-
-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
- IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
-/* XXXKIB LEGACY END */
-
-#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
-
-#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
-#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
-#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-
-#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6)
-
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
-
-/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changed the alignment requirements and fence programming.
- */
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-
-#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
-#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
-
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
-#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-
-#define PRIMARY_RINGBUFFER_SIZE (128*1024)
-
-static inline bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
-
- return ((int32_t)(seq1 - seq2) >= 0);
-}
-
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
-{
- if (INTEL_INFO(dev)->gen < 6)
- intel_gtt_chipset_flush();
-}
-
-static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
- /* KASSERT(obj->pages != NULL, ("pin and NULL pages")); */
- obj->pages_pin_count++;
-}
-static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
- KASSERT(obj->pages_pin_count != 0, ("zero pages_pin_count"));
- obj->pages_pin_count--;
-}
-
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
#endif
diff --git a/sys/dev/drm2/i915/i915_gem.c b/sys/dev/drm2/i915/i915_gem.c
index 2d97f74..bece6b2 100644
--- a/sys/dev/drm2/i915/i915_gem.c
+++ b/sys/dev/drm2/i915/i915_gem.c
@@ -55,11 +55,9 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
-#include <dev/drm2/i915/intel_ringbuffer.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
@@ -70,19 +68,12 @@ __FBSDID("$FreeBSD$");
#include <machine/md_var.h>
-#define __user
-#define __force
-#define __iomem
-#define __must_check
-#define to_user_ptr(x) ((void *)(uintptr_t)(x))
-#define offset_in_page(x) ((x) & PAGE_MASK)
-#define page_to_phys(x) VM_PAGE_TO_PHYS(x)
-
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
- bool map_and_fenceable);
+ bool map_and_fenceable,
+ bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -94,13 +85,13 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static void i915_gem_lowmem(void *arg);
+static void i915_gem_inactive_shrink(void *);
+static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static int i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
off_t start, off_t end);
-static void i915_gem_object_put_pages_range(struct drm_i915_gem_object *obj,
- off_t start, off_t end);
static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex,
bool *fresh);
@@ -108,20 +99,6 @@ static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex,
MALLOC_DEFINE(DRM_I915_GEM, "i915gem", "Allocations from i915 gem");
long i915_gem_wired_pages_cnt;
-static bool cpu_cache_is_coherent(struct drm_device *dev,
- enum i915_cache_level level)
-{
- return HAS_LLC(dev) || level != I915_CACHE_NONE;
-}
-
-static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
- return true;
-
- return obj->pin_display;
-}
-
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
if (obj->tiling_mode)
@@ -153,33 +130,34 @@ static int
i915_gem_wait_for_error(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct completion *x = &dev_priv->error_completion;
int ret;
- if (!atomic_load_acq_int(&dev_priv->mm.wedged))
+ if (!atomic_read(&dev_priv->mm.wedged))
return 0;
- mtx_lock(&dev_priv->error_completion_lock);
- while (dev_priv->error_completion == 0) {
- ret = -msleep(&dev_priv->error_completion,
- &dev_priv->error_completion_lock, PCATCH, "915wco", 0);
- if (ret == -ERESTART)
- ret = -ERESTARTSYS;
- if (ret != 0) {
- mtx_unlock(&dev_priv->error_completion_lock);
- return ret;
- }
+ /*
+ * Only wait 10 seconds for the gpu reset to complete to avoid hanging
+ * userspace. If it takes that long something really bad is going on and
+ * we should simply try to bail out and fail as gracefully as possible.
+ */
+ ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
+ if (ret == 0) {
+ DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
+ return -EIO;
+ } else if (ret < 0) {
+ return ret;
}
- mtx_unlock(&dev_priv->error_completion_lock);
- if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
/* GPU is hung, bump the completion count to account for
* the token we just consumed so that we never hit zero and
* end up waiting upon a subsequent completion event that
* will never happen.
*/
- mtx_lock(&dev_priv->error_completion_lock);
- dev_priv->error_completion++;
- mtx_unlock(&dev_priv->error_completion_lock);
+ mtx_lock(&x->lock);
+ x->done++;
+ mtx_unlock(&x->lock);
}
return 0;
}
@@ -196,17 +174,18 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
* interruptible shall it be. might indeed be if dev_lock is
* changed to sx
*/
- ret = -sx_xlock_sig(&dev->dev_struct_lock);
+ ret = sx_xlock_sig(&dev->dev_struct_lock);
if (ret)
- return ret;
+ return -EINTR;
+ WARN_ON(i915_verify_lists(dev));
return 0;
}
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return !obj->active;
+ return obj->gtt_space && !obj->active;
}
int
@@ -214,8 +193,6 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_init *args = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@@ -224,9 +201,6 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
(args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
return -EINVAL;
- if (mtx_initialized(&dev_priv->mm.gtt_space.unused_lock))
- return -EBUSY;
-
/* GEM with user mode setting was never supported on ilk and later. */
if (INTEL_INFO(dev)->gen >= 5)
return -ENODEV;
@@ -236,11 +210,11 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
* against.
*/
DRM_LOCK(dev);
- ret = i915_gem_init_global_gtt(dev, args->gtt_start,
+ i915_gem_init_global_gtt(dev, args->gtt_start,
args->gtt_end, args->gtt_end);
DRM_UNLOCK(dev);
- return ret;
+ return 0;
}
int
@@ -254,7 +228,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
DRM_LOCK(dev);
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
pinned += obj->gtt_space->size;
DRM_UNLOCK(dev);
@@ -341,79 +315,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
}
static inline int
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned n)
-{
- return (copyout_nofault(from, to, n) != 0 ? n : 0);
-}
-static inline unsigned long
-__copy_from_user_inatomic_nocache(void *to, const void __user *from,
- unsigned long n)
-{
-
- /*
- * XXXKIB. Equivalent Linux function is implemented using
- * MOVNTI for aligned moves. For unaligned head and tail,
- * normal move is performed. As such, it is not incorrect, if
- * only somewhat slower, to use normal copyin. All uses
- * except shmem_pwrite_fast() have the destination mapped WC.
- */
- return ((copyin_nofault(__DECONST(void *, from), to, n) != 0 ? n : 0));
-}
-static inline int
-fault_in_multipages_readable(const char __user *uaddr, int size)
-{
- char c;
- int ret = 0;
- const char __user *end = uaddr + size - 1;
-
- if (unlikely(size == 0))
- return ret;
-
- while (uaddr <= end) {
- ret = -copyin(uaddr, &c, 1);
- if (ret != 0)
- return -EFAULT;
- uaddr += PAGE_SIZE;
- }
-
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & ~PAGE_MASK) ==
- ((unsigned long)end & ~PAGE_MASK)) {
- ret = -copyin(end, &c, 1);
- }
-
- return ret;
-}
-
-static inline int
-fault_in_multipages_writeable(char __user *uaddr, int size)
-{
- int ret = 0;
- char __user *end = uaddr + size - 1;
-
- if (unlikely(size == 0))
- return ret;
-
- /*
- * Writing zeroes into userspace here is OK, because we know that if
- * the zero gets there, we'll be overwriting it.
- */
- while (uaddr <= end) {
- ret = subyte(uaddr, 0);
- if (ret != 0)
- return -EFAULT;
- uaddr += PAGE_SIZE;
- }
-
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & ~PAGE_MASK) ==
- ((unsigned long)end & ~PAGE_MASK))
- ret = subyte(end, 0);
-
- return ret;
-}
-
-static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
int length)
@@ -511,8 +412,8 @@ shmem_clflush_swizzled_range(char *addr, unsigned long length,
* channels. Lame, but simple and it works. Swizzled
* pwrite/pread is far from a hotpath - current userspace
* doesn't use it at all. */
- start = rounddown2(start, 128);
- end = roundup2(end, 128);
+ start = round_down(start, 128);
+ end = round_up(end, 128);
drm_clflush_virt_range((void *)start, end - start);
} else {
@@ -559,15 +460,16 @@ i915_gem_shmem_pread(struct drm_device *dev,
struct drm_file *file)
{
char __user *user_data;
- ssize_t remain, sremain;
- off_t offset, soffset;
+ ssize_t remain;
+ off_t offset;
int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
user_data = to_user_ptr(args->data_ptr);
- sremain = remain = args->size;
+ remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -576,19 +478,23 @@ i915_gem_shmem_pread(struct drm_device *dev,
* read domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
- needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret)
- return ret;
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush = 1;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ return ret;
+ }
}
- soffset = offset = args->offset;
- ret = i915_gem_object_get_pages_range(obj, soffset, soffset + sremain);
+ ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
i915_gem_object_pin_pages(obj);
+ offset = args->offset;
+
VM_OBJECT_WLOCK(obj->base.vm_obj);
for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
OFF_TO_IDX(offset));; page = vm_page_next(page)) {
@@ -616,9 +522,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
if (ret == 0)
goto next_page;
+ hit_slowpath = 1;
DRM_UNLOCK(dev);
- if (likely(!i915_prefault_disable) && !prefaulted) {
+ if (!prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
@@ -648,7 +555,12 @@ next_page:
out:
i915_gem_object_unpin_pages(obj);
- i915_gem_object_put_pages_range(obj, soffset, soffset + sremain);
+
+ if (hit_slowpath) {
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ }
return ret;
}
@@ -689,9 +601,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
-#if 1
- KIB_NOTYET();
-#else
+#ifdef FREEBSD_WIP
/* prime objects have no backing filp to GEM pread/pwrite
* pages from.
*/
@@ -699,7 +609,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out;
}
-#endif
+#endif /* FREEBSD_WIP */
CTR3(KTR_DRM, "pread %p %jx %jx", obj, args->offset, args->size);
@@ -717,7 +627,7 @@ unlock:
*/
static inline int
-fast_user_write(struct drm_device *dev,
+fast_user_write(vm_paddr_t mapping_addr,
off_t page_base, int page_offset,
char __user *user_data,
int length)
@@ -726,7 +636,7 @@ fast_user_write(struct drm_device *dev,
void *vaddr;
unsigned long unwritten;
- vaddr_atomic = pmap_mapdev_attr(dev->agp->base + page_base,
+ vaddr_atomic = pmap_mapdev_attr(mapping_addr + page_base,
length, PAT_WRITE_COMBINING);
/* We can use the cpu mem copy function because this is X86. */
vaddr = (char __force*)vaddr_atomic + page_offset;
@@ -746,13 +656,13 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
off_t offset, page_base;
char __user *user_data;
int page_offset, page_length, ret;
- ret = i915_gem_object_pin(obj, 0, true);
- /* XXXKIB ret = i915_gem_obj_ggtt_pin(obj, 0, true, true); */
+ ret = i915_gem_object_pin(obj, 0, true, true);
if (ret)
goto out;
@@ -786,7 +696,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (fast_user_write(dev, page_base,
+ if (fast_user_write(dev_priv->mm.gtt_base_addr, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_unpin;
@@ -885,8 +795,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- ssize_t remain, sremain;
- off_t offset, soffset;
+ ssize_t remain;
+ off_t offset;
char __user *user_data;
int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
@@ -895,7 +805,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int needs_clflush_before = 0;
user_data = to_user_ptr(args->data_ptr);
- sremain = remain = args->size;
+ remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -904,24 +814,27 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* write domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will use the data
* right away and we therefore have to clflush anyway. */
- needs_clflush_after = cpu_write_needs_clflush(obj);
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- return ret;
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_after = 1;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+ }
}
- /* Same trick applies to invalidate partially written cachelines read
- * before writing. */
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
- needs_clflush_before =
- !cpu_cache_is_coherent(dev, obj->cache_level);
+ /* Same trick applies for invalidate partially written cachelines before
+ * writing. */
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
+ && obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_before = 1;
- soffset = offset = args->offset;
- ret = i915_gem_object_get_pages_range(obj, soffset, soffset + sremain);
+ ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
i915_gem_object_pin_pages(obj);
+ offset = args->offset;
obj->dirty = 1;
VM_OBJECT_WLOCK(obj->base.vm_obj);
@@ -985,16 +898,14 @@ next_page:
out:
i915_gem_object_unpin_pages(obj);
- i915_gem_object_put_pages_range(obj, soffset, soffset + sremain);
if (hit_slowpath) {
- /*
- * Fixup: Flush cpu caches in case we didn't flush the dirty
- * cachelines in-line while writing and the object moved
- * out of the cpu write domain while we've dropped the lock.
- */
- if (!needs_clflush_after &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ /* and flush dirty cachelines in case the object isn't in the cpu write
+ * domain anymore. */
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
i915_gem_chipset_flush(dev);
}
@@ -1025,12 +936,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_READ))
return -EFAULT;
- if (likely(!i915_prefault_disable)) {
- ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
- args->size);
- if (ret)
- return -EFAULT;
- }
+ ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+ args->size);
+ if (ret)
+ return -EFAULT;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -1049,9 +958,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
-#if 1
- KIB_NOTYET();
-#else
+#ifdef FREEBSD_WIP
/* prime objects have no backing filp to GEM pread/pwrite
* pages from.
*/
@@ -1059,7 +966,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out;
}
-#endif
+#endif /* FREEBSD_WIP */
CTR3(KTR_DRM, "pwrite %p %jx %jx", obj, args->offset, args->size);
@@ -1075,9 +982,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (obj->tiling_mode == I915_TILING_NONE &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
- cpu_write_needs_clflush(obj)) {
+ if (obj->cache_level == I915_CACHE_NONE &&
+ obj->tiling_mode == I915_TILING_NONE &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
@@ -1094,20 +1001,29 @@ unlock:
return ret;
}
-static int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv)
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ bool interruptible)
{
- DRM_LOCK_ASSERT(dev_priv->dev);
-
- if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ struct completion *x = &dev_priv->error_completion;
bool recovery_complete;
/* Give the error handler a chance to run. */
- mtx_lock(&dev_priv->error_completion_lock);
- recovery_complete = (&dev_priv->error_completion) > 0;
- mtx_unlock(&dev_priv->error_completion_lock);
+ mtx_lock(&x->lock);
+ recovery_complete = x->done > 0;
+ mtx_unlock(&x->lock);
- return (recovery_complete ? -EIO : -EAGAIN);
+ /* Non-interruptible callers can't handle -EAGAIN, hence return
+ * -EIO unconditionally for these. */
+ if (!interruptible)
+ return -EIO;
+
+ /* Recovery complete, but still wedged means reset failure. */
+ if (recovery_complete)
+ return -EIO;
+
+ return -EAGAIN;
}
return 0;
@@ -1125,54 +1041,114 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
DRM_LOCK_ASSERT(ring->dev);
ret = 0;
- if (seqno == ring->outstanding_lazy_request) {
- struct drm_i915_gem_request *request;
+ if (seqno == ring->outstanding_lazy_request)
+ ret = i915_add_request(ring, NULL, NULL);
- request = malloc(sizeof(*request), DRM_I915_GEM,
- M_WAITOK | M_ZERO);
-
- ret = i915_add_request(ring, NULL, request);
- if (ret != 0) {
- free(request, DRM_I915_GEM);
- return ret;
- }
-
- MPASS(seqno == request->seqno);
- }
return ret;
}
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: duh!
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the alloted time. Else returns the
+ * errno with remaining time filled in timeout argument.
+ */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
- bool interruptible)
+ bool interruptible, struct timespec *timeout)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- int ret = 0, flags;
+ struct timespec before, now, wait_time={1,0};
+ sbintime_t timeout_sbt;
+ long end;
+ bool wait_forever = true;
+ int ret, flags;
- if (i915_seqno_passed(ring->get_seqno(ring), seqno))
+ if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
- mtx_lock(&dev_priv->irq_lock);
- if (!ring->irq_get(ring)) {
- mtx_unlock(&dev_priv->irq_lock);
- return -ENODEV;
+ if (timeout != NULL) {
+ wait_time = *timeout;
+ wait_forever = false;
}
+ timeout_sbt = tstosbt(wait_time);
+
+ if (WARN_ON(!ring->irq_get(ring)))
+ return -ENODEV;
+
+ /* Record current time in case interrupted by signal, or wedged * */
+ getrawmonotonic(&before);
+
+#define EXIT_COND \
+ (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
+ atomic_read(&dev_priv->mm.wedged))
flags = interruptible ? PCATCH : 0;
- while (!i915_seqno_passed(ring->get_seqno(ring), seqno)
- && !atomic_load_acq_int(&dev_priv->mm.wedged) &&
- ret == 0) {
- ret = -msleep(ring, &dev_priv->irq_lock, flags, "915gwr", 0);
- if (ret == -ERESTART)
- ret = -ERESTARTSYS;
- }
- ring->irq_put(ring);
+ mtx_lock(&dev_priv->irq_lock);
+ do {
+ if (EXIT_COND) {
+ end = 1;
+ } else {
+ ret = -msleep_sbt(&ring->irq_queue, &dev_priv->irq_lock, flags,
+ "915gwr", timeout_sbt, 0, 0);
+
+ /*
+ * NOTE Linux<->FreeBSD: Convert msleep_sbt() return
+ * value to something close to wait_event*_timeout()
+ * functions used on Linux.
+ *
+ * >0 -> condition is true (end = time remaining)
+ * =0 -> sleep timed out
+ * <0 -> error (interrupted)
+ *
+ * We fake the remaining time by returning 1. We
+ * compute a proper value later.
+ */
+ if (EXIT_COND)
+ /* We fake a remaining time of 1 tick. */
+ end = 1;
+ else if (ret == -EINTR || ret == -ERESTART)
+ /* Interrupted. */
+ end = -ERESTARTSYS;
+ else
+ /* Timeout. */
+ end = 0;
+ }
+
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
+ if (ret)
+ end = ret;
+ } while (end == 0 && wait_forever);
mtx_unlock(&dev_priv->irq_lock);
- CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, ret);
+ getrawmonotonic(&now);
- return ret;
+ ring->irq_put(ring);
+ CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, end);
+#undef EXIT_COND
+
+ if (timeout) {
+ timespecsub(&now, &before);
+ timespecsub(timeout, &now);
+ }
+
+ switch (end) {
+ case -EIO:
+ case -EAGAIN: /* Wedged */
+ case -ERESTARTSYS: /* Signal */
+ case -ETIMEDOUT: /* Timeout */
+ return (int)end;
+ case 0: /* Timeout */
+ return -ETIMEDOUT;
+ default: /* Completed */
+ WARN_ON(end < 0); /* We're not aware of other errors */
+ return 0;
+ }
}
/**
@@ -1180,15 +1156,17 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
* request and object lists appropriately for that event.
*/
int
-i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno)
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ bool interruptible = dev_priv->mm.interruptible;
int ret;
- KASSERT(seqno != 0, ("Zero seqno"));
+ DRM_LOCK_ASSERT(dev);
+ BUG_ON(seqno == 0);
- ret = i915_gem_check_wedge(dev_priv);
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
if (ret)
return ret;
@@ -1196,11 +1174,7 @@ i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno)
if (ret)
return ret;
- ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
- if (atomic_load_acq_int(&dev_priv->mm.wedged))
- ret = -EAGAIN;
-
- return ret;
+ return __wait_seqno(ring, seqno, interruptible, NULL);
}
/**
@@ -1208,26 +1182,86 @@ i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno)
* safe to unbind from the GTT or access from the CPU.
*/
static __must_check int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly)
{
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
int ret;
- KASSERT((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0,
- ("In GPU write domain"));
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
- CTR5(KTR_DRM, "object_wait_rendering %p %s %x %d %d", obj,
- obj->ring != NULL ? obj->ring->name : "none", obj->gtt_offset,
- obj->active, obj->last_rendering_seqno);
- if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
- if (ret != 0)
- return (ret);
- i915_gem_retire_requests_ring(obj->ring);
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
+ */
+ if (obj->last_write_seqno &&
+ i915_seqno_passed(seqno, obj->last_write_seqno)) {
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
return 0;
}
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
+ int ret;
+
+ DRM_LOCK_ASSERT(dev);
+ BUG_ON(!dev_priv->mm.interruptible);
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_gem_check_wedge(dev_priv, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ DRM_UNLOCK(dev);
+ ret = __wait_seqno(ring, seqno, true, NULL);
+ DRM_LOCK(dev);
+
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
+ */
+ if (ret == 0 &&
+ obj->last_write_seqno &&
+ i915_seqno_passed(seqno, obj->last_write_seqno)) {
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ return ret;
+}
+
+/**
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
+ */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -1261,6 +1295,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
+ /* Try to flush the object off the GPU without holding the lock.
+ * We will repeat the flush holding the lock in the normal manner
+ * to catch cases where we are gazumped.
+ */
+ ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+ if (ret)
+ goto unref;
+
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -1274,6 +1316,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
+unref:
drm_gem_object_unreference(&obj->base);
unlock:
DRM_UNLOCK(dev);
@@ -1334,6 +1377,16 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
+#ifdef FREEBSD_WIP
+ /* prime objects have no backing filp to GEM mmap
+ * pages from.
+ */
+ if (!obj->filp) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EINVAL;
+ }
+#endif /* FREEBSD_WIP */
+
error = 0;
if (args->size == 0)
goto out;
@@ -1360,7 +1413,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
args->addr_ptr = (uint64_t)addr;
}
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return (error);
}
@@ -1369,6 +1422,34 @@ i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
+ /*
+ * NOTE Linux<->FreeBSD: drm_gem_mmap_single() takes care of
+ * calling drm_gem_object_reference(). That's why we don't
+ * do this here. i915_gem_pager_dtor(), below, will call
+ * drm_gem_object_unreference().
+ *
+ * On Linux, drm_gem_vm_open() references the object because
+ * it's called the mapping is copied. drm_gem_vm_open() is not
+ * called when the mapping is created. So the possible sequences
+ * are:
+ * 1. drm_gem_mmap(): ref++
+ * 2. drm_gem_vm_close(): ref--
+ *
+ * 1. drm_gem_mmap(): ref++
+ * 2. drm_gem_vm_open(): ref++ (for the copied vma)
+ * 3. drm_gem_vm_close(): ref-- (for the copied vma)
+ * 4. drm_gem_vm_close(): ref-- (for the initial vma)
+ *
+ * On FreeBSD, i915_gem_pager_ctor() is called once during the
+ * creation of the mapping. No callback is called when the
+ * mapping is shared during a fork(). i915_gem_pager_dtor() is
+ * called when the last reference to the mapping is dropped. So
+ * the only sequence is:
+ * 1. drm_gem_mmap_single(): ref++
+ * 2. i915_gem_pager_ctor(): <noop>
+ * 3. i915_gem_pager_dtor(): ref--
+ */
+
*color = 0; /* XXXKIB */
return (0);
}
@@ -1396,23 +1477,19 @@ static int
i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
vm_page_t *mres)
{
- struct drm_gem_object *gem_obj;
- struct drm_i915_gem_object *obj;
- struct drm_device *dev;
- drm_i915_private_t *dev_priv;
+ struct drm_gem_object *gem_obj = vm_obj->handle;
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
vm_page_t page, oldpage;
- int cause, ret;
- bool write;
-
- gem_obj = vm_obj->handle;
- obj = to_intel_bo(gem_obj);
- dev = obj->base.dev;
- dev_priv = dev->dev_private;
-#if 0
- write = (prot & VM_PROT_WRITE) != 0;
+ int ret = 0;
+#ifdef FREEBSD_WIP
+ bool write = (prot & VM_PROT_WRITE) != 0;
#else
- write = true;
-#endif
+ bool write = true;
+#endif /* FREEBSD_WIP */
+ bool pinned;
+
vm_object_pip_add(vm_obj, 1);
/*
@@ -1422,7 +1499,7 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
* object, then it owns the drm device sx and might find the
* placeholder already. Then, since the page is busy,
* i915_gem_release_mmap() sleeps waiting for the busy state
- * of the page cleared. We will be not able to acquire drm
+ * of the page cleared. We will be unable to acquire drm
* device lock until i915_gem_release_mmap() is able to make a
* progress.
*/
@@ -1436,15 +1513,14 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
oldpage = NULL;
VM_OBJECT_WUNLOCK(vm_obj);
retry:
- cause = ret = 0;
+ ret = 0;
+ pinned = 0;
page = NULL;
if (i915_intr_pf) {
ret = i915_mutex_lock_interruptible(dev);
- if (ret != 0) {
- cause = 10;
+ if (ret != 0)
goto out;
- }
} else
DRM_LOCK(dev);
@@ -1468,56 +1544,37 @@ retry:
VM_OBJECT_WUNLOCK(vm_obj);
/* Now bind it into the GTT if needed */
- if (!obj->map_and_fenceable) {
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- cause = 20;
- goto unlock;
- }
- }
- if (!obj->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0, true);
- if (ret != 0) {
- cause = 30;
- goto unlock;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret != 0) {
- cause = 40;
- goto unlock;
- }
- }
+ ret = i915_gem_object_pin(obj, 0, true, false);
+ if (ret)
+ goto unlock;
+ pinned = 1;
- if (!obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unpin;
ret = i915_gem_object_get_fence(obj);
- if (ret != 0) {
- cause = 50;
- goto unlock;
- }
-
- if (i915_gem_object_is_inactive(obj))
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ if (ret)
+ goto unpin;
obj->fault_mappable = true;
+
VM_OBJECT_WLOCK(vm_obj);
- page = PHYS_TO_VM_PAGE(dev->agp->base + obj->gtt_offset + offset);
+ page = PHYS_TO_VM_PAGE(dev_priv->mm.gtt_base_addr + obj->gtt_offset + offset);
KASSERT((page->flags & PG_FICTITIOUS) != 0,
("physical address %#jx not fictitious",
- (uintmax_t)(dev->agp->base + obj->gtt_offset + offset)));
+ (uintmax_t)(dev_priv->mm.gtt_base_addr + obj->gtt_offset + offset)));
if (page == NULL) {
VM_OBJECT_WUNLOCK(vm_obj);
- cause = 60;
ret = -EFAULT;
- goto unlock;
+ goto unpin;
}
KASSERT((page->flags & PG_FICTITIOUS) != 0,
("not fictitious %p", page));
KASSERT(page->wire_count == 1, ("wire_count not 1 %p", page));
if (vm_page_busied(page)) {
+ i915_gem_object_unpin(obj);
DRM_UNLOCK(dev);
vm_page_lock(page);
VM_OBJECT_WUNLOCK(vm_obj);
@@ -1525,6 +1582,7 @@ retry:
goto retry;
}
if (vm_page_insert(page, vm_obj, OFF_TO_IDX(offset))) {
+ i915_gem_object_unpin(obj);
DRM_UNLOCK(dev);
VM_OBJECT_WUNLOCK(vm_obj);
VM_WAIT;
@@ -1537,6 +1595,13 @@ have_page:
CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot,
page->phys_addr);
+ if (pinned) {
+ /*
+ * We may have not pinned the object if the page was
+ * found by the call to vm_page_lookup()
+ */
+ i915_gem_object_unpin(obj);
+ }
DRM_UNLOCK(dev);
if (oldpage != NULL) {
vm_page_lock(oldpage);
@@ -1546,12 +1611,14 @@ have_page:
vm_object_pip_wakeup(vm_obj);
return (VM_PAGER_OK);
+unpin:
+ i915_gem_object_unpin(obj);
unlock:
DRM_UNLOCK(dev);
out:
KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
- CTR5(KTR_DRM, "fault_fail %p %jx %x err %d %d", gem_obj, offset, prot,
- -ret, cause);
+ CTR4(KTR_DRM, "fault_fail %p %jx %x err %d", gem_obj, offset, prot,
+ -ret);
if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
kern_yield(PRI_USER);
goto retry;
@@ -1564,15 +1631,10 @@ out:
static void
i915_gem_pager_dtor(void *handle)
{
- struct drm_gem_object *obj;
- struct drm_device *dev;
-
- obj = handle;
- dev = obj->dev;
+ struct drm_gem_object *obj = handle;
+ struct drm_device *dev = obj->dev;
DRM_LOCK(dev);
- drm_gem_free_mmap_offset(obj);
- i915_gem_release_mmap(to_intel_bo(obj));
drm_gem_object_unreference(obj);
DRM_UNLOCK(dev);
}
@@ -1707,6 +1769,48 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int ret;
+
+ if (obj->base.on_map)
+ return 0;
+
+ dev_priv->mm.shrinker_no_lock_stealing = true;
+
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != -ENOSPC)
+ goto out;
+
+ /* Badly fragmented mmap space? The only way we can recover
+ * space is by destroying unwanted objects. We can't randomly release
+ * mmap_offsets as userspace expects them to be persistent for the
+ * lifetime of the objects. The closest we can is to release the
+ * offsets on purgeable objects by truncating it and marking it purged,
+ * which prevents userspace from ever using that object again.
+ */
+ i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != -ENOSPC)
+ goto out;
+
+ i915_gem_shrink_all(dev_priv);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+ dev_priv->mm.shrinker_no_lock_stealing = false;
+
+ return ret;
+}
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ if (!obj->base.on_map)
+ return;
+
+ drm_gem_free_mmap_offset(&obj->base);
+}
+
int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
@@ -1738,7 +1842,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto out;
}
- ret = drm_gem_create_mmap_offset(&obj->base);
+ ret = i915_gem_object_create_mmap_offset(obj);
if (ret)
goto out;
@@ -1786,8 +1890,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
VM_OBJECT_WLOCK(vm_obj);
vm_object_page_remove(vm_obj, 0, 0, false);
VM_OBJECT_WUNLOCK(vm_obj);
- drm_gem_free_mmap_offset(&obj->base);
- obj->madv = I915_MADV_PURGED_INTERNAL;
+ i915_gem_object_free_mmap_offset(obj);
+
+ obj->madv = __I915_MADV_PURGED;
}
static inline int
@@ -1846,27 +1951,24 @@ i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
#endif
static void
-i915_gem_object_put_pages_range(struct drm_i915_gem_object *obj,
- off_t start, off_t end)
-{
- vm_object_t vm_obj;
-
- vm_obj = obj->base.vm_obj;
- VM_OBJECT_WLOCK(vm_obj);
- i915_gem_object_put_pages_range_locked(obj,
- OFF_TO_IDX(trunc_page(start)), OFF_TO_IDX(round_page(end)));
- VM_OBJECT_WUNLOCK(vm_obj);
-}
-
-static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
int page_count = obj->base.size / PAGE_SIZE;
- int i;
+ int ret, i;
+
+ BUG_ON(obj->madv == __I915_MADV_PURGED);
- KASSERT(obj->madv != I915_MADV_PURGED_INTERNAL, ("Purged object"));
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
+ WARN_ON(ret != -EIO);
+ i915_gem_clflush_object(obj);
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
if (obj->madv == I915_MADV_DONTNEED)
@@ -1898,65 +2000,80 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
}
static int
-i915_gpu_is_active(struct drm_device *dev)
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
- return (!list_empty(&dev_priv->mm.flushing_list) ||
- !list_empty(&dev_priv->mm.active_list));
-}
+ if (obj->pages == NULL)
+ return 0;
-static void
-i915_gem_lowmem(void *arg)
-{
- struct drm_device *dev;
- struct drm_i915_private *dev_priv;
- struct drm_i915_gem_object *obj, *next;
- int cnt, cnt_fail, cnt_total;
+ BUG_ON(obj->gtt_space);
- dev = arg;
- dev_priv = dev->dev_private;
+ if (obj->pages_pin_count)
+ return -EBUSY;
- if (!sx_try_xlock(&dev->dev_struct_lock))
- return;
+ /* ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early. */
+ list_del(&obj->gtt_list);
- CTR0(KTR_DRM, "gem_lowmem");
+ ops->put_pages(obj);
+ obj->pages = NULL;
-rescan:
- /* first scan for clean buffers */
- i915_gem_retire_requests(dev);
+ if (i915_gem_object_is_purgeable(obj))
+ i915_gem_object_truncate(obj);
- cnt_total = cnt_fail = cnt = 0;
+ return 0;
+}
- list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
- mm_list) {
- if (i915_gem_object_is_purgeable(obj)) {
- if (i915_gem_object_unbind(obj) != 0)
- cnt_total++;
- } else
- cnt_total++;
+static long
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+ bool purgeable_only)
+{
+ struct drm_i915_gem_object *obj, *next;
+ long count = 0;
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.unbound_list,
+ gtt_list) {
+ if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
+ i915_gem_object_put_pages(obj) == 0) {
+ count += obj->base.size >> PAGE_SHIFT;
+ if (target != -1 && count >= target)
+ return count;
+ }
}
- /* second pass, evict/count anything still on the inactive list */
- list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
- mm_list) {
- if (i915_gem_object_unbind(obj) == 0)
- cnt++;
- else
- cnt_fail++;
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
+ i915_gem_object_unbind(obj) == 0 &&
+ i915_gem_object_put_pages(obj) == 0) {
+ count += obj->base.size >> PAGE_SHIFT;
+ if (target != -1 && count >= target)
+ return count;
+ }
}
- if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
- /*
- * We are desperate for pages, so as a last resort, wait
- * for the GPU to finish and discard whatever we can.
- * This has a dramatic impact to reduce the number of
- * OOM-killer events whilst running the GPU aggressively.
- */
- if (i915_gpu_idle(dev) == 0)
- goto rescan;
- }
- DRM_UNLOCK(dev);
+ return count;
+}
+
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+ return __i915_gem_shrink(dev_priv, target, true);
+}
+
+static void
+i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj, *next;
+
+ i915_gem_evict_everything(dev_priv->dev);
+
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+ i915_gem_object_put_pages(obj);
}
static int
@@ -1989,14 +2106,19 @@ failed:
}
static int
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
- int flags)
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
vm_object_t vm_obj;
vm_page_t page;
vm_pindex_t i, page_count;
int res;
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
KASSERT(obj->pages == NULL, ("Obj already has pages"));
page_count = OFF_TO_IDX(obj->base.size);
@@ -2020,15 +2142,42 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
return (0);
}
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+ int ret;
+
+ if (obj->pages)
+ return 0;
+
+ BUG_ON(obj->pages_pin_count);
+
+ ret = ops->get_pages(obj);
+ if (ret)
+ return ret;
+
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ return 0;
+}
+
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring, uint32_t seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_fence_reg *reg;
+ u32 seqno = intel_ring_get_seqno(ring);
- KASSERT(ring != NULL, ("NULL ring"));
+ BUG_ON(ring == NULL);
obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
@@ -2041,12 +2190,15 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
- obj->last_rendering_seqno = seqno;
+ obj->last_read_seqno = seqno;
+
if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
/* Bump MRU to take account of the delayed flush */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg;
+
reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
@@ -2055,114 +2207,141 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
- list_del_init(&obj->ring_list);
- obj->last_rendering_seqno = 0;
- obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- KASSERT(obj->active, ("Object not active"));
- list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-
- i915_gem_object_move_off_active(obj);
-}
-
-static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
+ BUG_ON(!obj->active);
+
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- KASSERT(list_empty(&obj->gpu_write_list), ("On gpu_write_list"));
- KASSERT(obj->active, ("Object not active"));
+ list_del_init(&obj->ring_list);
obj->ring = NULL;
- i915_gem_object_move_off_active(obj);
+ obj->last_read_seqno = 0;
+ obj->last_write_seqno = 0;
+ obj->base.write_domain = 0;
+
+ obj->last_fenced_seqno = 0;
obj->fenced_gpu_access = false;
obj->active = 0;
- obj->pending_gpu_write = false;
drm_gem_object_unreference(&obj->base);
-#if 1
- KIB_NOTYET();
-#else
WARN_ON(i915_verify_lists(dev));
-#endif
}
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno = dev_priv->next_seqno;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i, j;
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
+ /* The hardware uses various monotonic 32-bit counters, if we
+ * detect that they will wraparound we need to idle the GPU
+ * and reset those counters.
+ */
+ ret = 0;
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ret |= ring->sync_seqno[j] != 0;
+ }
+ if (ret == 0)
+ return ret;
+
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
- return seqno;
+ i915_gem_retire_requests(dev);
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ring->sync_seqno[j] = 0;
+ }
+
+ return 0;
}
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
- if (ring->outstanding_lazy_request == 0)
- ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
- return ring->outstanding_lazy_request;
+ /* reserve 0 for non-seqno */
+ if (dev_priv->next_seqno == 0) {
+ int ret = i915_gem_handle_seqno_wrap(dev);
+ if (ret)
+ return ret;
+
+ dev_priv->next_seqno = 1;
+ }
+
+ *seqno = dev_priv->next_seqno++;
+ return 0;
}
int
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
- struct drm_i915_gem_request *request)
+ u32 *out_seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- struct drm_i915_file_private *file_priv;
- uint32_t seqno;
+ struct drm_i915_gem_request *request;
u32 request_ring_position;
int was_empty;
int ret;
- KASSERT(request != NULL, ("NULL request in add"));
- DRM_LOCK_ASSERT(ring->dev);
+ /*
+ * Emit any outstanding flushes - execbuf can fail to emit the flush
+ * after having emitted the batchbuffer command. Hence we need to fix
+ * things up similar to emitting the lazy request. The difference here
+ * is that the flush _must_ happen before the next request, no matter
+ * what.
+ */
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
- seqno = i915_gem_next_request_seqno(ring);
- request_ring_position = intel_ring_get_tail(ring);
+ request = malloc(sizeof(*request), DRM_I915_GEM, M_NOWAIT);
+ if (request == NULL)
+ return -ENOMEM;
- ret = ring->add_request(ring, &seqno);
- if (ret != 0)
- return ret;
- CTR2(KTR_DRM, "request_add %s %d", ring->name, seqno);
+ /* Record the position of the start of the request so that
+ * should we detect the updated seqno part-way through the
+ * GPU processing the request, we never over-estimate the
+ * position of the head.
+ */
+ request_ring_position = intel_ring_get_tail(ring);
- request->seqno = seqno;
+ ret = ring->add_request(ring);
+ if (ret) {
+ free(request, DRM_I915_GEM);
+ return ret;
+ }
+
+ request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->tail = request_ring_position;
- request->emitted_jiffies = ticks;
+ request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
+ request->file_priv = NULL;
if (file) {
- file_priv = file->driver_priv;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
- mtx_lock(&file_priv->mm.lck);
+ mtx_lock(&file_priv->mm.lock);
request->file_priv = file_priv;
list_add_tail(&request->client_list,
&file_priv->mm.request_list);
- mtx_unlock(&file_priv->mm.lck);
+ mtx_unlock(&file_priv->mm.lock);
}
+ CTR2(KTR_DRM, "request_add %s %d", ring->name, request->seqno);
ring->outstanding_lazy_request = 0;
if (!dev_priv->mm.suspended) {
@@ -2170,11 +2349,15 @@ i915_add_request(struct intel_ring_buffer *ring,
callout_schedule(&dev_priv->hangcheck_timer,
DRM_I915_HANGCHECK_PERIOD);
}
- if (was_empty)
- taskqueue_enqueue_timeout(dev_priv->tq,
- &dev_priv->mm.retire_task, hz);
+ if (was_empty) {
+ taskqueue_enqueue_timeout(dev_priv->wq,
+ &dev_priv->mm.retire_work, hz);
+ intel_mark_busy(dev_priv->dev);
+ }
}
+ if (out_seqno)
+ *out_seqno = request->seqno;
return 0;
}
@@ -2186,14 +2369,12 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
if (!file_priv)
return;
- DRM_LOCK_ASSERT(request->ring->dev);
-
- mtx_lock(&file_priv->mm.lck);
+ mtx_lock(&file_priv->mm.lock);
if (request->file_priv) {
list_del(&request->client_list);
request->file_priv = NULL;
}
- mtx_unlock(&file_priv->mm.lck);
+ mtx_unlock(&file_priv->mm.lock);
}
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
@@ -2221,8 +2402,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object,
ring_list);
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
}
}
@@ -2258,24 +2437,13 @@ void i915_gem_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
- /* Remove anything from the flushing lists. The GPU cache is likely
- * to be lost on reset along with the data, so simply move the
- * lost bo to the inactive list.
- */
- while (!list_empty(&dev_priv->mm.flushing_list)) {
- obj = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- mm_list);
-
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
- i915_gem_object_move_to_inactive(obj);
- }
-
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+ list_for_each_entry(obj,
+ &dev_priv->mm.inactive_list,
+ mm_list)
+ {
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
@@ -2290,17 +2458,14 @@ void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
- int i;
if (list_empty(&ring->request_list))
return;
- seqno = ring->get_seqno(ring);
- CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
+ WARN_ON(i915_verify_lists(ring->dev));
- for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
- if (seqno >= ring->sync_seqno[i])
- ring->sync_seqno[i] = 0;
+ seqno = ring->get_seqno(ring, true);
+ CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2314,6 +2479,11 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
CTR2(KTR_DRM, "retire_request_seqno_passed %s %d",
ring->name, seqno);
+ /* We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of tail of the request to update the last known position
+ * of the GPU head.
+ */
ring->last_retired_head = request->tail;
list_del(&request->list);
@@ -2331,23 +2501,19 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
struct drm_i915_gem_object,
ring_list);
- if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
- if (obj->base.write_domain != 0)
- i915_gem_object_move_to_flushing(obj);
- else
- i915_gem_object_move_to_inactive(obj);
+ i915_gem_object_move_to_inactive(obj);
}
- if (ring->trace_irq_seqno &&
- i915_seqno_passed(seqno, ring->trace_irq_seqno)) {
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- mtx_lock(&dev_priv->irq_lock);
+ if (unlikely(ring->trace_irq_seqno &&
+ i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
ring->irq_put(ring);
- mtx_unlock(&dev_priv->irq_lock);
ring->trace_irq_seqno = 0;
}
+
+ WARN_ON(i915_verify_lists(ring->dev));
}
void
@@ -2362,49 +2528,7 @@ i915_gem_retire_requests(struct drm_device *dev)
}
static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
- uint32_t flush_domains)
-{
- struct drm_i915_gem_object *obj, *next;
- uint32_t old_write_domain;
-
- list_for_each_entry_safe(obj, next, &ring->gpu_write_list,
- gpu_write_list) {
- if (obj->base.write_domain & flush_domains) {
- old_write_domain = obj->base.write_domain;
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
- i915_gem_object_move_to_active(obj, ring,
- i915_gem_next_request_seqno(ring));
-
- CTR3(KTR_DRM, "object_change_domain process_flush %p %x %x",
- obj, obj->base.read_domains, old_write_domain);
- }
- }
-}
-
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring, uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- int ret;
-
- if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
-
- CTR3(KTR_DRM, "ring_flush %s %x %x", ring->name, invalidate_domains,
- flush_domains);
- ret = ring->flush(ring, invalidate_domains, flush_domains);
- if (ret)
- return ret;
-
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- i915_gem_process_flushing_list(ring, flush_domains);
- return 0;
-}
-
-static void
-i915_gem_retire_task_handler(void *arg, int pending)
+i915_gem_retire_work_handler(void *arg, int pending)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
@@ -2417,8 +2541,8 @@ i915_gem_retire_task_handler(void *arg, int pending)
/* Come back later if the device is busy... */
if (!sx_try_xlock(&dev->dev_struct_lock)) {
- taskqueue_enqueue_timeout(dev_priv->tq,
- &dev_priv->mm.retire_task, hz);
+ taskqueue_enqueue_timeout(dev_priv->wq,
+ &dev_priv->mm.retire_work, hz);
return;
}
@@ -2431,31 +2555,138 @@ i915_gem_retire_task_handler(void *arg, int pending)
*/
idle = true;
for_each_ring(ring, dev_priv, i) {
- struct intel_ring_buffer *ring = &dev_priv->rings[i];
-
- if (!list_empty(&ring->gpu_write_list)) {
- struct drm_i915_gem_request *request;
- int ret;
-
- ret = i915_gem_flush_ring(ring,
- 0, I915_GEM_GPU_DOMAINS);
- request = malloc(sizeof(*request), DRM_I915_GEM,
- M_WAITOK | M_ZERO);
- if (ret || request == NULL ||
- i915_add_request(ring, NULL, request))
- free(request, DRM_I915_GEM);
- }
+ if (ring->gpu_caches_dirty)
+ i915_add_request(ring, NULL, NULL);
idle &= list_empty(&ring->request_list);
}
if (!dev_priv->mm.suspended && !idle)
- taskqueue_enqueue_timeout(dev_priv->tq,
- &dev_priv->mm.retire_task, hz);
+ taskqueue_enqueue_timeout(dev_priv->wq,
+ &dev_priv->mm.retire_work, hz);
+ if (idle)
+ intel_mark_idle(dev);
+
+ DRM_UNLOCK(dev);
+}
+
+/**
+ * Ensures that an object will eventually get non-busy by flushing any required
+ * write domains, emitting any outstanding lazy request and retiring and
+ * completed requests.
+ */
+static int
+i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->active) {
+ ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests_ring(obj->ring);
+ }
+
+ return 0;
+}
+
+/**
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Returns 0 if successful, else an error is returned with the remaining time in
+ * the timeout parameter.
+ * -ETIME: object is still busy after timeout
+ * -ERESTARTSYS: signal interrupted the wait
+ * -ENONENT: object doesn't exist
+ * Also possible, but rare:
+ * -EAGAIN: GPU wedged
+ * -ENOMEM: damn
+ * -ENODEV: Internal IRQ fail
+ * -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex the object may become re-busied before this
+ * function completes. A similar but shorter * race condition exists in the busy
+ * ioctl
+ */
+int
+i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_gem_wait *args = data;
+ struct drm_i915_gem_object *obj;
+ struct intel_ring_buffer *ring = NULL;
+ struct timespec timeout_stack, *timeout = NULL;
+ u32 seqno = 0;
+ int ret = 0;
+
+ if (args->timeout_ns >= 0) {
+ timeout_stack.tv_sec = args->timeout_ns / 1000000;
+ timeout_stack.tv_nsec = args->timeout_ns % 1000000;
+ timeout = &timeout_stack;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+ if (&obj->base == NULL) {
+ DRM_UNLOCK(dev);
+ return -ENOENT;
+ }
+
+ /* Need to make sure the object gets inactive eventually. */
+ ret = i915_gem_object_flush_active(obj);
+ if (ret)
+ goto out;
+
+ if (obj->active) {
+ seqno = obj->last_read_seqno;
+ ring = obj->ring;
+ }
+
+ if (seqno == 0)
+ goto out;
+
+ /* Do this after OLR check to make sure we make forward progress polling
+ * on this IOCTL with a 0 timeout (like busy ioctl)
+ */
+ if (!args->timeout_ns) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ drm_gem_object_unreference(&obj->base);
+ DRM_UNLOCK(dev);
+ ret = __wait_seqno(ring, seqno, true, timeout);
+ if (timeout) {
+ args->timeout_ns = timeout->tv_sec * 1000000 + timeout->tv_nsec;
+ }
+ return ret;
+
+out:
+ drm_gem_object_unreference(&obj->base);
DRM_UNLOCK(dev);
+ return ret;
}
+/**
+ * i915_gem_object_sync - sync an object to a ring.
+ *
+ * @obj: object which may be in use on another ring.
+ * @to: ring we wish to use the object on. May be NULL.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Calling with NULL implies synchronizing the object with the CPU
+ * rather than a particular GPU ring.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
@@ -2468,31 +2699,25 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
return 0;
if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
- return i915_gem_object_wait_rendering(obj);
+ return i915_gem_object_wait_rendering(obj, false);
idx = intel_ring_sync_index(from, to);
- seqno = obj->last_rendering_seqno;
+ seqno = obj->last_read_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
- if (seqno == from->outstanding_lazy_request) {
- struct drm_i915_gem_request *request;
-
- request = malloc(sizeof(*request), DRM_I915_GEM,
- M_WAITOK | M_ZERO);
- ret = i915_add_request(from, NULL, request);
- if (ret) {
- free(request, DRM_I915_GEM);
- return ret;
- }
- seqno = request->seqno;
- }
-
+ ret = i915_gem_check_olr(obj->ring, seqno);
+ if (ret)
+ return ret;
ret = to->sync_to(to, from, seqno);
if (!ret)
- from->sync_seqno[idx] = seqno;
+ /* We use last_read_seqno because sync_to()
+ * might have just caused seqno wrap under
+ * the radar.
+ */
+ from->sync_seqno[idx] = obj->last_read_seqno;
return ret;
}
@@ -2533,24 +2758,20 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return 0;
if (obj->pin_count)
- return -EINVAL;
+ return -EBUSY;
+
+ BUG_ON(obj->pages == NULL);
ret = i915_gem_object_finish_gpu(obj);
- if (ret == -ERESTARTSYS || ret == -EINTR)
+ if (ret)
return ret;
+ /* Continue on if we fail due to EIO, the GPU is hung so we
+ * should be safe and we need to cleanup or else we might
+ * cause memory corruption through use-after-free.
+ */
i915_gem_object_finish_gtt(obj);
- if (ret == 0)
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret == -ERESTARTSYS || ret == -EINTR)
- return ret;
- if (ret != 0) {
- i915_gem_clflush_object(obj);
- obj->base.read_domains = obj->base.write_domain =
- I915_GEM_DOMAIN_CPU;
- }
-
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
if (ret)
@@ -2564,39 +2785,16 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
}
i915_gem_gtt_finish_object(obj);
- i915_gem_object_put_pages_gtt(obj);
-
- list_del_init(&obj->gtt_list);
- list_del_init(&obj->mm_list);
+ list_del(&obj->mm_list);
+ list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ /* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
obj->gtt_offset = 0;
- if (i915_gem_object_is_purgeable(obj))
- i915_gem_object_truncate(obj);
- CTR1(KTR_DRM, "object_unbind %p", obj);
-
- return ret;
-}
-
-static int
-i915_ring_idle(struct intel_ring_buffer *ring)
-{
- int ret;
-
- if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
- return 0;
-
- if (!list_empty(&ring->gpu_write_list)) {
- ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS,
- I915_GEM_GPU_DOMAINS);
- if (ret != 0)
- return ret;
- }
-
- return (i915_wait_request(ring, i915_gem_next_request_seqno(ring)));
+ return 0;
}
int i915_gpu_idle(struct drm_device *dev)
@@ -2611,13 +2809,9 @@ int i915_gpu_idle(struct drm_device *dev)
if (ret)
return ret;
- ret = i915_ring_idle(ring);
+ ret = intel_ring_idle(ring);
if (ret)
return ret;
-
- /* Is the device fubar? */
- if (!list_empty(&ring->gpu_write_list))
- return -EBUSY;
}
return 0;
@@ -2682,10 +2876,9 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
int pitch_val;
int tile_width;
- if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
- (obj->gtt_offset & (size - 1)))
- printf(
+ (obj->gtt_offset & (size - 1)),
"object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
obj->gtt_offset, obj->map_and_fenceable, size);
@@ -2726,10 +2919,9 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
u32 size = obj->gtt_space->size;
uint32_t pitch_val;
- if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
- (obj->gtt_offset & (size - 1)))
- printf(
+ (obj->gtt_offset & (size - 1)),
"object 0x%08x not 512K or pot-size 0x%08x aligned\n",
obj->gtt_offset, size);
@@ -2769,6 +2961,11 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
return fence - dev_priv->fence_regs;
}
+static void i915_gem_write_fence__ipi(void *data)
+{
+ wbinvd();
+}
+
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
@@ -2777,6 +2974,18 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_private *dev_priv = dev->dev_private;
int fence_reg = fence_number(dev_priv, fence);
+ /* In order to fully serialize access to the fenced region and
+ * the update to the fence register we need to take extreme
+ * measures on SNB+. In theory, the write to the fence register
+ * flushes all memory transactions before, and coupled with the
+ * mb() placed around the register write we serialise all memory
+ * operations with respect to the changes in the tiler. Yet, on
+ * SNB+ we need to take a step further and emit an explicit wbinvd()
+ * on each processor in order to manually flush all memory
+ * transactions before updating the fence register.
+ */
+ if (HAS_LLC(obj->base.dev))
+ on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
if (enable) {
@@ -2793,22 +3002,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
- int ret;
-
- if (obj->fenced_gpu_access) {
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring,
- 0, obj->base.write_domain);
- if (ret)
- return ret;
- }
-
- obj->fenced_gpu_access = false;
- }
-
if (obj->last_fenced_seqno) {
- ret = i915_wait_request(obj->ring,
- obj->last_fenced_seqno);
+ int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
if (ret)
return ret;
@@ -2821,6 +3016,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
mb();
+ obj->fenced_gpu_access = false;
return 0;
}
@@ -2940,17 +3136,88 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
}
+static bool i915_gem_valid_gtt_space(struct drm_device *dev,
+ struct drm_mm_node *gtt_space,
+ unsigned long cache_level)
+{
+ struct drm_mm_node *other;
+
+ /* On non-LLC machines we have to be careful when putting differing
+ * types of snoopable memory together to avoid the prefetcher
+ * crossing memory domains and dieing.
+ */
+ if (HAS_LLC(dev))
+ return true;
+
+ if (gtt_space == NULL)
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
+
+static void i915_gem_verify_gtt(struct drm_device *dev)
+{
+#if WATCH_GTT
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->gtt_space == NULL) {
+ DRM_ERROR("object found on GTT list with no space reserved\n");
+ err++;
+ continue;
+ }
+
+ if (obj->cache_level != obj->gtt_space->color) {
+ DRM_ERROR("object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level,
+ obj->gtt_space->color);
+ err++;
+ continue;
+ }
+
+ if (!i915_gem_valid_gtt_space(dev,
+ obj->gtt_space,
+ obj->cache_level)) {
+ DRM_ERROR("invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level);
+ err++;
+ continue;
+ }
+ }
+
+ WARN_ON(err);
+#endif
+}
+
/**
* Finds free space in the GTT aperture and binds the object there.
*/
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
- bool map_and_fenceable)
+ bool map_and_fenceable,
+ bool nonblocking)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *free_space;
+ struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
int ret;
@@ -2990,75 +3257,70 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return -E2BIG;
}
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
+ node = malloc(sizeof(*node), DRM_I915_GEM, M_NOWAIT | M_ZERO);
+ if (node == NULL) {
+ i915_gem_object_unpin_pages(obj);
+ return -ENOMEM;
+ }
+
search_free:
if (map_and_fenceable)
- free_space = drm_mm_search_free_in_range(
- &dev_priv->mm.gtt_space, size, alignment, 0,
- dev_priv->mm.gtt_mappable_end, 0);
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+ size, alignment, obj->cache_level,
+ 0, dev_priv->mm.gtt_mappable_end);
else
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- size, alignment, 0);
- if (free_space != NULL) {
- if (map_and_fenceable)
- obj->gtt_space = drm_mm_get_block_range_generic(
- free_space, size, alignment, 0, 0,
- dev_priv->mm.gtt_mappable_end, 1);
- else
- obj->gtt_space = drm_mm_get_block_generic(free_space,
- size, alignment, 0, 1);
- }
- if (obj->gtt_space == NULL) {
- ret = i915_gem_evict_something(dev, size, alignment,
- map_and_fenceable);
- if (ret != 0)
- return ret;
- goto search_free;
- }
- ret = i915_gem_object_get_pages_gtt(obj, 0);
+ ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+ size, alignment, obj->cache_level);
if (ret) {
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
- /*
- * i915_gem_object_get_pages_gtt() cannot return
- * ENOMEM, since we use vm_page_grab().
- */
+ ret = i915_gem_evict_something(dev, size, alignment,
+ obj->cache_level,
+ map_and_fenceable,
+ nonblocking);
+ if (ret == 0)
+ goto search_free;
+
+ i915_gem_object_unpin_pages(obj);
+ free(node, DRM_I915_GEM);
return ret;
}
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+ i915_gem_object_unpin_pages(obj);
+ drm_mm_put_block(node);
+ return -EINVAL;
+ }
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
- i915_gem_object_put_pages_gtt(obj);
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
- if (i915_gem_evict_everything(dev, false))
- return ret;
- goto search_free;
+ i915_gem_object_unpin_pages(obj);
+ drm_mm_put_block(node);
+ return ret;
}
- if (!dev_priv->mm.aliasing_ppgtt)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
-
- list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+ list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- KASSERT((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0,
- ("Object in gpu read domain"));
- KASSERT((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0,
- ("Object in gpu write domain"));
-
- obj->gtt_offset = obj->gtt_space->start;
+ obj->gtt_space = node;
+ obj->gtt_offset = node->start;
fenceable =
- obj->gtt_space->size == fence_size &&
- (obj->gtt_space->start & (fence_alignment - 1)) == 0;
+ node->size == fence_size &&
+ (node->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj->map_and_fenceable = mappable && fenceable;
+ i915_gem_object_unpin_pages(obj);
CTR4(KTR_DRM, "object_bind %p %x %x %d", obj, obj->gtt_offset,
obj->base.size, map_and_fenceable);
+ i915_gem_verify_gtt(dev);
return 0;
}
@@ -3124,7 +3386,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3132,15 +3394,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
obj->base.read_domains, old_write_domain);
}
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
-{
-
- if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return (0);
- return (i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain));
-}
-
/**
* Moves a single object to the GTT read, and possibly write domain.
*
@@ -3161,16 +3414,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
- if (obj->pending_gpu_write || write) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return (ret);
- }
-
i915_gem_object_flush_cpu_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -3179,8 +3426,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) == 0,
- ("In GTT write domain"));
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
@@ -3213,6 +3459,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
+ if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+
if (obj->gtt_space) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
@@ -3224,7 +3476,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* registers with snooped memory, so relinquish any fences
* currently pointing to our region in the aperture.
*/
- if (INTEL_INFO(obj->base.dev)->gen < 6) {
+ if (INTEL_INFO(dev)->gen < 6) {
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
@@ -3235,6 +3487,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
+
+ obj->gtt_space->color = cache_level;
}
if (cache_level == I915_CACHE_NONE) {
@@ -3246,10 +3500,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* in obj->write_domain and have been skipping the clflushes.
* Just set it to the CPU cache for now.
*/
- KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
- ("obj %p in CPU write domain", obj));
- KASSERT((obj->base.read_domains & ~I915_GEM_DOMAIN_CPU) == 0,
- ("obj %p in CPU read domain", obj));
+ WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
@@ -3262,9 +3514,72 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
obj->cache_level = cache_level;
+ i915_gem_verify_gtt(dev);
return 0;
}
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ args->caching = obj->cache_level != I915_CACHE_NONE;
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK(dev);
+ return ret;
+}
+
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ enum i915_cache_level level;
+ int ret;
+
+ switch (args->caching) {
+ case I915_CACHING_NONE:
+ level = I915_CACHE_NONE;
+ break;
+ case I915_CACHING_CACHED:
+ level = I915_CACHE_LLC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, level);
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ DRM_UNLOCK(dev);
+ return ret;
+}
+
static bool is_pin_display(struct drm_i915_gem_object *obj)
{
/* There are 3 sources that pin objects:
@@ -3281,6 +3596,11 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
return obj->pin_count - !!obj->user_pin_count;
}
+/*
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
@@ -3289,10 +3609,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 old_read_domains, old_write_domain;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
if (pipelined != obj->ring) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
@@ -3321,7 +3637,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_object_pin(obj, alignment, true);
+ ret = i915_gem_object_pin(obj, alignment, true, false);
if (ret)
goto err_unpin_display;
@@ -3330,12 +3646,14 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
- KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) == 0,
- ("obj %p in GTT write domain", obj));
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ obj->base.write_domain = 0;
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
CTR3(KTR_DRM, "object_change_domain pin_to_display_plan %p %x %x",
- obj, old_read_domains, obj->base.write_domain);
+ obj, old_read_domains, old_write_domain);
return 0;
@@ -3359,13 +3677,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
- if (ret)
- return ret;
- }
-
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
@@ -3389,16 +3701,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return 0;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
- if (write || obj->pending_gpu_write) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
-
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -3414,8 +3720,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
- ("In cpu write domain"));
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
@@ -3446,30 +3751,32 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
- unsigned long recent_enough = ticks - (20 * hz / 1000);
+ unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
struct intel_ring_buffer *ring = NULL;
u32 seqno = 0;
int ret;
- if (atomic_load_acq_int(&dev_priv->mm.wedged))
+ if (atomic_read(&dev_priv->mm.wedged))
return -EIO;
- mtx_lock(&file_priv->mm.lck);
+ mtx_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
+
ring = request->ring;
seqno = request->seqno;
}
- mtx_unlock(&file_priv->mm.lck);
+ mtx_unlock(&file_priv->mm.lock);
+
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, true);
+ ret = __wait_seqno(ring, seqno, true, NULL);
if (ret == 0)
- taskqueue_enqueue_timeout(dev_priv->tq,
- &dev_priv->mm.retire_task, 0);
+ taskqueue_enqueue_timeout(dev_priv->wq,
+ &dev_priv->mm.retire_work, 0);
return ret;
}
@@ -3477,17 +3784,19 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
- bool map_and_fenceable)
+ bool map_and_fenceable,
+ bool nonblocking)
{
int ret;
- if (obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)
+ if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
if (obj->gtt_space != NULL) {
if ((alignment && obj->gtt_offset & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
- DRM_DEBUG("bo is already pinned with incorrect alignment:"
+ WARN(obj->pin_count,
+ "bo is already pinned with incorrect alignment:"
" offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
obj->gtt_offset, alignment,
@@ -3500,10 +3809,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (obj->gtt_space == NULL) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
ret = i915_gem_object_bind_to_gtt(obj, alignment,
- map_and_fenceable);
+ map_and_fenceable,
+ nonblocking);
if (ret)
return ret;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
}
if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -3518,9 +3833,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
-
- KASSERT(obj->pin_count != 0, ("zero pin count"));
- KASSERT(obj->gtt_space != NULL, ("No gtt mapping"));
+ BUG_ON(obj->pin_count == 0);
+ BUG_ON(obj->gtt_space == NULL);
if (--obj->pin_count == 0)
obj->pin_mappable = false;
@@ -3532,19 +3846,17 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_pin *args = data;
struct drm_i915_gem_object *obj;
- struct drm_gem_object *gobj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- gobj = drm_gem_object_lookup(dev, file, args->handle);
- if (gobj == NULL) {
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj = to_intel_bo(gobj);
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -3559,14 +3871,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
goto out;
}
- obj->user_pin_count++;
- obj->pin_filp = file;
- if (obj->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment, true);
+ if (obj->user_pin_count == 0) {
+ ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
@@ -3634,18 +3947,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
- args->busy = obj->active;
- if (args->busy) {
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring,
- 0, obj->base.write_domain);
- } else {
- ret = i915_gem_check_olr(obj->ring,
- obj->last_rendering_seqno);
- }
+ /* Count all active objects as busy, even if they are currently not used
+ * by the gpu. Users of this interface expect objects to eventually
+ * become non-busy without any further actions, therefore emit any
+ * necessary flushes here.
+ */
+ ret = i915_gem_object_flush_active(obj);
- i915_gem_retire_requests_ring(obj->ring);
- args->busy = obj->active;
+ args->busy = obj->active;
+ if (obj->ring) {
+ BUILD_BUG_ON(I915_NUM_RINGS > 16);
+ args->busy |= intel_ring_flag(obj->ring) << 16;
}
drm_gem_object_unreference(&obj->base);
@@ -3692,14 +4004,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (obj->madv != I915_MADV_PURGED_INTERNAL)
+ if (obj->madv != __I915_MADV_PURGED)
obj->madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj) && obj->gtt_space == NULL)
+ if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
i915_gem_object_truncate(obj);
- args->retained = obj->madv != I915_MADV_PURGED_INTERNAL;
+ args->retained = obj->madv != __I915_MADV_PURGED;
out:
drm_gem_object_unreference(&obj->base);
@@ -3708,21 +4020,57 @@ unlock:
return ret;
}
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
+{
+ INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->gtt_list);
+ INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->exec_list);
+
+ obj->ops = ops;
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
+
+ i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+ .get_pages = i915_gem_object_get_pages_gtt,
+ .put_pages = i915_gem_object_put_pages_gtt,
+};
+
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
- struct drm_i915_private *dev_priv;
struct drm_i915_gem_object *obj;
- dev_priv = dev->dev_private;
-
obj = malloc(sizeof(*obj), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ if (obj == NULL)
+ return NULL;
if (drm_gem_object_init(dev, &obj->base, size) != 0) {
free(obj, DRM_I915_GEM);
return NULL;
}
+#ifdef FREEBSD_WIP
+ mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+ if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ }
+
+ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping_set_gfp_mask(mapping, mask);
+#endif /* FREEBSD_WIP */
+
+ i915_gem_object_init(obj, &i915_gem_object_ops);
+
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3742,18 +4090,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->cache_level = I915_CACHE_LLC;
} else
obj->cache_level = I915_CACHE_NONE;
- obj->base.driver_private = NULL;
- obj->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj->mm_list);
- INIT_LIST_HEAD(&obj->gtt_list);
- INIT_LIST_HEAD(&obj->ring_list);
- INIT_LIST_HEAD(&obj->exec_list);
- INIT_LIST_HEAD(&obj->gpu_write_list);
- obj->madv = I915_MADV_WILLNEED;
- /* Avoid an unnecessary call to unbind on the first bind. */
- obj->map_and_fenceable = true;
-
- i915_gem_info_add_obj(dev_priv, size);
return obj;
}
@@ -3777,19 +4113,28 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_detach_phys_object(dev, obj);
obj->pin_count = 0;
- if (i915_gem_object_unbind(obj) == -ERESTARTSYS) {
+ if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
bool was_interruptible;
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- if (i915_gem_object_unbind(obj))
- printf("i915_gem_free_object: unbind\n");
+ WARN_ON(i915_gem_object_unbind(obj));
dev_priv->mm.interruptible = was_interruptible;
}
- drm_gem_free_mmap_offset(&obj->base);
+ obj->pages_pin_count = 0;
+ i915_gem_object_put_pages(obj);
+ i915_gem_object_free_mmap_offset(obj);
+
+ BUG_ON(obj->pages);
+
+#ifdef FREEBSD_WIP
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
+#endif /* FREEBSD_WIP */
+
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -3818,13 +4163,8 @@ i915_gem_idle(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Under UMS, be paranoid and evict. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_everything(dev, false);
- if (ret) {
- DRM_UNLOCK(dev);
- return ret;
- }
- }
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_gem_evict_everything(dev);
i915_gem_reset_fences(dev);
@@ -3841,9 +4181,41 @@ i915_gem_idle(struct drm_device *dev)
DRM_UNLOCK(dev);
/* Cancel the retire work handler, which should be idle now. */
- taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL);
+ taskqueue_cancel_timeout(dev_priv->wq, &dev_priv->mm.retire_work, NULL);
- return ret;
+ return 0;
+}
+
+void i915_gem_l3_remap(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 misccpctl;
+ int i;
+
+ if (!HAS_L3_GPU_CACHE(dev))
+ return;
+
+ if (!dev_priv->l3_parity.remap_info)
+ return;
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ POSTING_READ(GEN7_MISCCPCTL);
+
+ for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+ u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
+ if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
+ DRM_DEBUG("0x%x was already programmed to %x\n",
+ GEN7_L3LOG_BASE + i, remap);
+ if (remap && !dev_priv->l3_parity.remap_info[i/4])
+ DRM_DEBUG_DRIVER("Clearing remapped register\n");
+ I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+ }
+
+ /* Make sure all the writes land before disabling dop clock gating */
+ POSTING_READ(GEN7_L3LOG_BASE);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
void i915_gem_init_swizzling(struct drm_device *dev)
@@ -3867,12 +4239,38 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
+static bool
+intel_enable_blt(struct drm_device *dev)
+{
+ if (!HAS_BLT(dev))
+ return false;
+
+ /* The blitter was dysfunctional on early prototypes */
+ if (IS_GEN6(dev) && pci_get_revid(dev->dev) < 8) {
+ DRM_INFO("BLT not supported on this pre-production hardware;"
+ " graphics performance will be degraded.\n");
+ return false;
+ }
+
+ return true;
+}
+
int
i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
+#ifdef FREEBSD_WIP
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ return -EIO;
+#endif /* FREEBSD_WIP */
+
+ if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+ I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
+ i915_gem_l3_remap(dev);
+
i915_gem_init_swizzling(dev);
ret = intel_init_render_ring_buffer(dev);
@@ -3885,7 +4283,7 @@ i915_gem_init_hw(struct drm_device *dev)
goto cleanup_render_ring;
}
- if (HAS_BLT(dev)) {
+ if (intel_enable_blt(dev)) {
ret = intel_init_blt_ring_buffer(dev);
if (ret)
goto cleanup_bsd_ring;
@@ -3903,9 +4301,9 @@ i915_gem_init_hw(struct drm_device *dev)
return 0;
cleanup_bsd_ring:
- intel_cleanup_ring_buffer(&dev_priv->rings[VCS]);
+ intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
- intel_cleanup_ring_buffer(&dev_priv->rings[RCS]);
+ intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
return ret;
}
@@ -3915,9 +4313,11 @@ intel_enable_ppgtt(struct drm_device *dev)
if (i915_enable_ppgtt >= 0)
return i915_enable_ppgtt;
+#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
return false;
+#endif
return true;
}
@@ -3928,8 +4328,8 @@ int i915_gem_init(struct drm_device *dev)
unsigned long gtt_size, mappable_size;
int ret;
- gtt_size = dev_priv->mm.gtt.gtt_total_entries << PAGE_SHIFT;
- mappable_size = dev_priv->mm.gtt.gtt_mappable_entries << PAGE_SHIFT;
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
DRM_LOCK(dev);
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
@@ -3993,9 +4393,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- atomic_store_rel_int(&dev_priv->mm.wedged, 0);
+ atomic_set(&dev_priv->mm.wedged, 0);
}
DRM_LOCK(dev);
@@ -4007,9 +4407,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
return ret;
}
- KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
- KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flushing list"));
- KASSERT(list_empty(&dev_priv->mm.inactive_list), ("inactive list"));
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
DRM_UNLOCK(dev);
ret = drm_irq_install(dev);
@@ -4056,7 +4454,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
}
void
@@ -4066,17 +4463,17 @@ i915_gem_load(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+ INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
- init_ring_lists(&dev_priv->rings[i]);
+ init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
- TIMEOUT_TASK_INIT(dev_priv->tq, &dev_priv->mm.retire_task, 0,
- i915_gem_retire_task_handler, dev_priv);
- dev_priv->error_completion = 0;
+ TIMEOUT_TASK_INIT(dev_priv->wq, &dev_priv->mm.retire_work, 0,
+ i915_gem_retire_work_handler, dev_priv);
+ init_completion(&dev_priv->error_completion);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
@@ -4099,19 +4496,12 @@ i915_gem_load(struct drm_device *dev)
i915_gem_reset_fences(dev);
i915_gem_detect_bit_6_swizzle(dev);
- dev_priv->mm.interruptible = true;
+ DRM_INIT_WAITQUEUE(&dev_priv->pending_flip_queue);
- dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
- i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
-}
-
-void
-i915_gem_unload(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv;
+ dev_priv->mm.interruptible = true;
- dev_priv = dev->dev_private;
- EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);
+ dev_priv->mm.inactive_shrinker = EVENTHANDLER_REGISTER(vm_lowmem,
+ i915_gem_inactive_shrink, dev, EVENTHANDLER_PRI_ANY);
}
/*
@@ -4130,6 +4520,8 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object),
DRM_I915_GEM, M_WAITOK | M_ZERO);
+ if (!phys_obj)
+ return -ENOMEM;
phys_obj->id = id;
@@ -4138,8 +4530,10 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
ret = -ENOMEM;
goto kfree_obj;
}
+#ifdef CONFIG_X86
pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
size / PAGE_SIZE, PAT_WRITE_COMBINING);
+#endif
dev_priv->mm.phys_objs[id - 1] = phys_obj;
@@ -4162,6 +4556,12 @@ static void i915_gem_free_phys_object(struct drm_device *dev, int id)
i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
}
+#ifdef FREEBSD_WIP
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+#endif /* FREEBSD_WIP */
+
drm_pci_free(dev, phys_obj->handle);
free(phys_obj, DRM_I915_GEM);
dev_priv->mm.phys_objs[id - 1] = NULL;
@@ -4178,10 +4578,11 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj)
{
- vm_page_t page;
struct sf_buf *sf;
- char *vaddr, *dst;
- int i, page_count;
+ char *vaddr;
+ char *dst;
+ int i;
+ int page_count;
if (!obj->phys_obj)
return;
@@ -4190,7 +4591,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->base.size / PAGE_SIZE;
VM_OBJECT_WLOCK(obj->base.vm_obj);
for (i = 0; i < page_count; i++) {
- page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
+ vm_page_t page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
if (page == NULL)
continue; /* XXX */
@@ -4212,7 +4613,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
obj->phys_obj->cur_obj = NULL;
obj->phys_obj = NULL;
@@ -4225,7 +4626,6 @@ i915_gem_attach_phys_object(struct drm_device *dev,
int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- vm_page_t page;
struct sf_buf *sf;
char *dst, *src;
int ret = 0;
@@ -4260,7 +4660,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
VM_OBJECT_WLOCK(obj->base.vm_obj);
for (i = 0; i < page_count; i++) {
- page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
+ vm_page_t page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
if (page == NULL) {
ret = -EIO;
break;
@@ -4320,7 +4720,7 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
- mtx_lock(&file_priv->mm.lck);
+ mtx_lock(&file_priv->mm.lock);
while (!list_empty(&file_priv->mm.request_list)) {
struct drm_i915_gem_request *request;
@@ -4330,7 +4730,29 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
list_del(&request->client_list);
request->file_priv = NULL;
}
- mtx_unlock(&file_priv->mm.lck);
+ mtx_unlock(&file_priv->mm.lock);
+}
+
+static void
+i915_gem_inactive_shrink(void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pass1, pass2;
+
+ if (!sx_try_xlock(&dev->dev_struct_lock)) {
+ return;
+ }
+
+ CTR0(KTR_DRM, "gem_lowmem");
+
+ pass1 = i915_gem_purge(dev_priv, -1);
+ pass2 = __i915_gem_shrink(dev_priv, -1, false);
+
+ if (pass2 <= pass1 / 100)
+ i915_gem_shrink_all(dev_priv);
+
+ DRM_UNLOCK(dev);
}
static vm_page_t
@@ -4369,11 +4791,3 @@ i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex, bool *fresh)
atomic_add_long(&i915_gem_wired_pages_cnt, 1);
return (page);
}
-
-#undef __user
-#undef __force
-#undef __iomem
-#undef __must_check
-#undef to_user_ptr
-#undef offset_in_page
-#undef page_to_phys
diff --git a/sys/dev/drm2/i915/i915_gem_context.c b/sys/dev/drm2/i915/i915_gem_context.c
index 39d09b0..5ab9af1 100644
--- a/sys/dev/drm2/i915/i915_gem_context.c
+++ b/sys/dev/drm2/i915/i915_gem_context.c
@@ -115,11 +115,9 @@ static int get_context_size(struct drm_device *dev)
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
-#ifdef FREEBSD_WIP
if (IS_HASWELL(dev))
ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
else
-#endif
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
default:
@@ -140,7 +138,7 @@ static void do_destroy(struct i915_hw_context *ctx)
if (ctx->file_priv)
drm_gem_names_remove(&ctx->file_priv->context_idr, ctx->id);
else
- KASSERT(ctx == dev_priv->rings[RCS].default_context,
+ KASSERT(ctx == dev_priv->ring[RCS].default_context,
("i915_gem_context: ctx != default_context"));
drm_gem_object_unreference(&ctx->obj->base);
@@ -178,7 +176,7 @@ create_hw_context(struct drm_device *dev,
* object tracking code. We give an initial ring value simple to pass an
* assertion in the context switch code.
*/
- ctx->ring = &dev_priv->rings[RCS];
+ ctx->ring = &dev_priv->ring[RCS];
/* Default context will never have a file_priv */
if (file_priv == NULL) {
@@ -234,8 +232,8 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* may not be available. To avoid this we always pin the
* default context.
*/
- dev_priv->rings[RCS].default_context = ctx;
- ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
+ dev_priv->ring[RCS].default_context = ctx;
+ ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret)
goto err_destroy;
@@ -265,12 +263,12 @@ void i915_gem_context_init(struct drm_device *dev)
/* If called from reset, or thaw... we've been here already */
if (dev_priv->hw_contexts_disabled ||
- dev_priv->rings[RCS].default_context)
+ dev_priv->ring[RCS].default_context)
return;
ctx_size = get_context_size(dev);
dev_priv->hw_context_size = get_context_size(dev);
- dev_priv->hw_context_size = roundup(dev_priv->hw_context_size, 4096);
+ dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
if (ctx_size <= 0 || ctx_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
@@ -297,9 +295,9 @@ void i915_gem_context_fini(struct drm_device *dev)
* other code, leading to spurious errors. */
intel_gpu_reset(dev);
- i915_gem_object_unpin(dev_priv->rings[RCS].default_context->obj);
+ i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
- do_destroy(dev_priv->rings[RCS].default_context);
+ do_destroy(dev_priv->ring[RCS].default_context);
}
static int context_idr_cleanup(uint32_t id, void *p, void *data)
@@ -389,7 +387,7 @@ static int do_switch(struct i915_hw_context *to)
if (from_obj == to->obj)
return 0;
- ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
+ ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
@@ -426,8 +424,7 @@ static int do_switch(struct i915_hw_context *to)
*/
if (from_obj != NULL) {
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_gem_object_move_to_active(from_obj, ring,
- i915_gem_next_request_seqno(ring));
+ i915_gem_object_move_to_active(from_obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -472,7 +469,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
if (dev_priv->hw_contexts_disabled)
return 0;
- if (ring != &dev_priv->rings[RCS])
+ if (ring != &dev_priv->ring[RCS])
return 0;
if (to_id == DEFAULT_CONTEXT_ID) {
diff --git a/sys/dev/drm2/i915/i915_gem_evict.c b/sys/dev/drm2/i915/i915_gem_evict.c
index e800b8f..f68eeb6 100644
--- a/sys/dev/drm2/i915/i915_gem_evict.c
+++ b/sys/dev/drm2/i915/i915_gem_evict.c
@@ -30,9 +30,8 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
-#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/i915_drm.h>
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
@@ -46,7 +45,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, bool mappable)
+ unsigned alignment, unsigned cache_level,
+ bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
@@ -81,11 +81,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
- drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
- alignment, 0, 0,
- dev_priv->mm.gtt_mappable_end);
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, cache_level,
+ 0, dev_priv->mm.gtt_mappable_end);
else
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment, 0);
+ drm_mm_init_scan(&dev_priv->mm.gtt_space,
+ min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -93,29 +94,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto found;
}
- /* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- /* Does the object require an outstanding flush? */
- if (obj->base.write_domain)
- continue;
-
- if (mark_free(obj, &unwind_list))
- goto found;
- }
+ if (nonblocking)
+ goto none;
- /* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
- if (mark_free(obj, &unwind_list))
- goto found;
- }
+ /* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (!obj->base.write_domain)
- continue;
-
if (mark_free(obj, &unwind_list))
goto found;
}
+none:
/* Nothing found, clean up and bail out! */
while (!list_empty(&unwind_list)) {
obj = list_first_entry(&unwind_list,
@@ -123,7 +111,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
exec_list);
ret = drm_mm_scan_remove_block(obj->gtt_space);
- KASSERT(ret == 0, ("drm_mm_scan_remove_block failed %d", ret));
+ BUG_ON(ret);
list_del_init(&obj->exec_list);
}
@@ -166,7 +154,7 @@ found:
}
int
-i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
+i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
@@ -174,12 +162,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return -ENOSPC;
- CTR2(KTR_DRM, "evict_everything %p %d", dev, purgeable_only);
+ CTR1(KTR_DRM, "evict_everything %p", dev);
/* The gpu_idle will flush everything in the write domain to the
* active list. Then we must move everything off the active list
@@ -191,17 +178,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
i915_gem_retire_requests(dev);
- KASSERT(list_empty(&dev_priv->mm.flushing_list),
- ("flush list not empty"));
-
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list, mm_list) {
- if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
- if (obj->pin_count == 0)
- i915_gem_object_unbind(obj);
- }
- }
+ &dev_priv->mm.inactive_list, mm_list)
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj));
return 0;
}
diff --git a/sys/dev/drm2/i915/i915_gem_execbuffer.c b/sys/dev/drm2/i915/i915_gem_execbuffer.c
index 6ed1bb9..be3a9c1 100644
--- a/sys/dev/drm2/i915/i915_gem_execbuffer.c
+++ b/sys/dev/drm2/i915/i915_gem_execbuffer.c
@@ -30,226 +30,59 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
+
#include <sys/limits.h>
#include <sys/sf_buf.h>
-struct change_domains {
- uint32_t invalidate_domains;
- uint32_t flush_domains;
- uint32_t flush_rings;
- uint32_t flips;
-};
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped by GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- * 6. Read written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- struct change_domains *cd)
-{
- uint32_t invalidate_domains = 0, flush_domains = 0;
-
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (obj->base.pending_write_domain == 0)
- obj->base.pending_read_domains |= obj->base.read_domains;
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->base.write_domain &&
- (((obj->base.write_domain != obj->base.pending_read_domains ||
- obj->ring != ring)) ||
- (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
- flush_domains |= obj->base.write_domain;
- invalidate_domains |=
- obj->base.pending_read_domains & ~obj->base.write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj);
-
- if (obj->base.pending_write_domain)
- cd->flips |= atomic_load_acq_int(&obj->pending_flip);
-
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all
- * of our domain changes in execbuffers (which clears objects'
- * write_domains). So if we have a current write domain that we
- * aren't changing, set pending_write_domain to that.
- */
- if (flush_domains == 0 && obj->base.pending_write_domain == 0)
- obj->base.pending_write_domain = obj->base.write_domain;
-
- cd->invalidate_domains |= invalidate_domains;
- cd->flush_domains |= flush_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= intel_ring_flag(obj->ring);
- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= intel_ring_flag(ring);
-}
-
struct eb_objects {
- u_long hashmask;
- LIST_HEAD(, drm_i915_gem_object) *buckets;
+ int and;
+ struct hlist_head buckets[0];
};
static struct eb_objects *
eb_create(int size)
{
struct eb_objects *eb;
-
- eb = malloc(sizeof(*eb),
+ int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
+ while (count > size)
+ count >>= 1;
+ eb = malloc(count*sizeof(struct hlist_head) +
+ sizeof(struct eb_objects),
DRM_I915_GEM, M_WAITOK | M_ZERO);
- eb->buckets = hashinit(size, DRM_I915_GEM, &eb->hashmask);
+ if (eb == NULL)
+ return eb;
+
+ eb->and = count - 1;
return eb;
}
static void
eb_reset(struct eb_objects *eb)
{
- int i;
-
- for (i = 0; i <= eb->hashmask; i++)
- LIST_INIT(&eb->buckets[i]);
+ memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
-
- LIST_INSERT_HEAD(&eb->buckets[obj->exec_handle & eb->hashmask],
- obj, exec_node);
+ hlist_add_head(&obj->exec_node,
+ &eb->buckets[obj->exec_handle & eb->and]);
}
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
+ struct hlist_head *head;
+ struct hlist_node *node;
struct drm_i915_gem_object *obj;
- LIST_FOREACH(obj, &eb->buckets[handle & eb->hashmask], exec_node) {
+ head = &eb->buckets[handle & eb->and];
+ hlist_for_each(node, head) {
+ obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
}
@@ -260,14 +93,13 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
static void
eb_destroy(struct eb_objects *eb)
{
-
- free(eb->buckets, DRM_I915_GEM);
free(eb, DRM_I915_GEM);
}
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ !obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -290,28 +122,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj = to_intel_bo(target_obj);
target_offset = target_i915_obj->gtt_offset;
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (unlikely(target_offset == 0)) {
- DRM_DEBUG("No GTT space found for object %d\n",
- reloc->target_handle);
- return ret;
+ /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+ * pipe_control writes because the gpu doesn't properly redirect them
+ * through the ppgtt for non_secure batchbuffers. */
+ if (unlikely(IS_GEN6(dev) &&
+ reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+ !target_i915_obj->has_global_gtt_mapping)) {
+ i915_gem_gtt_bind_object(target_i915_obj,
+ target_i915_obj->cache_level);
}
/* Validate that the target is in a valid r/w GPU domain */
@@ -396,8 +214,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
sf_buf_free(sf);
} else {
- uint32_t *reloc_entry;
- char *reloc_page;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t __iomem *reloc_entry;
+ char __iomem *reloc_page;
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
@@ -409,24 +228,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
- reloc_page = pmap_mapdev_attr(dev->agp->base + (reloc->offset &
+ reloc_page = pmap_mapdev_attr(dev_priv->mm.gtt_base_addr + (reloc->offset &
~PAGE_MASK), PAGE_SIZE, PAT_WRITE_COMBINING);
- reloc_entry = (uint32_t *)
+ reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & PAGE_MASK));
*(volatile uint32_t *)reloc_entry = reloc->delta;
pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE);
}
- /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
- * pipe_control writes because the gpu doesn't properly redirect them
- * through the ppgtt for non_secure batchbuffers. */
- if (unlikely(IS_GEN6(dev) &&
- reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- !target_i915_obj->has_global_gtt_mapping)) {
- i915_gem_gtt_bind_object(target_i915_obj,
- target_i915_obj->cache_level);
- }
-
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
@@ -439,22 +248,22 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
- struct drm_i915_gem_relocation_entry *user_relocs;
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int remain, ret;
- user_relocs = (void *)(uintptr_t)entry->relocs_ptr;
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+
remain = entry->relocation_count;
while (remain) {
struct drm_i915_gem_relocation_entry *r = stack_reloc;
int count = remain;
- if (count > DRM_ARRAY_SIZE(stack_reloc))
- count = DRM_ARRAY_SIZE(stack_reloc);
+ if (count > ARRAY_SIZE(stack_reloc))
+ count = ARRAY_SIZE(stack_reloc);
remain -= count;
- ret = -copyin_nofault(user_relocs, r, count*sizeof(r[0]));
- if (ret != 0)
- return (ret);
+ if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
+ return -EFAULT;
do {
u64 offset = r->presumed_offset;
@@ -464,9 +273,9 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
return ret;
if (r->presumed_offset != offset &&
- copyout_nofault(&r->presumed_offset,
- &user_relocs->presumed_offset,
- sizeof(r->presumed_offset))) {
+ __copy_to_user_inatomic(&user_relocs->presumed_offset,
+ &r->presumed_offset,
+ sizeof(r->presumed_offset))) {
return -EFAULT;
}
@@ -504,17 +313,12 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
struct drm_i915_gem_object *obj;
int ret = 0, pflags;
- /* Try to move as many of the relocation targets off the active list
- * to avoid unnecessary fallbacks to the slow path, as we cannot wait
- * for the retirement with pagefaults disabled.
- */
- i915_gem_retire_requests(dev);
-
/* This is the fast path and we cannot handle a pagefault whilst
- * holding the device lock lest the user pass in the relocations
+ * holding the struct mutex lest the user pass in the relocations
* contained within a mmaped bo. For in such a case we, the page
* fault handler would call i915_gem_fault() and we would try to
- * acquire the device lock again. Obviously this is bad.
+ * acquire the struct mutex again. Obviously this is bad and so
+ * lockdep complains vehemently.
*/
pflags = vm_fault_disable_pagefaults();
list_for_each_entry(obj, objects, exec_list) {
@@ -527,7 +331,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
return ret;
}
-#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -537,9 +342,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
}
static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
@@ -551,15 +357,17 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+ ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
if (ret)
return ret;
+ entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
ret = i915_gem_object_get_fence(obj);
if (ret)
- goto err_unpin;
+ return ret;
if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -568,12 +376,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
}
}
+ /* Ensure ppgtt mapping exists if needed */
+ if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, obj->cache_level);
+
+ obj->has_aliasing_ppgtt_mapping = 1;
+ }
+
entry->offset = obj->gtt_offset;
return 0;
+}
-err_unpin:
- i915_gem_object_unpin(obj);
- return ret;
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ return;
+
+ entry = obj->exec_entry;
+
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+ i915_gem_object_unpin_fence(obj);
+
+ if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+ i915_gem_object_unpin(obj);
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
static int
@@ -581,14 +412,11 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
- drm_i915_private_t *dev_priv;
struct drm_i915_gem_object *obj;
struct list_head ordered_objects;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
- int ret;
- dev_priv = ring->dev->dev_private;
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry;
@@ -612,6 +440,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
+ obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_objects, objects);
@@ -629,7 +458,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
*/
retry = 0;
do {
- ret = 0;
+ int ret = 0;
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(obj, objects, exec_list) {
@@ -649,7 +478,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = pin_and_fence_object(obj, ring);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring);
if (ret)
goto err;
}
@@ -659,78 +488,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (obj->gtt_space)
continue;
- ret = pin_and_fence_object(obj, ring);
- if (ret) {
- int ret_ignore;
-
- /* This can potentially raise a harmless
- * -EINVAL if we failed to bind in the above
- * call. It cannot raise -EINTR since we know
- * that the bo is freshly bound and so will
- * not need to be flushed or waited upon.
- */
- ret_ignore = i915_gem_object_unbind(obj);
- (void)ret_ignore;
- if (obj->gtt_space != NULL)
- printf("%s: gtt_space\n", __func__);
- break;
- }
+ ret = i915_gem_execbuffer_reserve_object(obj, ring);
+ if (ret)
+ goto err;
}
- /* Decrement pin count for bound objects */
- list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry;
-
- if (!obj->gtt_space)
- continue;
-
- entry = obj->exec_entry;
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- i915_gem_object_unpin_fence(obj);
- entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
- }
-
- i915_gem_object_unpin(obj);
-
- /* ... and ensure ppgtt mapping exist if needed. */
- if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, obj->cache_level);
-
- obj->has_aliasing_ppgtt_mapping = 1;
- }
- }
+err: /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list)
+ i915_gem_execbuffer_unreserve_object(obj);
- if (ret != -ENOSPC || retry > 1)
+ if (ret != -ENOSPC || retry++)
return ret;
- /* First attempt, just clear anything that is purgeable.
- * Second attempt, clear the entire GTT.
- */
- ret = i915_gem_evict_everything(ring->dev, retry == 0);
+ ret = i915_gem_evict_everything(ring->dev);
if (ret)
return ret;
-
- retry++;
} while (1);
-
-err:
- list_for_each_entry_continue_reverse(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry;
-
- if (!obj->gtt_space)
- continue;
-
- entry = obj->exec_entry;
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- i915_gem_object_unpin_fence(obj);
- entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
- }
-
- i915_gem_object_unpin(obj);
- }
-
- return ret;
}
static int
@@ -762,22 +535,49 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
for (i = 0; i < count; i++)
total += exec[i].relocation_count;
- reloc_offset = malloc(count * sizeof(*reloc_offset), DRM_I915_GEM,
- M_WAITOK | M_ZERO);
- reloc = malloc(total * sizeof(*reloc), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
+ reloc = drm_malloc_ab(total, sizeof(*reloc));
+ if (reloc == NULL || reloc_offset == NULL) {
+ drm_free_large(reloc);
+ drm_free_large(reloc_offset);
+ DRM_LOCK(dev);
+ return -ENOMEM;
+ }
total = 0;
for (i = 0; i < count; i++) {
- struct drm_i915_gem_relocation_entry *user_relocs;
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ u64 invalid_offset = (u64)-1;
+ int j;
+
+ user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
- user_relocs = (void *)(uintptr_t)exec[i].relocs_ptr;
- ret = -copyin(user_relocs, reloc + total,
- exec[i].relocation_count * sizeof(*reloc));
- if (ret != 0) {
+ if (copy_from_user(reloc+total, user_relocs,
+ exec[i].relocation_count * sizeof(*reloc))) {
+ ret = -EFAULT;
DRM_LOCK(dev);
goto err;
}
+ /* As we do not update the known relocation offsets after
+ * relocating (due to the complexities in lock handling),
+ * we need to mark them as invalid now so that we force the
+ * relocation processing next time. Just in case the target
+ * object is evicted and then rebound into its old
+ * presumed_offset before the next execbuffer - if that
+ * happened we would make the mistake of assuming that the
+ * relocations were valid.
+ */
+ for (j = 0; j < exec[i].relocation_count; j++) {
+ if (copy_to_user(&user_relocs[j].presumed_offset,
+ &invalid_offset,
+ sizeof(invalid_offset))) {
+ ret = -EFAULT;
+ DRM_LOCK(dev);
+ goto err;
+ }
+ }
+
reloc_offset[i] = total;
total += exec[i].relocation_count;
}
@@ -791,8 +591,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj;
-
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
@@ -827,40 +625,12 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
*/
err:
- free(reloc, DRM_I915_GEM);
- free(reloc_offset, DRM_I915_GEM);
+ drm_free_large(reloc);
+ drm_free_large(reloc_offset);
return ret;
}
static int
-i915_gem_execbuffer_flush(struct drm_device *dev,
- uint32_t invalidate_domains,
- uint32_t flush_domains,
- uint32_t flush_rings)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int i, ret;
-
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- intel_gtt_chipset_flush();
-
- if (flush_domains & I915_GEM_DOMAIN_GTT)
- wmb();
-
- if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
- for (i = 0; i < I915_NUM_RINGS; i++)
- if (flush_rings & (1 << i)) {
- ret = i915_gem_flush_ring(&dev_priv->rings[i],
- invalidate_domains, flush_domains);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
u32 plane, flip_mask;
@@ -897,41 +667,40 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct change_domains cd;
+ uint32_t flush_domains = 0;
+ uint32_t flips = 0;
int ret;
- memset(&cd, 0, sizeof(cd));
- list_for_each_entry(obj, objects, exec_list)
- i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
-
- if (cd.invalidate_domains | cd.flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- cd.invalidate_domains,
- cd.flush_domains);
-#endif
- ret = i915_gem_execbuffer_flush(ring->dev,
- cd.invalidate_domains,
- cd.flush_domains,
- cd.flush_rings);
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
- }
- if (cd.flips) {
- ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
- if (ret)
- return ret;
+ if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ if (obj->base.pending_write_domain)
+ flips |= atomic_read(&obj->pending_flip);
+
+ flush_domains |= obj->base.write_domain;
}
- list_for_each_entry(obj, objects, exec_list) {
- ret = i915_gem_object_sync(obj, ring);
+ if (flips) {
+ ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
if (ret)
return ret;
}
- return 0;
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ i915_gem_chipset_flush(ring->dev);
+
+ if (flush_domains & I915_GEM_DOMAIN_GTT)
+ wmb();
+
+ /* Unconditionally invalidate gpu caches and ensure that we do flush
+ * any residual writes from the previous batch.
+ */
+ return intel_ring_invalidate_all_caches(ring);
}
static bool
@@ -941,11 +710,13 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
}
static int
-validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
- vm_page_t ***map, int **maplen)
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count, vm_page_t ***map, int **maplen)
{
+ int i;
+ int relocs_total = 0;
+ int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
vm_page_t *ma;
- int i, length, page_count;
/* XXXKIB various limits checking is missing there */
*map = malloc(count * sizeof(*ma), DRM_I915_GEM, M_WAITOK | M_ZERO);
@@ -953,10 +724,16 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
M_ZERO);
for (i = 0; i < count; i++) {
- /* First check for malicious input causing overflow */
- if (exec[i].relocation_count >
- INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ int length; /* limited by fault_in_pages_readable() */
+
+ /* First check for malicious input causing overflow in
+ * the worst case where we need to allocate the entire
+ * relocation tree as a single array.
+ */
+ if (exec[i].relocation_count > relocs_max - relocs_total)
return -EINVAL;
+ relocs_total += exec[i].relocation_count;
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
@@ -971,11 +748,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
* conservative and request a page slot for each
* partial page. Thus +2.
*/
+ int page_count;
+
page_count = howmany(length, PAGE_SIZE) + 2;
ma = (*map)[i] = malloc(page_count * sizeof(vm_page_t),
DRM_I915_GEM, M_WAITOK | M_ZERO);
(*maplen)[i] = vm_fault_quick_hold_pages(
- &curproc->p_vmspace->vm_map, exec[i].relocs_ptr, length,
+ &curproc->p_vmspace->vm_map, (vm_offset_t)ptr, length,
VM_PROT_READ | VM_PROT_WRITE, ma, page_count);
if ((*maplen)[i] == -1) {
free(ma, DRM_I915_GEM);
@@ -989,108 +768,45 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
- struct intel_ring_buffer *ring,
- u32 seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
- uint32_t old_read, old_write;
list_for_each_entry(obj, objects, exec_list) {
- old_read = obj->base.read_domains;
- old_write = obj->base.write_domain;
+#if defined(KTR)
+ u32 old_read = obj->base.read_domains;
+ u32 old_write = obj->base.write_domain;
+#endif
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- i915_gem_object_move_to_active(obj, ring, seqno);
+ i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->pending_gpu_write = true;
- list_move_tail(&obj->gpu_write_list,
- &ring->gpu_write_list);
+ obj->last_write_seqno = intel_ring_get_seqno(ring);
if (obj->pin_count) /* check for potential scanout */
- intel_mark_busy(ring->dev, obj);
+ intel_mark_fb_busy(obj);
}
+
CTR3(KTR_DRM, "object_change_domain move_to_active %p %x %x",
obj, old_read, old_write);
}
-
- intel_mark_busy(ring->dev, NULL);
}
-int i915_gem_sync_exec_requests;
-
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
{
- struct drm_i915_gem_request *request;
- u32 invalidate;
-
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires.
- *
- * The sampler always gets flushed on i965 (sigh).
- */
- invalidate = I915_GEM_DOMAIN_COMMAND;
- if (INTEL_INFO(dev)->gen >= 4)
- invalidate |= I915_GEM_DOMAIN_SAMPLER;
- if (ring->flush(ring, invalidate, 0)) {
- i915_gem_next_request_seqno(ring);
- return;
- }
+ /* Unconditionally force add_request to emit a full flush. */
+ ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
- if (request == NULL || i915_add_request(ring, file, request)) {
- i915_gem_next_request_seqno(ring);
- free(request, DRM_I915_GEM);
- } else if (i915_gem_sync_exec_requests) {
- i915_wait_request(ring, request->seqno);
- i915_gem_retire_requests(dev);
- }
-}
-
-static void
-i915_gem_fix_mi_batchbuffer_end(struct drm_i915_gem_object *batch_obj,
- uint32_t batch_start_offset, uint32_t batch_len)
-{
- char *mkva;
- uint64_t po_r, po_w;
- uint32_t cmd;
-
- po_r = batch_obj->base.dev->agp->base + batch_obj->gtt_offset +
- batch_start_offset + batch_len;
- if (batch_len > 0)
- po_r -= 4;
- mkva = pmap_mapdev_attr(trunc_page(po_r), 2 * PAGE_SIZE,
- PAT_WRITE_COMBINING);
- po_r &= PAGE_MASK;
- cmd = *(uint32_t *)(mkva + po_r);
-
- if (cmd != MI_BATCH_BUFFER_END) {
- /*
- * batch_len != 0 due to the check at the start of
- * i915_gem_do_execbuffer
- */
- if (batch_obj->base.size > batch_start_offset + batch_len) {
- po_w = po_r + 4;
-/* DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END !\n"); */
- } else {
- po_w = po_r;
-DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END, overwriting last bo cmd !\n");
- }
- *(uint32_t *)(mkva + po_w) = MI_BATCH_BUFFER_END;
- }
-
- pmap_unmapdev((vm_offset_t)mkva, 2 * PAGE_SIZE);
+ (void)i915_add_request(ring, file, NULL);
}
-int i915_fix_mi_batchbuffer_end = 0;
-
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_ring_buffer *ring)
@@ -1098,7 +814,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
- if (!IS_GEN7(dev) || ring != &dev_priv->rings[RCS])
+ if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
return 0;
ret = intel_ring_begin(ring, 4 * 3);
@@ -1130,8 +846,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
- u32 seqno;
u32 mask;
+ u32 flags;
int ret, mode, i;
vm_page_t **relocs_ma;
int *relocs_len;
@@ -1141,21 +857,30 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (args->batch_len == 0)
- return (0);
-
ret = validate_exec_list(exec, args->buffer_count,
&relocs_ma, &relocs_len);
if (ret)
goto pre_mutex_err;
+ flags = 0;
+ if (args->flags & I915_EXEC_SECURE) {
+ if (!file->is_master || !capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto pre_mutex_err;
+ }
+
+ flags |= I915_DISPATCH_SECURE;
+ }
+ if (args->flags & I915_EXEC_IS_PINNED)
+ flags |= I915_DISPATCH_PINNED;
+
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
- ring = &dev_priv->rings[RCS];
+ ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
- ring = &dev_priv->rings[VCS];
+ ring = &dev_priv->ring[VCS];
if (ctx_id != 0) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
@@ -1164,7 +889,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
break;
case I915_EXEC_BLT:
- ring = &dev_priv->rings[BCS];
+ ring = &dev_priv->ring[BCS];
if (ctx_id != 0) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
@@ -1191,7 +916,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (ring == &dev_priv->rings[RCS] &&
+ if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4) {
ret = -EINVAL;
@@ -1222,7 +947,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (args->num_cliprects != 0) {
- if (ring != &dev_priv->rings[RCS]) {
+ if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
ret = -EINVAL;
goto pre_mutex_err;
@@ -1240,12 +965,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = -EINVAL;
goto pre_mutex_err;
}
+
cliprects = malloc(args->num_cliprects * sizeof(*cliprects),
- DRM_I915_GEM, M_WAITOK | M_ZERO);
- ret = -copyin((void *)(uintptr_t)args->cliprects_ptr, cliprects,
- sizeof(*cliprects) * args->num_cliprects);
- if (ret != 0)
+ DRM_I915_GEM, M_WAITOK);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
goto pre_mutex_err;
+ }
+
+ if (copy_from_user(cliprects,
+ (struct drm_clip_rect __user *)(uintptr_t)
+ args->cliprects_ptr,
+ sizeof(*cliprects)*args->num_cliprects)) {
+ ret = -EFAULT;
+ goto pre_mutex_err;
+ }
}
ret = i915_mutex_lock_interruptible(dev);
@@ -1325,6 +1059,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+ /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+ * batch" bit. Hence we need to pin secure batches into the global gtt.
+ * hsw should have this fixed, but let's be paranoid and do it
+ * unconditionally for now. */
+ if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
@@ -1333,23 +1074,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
- seqno = i915_gem_next_request_seqno(ring);
- for (i = 0; i < I915_NUM_RINGS - 1; i++) {
- if (seqno < ring->sync_seqno[i]) {
- /* The GPU can not handle its semaphore value wrapping,
- * so every billion or so execbuffers, we need to stall
- * the GPU in order to reset the counters.
- */
- ret = i915_gpu_idle(dev);
- if (ret)
- goto err;
- i915_gem_retire_requests(dev);
-
- KASSERT(ring->sync_seqno[i] == 0, ("Non-zero sync_seqno"));
- }
- }
-
- if (ring == &dev_priv->rings[RCS] &&
+ if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
@@ -1372,12 +1097,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
-
- if (i915_fix_mi_batchbuffer_end) {
- i915_gem_fix_mi_batchbuffer_end(batch_obj,
- args->batch_start_offset, args->batch_len);
- }
-
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
@@ -1386,21 +1105,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len);
+ exec_start, exec_len,
+ flags);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len);
+ exec_start, exec_len,
+ flags);
if (ret)
goto err;
}
- CTR4(KTR_DRM, "ring_dispatch %s %d exec %x %x", ring->name, seqno,
- exec_start, exec_len);
+ CTR3(KTR_DRM, "ring_dispatch ring=%s seqno=%d flags=%u", ring->name,
+ intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+ i915_gem_execbuffer_move_to_active(&objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
@@ -1444,9 +1165,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
- DRM_DEBUG("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
@@ -1454,18 +1172,24 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* Copy in the exec list from userland */
/* XXXKIB user-controlled malloc size */
- exec_list = malloc(sizeof(*exec_list) * args->buffer_count,
- DRM_I915_GEM, M_WAITOK);
- exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count,
- DRM_I915_GEM, M_WAITOK);
- ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec_list,
- sizeof(*exec_list) * args->buffer_count);
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (void __user *)(uintptr_t)args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
- free(exec_list, DRM_I915_GEM);
- free(exec2_list, DRM_I915_GEM);
- return ret;
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
}
for (i = 0; i < args->buffer_count; i++) {
@@ -1497,17 +1221,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
- ret = -copyout(exec_list, (void *)(uintptr_t)args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
- free(exec_list, DRM_I915_GEM);
- free(exec2_list, DRM_I915_GEM);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
return ret;
}
@@ -1519,9 +1245,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
- DRM_DEBUG("buffers_ptr %jx buffer_count %d len %08x\n",
- (uintmax_t)args->buffers_ptr, args->buffer_count, args->batch_len);
-
if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
@@ -1531,8 +1254,15 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
/* XXXKIB user-controllable malloc size */
exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count,
DRM_I915_GEM, M_WAITOK);
- ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec2_list,
- sizeof(*exec2_list) * args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
@@ -1543,9 +1273,11 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = -copyout(exec2_list, (void *)(uintptr_t)args->buffers_ptr,
- sizeof(*exec2_list) * args->buffer_count);
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
if (ret) {
+ ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
diff --git a/sys/dev/drm2/i915/i915_gem_gtt.c b/sys/dev/drm2/i915/i915_gem_gtt.c
index 7942979..4b8163d 100644
--- a/sys/dev/drm2/i915/i915_gem_gtt.c
+++ b/sys/dev/drm2/i915/i915_gem_gtt.c
@@ -26,28 +26,75 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
+#include <vm/vm_pageout.h>
+
+typedef uint32_t gtt_pte_t;
+
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID (1 << 0)
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID (1 << 0)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define HSW_PTE_UNCACHED (0)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+static inline gtt_pte_t pte_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_LLC_MLC:
+ /* Haswell doesn't set L3 this way */
+ if (IS_HASWELL(dev))
+ pte |= GEN6_PTE_CACHE_LLC;
+ else
+ pte |= GEN6_PTE_CACHE_LLC_MLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ if (IS_HASWELL(dev))
+ pte |= HSW_PTE_UNCACHED;
+ else
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ BUG();
+ }
+
+
+ return pte;
+}
/* PPGTT support for Sandybdrige/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
- uint32_t *pt_vaddr;
- uint32_t scratch_pte;
+ gtt_pte_t *pt_vaddr;
+ gtt_pte_t scratch_pte;
+ unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned last_pte, i;
struct sf_buf *sf;
- unsigned act_pd, first_pte, last_pte, i;
-
- act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
- scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+ scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
+ I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -68,7 +115,6 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
first_pte = 0;
act_pd++;
}
-
}
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
@@ -77,47 +123,125 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt;
unsigned first_pd_entry_in_global_pt;
int i;
-
+ int ret = -ENOMEM;
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
- * now. */
- first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
+ * now. */
+ first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
ppgtt = malloc(sizeof(*ppgtt), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ if (!ppgtt)
+ return ret;
+ ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
- ppgtt->pt_pages = malloc(sizeof(vm_page_t) * ppgtt->num_pd_entries,
- DRM_I915_GEM, M_WAITOK | M_ZERO);
+ ppgtt->pt_pages = malloc(sizeof(struct page *)*ppgtt->num_pd_entries,
+ DRM_I915_GEM, M_WAITOK | M_ZERO);
+ if (!ppgtt->pt_pages)
+ goto err_ppgtt;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = vm_page_alloc(NULL, 0,
VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
- if (ppgtt->pt_pages[i] == NULL) {
- dev_priv->mm.aliasing_ppgtt = ppgtt;
- i915_gem_cleanup_aliasing_ppgtt(dev);
- return (-ENOMEM);
+ if (!ppgtt->pt_pages[i])
+ goto err_pt_alloc;
+ }
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ ppgtt->pt_dma_addr = malloc(sizeof(dma_addr_t)
+ *ppgtt->num_pd_entries,
+ DRM_I915_GEM, M_WAITOK | M_ZERO);
+ if (!ppgtt->pt_dma_addr)
+ goto err_pt_alloc;
+
+#ifdef CONFIG_INTEL_IOMMU /* <- Added as a marker on FreeBSD. */
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
+ pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
+ 0, 4096,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(dev->pdev,
+ pt_addr)) {
+ ret = -EIO;
+ goto err_pd_pin;
+
+ }
+ ppgtt->pt_dma_addr[i] = pt_addr;
}
+#endif
}
- ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt.scratch_page_dma;
+ ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
+
+ i915_ppgtt_clear_range(ppgtt, 0,
+ ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+
+ ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
- i915_ppgtt_clear_range(ppgtt, 0, ppgtt->num_pd_entries *
- I915_PPGTT_PT_ENTRIES);
- ppgtt->pd_offset = (first_pd_entry_in_global_pt) * sizeof(uint32_t);
dev_priv->mm.aliasing_ppgtt = ppgtt;
return 0;
+
+#ifdef CONFIG_INTEL_IOMMU /* <- Added as a marker on FreeBSD. */
+err_pd_pin:
+ if (ppgtt->pt_dma_addr) {
+ for (i--; i >= 0; i--)
+ pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ }
+#endif
+err_pt_alloc:
+ free(ppgtt->pt_dma_addr, DRM_I915_GEM);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ if (ppgtt->pt_pages[i]) {
+ vm_page_unwire(ppgtt->pt_pages[i], PQ_INACTIVE);
+ vm_page_free(ppgtt->pt_pages[i]);
+ }
+ }
+ free(ppgtt->pt_pages, DRM_I915_GEM);
+err_ppgtt:
+ free(ppgtt, DRM_I915_GEM);
+
+ return ret;
+}
+
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ int i;
+
+ if (!ppgtt)
+ return;
+
+#ifdef CONFIG_INTEL_IOMMU /* <- Added as a marker on FreeBSD. */
+ if (ppgtt->pt_dma_addr) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ }
+#endif
+
+ free(ppgtt->pt_dma_addr, DRM_I915_GEM);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ vm_page_unwire(ppgtt->pt_pages[i], PQ_INACTIVE);
+ vm_page_free(ppgtt->pt_pages[i]);
+ }
+ free(ppgtt->pt_pages, DRM_I915_GEM);
+ free(ppgtt, DRM_I915_GEM);
}
static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
+ vm_page_t *pages,
unsigned first_entry,
unsigned num_entries,
- vm_page_t *pages,
- uint32_t pte_flags)
+ enum i915_cache_level cache_level)
{
- uint32_t *pt_vaddr, pte;
+ uint32_t *pt_vaddr;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned j, last_pte;
@@ -135,8 +259,8 @@ static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
for (j = first_pte; j < last_pte; j++) {
page_addr = VM_PAGE_TO_PHYS(*pages);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[j] = pte | pte_flags;
+ pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
+ cache_level);
pages++;
}
@@ -154,30 +278,11 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct drm_device *dev;
- struct drm_i915_private *dev_priv;
- uint32_t pte_flags;
-
- dev = obj->base.dev;
- dev_priv = dev->dev_private;
- pte_flags = GEN6_PTE_VALID;
-
- switch (cache_level) {
- case I915_CACHE_LLC_MLC:
- pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
- break;
- case I915_CACHE_LLC:
- pte_flags |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte_flags |= GEN6_PTE_UNCACHED;
- break;
- default:
- panic("cache mode");
- }
-
- i915_ppgtt_insert_pages(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT, obj->pages, pte_flags);
+ i915_ppgtt_insert_pages(ppgtt,
+ obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -194,7 +299,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- u_int first_pd_entry_in_global_pt;
+ uint32_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
@@ -202,17 +307,22 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
return;
- first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
+ pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
vm_paddr_t pt_addr;
- pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]);
+ if (dev_priv->mm.gtt->needs_dmar)
+ pt_addr = ppgtt->pt_dma_addr[i];
+ else
+ pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]);
+
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
- intel_gtt_write(first_pd_entry_in_global_pt + i, pd_entry);
+ /* NOTE Linux<->FreeBSD: Arguments of writel() are reversed. */
+ writel(pd_addr + i, pd_entry);
}
- intel_gtt_read_pte(first_pd_entry_in_global_pt);
+ readl(pd_addr);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
@@ -250,12 +360,12 @@ static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;
- if (dev_priv->mm.gtt.do_idle_maps) {
+ if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
- DELAY(10);
+ udelay(10);
}
}
@@ -264,57 +374,35 @@ static bool do_idling(struct drm_i915_private *dev_priv)
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
- if (dev_priv->mm.gtt.do_idle_maps)
+ if (unlikely(dev_priv->mm.gtt->do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
-void
-i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+ unsigned first_entry,
+ unsigned num_entries)
{
- struct drm_i915_private *dev_priv;
- struct i915_hw_ppgtt *ppgtt;
- vm_page_t m;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ gtt_pte_t scratch_pte;
+ gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+ const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
int i;
- dev_priv = dev->dev_private;
- ppgtt = dev_priv->mm.aliasing_ppgtt;
- if (ppgtt == NULL)
+ if (INTEL_INFO(dev)->gen < 6) {
+ intel_gtt_clear_range(first_entry, num_entries);
return;
- dev_priv->mm.aliasing_ppgtt = NULL;
-
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- m = ppgtt->pt_pages[i];
- if (m != NULL) {
- vm_page_unwire(m, PQ_INACTIVE);
- vm_page_free(m);
- }
}
- free(ppgtt->pt_pages, DRM_I915_GEM);
- free(ppgtt, DRM_I915_GEM);
-}
-
-
-static unsigned int
-cache_level_to_agp_type(struct drm_device *dev, enum i915_cache_level
- cache_level)
-{
- switch (cache_level) {
- case I915_CACHE_LLC_MLC:
- if (INTEL_INFO(dev)->gen >= 6)
- return (AGP_USER_CACHED_MEMORY_LLC_MLC);
- /*
- * Older chipsets do not have this extra level of CPU
- * cacheing, so fallthrough and request the PTE simply
- * as cached.
- */
- case I915_CACHE_LLC:
- return (AGP_USER_CACHED_MEMORY);
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
- default:
- case I915_CACHE_NONE:
- return (AGP_USER_MEMORY);
- }
+ scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+ readl(gtt_base);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -323,45 +411,99 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+ i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
+ if (obj->has_dma_mapping)
+ return 0;
+
+#ifdef FREEBSD_WIP
+ if (!dma_map_sg(&obj->base.dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return -ENOSPC;
+#endif /* FREEBSD_WIP */
return 0;
}
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level level)
{
- struct drm_device *dev;
- struct drm_i915_private *dev_priv;
- unsigned int agp_type;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+#if defined(INVARIANTS)
+ const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+#endif
+ gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+ int i = 0;
+ vm_paddr_t addr;
+
+ for (i = 0; i < obj->base.size >> PAGE_SHIFT; ++i) {
+ addr = VM_PAGE_TO_PHYS(obj->pages[i]);
+ iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+ }
- dev = obj->base.dev;
- dev_priv = dev->dev_private;
- agp_type = cache_level_to_agp_type(dev, cache_level);
+ BUG_ON(i > max_entries);
+ BUG_ON(i != obj->base.size / PAGE_SIZE);
+
+ /* XXX: This serves as a posting read to make sure that the PTE has
+ * actually been updated. There is some concern that even though
+ * registers and PTEs are within the same BAR that they are potentially
+ * of NUMA access patterns. Therefore, even with the way we assume
+ * hardware should work, we must keep this posting read for paranoia.
+ */
+ if (i != 0)
+ WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+
+ /* This next bit makes the above posting read even more important. We
+ * want to flush the TLBs only after we're certain all the PTE updates
+ * have finished.
+ */
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT, obj->pages, agp_type);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev = obj->base.dev;
+ if (INTEL_INFO(dev)->gen < 6) {
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+ intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ flags);
+ } else {
+ gen6_ggtt_bind_object(obj, cache_level);
+ }
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
-
- intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ i915_ggtt_clear_range(obj->base.dev,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
}
@@ -374,35 +516,244 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
+#ifdef FREEBSD_WIP
+ if (!obj->has_dma_mapping)
+ dma_unmap_sg(&dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
+#endif /* FREEBSD_WIP */
+
undo_idling(dev_priv, interruptible);
}
-int i915_gem_init_global_gtt(struct drm_device *dev,
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+ unsigned long color,
+ unsigned long *start,
+ unsigned long *end)
+{
+ if (node->color != color)
+ *start += 4096;
+
+ if (!list_empty(&node->node_list)) {
+ node = list_entry(node->node_list.next,
+ struct drm_mm_node,
+ node_list);
+ if (node->allocated && node->color != color)
+ *end -= 4096;
+ }
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned long mappable;
- int error;
-
- mappable = min(end, mappable_end) - start;
/* Substract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+ if (!HAS_LLC(dev))
+ dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;
dev_priv->mm.gtt_end = end;
dev_priv->mm.gtt_total = end - start;
- dev_priv->mm.mappable_gtt_total = mappable;
+ dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
/* ... but ensure that we clear the entire range. */
- intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+
device_printf(dev->dev,
"taking over the fictitious range 0x%lx-0x%lx\n",
- dev->agp->base + start, dev->agp->base + start + mappable);
- error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
- dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
- return (error);
+ dev_priv->mm.gtt_base_addr + start,
+ dev_priv->mm.gtt_base_addr + start + dev_priv->mm.mappable_gtt_total);
+ vm_phys_fictitious_reg_range(dev_priv->mm.gtt_base_addr + start,
+ dev_priv->mm.gtt_base_addr + start + dev_priv->mm.mappable_gtt_total,
+ VM_MEMATTR_WRITE_COMBINING);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ vm_page_t page;
+ dma_addr_t dma_addr;
+ int tries = 0;
+ int req = VM_ALLOC_ZERO | VM_ALLOC_NOOBJ;
+
+retry:
+ page = vm_page_alloc_contig(NULL, 0, req, 1, 0, 0xffffffff,
+ PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
+ if (page == NULL) {
+ if (tries < 1) {
+ if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff,
+ PAGE_SIZE, 0))
+ VM_WAIT;
+ tries++;
+ goto retry;
+ }
+ return -ENOMEM;
+ }
+ if ((page->flags & PG_ZERO) == 0)
+ pmap_zero_page(page);
+
+#ifdef CONFIG_INTEL_IOMMU
+ dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, dma_addr))
+ return -EINVAL;
+#else
+ dma_addr = VM_PAGE_TO_PHYS(page);
+#endif
+ dev_priv->mm.gtt->scratch_page = page;
+ dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+
+ return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU /* <- Added as a marker on FreeBSD. */
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+#endif
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+ return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+{
+ static const int stolen_decoder[] = {
+ 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
+ snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
+ snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
+ return stolen_decoder[snb_gmch_ctl] << 20;
+}
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ vm_paddr_t gtt_bus_addr;
+ u16 snb_gmch_ctl;
+ int ret;
+
+ /* On modern platforms we need not worry ourself with the legacy
+ * hostbridge query stuff. Skip it entirely
+ */
+ if (INTEL_INFO(dev)->gen < 6) {
+#ifdef FREEBSD_WIP
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
+ }
+#endif /* FREEBSD_WIP */
+
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+#ifdef FREEBSD_WIP
+ intel_gmch_remove();
+#endif /* FREEBSD_WIP */
+ return -ENODEV;
+ }
+ return 0;
+ }
+
+ dev_priv->mm.gtt = malloc(sizeof(*dev_priv->mm.gtt), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ if (!dev_priv->mm.gtt)
+ return -ENOMEM;
+
+#ifdef FREEBSD_WIP
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+#endif /* FREEBSD_WIP */
+
+#ifdef CONFIG_INTEL_IOMMU
+ dev_priv->mm.gtt->needs_dmar = 1;
+#endif
+
+ /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+ gtt_bus_addr = drm_get_resource_start(dev, 0) + (2<<20);
+ dev_priv->mm.gtt->gma_bus_addr = drm_get_resource_start(dev, 2);
+
+ /* i9xx_setup */
+ pci_read_config_word(dev->dev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ dev_priv->mm.gtt->gtt_total_entries =
+ gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
+ if (INTEL_INFO(dev)->gen < 7)
+ dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+ else
+ dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
+
+ dev_priv->mm.gtt->gtt_mappable_entries = drm_get_resource_len(dev, 2) >> PAGE_SHIFT;
+ /* 64/512MB is the current min/max we actually know of, but this is just a
+ * coarse sanity check.
+ */
+ if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
+ dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
+ DRM_ERROR("Unknown GMADR entries (%d)\n",
+ dev_priv->mm.gtt->gtt_mappable_entries);
+ ret = -ENXIO;
+ goto err_out;
+ }
+
+ ret = setup_scratch_page(dev);
+ if (ret) {
+ DRM_ERROR("Scratch setup failed\n");
+ goto err_out;
+ }
+
+ dev_priv->mm.gtt->gtt = pmap_mapdev_attr(gtt_bus_addr,
+ /* The size is used later by pmap_unmapdev. */
+ dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t),
+ VM_MEMATTR_WRITE_COMBINING);
+ if (!dev_priv->mm.gtt->gtt) {
+ DRM_ERROR("Failed to map the gtt page table\n");
+ teardown_scratch_page(dev);
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
+ DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
+ DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
+ DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+
+ return 0;
+
+err_out:
+ free(dev_priv->mm.gtt, DRM_I915_GEM);
+#ifdef FREEBSD_WIP
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gmch_remove();
+#endif /* FREEBSD_WIP */
+ return ret;
+}
+
+void i915_gem_gtt_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ pmap_unmapdev((vm_offset_t)dev_priv->mm.gtt->gtt,
+ dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+ teardown_scratch_page(dev);
+#ifdef FREEBSD_WIP
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gmch_remove();
+#endif /* FREEBSD_WIP */
+ if (INTEL_INFO(dev)->gen >= 6)
+ free(dev_priv->mm.gtt, DRM_I915_GEM);
}
diff --git a/sys/dev/drm2/i915/i915_gem_stolen.c b/sys/dev/drm2/i915/i915_gem_stolen.c
index 1955a11..6d42e73 100644
--- a/sys/dev/drm2/i915/i915_gem_stolen.c
+++ b/sys/dev/drm2/i915/i915_gem_stolen.c
@@ -30,7 +30,6 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
@@ -46,56 +45,48 @@ __FBSDID("$FreeBSD$");
* for is a boon.
*/
-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
- * a physical one
- * @dev: drm device
- * @offset: address to translate
- *
- * Some chip functions require allocations from stolen space and need the
- * physical address of the memory in question.
- */
-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- device_t pdev = dev_priv->bridge_dev;
u32 base;
-#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so compute the base by subtracting the stolen memory
- * from the Top of Low Usable DRAM which is where the BIOS places
- * the graphics stolen memory.
+ * is unreliable, so on those compute the base by subtracting the
+ * stolen memory from the Top of Low Usable DRAM which is where the
+ * BIOS places the graphics stolen memory.
+ *
+ * On gen2, the layout is slightly different with the Graphics Segment
+ * immediately following Top of Memory (or Top of Usable DRAM). Note
+ * it appears that TOUD is only reported by 865g, so we just use the
+ * top of memory as determined by the e820 probe.
+ *
+ * XXX gen2 requires an unavailable symbol and 945gm fails with
+ * its value of TOLUD.
*/
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* top 32bits are reserved = 0 */
- pci_read_config_dword(pdev, 0xA4, &base);
- } else {
- /* XXX presume 8xx is the same as i915 */
- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
- }
-#else
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- u16 val;
- val = pci_read_config(pdev, 0xb0, 2);
- base = val >> 4 << 20;
- } else {
+ base = 0;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* Read Base Data of Stolen Memory Register (BDSM) directly.
+ * Note that there is also a MCHBAR miror at 0x1080c0 or
+ * we could use device 2:0x5c instead.
+ */
+ pci_read_config_dword(dev->dev, 0xB0, &base);
+ base &= ~4095; /* lower bits used for locking register */
+ } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* Read Graphics Base of Stolen Memory directly */
+ pci_read_config_dword(dev->dev, 0xA4, &base);
+#if 0
+ } else if (IS_GEN3(dev)) {
u8 val;
- val = pci_read_config(pdev, 0x9c, 1);
+ /* Stolen is immediately below Top of Low Usable DRAM */
+ pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
- }
- base -= dev_priv->mm.gtt.stolen_size;
+ base -= dev_priv->mm.gtt->stolen_size;
+ } else {
+ /* Stolen is immediately above Top of Memory */
+ base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
+ }
- return base + offset;
+ return base;
}
static void i915_warn_stolen(struct drm_device *dev)
@@ -107,7 +98,7 @@ static void i915_warn_stolen(struct drm_device *dev)
static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *compressed_llb;
+ struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
unsigned long cfb_base;
unsigned long ll_base = 0;
@@ -120,7 +111,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
if (!compressed_fb)
goto err;
- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ cfb_base = dev_priv->mm.stolen_base + compressed_fb->start;
if (!cfb_base)
goto err_fb;
@@ -133,7 +124,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
if (!compressed_llb)
goto err_fb;
- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ ll_base = dev_priv->mm.stolen_base + compressed_llb->start;
if (!ll_base)
goto err_llb;
}
@@ -152,7 +143,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
}
DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
- cfb_base, ll_base, size >> 20);
+ (long)cfb_base, (long)ll_base, size >> 20);
return;
err_llb:
@@ -182,7 +173,14 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long prealloc_size = dev_priv->mm.gtt.stolen_size;
+ unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
+
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ if (dev_priv->mm.stolen_base == 0)
+ return 0;
+
+ DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
+ dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
diff --git a/sys/dev/drm2/i915/i915_gem_tiling.c b/sys/dev/drm2/i915/i915_gem_tiling.c
index 8e849a3..62c3c2d 100644
--- a/sys/dev/drm2/i915/i915_gem_tiling.c
+++ b/sys/dev/drm2/i915/i915_gem_tiling.c
@@ -29,7 +29,6 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
@@ -95,7 +94,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (IS_VALLEYVIEW(dev)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
uint32_t dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
@@ -313,12 +315,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!i915_tiling_ok(dev,
args->stride, obj->base.size, args->tiling_mode)) {
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
if (obj->pin_count) {
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
@@ -483,7 +485,8 @@ i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj,
return;
new_bit_17 = VM_PAGE_TO_PHYS(m) >> 17;
- if ((new_bit_17 & 0x1) != (test_bit(m->pindex, obj->bit_17) != 0)) {
+ if ((new_bit_17 & 0x1) !=
+ (test_bit(m->pindex, obj->bit_17) != 0)) {
i915_gem_swizzle_page(m);
vm_page_dirty(m);
}
@@ -499,11 +502,12 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
return;
for (i = 0; i < page_count; i++) {
- char new_bit_17 = VM_PAGE_TO_PHYS(obj->pages[i]) >> 17;
+ vm_page_t page = obj->pages[i];
+ char new_bit_17 = VM_PAGE_TO_PHYS(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(obj->pages[i]);
- vm_page_dirty(obj->pages[i]);
+ i915_gem_swizzle_page(page);
+ vm_page_dirty(page);
}
}
}
@@ -516,14 +520,20 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
if (obj->bit_17 == NULL) {
obj->bit_17 = malloc(BITS_TO_LONGS(page_count) *
- sizeof(long), DRM_I915_GEM, M_WAITOK);
+ sizeof(long), DRM_I915_GEM, M_WAITOK);
+ if (obj->bit_17 == NULL) {
+ DRM_ERROR("Failed to allocate memory for bit 17 "
+ "record\n");
+ return;
+ }
}
/* XXXKIB: review locking, atomics might be not needed there */
for (i = 0; i < page_count; i++) {
- if (VM_PAGE_TO_PHYS(obj->pages[i]) & (1 << 17))
- set_bit(i, obj->bit_17);
+ vm_page_t page = obj->pages[i];
+ if (VM_PAGE_TO_PHYS(page) & (1 << 17))
+ __set_bit(i, obj->bit_17);
else
- clear_bit(i, obj->bit_17);
+ __clear_bit(i, obj->bit_17);
}
}
diff --git a/sys/dev/drm2/i915/i915_irq.c b/sys/dev/drm2/i915/i915_irq.c
index d452b69..a0ae879 100644
--- a/sys/dev/drm2/i915/i915_irq.c
+++ b/sys/dev/drm2/i915/i915_irq.c
@@ -29,8 +29,9 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
@@ -39,9 +40,6 @@ __FBSDID("$FreeBSD$");
#include <sys/sf_buf.h>
#include <sys/sleepqueue.h>
-static void i915_capture_error_state(struct drm_device *dev);
-static u32 ring_last_seqno(struct intel_ring_buffer *ring);
-
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -127,7 +125,10 @@ static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
@@ -187,6 +188,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int vbl_start, vbl_end, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -195,7 +198,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
}
/* Get vtotal. */
- vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+ vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
if (INTEL_INFO(dev)->gen >= 4) {
/* No obvious pixelcount register. Only query vertical
@@ -215,13 +218,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
*/
position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
- htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+ htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
*vpos = position / htotal;
*hpos = position - (*vpos * htotal);
}
/* Query vblank area. */
- vbl = I915_READ(VBLANK(pipe));
+ vbl = I915_READ(VBLANK(cpu_transcoder));
/* Test position against vblank region. */
vbl_start = vbl & 0x1fff;
@@ -266,9 +269,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
}
if (!crtc->enabled) {
-#if 0
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
-#endif
return -EBUSY;
}
@@ -288,8 +289,6 @@ static void i915_hotplug_work_func(void *context, int pending)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
- DRM_DEBUG("running encoder hotplug functions\n");
-
sx_xlock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -300,16 +299,23 @@ static void i915_hotplug_work_func(void *context, int pending)
sx_xunlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
-#if 0
drm_helper_hpd_irq_event(dev);
-#endif
}
-static void i915_handle_rps_change(struct drm_device *dev)
+/* defined intel_pm.c */
+extern struct mtx mchdev_lock;
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
- u8 new_delay = dev_priv->cur_delay;
+ u8 new_delay;
+
+ mtx_lock(&mchdev_lock);
+
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+ new_delay = dev_priv->ips.cur_delay;
I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -319,19 +325,21 @@ static void i915_handle_rps_change(struct drm_device *dev)
/* Handle RCS change request from hw */
if (busy_up > max_avg) {
- if (dev_priv->cur_delay != dev_priv->max_delay)
- new_delay = dev_priv->cur_delay - 1;
- if (new_delay < dev_priv->max_delay)
- new_delay = dev_priv->max_delay;
+ if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.cur_delay - 1;
+ if (new_delay < dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.max_delay;
} else if (busy_down < min_avg) {
- if (dev_priv->cur_delay != dev_priv->min_delay)
- new_delay = dev_priv->cur_delay + 1;
- if (new_delay > dev_priv->min_delay)
- new_delay = dev_priv->min_delay;
+ if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.cur_delay + 1;
+ if (new_delay > dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.min_delay;
}
if (ironlake_set_drps(dev, new_delay))
- dev_priv->cur_delay = new_delay;
+ dev_priv->ips.cur_delay = new_delay;
+
+ mtx_unlock(&mchdev_lock);
return;
}
@@ -344,11 +352,9 @@ static void notify_ring(struct drm_device *dev,
if (ring->obj == NULL)
return;
- CTR2(KTR_DRM, "request_complete %s %d", ring->name, ring->get_seqno(ring));
+ CTR2(KTR_DRM, "request_complete %s %d", ring->name, ring->get_seqno(ring, false));
- mtx_lock(&dev_priv->irq_lock);
- wakeup(ring);
- mtx_unlock(&dev_priv->irq_lock);
+ wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
callout_schedule(&dev_priv->hangcheck_timer,
@@ -358,57 +364,119 @@ static void notify_ring(struct drm_device *dev,
static void gen6_pm_rps_work(void *context, int pending)
{
- struct drm_device *dev;
drm_i915_private_t *dev_priv = context;
u32 pm_iir, pm_imr;
u8 new_delay;
- dev = dev_priv->dev;
- new_delay = dev_priv->cur_delay;
-
- mtx_lock(&dev_priv->rps_lock);
- pm_iir = dev_priv->pm_iir;
- dev_priv->pm_iir = 0;
+ mtx_lock(&dev_priv->rps.lock);
+ pm_iir = dev_priv->rps.pm_iir;
+ dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
- mtx_unlock(&dev_priv->rps_lock);
+ mtx_unlock(&dev_priv->rps.lock);
- if (!pm_iir)
+ if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
- DRM_LOCK(dev);
- if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (dev_priv->cur_delay != dev_priv->max_delay)
- new_delay = dev_priv->cur_delay + 1;
- if (new_delay > dev_priv->max_delay)
- new_delay = dev_priv->max_delay;
- } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
- gen6_gt_force_wake_get(dev_priv);
- if (dev_priv->cur_delay != dev_priv->min_delay)
- new_delay = dev_priv->cur_delay - 1;
- if (new_delay < dev_priv->min_delay) {
- new_delay = dev_priv->min_delay;
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
- ((new_delay << 16) & 0x3f0000));
- } else {
- /* Make sure we continue to get down interrupts
- * until we hit the minimum frequency */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
- }
- gen6_gt_force_wake_put(dev_priv);
+ sx_xlock(&dev_priv->rps.hw_lock);
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+ new_delay = dev_priv->rps.cur_delay + 1;
+ else
+ new_delay = dev_priv->rps.cur_delay - 1;
+
+ /* sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ if (!(new_delay > dev_priv->rps.max_delay ||
+ new_delay < dev_priv->rps.min_delay)) {
+ gen6_set_rps(dev_priv->dev, new_delay);
}
- gen6_set_rps(dev, new_delay);
- dev_priv->cur_delay = new_delay;
+ sx_xunlock(&dev_priv->rps.hw_lock);
+}
- /*
- * rps_lock not held here because clearing is non-destructive. There is
- * an *extremely* unlikely race with gen6_rps_enable() that is prevented
- * by holding struct_mutex for the duration of the write.
+
+/**
+ * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * occurred.
+ * @work: workqueue struct
+ *
+ * Doesn't actually do anything except notify userspace. As a consequence of
+ * this event, userspace should try to remap the bad rows since statistically
+ * it is likely the same row is more likely to go bad again.
+ */
+static void ivybridge_parity_work(void *context, int pending)
+{
+ drm_i915_private_t *dev_priv = context;
+ u32 error_status, row, bank, subbank;
+#ifdef __linux__
+ char *parity_event[5];
+#endif
+ uint32_t misccpctl;
+
+ /* We must turn off DOP level clock gating to access the L3 registers.
+ * In order to prevent a get/put style interface, acquire struct mutex
+ * any time we access those registers.
*/
- DRM_UNLOCK(dev);
+ DRM_LOCK(dev_priv->dev);
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ POSTING_READ(GEN7_MISCCPCTL);
+
+ error_status = I915_READ(GEN7_L3CDERRST1);
+ row = GEN7_PARITY_ERROR_ROW(error_status);
+ bank = GEN7_PARITY_ERROR_BANK(error_status);
+ subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+ I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
+ GEN7_L3CDERRST1_ENABLE);
+ POSTING_READ(GEN7_L3CDERRST1);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+ mtx_lock(&dev_priv->irq_lock);
+ dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ mtx_unlock(&dev_priv->irq_lock);
+
+ DRM_UNLOCK(dev_priv->dev);
+
+#ifdef __linux__
+ parity_event[0] = "L3_PARITY_ERROR=1";
+ parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+ parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+ parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+ parity_event[4] = NULL;
+
+ kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
+ KOBJ_CHANGE, parity_event);
+#endif
+
+ DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
+ row, bank, subbank);
+
+#ifdef __linux__
+ kfree(parity_event[3]);
+ kfree(parity_event[2]);
+ kfree(parity_event[1]);
+#endif
+}
+
+static void ivybridge_handle_parity_error(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!HAS_L3_GPU_CACHE(dev))
+ return;
+
+ mtx_lock(&dev_priv->irq_lock);
+ dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ mtx_unlock(&dev_priv->irq_lock);
+
+ taskqueue_enqueue(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
@@ -418,11 +486,11 @@ static void snb_gt_irq_handler(struct drm_device *dev,
if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
- notify_ring(dev, &dev_priv->rings[RCS]);
+ notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & GEN6_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->rings[VCS]);
+ notify_ring(dev, &dev_priv->ring[VCS]);
if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->rings[BCS]);
+ notify_ring(dev, &dev_priv->ring[BCS]);
if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
GT_GEN6_BSD_CS_ERROR_INTERRUPT |
@@ -430,6 +498,9 @@ static void snb_gt_irq_handler(struct drm_device *dev,
DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
i915_handle_error(dev, false);
}
+
+ if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
+ ivybridge_handle_parity_error(dev);
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
@@ -440,21 +511,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
- * dev_priv->pm_iir. Although missing an interrupt of the same
+ * dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it displays a problem in the logic.
*
- * The mask bit in IMR is cleared by rps_work.
+ * The mask bit in IMR is cleared by dev_priv->rps.work.
*/
- mtx_lock(&dev_priv->rps_lock);
- if (dev_priv->pm_iir & pm_iir)
- printf("Missed a PM interrupt\n");
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ mtx_lock(&dev_priv->rps.lock);
+ dev_priv->rps.pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
- mtx_unlock(&dev_priv->rps_lock);
+ mtx_unlock(&dev_priv->rps.lock);
- taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
+ taskqueue_enqueue(dev_priv->wq, &dev_priv->rps.work);
}
static void valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -464,15 +533,10 @@ static void valleyview_irq_handler(DRM_IRQ_ARGS)
u32 iir, gt_iir, pm_iir;
int pipe;
u32 pipe_stats[I915_MAX_PIPES];
- u32 vblank_status;
- int vblank = 0;
bool blc_event;
atomic_inc(&dev_priv->irq_received);
- vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
- PIPE_VBLANK_INTERRUPT_STATUS;
-
while (true) {
iir = I915_READ(VLV_IIR);
gt_iir = I915_READ(GTIIR);
@@ -500,6 +564,16 @@ static void valleyview_irq_handler(DRM_IRQ_ARGS)
}
mtx_unlock(&dev_priv->irq_lock);
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+ }
+
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
@@ -507,26 +581,13 @@ static void valleyview_irq_handler(DRM_IRQ_ARGS)
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
- taskqueue_enqueue(dev_priv->tq,
- &dev_priv->hotplug_task);
+ taskqueue_enqueue(dev_priv->wq,
+ &dev_priv->hotplug_work);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
-
- if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
- drm_handle_vblank(dev, 0);
- vblank++;
- intel_finish_page_flip(dev, 0);
- }
-
- if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
- drm_handle_vblank(dev, 1);
- vblank++;
- intel_finish_page_flip(dev, 0);
- }
-
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -542,11 +603,14 @@ out:
return;
}
-static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
+ if (pch_iir & SDE_HOTPLUG_MASK)
+ taskqueue_enqueue(dev_priv->wq, &dev_priv->hotplug_work);
+
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -582,6 +646,38 @@ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ taskqueue_enqueue(dev_priv->wq, &dev_priv->hotplug_work);
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
+ DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+ (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+ SDE_AUDIO_POWER_SHIFT_CPT);
+
+ if (pch_iir & SDE_AUX_MASK_CPT)
+ DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+
+ if (pch_iir & SDE_GMBUS_CPT)
+ DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+ DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+ DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK_CPT)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+}
+
static void ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -594,7 +690,6 @@ static void ivybridge_irq_handler(DRM_IRQ_ARGS)
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
@@ -608,22 +703,19 @@ static void ivybridge_irq_handler(DRM_IRQ_ARGS)
intel_opregion_gse_intr(dev);
for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
}
- if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
- drm_handle_vblank(dev, i);
}
/* check event from PCH */
if (de_iir & DE_PCH_EVENT_IVB) {
u32 pch_iir = I915_READ(SDEIIR);
- if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- taskqueue_enqueue(dev_priv->tq,
- &dev_priv->hotplug_task);
- pch_irq_handler(dev, pch_iir);
+ cpt_irq_handler(dev, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
@@ -651,9 +743,9 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
u32 gt_iir)
{
if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
- notify_ring(dev, &dev_priv->rings[RCS]);
+ notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->rings[VCS]);
+ notify_ring(dev, &dev_priv->ring[VCS]);
}
static void ironlake_irq_handler(DRM_IRQ_ARGS)
@@ -661,7 +753,6 @@ static void ironlake_irq_handler(DRM_IRQ_ARGS)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
- u32 hotplug_mask;
atomic_inc(&dev_priv->irq_received);
@@ -682,11 +773,6 @@ static void ironlake_irq_handler(DRM_IRQ_ARGS)
(!IS_GEN6(dev) || pm_iir == 0))
goto done;
- if (HAS_PCH_CPT(dev))
- hotplug_mask = SDE_HOTPLUG_MASK_CPT;
- else
- hotplug_mask = SDE_HOTPLUG_MASK;
-
if (IS_GEN5(dev))
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
else
@@ -695,6 +781,12 @@ static void ironlake_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip_plane(dev, 0);
@@ -705,24 +797,16 @@ static void ironlake_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip_plane(dev, 1);
}
- if (de_iir & DE_PIPEA_VBLANK)
- drm_handle_vblank(dev, 0);
-
- if (de_iir & DE_PIPEB_VBLANK)
- drm_handle_vblank(dev, 1);
-
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- if (pch_iir & hotplug_mask)
- taskqueue_enqueue(dev_priv->tq,
- &dev_priv->hotplug_task);
- pch_irq_handler(dev, pch_iir);
+ if (HAS_PCH_CPT(dev))
+ cpt_irq_handler(dev, pch_iir);
+ else
+ ibx_irq_handler(dev, pch_iir);
}
- if (de_iir & DE_PCU_EVENT) {
- I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
- i915_handle_rps_change(dev);
- }
+ if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+ ironlake_handle_rps_change(dev);
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
@@ -749,23 +833,59 @@ static void i915_error_work_func(void *context, int pending)
{
drm_i915_private_t *dev_priv = context;
struct drm_device *dev = dev_priv->dev;
+#ifdef __linux__
+ char *error_event[] = { "ERROR=1", NULL };
+ char *reset_event[] = { "RESET=1", NULL };
+ char *reset_done_event[] = { "ERROR=0", NULL };
- /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+#endif
- if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
DRM_DEBUG_DRIVER("resetting chip\n");
- /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
+#ifdef __linux__
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+#endif
if (!i915_reset(dev)) {
- atomic_store_rel_int(&dev_priv->mm.wedged, 0);
- /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
+ atomic_set(&dev_priv->mm.wedged, 0);
+#ifdef __linux__
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+#endif
}
- mtx_lock(&dev_priv->error_completion_lock);
- dev_priv->error_completion++;
- wakeup(&dev_priv->error_completion);
- mtx_unlock(&dev_priv->error_completion_lock);
+ complete_all(&dev_priv->error_completion);
+ }
+}
+
+/* NB: please notice the memset */
+static void i915_get_extra_instdone(struct drm_device *dev,
+ uint32_t *instdone)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+ switch(INTEL_INFO(dev)->gen) {
+ case 2:
+ case 3:
+ instdone[0] = I915_READ(INSTDONE);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ instdone[0] = I915_READ(INSTDONE_I965);
+ instdone[1] = I915_READ(INSTDONE1);
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported platform\n");
+ case 7:
+ instdone[0] = I915_READ(GEN7_INSTDONE_1);
+ instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+ instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+ break;
}
}
+//#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src)
@@ -793,16 +913,17 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
src->has_global_gtt_mapping) {
- void *s;
+ void __iomem *s;
/* Simply ignore tiling or any overlapping fence.
* It's part of the error state, and this hopefully
* captures what the GPU read.
*/
- s = pmap_mapdev_attr(src->base.dev->agp->base +
+
+ s = pmap_mapdev_attr(dev_priv->mm.gtt_base_addr +
reloc_offset,
PAGE_SIZE, PAT_WRITE_COMBINING);
- memcpy(d, s, PAGE_SIZE);
+ memcpy_fromio(d, s, PAGE_SIZE);
pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
} else {
struct sf_buf *sf;
@@ -871,13 +992,13 @@ i915_error_state_free(struct drm_i915_error_state *error)
free(error->overlay, DRM_I915_GEM);
free(error, DRM_I915_GEM);
}
-
static void capture_bo(struct drm_i915_error_buffer *err,
struct drm_i915_gem_object *obj)
{
err->size = obj->base.size;
err->name = obj->base.name;
- err->seqno = obj->last_rendering_seqno;
+ err->rseqno = obj->last_read_seqno;
+ err->wseqno = obj->last_write_seqno;
err->gtt_offset = obj->gtt_offset;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
@@ -967,12 +1088,24 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (!ring->get_seqno)
return NULL;
- seqno = ring->get_seqno(ring);
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+ u32 acthd = I915_READ(ACTHD);
+
+ if (WARN_ON(ring->id != RCS))
+ return NULL;
+
+ obj = ring->private;
+ if (acthd >= obj->gtt_offset &&
+ acthd < obj->gtt_offset + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+
+ seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
continue;
- if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -994,11 +1127,14 @@ static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
+ error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
+ error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+ error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1007,10 +1143,8 @@ static void i915_record_ring_state(struct drm_device *dev,
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- if (ring->id == RCS) {
- error->instdone1 = I915_READ(INSTDONE1);
+ if (ring->id == RCS)
error->bbaddr = I915_READ64(BB_ADDR);
- }
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -1018,14 +1152,15 @@ static void i915_record_ring_state(struct drm_device *dev,
error->instdone[ring->id] = I915_READ(INSTDONE);
}
- sleepq_lock(ring);
- error->waiting[ring->id] = sleepq_sleepcnt(ring, 0) != 0;
- sleepq_release(ring);
+ sleepq_lock(&ring->irq_queue);
+ error->waiting[ring->id] = sleepq_sleepcnt(&ring->irq_queue, 0) != 0;
+ sleepq_release(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
- error->seqno[ring->id] = ring->get_seqno(ring);
+ error->seqno[ring->id] = ring->get_seqno(ring, false);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
+ error->ctl[ring->id] = I915_READ_CTL(ring);
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
@@ -1073,6 +1208,15 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
}
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
static void i915_capture_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1099,6 +1243,7 @@ static void i915_capture_error_state(struct drm_device *dev)
refcount_init(&error->ref, 1);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
+ error->ccid = I915_READ(CCID);
if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1109,6 +1254,16 @@ static void i915_capture_error_state(struct drm_device *dev)
else
error->ier = I915_READ(IER);
+ if (INTEL_INFO(dev)->gen >= 6)
+ error->derrmr = I915_READ(DERRMR);
+
+ if (IS_VALLEYVIEW(dev))
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ else if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+ else if (INTEL_INFO(dev)->gen == 6)
+ error->forcewake = I915_READ(FORCEWAKE);
+
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
@@ -1117,6 +1272,11 @@ static void i915_capture_error_state(struct drm_device *dev)
error->done_reg = I915_READ(DONE_REG);
}
+ if (INTEL_INFO(dev)->gen == 7)
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
@@ -1128,7 +1288,7 @@ static void i915_capture_error_state(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, mm_list)
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
i++;
error->pinned_bo_count = i - error->active_bo_count;
@@ -1153,7 +1313,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error->pinned_bo_count =
capture_pinned_bo(error->pinned_bo,
error->pinned_bo_count,
- &dev_priv->mm.gtt_list);
+ &dev_priv->mm.bound_list);
microtime(&error->time);
@@ -1184,19 +1344,23 @@ void i915_destroy_error_state(struct drm_device *dev)
if (error && refcount_release(&error->ref))
i915_error_state_free(error);
}
-
-#define pr_err(...) printf(__VA_ARGS__)
+//#else
+//#define i915_capture_error_state(x)
+//#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
- int pipe;
+ int pipe, i;
if (!eir)
return;
- printf("i915: render error detected, EIR: 0x%08x\n", eir);
+ pr_err("render error detected, EIR: 0x%08x\n", eir);
+
+ i915_get_extra_instdone(dev, instdone);
if (IS_G4X(dev)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
@@ -1204,10 +1368,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
@@ -1241,12 +1404,13 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (eir & I915_ERROR_INSTRUCTION) {
pr_err("instruction error\n");
pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
- pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
POSTING_READ(IPEIR);
@@ -1255,10 +1419,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
@@ -1299,23 +1460,17 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
i915_report_and_clear_eir(dev);
if (wedged) {
- mtx_lock(&dev_priv->error_completion_lock);
- dev_priv->error_completion = 0;
- dev_priv->mm.wedged = 1;
- /* unlock acts as rel barrier for store to wedged */
- mtx_unlock(&dev_priv->error_completion_lock);
+ INIT_COMPLETION(dev_priv->error_completion);
+ atomic_set(&dev_priv->mm.wedged, 1);
/*
* Wakeup waiting processes so they don't hang
*/
- for_each_ring(ring, dev_priv, i) {
- mtx_lock(&dev_priv->irq_lock);
- wakeup(ring);
- mtx_unlock(&dev_priv->irq_lock);
- }
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
}
- taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task);
+ taskqueue_enqueue(dev_priv->wq, &dev_priv->error_work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -1335,7 +1490,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
work = intel_crtc->unpin_work;
if (work == NULL ||
- work->pending ||
+ atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
!work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
mtx_unlock(&dev->event_lock);
@@ -1425,23 +1580,20 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 dpfl, imr;
+ u32 imr;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
mtx_lock(&dev_priv->irq_lock);
- dpfl = I915_READ(VLV_DPFLIPSTAT);
imr = I915_READ(VLV_IMR);
- if (pipe == 0) {
- dpfl |= PIPEA_VBLANK_INT_EN;
+ if (pipe == 0)
imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- } else {
- dpfl |= PIPEA_VBLANK_INT_EN;
+ else
imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- }
- I915_WRITE(VLV_DPFLIPSTAT, dpfl);
I915_WRITE(VLV_IMR, imr);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
mtx_unlock(&dev_priv->irq_lock);
return 0;
@@ -1490,49 +1642,43 @@ static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 dpfl, imr;
+ u32 imr;
mtx_lock(&dev_priv->irq_lock);
- dpfl = I915_READ(VLV_DPFLIPSTAT);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
imr = I915_READ(VLV_IMR);
- if (pipe == 0) {
- dpfl &= ~PIPEA_VBLANK_INT_EN;
+ if (pipe == 0)
imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- } else {
- dpfl &= ~PIPEB_VBLANK_INT_EN;
+ else
imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- }
I915_WRITE(VLV_IMR, imr);
- I915_WRITE(VLV_DPFLIPSTAT, dpfl);
mtx_unlock(&dev_priv->irq_lock);
+ CTR2(KTR_DRM, "%s %d", __func__, pipe);
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
-
- if (list_empty(&ring->request_list))
- return (0);
- else
- return (list_entry(ring->request_list.prev,
- struct drm_i915_gem_request, list)->seqno);
+ return list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request, list)->seqno;
}
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
if (list_empty(&ring->request_list) ||
- i915_seqno_passed(ring->get_seqno(ring),
+ i915_seqno_passed(ring->get_seqno(ring, false),
ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */
- sleepq_lock(ring);
- if (sleepq_sleepcnt(ring, 0) != 0) {
- sleepq_release(ring);
+ sleepq_lock(&ring->irq_queue);
+ if (sleepq_sleepcnt(&ring->irq_queue, 0) != 0) {
+ sleepq_release(&ring->irq_queue);
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
ring->name);
- wakeup(ring);
+ wake_up_all(&ring->irq_queue);
*err = true;
} else
- sleepq_release(ring);
+ sleepq_release(&ring->irq_queue);
return true;
}
return false;
@@ -1591,7 +1737,7 @@ void i915_hangcheck_elapsed(void *data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+ uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
struct intel_ring_buffer *ring;
bool err = false, idle;
int i;
@@ -1619,24 +1765,16 @@ void i915_hangcheck_elapsed(void *data)
return;
}
- if (INTEL_INFO(dev)->gen < 4) {
- instdone = I915_READ(INSTDONE);
- instdone1 = 0;
- } else {
- instdone = I915_READ(INSTDONE_I965);
- instdone1 = I915_READ(INSTDONE1);
- }
+ i915_get_extra_instdone(dev, instdone);
if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
- dev_priv->last_instdone == instdone &&
- dev_priv->last_instdone1 == instdone1) {
+ memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
if (i915_hangcheck_hung(dev))
return;
} else {
dev_priv->hangcheck_count = 0;
memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
- dev_priv->last_instdone = instdone;
- dev_priv->last_instdone1 = instdone1;
+ memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
}
repeat:
@@ -1814,13 +1952,13 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_PIPEA_VBLANK_IVB);
POSTING_READ(DEIER);
- dev_priv->gt_irq_mask = ~0;
+ dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
- GEN6_BLITTER_USER_INTERRUPT;
+ GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
@@ -1843,26 +1981,35 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 render_irqs;
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+ u32 render_irqs;
u16 msid;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
- enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- dev_priv->irq_mask = ~enable_mask;
+ /*
+ *Leave vblank interrupts masked initially. enable/disable will
+ * toggle them based on usage.
+ */
+ dev_priv->irq_mask = (~enable_mask) |
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
/* Hack for broken MSIs on VLV */
- pci_write_config(dev->dev, 0x94, 0xfee00000, 4);
- msid = pci_read_config(dev->dev, 0x98, 2);
+ pci_write_config_dword(dev->dev, 0x94, 0xfee00000);
+ pci_read_config_word(dev->dev, 0x98, &msid);
msid &= 0xff; /* mask out delivery bits */
msid |= (1<<14);
- pci_write_config(dev->dev, 0x98, msid, 2);
+ pci_write_config_word(dev->dev, 0x98, msid);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
@@ -1871,25 +2018,17 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
+ i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+ i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
- render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
- GT_GEN6_BLT_CS_ERROR_INTERRUPT |
- GT_GEN6_BLT_USER_INTERRUPT |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_GEN6_BSD_CS_ERROR_INTERRUPT |
- GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
- GT_PIPE_NOTIFY |
- GT_RENDER_CS_ERROR_INTERRUPT |
- GT_SYNC_STATUS |
- GT_USER_INTERRUPT;
-
- dev_priv->gt_irq_mask = ~render_irqs;
-
- I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, 0);
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+ render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
@@ -1900,7 +2039,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
-#if 0 /* FIXME: check register definitions; some have moved */
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -1908,15 +2046,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
-#endif
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
@@ -2061,7 +2198,7 @@ static void i8xx_irq_handler(DRM_IRQ_ARGS)
i915_update_dri1_breadcrumb(dev);
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->rings[RCS]);
+ notify_ring(dev, &dev_priv->ring[RCS]);
if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
drm_handle_vblank(dev, 0)) {
@@ -2166,9 +2303,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
@@ -2241,8 +2378,8 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
- taskqueue_enqueue(dev_priv->tq,
- &dev_priv->hotplug_task);
+ taskqueue_enqueue(dev_priv->wq,
+ &dev_priv->hotplug_work);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
POSTING_READ(PORT_HOTPLUG_STAT);
@@ -2252,7 +2389,7 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->rings[RCS]);
+ notify_ring(dev, &dev_priv->ring[RCS]);
for_each_pipe(pipe) {
int plane = pipe;
@@ -2274,7 +2411,6 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
-
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
* set while we were handling the existing iir bits, then
@@ -2301,9 +2437,6 @@ static void i915_irq_uninstall(struct drm_device * dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
- if (!dev_priv)
- return;
-
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -2328,10 +2461,8 @@ static void i965_irq_preinstall(struct drm_device * dev)
atomic_set(&dev_priv->irq_received, 0);
- if (I915_HAS_HOTPLUG(dev)) {
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- }
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xeffe);
for_each_pipe(pipe)
@@ -2344,11 +2475,13 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en;
u32 enable_mask;
u32 error_mask;
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -2364,13 +2497,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
- if (I915_HAS_HOTPLUG(dev)) {
- /* Enable in IER... */
- enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
- /* and unmask in IMR */
- dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
- }
-
/*
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
@@ -2390,36 +2516,40 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
- if (I915_HAS_HOTPLUG(dev)) {
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
- /* Note HDMI and DP share bits */
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ /* Note HDMI and DP share hotplug bits */
+ hotplug_en = 0;
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (IS_G4X(dev)) {
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
- hotplug_en |= CRT_HOTPLUG_INT_EN;
+ } else {
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ }
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
- /* Programming the CRT detection parameters tends
- to generate a spurious hotplug event about three
- seconds later. So just do it once.
- */
- if (IS_G4X(dev))
- hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- }
+ /* Programming the CRT detection parameters tends
+ to generate a spurious hotplug event about three
+ seconds later. So just do it once.
+ */
+ if (IS_G4X(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
- /* Ignore TV since it's buggy */
+ /* Ignore TV since it's buggy */
- I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- }
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
intel_opregion_enable_asle(dev);
@@ -2474,15 +2604,14 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
break;
/* Consume port. Then clear IIR or we'll miss events */
- if ((I915_HAS_HOTPLUG(dev)) &&
- (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
- taskqueue_enqueue(dev_priv->tq,
- &dev_priv->hotplug_task);
+ taskqueue_enqueue(dev_priv->wq,
+ &dev_priv->hotplug_work);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
@@ -2492,9 +2621,9 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->rings[RCS]);
+ notify_ring(dev, &dev_priv->ring[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->rings[VCS]);
+ notify_ring(dev, &dev_priv->ring[VCS]);
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
intel_prepare_page_flip(dev, 0);
@@ -2543,10 +2672,11 @@ static void i965_irq_uninstall(struct drm_device * dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- }
+ if (!dev_priv)
+ return;
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xffffffff);
for_each_pipe(pipe)
@@ -2564,12 +2694,10 @@ void intel_irq_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
- dev->dev_private);
- TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
- dev->dev_private);
- TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work,
- dev->dev_private);
+ TASK_INIT(&dev_priv->hotplug_work, 0, i915_hotplug_work_func, dev->dev_private);
+ TASK_INIT(&dev_priv->error_work, 0, i915_error_work_func, dev->dev_private);
+ TASK_INIT(&dev_priv->rps.work, 0, gen6_pm_rps_work, dev->dev_private);
+ TASK_INIT(&dev_priv->l3_parity.error_work, 0, ivybridge_parity_work, dev->dev_private);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -2621,9 +2749,6 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
} else if (INTEL_INFO(dev)->gen == 3) {
- /* IIR "flip pending" means done if this bit is set */
- I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
-
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
diff --git a/sys/dev/drm2/i915/i915_reg.h b/sys/dev/drm2/i915/i915_reg.h
index 20c2aa7..80a7c7a 100644
--- a/sys/dev/drm2/i915/i915_reg.h
+++ b/sys/dev/drm2/i915/i915_reg.h
@@ -117,21 +117,6 @@ __FBSDID("$FreeBSD$");
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-
-#define GEN6_PDE_VALID (1 << 0)
-#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
-#define GEN6_PTE_CACHE_BITS (3 << 1)
-#define GEN6_PTE_GFDT (1 << 3)
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@@ -740,10 +725,6 @@ __FBSDID("$FreeBSD$");
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
#define GEN6_BSD_GO_INDICATOR (1 << 4)
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
@@ -1682,21 +1663,19 @@ __FBSDID("$FreeBSD$");
#define PORT_HOTPLUG_STAT 0x61114
/* HDMI/DP bits are gen4+ */
-#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
-#define DPB_HOTPLUG_INT_STATUS (1 << 29)
-#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
-#define DPC_HOTPLUG_INT_STATUS (1 << 28)
-#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
-#define DPD_HOTPLUG_INT_STATUS (1 << 27)
+#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
+#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define DPD_HOTPLUG_INT_STATUS (3 << 21)
+#define DPC_HOTPLUG_INT_STATUS (3 << 19)
+#define DPB_HOTPLUG_INT_STATUS (3 << 17)
/* HDMI bits are shared with the DP bits */
-/*
#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
-*/
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
@@ -1704,8 +1683,6 @@ __FBSDID("$FreeBSD$");
#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
-#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
-#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
/* SDVO is different across gen3/4 */
#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
@@ -3307,12 +3284,6 @@ __FBSDID("$FreeBSD$");
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
-#define PCH_DSPCLK_GATE_D 0x42020
-# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
-# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
-# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
-
#define PCH_3DCGDIS0 0x46020
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -3486,15 +3457,6 @@ __FBSDID("$FreeBSD$");
#define ILK_HDCP_DISABLE (1<<25)
#define ILK_eDP_A_DISABLE (1<<24)
#define ILK_DESKTOP (1<<23)
-#define ILK_DSPCLK_GATE 0x42020
-#define IVB_VRHUNIT_CLK_GATE (1<<28)
-#define ILK_DPARB_CLK_GATE (1<<5)
-#define ILK_DPFD_CLK_GATE (1<<7)
-
-/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
-#define ILK_CLK_FBC (1<<7)
-#define ILK_DPFC_DIS1 (1<<8)
-#define ILK_DPFC_DIS2 (1<<9)
#define ILK_DSPCLK_GATE_D 0x42020
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
@@ -3763,7 +3725,7 @@ __FBSDID("$FreeBSD$");
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define VLV_VIDEO_DIP_CTL_A 0x60220
+#define VLV_VIDEO_DIP_CTL_A 0x60200
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
@@ -3879,7 +3841,6 @@ __FBSDID("$FreeBSD$");
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
@@ -4030,33 +3991,7 @@ __FBSDID("$FreeBSD$");
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
-/* CRT */
-#define PCH_ADPA 0xe1100
-#define ADPA_TRANS_SELECT_MASK (1<<30)
-#define ADPA_TRANS_A_SELECT 0
-#define ADPA_TRANS_B_SELECT (1<<30)
-#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-
/* or SDVOB */
-#define VLV_HDMIB 0x61140
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER(pipe) ((pipe) << 30)
@@ -4100,21 +4035,6 @@ __FBSDID("$FreeBSD$");
#define PIPEB_PP_OFF_DELAYS 0x6130c
#define PIPEB_PP_DIVISOR 0x61310
-#define BLC_PWM_CPU_CTL2 0x48250
-#define PWM_ENABLE (1 << 31)
-#define PWM_PIPE_A (0 << 29)
-#define PWM_PIPE_B (1 << 29)
-#define BLC_PWM_CPU_CTL 0x48254
-
-#define BLC_PWM_PCH_CTL1 0xc8250
-#define PWM_PCH_ENABLE (1 << 31)
-#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
-#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
-#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
-#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
-
-#define BLC_PWM_PCH_CTL2 0xc8254
-
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
@@ -4701,15 +4621,6 @@ __FBSDID("$FreeBSD$");
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
-/* Pipe clock selection */
-#define PIPE_CLK_SEL_A 0x46140
-#define PIPE_CLK_SEL_B 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
- PIPE_CLK_SEL_A, \
- PIPE_CLK_SEL_B)
-/* For each pipe, we need to select the corresponding port clock */
-#define PIPE_CLK_SEL_DISABLED (0x0<<29)
-#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
#define _TRANSA_MSA_MISC 0x60410
#define _TRANSB_MSA_MISC 0x61410
#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
diff --git a/sys/dev/drm2/i915/i915_suspend.c b/sys/dev/drm2/i915/i915_suspend.c
index 384a1d1..4ee9749 100644
--- a/sys/dev/drm2/i915/i915_suspend.c
+++ b/sys/dev/drm2/i915/i915_suspend.c
@@ -28,9 +28,9 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/i915/i915_reg.h>
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
@@ -63,9 +63,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
+ array = dev_priv->regfile.save_palette_a;
else
- array = dev_priv->save_palette_b;
+ array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
@@ -85,9 +85,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
+ array = dev_priv->regfile.save_palette_a;
else
- array = dev_priv->save_palette_b;
+ array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
@@ -134,11 +134,11 @@ static void i915_save_vga(struct drm_device *dev)
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
- dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
+ dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
/* MSR bits */
- dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -153,35 +153,35 @@ static void i915_save_vga(struct drm_device *dev)
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
- dev_priv->saveCR[i] =
+ dev_priv->regfile.saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
- dev_priv->saveCR[0x11] &= ~0x80;
+ dev_priv->regfile.saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
- dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+ dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
- dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
+ dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
- dev_priv->saveGR[i] =
+ dev_priv->regfile.saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
- dev_priv->saveGR[0x10] =
+ dev_priv->regfile.saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->saveGR[0x11] =
+ dev_priv->regfile.saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->saveGR[0x18] =
+ dev_priv->regfile.saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
- dev_priv->saveSR[i] =
+ dev_priv->regfile.saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
@@ -192,8 +192,8 @@ static void i915_restore_vga(struct drm_device *dev)
u16 cr_index, cr_data, st01;
/* MSR bits */
- I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -206,36 +206,36 @@ static void i915_restore_vga(struct drm_device *dev)
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->saveSR[i]);
+ dev_priv->regfile.saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
- i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
- i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->saveGR[i]);
+ dev_priv->regfile.saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->saveGR[0x10]);
+ dev_priv->regfile.saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->saveGR[0x11]);
+ dev_priv->regfile.saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->saveGR[0x18]);
+ dev_priv->regfile.saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
- i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
+ i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
- I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
+ I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
static void i915_save_modeset_reg(struct drm_device *dev)
@@ -247,156 +247,162 @@ static void i915_save_modeset_reg(struct drm_device *dev)
return;
/* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->saveCURABASE = I915_READ(_CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
+ dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+ dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
/* Pipe & plane A info */
- dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
+ dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
- dev_priv->saveFPA0 = I915_READ(_FPA0);
- dev_priv->saveFPA1 = I915_READ(_FPA1);
- dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
+ dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
+ dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+ dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
- dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
- dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
- dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
- dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
- dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
- dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
- }
-
- dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
+ dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
- dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
+ dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
- dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
+ dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
- dev_priv->saveFPB0 = I915_READ(_FPB0);
- dev_priv->saveFPB1 = I915_READ(_FPB1);
- dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
+ dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
+ dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+ dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
- dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
- dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
- dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
- dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
- dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
- dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
- }
-
- dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
+ dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
- dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+ dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+ else
+ dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
return;
}
@@ -415,20 +421,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
case 7:
case 6:
for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
break;
}
@@ -450,158 +456,164 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
}
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
}
/* Pipe & plane A info */
/* Prime the clock */
- if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
+ if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_a_reg);
udelay(150);
}
- I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
+ I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
/* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
+ I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
- I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
- I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+ I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
- I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
- I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
- I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
- I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
- I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
- I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
- I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+ I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
}
/* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
}
- I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
+ I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+ I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
+ if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_b_reg);
udelay(150);
}
- I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
+ I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
/* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
+ I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
- I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
- I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+ I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
- I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
- I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
- I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
- I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
- I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
- I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
- I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+ I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
}
/* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
}
- I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
+ I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
+ I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+ I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
return;
}
@@ -611,89 +623,84 @@ static void i915_save_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
- dev_priv->saveDSPARB = I915_READ(DSPARB);
+ dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
- /* Don't save them in KMS mode */
+ /* Don't regfile.save them in KMS mode */
i915_save_modeset_reg(dev);
- /* CRT state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveADPA = I915_READ(PCH_ADPA);
- } else {
- dev_priv->saveADPA = I915_READ(ADPA);
- }
-
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- dev_priv->saveLVDS = I915_READ(PCH_LVDS);
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else {
- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->saveLVDS = I915_READ(LVDS);
+ dev_priv->regfile.saveLVDS = I915_READ(LVDS);
}
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
} else {
- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
- }
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->saveDP_B = I915_READ(DP_B);
- dev_priv->saveDP_C = I915_READ(DP_C);
- dev_priv->saveDP_D = I915_READ(DP_D);
- dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
- dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
- dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
- dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
- dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
- dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
- dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
- dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
- }
- /* FIXME: save TV & SDVO state */
-
- /* Only save FBC state on the platform that supports FBC */
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+ dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+ dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+ }
+ /* FIXME: regfile.save TV & SDVO state */
+ }
+
+ /* Only regfile.save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
} else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
}
/* VGA state */
- dev_priv->saveVGA0 = I915_READ(VGA0);
- dev_priv->saveVGA1 = I915_READ(VGA1);
- dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+ dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+ dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
if (HAS_PCH_SPLIT(dev))
- dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
i915_save_vga(dev);
}
@@ -703,94 +710,95 @@ static void i915_restore_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration */
- I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+ }
}
/* This is only meaningful in non-KMS mode */
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->saveADPA);
-
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
+ I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
- I915_WRITE(LVDS, dev_priv->saveLVDS);
+ I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+ I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
- I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+ * otherwise we get blank eDP screen after S3 on some machines
+ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
I915_WRITE(RSTDBYCTL,
- dev_priv->saveMCHBAR_RENDER_STANDBY);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
- }
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->saveDP_B);
- I915_WRITE(DP_C, dev_priv->saveDP_C);
- I915_WRITE(DP_D, dev_priv->saveDP_D);
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+ I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+ I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
}
- /* FIXME: restore TV & SDVO state */
/* only restore FBC info on the platform that supports FBC*/
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
} else {
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
}
}
/* VGA state */
if (HAS_PCH_SPLIT(dev))
- I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+ I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
else
- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+ I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
- I915_WRITE(VGA0, dev_priv->saveVGA0);
- I915_WRITE(VGA1, dev_priv->saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+ I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+ I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
POSTING_READ(VGA_PD);
udelay(150);
@@ -802,49 +810,45 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- dev_priv->saveLBB = pci_read_config(dev->dev, LBB, 1);
-
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
+ pci_read_config_byte(dev->dev, LBB, &dev_priv->regfile.saveLBB);
DRM_LOCK(dev);
i915_save_display(dev);
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveDEIER = I915_READ(DEIER);
- dev_priv->saveDEIMR = I915_READ(DEIMR);
- dev_priv->saveGTIER = I915_READ(GTIER);
- dev_priv->saveGTIMR = I915_READ(GTIMR);
- dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
- dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
- dev_priv->saveMCHBAR_RENDER_STANDBY =
- I915_READ(RSTDBYCTL);
- dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
- } else {
- dev_priv->saveIER = I915_READ(IER);
- dev_priv->saveIMR = I915_READ(IMR);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveDEIER = I915_READ(DEIER);
+ dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
+ dev_priv->regfile.saveGTIER = I915_READ(GTIER);
+ dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
+ dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
+ I915_READ(RSTDBYCTL);
+ dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+ } else {
+ dev_priv->regfile.saveIER = I915_READ(IER);
+ dev_priv->regfile.saveIMR = I915_READ(IMR);
+ }
}
- if (IS_IRONLAKE_M(dev))
- ironlake_disable_drps(dev);
- if (INTEL_INFO(dev)->gen >= 6)
- gen6_disable_rps(dev);
+ intel_disable_gt_powersave(dev);
/* Cache mode state */
- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+ dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+ dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+ dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
DRM_UNLOCK(dev);
@@ -856,45 +860,44 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_write_config(dev->dev, LBB, dev_priv->saveLBB, 1);
+ pci_write_config_byte(dev->dev, LBB, dev_priv->regfile.saveLBB);
DRM_LOCK(dev);
- /* Hardware status page */
- I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
i915_restore_display(dev);
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DEIER, dev_priv->saveDEIER);
- I915_WRITE(DEIMR, dev_priv->saveDEIMR);
- I915_WRITE(GTIER, dev_priv->saveGTIER);
- I915_WRITE(GTIMR, dev_priv->saveGTIMR);
- I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
- I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
- I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
- } else {
- I915_WRITE(IER, dev_priv->saveIER);
- I915_WRITE(IMR, dev_priv->saveIMR);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
+ I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
+ I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
+ I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+ } else {
+ I915_WRITE(IER, dev_priv->regfile.saveIER);
+ I915_WRITE(IMR, dev_priv->regfile.saveIMR);
+ }
}
/* Cache mode state */
- I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+ I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
- I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+ I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
- I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
- I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+ I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
DRM_UNLOCK(dev);
- intel_iic_reset(dev);
+ intel_i2c_reset(dev);
return 0;
}
diff --git a/sys/dev/drm2/i915/intel_acpi.c b/sys/dev/drm2/i915/intel_acpi.c
new file mode 100644
index 0000000..19affc7
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_acpi.c
@@ -0,0 +1,256 @@
+/*
+ * Intel ACPI functions
+ *
+ * _DSM related code stolen from nouveau_acpi.c.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+
+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
+
+#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
+
+static struct intel_dsm_priv {
+ ACPI_HANDLE dhandle;
+} intel_dsm_priv;
+
+static const u8 intel_dsm_guid[] = {
+ 0xd3, 0x73, 0xd8, 0x7e,
+ 0xd0, 0xc2,
+ 0x4f, 0x4e,
+ 0xa8, 0x54,
+ 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
+};
+
+static int intel_dsm(ACPI_HANDLE handle, int func, int arg)
+{
+ ACPI_BUFFER output = { ACPI_ALLOCATE_BUFFER, NULL };
+ ACPI_OBJECT_LIST input;
+ ACPI_OBJECT params[4];
+ ACPI_OBJECT *obj;
+ u32 result;
+ int ret = 0;
+
+ input.Count = 4;
+ input.Pointer = params;
+ params[0].Type = ACPI_TYPE_BUFFER;
+ params[0].Buffer.Length = sizeof(intel_dsm_guid);
+ params[0].Buffer.Pointer = __DECONST(char *, intel_dsm_guid);
+ params[1].Type = ACPI_TYPE_INTEGER;
+ params[1].Integer.Value = INTEL_DSM_REVISION_ID;
+ params[2].Type = ACPI_TYPE_INTEGER;
+ params[2].Integer.Value = func;
+ params[3].Type = ACPI_TYPE_INTEGER;
+ params[3].Integer.Value = arg;
+
+ ret = AcpiEvaluateObject(handle, "_DSM", &input, &output);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+ return ret;
+ }
+
+ obj = (ACPI_OBJECT *)output.Pointer;
+
+ result = 0;
+ switch (obj->Type) {
+ case ACPI_TYPE_INTEGER:
+ result = obj->Integer.Value;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ if (obj->Buffer.Length == 4) {
+ result = (obj->Buffer.Pointer[0] |
+ (obj->Buffer.Pointer[1] << 8) |
+ (obj->Buffer.Pointer[2] << 16) |
+ (obj->Buffer.Pointer[3] << 24));
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (result == 0x80000002)
+ ret = -ENODEV;
+
+ AcpiOsFree(output.Pointer);
+ return ret;
+}
+
+static char *intel_dsm_port_name(u8 id)
+{
+ switch (id) {
+ case 0:
+ return "Reserved";
+ case 1:
+ return "Analog VGA";
+ case 2:
+ return "LVDS";
+ case 3:
+ return "Reserved";
+ case 4:
+ return "HDMI/DVI_B";
+ case 5:
+ return "HDMI/DVI_C";
+ case 6:
+ return "HDMI/DVI_D";
+ case 7:
+ return "DisplayPort_A";
+ case 8:
+ return "DisplayPort_B";
+ case 9:
+ return "DisplayPort_C";
+ case 0xa:
+ return "DisplayPort_D";
+ case 0xb:
+ case 0xc:
+ case 0xd:
+ return "Reserved";
+ case 0xe:
+ return "WiDi";
+ default:
+ return "bad type";
+ }
+}
+
+static char *intel_dsm_mux_type(u8 type)
+{
+ switch (type) {
+ case 0:
+ return "unknown";
+ case 1:
+ return "No MUX, iGPU only";
+ case 2:
+ return "No MUX, dGPU only";
+ case 3:
+ return "MUXed between iGPU and dGPU";
+ default:
+ return "bad type";
+ }
+}
+
+static void intel_dsm_platform_mux_info(void)
+{
+ ACPI_BUFFER output = { ACPI_ALLOCATE_BUFFER, NULL };
+ ACPI_OBJECT_LIST input;
+ ACPI_OBJECT params[4];
+ ACPI_OBJECT *pkg;
+ int i, ret;
+
+ input.Count = 4;
+ input.Pointer = params;
+ params[0].Type = ACPI_TYPE_BUFFER;
+ params[0].Buffer.Length = sizeof(intel_dsm_guid);
+ params[0].Buffer.Pointer = __DECONST(char *, intel_dsm_guid);
+ params[1].Type = ACPI_TYPE_INTEGER;
+ params[1].Integer.Value = INTEL_DSM_REVISION_ID;
+ params[2].Type = ACPI_TYPE_INTEGER;
+ params[2].Integer.Value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
+ params[3].Type = ACPI_TYPE_INTEGER;
+ params[3].Integer.Value = 0;
+
+ ret = AcpiEvaluateObject(intel_dsm_priv.dhandle, "_DSM", &input,
+ &output);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+ goto out;
+ }
+
+ pkg = (ACPI_OBJECT *)output.Pointer;
+
+ if (pkg->Type == ACPI_TYPE_PACKAGE) {
+ ACPI_OBJECT *connector_count = &pkg->Package.Elements[0];
+ DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+ (unsigned long long)connector_count->Integer.Value);
+ for (i = 1; i < pkg->Package.Count; i++) {
+ ACPI_OBJECT *obj = &pkg->Package.Elements[i];
+ ACPI_OBJECT *connector_id =
+ &obj->Package.Elements[0];
+ ACPI_OBJECT *info = &obj->Package.Elements[1];
+ DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+ (unsigned long long)connector_id->Integer.Value);
+ DRM_DEBUG_DRIVER(" port id: %s\n",
+ intel_dsm_port_name(info->Buffer.Pointer[0]));
+ DRM_DEBUG_DRIVER(" display mux info: %s\n",
+ intel_dsm_mux_type(info->Buffer.Pointer[1]));
+ DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
+ intel_dsm_mux_type(info->Buffer.Pointer[2]));
+ DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
+ intel_dsm_mux_type(info->Buffer.Pointer[3]));
+ }
+ }
+
+out:
+ AcpiOsFree(output.Pointer);
+}
+
+static bool intel_dsm_pci_probe(device_t dev)
+{
+ ACPI_HANDLE dhandle, intel_handle;
+ ACPI_STATUS status;
+ int ret;
+
+ dhandle = acpi_get_handle(dev);
+ if (!dhandle)
+ return false;
+
+ status = AcpiGetHandle(dhandle, "_DSM", &intel_handle);
+ if (ACPI_FAILURE(status)) {
+ DRM_DEBUG_KMS("no _DSM method for intel device\n");
+ return false;
+ }
+
+ ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
+ return false;
+ }
+
+ intel_dsm_priv.dhandle = dhandle;
+
+ intel_dsm_platform_mux_info();
+ return true;
+}
+
+static bool intel_dsm_detect(void)
+{
+ char acpi_method_name[255] = { 0 };
+ ACPI_BUFFER buffer = {sizeof(acpi_method_name), acpi_method_name};
+ device_t dev = NULL;
+ bool has_dsm = false;
+ int vga_count = 0;
+
+#ifdef FREEBSD_WIP
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+#endif /* FREEBSD_WIP */
+ if ((dev = pci_find_class(PCIC_DISPLAY, PCIS_DISPLAY_VGA)) != NULL) {
+ vga_count++;
+ has_dsm |= intel_dsm_pci_probe(dev);
+ }
+
+ if (vga_count == 2 && has_dsm) {
+ AcpiGetName(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
+ acpi_method_name);
+ return true;
+ }
+
+ return false;
+}
+
+void intel_register_dsm_handler(void)
+{
+ if (!intel_dsm_detect())
+ return;
+}
+
+void intel_unregister_dsm_handler(void)
+{
+}
diff --git a/sys/dev/drm2/i915/intel_bios.c b/sys/dev/drm2/i915/intel_bios.c
index 0499794..f528766 100644
--- a/sys/dev/drm2/i915/intel_bios.c
+++ b/sys/dev/drm2/i915/intel_bios.c
@@ -23,10 +23,12 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*
- * $FreeBSD$
*/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/drm_dp_helper.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
@@ -169,8 +171,7 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
int dvo_timing_offset =
lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
- const char *entry = (const char *)lvds_lfp_data->data +
- lfp_data_size * index;
+ const char *entry = (const char *)lvds_lfp_data->data + lfp_data_size * index;
return (const struct lvds_dvo_timing *)(entry + dvo_timing_offset);
}
@@ -188,7 +189,7 @@ get_lvds_fp_timing(const struct bdb_header *bdb,
u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
size_t ofs;
- if (index >= DRM_ARRAY_SIZE(ptrs->ptr))
+ if (index >= ARRAY_SIZE(ptrs->ptr))
return NULL;
ofs = ptrs->ptr[index].fp_timing_offset;
if (ofs < data_ofs ||
@@ -234,8 +235,9 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
lvds_lfp_data_ptrs,
lvds_options->panel_type);
- panel_fixed_mode = malloc(sizeof(*panel_fixed_mode), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
+ panel_fixed_mode = malloc(sizeof(*panel_fixed_mode), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!panel_fixed_mode)
+ return;
fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
@@ -263,9 +265,9 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = downclock * 10;
- DRM_DEBUG("LVDS downclock is found in VBT. "
+ DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
"Normal Clock %dKHz, downclock %dKHz\n",
- panel_fixed_mode->clock, 10 * downclock);
+ panel_fixed_mode->clock, 10*downclock);
}
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
@@ -311,8 +313,9 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
if (!dvo_timing)
return;
- panel_fixed_mode = malloc(sizeof(*panel_fixed_mode), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
+ panel_fixed_mode = malloc(sizeof(*panel_fixed_mode), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!panel_fixed_mode)
+ return;
fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
@@ -351,12 +354,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
dev_priv->display_clock_mode = general->display_clock_mode;
- DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
+ dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->int_tv_support,
dev_priv->int_crt_support,
dev_priv->lvds_use_ssc,
dev_priv->lvds_ssc_freq,
- dev_priv->display_clock_mode);
+ dev_priv->display_clock_mode,
+ dev_priv->fdi_rx_polarity_inverted);
}
}
@@ -499,12 +504,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
- DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
- "supported, assume %dbpp panel color "
- "depth.\n",
- dev_priv->edp.bpp);
- }
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
@@ -613,8 +614,11 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
- dev_priv->child_dev = malloc(sizeof(*p_child) * count, DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
+ dev_priv->child_dev = malloc(count * sizeof(*p_child), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!dev_priv->child_dev) {
+ DRM_DEBUG_KMS("No memory space for child device\n");
+ return;
+ }
dev_priv->child_dev_num = count;
count = 0;
@@ -654,12 +658,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
-
- /* eDP data */
- dev_priv->edp.bpp = 18;
}
-static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Falling back to manually reading VBT from "
"VBIOS ROM for %s\n",
@@ -688,12 +689,13 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
*
* Returns 0 on success, nonzero on failure.
*/
-bool
+int
intel_parse_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ device_t vga_dev = device_get_parent(dev->dev);;
struct bdb_header *bdb = NULL;
- u8 *bios;
+ u8 __iomem *bios = NULL;
init_vbt_defaults(dev_priv);
@@ -707,20 +709,13 @@ intel_parse_bios(struct drm_device *dev)
} else
dev_priv->opregion.vbt = NULL;
}
- bios = NULL;
-#if 1
- if (bdb == NULL) {
- KIB_NOTYET();
- return (-1);
- }
-#else
if (bdb == NULL) {
struct vbt_header *vbt = NULL;
size_t size;
int i;
- bios = pci_map_rom(pdev, &size);
+ bios = vga_pci_map_bios(vga_dev, &size);
if (!bios)
return -1;
@@ -734,13 +729,12 @@ intel_parse_bios(struct drm_device *dev)
if (!vbt) {
DRM_DEBUG_DRIVER("VBT signature missing\n");
- pci_unmap_rom(pdev, bios);
+ vga_pci_unmap_bios(vga_dev, bios);
return -1;
}
bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
}
-#endif
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
@@ -752,14 +746,31 @@ intel_parse_bios(struct drm_device *dev)
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
-#if 0
if (bios)
- pci_unmap_rom(pdev, bios);
-#endif
+ vga_pci_unmap_bios(vga_dev, bios);
return 0;
}
+/*
+ * NOTE Linux<->FreeBSD:
+ * Apparently, Linux doesn't free those pointers.
+ * TODO: Report that upstream.
+ */
+void
+intel_free_parsed_bios_data(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ free(dev_priv->lfp_lvds_vbt_mode, DRM_MEM_KMS);
+ free(dev_priv->sdvo_lvds_vbt_mode, DRM_MEM_KMS);
+ free(dev_priv->child_dev, DRM_MEM_KMS);
+
+ dev_priv->lfp_lvds_vbt_mode = NULL;
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+ dev_priv->child_dev = NULL;
+}
+
/* Ensure that vital registers have been initialised, even if the BIOS
* is absent or just failing to do its job.
*/
@@ -768,7 +779,8 @@ void intel_setup_bios(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Set the Panel Power On/Off timings if uninitialized. */
- if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+ if (!HAS_PCH_SPLIT(dev) &&
+ I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
/* Set T2 to 40ms and T5 to 200ms */
I915_WRITE(PP_ON_DELAYS, 0x019007d0);
diff --git a/sys/dev/drm2/i915/intel_bios.h b/sys/dev/drm2/i915/intel_bios.h
index 186409c..8da5e01 100644
--- a/sys/dev/drm2/i915/intel_bios.h
+++ b/sys/dev/drm2/i915/intel_bios.h
@@ -128,7 +128,9 @@ struct bdb_general_features {
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rsvd9:6; /* finish byte */
+ u8 rsvd9:1;
+ u8 fdi_rx_polarity_inverted:1;
+ u8 rsvd10:4; /* finish byte */
/* bits 4 */
u8 legacy_monitor_detect;
@@ -477,7 +479,8 @@ struct bdb_edp {
} __attribute__ ((packed));
void intel_setup_bios(struct drm_device *dev);
-bool intel_parse_bios(struct drm_device *dev);
+int intel_parse_bios(struct drm_device *dev);
+void intel_free_parsed_bios_data(struct drm_device *dev);
/*
* Driver<->VBIOS interaction occurs through scratch bits in
diff --git a/sys/dev/drm2/i915/intel_crt.c b/sys/dev/drm2/i915/intel_crt.c
index 6bad47f..e9ac349 100644
--- a/sys/dev/drm2/i915/intel_crt.c
+++ b/sys/dev/drm2/i915/intel_crt.c
@@ -28,13 +28,12 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/drm_crtc.h>
#include <dev/drm2/drm_crtc_helper.h>
#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
-#include <dev/drm2/i915/intel_drv.h>
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
@@ -46,7 +45,11 @@ __FBSDID("$FreeBSD$");
struct intel_crt {
struct intel_encoder base;
+ /* DPMS state is stored in the connector, which we need in the
+ * encoder's enable/disable callbacks */
+ struct intel_connector *connector;
bool force_hotplug_required;
+ u32 adpa_reg;
};
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -55,36 +58,42 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base);
}
-static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ return container_of(encoder, struct intel_crt, base);
+}
+
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 temp;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 tmp;
- temp = I915_READ(PCH_ADPA);
- temp &= ~ADPA_DAC_ENABLE;
+ tmp = I915_READ(crt->adpa_reg);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- temp |= ADPA_DAC_ENABLE;
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- /* Just leave port enable cleared */
- break;
- }
+ if (!(tmp & ADPA_DAC_ENABLE))
+ return false;
- I915_WRITE(PCH_ADPA, temp);
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
}
-static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
- temp = I915_READ(ADPA);
+ temp = I915_READ(crt->adpa_reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
@@ -103,7 +112,64 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
break;
}
- I915_WRITE(ADPA, temp);
+ I915_WRITE(crt->adpa_reg, temp);
+}
+
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+ intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+
+ intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+}
+
+
+static void intel_crt_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_crtc *crtc;
+ int old_dpms;
+
+ /* PCH platforms and VLV only support on/off. */
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = encoder->base.crtc;
+ if (!crtc) {
+ encoder->connectors_active = false;
+ return;
+ }
+
+ /* We need the pipe to run for anything but OFF. */
+ if (mode == DRM_MODE_DPMS_OFF)
+ encoder->connectors_active = false;
+ else
+ encoder->connectors_active = true;
+
+ if (mode < old_dpms) {
+ /* From off to on, enable the pipe first. */
+ intel_crtc_update_dpms(crtc);
+
+ intel_crt_set_dpms(encoder, mode);
+ } else {
+ intel_crt_set_dpms(encoder, mode);
+
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -125,6 +191,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
+ /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+ if (HAS_PCH_LPT(dev) &&
+ (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
@@ -142,37 +213,26 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crt *crt =
+ intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_md_reg;
- u32 adpa, dpll_md;
- u32 adpa_reg;
-
- dpll_md_reg = DPLL_MD(intel_crtc->pipe);
+ u32 adpa;
if (HAS_PCH_SPLIT(dev))
- adpa_reg = PCH_ADPA;
+ adpa = ADPA_HOTPLUG_BITS;
else
- adpa_reg = ADPA;
-
- /*
- * Disable separate mode multiplier used when cloning SDVO to CRT
- * XXX this needs to be adjusted when we really are cloning
- */
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- dpll_md = I915_READ(dpll_md_reg);
- I915_WRITE(dpll_md_reg,
- dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
- }
+ adpa = 0;
- adpa = ADPA_HOTPLUG_BITS;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_LPT(dev))
+ ; /* Those bits don't exist here */
+ else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
@@ -182,7 +242,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
- I915_WRITE(adpa_reg, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
}
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -209,10 +269,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(PCH_ADPA, adpa);
- if (_intel_wait_for(dev,
- (I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000, 1, "915crt"))
- DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER\n");
+ if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
I915_WRITE(PCH_ADPA, save_adpa);
@@ -231,6 +290,42 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
+static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 adpa;
+ bool ret;
+ u32 save_adpa;
+
+ save_adpa = adpa = I915_READ(ADPA);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+
+ I915_WRITE(ADPA, adpa);
+
+ if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000)) {
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+ I915_WRITE(ADPA, save_adpa);
+ }
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = I915_READ(ADPA);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+
+ DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+ /* FIXME: debug force function and remove */
+ ret = true;
+
+ return ret;
+}
+
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
*
@@ -250,6 +345,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
+ if (IS_VALLEYVIEW(dev))
+ return valleyview_crt_detect_hotplug(connector);
+
/*
* On 4 series desktop, CRT detect sequence need to be done twice
* to get a reliable result.
@@ -266,9 +364,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
/* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
/* wait for FORCE_DETECT to go off */
- if (_intel_wait_for(dev,
- (I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT) == 0,
- 1000, 1, "915cr2"))
+ if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT) == 0,
+ 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
@@ -285,42 +383,72 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+ device_t i2c)
+{
+ struct edid *edid;
+
+ edid = drm_get_edid(connector, i2c);
+
+ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ intel_gmbus_force_bit(i2c, true);
+ edid = drm_get_edid(connector, i2c);
+ intel_gmbus_force_bit(i2c, false);
+ }
+
+ return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+ device_t adapter)
+{
+ struct edid *edid;
+ int ret;
+
+ edid = intel_crt_get_edid(connector, adapter);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ free(edid, DRM_MEM_KMS);
+
+ return ret;
+}
+
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ struct edid *edid;
+ device_t i2c;
- /* CRT should always be at 0, but check anyway */
- if (crt->base.type != INTEL_OUTPUT_ANALOG)
- return false;
+ BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ edid = intel_crt_get_edid(connector, i2c);
- if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
- struct edid *edid;
- bool is_digital = false;
- device_t iic;
+ if (edid) {
+ bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
- iic = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
- edid = drm_get_edid(connector, iic);
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
* have to check the EDID input spec of the attached device.
- *
- * On the other hand, what should we do if it is a broken EDID?
*/
- if (edid != NULL) {
- is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
- free(edid, DRM_MEM_KMS);
- }
-
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
- } else {
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
+
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
+ free(edid, DRM_MEM_KMS);
+
return false;
}
@@ -453,30 +581,37 @@ intel_crt_detect(struct drm_connector *connector, bool force)
struct intel_load_detect_pipe tmp;
if (I915_HAS_HOTPLUG(dev)) {
+ /* We can not rely on the HPD pin always being correctly wired
+ * up, for example many KVM do not pass it through, and so
+ * only trust an assertion that the monitor is connected.
+ */
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
- } else {
+ } else
DRM_DEBUG_KMS("CRT not detected via hotplug\n");
- return connector_status_disconnected;
- }
}
if (intel_crt_detect_ddc(connector))
return connector_status_connected;
+ /* Load detection is broken on HPD capable machines. Whoever wants a
+ * broken monitor (without edid) to work behind a broken kvm (that fails
+ * to have the right resistors for HP detection) needs to fix this up.
+ * For now just bail out. */
+ if (I915_HAS_HOTPLUG(dev))
+ return connector_status_disconnected;
+
if (!force)
return connector->status;
/* for pre-945g platforms use load detect */
- if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
- &tmp)) {
+ if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(&crt->base, connector,
- &tmp);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
status = connector_status_unknown;
@@ -485,10 +620,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
static void intel_crt_destroy(struct drm_connector *connector)
{
-
-#if 0
- drm_sysfs_connector_remove(connector);
-#endif
drm_connector_cleanup(connector);
free(connector, DRM_MEM_KMS);
}
@@ -501,13 +632,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)
device_t i2c;
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
- ret = intel_ddc_get_modes(connector, i2c);
+ ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
- return intel_ddc_get_modes(connector, i2c);
+ return intel_crt_ddc_get_modes(connector, i2c);
}
static int intel_crt_set_property(struct drm_connector *connector,
@@ -520,36 +651,37 @@ static int intel_crt_set_property(struct drm_connector *connector,
static void intel_crt_reset(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(PCH_ADPA);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(PCH_ADPA, adpa);
+ POSTING_READ(PCH_ADPA);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
}
+
}
/*
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
+static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
- .prepare = intel_encoder_prepare,
- .commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
- .dpms = pch_crt_dpms,
-};
-
-static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
- .mode_fixup = intel_crt_mode_fixup,
- .prepare = intel_encoder_prepare,
- .commit = intel_encoder_commit,
- .mode_set = intel_crt_mode_set,
- .dpms = gmch_crt_dpms,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_crt_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
@@ -566,7 +698,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;
@@ -590,17 +722,23 @@ void intel_crt_init(struct drm_device *dev)
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct drm_encoder_helper_funcs *encoder_helper_funcs;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
return;
crt = malloc(sizeof(struct intel_crt), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!crt)
+ return;
intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_connector) {
+ free(crt, DRM_MEM_KMS);
+ return;
+ }
connector = &intel_connector->base;
+ crt->connector = intel_connector;
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
@@ -610,13 +748,11 @@ void intel_crt_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
- crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
- 1 << INTEL_ANALOG_CLONE_BIT |
- 1 << INTEL_SDVO_LVDS_CLONE_BIT);
- if (IS_HASWELL(dev))
+ crt->base.cloneable = true;
+ if (IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
- crt->base.crtc_mask = (1 << 0) | (1 << 1);
+ crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
@@ -625,16 +761,22 @@ void intel_crt_init(struct drm_device *dev)
connector->doublescan_allowed = 0;
if (HAS_PCH_SPLIT(dev))
- encoder_helper_funcs = &pch_encoder_funcs;
+ crt->adpa_reg = PCH_ADPA;
+ else if (IS_VALLEYVIEW(dev))
+ crt->adpa_reg = VLV_ADPA;
else
- encoder_helper_funcs = &gmch_encoder_funcs;
+ crt->adpa_reg = ADPA;
- drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
- drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+ crt->base.disable = intel_disable_crt;
+ crt->base.enable = intel_enable_crt;
+ if (IS_HASWELL(dev))
+ crt->base.get_hw_state = intel_ddi_get_hw_state;
+ else
+ crt->base.get_hw_state = intel_crt_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
-#if 0
- drm_sysfs_connector_add(connector);
-#endif
+ drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
+ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
if (I915_HAS_HOTPLUG(dev))
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -645,18 +787,18 @@ void intel_crt_init(struct drm_device *dev)
* Configure the automatic hotplug detection stuff
*/
crt->force_hotplug_required = 0;
- if (HAS_PCH_SPLIT(dev)) {
- u32 adpa;
- adpa = I915_READ(PCH_ADPA);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(PCH_ADPA, adpa);
- POSTING_READ(PCH_ADPA);
+ dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
- DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
- crt->force_hotplug_required = 1;
- }
+ /*
+ * TODO: find a proper way to discover whether we need to set the the
+ * polarity and link reversal bits or not, instead of relying on the
+ * BIOS.
+ */
+ if (HAS_PCH_LPT(dev)) {
+ u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
+ FDI_RX_LINK_REVERSAL_OVERRIDE;
- dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+ dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
+ }
}
diff --git a/sys/dev/drm2/i915/intel_ddi.c b/sys/dev/drm2/i915/intel_ddi.c
index 52bcc1e..1dd4a35 100644
--- a/sys/dev/drm2/i915/intel_ddi.c
+++ b/sys/dev/drm2/i915/intel_ddi.c
@@ -29,8 +29,6 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
-#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
@@ -64,6 +62,26 @@ static const u32 hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
+static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+ return intel_dig_port->port;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ return PORT_E;
+
+ } else {
+ DRM_ERROR("Invalid DDI encoder type %d\n", type);
+ BUG();
+ }
+}
+
/* On Haswell, DDI port buffers must be programmed with correct values
* in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
@@ -83,11 +101,11 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bo
port_name(port),
use_fdi_mode ? "FDI" : "DP");
- if (use_fdi_mode && (port != PORT_E))
- DRM_DEBUG_KMS("Programming port %c in FDI mode, this probably will not work.\n",
+ WARN((use_fdi_mode && (port != PORT_E)),
+ "Programming port %c in FDI mode, this probably will not work.\n",
port_name(port));
- for (i=0, reg=DDI_BUF_TRANS(port); i < DRM_ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
@@ -124,6 +142,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
DDI_BUF_EMP_800MV_3_5DB_HSW
};
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ uint32_t reg = DDI_BUF_CTL(port);
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ udelay(1);
+ if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ return;
+ }
+ DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
/* Starting with Haswell, different DDI ports can work in FDI mode for
* connection to the PCH-located connectors. For this, it is necessary to train
@@ -139,25 +170,34 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 reg, temp, i;
+ u32 temp, i, rx_ctl_val;
+
+ /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
+ * mode set "sequence for CRT port" document:
+ * - TP1 to TP2 time with the default value
+ * - FDI delay to 90h
+ */
+ I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
+ FDI_RX_PWRDN_LANE0_VAL(2) |
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
- /* Configure CPU PLL, wait for warmup */
- I915_WRITE(SPLL_CTL,
- SPLL_PLL_ENABLE |
- SPLL_PLL_FREQ_1350MHz |
- SPLL_PLL_SSC);
+ /* Enable the PCH Receiver FDI PLL */
+ rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+ udelay(220);
- /* Use SPLL to drive the output when in FDI mode */
- I915_WRITE(PORT_CLK_SEL(PORT_E),
- PORT_CLK_SEL_SPLL);
- I915_WRITE(PIPE_CLK_SEL(pipe),
- PIPE_CLK_SEL_PORT(PORT_E));
+ /* Switch from Rawclk to PCDclk */
+ rx_ctl_val |= FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
- DELAY(20);
+ /* Configure Port Clock Select */
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
- /* Start the training iterating through available voltages and emphasis */
- for (i=0; i < DRM_ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+ /* Start the training iterating through available voltages and emphasis,
+ * testing each value twice. */
+ for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
@@ -165,95 +205,79 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DP_TP_CTL_LINK_TRAIN_PAT1 |
DP_TP_CTL_ENABLE);
- /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
- temp = I915_READ(DDI_BUF_CTL(PORT_E));
- temp = (temp & ~DDI_BUF_EMP_MASK);
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+ * DDI E does not support port reversal, the functionality is
+ * achieved on the PCH side in FDI_RX_CTL, so no need to set the
+ * port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
- temp |
- DDI_BUF_CTL_ENABLE |
- DDI_PORT_WIDTH_X2 |
- hsw_ddi_buf_ctl_values[i]);
-
- DELAY(600);
-
- /* Enable CPU FDI Receiver with auto-training */
- reg = FDI_RX_CTL(pipe);
- I915_WRITE(reg,
- I915_READ(reg) |
- FDI_LINK_TRAIN_AUTO |
- FDI_RX_ENABLE |
- FDI_LINK_TRAIN_PATTERN_1_CPT |
- FDI_RX_ENHANCE_FRAME_ENABLE |
- FDI_PORT_WIDTH_2X_LPT |
- FDI_RX_PLL_ENABLE);
- POSTING_READ(reg);
- DELAY(100);
+ DDI_BUF_CTL_ENABLE |
+ ((intel_crtc->fdi_lanes - 1) << 1) |
+ hsw_ddi_buf_ctl_values[i / 2]);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+ udelay(600);
+
+ /* Program PCH FDI Receiver TU */
+ I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
+
+ /* Enable PCH FDI Receiver with auto-training */
+ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Wait for FDI receiver lane calibration */
+ udelay(30);
+
+ /* Unset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
+
+ /* Wait for FDI auto training time */
+ udelay(5);
temp = I915_READ(DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+ DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
/* Enable normal pixel sending for FDI */
I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_LINK_TRAIN_NORMAL |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_ENABLE);
-
- /* Enable PIPE_TRANS_DDI_FUNC_CTL for the pipe to work in FDI mode */
- temp = I915_READ(TRANS_DDI_FUNC_CTL(pipe));
- temp &= ~TRANS_DDI_PORT_MASK;
- temp |= TRANS_DDI_SELECT_PORT(PORT_E) |
- TRANS_DDI_MODE_SELECT_FDI |
- TRANS_DDI_FUNC_ENABLE |
- TRANS_DDI_PORT_WIDTH_X2;
- I915_WRITE(TRANS_DDI_FUNC_CTL(pipe),
- temp);
- break;
- } else {
- DRM_ERROR("Error training BUF_CTL %d\n", i);
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
- /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */
- I915_WRITE(DP_TP_CTL(PORT_E),
- I915_READ(DP_TP_CTL(PORT_E)) &
- ~DP_TP_CTL_ENABLE);
- I915_WRITE(FDI_RX_CTL(pipe),
- I915_READ(FDI_RX_CTL(pipe)) &
- ~FDI_RX_PLL_ENABLE);
- continue;
+ return;
}
- }
- DRM_DEBUG_KMS("FDI train done.\n");
-}
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
-/* For DDI connections, it is possible to support different outputs over the
- * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
- * the time the output is detected what exactly is on the other end of it. This
- * function aims at providing support for this detection and proper output
- * configuration.
- */
-void intel_ddi_init(struct drm_device *dev, enum port port)
-{
- /* For now, we don't do any proper output detection and assume that we
- * handle HDMI only */
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(PORT_E), temp);
+ POSTING_READ(DP_TP_CTL(PORT_E));
- switch(port){
- case PORT_A:
- /* We don't handle eDP and DP yet */
- DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
- break;
- /* Assume that the ports B, C and D are working in HDMI mode for now */
- case PORT_B:
- case PORT_C:
- case PORT_D:
- intel_hdmi_init(dev, DDI_BUF_CTL(port));
- break;
- default:
- DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
- port);
- break;
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Reset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
}
+
+ DRM_ERROR("FDI link training failed!\n");
}
/* WRPLL clock dividers */
@@ -264,7 +288,8 @@ struct wrpll_tmds_clock {
u16 r2; /* Reference divider */
};
-/* Table of matching values for WRPLL clocks programming for each frequency */
+/* Table of matching values for WRPLL clocks programming for each frequency.
+ * The code assumes this table is sorted. */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{19750, 38, 25, 18},
{20000, 48, 32, 18},
@@ -274,7 +299,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{23000, 36, 23, 15},
{23500, 40, 40, 23},
{23750, 26, 16, 14},
- {23750, 26, 16, 14},
{24000, 36, 24, 15},
{25000, 36, 25, 15},
{25175, 26, 40, 33},
@@ -434,7 +458,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{108000, 8, 24, 15},
{108108, 8, 173, 108},
{109000, 6, 23, 19},
- {109000, 6, 23, 19},
{110000, 6, 22, 18},
{110013, 6, 22, 18},
{110250, 8, 49, 30},
@@ -611,7 +634,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{218250, 4, 42, 26},
{218750, 4, 34, 21},
{219000, 4, 47, 29},
- {219000, 4, 47, 29},
{220000, 4, 44, 27},
{220640, 4, 49, 30},
{220750, 4, 36, 22},
@@ -644,118 +666,864 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{298000, 2, 21, 19},
};
-void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- int port = intel_hdmi->ddi_port;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ int port = intel_ddi_get_encoder_port(intel_encoder);
int pipe = intel_crtc->pipe;
- int p, n2, r2, valid=0;
- u32 temp, i;
+ int type = intel_encoder->type;
- /* On Haswell, we need to enable the clocks and prepare DDI function to
- * work in HDMI mode for this pipe.
- */
- DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
+ DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
+ port_name(port), pipe_name(pipe));
- for (i=0; i < DRM_ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
- if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
- p = wrpll_tmds_clock_table[i].p;
- n2 = wrpll_tmds_clock_table[i].n2;
- r2 = wrpll_tmds_clock_table[i].r2;
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
- DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
- crtc->mode.clock,
- p, n2, r2);
+ intel_dp->DP = intel_dig_port->port_reversal |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ switch (intel_dp->lane_count) {
+ case 1:
+ intel_dp->DP |= DDI_PORT_WIDTH_X1;
+ break;
+ case 2:
+ intel_dp->DP |= DDI_PORT_WIDTH_X2;
+ break;
+ case 4:
+ intel_dp->DP |= DDI_PORT_WIDTH_X4;
+ break;
+ default:
+ intel_dp->DP |= DDI_PORT_WIDTH_X4;
+ WARN(1, "Unexpected DP lane count %d\n",
+ intel_dp->lane_count);
+ break;
+ }
- valid = 1;
+ if (intel_dp->has_audio) {
+ DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("DP audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_dp_init_link_config(intel_dp);
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_audio) {
+ /* Proper support for digital audio needs a new logic
+ * and a new set of registers, so we leave it for future
+ * patch bombing.
+ */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ }
+}
+
+static struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder, *ret = NULL;
+ int num_encoders = 0;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ ret = intel_encoder;
+ num_encoders++;
+ }
+
+ if (num_encoders != 1)
+ WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
+ intel_crtc->pipe);
+
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t val;
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ plls->spll_refcount--;
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ plls->wrpll1_refcount--;
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 1\n");
+ val = I915_READ(WRPLL_CTL1);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL1);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ plls->wrpll2_refcount--;
+ if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 2\n");
+ val = I915_READ(WRPLL_CTL2);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL2);
+ }
+ break;
+ }
+
+ WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
+ WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
+ WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
+
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+}
+
+static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
+ if (clock <= wrpll_tmds_clock_table[i].clock)
+ break;
+
+ if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
+ i--;
+
+ *p = wrpll_tmds_clock_table[i].p;
+ *n2 = wrpll_tmds_clock_table[i].n2;
+ *r2 = wrpll_tmds_clock_table[i].r2;
+
+ if (wrpll_tmds_clock_table[i].clock != clock)
+ DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
+ wrpll_tmds_clock_table[i].clock, clock);
+
+ DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+ clock, *p, *n2, *r2);
+}
+
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ int type = intel_encoder->type;
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t reg, val;
+
+ /* TODO: reuse PLLs when possible (compare values) */
+
+ intel_ddi_put_crtc_pll(crtc);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
break;
+ default:
+ DRM_ERROR("Link bandwidth %d unsupported\n",
+ intel_dp->link_bw);
+ return false;
}
+
+ /* We don't need to turn any PLL on because we'll use LCPLL. */
+ return true;
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ int p, n2, r2;
+
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll1_refcount++;
+ reg = WRPLL_CTL1;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+ } else if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll2_refcount++;
+ reg = WRPLL_CTL2;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+ } else {
+ DRM_ERROR("No WRPLLs available!\n");
+ return false;
+ }
+
+ WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
+ "WRPLL already enabled\n");
+
+ intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
+ pipe_name(pipe));
+ plls->spll_refcount++;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+ }
+ reg = SPLL_CTL;
+
+ WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
+ "SPLL already enabled\n");
+
+ val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+ } else {
+ WARN(1, "Invalid DDI encoder type %d\n", type);
+ return false;
}
- if (!valid) {
- DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
- crtc->mode.clock);
- return;
+ I915_WRITE(reg, val);
+ udelay(20);
+
+ return true;
+}
+
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+
+ temp = TRANS_MSA_SYNC_CLK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ temp |= TRANS_MSA_6_BPC;
+ break;
+ case 24:
+ temp |= TRANS_MSA_8_BPC;
+ break;
+ case 30:
+ temp |= TRANS_MSA_10_BPC;
+ break;
+ case 36:
+ temp |= TRANS_MSA_12_BPC;
+ break;
+ default:
+ temp |= TRANS_MSA_8_BPC;
+ WARN(1, "%d bpp unsupported by DDI function\n",
+ intel_crtc->bpp);
+ }
+ I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
+}
- /* Enable LCPLL if disabled */
- temp = I915_READ(LCPLL_CTL);
- if (temp & LCPLL_PLL_DISABLE)
- I915_WRITE(LCPLL_CTL,
- temp & ~LCPLL_PLL_DISABLE);
-
- /* Configure WR PLL 1, program the correct divider values for
- * the desired frequency and wait for warmup */
- I915_WRITE(WRPLL_CTL1,
- WRPLL_PLL_ENABLE |
- WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) |
- WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p));
-
- DELAY(20);
-
- /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
- * this port for connection.
- */
- I915_WRITE(PORT_CLK_SEL(port),
- PORT_CLK_SEL_WRPLL1);
- I915_WRITE(PIPE_CLK_SEL(pipe),
- PIPE_CLK_SEL_PORT(port));
+void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t temp;
- DELAY(20);
+ /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = TRANS_DDI_FUNC_ENABLE;
+ temp |= TRANS_DDI_SELECT_PORT(port);
- if (intel_hdmi->has_audio) {
- /* Proper support for digital audio needs a new logic and a new set
- * of registers, so we leave it for future patch bombing.
- */
- DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
- pipe_name(intel_crtc->pipe));
+ switch (intel_crtc->bpp) {
+ case 18:
+ temp |= TRANS_DDI_BPC_6;
+ break;
+ case 24:
+ temp |= TRANS_DDI_BPC_8;
+ break;
+ case 30:
+ temp |= TRANS_DDI_BPC_10;
+ break;
+ case 36:
+ temp |= TRANS_DDI_BPC_12;
+ break;
+ default:
+ WARN(1, "%d bpp unsupported by transcoder DDI function\n",
+ intel_crtc->bpp);
+ }
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DDI_PVSYNC;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DDI_PHSYNC;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ switch (pipe) {
+ case PIPE_A:
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ break;
+ case PIPE_B:
+ temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_hdmi_sink)
+ temp |= TRANS_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ temp |= TRANS_DDI_MODE_SELECT_FDI;
+ temp |= (intel_crtc->fdi_lanes - 1) << 1;
+
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+ type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ switch (intel_dp->lane_count) {
+ case 1:
+ temp |= TRANS_DDI_PORT_WIDTH_X1;
+ break;
+ case 2:
+ temp |= TRANS_DDI_PORT_WIDTH_X2;
+ break;
+ case 4:
+ temp |= TRANS_DDI_PORT_WIDTH_X4;
+ break;
+ default:
+ temp |= TRANS_DDI_PORT_WIDTH_X4;
+ WARN(1, "Unsupported lane count %d\n",
+ intel_dp->lane_count);
+ }
+
+ } else {
+ WARN(1, "Invalid encoder type %d for pipe %d\n",
+ intel_encoder->type, pipe);
}
- /* Enable PIPE_TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
- temp = I915_READ(TRANS_DDI_FUNC_CTL(pipe));
- temp &= ~TRANS_DDI_PORT_MASK;
- temp &= ~TRANS_DDI_BPC_12;
- temp |= TRANS_DDI_SELECT_PORT(port) |
- TRANS_DDI_MODE_SELECT_HDMI |
- ((intel_crtc->bpp > 24) ?
- TRANS_DDI_BPC_12 :
- TRANS_DDI_BPC_8) |
- TRANS_DDI_FUNC_ENABLE;
-
- I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), temp);
-
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
-void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- struct drm_device *dev = encoder->dev;
+ uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ uint32_t val = I915_READ(reg);
+
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val |= TRANS_DDI_PORT_NONE;
+ I915_WRITE(reg, val);
+}
+
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- int port = intel_hdmi->ddi_port;
- u32 temp;
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ int type = intel_connector->base.connector_type;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum pipe pipe = 0;
+ enum transcoder cpu_transcoder;
+ uint32_t tmp;
- temp = I915_READ(DDI_BUF_CTL(port));
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+ return false;
- if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~DDI_BUF_CTL_ENABLE;
+ if (port == PORT_A)
+ cpu_transcoder = TRANSCODER_EDP;
+ else
+ cpu_transcoder = (enum transcoder)pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ case TRANS_DDI_MODE_SELECT_DVI:
+ return (type == DRM_MODE_CONNECTOR_HDMIA);
+
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (type == DRM_MODE_CONNECTOR_eDP)
+ return true;
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ return (type == DRM_MODE_CONNECTOR_DisplayPort);
+
+ case TRANS_DDI_MODE_SELECT_FDI:
+ return (type == DRM_MODE_CONNECTOR_VGA);
+
+ default:
+ return false;
+ }
+}
+
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ u32 tmp;
+ int i;
+
+ tmp = I915_READ(DDI_BUF_CTL(port));
+
+ if (!(tmp & DDI_BUF_CTL_ENABLE))
+ return false;
+
+ if (port == PORT_A) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ }
+
+ return true;
+ } else {
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK)
+ == TRANS_DDI_SELECT_PORT(port)) {
+ *pipe = i;
+ return true;
+ }
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
+
+ return true;
+}
+
+static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t temp, ret;
+ enum port port;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int i;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ port = PORT_A;
} else {
- temp |= DDI_BUF_CTL_ENABLE;
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ temp &= TRANS_DDI_PORT_MASK;
+
+ for (i = PORT_B; i <= PORT_E; i++)
+ if (temp == TRANS_DDI_SELECT_PORT(i))
+ port = i;
+ }
+
+ ret = I915_READ(PORT_CLK_SEL(port));
+
+ DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
+ pipe_name(pipe), port_name(port), ret);
+
+ return ret;
+}
+
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *intel_crtc;
+
+ for_each_pipe(pipe) {
+ intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (!intel_crtc->active)
+ continue;
+
+ intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+ pipe);
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ dev_priv->ddi_plls.spll_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ dev_priv->ddi_plls.wrpll1_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ dev_priv->ddi_plls.wrpll2_refcount++;
+ break;
+ }
+ }
+}
+
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
+}
+
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ }
+
+ WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ }
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t val;
+ bool wait = false;
+
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
}
- /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
- * and swing/emphasis values are ignored so nothing special needs
- * to be done besides enabling the port.
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
+ /* In HDMI/DVI mode, the port width, and swing/emphasis values
+ * are ignored so nothing special needs to be done besides
+ * enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port),
+ intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+ } else if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_on(intel_dp);
+ }
+}
+
+static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_off(intel_dp);
+ }
+}
+
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ return 450;
+ else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
+ LCPLL_CLK_FREQ_450)
+ return 450;
+ else if (IS_ULT(dev_priv->dev))
+ return 338;
+ else
+ return 540;
+}
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val = I915_READ(LCPLL_CTL);
+
+ /* The LCPLL register should be turned on by the BIOS. For now let's
+ * just check its state and print errors in case something is wrong.
+ * Don't even try to turn it on.
*/
- I915_WRITE(DDI_BUF_CTL(port),
- temp);
+
+ DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
+ intel_ddi_get_cdclk_freq(dev_priv));
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+}
+
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_dig_port->port;
+ bool wait = false;
+ uint32_t val;
+
+ if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+ }
+
+ val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+ POSTING_READ(DDI_BUF_CTL(port));
+
+ udelay(600);
+}
+
+void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ uint32_t val;
+
+ intel_ddi_post_disable(intel_encoder);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_MISC);
+ val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+}
+
+static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
+ intel_dp_check_link_status(intel_dp);
+}
+
+static void intel_ddi_destroy(struct drm_encoder *encoder)
+{
+ /* HDMI has nothing special to destroy, so we can go with this. */
+ intel_dp_encoder_destroy(encoder);
+}
+
+static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ int type = intel_encoder->type;
+
+ WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
+
+ if (type == INTEL_OUTPUT_HDMI)
+ return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
+ else
+ return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+ .destroy = intel_ddi_destroy,
+};
+
+static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
+ .mode_fixup = intel_ddi_mode_fixup,
+ .mode_set = intel_ddi_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *hdmi_connector = NULL;
+ struct intel_connector *dp_connector = NULL;
+
+ intel_dig_port = malloc(sizeof(struct intel_digital_port), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_dig_port)
+ return;
+
+ dp_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!dp_connector) {
+ free(intel_dig_port, DRM_MEM_KMS);
+ return;
+ }
+
+ if (port != PORT_A) {
+ hdmi_connector = malloc(sizeof(struct intel_connector),
+ DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!hdmi_connector) {
+ free(dp_connector, DRM_MEM_KMS);
+ free(intel_dig_port, DRM_MEM_KMS);
+ return;
+ }
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
+
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->pre_enable = intel_ddi_pre_enable;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->post_disable = intel_ddi_post_disable;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+
+ intel_dig_port->port = port;
+ intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_PORT_REVERSAL;
+ if (hdmi_connector)
+ intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
+ else
+ intel_dig_port->hdmi.sdvox_reg = 0;
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+
+ intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_ddi_hot_plug;
+
+ if (hdmi_connector)
+ intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+ intel_dp_init_connector(intel_dig_port, dp_connector);
}
diff --git a/sys/dev/drm2/i915/intel_display.c b/sys/dev/drm2/i915/intel_display.c
index 5af8c05..363cbf2 100644
--- a/sys/dev/drm2/i915/intel_display.c
+++ b/sys/dev/drm2/i915/intel_display.c
@@ -28,16 +28,12 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
-#include <dev/drm2/i915/intel_drv.h>
-#include <dev/drm2/drm_edid.h>
#include <dev/drm2/drm_dp_helper.h>
#include <dev/drm2/drm_crtc_helper.h>
-#include <sys/limits.h>
-
-#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
@@ -76,6 +72,16 @@ struct intel_limit {
/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!HAS_PCH_SPLIT(dev));
+
+ return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
+
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
@@ -94,6 +100,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
+static bool
+intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
@@ -137,8 +148,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
- .m1 = { .min = 10, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
@@ -355,11 +366,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
+static const intel_limit_t intel_limits_vlv_dac = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 22, .max = 450 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
+static const intel_limit_t intel_limits_vlv_hdmi = {
+ .dot = { .min = 20000, .max = 165000 },
+ .vco = { .min = 4000000, .max = 5994000},
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 60, .max = 300 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
+static const intel_limit_t intel_limits_vlv_dp = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 22, .max = 450 },
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
u32 val = 0;
- mtx_lock(&dev_priv->dpio_lock);
+ sx_xlock(&dev_priv->dpio_lock);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
goto out_unlock;
@@ -375,16 +428,15 @@ u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
val = I915_READ(DPIO_DATA);
out_unlock:
- mtx_unlock(&dev_priv->dpio_lock);
+ sx_xunlock(&dev_priv->dpio_lock);
return val;
}
-#if 0
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val)
{
- mtx_lock(&dev_priv->dpio_lock);
+ sx_xlock(&dev_priv->dpio_lock);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
goto out_unlock;
@@ -398,9 +450,8 @@ static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
DRM_ERROR("DPIO write wait timed out\n");
out_unlock:
- mtx_unlock(&dev_priv->dpio_lock);
+ sx_xunlock(&dev_priv->dpio_lock);
}
-#endif
static void vlv_init_dpio(struct drm_device *dev)
{
@@ -452,7 +503,7 @@ static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
* register is uninitialized.
*/
val = I915_READ(reg);
- if (!(val & ~LVDS_DETECTED))
+ if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
val = dev_priv->bios_lvds_val;
dev_priv->lvds_val = val;
}
@@ -480,7 +531,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- HAS_eDP)
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_dac;
@@ -528,6 +579,13 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
+ } else if (IS_VALLEYVIEW(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
+ limit = &intel_limits_vlv_dac;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ limit = &intel_limits_vlv_hdmi;
+ else
+ limit = &intel_limits_vlv_dp;
} else if (!IS_GEN2(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
@@ -569,11 +627,10 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
- if (encoder->base.crtc == crtc && encoder->type == type)
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->type == type)
return true;
return false;
@@ -801,6 +858,83 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
+static bool
+intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
+ u32 m, n, fastclk;
+ u32 updrate, minupdate, fracbits, p;
+ unsigned long bestppm, ppm, absppm;
+ int dotclk, flag;
+
+ flag = 0;
+ dotclk = target * 1000;
+ bestppm = 1000000;
+ ppm = absppm = 0;
+ fastclk = dotclk / (2*100);
+ updrate = 0;
+ minupdate = 19200;
+ fracbits = 1;
+ n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
+ bestm1 = bestm2 = bestp1 = bestp2 = 0;
+
+ /* based on hardware requirement, prefer smaller n to precision */
+ for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
+ updrate = refclk / n;
+ for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
+ for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
+ if (p2 > 10)
+ p2 = p2 - 1;
+ p = p1 * p2;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
+ m2 = (((2*(fastclk * p * n / m1 )) +
+ refclk) / (2*refclk));
+ m = m1 * m2;
+ vco = updrate * m;
+ if (vco >= limit->vco.min && vco < limit->vco.max) {
+ ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
+ absppm = (ppm > 0) ? ppm : (-ppm);
+ if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
+ bestppm = 0;
+ flag = 1;
+ }
+ if (absppm < bestppm - 10) {
+ bestppm = absppm;
+ flag = 1;
+ }
+ if (flag) {
+ bestn = n;
+ bestm1 = m1;
+ bestm2 = m2;
+ bestp1 = p1;
+ bestp2 = p2;
+ flag = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ best_clock->n = bestn;
+ best_clock->m1 = bestm1;
+ best_clock->m2 = bestm2;
+ best_clock->p1 = bestp1;
+ best_clock->p2 = bestp2;
+
+ return true;
+}
+
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return intel_crtc->cpu_transcoder;
+}
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
@@ -848,9 +982,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
/* Wait for vblank interrupt bit to set */
- if (_intel_wait_for(dev,
- I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS,
- 50, 1, "915vbl"))
+ if (wait_for(I915_READ(pipestat_reg) &
+ PIPE_VBLANK_INTERRUPT_STATUS,
+ 50))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
@@ -874,15 +1008,16 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (INTEL_INFO(dev)->gen >= 4) {
- int reg = PIPECONF(pipe);
+ int reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
- if (_intel_wait_for(dev,
- (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
- 1, "915pip"))
- DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
+ 100))
+ WARN(1, "pipe_off wait timed out\n");
} else {
u32 last_line, line_mask;
int reg = PIPEDSL(pipe);
@@ -896,11 +1031,11 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
/* Wait for the display line to settle */
do {
last_line = I915_READ(reg) & line_mask;
- DELAY(5000);
+ mdelay(5);
} while (((I915_READ(reg) & line_mask) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ WARN(1, "pipe_off wait timed out\n");
}
}
@@ -920,19 +1055,19 @@ static void assert_pll(struct drm_i915_private *dev_priv,
reg = DPLL(pipe);
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
- if (cur_state != state)
- printf("PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ WARN(cur_state != state,
+ "PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
+ struct intel_pch_pll *pll,
+ struct intel_crtc *crtc,
bool state)
{
- int reg;
u32 val;
bool cur_state;
@@ -941,30 +1076,37 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
return;
}
- if (!intel_crtc->pch_pll) {
- printf("asserting PCH PLL enabled with no PLL\n");
+ if (WARN (!pll,
+ "asserting PCH PLL %s with no PLL\n", state_string(state)))
return;
- }
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ val = I915_READ(pll->pll_reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+ pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+ /* Make sure the selected PLL is correctly attached to the transcoder */
+ if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
u32 pch_dpll;
pch_dpll = I915_READ(PCH_DPLL_SEL);
-
- /* Make sure the selected PLL is enabled to the transcoder */
- KASSERT(((pch_dpll >> (4 * intel_crtc->pipe)) & 8) != 0,
- ("transcoder %d PLL not enabled\n", intel_crtc->pipe));
+ cur_state = pll->pll_reg == _PCH_DPLL_B;
+ if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+ "PLL[%d] not attached to this transcoder %d: %08x\n",
+ cur_state, crtc->pipe, pch_dpll)) {
+ cur_state = !!(val >> (4*crtc->pipe + 3));
+ WARN(cur_state != state,
+ "PLL[%d] not %s on this transcoder %d: %08x\n",
+ pll->pll_reg == _PCH_DPLL_B,
+ state_string(state),
+ crtc->pipe,
+ val);
+ }
}
-
- reg = intel_crtc->pch_pll->pll_reg;
- val = I915_READ(reg);
- cur_state = !!(val & DPLL_VCO_ENABLE);
- if (cur_state != state)
- printf("PCH PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
}
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -972,10 +1114,12 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
int reg;
u32 val;
bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (IS_HASWELL(dev_priv->dev)) {
/* On Haswell, DDI is used instead of FDI_TX_CTL */
- reg = TRANS_DDI_FUNC_CTL(pipe);
+ reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
@@ -983,9 +1127,9 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
val = I915_READ(reg);
cur_state = !!(val & FDI_TX_ENABLE);
}
- if (cur_state != state)
- printf("FDI TX state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ WARN(cur_state != state,
+ "FDI TX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
@@ -997,17 +1141,12 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
- return;
- } else {
- reg = FDI_RX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_RX_ENABLE);
- }
- if (cur_state != state)
- printf("FDI RX state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ WARN(cur_state != state,
+ "FDI RX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
@@ -1028,8 +1167,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
- if (!(val & FDI_TX_PLL_ENABLE))
- printf("FDI TX PLL assertion failure, should be active but is disabled\n");
+ WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
@@ -1038,14 +1176,9 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
- return;
- }
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
- if (!(val & FDI_RX_PLL_ENABLE))
- printf("FDI RX PLL assertion failure, should be active but is disabled\n");
+ WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1072,8 +1205,8 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
- if (panel_pipe == pipe && locked)
- printf("panel assertion failure, pipe %c regs locked\n",
+ WARN(panel_pipe == pipe && locked,
+ "panel assertion failure, pipe %c regs locked\n",
pipe_name(pipe));
}
@@ -1083,17 +1216,19 @@ void assert_pipe(struct drm_i915_private *dev_priv,
int reg;
u32 val;
bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
/* if we need the pipe A quirk it must be always on */
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
- if (cur_state != state)
- printf("pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), state_string(state), state_string(cur_state));
+ WARN(cur_state != state,
+ "pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
}
static void assert_plane(struct drm_i915_private *dev_priv,
@@ -1106,9 +1241,9 @@ static void assert_plane(struct drm_i915_private *dev_priv,
reg = DSPCNTR(plane);
val = I915_READ(reg);
cur_state = !!(val & DISPLAY_PLANE_ENABLE);
- if (cur_state != state)
- printf("plane %c assertion failure, (expected %s, current %s)\n",
- plane_name(plane), state_string(state), state_string(cur_state));
+ WARN(cur_state != state,
+ "plane %c assertion failure (expected %s, current %s)\n",
+ plane_name(plane), state_string(state), state_string(cur_state));
}
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
@@ -1125,9 +1260,9 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
if (HAS_PCH_SPLIT(dev_priv->dev)) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
- if ((val & DISPLAY_PLANE_ENABLE) != 0)
- printf("plane %c assertion failure, should be disabled but not\n",
- plane_name(pipe));
+ WARN((val & DISPLAY_PLANE_ENABLE),
+ "plane %c assertion failure, should be disabled but not\n",
+ plane_name(pipe));
return;
}
@@ -1137,8 +1272,8 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
DISPPLANE_SEL_PIPE_SHIFT;
- if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe)
- printf("plane %c assertion failure, should be off on pipe %c but is still active\n",
+ WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+ "plane %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(i), pipe_name(pipe));
}
}
@@ -1156,8 +1291,7 @@ static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
- if (!enabled)
- printf("PCH refclk assertion failure, should be active but is disabled\n");
+ WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
@@ -1170,8 +1304,8 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
reg = TRANSCONF(pipe);
val = I915_READ(reg);
enabled = !!(val & TRANS_ENABLE);
- if (enabled)
- printf("transcoder assertion failed, should be off on pipe %c but is still active\n",
+ WARN(enabled,
+ "transcoder assertion failed, should be off on pipe %c but is still active\n",
pipe_name(pipe));
}
@@ -1244,18 +1378,26 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
- if (dp_pipe_enabled(dev_priv, pipe, port_sel, val))
- printf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
+ "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
+
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+ && (val & DP_PIPEB_SELECT),
+ "IBX PCH dp port still using transcoder B\n");
}
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- if (hdmi_pipe_enabled(dev_priv, val, pipe))
- printf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+ WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
+ "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
+
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
+ && (val & SDVO_PIPE_B_SELECT),
+ "IBX PCH hdmi port still using transcoder B\n");
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
@@ -1270,14 +1412,14 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
reg = PCH_ADPA;
val = I915_READ(reg);
- if (adpa_pipe_enabled(dev_priv, val, pipe))
- printf("PCH VGA enabled on transcoder %c, should be disabled\n",
+ WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
reg = PCH_LVDS;
val = I915_READ(reg);
- if (lvds_pipe_enabled(dev_priv, val, pipe))
- printf("PCH LVDS enabled on transcoder %c, should be disabled\n",
+ WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
@@ -1295,6 +1437,8 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
* protect mechanism may be enabled.
*
* Note! This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
*/
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -1302,7 +1446,7 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* No really, not for ILK+ */
- KASSERT(dev_priv->info->gen < 5, ("Wrong device gen"));
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
@@ -1315,13 +1459,13 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* We do this three times for luck */
I915_WRITE(reg, val);
POSTING_READ(reg);
- DELAY(150); /* wait for warmup */
+ udelay(150); /* wait for warmup */
I915_WRITE(reg, val);
POSTING_READ(reg);
- DELAY(150); /* wait for warmup */
+ udelay(150); /* wait for warmup */
I915_WRITE(reg, val);
POSTING_READ(reg);
- DELAY(150); /* wait for warmup */
+ udelay(150); /* wait for warmup */
}
/**
@@ -1354,48 +1498,57 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* SBI access */
static void
-intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
{
+ u32 tmp;
- mtx_lock(&dev_priv->dpio_lock);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0, 100)) {
+ sx_xlock(&dev_priv->dpio_lock);
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
I915_WRITE(SBI_ADDR, (reg << 16));
I915_WRITE(SBI_DATA, value);
- I915_WRITE(SBI_CTL_STAT,
- SBI_BUSY |
- SBI_CTL_OP_CRWR);
- if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ if (destination == SBI_ICLK)
+ tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+ else
+ tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
goto out_unlock;
}
out_unlock:
- mtx_unlock(&dev_priv->dpio_lock);
+ sx_xunlock(&dev_priv->dpio_lock);
}
static u32
-intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+ enum intel_sbi_destination destination)
{
u32 value = 0;
- mtx_lock(&dev_priv->dpio_lock);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0, 100)) {
+ sx_xlock(&dev_priv->dpio_lock);
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
I915_WRITE(SBI_ADDR, (reg << 16));
- I915_WRITE(SBI_CTL_STAT,
- SBI_BUSY |
- SBI_CTL_OP_CRRD);
- if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ if (destination == SBI_ICLK)
+ value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
goto out_unlock;
@@ -1404,19 +1557,19 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
value = I915_READ(SBI_DATA);
out_unlock:
- mtx_unlock(&dev_priv->dpio_lock);
+ sx_xunlock(&dev_priv->dpio_lock);
return value;
}
/**
- * intel_enable_pch_pll - enable PCH PLL
+ * ironlake_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
+static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
@@ -1424,15 +1577,13 @@ static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
u32 val;
/* PCH PLLs only available on ILK, SNB and IVB */
- KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
+ BUG_ON(dev_priv->info->gen < 5);
pll = intel_crtc->pch_pll;
if (pll == NULL)
return;
- if (pll->refcount == 0) {
- DRM_DEBUG_KMS("pll->refcount == 0\n");
+ if (WARN_ON(pll->refcount == 0))
return;
- }
DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
pll->pll_reg, pll->active, pll->on,
@@ -1442,7 +1593,7 @@ static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
assert_pch_refclk_enabled(dev_priv);
if (pll->active++ && pll->on) {
- assert_pch_pll_enabled(dev_priv, intel_crtc);
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
return;
}
@@ -1453,7 +1604,7 @@ static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
- DELAY(200);
+ udelay(200);
pll->on = true;
}
@@ -1466,27 +1617,24 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
u32 val;
/* PCH only available on ILK+ */
- KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
+ BUG_ON(dev_priv->info->gen < 5);
if (pll == NULL)
- return;
+ return;
- if (pll->refcount == 0) {
- DRM_DEBUG_KMS("pll->refcount == 0\n");
+ if (WARN_ON(pll->refcount == 0))
return;
- }
DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
pll->pll_reg, pll->active, pll->on,
intel_crtc->base.base.id);
- if (pll->active == 0) {
- DRM_DEBUG_KMS("pll->active == 0\n");
- assert_pch_pll_disabled(dev_priv, intel_crtc);
+ if (WARN_ON(pll->active == 0)) {
+ assert_pch_pll_disabled(dev_priv, pll, NULL);
return;
}
if (--pll->active) {
- assert_pch_pll_enabled(dev_priv, intel_crtc);
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
return;
}
@@ -1500,32 +1648,37 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
- DELAY(200);
+ udelay(200);
pll->on = false;
}
-static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
+static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- int reg;
- u32 val, pipeconf_val;
+ struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ uint32_t reg, val, pipeconf_val;
/* PCH only available on ILK+ */
- KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
+ BUG_ON(dev_priv->info->gen < 5);
/* Make sure PCH DPLL is enabled */
assert_pch_pll_enabled(dev_priv,
+ to_intel_crtc(crtc)->pch_pll,
to_intel_crtc(crtc));
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
- return;
+ if (HAS_PCH_CPT(dev)) {
+ /* Workaround: Set the timing override bit before enabling the
+ * pch transcoder. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
}
reg = TRANSCONF(pipe);
@@ -1552,16 +1705,46 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(reg, val | TRANS_ENABLE);
- if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE,
- 100, 1, "915trc"))
+ if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
-static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- int reg;
- u32 val;
+ u32 val, pipeconf_val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, (enum pipe)cpu_transcoder);
+ assert_fdi_rx_enabled(dev_priv, (enum pipe)TRANSCODER_A);
+
+ /* Workaround: set timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
+
+ val = TRANS_ENABLE;
+ pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+ PIPECONF_INTERLACED_ILK)
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
+ I915_WRITE(TRANSCONF(TRANSCODER_A), val);
+ if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t reg, val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1575,10 +1758,33 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
- if (_intel_wait_for(dev_priv->dev,
- (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50,
- 1, "915trd"))
+ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder %d\n", pipe);
+
+ if (!HAS_PCH_IBX(dev)) {
+ /* Workaround: Clear the timing override chicken bit again. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
+ }
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(_TRANSACONF);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(_TRANSACONF, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("Failed to disable PCH transcoder\n");
+
+ /* Workaround: clear timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
}
/**
@@ -1598,9 +1804,17 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool pch_port)
{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ enum transcoder pch_transcoder;
int reg;
u32 val;
+ if (IS_HASWELL(dev_priv->dev))
+ pch_transcoder = TRANSCODER_A;
+ else
+ pch_transcoder = (enum transcoder)pipe;
+
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1611,13 +1825,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv, pipe);
- assert_fdi_tx_pll_enabled(dev_priv, pipe);
+ assert_fdi_rx_pll_enabled(dev_priv, (enum pipe)pch_transcoder);
+ assert_fdi_tx_pll_enabled(dev_priv, (enum pipe)cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE)
return;
@@ -1641,6 +1855,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
int reg;
u32 val;
@@ -1654,7 +1870,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
@@ -1670,8 +1886,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
- I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
- I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+ if (dev_priv->info->gen >= 4)
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+ else
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
}
/**
@@ -1725,59 +1943,6 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
intel_wait_for_vblank(dev_priv->dev, pipe);
}
-static void disable_pch_dp(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg, u32 port_sel)
-{
- u32 val = I915_READ(reg);
- if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
- DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
- I915_WRITE(reg, val & ~DP_PORT_EN);
- }
-}
-
-static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
-{
- u32 val = I915_READ(reg);
- if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
- DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
- reg, pipe);
- I915_WRITE(reg, val & ~PORT_ENABLE);
- }
-}
-
-/* Disable any ports connected to this transcoder */
-static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 reg, val;
-
- val = I915_READ(PCH_PP_CONTROL);
- I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
-
- disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
-
- reg = PCH_ADPA;
- val = I915_READ(reg);
- if (adpa_pipe_enabled(dev_priv, val, pipe))
- I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
-
- reg = PCH_LVDS;
- val = I915_READ(reg);
- if (lvds_pipe_enabled(dev_priv, val, pipe)) {
- DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
- I915_WRITE(reg, val & ~LVDS_PORT_EN);
- POSTING_READ(reg);
- DELAY(100);
- }
-
- disable_pch_hdmi(dev_priv, pipe, HDMIB);
- disable_pch_hdmi(dev_priv, pipe, HDMIC);
- disable_pch_hdmi(dev_priv, pipe, HDMID);
-}
-
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -1787,7 +1952,6 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
u32 alignment;
int ret;
- alignment = 0; /* shut gcc */
switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -1806,7 +1970,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
DRM_ERROR("Y tiled not allowed for scan out buffers\n");
return -EINVAL;
default:
- KASSERT(0, ("Wrong tiling for fb obj"));
+ BUG();
}
dev_priv->mm.interruptible = false;
@@ -1829,7 +1993,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
return 0;
err_unpin:
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin(obj);
err_interruptible:
dev_priv->mm.interruptible = true;
return ret;
@@ -1838,7 +2002,34 @@ err_interruptible:
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin_fence(obj);
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin(obj);
+}
+
+/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
+ * is assumed to be a power-of-two. */
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int cpp,
+ unsigned int pitch)
+{
+ if (tiling_mode != I915_TILING_NONE) {
+ unsigned int tile_rows, tiles;
+
+ tile_rows = *y / 8;
+ *y %= 8;
+
+ tiles = *x / (512/cpp);
+ *x %= 512/cpp;
+
+ return tile_rows * pitch * 8 + tiles * 4096;
+ } else {
+ unsigned int offset;
+
+ offset = *y * pitch + *x * cpp;
+ *y = 0;
+ *x = (offset & 4095) / cpp;
+ return offset & -4096;
+ }
}
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -1850,7 +2041,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
- unsigned long Start, Offset;
+ unsigned long linear_offset;
u32 dspcntr;
u32 reg;
@@ -1870,22 +2061,35 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->bits_per_pixel) {
- case 8:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
- case 16:
- if (fb->depth == 15)
- dspcntr |= DISPPLANE_BGRX555;
- else
- dspcntr |= DISPPLANE_BGRX565;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
break;
- case 24:
- case 32:
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
dspcntr |= DISPPLANE_BGRX888;
break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
+ break;
default:
- DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
return -EINVAL;
}
@@ -1898,18 +2102,28 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
- Start = obj->gtt_offset;
- Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_crtc->dspaddr_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
+ } else {
+ intel_crtc->dspaddr_offset = linear_offset;
+ }
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+ obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane),
+ obj->gtt_offset + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPADDR(plane), Offset);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
- I915_WRITE(DSPADDR(plane), Start + Offset);
+ I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
POSTING_READ(reg);
return 0;
@@ -1924,7 +2138,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
- unsigned long Start, Offset;
+ unsigned long linear_offset;
u32 dspcntr;
u32 reg;
@@ -1945,32 +2159,31 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->bits_per_pixel) {
- case 8:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
- case 16:
- if (fb->depth != 16) {
- DRM_ERROR("bpp 16, depth %d\n", fb->depth);
- return -EINVAL;
- }
-
+ case DRM_FORMAT_RGB565:
dspcntr |= DISPPLANE_BGRX565;
break;
- case 24:
- case 32:
- if (fb->depth == 24)
- dspcntr |= DISPPLANE_BGRX888;
- else if (fb->depth == 30)
- dspcntr |= DISPPLANE_BGRX101010;
- else {
- DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel,
- fb->depth);
- return -EINVAL;
- }
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
return -EINVAL;
}
@@ -1984,15 +2197,24 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- Start = obj->gtt_offset;
- Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ intel_crtc->dspaddr_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+ obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
- I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPADDR(plane), Offset);
+ I915_MODIFY_DISPBASE(DSPSURF(plane),
+ obj->gtt_offset + intel_crtc->dspaddr_offset);
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+ } else {
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
+ }
POSTING_READ(reg);
return 0;
@@ -2018,14 +2240,14 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
{
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
mtx_lock(&dev->event_lock);
- while (!atomic_load_acq_int(&dev_priv->mm.wedged) &&
- atomic_load_acq_int(&obj->pending_flip) != 0) {
- msleep(&obj->pending_flip, &dev->event_lock,
+ while (!(atomic_read(&dev_priv->mm.wedged) ||
+ atomic_read(&obj->pending_flip) == 0)) {
+ msleep(&dev_priv->pending_flip_queue, &dev->event_lock,
0, "915flp", 0);
}
mtx_unlock(&dev->event_lock);
@@ -2045,18 +2267,45 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
return ret;
}
+static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+
+ switch (intel_crtc->pipe) {
+ case 0:
+ master_priv->sarea_priv->pipeA_x = x;
+ master_priv->sarea_priv->pipeA_y = y;
+ break;
+ case 1:
+ master_priv->sarea_priv->pipeB_x = x;
+ master_priv->sarea_priv->pipeB_y = y;
+ break;
+ default:
+ break;
+ }
+}
+
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_framebuffer *old_fb;
int ret;
/* no fb bound */
- if (!crtc->fb) {
+ if (!fb) {
DRM_ERROR("No FB bound\n");
return 0;
}
@@ -2070,7 +2319,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
DRM_LOCK(dev);
ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(crtc->fb)->obj,
+ to_intel_framebuffer(fb)->obj,
NULL);
if (ret != 0) {
DRM_UNLOCK(dev);
@@ -2078,17 +2327,22 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- if (old_fb)
- intel_finish_fb(old_fb);
+ if (crtc->fb)
+ intel_finish_fb(crtc->fb);
- ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
+ ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret) {
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
DRM_UNLOCK(dev);
DRM_ERROR("failed to update base address\n");
return ret;
}
+ old_fb = crtc->fb;
+ crtc->fb = fb;
+ crtc->x = x;
+ crtc->y = y;
+
if (old_fb) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
@@ -2097,20 +2351,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_update_fbc(dev);
DRM_UNLOCK(dev);
- if (!dev->primary->master)
- return 0;
-
- master_priv = dev->primary->master->driver_priv;
- if (!master_priv->sarea_priv)
- return 0;
-
- if (intel_crtc->pipe) {
- master_priv->sarea_priv->pipeB_x = x;
- master_priv->sarea_priv->pipeB_y = y;
- } else {
- master_priv->sarea_priv->pipeA_x = x;
- master_priv->sarea_priv->pipeA_y = y;
- }
+ intel_crtc_update_sarea_pos(crtc, x, y);
return 0;
}
@@ -2149,7 +2390,7 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
- DELAY(500);
+ udelay(500);
}
static void intel_fdi_normal_train(struct drm_crtc *crtc)
@@ -2185,7 +2426,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
/* wait one idle pattern time */
POSTING_READ(reg);
- DELAY(1000);
+ udelay(1000);
/* IVB wants error correction enabled */
if (IS_IVYBRIDGE(dev))
@@ -2193,16 +2434,27 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+static void ivb_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+ struct intel_crtc *pipe_C_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+ uint32_t temp;
- flags |= FDI_PHASE_SYNC_OVR(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
- flags |= FDI_PHASE_SYNC_EN(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
- POSTING_READ(SOUTH_CHICKEN1);
+ /* When everything is off disable fdi C so that we could enable fdi B
+ * with all lanes. XXX: This misses the case where a pipe is not using
+ * any pch resources and so doesn't need any fdi lanes. */
+ if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("disabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ }
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -2227,7 +2479,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_RX_BIT_LOCK;
I915_WRITE(reg, temp);
I915_READ(reg);
- DELAY(150);
+ udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
@@ -2245,14 +2497,12 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp | FDI_RX_ENABLE);
POSTING_READ(reg);
- DELAY(150);
+ udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
- if (HAS_PCH_IBX(dev)) {
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
- FDI_RX_PHASE_SYNC_POINTER_EN);
- }
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
@@ -2282,7 +2532,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
POSTING_READ(reg);
- DELAY(150);
+ udelay(150);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
@@ -2327,7 +2577,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
POSTING_READ(reg);
- DELAY(150);
+ udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
@@ -2341,6 +2591,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
@@ -2353,10 +2606,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp | FDI_RX_ENABLE);
POSTING_READ(reg);
- DELAY(150);
-
- if (HAS_PCH_CPT(dev))
- cpt_phase_pointer_enable(dev, pipe);
+ udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
@@ -2366,19 +2616,18 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
POSTING_READ(reg);
- DELAY(500);
+ udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
if (temp & FDI_RX_BIT_LOCK) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
DRM_DEBUG_KMS("FDI train 1 done.\n");
break;
}
- DELAY(50);
+ udelay(50);
}
if (retry < 5)
break;
@@ -2410,7 +2659,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
POSTING_READ(reg);
- DELAY(150);
+ udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
@@ -2420,19 +2669,18 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
POSTING_READ(reg);
- DELAY(500);
+ udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
- DELAY(50);
+ udelay(50);
}
if (retry < 5)
break;
@@ -2461,7 +2709,10 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
POSTING_READ(reg);
- DELAY(150);
+ udelay(150);
+
+ DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+ I915_READ(FDI_RX_IIR(pipe)));
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
@@ -2475,6 +2726,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
@@ -2484,7 +2738,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp | FDI_RX_ENABLE);
POSTING_READ(reg);
- DELAY(150);
+ udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
@@ -2494,7 +2748,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
POSTING_READ(reg);
- DELAY(500);
+ udelay(500);
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
@@ -2503,7 +2757,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (temp & FDI_RX_BIT_LOCK ||
(I915_READ(reg) & FDI_RX_BIT_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
+ DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
break;
}
}
@@ -2526,7 +2780,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
POSTING_READ(reg);
- DELAY(150);
+ udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
@@ -2536,7 +2790,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
POSTING_READ(reg);
- DELAY(500);
+ udelay(500);
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
@@ -2544,7 +2798,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
+ DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
break;
}
}
@@ -2554,17 +2808,13 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
+static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
- /* Write the TU size bits so error detection works */
- I915_WRITE(FDI_RX_TUSIZE1(pipe),
- I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
@@ -2575,14 +2825,14 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
POSTING_READ(reg);
- DELAY(200);
+ udelay(200);
/* Switch from Rawclk to PCDclk */
temp = I915_READ(reg);
I915_WRITE(reg, temp | FDI_PCDCLK);
POSTING_READ(reg);
- DELAY(200);
+ udelay(200);
/* On Haswell, the PLL configuration for ports and pipes is handled
* separately, as part of DDI setup */
@@ -2594,21 +2844,38 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
POSTING_READ(reg);
- DELAY(100);
+ udelay(100);
}
- }
+ }
}
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
+ struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
- flags &= ~(FDI_PHASE_SYNC_EN(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
- flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
- POSTING_READ(SOUTH_CHICKEN1);
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ udelay(100);
}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
@@ -2632,16 +2899,11 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
POSTING_READ(reg);
- DELAY(100);
+ udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_EN));
- } else if (HAS_PCH_CPT(dev)) {
- cpt_phase_pointer_disable(dev, pipe);
}
/* still set train pattern 1 */
@@ -2666,55 +2928,60 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
POSTING_READ(reg);
- DELAY(100);
+ udelay(100);
+}
+
+static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool pending;
+
+ if (atomic_read(&dev_priv->mm.wedged))
+ return false;
+
+ /*
+ * NOTE Linux<->FreeBSD dev->event_lock is already locked in
+ * intel_crtc_wait_for_pending_flips().
+ */
+ pending = to_intel_crtc(crtc)->unpin_work != NULL;
+
+ return pending;
}
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (crtc->fb == NULL)
return;
+ mtx_lock(&dev->event_lock);
+ while (intel_crtc_has_pending_flip(crtc)) {
+ msleep(&dev_priv->pending_flip_queue, &dev->event_lock,
+ 0, "915flp", 0);
+ }
+ mtx_unlock(&dev->event_lock);
+
DRM_LOCK(dev);
intel_finish_fb(crtc->fb);
DRM_UNLOCK(dev);
}
-static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
+ struct intel_encoder *intel_encoder;
/*
* If there's a non-PCH eDP on this crtc, it must be DP_A, and that
* must be driven by its own crtc; no sharing is possible.
*/
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
- /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
- * CPU handles all others */
- if (IS_HASWELL(dev)) {
- /* It is still unclear how this will work on PPT, so throw up a warning */
- if (!HAS_PCH_LPT(dev))
- DRM_DEBUG_KMS("Haswell: PPT\n");
-
- if (encoder->type == DRM_MODE_ENCODER_DAC) {
- DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
- return true;
- } else {
- DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
- encoder->type);
- return false;
- }
- }
-
- switch (encoder->type) {
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_EDP:
- if (!intel_encoder_is_pch_edp(&encoder->base))
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
return false;
continue;
}
@@ -2723,6 +2990,11 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
+static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
+}
+
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
@@ -2738,8 +3010,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
/* Disable SSCCTL */
intel_sbi_write(dev_priv, SBI_SSCCTL6,
- intel_sbi_read(dev_priv, SBI_SSCCTL6) |
- SBI_SSCCTL_DISABLE);
+ intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+ SBI_SSCCTL_DISABLE,
+ SBI_ICLK);
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
if (crtc->mode.clock == 20000) {
@@ -2767,12 +3040,10 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
}
/* This should not happen with any sane values */
- if ((SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
- ~SBI_SSCDIVINTPHASE_DIVSEL_MASK))
- DRM_DEBUG_KMS("DIVSEL_MASK");
- if ((SBI_SSCDIVINTPHASE_DIR(phasedir) &
- ~SBI_SSCDIVINTPHASE_INCVAL_MASK))
- DRM_DEBUG_KMS("INCVAL_MASK");
+ WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
crtc->mode.clock,
@@ -2782,29 +3053,28 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
phaseinc);
/* Program SSCDIVINTPHASE6 */
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp);
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
- intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp);
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp &= ~SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp);
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
/* Wait for initialization time */
- DELAY(24);
+ udelay(24);
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
@@ -2827,15 +3097,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_transcoder_disabled(dev_priv, pipe);
+ /* Write the TU size bits before fdi link training, so that error
+ * detection works. */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- intel_enable_pch_pll(intel_crtc);
+ /* XXX: pch pll's can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+ * transcoder that already use the clock when we share it.
+ *
+ * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
+ * unconditionally resets the pll - we need that to have the right LVDS
+ * enable sequence. */
+ ironlake_enable_pch_pll(intel_crtc);
- if (HAS_PCH_LPT(dev)) {
- DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
- lpt_program_iclkip(crtc);
- } else if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev)) {
u32 sel;
temp = I915_READ(PCH_DPLL_SEL);
@@ -2872,8 +3151,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
- if (!IS_HASWELL(dev))
- intel_fdi_normal_train(crtc);
+ intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
@@ -2905,15 +3183,37 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp |= TRANS_DP_PORT_SEL_D;
break;
default:
- DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
- temp |= TRANS_DP_PORT_SEL_B;
- break;
+ BUG();
}
I915_WRITE(reg, temp);
}
- intel_enable_transcoder(dev_priv, pipe);
+ ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ assert_transcoder_disabled(dev_priv, (enum pipe)TRANSCODER_A);
+
+ lpt_program_iclkip(crtc);
+
+ /* Set transcoder timing. */
+ I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
+ I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
+ I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
+
+ I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
+ I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
+ I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
+ I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
+
+ lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
@@ -2924,7 +3224,7 @@ static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
return;
if (pll->refcount == 0) {
- printf("bad PCH PLL refcount\n");
+ WARN(1, "bad PCH PLL refcount\n");
return;
}
@@ -2974,7 +3274,7 @@ static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u3
}
/* Ok no matching timings, maybe there's a free one? */
- for (i = 0; i < dev_priv->num_pch_pll; i++) { /* XXXKIB: HACK */
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
pll = &dev_priv->pch_plls[i];
if (pll->refcount == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
@@ -2995,7 +3295,7 @@ prepare: /* separate function? */
/* Wait for the clocks to stabilize before rewriting the regs */
I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(pll->pll_reg);
- DELAY(150);
+ udelay(150);
I915_WRITE(pll->fp0_reg, fp);
I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
@@ -3006,18 +3306,13 @@ prepare: /* separate function? */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
+ int dslreg = PIPEDSL(pipe);
u32 temp;
temp = I915_READ(dslreg);
- DELAY(500);
- if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1, "915cp1")) {
- /* Without this, mode sets may fail silently on FDI */
- I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
- DELAY(250);
- I915_WRITE(tc2reg, 0);
- if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1,
- "915cp2"))
+ udelay(500);
+ if (wait_for(I915_READ(dslreg) != temp, 5)) {
+ if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
}
}
@@ -3027,11 +3322,14 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 temp;
bool is_pch_port;
+ WARN_ON(!crtc->enabled);
+
if (intel_crtc->active)
return;
@@ -3044,39 +3342,149 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- is_pch_port = intel_crtc_driving_pch(crtc);
+ is_pch_port = ironlake_crtc_driving_pch(crtc);
if (is_pch_port) {
- ironlake_fdi_pll_enable(crtc);
+ /* Note: FDI PLL enabling _must_ be done before we enable the
+ * cpu pipes, hence this is separate from all the other fdi/pch
+ * enabling. */
+ ironlake_fdi_pll_enable(intel_crtc);
} else {
- ironlake_fdi_disable(crtc);
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
}
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ else
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
intel_enable_pipe(dev_priv, pipe, is_pch_port);
intel_enable_plane(dev_priv, plane, pipe);
if (is_pch_port)
ironlake_pch_enable(crtc);
+ DRM_LOCK(dev);
+ intel_update_fbc(dev);
+ DRM_UNLOCK(dev);
+
+ intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ if (HAS_PCH_CPT(dev))
+ intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+
+ /*
+ * There seems to be a race in PCH platform hw (at least on some
+ * outputs) where an enabled pipe still completes any pageflip right
+ * away (as if the pipe is off) instead of waiting for vblank. As soon
+ * as the first vblank happend, everything works as expected. Hence just
+ * wait for one vblank before returning to avoid strange things
+ * happening.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
+static void haswell_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ bool is_pch_port;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ is_pch_port = haswell_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ dev_priv->display.fdi_link_train(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ intel_ddi_enable_pipe_clock(intel_crtc);
+
+ /* Enable panel fitting for eDP */
+ if (dev_priv->pch_pf_size &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
intel_crtc_load_lut(crtc);
+ intel_ddi_set_pipe_settings(crtc);
+ intel_ddi_enable_pipe_func(crtc);
+
+ intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ if (is_pch_port)
+ lpt_pch_enable(crtc);
+
DRM_LOCK(dev);
intel_update_fbc(dev);
DRM_UNLOCK(dev);
intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ /*
+ * There seems to be a race in PCH platform hw (at least on some
+ * outputs) where an enabled pipe still completes any pageflip right
+ * away (as if the pipe is off) instead of waiting for vblank. As soon
+ * as the first vblank happend, everything works as expected. Hence just
+ * wait for one vblank before returning to avoid strange things
+ * happening.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -3084,13 +3492,18 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
+
if (!intel_crtc->active)
return;
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
intel_crtc_update_cursor(crtc, false);
@@ -3106,16 +3519,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
- ironlake_fdi_disable(crtc);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
- /* This is a horrible layering violation; we should be doing this in
- * the connector/encoder ->prepare instead, but we don't always have
- * enough information there about the config to know whether it will
- * actually be necessary or just cause undesired flicker.
- */
- intel_disable_pch_ports(dev_priv, pipe);
+ ironlake_fdi_disable(crtc);
- intel_disable_transcoder(dev_priv, pipe);
+ ironlake_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
@@ -3139,7 +3549,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
break;
default:
- KASSERT(1, ("Wrong pipe %d", pipe)); /* wtf */
+ BUG(); /* wtf */
}
I915_WRITE(PCH_DPLL_SEL, temp);
}
@@ -3147,26 +3557,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
/* disable PCH DPLL */
intel_disable_pch_pll(intel_crtc);
- /* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_PCDCLK);
-
- /* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
-
- POSTING_READ(reg);
- DELAY(100);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
-
- /* Wait for the clocks to turn off. */
- POSTING_READ(reg);
- DELAY(100);
+ ironlake_fdi_pll_disable(intel_crtc);
intel_crtc->active = false;
intel_update_watermarks(dev);
@@ -3176,28 +3567,59 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
DRM_UNLOCK(dev);
}
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void haswell_crtc_disable(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ bool is_pch_port;
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
- ironlake_crtc_enable(crtc);
- break;
+ if (!intel_crtc->active)
+ return;
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
- ironlake_crtc_disable(crtc);
- break;
+ is_pch_port = haswell_crtc_driving_pch(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_update_cursor(crtc, false);
+
+ intel_disable_plane(dev_priv, plane, pipe);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+ /* Disable PF */
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+ intel_ddi_disable_pipe_clock(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ if (is_pch_port) {
+ lpt_disable_pch_transcoder(dev_priv);
+ intel_ddi_fdi_disable(crtc);
}
+
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+
+ DRM_LOCK(dev);
+ intel_update_fbc(dev);
+ DRM_UNLOCK(dev);
}
static void ironlake_crtc_off(struct drm_crtc *crtc)
@@ -3206,6 +3628,17 @@ static void ironlake_crtc_off(struct drm_crtc *crtc)
intel_put_pch_pll(intel_crtc);
}
+static void haswell_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
+ * start using it. */
+ intel_crtc->cpu_transcoder = (enum transcoder)intel_crtc->pipe;
+
+ intel_ddi_put_crtc_pll(crtc);
+}
+
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
@@ -3229,9 +3662,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ WARN_ON(!crtc->enabled);
+
if (intel_crtc->active)
return;
@@ -3248,6 +3684,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
/* Give the overlay scaler a chance to enable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, true);
intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
}
static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3255,12 +3694,18 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ u32 pctl;
+
if (!intel_crtc->active)
return;
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
@@ -3272,6 +3717,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
+
+ /* Disable pannel fitter if it is on this pipe. */
+ pctl = I915_READ(PFIT_CONTROL);
+ if ((pctl & PFIT_ENABLE) &&
+ ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
+ I915_WRITE(PFIT_CONTROL, 0);
+
intel_disable_pll(dev_priv, pipe);
intel_crtc->active = false;
@@ -3279,45 +3731,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_update_watermarks(dev);
}
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- i9xx_crtc_enable(crtc);
- break;
- case DRM_MODE_DPMS_OFF:
- i9xx_crtc_disable(crtc);
- break;
- }
-}
-
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
-/**
- * Sets the power management mode of the pipe and plane.
- */
-static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void intel_crtc_update_sarea(struct drm_crtc *crtc,
+ bool enabled)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- bool enabled;
-
- if (intel_crtc->dpms_mode == mode)
- return;
-
- intel_crtc->dpms_mode = mode;
-
- dev_priv->display.dpms(crtc, mode);
if (!dev->primary->master)
return;
@@ -3326,8 +3750,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
if (!master_priv->sarea_priv)
return;
- enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
switch (pipe) {
case 0:
master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -3343,13 +3765,42 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ bool enable = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ enable |= intel_encoder->connectors_active;
+
+ if (enable)
+ dev_priv->display.crtc_enable(crtc);
+ else
+ dev_priv->display.crtc_disable(crtc);
+
+ intel_crtc_update_sarea(crtc, enable);
+}
+
+static void intel_crtc_noop(struct drm_crtc *crtc)
+{
+}
+
static void intel_crtc_disable(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ /* crtc should still be enabled when we disable it. */
+ WARN_ON(!crtc->enabled);
+
+ dev_priv->display.crtc_disable(crtc);
+ intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
@@ -3359,63 +3810,128 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
DRM_LOCK(dev);
intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
DRM_UNLOCK(dev);
+ crtc->fb = NULL;
+ }
+
+ /* Update computed state. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ if (connector->encoder->crtc != crtc)
+ continue;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ to_intel_encoder(connector->encoder)->connectors_active = false;
}
}
-/* Prepare for a mode set.
- *
- * Note we could be a lot smarter here. We need to figure out which outputs
- * will be enabled, which disabled (in short, how the config will changes)
- * and perform the minimum necessary steps to accomplish that, e.g. updating
- * watermarks, FBC configuration, making sure PLLs are programmed correctly,
- * panel fitting is in the proper state, etc.
- */
-static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+void intel_modeset_disable(struct drm_device *dev)
{
- i9xx_crtc_disable(crtc);
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled)
+ intel_crtc_disable(crtc);
+ }
}
-static void i9xx_crtc_commit(struct drm_crtc *crtc)
+void intel_encoder_noop(struct drm_encoder *encoder)
{
- i9xx_crtc_enable(crtc);
}
-static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+void intel_encoder_destroy(struct drm_encoder *encoder)
{
- ironlake_crtc_disable(crtc);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ free(intel_encoder, DRM_MEM_KMS);
}
-static void ironlake_crtc_commit(struct drm_crtc *crtc)
+/* Simple dpms helper for encodres with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
- ironlake_crtc_enable(crtc);
+ if (mode == DRM_MODE_DPMS_ON) {
+ encoder->connectors_active = true;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ } else {
+ encoder->connectors_active = false;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ }
}
-void intel_encoder_prepare(struct drm_encoder *encoder)
+/* Cross check the actual hw state with our own modeset state tracking (and it's
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- /* lvds has its own version of prepare see intel_lvds_prepare */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+ if (connector->get_hw_state(connector)) {
+ struct intel_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc;
+ bool encoder_enabled;
+ enum pipe pipe;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+
+ WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+ "wrong connector dpms state\n");
+ WARN(connector->base.encoder != &encoder->base,
+ "active connector not linked to encoder\n");
+ WARN(!encoder->connectors_active,
+ "encoder->connectors_active not set\n");
+
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ WARN(!encoder_enabled, "encoder not enabled\n");
+ if (WARN_ON(!encoder->base.crtc))
+ return;
+
+ crtc = encoder->base.crtc;
+
+ WARN(!crtc->enabled, "crtc not enabled\n");
+ WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ WARN(pipe != to_intel_crtc(crtc)->pipe,
+ "encoder active on the wrong pipe\n");
+ }
}
-void intel_encoder_commit(struct drm_encoder *encoder)
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_device *dev = encoder->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
- /* lvds has its own version of commit see intel_lvds_commit */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+ /* All the simple cases only support two dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
- if (HAS_PCH_CPT(dev))
- intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ if (encoder->base.crtc)
+ intel_encoder_dpms(encoder, mode);
+ else
+ WARN_ON(encoder->connectors_active != false);
+
+ intel_modeset_check_state(connector->dev);
}
-void intel_encoder_destroy(struct drm_encoder *encoder)
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = connector->encoder;
- drm_encoder_cleanup(encoder);
- free(intel_encoder, DRM_MEM_KMS);
+ return encoder->get_hw_state(encoder, &pipe);
}
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -3436,6 +3952,13 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
drm_mode_set_crtcinfo(adjusted_mode, 0);
+ /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
+ * with a hsync front porch of 0.
+ */
+ if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+ return false;
+
return true;
}
@@ -3463,7 +3986,7 @@ static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
- gcfgc = pci_read_config(dev->dev, GCFGC, 2);
+ pci_read_config_word(dev->dev, GCFGC, &gcfgc);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
return 133000;
@@ -3571,21 +4094,18 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
unsigned int *pipe_bpp,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder;
struct drm_connector *connector;
+ struct intel_encoder *intel_encoder;
unsigned int display_bpc = UINT_MAX, bpc;
/* Walk the encoders & connectors on this crtc, get min bpc */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
- if (encoder->crtc != crtc)
- continue;
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
unsigned int lvds_bpc;
@@ -3603,21 +4123,10 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
continue;
}
- if (intel_encoder->type == INTEL_OUTPUT_EDP) {
- /* Use VBT settings if we have an eDP panel */
- unsigned int edp_bpc = dev_priv->edp.bpp / 3;
-
- if (edp_bpc < display_bpc) {
- DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
- display_bpc = edp_bpc;
- }
- continue;
- }
-
/* Not one of the known troublemakers, check the EDID */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- if (connector->encoder != encoder)
+ if (connector->encoder != &intel_encoder->base)
continue;
/* Don't use an invalid EDID bpc value */
@@ -3628,6 +4137,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ /* Use VBT settings if we have an eDP panel */
+ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+ if (edp_bpc && edp_bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ display_bpc = edp_bpc;
+ }
+ continue;
+ }
+
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. (Note: >12bpc will be caught below.)
@@ -3655,7 +4175,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
* also stays within the max display bpc discovered above.
*/
- switch (crtc->fb->depth) {
+ switch (fb->depth) {
case 8:
bpc = 8; /* since we go through a colormap */
break;
@@ -3688,13 +4208,37 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
return display_bpc != bpc;
}
+static int vlv_get_refclk(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int refclk = 27000; /* for DP & HDMI */
+
+ return 100000; /* only one validated so far */
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ refclk = 96000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv))
+ refclk = 100000;
+ else
+ refclk = 96000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ refclk = 100000;
+ }
+
+ return refclk;
+}
+
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if (IS_VALLEYVIEW(dev)) {
+ refclk = vlv_get_refclk(crtc);
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
@@ -3809,6 +4353,106 @@ static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
I915_WRITE(LVDS, temp);
}
+static void vlv_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll, mdiv, pdiv;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2;
+ bool is_sdvo;
+ u32 temp;
+
+ is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+ dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
+ dpll |= DPLL_REFA_CLK_ENABLE_VLV;
+ dpll |= DPLL_INTEGRATED_CLOCK_VLV;
+
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
+
+ bestn = clock->n;
+ bestm1 = clock->m1;
+ bestm2 = clock->m2;
+ bestp1 = clock->p1;
+ bestp2 = clock->p2;
+
+ /*
+ * In Valleyview PLL and program lane counter registers are exposed
+ * through DPIO interface
+ */
+ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+ mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+ mdiv |= ((bestn << DPIO_N_SHIFT));
+ mdiv |= (1 << DPIO_POST_DIV_SHIFT);
+ mdiv |= (1 << DPIO_K_SHIFT);
+ mdiv |= DPIO_ENABLE_CALIBRATION;
+ intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+
+ intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
+
+ pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
+ (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
+ (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
+ (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+ intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
+
+ intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
+ if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("DPLL %d failed to lock\n", pipe);
+
+ intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
+ POSTING_READ(DPLL_MD(pipe));
+
+ /* Now program lane control registers */
+ if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
+ || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ {
+ temp = 0x1000C4;
+ if(pipe == 1)
+ temp |= (1 << 21);
+ intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
+ }
+ if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
+ {
+ temp = 0x1000C4;
+ if(pipe == 1)
+ temp |= (1 << 21);
+ intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
+ }
+}
+
static void i9xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -3822,6 +4466,8 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
u32 dpll;
bool is_sdvo;
+ i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
@@ -3882,7 +4528,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
- DELAY(150);
+ udelay(150);
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
@@ -3898,7 +4544,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
- DELAY(150);
+ udelay(150);
if (INTEL_INFO(dev)->gen >= 4) {
u32 temp = 0;
@@ -3922,7 +4568,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
static void i8xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->dev;
@@ -3931,6 +4577,8 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
u32 dpll;
+ i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
dpll = DPLL_VGA_MODE_DIS;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -3957,13 +4605,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
- DELAY(150);
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- DELAY(150);
+ udelay(150);
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
@@ -3972,6 +4614,12 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
intel_update_lvds(crtc, clock, adjusted_mode);
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
@@ -3980,11 +4628,69 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
I915_WRITE(DPLL(pipe), dpll);
}
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ uint32_t vsyncshift;
+
+ if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ vsyncshift = adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal / 2;
+ } else {
+ vsyncshift = 0;
+ }
+
+ if (INTEL_INFO(dev)->gen > 3)
+ I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+ I915_WRITE(HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ I915_WRITE(VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+ /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+ * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+ * documented on the DDI_FUNC_CTL register description, EDP Input Select
+ * bits. */
+ if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ (pipe == PIPE_B || pipe == PIPE_C))
+ I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+ /* pipesrc controls the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3993,18 +4699,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dspcntr, pipeconf, vsyncshift;
+ u32 dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false, is_dp = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -4030,7 +4732,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
/*
* Returns a set of divisors for the desired target clock with the given
- * refclk, or false. The returned values represent the clock equation:
+ * refclk, or FALSE. The returned values represent the clock equation:
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
@@ -4061,11 +4763,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
- i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
- &reduced_clock : NULL);
-
if (IS_GEN2(dev))
- i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
+ i8xx_update_pll(crtc, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_update_pll(crtc, mode, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
else
i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
@@ -4099,13 +4804,21 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
/* default to 8bpc */
pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
if (is_dp) {
- if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
pipeconf |= PIPECONF_BPP_6 |
PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
}
}
+ if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_ENABLE |
+ I965_PIPECONF_ACTIVE;
+ }
+ }
+
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
@@ -4121,40 +4834,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (!IS_GEN2(dev) &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
- /* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_end -= 1;
- vsyncshift = adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal/2;
- } else {
+ else
pipeconf |= PIPECONF_PROGRESSIVE;
- vsyncshift = 0;
- }
-
- if (!IS_GEN3(dev))
- I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
- I915_WRITE(HTOTAL(pipe),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(pipe),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(pipe),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- I915_WRITE(VTOTAL(pipe),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(pipe),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(pipe),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
@@ -4163,8 +4848,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(DSPPOS(plane), 0);
- I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
@@ -4175,17 +4858,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ ret = intel_pipe_set_base(crtc, x, y, fb);
intel_update_watermarks(dev);
return ret;
}
-/*
- * Initialize reference clocks when the driver loads
- */
-void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4256,7 +4936,7 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
/* Get SSC going before enabling the outputs */
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
- DELAY(200);
+ udelay(200);
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
@@ -4273,7 +4953,7 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
- DELAY(200);
+ udelay(200);
} else {
DRM_DEBUG_KMS("Disabling SSC entirely\n");
@@ -4284,7 +4964,7 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
- DELAY(200);
+ udelay(200);
/* Turn off the SSC source */
temp &= ~DREF_SSC_SOURCE_MASK;
@@ -4295,8 +4975,184 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
- DELAY(200);
+ udelay(200);
+ }
+}
+
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ bool has_vga = false;
+ bool is_sdv = false;
+ u32 tmp;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_vga = true;
+ break;
+ }
}
+
+ if (!has_vga)
+ return;
+
+ /* XXX: Rip out SDV support once Haswell ships for real. */
+ if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+ is_sdv = true;
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ udelay(24);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ if (!is_sdv) {
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+ 100))
+ DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+ tmp &= ~(0x3 << 6);
+ tmp |= (1 << 6) | (1 << 0);
+ intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+ }
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+ tmp |= 0x7FFF;
+ intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ }
+
+ /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+ tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+ tmp |= SBI_DBUFF0_ENABLE;
+ intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ ironlake_init_pch_refclk(dev);
+ else if (HAS_PCH_LPT(dev))
+ lpt_init_pch_refclk(dev);
}
static int ironlake_get_refclk(struct drm_crtc *crtc)
@@ -4304,15 +5160,11 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *edp_encoder = NULL;
int num_connectors = 0;
bool is_lvds = false;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -4333,86 +5185,117 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
return 120000;
}
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+static void ironlake_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+ bool dither)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int refclk, num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
- bool ok, has_reduced_clock = false, is_sdvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder, *edp_encoder = NULL;
- const intel_limit_t *limit;
- int ret;
- struct fdi_m_n m_n = {0};
- u32 temp;
- int target_clock, pixel_multiplier, lane, link_bw, factor;
- unsigned int pipe_bpp;
- bool dither;
- bool is_cpu_edp = false, is_pch_edp = false;
+ uint32_t val;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
+ val = I915_READ(PIPECONF(pipe));
- switch (encoder->type) {
+ val &= ~PIPE_BPC_MASK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ val |= PIPE_6BPC;
+ break;
+ case 24:
+ val |= PIPE_8BPC;
+ break;
+ case 30:
+ val |= PIPE_10BPC;
+ break;
+ case 36:
+ val |= PIPE_12BPC;
+ break;
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ BUG();
+ }
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(pipe), val);
+ POSTING_READ(PIPECONF(pipe));
+}
+
+static void haswell_set_pipeconf(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ bool dither)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(cpu_transcoder));
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK_HSW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(cpu_transcoder), val);
+ POSTING_READ(PIPECONF(cpu_transcoder));
+}
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ bool *has_reduced_clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ int refclk;
+ const intel_limit_t *limit;
+ bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
- if (encoder->needs_tv_clock)
+ if (intel_encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
- break;
- case INTEL_OUTPUT_EDP:
- is_dp = true;
- if (intel_encoder_is_pch_edp(&encoder->base))
- is_pch_edp = true;
- else
- is_cpu_edp = true;
- edp_encoder = encoder;
- break;
}
-
- num_connectors++;
}
refclk = ironlake_get_refclk(crtc);
/*
* Returns a set of divisors for the desired target clock with the given
- * refclk, or false. The returned values represent the clock equation:
+ * refclk, or FALSE. The returned values represent the clock equation:
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
- &clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
+ ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ clock);
+ if (!ret)
+ return false;
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
@@ -4421,29 +5304,136 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
* by using the FP0/FP1. In such case we will disable the LVDS
* downclock feature.
*/
- has_reduced_clock = limit->find_pll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk,
- &clock,
- &reduced_clock);
+ *has_reduced_clock = limit->find_pll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk,
+ clock,
+ reduced_clock);
}
- /* SDVO TV has fixed PLL values depend on its clock range,
- this mirrors vbios setting. */
- if (is_sdvo && is_tv) {
- if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
- clock.p1 = 2;
- clock.p2 = 10;
- clock.n = 3;
- clock.m1 = 16;
- clock.m2 = 8;
- } else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
- clock.p1 = 1;
- clock.p2 = 10;
- clock.n = 6;
- clock.m1 = 12;
- clock.m2 = 8;
+
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+
+ return true;
+}
+
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t temp;
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ if (temp & FDI_BC_BIFURCATION_SELECT)
+ return;
+
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp |= FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("enabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+ DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ if (intel_crtc->fdi_lanes > 4) {
+ DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 4;
+
+ return false;
+ }
+
+ if (dev_priv->num_pipe == 2)
+ return true;
+
+ switch (intel_crtc->pipe) {
+ case PIPE_A:
+ return true;
+ case PIPE_B:
+ if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+ intel_crtc->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 2;
+
+ return false;
+ }
+
+ if (intel_crtc->fdi_lanes > 2)
+ WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ else
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ return true;
+ case PIPE_C:
+ if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
+ if (intel_crtc->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 2;
+
+ return false;
+ }
+ } else {
+ DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+ return false;
+ }
+
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ return true;
+ default:
+ BUG();
+ }
+}
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ return bps / (link_bw * 8) + 1;
+}
+
+static void ironlake_set_m_n(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ struct intel_encoder *intel_encoder, *edp_encoder = NULL;
+ struct fdi_m_n m_n = {0};
+ int target_clock, pixel_multiplier, lane, link_bw;
+ bool is_dp = false, is_cpu_edp = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+ is_cpu_edp = true;
+ edp_encoder = intel_encoder;
+ break;
}
}
@@ -4453,16 +5443,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (is_cpu_edp) {
- target_clock = mode->clock;
intel_edp_link_config(edp_encoder, &lane, &link_bw);
} else {
- /* [e]DP over FDI requires target mode clock
- instead of link clock */
- if (is_dp)
- target_clock = mode->clock;
- else
- target_clock = adjusted_mode->clock;
-
/* FDI is a binary signal running at ~2.7GHz, encoding
* each output octet as 10 bits. The actual frequency
* is stored as a divider into a 100MHz clock, and the
@@ -4473,43 +5455,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
}
- /* determine panel color depth */
- temp = I915_READ(PIPECONF(pipe));
- temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
- switch (pipe_bpp) {
- case 18:
- temp |= PIPE_6BPC;
- break;
- case 24:
- temp |= PIPE_8BPC;
- break;
- case 30:
- temp |= PIPE_10BPC;
- break;
- case 36:
- temp |= PIPE_12BPC;
- break;
- default:
- printf("intel_choose_pipe_bpp returned invalid value %d\n",
- pipe_bpp);
- temp |= PIPE_8BPC;
- pipe_bpp = 24;
- break;
- }
-
- intel_crtc->bpp = pipe_bpp;
- I915_WRITE(PIPECONF(pipe), temp);
+ /* [e]DP over FDI requires target mode clock instead of link clock. */
+ if (edp_encoder)
+ target_clock = intel_edp_target_clock(edp_encoder, mode);
+ else if (is_dp)
+ target_clock = mode->clock;
+ else
+ target_clock = adjusted_mode->clock;
- if (!lane) {
- /*
- * Account for spread spectrum to avoid
- * oversubscribing the link. Max center spread
- * is 2.5%; use 5% for safety's sake.
- */
- u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
- lane = bps / (link_bw * 8) + 1;
- }
+ if (!lane)
+ lane = ironlake_get_lanes_required(target_clock, link_bw,
+ intel_crtc->bpp);
intel_crtc->fdi_lanes = lane;
@@ -4518,10 +5474,51 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
&m_n);
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
+ I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+}
+
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, u32 fp)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ uint32_t dpll;
+ int factor, pixel_multiplier, num_connectors = 0;
+ bool is_lvds = false, is_sdvo = false, is_tv = false;
+ bool is_dp = false, is_cpu_edp = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (intel_encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
/* Enable autotuning of the PLL clock (if permissible) */
factor = 21;
@@ -4533,7 +5530,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else if (is_sdvo && is_tv)
factor = 20;
- if (clock.m < factor * clock.n)
+ if (clock->m < factor * clock->n)
fp |= FP_CB_TUNE;
dpll = 0;
@@ -4543,7 +5540,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
- int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
if (pixel_multiplier > 1) {
dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
@@ -4553,11 +5550,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (clock.p2) {
+ switch (clock->p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -4583,20 +5580,79 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- /* setup pipeconf */
- pipeconf = I915_READ(PIPECONF(pipe));
+ return dpll;
+}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ struct intel_encoder *encoder;
+ u32 temp;
+ int ret;
+ bool dither, fdi_config_ok;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+ "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
- * pre-Haswell/LPT generation */
- if (HAS_PCH_LPT(dev)) {
- DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
- pipe);
- } else if (!is_cpu_edp) {
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!is_cpu_edp) {
struct intel_pch_pll *pll;
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -4647,12 +5703,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PCH_LVDS, temp);
}
- pipeconf &= ~PIPECONF_DITHER_EN;
- pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
- if ((is_lvds && dev_priv->lvds_dither) || dither) {
- pipeconf |= PIPECONF_DITHER_EN;
- pipeconf |= PIPECONF_DITHER_TYPE_SP;
- }
if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
@@ -4668,7 +5718,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* Wait for the clocks to stabilize. */
POSTING_READ(intel_crtc->pch_pll->pll_reg);
- DELAY(150);
+ udelay(150);
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
@@ -4683,76 +5733,242 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("enabling CxSR downclocking\n");
- pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- }
} else {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("disabling CxSR downclocking\n");
- pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ }
+ }
+
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+ /* Note, this also computes intel_crtc->fdi_lanes which is used below in
+ * ironlake_check_fdi_lanes. */
+ ironlake_set_m_n(crtc, mode, adjusted_mode);
+
+ fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
+
+ if (is_cpu_edp)
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+
+ ironlake_set_pipeconf(crtc, adjusted_mode, dither);
+
+ intel_wait_for_vblank(dev, pipe);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, fb);
+
+ intel_update_watermarks(dev);
+
+ intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
+ return fdi_config_ok ? ret : -EINVAL;
+}
+
+static int haswell_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll = 0, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ struct intel_encoder *encoder;
+ u32 temp;
+ int ret;
+ bool dither;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ if (is_cpu_edp)
+ intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+ else
+ intel_crtc->cpu_transcoder = pipe;
+
+ /* We are not sure yet this won't happen. */
+ WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
+ INTEL_PCH_TYPE(dev));
+
+ WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
+ num_connectors, pipe_name(pipe));
+
+ WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
+ (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
+
+ WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
+
+ if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
+ return -EINVAL;
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock,
+ &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+ drm_mode_debug_printmodeline(mode);
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
+ fp);
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its
+ * own on pre-Haswell/LPT generation */
+ if (!is_cpu_edp) {
+ struct intel_pch_pll *pll;
+
+ pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+ pipe);
+ return -EINVAL;
+ }
+ } else
+ intel_put_pch_pll(intel_crtc);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are
+ * enabled. This is an exception to the general rule that
+ * mode_set doesn't turn things on.
+ */
+ if (is_lvds) {
+ temp = I915_READ(PCH_LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1)
+ temp |= LVDS_PIPEB_SELECT;
+ else
+ temp &= ~LVDS_PIPEB_SELECT;
}
+
+ /* set the corresponsding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether
+ * we're going to set the DPLLs for dual-channel mode or
+ * not.
+ */
+ if (clock.p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP |
+ LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode
+ * (LVDS_A3_POWER_UP) appropriately here, but we need to
+ * look more thoroughly into how panels behave in the
+ * two modes.
+ */
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(PCH_LVDS, temp);
}
}
- pipeconf &= ~PIPECONF_INTERLACE_MASK;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- pipeconf |= PIPECONF_INTERLACED_ILK;
- /* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_end -= 1;
- I915_WRITE(VSYNCSHIFT(pipe),
- adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal/2);
+ if (is_dp && !is_cpu_edp) {
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
- pipeconf |= PIPECONF_PROGRESSIVE;
- I915_WRITE(VSYNCSHIFT(pipe), 0);
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery
+ * setting.*/
+ I915_WRITE(TRANSDATA_M1(pipe), 0);
+ I915_WRITE(TRANSDATA_N1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_N1(pipe), 0);
+ }
}
- I915_WRITE(HTOTAL(pipe),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(pipe),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(pipe),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- I915_WRITE(VTOTAL(pipe),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(pipe),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(pipe),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_crtc->lowfreq_avail = false;
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (intel_crtc->pch_pll) {
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(intel_crtc->pch_pll->pll_reg);
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+ }
- /* pipesrc controls the size that is scaled from, which should
- * always be the user's requested size.
- */
- I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ if (intel_crtc->pch_pll) {
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
+ intel_crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
+ }
+ }
+ }
- I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
- I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
- I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
- if (is_cpu_edp)
- ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+ if (!is_dp || is_cpu_edp)
+ ironlake_set_m_n(crtc, mode, adjusted_mode);
- I915_WRITE(PIPECONF(pipe), pipeconf);
- POSTING_READ(PIPECONF(pipe));
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (is_cpu_edp)
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- intel_wait_for_vblank(dev, pipe);
+ haswell_set_pipeconf(crtc, adjusted_mode, dither);
- I915_WRITE(DSPCNTR(plane), dspcntr);
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
- ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ ret = intel_pipe_set_base(crtc, x, y, fb);
intel_update_watermarks(dev);
@@ -4765,10 +5981,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int ret;
@@ -4776,15 +5994,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
- x, y, old_fb);
+ x, y, fb);
drm_vblank_post_modeset(dev, pipe);
- if (ret)
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
- else
- intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+ if (ret != 0)
+ return ret;
- return ret;
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->base.helper_private;
+ encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+ }
+
+ return 0;
}
static bool intel_eld_uptodate(struct drm_connector *connector,
@@ -4846,9 +6071,8 @@ static void g4x_write_eld(struct drm_connector *connector,
if (!eld[0])
return;
- if (eld[2] < (uint8_t)len)
- len = eld[2];
- DRM_DEBUG_KMS("ELD size %d\n", len);
+ len = min_t(uint8_t, eld[2], len);
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
@@ -4857,6 +6081,91 @@ static void g4x_write_eld(struct drm_connector *connector,
I915_WRITE(G4X_AUD_CNTL_ST, i);
}
+static void haswell_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ struct drm_device *dev = crtc->dev;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int pipe = to_intel_crtc(crtc)->pipe;
+ int tmp;
+
+ int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+ int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+ int aud_config = HSW_AUD_CFG(pipe);
+ int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
+
+ DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
+
+ /* Audio output enable */
+ DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Wait for 1 vertical blank */
+ intel_wait_for_vblank(dev, pipe);
+
+ /* Set ELD valid state */
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+ tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+
+ /* Enable HDMI mode */
+ tmp = I915_READ(aud_config);
+ DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+ /* clear N_programing_enable and N_value_index */
+ tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+ I915_WRITE(aud_config, tmp);
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+ eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
+ DRM_DEBUG_DRIVER("port num:%d\n", i);
+
+ len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+}
+
static void ironlake_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
@@ -4869,36 +6178,32 @@ static void ironlake_write_eld(struct drm_connector *connector,
int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
+ int pipe = to_intel_crtc(crtc)->pipe;
if (HAS_PCH_IBX(connector->dev)) {
- hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
- aud_config = IBX_AUD_CONFIG_A;
- aud_cntl_st = IBX_AUD_CNTL_ST_A;
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
- hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
- aud_config = CPT_AUD_CONFIG_A;
- aud_cntl_st = CPT_AUD_CNTL_ST_A;
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
- i = to_intel_crtc(crtc)->pipe;
- hdmiw_hdmiedid += i * 0x100;
- aud_cntl_st += i * 0x100;
- aud_config += i * 0x100;
-
- DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
i = I915_READ(aud_cntl_st);
- i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
if (!i) {
- DRM_DEBUG_KMS("Audio directed to unknown port\n");
+ DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
eldv = IBX_ELD_VALIDB;
eldv |= IBX_ELD_VALIDB << 4;
eldv |= IBX_ELD_VALIDB << 8;
} else {
- DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i);
+ DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
}
@@ -4926,11 +6231,8 @@ static void ironlake_write_eld(struct drm_connector *connector,
i &= ~IBX_ELD_ADDRESS;
I915_WRITE(aud_cntl_st, i);
- /* 84 bytes of hw ELD buffer */
- len = 21;
- if (eld[2] < (uint8_t)len)
- len = eld[2];
- DRM_DEBUG_KMS("ELD size %d\n", len);
+ len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
@@ -4951,7 +6253,7 @@ void intel_write_eld(struct drm_encoder *encoder,
if (!connector)
return;
- DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector),
connector->encoder->base.id,
@@ -5140,8 +6442,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t addr;
int ret;
- DRM_DEBUG_KMS("\n");
-
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
@@ -5184,7 +6484,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
ret = i915_gem_object_put_fence(obj);
if (ret) {
- DRM_ERROR("failed to release fence for cursor\n");
+ DRM_ERROR("failed to release fence for cursor");
goto fail_unpin;
}
@@ -5210,7 +6510,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
- i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
+ i915_gem_object_unpin(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
@@ -5225,7 +6525,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail_unpin:
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin(obj);
fail_locked:
DRM_UNLOCK(dev);
fail:
@@ -5311,6 +6611,11 @@ intel_framebuffer_create(struct drm_device *dev,
int ret;
intel_fb = malloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_fb) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return -ENOMEM;
+ }
+
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(&obj->base);
@@ -5325,7 +6630,7 @@ intel_framebuffer_create(struct drm_device *dev,
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
- u32 pitch = howmany(width * bpp, 8);
+ u32 pitch = DIV_ROUND_UP(width * bpp, 8);
return roundup2(pitch, 64);
}
@@ -5359,53 +6664,44 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
return intel_framebuffer_create(dev, &mode_cmd, obj, res);
}
-static int
+static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
- struct drm_display_mode *mode,
- struct drm_framebuffer **res)
+ struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
- if (dev_priv->fbdev == NULL) {
- *res = NULL;
- return 0;
- }
+ if (dev_priv->fbdev == NULL)
+ return NULL;
obj = dev_priv->fbdev->ifb.obj;
- if (obj == NULL) {
- *res = NULL;
- return 0;
- }
+ if (obj == NULL)
+ return NULL;
fb = &dev_priv->fbdev->ifb.base;
if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->bits_per_pixel)) {
- *res = NULL;
- return 0;
- }
+ fb->bits_per_pixel))
+ return NULL;
- if (obj->base.size < mode->vdisplay * fb->pitches[0]) {
- *res = NULL;
- return 0;
- }
+ if (obj->base.size < mode->vdisplay * fb->pitches[0])
+ return NULL;
- *res = fb;
- return 0;
+ return fb;
}
-bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old)
{
struct intel_crtc *intel_crtc;
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
struct drm_crtc *possible_crtc;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
- struct drm_framebuffer *old_fb;
+ struct drm_framebuffer *fb;
int i = -1;
int ret;
@@ -5427,21 +6723,12 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
if (encoder->crtc) {
crtc = encoder->crtc;
- intel_crtc = to_intel_crtc(crtc);
- old->dpms_mode = intel_crtc->dpms_mode;
+ old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
/* Make sure the crtc and connector are running */
- if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_crtc_helper_funcs *crtc_funcs;
-
- crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
- encoder_funcs = encoder->helper_private;
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- }
+ if (connector->dpms != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
return true;
}
@@ -5465,19 +6752,17 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
return false;
}
- encoder->crtc = crtc;
- connector->encoder = encoder;
+ intel_encoder->new_crtc = to_intel_crtc(crtc);
+ to_intel_connector(connector)->new_encoder = intel_encoder;
intel_crtc = to_intel_crtc(crtc);
- old->dpms_mode = intel_crtc->dpms_mode;
+ old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
if (!mode)
mode = &load_detect_mode;
- old_fb = crtc->fb;
-
/* We need a framebuffer large enough to accommodate all accesses
* that the plane may generate whilst we perform load detection.
* We can not rely on the fbcon either being present (we get called
@@ -5485,25 +6770,23 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
* not even exist) or that it is large enough to satisfy the
* requested mode.
*/
- ret = mode_fits_in_fbdev(dev, mode, &crtc->fb);
- if (crtc->fb == NULL) {
+ ret = 0;
+ fb = mode_fits_in_fbdev(dev, mode);
+ if (fb == NULL) {
DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
- ret = intel_framebuffer_create_for_mode(dev, mode, 24, 32,
- &crtc->fb);
- old->release_fb = crtc->fb;
+ ret = intel_framebuffer_create_for_mode(dev, mode, 24, 32, &fb);
+ old->release_fb = fb;
} else
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (ret) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- crtc->fb = old_fb;
return false;
}
- if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
+ if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
- crtc->fb = old_fb;
return false;
}
@@ -5512,23 +6795,23 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
return true;
}
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old)
{
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc = encoder->crtc;
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
if (old->load_detect_temp) {
- connector->encoder = NULL;
- drm_helper_disable_unused_functions(dev);
+ struct drm_crtc *crtc = encoder->crtc;
+
+ to_intel_connector(connector)->new_encoder = NULL;
+ intel_encoder->new_crtc = NULL;
+ intel_set_mode(crtc, NULL, 0, 0, NULL);
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -5537,10 +6820,8 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
}
/* Switch crtc and encoder back off if necessary */
- if (old->dpms_mode != DRM_MODE_DPMS_ON) {
- encoder_funcs->dpms(encoder, old->dpms_mode);
- crtc_funcs->dpms(crtc, old->dpms_mode);
- }
+ if (old->dpms_mode != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, old->dpms_mode);
}
/* Returns the clock of the currently programmed mode of the given pipe. */
@@ -5636,14 +6917,16 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
struct drm_display_mode *mode;
- int htot = I915_READ(HTOTAL(pipe));
- int hsync = I915_READ(HSYNC(pipe));
- int vtot = I915_READ(VTOTAL(pipe));
- int vsync = I915_READ(VSYNC(pipe));
+ int htot = I915_READ(HTOTAL(cpu_transcoder));
+ int hsync = I915_READ(HSYNC(cpu_transcoder));
+ int vtot = I915_READ(VTOTAL(cpu_transcoder));
+ int vsync = I915_READ(VSYNC(cpu_transcoder));
mode = malloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!mode)
+ return NULL;
mode->clock = intel_crtc_clock_get(dev, crtc);
mode->hdisplay = (htot & 0xffff) + 1;
@@ -5660,44 +6943,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-#define GPU_IDLE_TIMEOUT (500 /* ms */ * 1000 / hz)
-
-/* When this timer fires, we've been idle for awhile */
-static void intel_gpu_idle_timer(void *arg)
-{
- struct drm_device *dev = arg;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!list_empty(&dev_priv->mm.active_list)) {
- /* Still processing requests, so just re-arm the timer. */
- callout_schedule(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT);
- return;
- }
-
- dev_priv->busy = false;
- taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
-}
-
-#define CRTC_IDLE_TIMEOUT (1000 /* ms */ * 1000 / hz)
-
-static void intel_crtc_idle_timer(void *arg)
-{
- struct intel_crtc *intel_crtc = arg;
- struct drm_crtc *crtc = &intel_crtc->base;
- drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- struct intel_framebuffer *intel_fb;
-
- intel_fb = to_intel_framebuffer(crtc->fb);
- if (intel_fb && intel_fb->obj->active) {
- /* The framebuffer is still being accessed by the GPU. */
- callout_schedule(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT);
- return;
- }
-
- intel_crtc->busy = false;
- taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
-}
-
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -5727,10 +6972,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
}
-
- /* Schedule downclock */
- callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT,
- intel_crtc_idle_timer, intel_crtc);
}
static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -5769,88 +7010,40 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
}
-/**
- * intel_idle_update - adjust clocks for idleness
- * @work: work struct
- *
- * Either the GPU or display (or both) went idle. Check the busy status
- * here and adjust the CRTC and GPU clocks as necessary.
- */
-static void intel_idle_update(void *arg, int pending)
+void intel_mark_busy(struct drm_device *dev)
+{
+ i915_update_gfx_val(dev->dev_private);
+}
+
+void intel_mark_idle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = arg;
- struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
if (!i915_powersave)
return;
- DRM_LOCK(dev);
-
- i915_update_gfx_val(dev_priv);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- /* Skip inactive CRTCs */
if (!crtc->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->busy)
- intel_decrease_pllclock(crtc);
+ intel_decrease_pllclock(crtc);
}
-
- DRM_UNLOCK(dev);
}
-/**
- * intel_mark_busy - mark the GPU and possibly the display busy
- * @dev: drm device
- * @obj: object we're operating on
- *
- * Callers can use this function to indicate that the GPU is busy processing
- * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
- * buffer), we'll also mark the display as busy, so we know to increase its
- * clock frequency.
- */
-void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL;
- struct intel_framebuffer *intel_fb;
- struct intel_crtc *intel_crtc;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- if (!dev_priv->busy) {
- intel_sanitize_pm(dev);
- dev_priv->busy = true;
- } else
- callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
- intel_gpu_idle_timer, dev);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_crtc *crtc;
- if (obj == NULL)
+ if (!i915_powersave)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
- intel_fb = to_intel_framebuffer(crtc->fb);
- if (intel_fb->obj == obj) {
- if (!intel_crtc->busy) {
- /* Non-busy -> busy, upclock */
- intel_increase_pllclock(crtc);
- intel_crtc->busy = true;
- } else {
- /* Busy -> busy, put off timer */
- callout_reset(&intel_crtc->idle_callout,
- CRTC_IDLE_TIMEOUT, intel_crtc_idle_timer,
- intel_crtc);
- }
- }
+ if (to_intel_framebuffer(crtc->fb)->obj == obj)
+ intel_increase_pllclock(crtc);
}
}
@@ -5867,8 +7060,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
mtx_unlock(&dev->event_lock);
if (work) {
- taskqueue_cancel(dev_priv->tq, &work->task, NULL);
- taskqueue_drain(dev_priv->tq, &work->task);
+ taskqueue_cancel(dev_priv->wq, &work->work, NULL);
+ taskqueue_drain(dev_priv->wq, &work->work);
free(work, DRM_MEM_KMS);
}
@@ -5881,7 +7074,7 @@ static void intel_unpin_work_fn(void *arg, int pending)
{
struct intel_unpin_work *work =
arg;
- struct drm_device *dev = work->dev;
+ struct drm_device *dev = work->crtc->dev;
DRM_LOCK(dev);
intel_unpin_fb_obj(work->old_fb_obj);
@@ -5890,6 +7083,10 @@ static void intel_unpin_work_fn(void *arg, int pending)
intel_update_fbc(dev);
DRM_UNLOCK(dev);
+
+ BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+ atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
free(work, DRM_MEM_KMS);
}
@@ -5900,65 +7097,41 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
- struct drm_pending_vblank_event *e;
- struct timeval tnow, tvbl;
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
- microtime(&tnow);
-
mtx_lock(&dev->event_lock);
work = intel_crtc->unpin_work;
- if (work == NULL || !work->pending) {
+
+ /* Ensure we don't miss a work->pending update ... */
+ smp_rmb();
+
+ if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
mtx_unlock(&dev->event_lock);
return;
}
- intel_crtc->unpin_work = NULL;
+ /* and that the unpin work is consistent wrt ->pending. */
+ smp_rmb();
- if (work->event) {
- e = work->event;
- e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
-
- /* Called before vblank count and timestamps have
- * been updated for the vblank interval of flip
- * completion? Need to increment vblank count and
- * add one videorefresh duration to returned timestamp
- * to account for this. We assume this happened if we
- * get called over 0.9 frame durations after the last
- * timestamped vblank.
- *
- * This calculation can not be used with vrefresh rates
- * below 5Hz (10Hz to be on the safe side) without
- * promoting to 64 integers.
- */
- if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
- 9 * crtc->framedur_ns) {
- e->event.sequence++;
- tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
- crtc->framedur_ns);
- }
-
- e->event.tv_sec = tvbl.tv_sec;
- e->event.tv_usec = tvbl.tv_usec;
+ intel_crtc->unpin_work = NULL;
- list_add_tail(&e->base.link,
- &e->base.file_priv->event_list);
- drm_event_wakeup(&e->base);
- }
+ if (work->event)
+ drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
drm_vblank_put(dev, intel_crtc->pipe);
+ mtx_unlock(&dev->event_lock);
+
obj = work->old_fb_obj;
- atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane);
- if (atomic_load_acq_int(&obj->pending_flip) == 0)
- wakeup(&obj->pending_flip);
- mtx_unlock(&dev->event_lock);
+ atomic_clear_mask(1 << intel_crtc->plane,
+ &obj->pending_flip);
+ wake_up(&dev_priv->pending_flip_queue);
- taskqueue_enqueue(dev_priv->tq, &work->task);
+ taskqueue_enqueue(dev_priv->wq, &work->work);
CTR2(KTR_DRM, "i915_flip_complete %d %p", intel_crtc->plane,
work->pending_flip_obj);
@@ -5986,16 +7159,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ /* NB: An MMIO update of the plane base pointer will also
+ * generate a page-flip completion irq, i.e. every modeset
+ * is also accompanied by a spurious intel_prepare_page_flip().
+ */
mtx_lock(&dev->event_lock);
- if (intel_crtc->unpin_work) {
- if ((++intel_crtc->unpin_work->pending) > 1)
- DRM_ERROR("Prepared flip multiple times\n");
- } else {
- DRM_DEBUG("preparing flip with no unpin work?\n");
- }
+ if (intel_crtc->unpin_work)
+ atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
mtx_unlock(&dev->event_lock);
}
+inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+ /* Ensure that the work item is consistent when activating it ... */
+ smp_wmb();
+ atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+ /* and that it is marked active as soon as the irq could fire. */
+ smp_wmb();
+}
+
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -6003,18 +7185,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long offset;
u32 flip_mask;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
- /* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
-
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;
@@ -6031,8 +7209,10 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6049,18 +7229,14 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long offset;
u32 flip_mask;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
- /* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
-
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;
@@ -6074,9 +7250,10 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6094,7 +7271,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
@@ -6112,7 +7289,9 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
+ intel_ring_emit(ring,
+ (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+ obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -6121,6 +7300,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6137,7 +7318,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;
@@ -6152,7 +7333,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, obj->gtt_offset);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -6163,6 +7344,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6185,21 +7368,40 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ uint32_t plane_bit = 0;
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
+ switch(intel_crtc->plane) {
+ case PLANE_A:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+ break;
+ case PLANE_B:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+ break;
+ case PLANE_C:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+ break;
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ ret = -ENODEV;
+ goto err_unpin;
+ }
+
ret = intel_ring_begin(ring, 4);
if (ret)
goto err_unpin;
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6223,19 +7425,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_framebuffer *old_fb = crtc->fb;
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
int ret;
+ /* Can't change pixel format via MI display flips. */
+ if (fb->pixel_format != crtc->fb->pixel_format)
+ return -EINVAL;
+
+ /*
+ * TILEOFF/LINOFF registers can't be changed via MI display flips.
+ * Note that pitch changes could also affect these register.
+ */
+ if (INTEL_INFO(dev)->gen > 3 &&
+ (fb->offsets[0] != crtc->fb->offsets[0] ||
+ fb->pitches[0] != crtc->fb->pitches[0]))
+ return -EINVAL;
+
work = malloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (work == NULL)
+ return -ENOMEM;
work->event = event;
- work->dev = crtc->dev;
- intel_fb = to_intel_framebuffer(crtc->fb);
- work->old_fb_obj = intel_fb->obj;
- TASK_INIT(&work->task, 0, intel_unpin_work_fn, work);
+ work->crtc = crtc;
+ work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+ TASK_INIT(&work->work, 0, intel_unpin_work_fn, work);
ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
@@ -6254,10 +7470,12 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->unpin_work = work;
mtx_unlock(&dev->event_lock);
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
+ if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+ taskqueue_drain_all(dev_priv->wq);
- DRM_LOCK(dev);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto cleanup;
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6272,13 +7490,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
- atomic_set_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
+ atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+ atomic_inc(&intel_crtc->unpin_work_count);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
goto cleanup_pending;
+
intel_disable_fbc(dev);
- intel_mark_busy(dev, obj);
+ intel_mark_fb_busy(obj);
DRM_UNLOCK(dev);
CTR2(KTR_DRM, "i915_flip_request %d %p", intel_crtc->plane, obj);
@@ -6286,11 +7506,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_pending:
- atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
+ atomic_dec(&intel_crtc->unpin_work_count);
+ crtc->fb = old_fb;
+ atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
DRM_UNLOCK(dev);
+cleanup:
mtx_lock(&dev->event_lock);
intel_crtc->unpin_work = NULL;
mtx_unlock(&dev->event_lock);
@@ -6302,85 +7525,807 @@ free_work:
return ret;
}
-static void intel_sanitize_modesetting(struct drm_device *dev,
- int pipe, int plane)
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_noop,
+};
+
+bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg, val;
- int i;
+ struct intel_encoder *other_encoder;
+ struct drm_crtc *crtc = &encoder->new_crtc->base;
- /* Clear any frame start delays used for debugging left by the BIOS */
- for_each_pipe(i) {
- reg = PIPECONF(i);
- I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ if (WARN_ON(!crtc))
+ return false;
+
+ list_for_each_entry(other_encoder,
+ &crtc->dev->mode_config.encoder_list,
+ base.head) {
+
+ if (&other_encoder->new_crtc->base != crtc ||
+ encoder == other_encoder)
+ continue;
+ else
+ return true;
}
- if (HAS_PCH_SPLIT(dev))
- return;
+ return false;
+}
- /* Who knows what state these registers were left in by the BIOS or
- * grub?
- *
- * If we leave the registers in a conflicting state (e.g. with the
- * display plane reading from the other pipe than the one we intend
- * to use) then when we attempt to teardown the active mode, we will
- * not disable the pipes and planes in the correct order -- leaving
- * a plane reading from a disabled pipe and possibly leading to
- * undefined behaviour.
+static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+ WARN(!crtc, "checking null crtc?\n");
+
+ dev = crtc->dev;
+
+ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ break;
+ crtc_mask <<= 1;
+ }
+
+ if (encoder->possible_crtcs & crtc_mask)
+ return true;
+ return false;
+}
+
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->new_encoder =
+ to_intel_encoder(connector->base.encoder);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ }
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the stage display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->base.encoder = &connector->new_encoder->base;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->base.crtc = &encoder->new_crtc->base;
+ }
+}
+
+static int
+intel_modeset_adjusted_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode **res)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
+
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode)
+ return -ENOMEM;
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
*/
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
- reg = DSPCNTR(plane);
- val = I915_READ(reg);
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+ encoder_funcs = encoder->base.helper_private;
+ if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
+ adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
+ goto fail;
+ }
+ }
- if ((val & DISPLAY_PLANE_ENABLE) == 0)
- return;
- if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
- return;
+ if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto fail;
+ }
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
- /* This display plane is active and attached to the other CPU pipe. */
- pipe = !pipe;
+ *res = adjusted_mode;
+ return 0;
+fail:
+ drm_mode_destroy(dev, adjusted_mode);
+ return -EINVAL;
+}
- /* Disable the plane and wait for it to stop reading from the pipe. */
- intel_disable_plane(dev_priv, plane, pipe);
- intel_disable_pipe(dev_priv, pipe);
+/* Computes which crtcs are affected and sets the relevant bits in the mask. For
+ * simplicity we use the crtc's pipe number (because it's easier to obtain). */
+static void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+ unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_crtc *tmp_crtc;
+
+ *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
+ /* Check which crtcs have changed outputs connected to them, these need
+ * to be part of the prepare_pipes mask. We don't (yet) support global
+ * modeset across multiple crtcs, so modeset_pipes will only have one
+ * bit set at most. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder == &connector->new_encoder->base)
+ continue;
+
+ if (connector->base.encoder) {
+ tmp_crtc = connector->base.encoder->crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (connector->new_encoder)
+ *prepare_pipes |=
+ 1 << connector->new_encoder->new_crtc->pipe;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc == &encoder->new_crtc->base)
+ continue;
+
+ if (encoder->base.crtc) {
+ tmp_crtc = encoder->base.crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (encoder->new_crtc)
+ *prepare_pipes |= 1 << encoder->new_crtc->pipe;
+ }
+
+ /* Check for any pipes that will be fully disabled ... */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool used = false;
+
+ /* Don't try to disable disabled crtcs. */
+ if (!intel_crtc->base.enabled)
+ continue;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->new_crtc == intel_crtc)
+ used = true;
+ }
+
+ if (!used)
+ *disable_pipes |= 1 << intel_crtc->pipe;
+ }
+
+
+ /* set_mode is also used to update properties on life display pipes. */
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled)
+ *prepare_pipes |= 1 << intel_crtc->pipe;
+
+ /*
+ * For simplicity do a full modeset on any pipe where the output routing
+ * changed. We could be more clever, but that would require us to be
+ * more careful with calling the relevant encoder->mode_set functions.
+ */
+ if (*prepare_pipes)
+ *modeset_pipes = *prepare_pipes;
+
+ /* ... and mask these out. */
+ *modeset_pipes &= ~(*disable_pipes);
+ *prepare_pipes &= ~(*disable_pipes);
+
+ /*
+ * HACK: We don't (yet) fully support global modesets. intel_set_config
+ * obies this rule, but the modeset restore mode of
+ * intel_modeset_setup_hw_state does not.
+ */
+ *modeset_pipes &= 1 << intel_crtc->pipe;
+ *prepare_pipes &= 1 << intel_crtc->pipe;
}
-static void intel_crtc_reset(struct drm_crtc *crtc)
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
{
+ struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Reset flags back to the 'unknown' status so that they
- * will be correctly set on the initial modeset.
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc)
+ return true;
+
+ return false;
+}
+
+static void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+ struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc;
+ struct drm_connector *connector;
+
+ list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (!intel_encoder->base.crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe))
+ intel_encoder->connectors_active = false;
+ }
+
+ intel_modeset_commit_output_state(dev);
+
+ /* Update computed state. */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe)) {
+ struct drm_property *dpms_property =
+ dev->mode_config.dpms_property;
+
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_object_property_set_value(&connector->base,
+ dpms_property,
+ DRM_MODE_DPMS_ON);
+
+ intel_encoder = to_intel_encoder(connector->encoder);
+ intel_encoder->connectors_active = true;
+ }
+ }
+
+}
+
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+ list_for_each_entry((intel_crtc), \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ if (mask & (1 <<(intel_crtc)->pipe)) \
+
+void
+intel_modeset_check_state(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* This also checks the encoder/connector hw state with the
+ * ->get_hw_state callbacks. */
+ intel_connector_check_state(connector);
+
+ WARN(&connector->new_encoder->base != connector->base.encoder,
+ "connector's staged encoder doesn't match current encoder\n");
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+ enum pipe pipe, tracked_pipe;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ WARN(&encoder->new_crtc->base != encoder->base.crtc,
+ "encoder's stage crtc doesn't match current crtc\n");
+ WARN(encoder->connectors_active && !encoder->base.crtc,
+ "encoder's active_connectors set, but no crtc\n");
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder != &encoder->base)
+ continue;
+ enabled = true;
+ if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+ active = true;
+ }
+ WARN(!!encoder->base.crtc != enabled,
+ "encoder's enabled state mismatch "
+ "(expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
+ WARN(active && !encoder->base.crtc,
+ "active encoder with no crtc\n");
+
+ WARN(encoder->connectors_active != active,
+ "encoder's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, encoder->connectors_active);
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ WARN(active != encoder->connectors_active,
+ "encoder's hw state doesn't match sw tracking "
+ "(expected %i, found %i)\n",
+ encoder->connectors_active, active);
+
+ if (!encoder->base.crtc)
+ continue;
+
+ tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ WARN(active && pipe != tracked_pipe,
+ "active encoder's pipe doesn't match"
+ "(expected %i, found %i)\n",
+ tracked_pipe, pipe);
+
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+
+ DRM_DEBUG_KMS("[CRTC:%d]\n",
+ crtc->base.base.id);
+
+ WARN(crtc->active && !crtc->base.enabled,
+ "active crtc, but not enabled in sw tracking\n");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc != &crtc->base)
+ continue;
+ enabled = true;
+ if (encoder->connectors_active)
+ active = true;
+ }
+ WARN(active != crtc->active,
+ "crtc's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, crtc->active);
+ WARN(enabled != crtc->base.enabled,
+ "crtc's computed enabled state doesn't match tracked enabled state "
+ "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+
+ assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
+ }
+}
+
+bool intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct intel_crtc *intel_crtc;
+ unsigned disable_pipes, prepare_pipes, modeset_pipes;
+ bool ret = true;
+
+ intel_modeset_affected_pipes(crtc, &modeset_pipes,
+ &prepare_pipes, &disable_pipes);
+
+ DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+ modeset_pipes, prepare_pipes, disable_pipes);
+
+ for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+ intel_crtc_disable(&intel_crtc->base);
+
+ saved_hwmode = crtc->hwmode;
+ saved_mode = crtc->mode;
+
+ /* Hack: Because we don't (yet) support global modeset on multiple
+ * crtcs, we don't keep track of the new mode for more than one crtc.
+ * Hence simply check whether any bit is set in modeset_pipes in all the
+ * pieces of code that are not yet converted to deal with mutliple crtcs
+ * changing their mode at the same time. */
+ adjusted_mode = NULL;
+ if (modeset_pipes) {
+ int err = intel_modeset_adjusted_mode(crtc, mode, &adjusted_mode);
+ if (err) {
+ return false;
+ }
+ }
+
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+ if (intel_crtc->base.enabled)
+ dev_priv->display.crtc_disable(&intel_crtc->base);
+ }
+
+ /* crtc->mode is already used by the ->mode_set callbacks, hence we need
+ * to set it here already despite that we pass it down the callchain.
*/
- intel_crtc->dpms_mode = -1;
+ if (modeset_pipes)
+ crtc->mode = *mode;
+
+ /* Only after disabling all output pipelines that will be changed can we
+ * update the the output configuration. */
+ intel_modeset_update_state(dev, prepare_pipes);
- /* We need to fix up any BIOS configuration that conflicts with
- * our expectations.
+ if (dev_priv->display.modeset_global_resources)
+ dev_priv->display.modeset_global_resources(dev);
+
+ /* Set up the DPLL and any encoders state that needs to adjust or depend
+ * on the DPLL.
*/
- intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ ret = !intel_crtc_mode_set(&intel_crtc->base,
+ mode, adjusted_mode,
+ x, y, fb);
+ if (!ret)
+ goto done;
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+ dev_priv->display.crtc_enable(&intel_crtc->base);
+
+ if (modeset_pipes) {
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+ }
+
+ /* FIXME: add subpixel order */
+done:
+ drm_mode_destroy(dev, adjusted_mode);
+ if (!ret && crtc->enabled) {
+ crtc->hwmode = saved_hwmode;
+ crtc->mode = saved_mode;
+ } else {
+ intel_modeset_check_state(dev);
+ }
+
+ return ret;
}
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
- .dpms = intel_crtc_dpms,
- .mode_fixup = intel_crtc_mode_fixup,
- .mode_set = intel_crtc_mode_set,
- .mode_set_base = intel_pipe_set_base,
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_disable,
-};
+#undef for_each_intel_crtc_masked
+
+static void intel_set_config_free(struct intel_set_config *config)
+{
+ if (!config)
+ return;
+
+ free(config->save_connector_encoders, DRM_MEM_KMS);
+ free(config->save_encoder_crtcs, DRM_MEM_KMS);
+ free(config, DRM_MEM_KMS);
+}
+
+static int intel_set_config_save_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int count;
+
+ config->save_encoder_crtcs =
+ malloc(dev->mode_config.num_encoder *
+ sizeof(struct drm_crtc *), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (!config->save_encoder_crtcs)
+ return -ENOMEM;
+
+ config->save_connector_encoders =
+ malloc(dev->mode_config.num_connector *
+ sizeof(struct drm_encoder *), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (!config->save_connector_encoders)
+ return -ENOMEM;
+
+ /* Copy data. Note that driver private data is not affected.
+ * Should anything bad happen only the expected state is
+ * restored, not the drivers personal bookkeeping.
+ */
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ config->save_encoder_crtcs[count++] = encoder->crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ config->save_connector_encoders[count++] = connector->encoder;
+ }
+
+ return 0;
+}
+
+static void intel_set_config_restore_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ int count;
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(config->save_encoder_crtcs[count++]);
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ connector->new_encoder =
+ to_intel_encoder(config->save_connector_encoders[count++]);
+ }
+}
+
+static void
+intel_set_config_compute_mode_changes(struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (set->crtc->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ config->mode_changed = true;
+ } else if (set->fb == NULL) {
+ config->mode_changed = true;
+ } else if (set->fb->depth != set->crtc->fb->depth) {
+ config->mode_changed = true;
+ } else if (set->fb->bits_per_pixel !=
+ set->crtc->fb->bits_per_pixel) {
+ config->mode_changed = true;
+ } else
+ config->fb_changed = true;
+ }
+
+ if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
+ config->fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ config->mode_changed = true;
+ }
+}
+
+static int
+intel_modeset_stage_output_state(struct drm_device *dev,
+ struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ struct drm_crtc *new_crtc;
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ int count, ro;
+
+ /* The upper layers ensure that we either disabl a crtc or have a list
+ * of connectors. For paranoia, double-check this. */
+ WARN_ON(!set->fb && (set->num_connectors != 0));
+ WARN_ON(set->fb && (set->num_connectors == 0));
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* Otherwise traverse passed in connector list and get encoders
+ * for them. */
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base) {
+ connector->new_encoder = connector->encoder;
+ break;
+ }
+ }
+
+ /* If we disable the crtc, disable all its connectors. Also, if
+ * the connector is on the changing crtc but not on the new
+ * connector list, disable it. */
+ if ((!set->fb || ro == set->num_connectors) &&
+ connector->base.encoder &&
+ connector->base.encoder->crtc == set->crtc) {
+ connector->new_encoder = NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+ }
+
+
+ if (&connector->new_encoder->base != connector->base.encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* connector->new_encoder is now updated for all connectors. */
+
+ /* Update crtc of enabled connectors. */
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (!connector->new_encoder)
+ continue;
+
+ new_crtc = connector->new_encoder->base.crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
+ new_crtc)) {
+ return -EINVAL;
+ }
+ connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ new_crtc->base.id);
+ }
+
+ /* Check for any encoders that needs to be disabled. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder == encoder) {
+ WARN_ON(!connector->new_encoder->new_crtc);
+
+ goto next_encoder;
+ }
+ }
+ encoder->new_crtc = NULL;
+next_encoder:
+ /* Only now check for crtc changes so we don't miss encoders
+ * that will be disabled. */
+ if (&encoder->new_crtc->base != encoder->base.crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* Now we've also updated encoder->new_crtc for all encoders. */
+
+ return 0;
+}
+
+static int intel_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_mode_set save_set;
+ struct intel_set_config *config;
+ int ret;
+
+ BUG_ON(!set);
+ BUG_ON(!set->crtc);
+ BUG_ON(!set->crtc->helper_private);
+
+ if (!set->mode)
+ set->fb = NULL;
+
+ /* The fb helper likes to play gross jokes with ->mode_set_config.
+ * Unfortunately the crtc helper doesn't do much at all for this case,
+ * so we have to cope with this madness until the fb helper is fixed up. */
+ if (set->fb && set->num_connectors == 0)
+ return 0;
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ }
+
+ dev = set->crtc->dev;
+
+ ret = -ENOMEM;
+ config = malloc(sizeof(*config), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (!config)
+ goto out_config;
+
+ ret = intel_set_config_save_state(dev, config);
+ if (ret)
+ goto out_config;
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
+ /* Compute whether we need a full modeset, only an fb base update or no
+ * change at all. In the future we might also check whether only the
+ * mode changed, e.g. for LVDS where we only change the panel fitter in
+ * such cases. */
+ intel_set_config_compute_mode_changes(set, config);
+
+ ret = intel_modeset_stage_output_state(dev, set, config);
+ if (ret)
+ goto fail;
+
+ if (config->mode_changed) {
+ if (set->mode) {
+ DRM_DEBUG_KMS("attempting to set mode from"
+ " userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+ }
+
+ if (!intel_set_mode(set->crtc, set->mode,
+ set->x, set->y, set->fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else if (config->fb_changed) {
+ ret = intel_pipe_set_base(set->crtc,
+ set->x, set->y, set->fb);
+ }
+
+ intel_set_config_free(config);
+
+ return 0;
+
+fail:
+ intel_set_config_restore_state(dev, config);
+
+ /* Try to restore the config */
+ if (config->mode_changed &&
+ !intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
+out_config:
+ intel_set_config_free(config);
+ return ret;
+}
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .reset = intel_crtc_reset,
.cursor_set = intel_crtc_cursor_set,
.cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
};
+static void intel_cpu_pll_init(struct drm_device *dev)
+{
+ if (IS_HASWELL(dev))
+ intel_ddi_pll_init(dev);
+}
+
static void intel_pch_pll_init(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -6404,9 +8349,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
struct intel_crtc *intel_crtc;
int i;
- intel_crtc = malloc(sizeof(struct intel_crtc) +
- (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
- DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ intel_crtc = malloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (intel_crtc == NULL)
+ return;
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
@@ -6420,34 +8365,20 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
+ intel_crtc->cpu_transcoder = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
}
- KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) &&
- dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL,
- ("plane_to_crtc is already initialized"));
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
- intel_crtc_reset(&intel_crtc->base);
- intel_crtc->active = true; /* force the pipe off on setup_init_config */
intel_crtc->bpp = 24; /* default for pre-Ironlake */
- if (HAS_PCH_SPLIT(dev)) {
- intel_helper_funcs.prepare = ironlake_crtc_prepare;
- intel_helper_funcs.commit = ironlake_crtc_commit;
- } else {
- intel_helper_funcs.prepare = i9xx_crtc_prepare;
- intel_helper_funcs.commit = i9xx_crtc_commit;
- }
-
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
- intel_crtc->busy = false;
-
- callout_init(&intel_crtc->idle_callout, 1);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -6474,15 +8405,23 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
return 0;
}
-static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct intel_encoder *encoder)
{
- struct intel_encoder *encoder;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_encoder *source_encoder;
int index_mask = 0;
int entry = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
- if (type_mask & encoder->clone_mask)
+ list_for_each_entry(source_encoder,
+ &dev->mode_config.encoder_list, base.head) {
+
+ if (encoder == source_encoder)
+ index_mask |= (1 << entry);
+
+ /* Intel hw has only one MUX where enocoders could be cloned. */
+ if (encoder->cloneable && source_encoder->cloneable)
index_mask |= (1 << entry);
+
entry++;
}
@@ -6519,17 +8458,9 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
- if (HAS_PCH_SPLIT(dev)) {
- dpd_is_edp = intel_dpd_is_edp(dev);
-
- if (has_edp_a(dev))
- intel_dp_init(dev, DP_A);
-
- if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D);
- }
-
- intel_crt_init(dev);
+ if (!(IS_HASWELL(dev) &&
+ (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ intel_crt_init(dev);
if (IS_HASWELL(dev)) {
int found;
@@ -6552,37 +8483,49 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
+ dpd_is_edp = intel_dpd_is_edp(dev);
- DRM_DEBUG_KMS(
-"HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n",
- (I915_READ(HDMIB) & PORT_DETECTED) != 0,
- (I915_READ(PCH_DP_B) & DP_DETECTED) != 0,
- (I915_READ(HDMIC) & PORT_DETECTED) != 0,
- (I915_READ(HDMID) & PORT_DETECTED) != 0,
- (I915_READ(PCH_DP_C) & DP_DETECTED) != 0,
- (I915_READ(PCH_DP_D) & DP_DETECTED) != 0,
- (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0);
+ if (has_edp_a(dev))
+ intel_dp_init(dev, DP_A, PORT_A);
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
- intel_hdmi_init(dev, HDMIB);
+ intel_hdmi_init(dev, HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_B);
+ intel_dp_init(dev, PCH_DP_B, PORT_B);
}
if (I915_READ(HDMIC) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMIC);
+ intel_hdmi_init(dev, HDMIC, PORT_C);
- if (I915_READ(HDMID) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMID);
+ if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
+ intel_hdmi_init(dev, HDMID, PORT_D);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
- intel_dp_init(dev, PCH_DP_C);
+ intel_dp_init(dev, PCH_DP_C, PORT_C);
- if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D);
+ if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
+ } else if (IS_VALLEYVIEW(dev)) {
+ int found;
+
+ /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+ if (I915_READ(DP_C) & DP_DETECTED)
+ intel_dp_init(dev, DP_C, PORT_C);
+
+ if (I915_READ(SDVOB) & PORT_DETECTED) {
+ /* SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, SDVOB, true);
+ if (!found)
+ intel_hdmi_init(dev, SDVOB, PORT_B);
+ if (!found && (I915_READ(DP_B) & DP_DETECTED))
+ intel_dp_init(dev, DP_B, PORT_B);
+ }
+
+ if (I915_READ(SDVOC) & PORT_DETECTED)
+ intel_hdmi_init(dev, SDVOC, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -6592,12 +8535,12 @@ static void intel_setup_outputs(struct drm_device *dev)
found = intel_sdvo_init(dev, SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
- intel_hdmi_init(dev, SDVOB);
+ intel_hdmi_init(dev, SDVOB, PORT_B);
}
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_B\n");
- intel_dp_init(dev, DP_B);
+ intel_dp_init(dev, DP_B, PORT_B);
}
}
@@ -6612,26 +8555,21 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
- intel_hdmi_init(dev, SDVOC);
+ intel_hdmi_init(dev, SDVOC, PORT_C);
}
if (SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_C\n");
- intel_dp_init(dev, DP_C);
+ intel_dp_init(dev, DP_C, PORT_C);
}
}
if (SUPPORTS_INTEGRATED_DP(dev) &&
(I915_READ(DP_D) & DP_DETECTED)) {
DRM_DEBUG_KMS("probing DP_D\n");
- intel_dp_init(dev, DP_D);
+ intel_dp_init(dev, DP_D, PORT_D);
}
- } else if (IS_GEN2(dev)) {
-#if 1
- KIB_NOTYET();
-#else
+ } else if (IS_GEN2(dev))
intel_dvo_init(dev);
-#endif
- }
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
@@ -6639,14 +8577,12 @@ static void intel_setup_outputs(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
- intel_encoder_clones(dev, encoder->clone_mask);
+ intel_encoder_clones(encoder);
}
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
+ intel_init_pch_refclk(dev);
- if (HAS_PCH_SPLIT(dev))
- ironlake_init_pch_refclk(dev);
+ drm_helper_move_panel_connectors_to_head(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -6681,32 +8617,74 @@ int intel_framebuffer_init(struct drm_device *dev,
{
int ret;
- if (obj->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("hardware does not support tiling Y\n");
+ return -EINVAL;
+ }
+
+ if (mode_cmd->pitches[0] & 63) {
+ DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+ mode_cmd->pitches[0]);
+ return -EINVAL;
+ }
+
+ /* FIXME <= Gen4 stride limits are bit unclear */
+ if (mode_cmd->pitches[0] > 32768) {
+ DRM_DEBUG("pitch (%d) must be at less than 32768\n",
+ mode_cmd->pitches[0]);
return -EINVAL;
+ }
- if (mode_cmd->pitches[0] & 63)
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ mode_cmd->pitches[0] != obj->stride) {
+ DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], obj->stride);
return -EINVAL;
+ }
+ /* Reject formats not supported by any plane early. */
switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ if (INTEL_INFO(dev)->gen > 3) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- /* RGB formats are common across chipsets */
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
+ if (INTEL_INFO(dev)->gen < 5) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
break;
default:
- DRM_DEBUG("unsupported pixel format %u\n", mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
+ /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+ if (mode_cmd->offsets[0] != 0)
+ return -EINVAL;
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -6745,14 +8723,22 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.dpms = ironlake_crtc_dpms;
+ if (IS_HASWELL(dev)) {
+ dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+ dev_priv->display.crtc_enable = haswell_crtc_enable;
+ dev_priv->display.crtc_disable = haswell_crtc_disable;
+ dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.update_plane = ironlake_update_plane;
+ } else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.crtc_enable = ironlake_crtc_enable;
+ dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else {
- dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
}
@@ -6794,14 +8780,13 @@ static void intel_init_display(struct drm_device *dev)
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ ivb_modeset_global_resources;
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.write_eld = haswell_write_eld;
} else
dev_priv->display.update_wm = NULL;
- } else if (IS_VALLEYVIEW(dev)) {
- dev_priv->display.force_wake_get = vlv_force_wake_get;
- dev_priv->display.force_wake_put = vlv_force_wake_put;
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
}
@@ -6873,27 +8858,49 @@ struct intel_quirk {
void (*hook)(struct drm_device *dev);
};
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+ void (*hook)(struct drm_device *dev);
+ const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+ return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "NCR Corporation",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
+ },
+ },
+ { } /* terminating entry */
+ },
+ .hook = quirk_invert_brightness,
+ },
+};
+
#define PCI_ANY_ID (~0u)
static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
- /* Thinkpad R31 needs pipe A force quirk */
- { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
- /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
- { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
- /* ThinkPad X40 needs pipe A force quirk */
-
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
- /* 855 & before need to leave pipe A & dpll A up */
- { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ /* 830/845 need to leave pipe A & dpll A up */
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
@@ -6903,24 +8910,38 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 5734Z must invert backlight brightness */
{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
};
static void intel_init_quirks(struct drm_device *dev)
{
- struct intel_quirk *q;
- device_t d;
int i;
- d = dev->dev;
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
- q = &intel_quirks[i];
- if (pci_get_device(d) == q->device &&
- (pci_get_subvendor(d) == q->subsystem_vendor ||
+ struct intel_quirk *q = &intel_quirks[i];
+
+ if (pci_get_device(dev->dev) == q->device &&
+ (pci_get_subvendor(dev->dev) == q->subsystem_vendor ||
q->subsystem_vendor == PCI_ANY_ID) &&
- (pci_get_subdevice(d) == q->subsystem_device ||
+ (pci_get_subdevice(dev->dev) == q->subsystem_device ||
q->subsystem_device == PCI_ANY_ID))
q->hook(dev);
}
+ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+ intel_dmi_quirks[i].hook(dev);
+ }
}
/* Disable the VGA plane that we never use */
@@ -6935,53 +8956,35 @@ static void i915_disable_vga(struct drm_device *dev)
else
vga_reg = VGACNTRL;
-#if 0
+#ifdef FREEBSD_WIP
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
-#endif
+#endif /* FREEBSD_WIP */
outb(VGA_SR_INDEX, SR01);
sr1 = inb(VGA_SR_DATA);
outb(VGA_SR_DATA, sr1 | 1<<5);
-#if 0
+#ifdef FREEBSD_WIP
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-#endif
- DELAY(300);
+#endif /* FREEBSD_WIP */
+ udelay(300);
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}
-static void ivb_pch_pwm_override(struct drm_device *dev)
+void intel_modeset_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * IVB has CPU eDP backlight regs too, set things up to let the
- * PCH regs control the backlight
+ /* We attempt to init the necessary power wells early in the initialization
+ * time, so the subsystems that expect power to be enabled can work.
*/
- I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
- I915_WRITE(BLC_PWM_CPU_CTL, 0);
- I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE);
-}
+ intel_init_power_wells(dev);
-void intel_modeset_init_hw(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_prepare_ddi(dev);
intel_init_clock_gating(dev);
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- ironlake_enable_rc6(dev);
- intel_init_emon(dev);
- }
-
- if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
-
- if (IS_IVYBRIDGE(dev))
- ivb_pch_pwm_override(dev);
+ DRM_LOCK(dev);
+ intel_enable_gt_powersave(dev);
+ DRM_UNLOCK(dev);
}
void intel_modeset_init(struct drm_device *dev)
@@ -7003,8 +9006,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_pm(dev);
- intel_prepare_ddi(dev);
-
intel_init_display(dev);
if (IS_GEN2(dev)) {
@@ -7017,7 +9018,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
- dev->mode_config.fb_base = dev->agp->base;
+ dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
@@ -7029,14 +9030,327 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
+ intel_cpu_pll_init(dev);
intel_pch_pll_init(dev);
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
+}
+
+static void
+intel_connector_break_all_links(struct intel_connector *connector)
+{
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ connector->encoder->connectors_active = false;
+ connector->encoder->base.crtc = NULL;
+}
+
+static void intel_enable_pipe_a(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ struct drm_connector *crt = NULL;
+ struct intel_load_detect_pipe load_detect_temp;
+
+ /* We can't just switch on the pipe A, we need to set things up with a
+ * proper mode and output configuration. As a gross hack, enable pipe A
+ * by enabling the load detect pipe once. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+ crt = &connector->base;
+ break;
+ }
+ }
+
+ if (!crt)
+ return;
+
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
+ intel_release_load_detect_pipe(crt, &load_detect_temp);
+
- TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
- callout_init(&dev_priv->idle_callout, 1);
+}
+
+static bool
+intel_check_plane_mapping(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ u32 reg, val;
+
+ if (dev_priv->num_pipe == 1)
+ return true;
+
+ reg = DSPCNTR(!crtc->plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) &&
+ (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+ return false;
+
+ return true;
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ reg = PIPECONF(crtc->cpu_transcoder);
+ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+ /* We need to sanitize the plane -> pipe mapping first because this will
+ * disable the crtc (and hence change the state) if it is wrong. Note
+ * that gen4+ has a fixed plane -> pipe mapping. */
+ if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+ struct intel_connector *connector;
+ bool plane;
+
+ DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+ crtc->base.base.id);
+
+ /* Pipe has the wrong plane attached and the plane is active.
+ * Temporarily change the plane mapping and disable everything
+ * ... */
+ plane = crtc->plane;
+ crtc->plane = !plane;
+ dev_priv->display.crtc_disable(&crtc->base);
+ crtc->plane = plane;
+
+ /* ... and break all links. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->base.crtc != &crtc->base)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+
+ WARN_ON(crtc->active);
+ crtc->base.enabled = false;
+ }
+
+ if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+ crtc->pipe == PIPE_A && !crtc->active) {
+ /* BIOS forgot to enable pipe A, this mostly happens after
+ * resume. Force-enable the pipe to fix this, the update_dpms
+ * call below we restore the pipe to the right state, but leave
+ * the required bits on. */
+ intel_enable_pipe_a(dev);
+ }
+
+ /* Adjust the state of the output pipe according to whether we
+ * have active connectors/encoders. */
+ intel_crtc_update_dpms(&crtc->base);
+
+ if (crtc->active != crtc->base.enabled) {
+ struct intel_encoder *encoder;
+
+ /* This can happen either due to bugs in the get_hw_state
+ * functions or because the pipe is force-enabled due to the
+ * pipe A quirk. */
+ DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+ crtc->base.base.id,
+ crtc->base.enabled ? "enabled" : "disabled",
+ crtc->active ? "enabled" : "disabled");
+
+ crtc->base.enabled = crtc->active;
+
+ /* Because we only establish the connector -> encoder ->
+ * crtc links if something is active, this means the
+ * crtc is now deactivated. Break the links. connector
+ * -> encoder links are only establish when things are
+ * actually up, hence no need to break them. */
+ WARN_ON(crtc->active);
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+ WARN_ON(encoder->connectors_active);
+ encoder->base.crtc = NULL;
+ }
+ }
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+ struct intel_connector *connector;
+ struct drm_device *dev = encoder->base.dev;
+
+ /* We need to check both for a crtc link (meaning that the
+ * encoder is active and trying to read from a pipe) and the
+ * pipe itself being active. */
+ bool has_active_crtc = encoder->base.crtc &&
+ to_intel_crtc(encoder->base.crtc)->active;
+
+ if (encoder->connectors_active && !has_active_crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ /* Connector is active, but has no active pipe. This is
+ * fallout from our resume register restoring. Disable
+ * the encoder manually again. */
+ if (encoder->base.crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+ encoder->disable(encoder);
+ }
+
+ /* Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+ * in our code, like the register restore mess on resume. Clamp
+ * things to off as a safer default. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+ }
+ /* Enabled encoders without active connectors will be fixed in
+ * the crtc fixup. */
+}
+
+static void i915_redisable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+ if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+ }
+}
+
+/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
+ * and i915 state tracking structures. */
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ u32 tmp;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ if (IS_HASWELL(dev)) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+ if (tmp & TRANS_DDI_FUNC_ENABLE) {
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ pipe = PIPE_C;
+ break;
+ }
+
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc->cpu_transcoder = TRANSCODER_EDP;
+
+ DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
+ pipe_name(pipe));
+ }
+ }
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
+ if (tmp & PIPECONF_ENABLE)
+ crtc->active = true;
+ else
+ crtc->active = false;
+
+ crtc->base.enabled = crtc->active;
+
+ DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+ crtc->base.base.id,
+ crtc->active ? "enabled" : "disabled");
+ }
+
+ if (IS_HASWELL(dev))
+ intel_ddi_setup_hw_pll_state(dev);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ pipe = 0;
+
+ if (encoder->get_hw_state(encoder, &pipe)) {
+ encoder->base.crtc =
+ dev_priv->pipe_to_crtc_mapping[pipe];
+ } else {
+ encoder->base.crtc = NULL;
+ }
+
+ encoder->connectors_active = false;
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ encoder->base.crtc ? "enabled" : "disabled",
+ pipe);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->get_hw_state(connector)) {
+ connector->base.dpms = DRM_MODE_DPMS_ON;
+ connector->encoder->connectors_active = true;
+ connector->base.encoder = &connector->encoder->base;
+ } else {
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ connector->base.encoder ? "enabled" : "disabled");
+ }
+
+ /* HW state is read out, now we need to sanitize this mess. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ intel_sanitize_encoder(encoder);
+ }
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_sanitize_crtc(crtc);
+ }
+
+ if (force_restore) {
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_set_mode(&crtc->base, &crtc->base.mode,
+ crtc->base.x, crtc->base.y, crtc->base.fb);
+ }
+
+ i915_redisable_vga(dev);
+ } else {
+ intel_modeset_update_staged_output_state(dev);
+ }
+
+ intel_modeset_check_state(dev);
+
+ drm_mode_config_reset(dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -7044,6 +9358,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
+
+ intel_modeset_setup_hw_state(dev, false);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -7055,9 +9371,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
DRM_LOCK(dev);
-#if 0
intel_unregister_dsm_handler();
-#endif
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -7071,13 +9385,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
- if (IS_IRONLAKE_M(dev))
- ironlake_disable_drps(dev);
- if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
- gen6_disable_rps(dev);
+ intel_disable_gt_powersave(dev);
- if (IS_IRONLAKE_M(dev))
- ironlake_disable_rc6(dev);
+ ironlake_teardown_rc6(dev);
if (IS_VALLEYVIEW(dev))
vlv_init_dpio(dev);
@@ -7087,19 +9397,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* Disable the irq before mode object teardown, for the irq might
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
- if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
- taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
- if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL))
- taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
+ if (taskqueue_cancel(dev_priv->wq, &dev_priv->hotplug_work, NULL))
+ taskqueue_drain(dev_priv->wq, &dev_priv->hotplug_work);
+ if (taskqueue_cancel(dev_priv->wq, &dev_priv->rps.work, NULL))
+ taskqueue_drain(dev_priv->wq, &dev_priv->rps.work);
- /* Shut off idle work before the crtcs get freed. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- callout_drain(&intel_crtc->idle_callout);
- }
- callout_drain(&dev_priv->idle_callout);
- if (taskqueue_cancel(dev_priv->tq, &dev_priv->idle_task, NULL))
- taskqueue_drain(dev_priv->tq, &dev_priv->idle_task);
+ /* flush any delayed tasks or pending work */
+ taskqueue_drain_all(dev_priv->wq);
+
+ /* destroy backlight, if any, before the connectors */
+ intel_panel_destroy_backlight(dev);
drm_mode_config_cleanup(dev);
}
@@ -7125,26 +9432,28 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
*/
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
- device_t bridge_dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u16 gmch_ctrl;
- bridge_dev = intel_gtt_get_bridge_device();
- gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2);
+ pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
- pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2);
+ pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
return 0;
}
+//#ifdef CONFIG_DEBUG_FS
+#define seq_printf(m, fmt, ...) sbuf_printf((m), (fmt), ##__VA_ARGS__)
+
struct intel_display_error_state {
struct intel_cursor_error_state {
u32 control;
u32 position;
u32 base;
u32 size;
- } cursor[2];
+ } cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
u32 conf;
@@ -7156,7 +9465,7 @@ struct intel_display_error_state {
u32 vtotal;
u32 vblank;
u32 vsync;
- } pipe[2];
+ } pipe[I915_MAX_PIPES];
struct intel_plane_error_state {
u32 control;
@@ -7166,7 +9475,7 @@ struct intel_display_error_state {
u32 addr;
u32 surface;
u32 tile_offset;
- } plane[2];
+ } plane[I915_MAX_PIPES];
};
struct intel_display_error_state *
@@ -7174,13 +9483,16 @@ intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
+ enum transcoder cpu_transcoder;
int i;
error = malloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT);
if (error == NULL)
return NULL;
- for (i = 0; i < 2; i++) {
+ for_each_pipe(i) {
+ cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
+
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
@@ -7195,14 +9507,14 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
- error->pipe[i].conf = I915_READ(PIPECONF(i));
+ error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
- error->pipe[i].htotal = I915_READ(HTOTAL(i));
- error->pipe[i].hblank = I915_READ(HBLANK(i));
- error->pipe[i].hsync = I915_READ(HSYNC(i));
- error->pipe[i].vtotal = I915_READ(VTOTAL(i));
- error->pipe[i].vblank = I915_READ(VBLANK(i));
- error->pipe[i].vsync = I915_READ(VSYNC(i));
+ error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
return error;
@@ -7213,33 +9525,36 @@ intel_display_print_error_state(struct sbuf *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
int i;
- for (i = 0; i < 2; i++) {
- sbuf_printf(m, "Pipe [%d]:\n", i);
- sbuf_printf(m, " CONF: %08x\n", error->pipe[i].conf);
- sbuf_printf(m, " SRC: %08x\n", error->pipe[i].source);
- sbuf_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
- sbuf_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
- sbuf_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
- sbuf_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
- sbuf_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
- sbuf_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
-
- sbuf_printf(m, "Plane [%d]:\n", i);
- sbuf_printf(m, " CNTR: %08x\n", error->plane[i].control);
- sbuf_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
- sbuf_printf(m, " SIZE: %08x\n", error->plane[i].size);
- sbuf_printf(m, " POS: %08x\n", error->plane[i].pos);
- sbuf_printf(m, " ADDR: %08x\n", error->plane[i].addr);
+ seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
+ for_each_pipe(i) {
+ seq_printf(m, "Pipe [%d]:\n", i);
+ seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
+ seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
+ seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
+ seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
+ seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
+ seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
+ seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
+ seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
+
+ seq_printf(m, "Plane [%d]:\n", i);
+ seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
+ seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
+ seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
+ seq_printf(m, " POS: %08x\n", error->plane[i].pos);
+ seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
if (INTEL_INFO(dev)->gen >= 4) {
- sbuf_printf(m, " SURF: %08x\n", error->plane[i].surface);
- sbuf_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
+ seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
+ seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
- sbuf_printf(m, "Cursor [%d]:\n", i);
- sbuf_printf(m, " CNTR: %08x\n", error->cursor[i].control);
- sbuf_printf(m, " POS: %08x\n", error->cursor[i].position);
- sbuf_printf(m, " BASE: %08x\n", error->cursor[i].base);
+ seq_printf(m, "Cursor [%d]:\n", i);
+ seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
+ seq_printf(m, " POS: %08x\n", error->cursor[i].position);
+ seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
}
+//#endif
diff --git a/sys/dev/drm2/i915/intel_dp.c b/sys/dev/drm2/i915/intel_dp.c
index eba5fe3..8bff4610 100644
--- a/sys/dev/drm2/i915/intel_dp.c
+++ b/sys/dev/drm2/i915/intel_dp.c
@@ -29,46 +29,15 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/drm_crtc.h>
#include <dev/drm2/drm_crtc_helper.h>
+#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
-#include <dev/drm2/i915/intel_drv.h>
-#include <dev/drm2/drm_dp_helper.h>
-#define DP_RECEIVER_CAP_SIZE 0xf
-#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
-#define DP_LINK_CONFIGURATION_SIZE 9
-
-struct intel_dp {
- struct intel_encoder base;
- uint32_t output_reg;
- uint32_t DP;
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
- bool has_audio;
- enum hdmi_force_audio force_audio;
- uint32_t color_range;
- int dpms_mode;
- uint8_t link_bw;
- uint8_t lane_count;
- uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
- device_t dp_iic_bus;
- device_t adapter;
- bool is_pch_edp;
- uint8_t train_set[4];
- int panel_power_up_delay;
- int panel_power_down_delay;
- int panel_power_cycle_delay;
- int backlight_on_delay;
- int backlight_off_delay;
- struct drm_display_mode *panel_fixed_mode; /* for eDP */
- struct timeout_task panel_vdd_task;
- bool want_panel_vdd;
-};
-
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -78,7 +47,9 @@ struct intel_dp {
*/
static bool is_edp(struct intel_dp *intel_dp)
{
- return intel_dp->base.type == INTEL_OUTPUT_EDP;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
/**
@@ -105,15 +76,16 @@ static bool is_cpu_edp(struct intel_dp *intel_dp)
return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
-static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
- return container_of(encoder, struct intel_dp, base.base);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.base.dev;
}
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_dp, base);
+ return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
/**
@@ -135,34 +107,29 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
return is_pch_edp(intel_dp);
}
-static void intel_dp_start_link_train(struct intel_dp *intel_dp);
-static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
intel_edp_link_config(struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
*lane_num = intel_dp->lane_count;
- if (intel_dp->link_bw == DP_LINK_BW_1_62)
- *link_bw = 162000;
- else if (intel_dp->link_bw == DP_LINK_BW_2_7)
- *link_bw = 270000;
+ *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
}
-static int
-intel_dp_max_lane_count(struct intel_dp *intel_dp)
+int
+intel_edp_target_clock(struct intel_encoder *intel_encoder,
+ struct drm_display_mode *mode)
{
- int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
- switch (max_lane_count) {
- case 1: case 2: case 4:
- break;
- default:
- max_lane_count = 4;
- }
- return max_lane_count;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+
+ if (intel_connector->panel.fixed_mode)
+ return intel_connector->panel.fixed_mode->clock;
+ else
+ return mode->clock;
}
static int
@@ -221,11 +188,11 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode,
+ bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
- int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
int max_rate, mode_rate;
mode_rate = intel_dp_link_required(mode->clock, 24);
@@ -236,8 +203,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
if (mode_rate > max_rate)
return false;
- if (adjusted_mode)
- adjusted_mode->private_flags
+ if (adjust_mode)
+ mode->private_flags
|= INTEL_MODE_DP_FORCE_6BPC;
return true;
@@ -251,21 +218,26 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
- if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+ if (is_edp(intel_dp) && fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
- if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
+ if (!intel_dp_adjust_dithering(intel_dp, mode, false))
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -299,6 +271,10 @@ intel_hrawclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;
+ /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+ if (IS_VALLEYVIEW(dev))
+ return 200;
+
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
@@ -324,7 +300,7 @@ intel_hrawclk(struct drm_device *dev)
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
@@ -332,7 +308,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
@@ -341,13 +317,13 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
if (!is_edp(intel_dp))
return;
if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
- printf("eDP powered off while attempting aux channel communication.\n");
+ WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
I915_READ(PCH_PP_STATUS),
I915_READ(PCH_PP_CONTROL));
@@ -360,7 +336,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
uint32_t output_reg = intel_dp->output_reg;
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -368,7 +345,30 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
- int try, precharge = 5;
+ int try, precharge;
+
+ if (IS_HASWELL(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ ch_ctl = DPA_AUX_CH_CTL;
+ ch_data = DPA_AUX_CH_DATA1;
+ break;
+ case PORT_B:
+ ch_ctl = PCH_DPB_AUX_CH_CTL;
+ ch_data = PCH_DPB_AUX_CH_DATA1;
+ break;
+ case PORT_C:
+ ch_ctl = PCH_DPC_AUX_CH_CTL;
+ ch_data = PCH_DPC_AUX_CH_DATA1;
+ break;
+ case PORT_D:
+ ch_ctl = PCH_DPD_AUX_CH_CTL;
+ ch_data = PCH_DPD_AUX_CH_DATA1;
+ break;
+ default:
+ BUG();
+ }
+ }
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
@@ -379,25 +379,34 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_HASWELL(dev))
+ aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
+ else if (IS_VALLEYVIEW(dev))
+ aux_clock_divider = 100;
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
- aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
+ aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
else
aux_clock_divider = intel_hrawclk(dev) / 2;
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
- drm_msleep(1, "915ach");
+ DRM_MSLEEP(1);
}
if (try == 3) {
- printf("dp_aux_ch not started status 0x%08x\n",
+ WARN(1, "dp_aux_ch not started status 0x%08x\n",
I915_READ(ch_ctl));
return -EBUSY;
}
@@ -423,7 +432,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
status = I915_READ(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
- DELAY(100);
+ udelay(100);
}
/* Clear done status and any errors */
@@ -499,7 +508,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
break;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
- DELAY(100);
+ udelay(100);
else
return -EIO;
}
@@ -548,7 +557,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
return ret - 1;
}
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
- DELAY(100);
+ udelay(100);
else
return -EIO;
}
@@ -558,9 +567,9 @@ static int
intel_dp_i2c_aux_ch(device_t adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
- struct iic_dp_aux_data *data = device_get_softc(adapter);
- struct intel_dp *intel_dp = data->priv;
- uint16_t address = data->address;
+ struct iic_dp_aux_data *algo_data = device_get_softc(adapter);
+ struct intel_dp *intel_dp = algo_data->priv;
+ uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
unsigned retry;
@@ -618,7 +627,7 @@ intel_dp_i2c_aux_ch(device_t adapter, int mode,
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
- DELAY(100);
+ udelay(100);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
@@ -631,13 +640,13 @@ intel_dp_i2c_aux_ch(device_t adapter, int mode,
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
}
- return (0/*reply_bytes - 1*/);
+ return reply_bytes - 1;
case AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
- DELAY(100);
+ udelay(100);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
@@ -649,9 +658,6 @@ intel_dp_i2c_aux_ch(device_t adapter, int mode,
return -EREMOTEIO;
}
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
@@ -668,36 +674,43 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
return ret;
}
-static bool
-intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
+bool
+intel_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_dp);
+ int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
- intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
- intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
+ intel_pch_panel_fitting(dev,
+ intel_connector->panel.fitting_mode,
mode, adjusted_mode);
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return false;
+
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
- max_lane_count, bws[max_clock], mode->clock);
+ max_lane_count, bws[max_clock], adjusted_mode->clock);
- if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, adjusted_mode))
+ if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
- for (clock = 0; clock <= max_clock; clock++) {
+ for (clock = 0; clock <= max_clock; clock++) {
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (mode_rate <= link_avail) {
@@ -756,59 +769,86 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ int target_clock;
/*
* Find the lane count in the intel_encoder private
*/
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
-
- if (encoder->crtc != crtc)
- continue;
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_dp->base.type == INTEL_OUTPUT_EDP)
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
{
lane_count = intel_dp->lane_count;
break;
}
}
+ target_clock = mode->clock;
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ target_clock = intel_edp_target_clock(intel_encoder,
+ mode);
+ break;
+ }
+ }
+
/*
* Compute the GMCH and Link ratios. The '3' here is
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
- mode->clock, adjusted_mode->clock, &m_n);
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(TRANSDATA_M1(pipe),
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
+ target_clock, adjusted_mode->clock, &m_n);
+
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
+ TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+ } else if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
+ } else if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
} else {
I915_WRITE(PIPE_GMCH_DATA_M(pipe),
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
+ TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
}
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder);
-static void ironlake_edp_pll_off(struct drm_encoder *encoder);
+void intel_dp_init_link_config(struct intel_dp *intel_dp)
+{
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+ intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+ /*
+ * Check for DPCD version > 1.1 and enhanced framing support
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+}
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
@@ -817,17 +857,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Turn on the eDP PLL if needed */
- if (is_edp(intel_dp)) {
- if (!is_pch_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
- else
- ironlake_edp_pll_off(encoder);
- }
-
/*
* There are four kinds of DP registers:
*
@@ -849,10 +881,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
* supposed to be read-only.
*/
intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
- intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
/* Handle DP bits in common between all three register formats */
-
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
switch (intel_dp->lane_count) {
@@ -867,25 +897,17 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
break;
}
if (intel_dp->has_audio) {
- DRM_DEBUG_KMS("Enabling DP audio on pipe %c\n",
+ DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
}
- memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- intel_dp->link_configuration[0] = intel_dp->link_bw;
- intel_dp->link_configuration[1] = intel_dp->lane_count;
- /*
- * Check for DPCD version > 1.1 and enhanced framing support
- */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
- intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- }
+
+ intel_dp_init_link_config(intel_dp);
/* Split out the IBX/CPU vs CPT settings */
- if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -898,7 +920,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= intel_crtc->pipe << 29;
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -920,7 +941,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -944,7 +964,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
@@ -991,9 +1011,9 @@ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
return control;
}
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1001,8 +1021,8 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("Turn eDP VDD on\n");
- if (intel_dp->want_panel_vdd)
- printf("eDP VDD already requested on\n");
+ WARN(intel_dp->want_panel_vdd,
+ "eDP VDD already requested on\n");
intel_dp->want_panel_vdd = true;
@@ -1026,13 +1046,13 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
*/
if (!ironlake_edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP was not running\n");
- drm_msleep(intel_dp->panel_power_up_delay, "915edpon");
+ DRM_MSLEEP(intel_dp->panel_power_up_delay);
}
}
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1046,28 +1066,27 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
- drm_msleep(intel_dp->panel_power_down_delay, "915vddo");
+ DRM_MSLEEP(intel_dp->panel_power_down_delay);
}
}
static void ironlake_panel_vdd_work(void *arg, int pending __unused)
{
struct intel_dp *intel_dp = arg;
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
sx_xlock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
sx_xunlock(&dev->mode_config.mutex);
}
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
- if (!intel_dp->want_panel_vdd)
- printf("eDP VDD not forced on\n");
+ WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
intel_dp->want_panel_vdd = false;
@@ -1079,16 +1098,18 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
* time from now (relative to the power down delay)
* to keep the panel power up across a sequence of operations
*/
- struct drm_i915_private *dev_priv = intel_dp->base.base.dev->dev_private;
- taskqueue_enqueue_timeout(dev_priv->tq,
- &intel_dp->panel_vdd_task,
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ taskqueue_enqueue_timeout(dev_priv->wq,
+ &intel_dp->panel_vdd_work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
}
}
-static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1128,9 +1149,9 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
}
-static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1139,22 +1160,26 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
- if (intel_dp->want_panel_vdd)
- printf("Cannot turn power off while VDD is on\n");
- ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(dev_priv);
+ /* We need to switch off panel power _and_ force vdd, for otherwise some
+ * panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+ intel_dp->want_panel_vdd = false;
+
ironlake_wait_panel_off(intel_dp);
}
-static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
u32 pp;
if (!is_edp(intel_dp))
@@ -1167,59 +1192,87 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
* link. So delay a bit to make sure the image is solid before
* allowing it to appear.
*/
- drm_msleep(intel_dp->backlight_on_delay, "915ebo");
+ DRM_MSLEEP(intel_dp->backlight_on_delay);
pp = ironlake_get_pp_control(dev_priv);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+
+ intel_panel_enable_backlight(dev, pipe);
}
-static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
if (!is_edp(intel_dp))
return;
+ intel_panel_disable_backlight(dev);
+
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
- drm_msleep(intel_dp->backlight_off_delay, "915bo1");
+ DRM_MSLEEP(intel_dp->backlight_off_delay);
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
+ WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We don't adjust intel_dp->DP while tearing down the link, to
+ * facilitate link retraining (e.g. after hotplug). Hence clear all
+ * enable bits here to ensure that we don't enable too much. */
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
- DELAY(200);
+ udelay(200);
}
-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
dpa_ctl = I915_READ(DP_A);
+ WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+ "dp pll off, should be on\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We can't rely on the value tracked for the DP register in
+ * intel_dp->DP because link_down must not change that (otherwise link
+ * re-training will fail. */
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
- DELAY(200);
+ udelay(200);
}
/* If the sink supports it, try to set the power state appropriately */
-static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
int ret, i;
@@ -1231,7 +1284,7 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
DP_SET_POWER_D3);
if (ret != 1)
- DRM_DEBUG("failed to write sink power state\n");
+ DRM_DEBUG_DRIVER("failed to write sink power state\n");
} else {
/*
* When turning on, we need to retry for 1ms to give the sink
@@ -1243,85 +1296,113 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
DP_SET_POWER_D0);
if (ret == 1)
break;
- drm_msleep(1, "915dps");
+ DRM_MSLEEP(1);
}
}
}
-static void intel_dp_prepare(struct drm_encoder *encoder)
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(intel_dp->output_reg);
- ironlake_edp_backlight_off(intel_dp);
- ironlake_edp_panel_off(intel_dp);
+ if (!(tmp & DP_PORT_EN))
+ return false;
- /* Wake up the sink first */
- ironlake_edp_panel_vdd_on(intel_dp);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
- intel_dp_link_down(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ *pipe = PORT_TO_PIPE(tmp);
+ } else {
+ u32 trans_sel;
+ u32 trans_dp;
+ int i;
- /* Make sure the panel is off before trying to
- * change the mode
- */
+ switch (intel_dp->output_reg) {
+ case PCH_DP_B:
+ trans_sel = TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ trans_sel = TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ trans_sel = TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ return true;
+ }
+
+ for_each_pipe(i) {
+ trans_dp = I915_READ(TRANS_DP_CTL(i));
+ if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
+ *pipe = i;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+ intel_dp->output_reg);
+ }
+
+ return true;
}
-static void intel_dp_commit(struct drm_encoder *encoder)
+static void intel_disable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ /* Make sure the panel is off before trying to change the mode. But also
+ * ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
- intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
- intel_dp_complete_link_train(intel_dp);
- ironlake_edp_backlight_on(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
- intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
+ /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
+ if (!is_cpu_edp(intel_dp))
+ intel_dp_link_down(intel_dp);
+}
+
+static void intel_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- if (HAS_PCH_CPT(dev))
- intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+ if (is_cpu_edp(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_pll_off(intel_dp);
+ }
}
-static void
-intel_dp_dpms(struct drm_encoder *encoder, int mode)
+static void intel_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- if (mode != DRM_MODE_DPMS_ON) {
- ironlake_edp_backlight_off(intel_dp);
- ironlake_edp_panel_off(intel_dp);
+ if (WARN_ON(dp_reg & DP_PORT_EN))
+ return;
- ironlake_edp_panel_vdd_on(intel_dp);
- intel_dp_sink_dpms(intel_dp, mode);
- intel_dp_link_down(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ intel_dp_complete_link_train(intel_dp);
+ ironlake_edp_backlight_on(intel_dp);
+}
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_off(encoder);
- } else {
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
+static void intel_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- ironlake_edp_panel_vdd_on(intel_dp);
- intel_dp_sink_dpms(intel_dp, mode);
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
- intel_dp_complete_link_train(intel_dp);
- } else
- ironlake_edp_panel_vdd_off(intel_dp, false);
- ironlake_edp_backlight_on(intel_dp);
- }
- intel_dp->dpms_mode = mode;
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_on(intel_dp);
}
+
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
@@ -1341,7 +1422,7 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
recv_bytes);
if (ret == recv_bytes)
return true;
- drm_msleep(1, "915dpl");
+ DRM_MSLEEP(1);
}
return false;
@@ -1360,38 +1441,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE);
}
-static uint8_t
-intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int r)
-{
- return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static uint8_t
-intel_get_adjust_request_voltage(uint8_t adjust_request[2],
- int lane)
-{
- int s = ((lane & 1) ?
- DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
- DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- uint8_t l = adjust_request[lane>>1];
-
- return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
- int lane)
-{
- int s = ((lane & 1) ?
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
- DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- uint8_t l = adjust_request[lane>>1];
-
- return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
-
#if 0
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
@@ -1412,7 +1461,7 @@ static char *link_train_names[] = {
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1425,9 +1474,21 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_9_5;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1458,13 +1519,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
uint8_t v = 0;
uint8_t p = 0;
int lane;
- uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
uint8_t voltage_max;
uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
+ uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
@@ -1581,52 +1641,38 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
}
}
-static uint8_t
-intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int s = (lane & 1) * 4;
- uint8_t l = link_status[lane>>1];
-
- return (l >> s) & 0xf;
-}
-
-/* Check for clock recovery is done on all channels */
-static bool
-intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_dp_signal_levels_hsw(uint8_t train_set)
{
- int lane;
- uint8_t lane_status;
-
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
- if ((lane_status & DP_LANE_CR_DONE) == 0)
- return false;
- }
- return true;
-}
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_400MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_400MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_400MV_6DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+ return DDI_BUF_EMP_400MV_9_5DB_HSW;
-/* Check to see if channel eq is done on all channels */
-#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
- DP_LANE_CHANNEL_EQ_DONE|\
- DP_LANE_SYMBOL_LOCKED)
-static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
-{
- uint8_t lane_align;
- uint8_t lane_status;
- int lane;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_600MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_600MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_600MV_6DB_HSW;
- lane_align = intel_dp_link_status(link_status,
- DP_LANE_ALIGN_STATUS_UPDATED);
- if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
- return false;
- for (lane = 0; lane < intel_dp->lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
- if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
- return false;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_800MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_800MV_3_5DB_HSW;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return DDI_BUF_EMP_400MV_0DB_HSW;
}
- return true;
}
static bool
@@ -1634,9 +1680,90 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
int ret;
+ uint32_t temp;
+
+ if (IS_HASWELL(dev)) {
+ temp = I915_READ(DP_TP_CTL(port));
+
+ if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+ temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+ else
+ temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+
+ if (port != PORT_A) {
+ temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+ DP_TP_STATUS_IDLE_DONE), 1))
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ }
+
+ temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+ break;
+ case DP_TRAINING_PATTERN_1:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+ break;
+ }
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ } else if (HAS_PCH_CPT(dev) &&
+ (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ }
+
+ } else {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ }
+ }
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
@@ -1645,34 +1772,33 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
- ret = intel_dp_aux_native_write(intel_dp,
- DP_TRAINING_LANE0_SET,
- intel_dp->train_set,
- intel_dp->lane_count);
- if (ret != intel_dp->lane_count)
- return false;
+ if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+ DP_TRAINING_PATTERN_DISABLE) {
+ ret = intel_dp_aux_native_write(intel_dp,
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+ if (ret != intel_dp->lane_count)
+ return false;
+ }
return true;
}
/* Enable corresponding port and start training pattern 1 */
-static void
+void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+ struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
- u32 reg;
uint32_t DP = intel_dp->DP;
- /* Enable output, wait for it to become active */
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ if (IS_HASWELL(dev))
+ intel_ddi_prepare_link_retrain(encoder);
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1681,10 +1807,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP &= ~DP_LINK_TRAIN_MASK_CPT;
- else
- DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
voltage_tries = 0;
@@ -1695,8 +1817,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
-
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_dp_signal_levels_hsw(
+ intel_dp->train_set[0]);
+ DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1704,27 +1829,24 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
+ DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
+ signal_levels);
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
- else
- reg = DP | DP_LINK_TRAIN_PAT_1;
-
- if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_1))
- break;
/* Set training pattern 1 */
+ if (!intel_dp_set_link_train(intel_dp, DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE))
+ break;
- DELAY(100);
+ drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
@@ -1763,14 +1885,12 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}
-static void
+void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
bool channel_eq = false;
int tries, cr_tries;
- u32 reg;
uint32_t DP = intel_dp->DP;
/* channel equalization */
@@ -1788,7 +1908,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
+ DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1799,28 +1922,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
- else
- reg = DP | DP_LINK_TRAIN_PAT_2;
-
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_2))
+ if (!intel_dp_set_link_train(intel_dp, DP,
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE))
break;
- DELAY(400);
+ drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status))
break;
/* Make sure clock is still ok */
- if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
- if (intel_channel_eq_ok(intel_dp, link_status)) {
+ if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
channel_eq = true;
break;
}
@@ -1839,35 +1958,42 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_OFF_CPT;
- else
- reg = DP | DP_LINK_TRAIN_OFF;
+ if (channel_eq)
+ DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
- I915_WRITE(intel_dp->output_reg, reg);
- POSTING_READ(intel_dp->output_reg);
- intel_dp_aux_native_write_1(intel_dp,
- DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+ intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
- if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ /*
+ * DDI code has a strict mode set sequence and we should try to respect
+ * it, otherwise we might hang the machine in many different ways. So we
+ * really should be disabling the port only on a complete crtc_disable
+ * sequence. This function is just called under two conditions on DDI
+ * code:
+ * - Link train failed while doing crtc_enable, and on this case we
+ * really should respect the mode set sequence and wait for a
+ * crtc_disable.
+ * - Someone turned the monitor off and intel_dp_check_link_status
+ * called us. We don't need to disable the whole port on this case, so
+ * when someone turns the monitor on again,
+ * intel_ddi_prepare_link_retrain will take care of redoing the link
+ * train.
+ */
+ if (IS_HASWELL(dev))
return;
- DRM_DEBUG_KMS("\n");
+ if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
+ return;
- if (is_edp(intel_dp)) {
- DP &= ~DP_PLL_ENABLE;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
- DELAY(100);
- }
+ DRM_DEBUG_KMS("\n");
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
@@ -1878,19 +2004,11 @@ intel_dp_link_down(struct intel_dp *intel_dp)
}
POSTING_READ(intel_dp->output_reg);
- drm_msleep(17, "915dlo");
-
- if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP |= DP_LINK_TRAIN_OFF_CPT;
- else
- DP |= DP_LINK_TRAIN_OFF;
- }
+ DRM_MSLEEP(17);
-
- if (!HAS_PCH_CPT(dev) &&
+ if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
@@ -1916,7 +2034,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
* continuing.
*/
POSTING_READ(intel_dp->output_reg);
- drm_msleep(50, "915dla");
+ DRM_MSLEEP(50);
} else
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
}
@@ -1924,19 +2042,32 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
- drm_msleep(intel_dp->panel_power_down_delay, "915ldo");
+ DRM_MSLEEP(intel_dp->panel_power_down_delay);
}
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
- sizeof(intel_dp->dpcd)) &&
- (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
- return true;
- }
+ sizeof(intel_dp->dpcd)) == 0)
+ return false; /* aux transfer failed */
- return false;
+ if (intel_dp->dpcd[DP_DPCD_REV] == 0)
+ return false; /* DPCD not present */
+
+ if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT))
+ return true; /* native DP sink */
+
+ if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
+ return true; /* no per-port downstream info */
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
+ intel_dp->downstream_ports,
+ DP_MAX_DOWNSTREAM_PORTS) == 0)
+ return false; /* downstream port status fetch failed */
+
+ return true;
}
static void
@@ -1947,6 +2078,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
+ ironlake_edp_panel_vdd_on(intel_dp);
+
if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
DRM_DEBUG_KMS("Sink OUI: %02x%02x%02x\n",
buf[0], buf[1], buf[2]);
@@ -1954,6 +2087,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
DRM_DEBUG_KMS("Branch OUI: %02x%02x%02x\n",
buf[0], buf[1], buf[2]);
+
+ ironlake_edp_panel_vdd_off(intel_dp, false);
}
static bool
@@ -1974,7 +2109,7 @@ static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
/* NAK by default */
- intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
+ intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
/*
@@ -1986,16 +2121,17 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
* 4. Check link status on receipt of hot-plug interrupt
*/
-static void
+void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
+ if (!intel_encoder->connectors_active)
return;
- if (!intel_dp->base.base.crtc)
+ if (WARN_ON(!intel_encoder->base.crtc))
return;
/* Try to read receiver status if the link appears to be up */
@@ -2021,33 +2157,66 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
intel_dp_handle_test_request(intel_dp);
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
- DRM_DEBUG_KMS("CP or sink specific irq unhandled\n");
+ DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- if (!intel_channel_eq_ok(intel_dp, link_status)) {
+ if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- drm_get_encoder_name(&intel_dp->base.base));
- intel_dp_start_link_train(intel_dp);
+ drm_get_encoder_name(&intel_encoder->base));
+ intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
}
+/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
- if (intel_dp_get_dpcd(intel_dp))
+ uint8_t *dpcd = intel_dp->dpcd;
+ bool hpd;
+ uint8_t type;
+
+ if (!intel_dp_get_dpcd(intel_dp))
+ return connector_status_disconnected;
+
+ /* if there's no downstream port, we're done */
+ if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ return connector_status_connected;
+
+ /* If we're HPD-aware, SINK_COUNT changes dynamically */
+ hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
+ if (hpd) {
+ uint8_t reg;
+ if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
+ &reg, 1))
+ return connector_status_unknown;
+ return DP_GET_SINK_COUNT(reg) ? connector_status_connected
+ : connector_status_disconnected;
+ }
+
+ /* If no HPD, poke DDC gently */
+ if (drm_probe_ddc(intel_dp->adapter))
return connector_status_connected;
+
+ /* Well we tried, say unknown for unreliable port types */
+ type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+ if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
+ return connector_status_unknown;
+
+ /* Anything else is out of spec, warn and ignore */
+ DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
return connector_status_disconnected;
}
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum drm_connector_status status;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
- status = intel_panel_detect(intel_dp->base.base.dev);
+ status = intel_panel_detect(dev);
if (status == connector_status_unknown)
status = connector_status_connected;
return status;
@@ -2059,27 +2228,25 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t temp, bit;
+ uint32_t bit;
switch (intel_dp->output_reg) {
case DP_B:
- bit = DPB_HOTPLUG_INT_STATUS;
+ bit = DPB_HOTPLUG_LIVE_STATUS;
break;
case DP_C:
- bit = DPC_HOTPLUG_INT_STATUS;
+ bit = DPC_HOTPLUG_LIVE_STATUS;
break;
case DP_D:
- bit = DPD_HOTPLUG_INT_STATUS;
+ bit = DPD_HOTPLUG_LIVE_STATUS;
break;
default:
return connector_status_unknown;
}
- temp = I915_READ(PORT_HOTPLUG_STAT);
-
- if ((temp & bit) == 0)
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
return connector_status_disconnected;
return intel_dp_detect_dpcd(intel_dp);
@@ -2088,25 +2255,45 @@ g4x_dp_detect(struct intel_dp *intel_dp)
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, device_t adapter)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct edid *edid;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- ironlake_edp_panel_vdd_on(intel_dp);
- edid = drm_get_edid(connector, adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
- return edid;
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ struct edid *edid;
+ int size;
+
+ /* invalid edid */
+ if (intel_connector->edid_err)
+ return NULL;
+
+ size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
+ edid = malloc(size, DRM_MEM_KMS, M_WAITOK);
+ if (!edid)
+ return NULL;
+
+ memcpy(edid, intel_connector->edid, size);
+ return edid;
+ }
+
+ return drm_get_edid(connector, adapter);
}
static int
intel_dp_get_edid_modes(struct drm_connector *connector, device_t adapter)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- int ret;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- ironlake_edp_panel_vdd_on(intel_dp);
- ret = intel_ddc_get_modes(connector, adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
- return ret;
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ /* invalid edid */
+ if (intel_connector->edid_err)
+ return 0;
+
+ return intel_connector_update_modes(connector,
+ intel_connector->edid);
+ }
+
+ return intel_ddc_get_modes(connector, adapter);
}
@@ -2120,9 +2307,12 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = connector->dev;
enum drm_connector_status status;
struct edid *edid = NULL;
+ char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
intel_dp->has_audio = false;
@@ -2130,6 +2320,11 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = ironlake_dp_detect(intel_dp);
else
status = g4x_dp_detect(intel_dp);
+
+ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+
if (status != connector_status_connected)
return status;
@@ -2145,49 +2340,31 @@ intel_dp_detect(struct drm_connector *connector, bool force)
}
}
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
return connector_status_connected;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
ret = intel_dp_get_edid_modes(connector, intel_dp->adapter);
- if (ret) {
- if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
- struct drm_display_mode *newmode;
- list_for_each_entry(newmode, &connector->probed_modes,
- head) {
- if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
- intel_dp->panel_fixed_mode =
- drm_mode_duplicate(dev, newmode);
- break;
- }
- }
- }
+ if (ret)
return ret;
- }
- /* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (is_edp(intel_dp)) {
- /* initialize panel mode from VBT if available for eDP */
- if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
- intel_dp->panel_fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (intel_dp->panel_fixed_mode) {
- intel_dp->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
- }
- }
- if (intel_dp->panel_fixed_mode) {
- struct drm_display_mode *mode;
- mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+ /* if eDP has no EDID, fall back to fixed mode */
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev,
+ intel_connector->panel.fixed_mode);
+ if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
@@ -2205,7 +2382,6 @@ intel_dp_detect_audio(struct drm_connector *connector)
edid = intel_dp_get_edid(connector, intel_dp->adapter);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
-
free(edid, DRM_MEM_KMS);
}
@@ -2218,7 +2394,9 @@ intel_dp_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
int ret;
ret = drm_object_property_set_value(&connector->base, property, val);
@@ -2254,14 +2432,29 @@ intel_dp_set_property(struct drm_connector *connector,
goto done;
}
+ if (is_edp(intel_dp) &&
+ property == connector->dev->mode_config.scaling_mode_property) {
+ if (val == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == val) {
+ /* the eDP scaling property is not changed */
+ return 0;
+ }
+ intel_connector->panel.fitting_mode = val;
+
+ goto done;
+ }
+
return -EINVAL;
done:
- if (intel_dp->base.base.crtc) {
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y,
- crtc->fb);
+ if (intel_encoder->base.crtc) {
+ struct drm_crtc *crtc = intel_encoder->base.crtc;
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -2270,25 +2463,23 @@ done:
static void
intel_dp_destroy(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- if (intel_dpd_is_edp(dev))
- intel_panel_destroy_backlight(dev);
+ free(intel_connector->edid, DRM_MEM_KMS);
+
+ if (is_edp(intel_dp))
+ intel_panel_fini(&intel_connector->panel);
-#if 0
- drm_sysfs_connector_remove(connector);
-#endif
drm_connector_cleanup(connector);
free(connector, DRM_MEM_KMS);
}
-static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
- struct drm_device *dev;
- struct intel_dp *intel_dp;
-
- intel_dp = enc_to_intel_dp(encoder);
- dev = encoder->dev;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
if (intel_dp->dp_iic_bus != NULL) {
if (intel_dp->adapter != NULL) {
@@ -2299,27 +2490,25 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
}
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
- struct drm_i915_private *dev_priv = intel_dp->base.base.dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- taskqueue_cancel_timeout(dev_priv->tq,
- &intel_dp->panel_vdd_task, NULL);
- taskqueue_drain_timeout(dev_priv->tq,
- &intel_dp->panel_vdd_task);
+ taskqueue_cancel_timeout(dev_priv->wq,
+ &intel_dp->panel_vdd_work, NULL);
+ taskqueue_drain_timeout(dev_priv->wq,
+ &intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
- free(intel_dp, DRM_MEM_KMS);
+ free(intel_dig_port, DRM_MEM_KMS);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
- .dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
- .prepare = intel_dp_prepare,
.mode_set = intel_dp_mode_set,
- .commit = intel_dp_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
@@ -2339,7 +2528,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
intel_dp_check_link_status(intel_dp);
}
@@ -2349,18 +2538,14 @@ int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
- if (encoder->crtc != crtc)
- continue;
-
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_dp->base.type == INTEL_OUTPUT_EDP)
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
@@ -2390,156 +2575,237 @@ bool intel_dpd_is_edp(struct drm_device *dev)
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+
+ if (is_edp(intel_dp)) {
+ drm_mode_create_scaling_mode_property(connector->dev);
+ drm_object_attach_property(
+ &connector->base,
+ connector->dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ }
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_power_seq cur, vbt, spec, final;
+ u32 pp_on, pp_off, pp_div, pp;
+
+ /* Workaround: Need to write PP_CONTROL with the unlock key as
+ * the very first thing. */
+ pp = ironlake_get_pp_control(dev_priv);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+
+ pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_off = I915_READ(PCH_PP_OFF_DELAYS);
+ pp_div = I915_READ(PCH_PP_DIVISOR);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+ vbt = dev_priv->edp.pps;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it with 1000 to make it in units of 100usec,
+ * too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ if (out)
+ *out = final;
+}
+
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *seq)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_on, pp_off, pp_div;
+
+ /* And finally store the new values in the power sequencer. */
+ pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+ (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+ /* Compute the divisor for the pp clock, simply match the Bspec
+ * formula. */
+ pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
+ << PP_REFERENCE_DIVIDER_SHIFT;
+ pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+ << PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (is_cpu_edp(intel_dp))
+ pp_on |= PANEL_POWER_PORT_DP_A;
+ else
+ pp_on |= PANEL_POWER_PORT_DP_D;
+ }
+
+ I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
+ I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
+ I915_WRITE(PCH_PP_DIVISOR, pp_div);
+
+ DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ I915_READ(PCH_PP_ON_DELAYS),
+ I915_READ(PCH_PP_OFF_DELAYS),
+ I915_READ(PCH_PP_DIVISOR));
}
void
-intel_dp_init(struct drm_device *dev, int output_reg)
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_dp *intel_dp;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edp_power_seq power_seq = { 0 };
+ enum port port = intel_dig_port->port;
const char *name = NULL;
int type;
- intel_dp = malloc(sizeof(struct intel_dp), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
-
- intel_dp->output_reg = output_reg;
- intel_dp->dpms_mode = -1;
+ /* Preserve the current hw state. */
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->attached_connector = intel_connector;
- intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
- intel_encoder = &intel_dp->base;
-
- if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+ if (HAS_PCH_SPLIT(dev) && port == PORT_D)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
- if (output_reg == DP_A || is_pch_edp(intel_dp)) {
+ /*
+ * FIXME : We need to initialize built-in panels before external panels.
+ * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
+ */
+ if (IS_VALLEYVIEW(dev) && port == PORT_C) {
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+ } else if (port == PORT_A || is_pch_edp(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
+ /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
+ * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
+ * rewrite it.
+ */
type = DRM_MODE_CONNECTOR_DisplayPort;
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
- connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- if (output_reg == DP_B || output_reg == PCH_DP_B)
- intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
- else if (output_reg == DP_C || output_reg == PCH_DP_C)
- intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
- else if (output_reg == DP_D || output_reg == PCH_DP_D)
- intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
-
- if (is_edp(intel_dp)) {
- intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- TIMEOUT_TASK_INIT(dev_priv->tq, &intel_dp->panel_vdd_task, 0,
- ironlake_panel_vdd_work, intel_dp);
- }
-
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+ TIMEOUT_TASK_INIT(dev_priv->wq, &intel_dp->panel_vdd_work, 0,
+ ironlake_panel_vdd_work, intel_dp);
intel_connector_attach_encoder(intel_connector, intel_encoder);
-#if 0
- drm_sysfs_connector_add(connector);
-#endif
-
- /* Set up the DDC bus. */
- switch (output_reg) {
- case DP_A:
- name = "DPDDC-A";
- break;
- case DP_B:
- case PCH_DP_B:
- dev_priv->hotplug_supported_mask |=
- HDMIB_HOTPLUG_INT_STATUS;
- name = "DPDDC-B";
- break;
- case DP_C:
- case PCH_DP_C:
- dev_priv->hotplug_supported_mask |=
- HDMIC_HOTPLUG_INT_STATUS;
- name = "DPDDC-C";
- break;
- case DP_D:
- case PCH_DP_D:
- dev_priv->hotplug_supported_mask |=
- HDMID_HOTPLUG_INT_STATUS;
- name = "DPDDC-D";
- break;
- }
-
- /* Cache some DPCD data in the eDP case */
- if (is_edp(intel_dp)) {
- bool ret;
- struct edp_power_seq cur, vbt;
- u32 pp_on, pp_off, pp_div;
- pp_on = I915_READ(PCH_PP_ON_DELAYS);
- pp_off = I915_READ(PCH_PP_OFF_DELAYS);
- pp_div = I915_READ(PCH_PP_DIVISOR);
-
- if (!pp_on || !pp_off || !pp_div) {
- DRM_INFO("bad panel power sequencing delays, disabling panel\n");
- intel_dp_encoder_destroy(&intel_dp->base.base);
- intel_dp_destroy(&intel_connector->base);
- return;
- }
-
- /* Pull timing values out of registers */
- cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
-
- cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
-
- cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
-
- cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
-
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
-
- DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
-
- vbt = dev_priv->edp.pps;
+ if (IS_HASWELL(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
- DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
-#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
+ /* Set up the DDC bus. */
+ switch (port) {
+ case PORT_A:
+ name = "DPDDC-A";
+ break;
+ case PORT_B:
+ dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+ name = "DPDDC-B";
+ break;
+ case PORT_C:
+ dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+ name = "DPDDC-C";
+ break;
+ case PORT_D:
+ dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+ name = "DPDDC-D";
+ break;
+ default:
+ WARN(1, "Invalid port %c\n", port_name(port));
+ break;
+ }
- intel_dp->panel_power_up_delay = get_delay(t1_t3);
- intel_dp->backlight_on_delay = get_delay(t8);
- intel_dp->backlight_off_delay = get_delay(t9);
- intel_dp->panel_power_down_delay = get_delay(t10);
- intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+ if (is_edp(intel_dp))
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+ /* Cache DPCD and EDID for edp. */
+ if (is_edp(intel_dp)) {
+ bool ret;
+ struct drm_display_mode *scan;
+ struct edid *edid;
+ int edid_err = 0;
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_get_dpcd(intel_dp);
@@ -2553,19 +2819,54 @@ intel_dp_init(struct drm_device *dev, int output_reg)
} else {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
- intel_dp_encoder_destroy(&intel_dp->base.base);
- intel_dp_destroy(&intel_connector->base);
+ intel_dp_encoder_destroy(&intel_encoder->base);
+ intel_dp_destroy(connector);
return;
}
- }
- intel_dp_i2c_init(intel_dp, intel_connector, name);
+ /* We now know it's not a ghost, init power sequence regs. */
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
- intel_encoder->hot_plug = intel_dp_hot_plug;
+ ironlake_edp_panel_vdd_on(intel_dp);
+ edid = drm_get_edid(connector, intel_dp->adapter);
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ } else {
+ free(edid, DRM_MEM_KMS);
+ edid = NULL;
+ edid_err = -EINVAL;
+ }
+ } else {
+ edid = NULL;
+ edid_err = -ENOENT;
+ }
+ intel_connector->edid = edid;
+ intel_connector->edid_err = edid_err;
+
+ /* prefer fixed mode from EDID if available */
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+
+ /* fallback to VBT if available for eDP */
+ if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (fixed_mode)
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ }
if (is_edp(intel_dp)) {
- dev_priv->int_edp_connector = connector;
- intel_panel_setup_backlight(dev);
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
}
intel_dp_add_properties(intel_dp, connector);
@@ -2579,3 +2880,45 @@ intel_dp_init(struct drm_device *dev, int output_reg)
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = malloc(sizeof(struct intel_digital_port), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_connector) {
+ free(intel_dig_port, DRM_MEM_KMS);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+ intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+
+ intel_dig_port->port = port;
+ intel_dig_port->dp.output_reg = output_reg;
+
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_dp_hot_plug;
+
+ intel_dp_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/sys/dev/drm2/i915/intel_drv.h b/sys/dev/drm2/i915/intel_drv.h
index 34aa6bd..fd5817f 100644
--- a/sys/dev/drm2/i915/intel_drv.h
+++ b/sys/dev/drm2/i915/intel_drv.h
@@ -24,15 +24,15 @@
*
* $FreeBSD$
*/
-
-#ifndef DRM_INTEL_DRV_H
-#define DRM_INTEL_DRV_H
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/drm_crtc.h>
#include <dev/drm2/drm_crtc_helper.h>
#include <dev/drm2/drm_fb_helper.h>
+#include <dev/drm2/drm_dp_helper.h>
#define _intel_wait_for(DEV, COND, MS, W, WMSG) \
({ \
@@ -55,6 +55,23 @@
ret; \
})
+#define _wait_for(COND, MS, W, WMSG) ({ \
+ int timeout__ = ticks + (MS) * hz / 1000; \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(ticks, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W) { \
+ pause((WMSG), 1); \
+ } else { \
+ DELAY(1000); \
+ } \
+ } \
+ ret__; \
+})
+
#define wait_for_atomic_us(COND, US) ({ \
int i, ret__ = -ETIMEDOUT; \
for (i = 0; i < (US); i++) { \
@@ -67,8 +84,8 @@
ret__; \
})
-#define wait_for(COND, MS) _intel_wait_for(NULL, COND, MS, 1, "915wfi")
-#define wait_for_atomic(COND, MS) _intel_wait_for(NULL, COND, MS, 0, "915wfa")
+#define wait_for(COND, MS) _intel_wait_for(NULL, COND, MS, 1, "915wfi")
+#define wait_for_atomic(COND, MS) _intel_wait_for(NULL, COND, MS, 0, "915wfa")
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
@@ -100,25 +117,6 @@
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_UNKNOWN 9
-/* Intel Pipe Clone Bit */
-#define INTEL_HDMIB_CLONE_BIT 1
-#define INTEL_HDMIC_CLONE_BIT 2
-#define INTEL_HDMID_CLONE_BIT 3
-#define INTEL_HDMIE_CLONE_BIT 4
-#define INTEL_HDMIF_CLONE_BIT 5
-#define INTEL_SDVO_NON_TV_CLONE_BIT 6
-#define INTEL_SDVO_TV_CLONE_BIT 7
-#define INTEL_SDVO_LVDS_CLONE_BIT 8
-#define INTEL_ANALOG_CLONE_BIT 9
-#define INTEL_TV_CLONE_BIT 10
-#define INTEL_DP_B_CLONE_BIT 11
-#define INTEL_DP_C_CLONE_BIT 12
-#define INTEL_DP_D_CLONE_BIT 13
-#define INTEL_LVDS_CLONE_BIT 14
-#define INTEL_DVO_TMDS_CLONE_BIT 15
-#define INTEL_DVO_LVDS_CLONE_BIT 16
-#define INTEL_EDP_CLONE_BIT 17
-
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
@@ -161,32 +159,87 @@ struct intel_fbdev {
struct intel_encoder {
struct drm_encoder base;
+ /*
+ * The new crtc this encoder will be driven from. Only differs from
+ * base->crtc while a modeset is in progress.
+ */
+ struct intel_crtc *new_crtc;
+
int type;
bool needs_tv_clock;
+ /*
+ * Intel hw has only one MUX where encoders could be clone, hence a
+ * simple flag is enough to compute the possible_clones mask.
+ */
+ bool cloneable;
+ bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
+ void (*pre_enable)(struct intel_encoder *);
+ void (*enable)(struct intel_encoder *);
+ void (*disable)(struct intel_encoder *);
+ void (*post_disable)(struct intel_encoder *);
+ /* Read out the current hw state of this connector, returning true if
+ * the encoder is active. If the encoder is enabled it also set the pipe
+ * it is connected to in the pipe parameter. */
+ bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
int crtc_mask;
- int clone_mask;
+};
+
+struct intel_panel {
+ struct drm_display_mode *fixed_mode;
+ int fitting_mode;
};
struct intel_connector {
struct drm_connector base;
+ /*
+ * The fixed encoder this connector is connected to.
+ */
struct intel_encoder *encoder;
+
+ /*
+ * The new encoder this connector will be driven. Only differs from
+ * encoder while a modeset is in progress.
+ */
+ struct intel_encoder *new_encoder;
+
+ /* Reads out the current hw, returning true if the connector is enabled
+ * and active (i.e. dpms ON state). */
+ bool (*get_hw_state)(struct intel_connector *);
+
+ /* Panel info for eDP and LVDS */
+ struct intel_panel panel;
+
+ /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ struct edid *edid;
+ int edid_err;
};
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
+ enum transcoder cpu_transcoder;
u8 lut_r[256], lut_g[256], lut_b[256];
- int dpms_mode;
- bool active; /* is the crtc on? independent of the dpms mode */
- bool busy; /* is scanout buffer being updated frequently? */
- struct callout idle_callout;
+ /*
+ * Whether the crtc and the connected output pipeline is active. Implies
+ * that crtc->enabled is set, i.e. the current mode configuration has
+ * some outputs connected to this crtc.
+ */
+ bool active;
+ bool primary_disabled; /* is the crtc obscured by a plane? */
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
int fdi_lanes;
+ atomic_t unpin_work_count;
+
+ /* Display surface base address adjustement for pageflips. Note that on
+ * gen4+ this only adjusts up to a tile, offsets within a tile are
+ * handled in the hw itself (with the TILEOFF register). */
+ unsigned long dspaddr_offset;
+
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
@@ -196,13 +249,14 @@ struct intel_crtc {
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
+ uint32_t ddi_pll_sel;
};
struct intel_plane {
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
- bool primary_disabled;
+ bool can_scale;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
@@ -302,16 +356,52 @@ struct dip_infoframe {
} __attribute__((packed));
struct intel_hdmi {
- struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
- int ddi_port;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
+ void (*set_infoframes)(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
+};
+
+#define DP_MAX_DOWNSTREAM_PORTS 0x10
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+struct intel_dp {
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ uint32_t color_range;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ device_t dp_iic_bus;
+ device_t adapter;
+ bool is_pch_edp;
+ uint8_t train_set[4];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct timeout_task panel_vdd_work;
+ bool want_panel_vdd;
+ struct intel_connector *attached_connector;
+};
+
+struct intel_digital_port {
+ struct intel_encoder base;
+ enum port port;
+ u32 port_reversal;
+ struct intel_dp dp;
+ struct intel_hdmi hdmi;
};
static inline struct drm_crtc *
@@ -329,56 +419,88 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
}
struct intel_unpin_work {
- struct task task;
- struct drm_device *dev;
+ struct task work;
+ struct drm_crtc *crtc;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
- int pending;
+ atomic_t pending;
+#define INTEL_FLIP_INACTIVE 0
+#define INTEL_FLIP_PENDING 1
+#define INTEL_FLIP_COMPLETE 2
bool enable_stall_check;
};
struct intel_fbc_work {
- struct timeout_task task;
+ struct timeout_task work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
int interval;
};
+int intel_pch_rawclk(struct drm_device *dev);
+
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, device_t adapter);
-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+extern void intel_hdmi_init(struct drm_device *dev,
+ int sdvox_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *adjusted_mode);
-extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
+extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev,
- struct drm_i915_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_idle(struct drm_device *dev);
extern bool intel_lvds_init(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+ enum port port);
+extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
+extern int intel_edp_target_clock(struct intel_encoder *,
+ struct drm_display_mode *mode);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
-void intel_sanitize_pm(struct drm_device *dev);
-
/* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
+
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -386,24 +508,66 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
-extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_setup_backlight(struct drm_device *dev);
-extern void intel_panel_enable_backlight(struct drm_device *dev);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
+extern void intel_panel_enable_backlight(struct drm_device *dev,
+ enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+struct intel_set_config {
+ struct drm_encoder **save_connector_encoders;
+ struct drm_crtc **save_encoder_crtcs;
+
+ bool fb_changed;
+ bool mode_changed;
+};
+
+extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
+extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_encoder_prepare(struct drm_encoder *encoder);
-extern void intel_encoder_commit(struct drm_encoder *encoder);
+extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
+extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
+extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
+extern void intel_connector_dpms(struct drm_connector *, int mode);
+extern bool intel_connector_get_hw_state(struct intel_connector *connector);
+extern void intel_modeset_check_state(struct drm_device *dev);
+
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
{
return to_intel_connector(connector)->encoder;
}
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->dp;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+ return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+ return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -412,20 +576,22 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
bool load_detect_temp;
int dpms_mode;
};
-extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+extern void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
extern void intelfb_restore(void);
@@ -434,19 +600,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void ironlake_disable_rc6(struct drm_device *dev);
-extern void ironlake_enable_drps(struct drm_device *dev);
-extern void ironlake_disable_drps(struct drm_device *dev);
-extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
-extern void gen6_disable_rps(struct drm_device *dev);
-extern void intel_init_emon(struct drm_device *dev);
-extern int intel_enable_rc6(const struct drm_device *dev);
-
-extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
-extern void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -459,7 +612,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
-
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
@@ -496,6 +649,11 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
+extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int bpp,
+ unsigned int pitch);
+
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -509,5 +667,32 @@ extern void intel_init_pm(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern void intel_update_fbc(struct drm_device *dev);
+/* IPS */
+extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+extern void intel_gpu_ips_teardown(void);
+
+extern void intel_init_power_wells(struct drm_device *dev);
+extern void intel_enable_gt_powersave(struct drm_device *dev);
+extern void intel_disable_gt_powersave(struct drm_device *dev);
+extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
+extern void ironlake_teardown_rc6(struct drm_device *dev);
+
+extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe);
+extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
#endif /* __INTEL_DRV_H__ */
diff --git a/sys/dev/drm2/i915/intel_dvo.c b/sys/dev/drm2/i915/intel_dvo.c
new file mode 100644
index 0000000..328ab04
--- /dev/null
+++ b/sys/dev/drm2/i915/intel_dvo.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/i915/i915_drm.h>
+#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/dvo.h>
+
+#define SIL164_ADDR 0x38
+#define CH7xxx_ADDR 0x76
+#define TFP410_ADDR 0x38
+#define NS2501_ADDR 0x38
+
+static const struct intel_dvo_device intel_dvo_devices[] = {
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "sil164",
+ .dvo_reg = DVOC,
+ .slave_addr = SIL164_ADDR,
+ .dev_ops = &sil164_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ch7xxx",
+ .dvo_reg = DVOC,
+ .slave_addr = CH7xxx_ADDR,
+ .dev_ops = &ch7xxx_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS,
+ .name = "ivch",
+ .dvo_reg = DVOA,
+ .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
+ .dev_ops = &ivch_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "tfp410",
+ .dvo_reg = DVOC,
+ .slave_addr = TFP410_ADDR,
+ .dev_ops = &tfp410_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS,
+ .name = "ch7017",
+ .dvo_reg = DVOC,
+ .slave_addr = 0x75,
+ .gpio = GMBUS_PORT_DPB,
+ .dev_ops = &ch7017_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ns2501",
+ .dvo_reg = DVOC,
+ .slave_addr = NS2501_ADDR,
+ .dev_ops = &ns2501_ops,
+ }
+};
+
+struct intel_dvo {
+ struct intel_encoder base;
+
+ struct intel_dvo_device dev;
+
+ struct drm_display_mode *panel_fixed_mode;
+ bool panel_wants_dither;
+};
+
+static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dvo, base.base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dvo, base);
+}
+
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+
+ return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_disable_dvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+ I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+ I915_READ(dvo_reg);
+}
+
+static void intel_enable_dvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+ I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+static void intel_dvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_crtc *crtc;
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_dvo->base.base.crtc;
+ if (!crtc) {
+ intel_dvo->base.connectors_active = false;
+ return;
+ }
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ intel_dvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+ } else {
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+
+ intel_dvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
+}
+
+static int intel_dvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* XXX: Validate clock range */
+
+ if (intel_dvo->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
+}
+
+static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+
+ /* If we have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (intel_dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
+ C(hdisplay);
+ C(hsync_start);
+ C(hsync_end);
+ C(htotal);
+ C(vdisplay);
+ C(vsync_start);
+ C(vsync_end);
+ C(vtotal);
+ C(clock);
+#undef C
+ }
+
+ if (intel_dvo->dev.dev_ops->mode_fixup)
+ return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
+
+ return true;
+}
+
+static void intel_dvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ int pipe = intel_crtc->pipe;
+ u32 dvo_val;
+ u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
+ int dpll_reg = DPLL(pipe);
+
+ switch (dvo_reg) {
+ case DVOA:
+ default:
+ dvo_srcdim_reg = DVOA_SRCDIM;
+ break;
+ case DVOB:
+ dvo_srcdim_reg = DVOB_SRCDIM;
+ break;
+ case DVOC:
+ dvo_srcdim_reg = DVOC_SRCDIM;
+ break;
+ }
+
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
+
+ /* Save the data order, since I don't know what it should be set to. */
+ dvo_val = I915_READ(dvo_reg) &
+ (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+ dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
+ DVO_BLANK_ACTIVE_HIGH;
+
+ if (pipe == 1)
+ dvo_val |= DVO_PIPE_B_SELECT;
+ dvo_val |= DVO_PIPE_STALL;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
+
+ I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
+
+ /*I915_WRITE(DVOB_SRCDIM,
+ (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+ (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
+ I915_WRITE(dvo_srcdim_reg,
+ (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+ (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+ /*I915_WRITE(DVOB, dvo_val);*/
+ I915_WRITE(dvo_reg, dvo_val);
+}
+
+/**
+ * Detect the output connection on our DVO device.
+ *
+ * Unimplemented.
+ */
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
+}
+
+static int intel_dvo_get_modes(struct drm_connector *connector)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ /* We should probably have an i2c driver get_modes function for those
+ * devices which will have a fixed set of modes determined by the chip
+ * (TV-out, for example), but for now with just TMDS and LVDS,
+ * that's not the case.
+ */
+ intel_ddc_get_modes(connector,
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
+ if (!list_empty(&connector->probed_modes))
+ return 1;
+
+ if (intel_dvo->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void intel_dvo_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+ free(connector, DRM_MEM_KMS);
+}
+
+static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
+ .mode_fixup = intel_dvo_mode_fixup,
+ .mode_set = intel_dvo_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+ .dpms = intel_dvo_dpms,
+ .detect = intel_dvo_detect,
+ .destroy = intel_dvo_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
+ .mode_valid = intel_dvo_mode_valid,
+ .get_modes = intel_dvo_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+
+ if (intel_dvo->dev.dev_ops->destroy)
+ intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
+
+ free(intel_dvo->panel_fixed_mode, DRM_MEM_KMS);
+
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
+ .destroy = intel_dvo_enc_destroy,
+};
+
+/**
+ * Attempts to get a fixed panel timing for LVDS (currently only the i830).
+ *
+ * Other chips with DVO LVDS will need to extend this to deal with the LVDS
+ * chip being on DVOB/C and having multiple pipes.
+ */
+static struct drm_display_mode *
+intel_dvo_get_current_mode(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
+ struct drm_display_mode *mode = NULL;
+
+ /* If the DVO port is active, that'll be the LVDS, so we can pull out
+ * its timings to get how the BIOS set up the panel.
+ */
+ if (dvo_val & DVO_ENABLE) {
+ struct drm_crtc *crtc;
+ int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
+
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc) {
+ mode = intel_crtc_mode_get(dev, crtc);
+ if (mode) {
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ }
+ }
+ }
+
+ return mode;
+}
+
+void intel_dvo_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_dvo *intel_dvo;
+ struct intel_connector *intel_connector;
+ int i;
+ int encoder_type = DRM_MODE_ENCODER_NONE;
+
+ intel_dvo = malloc(sizeof(struct intel_dvo), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_dvo)
+ return;
+
+ intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_connector) {
+ free(intel_dvo, DRM_MEM_KMS);
+ return;
+ }
+
+ intel_encoder = &intel_dvo->base;
+ drm_encoder_init(dev, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type);
+
+ intel_encoder->disable = intel_disable_dvo;
+ intel_encoder->enable = intel_enable_dvo;
+ intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+ intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+
+ /* Now, try to find a controller */
+ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+ struct drm_connector *connector = &intel_connector->base;
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ device_t i2c;
+ int gpio;
+ bool dvoinit;
+
+ /* Allow the I2C driver info to specify the GPIO to be used in
+ * special cases, but otherwise default to what's defined
+ * in the spec.
+ */
+ if (intel_gmbus_is_port_valid(dvo->gpio))
+ gpio = dvo->gpio;
+ else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+ gpio = GMBUS_PORT_SSC;
+ else
+ gpio = GMBUS_PORT_DPB;
+
+ /* Set up the I2C bus necessary for the chip we're probing.
+ * It appears that everything is on GPIOE except for panels
+ * on i830 laptops, which are on GPIOB (DVOA).
+ */
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+
+ intel_dvo->dev = *dvo;
+
+ /* GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ intel_gmbus_force_bit(i2c, false);
+
+ if (!dvoinit)
+ continue;
+
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ intel_encoder->cloneable = true;
+ drm_connector_init(dev, connector,
+ &intel_dvo_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII);
+ encoder_type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case INTEL_DVO_CHIP_LVDS:
+ intel_encoder->cloneable = false;
+ drm_connector_init(dev, connector,
+ &intel_dvo_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ encoder_type = DRM_MODE_ENCODER_LVDS;
+ break;
+ }
+
+ drm_connector_helper_add(connector,
+ &intel_dvo_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_dvo_helper_funcs);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ if (dvo->type == INTEL_DVO_CHIP_LVDS) {
+ /* For our LVDS chipsets, we should hopefully be able
+ * to dig the fixed panel mode out of the BIOS data.
+ * However, it's in a different format from the BIOS
+ * data on chipsets with integrated LVDS (stored in AIM
+ * headers, likely), so for now, just get the current
+ * mode being output through DVO.
+ */
+ intel_dvo->panel_fixed_mode =
+ intel_dvo_get_current_mode(connector);
+ intel_dvo->panel_wants_dither = true;
+ }
+
+ return;
+ }
+
+ drm_encoder_cleanup(&intel_encoder->base);
+ free(intel_dvo, DRM_MEM_KMS);
+ free(intel_connector, DRM_MEM_KMS);
+}
diff --git a/sys/dev/drm2/i915/intel_fb.c b/sys/dev/drm2/i915/intel_fb.c
index 5ec2c17..4dd9449 100644
--- a/sys/dev/drm2/i915/intel_fb.c
+++ b/sys/dev/drm2/i915/intel_fb.c
@@ -29,20 +29,33 @@ __FBSDID("$FreeBSD$");
#include "opt_syscons.h"
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/drm_crtc.h>
#include <dev/drm2/drm_fb_helper.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
+#if defined(__linux__)
+static struct fb_ops intelfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+#endif
+
static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = ifbdev->helper.dev;
-#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = {};
@@ -85,17 +98,12 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unpin;
}
-#if 0
- info->par = ifbdev;
-#else
info->fb_size = size;
info->fb_bpp = sizes->surface_bpp;
- info->fb_pbase = dev->agp->base + obj->gtt_offset;
+ info->fb_pbase = dev_priv->mm.gtt_base_addr + obj->gtt_offset;
info->fb_vbase = (vm_offset_t)pmap_mapdev_attr(info->fb_pbase, size,
PAT_WRITE_COMBINING);
-#endif
-
ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
goto out_unpin;
@@ -104,40 +112,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
ifbdev->helper.fb = fb;
ifbdev->helper.fbdev = info;
-#if 0
-
- strcpy(info->fix.id, "inteldrmfb");
-
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
- info->fbops = &intelfb_ops;
-
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unpin;
- }
- /* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unpin;
- }
- info->apertures->ranges[0].base = dev->mode_config.fb_base;
- info->apertures->ranges[0].size =
- dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
- info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
- info->fix.smem_len = size;
-
- info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
- if (!info->screen_base) {
- ret = -ENOSPC;
- goto out_unpin;
- }
- info->screen_size = size;
-
-// memset(info->screen_base, 0, size);
-#endif
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -148,10 +122,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
fb->width, fb->height, fb->depth,
obj->gtt_offset, obj);
+
DRM_UNLOCK(dev);
-#if 1
- KIB_NOTYET();
-#else
+#ifdef __linux__
vga_switcheroo_client_fb_set(dev->pdev, info);
#endif
return 0;
@@ -195,12 +168,8 @@ static void intel_fbdev_destroy(struct drm_device *dev,
if (ifbdev->helper.fbdev) {
info = ifbdev->helper.fbdev;
-#if 0
- unregister_framebuffer(info);
- iounmap(info->screen_base);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-#endif
+ if (info->fb_fbd_dev != NULL)
+ device_delete_child(dev->dev, info->fb_fbd_dev);
framebuffer_release(info);
}
@@ -224,6 +193,8 @@ int intel_fbdev_init(struct drm_device *dev)
int ret;
ifbdev = malloc(sizeof(struct intel_fbdev), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!ifbdev)
+ return -ENOMEM;
dev_priv->fbdev = ifbdev;
ifbdev->helper.funcs = &intel_fb_helper_funcs;
@@ -255,6 +226,19 @@ void intel_fbdev_fini(struct drm_device *dev)
dev_priv->fbdev = NULL;
}
+void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+#ifdef FREEBSD_WIP
+ fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+#endif /* FREEBSD_WIP */
+}
+
+MODULE_LICENSE("GPL and additional rights");
+
void intel_fb_output_poll_changed(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
diff --git a/sys/dev/drm2/i915/intel_hdmi.c b/sys/dev/drm2/i915/intel_hdmi.c
index c009b03..473e5b6 100644
--- a/sys/dev/drm2/i915/intel_hdmi.c
+++ b/sys/dev/drm2/i915/intel_hdmi.c
@@ -30,22 +30,42 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/drm_crtc.h>
#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
-#include <dev/drm2/i915/intel_drv.h>
+
+#define mmiowb() barrier()
+
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+ return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
+static void
+assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t enabled_bits;
+
+ enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+
+ WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
+ "HDMI port enabled, expecting disabled\n");
+}
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
- return container_of(encoder, struct intel_hdmi, base.base);
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->hdmi;
}
static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_hdmi, base);
+ return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -121,36 +141,34 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
- val &= ~VIDEO_DIP_PORT_MASK;
- if (intel_hdmi->sdvox_reg == SDVOB)
- val |= VIDEO_DIP_PORT_B;
- else if (intel_hdmi->sdvox_reg == SDVOC)
- val |= VIDEO_DIP_PORT_C;
- else
- return;
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
I915_WRITE(VIDEO_DIP_CTL, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VIDEO_DIP_DATA, 0);
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(VIDEO_DIP_CTL, val);
+ POSTING_READ(VIDEO_DIP_CTL);
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
@@ -160,46 +178,35 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
- val &= ~VIDEO_DIP_PORT_MASK;
- switch (intel_hdmi->sdvox_reg) {
- case HDMIB:
- val |= VIDEO_DIP_PORT_B;
- break;
- case HDMIC:
- val |= VIDEO_DIP_PORT_C;
- break;
- case HDMID:
- val |= VIDEO_DIP_PORT_D;
- break;
- default:
- return;
- }
-
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
@@ -213,32 +220,34 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
- if (frame->type == DIP_TYPE_AVI)
- val |= VIDEO_DIP_ENABLE_AVI;
- else
+ if (frame->type != DIP_TYPE_AVI)
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
-
I915_WRITE(reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -252,26 +261,31 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
@@ -289,18 +303,22 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
if (data_reg == 0)
return;
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
val &= ~hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(data_reg + i, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(data_reg + i, 0);
+ mmiowb();
val |= hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
+ POSTING_READ(ctl_reg);
}
static void intel_set_infoframe(struct drm_encoder *encoder,
@@ -308,14 +326,11 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- if (!intel_hdmi->has_hdmi_sink)
- return;
-
intel_dip_infoframe_csum(frame);
intel_hdmi->write_infoframe(encoder, frame);
}
-void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
@@ -327,10 +342,12 @@ void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+ avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+
intel_set_infoframe(encoder, &avi_if);
}
-void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
@@ -345,6 +362,225 @@ void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
intel_set_infoframe(encoder, &spd_if);
}
+static void g4x_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = VIDEO_DIP_CTL;
+ u32 val = I915_READ(reg);
+ u32 port;
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* If the registers were not initialized yet, they might be zeroes,
+ * which means we're selecting the AVI DIP and we're setting its
+ * frequency to once. This seems to really confuse the HW and make
+ * things stop working (the register spec says the AVI always needs to
+ * be sent every VSync). So here we avoid writing to the register more
+ * than we need and also explicitly select the AVI DIP and explicitly
+ * set its frequency to every VSync. Avoiding to write it twice seems to
+ * be enough to solve the problem, but being defensive shouldn't hurt us
+ * either. */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ switch (intel_hdmi->sdvox_reg) {
+ case SDVOB:
+ port = VIDEO_DIP_PORT_B;
+ break;
+ case SDVOC:
+ port = VIDEO_DIP_PORT_C;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~VIDEO_DIP_ENABLE_VENDOR;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void ibx_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+ u32 port;
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ switch (intel_hdmi->sdvox_reg) {
+ case HDMIB:
+ port = VIDEO_DIP_PORT_B;
+ break;
+ case HDMIC:
+ port = VIDEO_DIP_PORT_C;
+ break;
+ case HDMID:
+ port = VIDEO_DIP_PORT_D;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void cpt_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ /* Set both together, unset both together: see the spec. */
+ val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void vlv_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void hsw_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ I915_WRITE(reg, 0);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -355,7 +591,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
- sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+ sdvox = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev))
sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -373,7 +609,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox |= HDMI_MODE_SELECT;
if (intel_hdmi->has_audio) {
- DRM_DEBUG_KMS("Enabling HDMI audio on pipe %c\n",
+ DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
sdvox |= SDVO_AUDIO_ENABLE;
sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
@@ -382,21 +618,41 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (HAS_PCH_CPT(dev))
sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
- else if (intel_crtc->pipe == 1)
+ else if (intel_crtc->pipe == PIPE_B)
sdvox |= SDVO_PIPE_B_SELECT;
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
-static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_hdmi->sdvox_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
u32 enable_bits = SDVO_ENABLE;
@@ -405,6 +661,17 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
temp = I915_READ(intel_hdmi->sdvox_reg);
+ /* HW workaround for IBX, we need to move the port to transcoder A
+ * before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ enable_bits |= SDVO_PIPE_B_SELECT;
+ }
+
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway which shows more stable in testing.
*/
@@ -413,12 +680,64 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
POSTING_READ(intel_hdmi->sdvox_reg);
}
- if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~enable_bits;
- } else {
- temp |= enable_bits;
+ temp |= enable_bits;
+
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* HW workaround, need to write this twice for issue that may result
+ * in first write getting masked.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+ }
+}
+
+static void intel_disable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->sdvox_reg);
+
+ /* HW workaround for IBX, we need to move the port to transcoder A
+ * before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(dev, pipe);
+ else
+ DRM_MSLEEP(50);
+ }
+ }
+
+ /* HW workaround, need to toggle enable bit off and on for 12bpc, but
+ * we do this anyway which shows more stable in testing.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->sdvox_reg);
}
+ temp &= ~enable_bits;
+
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
@@ -445,25 +764,53 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
+static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit;
+
+ switch (intel_hdmi->sdvox_reg) {
+ case SDVOB:
+ bit = HDMIB_HOTPLUG_LIVE_STATUS;
+ break;
+ case SDVOC:
+ bit = HDMIC_HOTPLUG_LIVE_STATUS;
+ break;
+ default:
+ bit = 0;
+ break;
+ }
+
+ return I915_READ(PORT_HOTPLUG_STAT) & bit;
+}
+
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
+ if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
+ return status;
+
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
- edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv,
- intel_hdmi->ddc_bus));
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -474,16 +821,13 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
free(edid, DRM_MEM_KMS);
- } else {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] got no edid, ddc port %d\n",
- connector->base.id, drm_get_connector_name(connector),
- intel_hdmi->ddc_bus);
}
if (status == connector_status_connected) {
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON);
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
}
return status;
@@ -517,7 +861,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
-
free(edid, DRM_MEM_KMS);
}
@@ -526,10 +869,12 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
static int
intel_hdmi_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
+ struct drm_property *property,
+ uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
@@ -569,11 +914,10 @@ intel_hdmi_set_property(struct drm_connector *connector,
return -EINVAL;
done:
- if (intel_hdmi->base.base.crtc) {
- struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y,
- crtc->fb);
+ if (intel_dig_port->base.base.crtc) {
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -581,31 +925,18 @@ done:
static void intel_hdmi_destroy(struct drm_connector *connector)
{
-#if 0
- drm_sysfs_connector_remove(connector);
-#endif
drm_connector_cleanup(connector);
free(connector, DRM_MEM_KMS);
}
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
- .dpms = intel_ddi_dpms,
- .mode_fixup = intel_hdmi_mode_fixup,
- .prepare = intel_encoder_prepare,
- .mode_set = intel_ddi_mode_set,
- .commit = intel_encoder_commit,
-};
-
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
- .dpms = intel_hdmi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_hdmi_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
@@ -629,117 +960,68 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector);
}
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
- struct intel_hdmi *intel_hdmi;
- int i;
-
- intel_hdmi = malloc(sizeof(struct intel_hdmi), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
- intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
-
- intel_encoder = &intel_hdmi->base;
- drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
+ enum port port = intel_dig_port->port;
- connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- intel_encoder->type = INTEL_OUTPUT_HDMI;
-
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- /* Set up the DDC bus. */
- if (sdvox_reg == SDVOB) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == SDVOC) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMIB) {
- intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMIC) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMID) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ switch (port) {
+ case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- intel_hdmi->ddi_port = PORT_B;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ break;
+ case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- intel_hdmi->ddi_port = PORT_C;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ break;
+ case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- intel_hdmi->ddi_port = PORT_D;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
- } else {
- /* If we got an unknown sdvox_reg, things are pretty much broken
- * in a way that we should let the kernel know about it */
- DRM_DEBUG_KMS("unknown sdvox_reg %d\n", sdvox_reg);
+ break;
+ case PORT_A:
+ /* Internal port only for eDP. */
+ default:
+ BUG();
}
- intel_hdmi->sdvox_reg = sdvox_reg;
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
- I915_WRITE(VIDEO_DIP_CTL, 0);
+ intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = vlv_set_infoframes;
} else if (IS_HASWELL(dev)) {
- /* FIXME: Haswell has a new set of DIP frame registers, but we are
- * just doing the minimal required for HDMI to work at this stage.
- */
intel_hdmi->write_infoframe = hsw_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = hsw_set_infoframes;
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = ibx_set_infoframes;
} else {
intel_hdmi->write_infoframe = cpt_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = cpt_set_infoframes;
}
if (IS_HASWELL(dev))
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_hdmi_add_properties(intel_hdmi, connector);
intel_connector_attach_encoder(intel_connector, intel_encoder);
-#if 0
- drm_sysfs_connector_add(connector);
-#endif
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
@@ -750,3 +1032,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
+
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = malloc(sizeof(struct intel_digital_port), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_connector) {
+ free(intel_dig_port, DRM_MEM_KMS);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+ intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+
+ intel_dig_port->port = port;
+ intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+ intel_dig_port->dp.output_reg = 0;
+
+ intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/sys/dev/drm2/i915/intel_iic.c b/sys/dev/drm2/i915/intel_iic.c
index aacc954..f7383c0 100644
--- a/sys/dev/drm2/i915/intel_iic.c
+++ b/sys/dev/drm2/i915/intel_iic.c
@@ -25,50 +25,20 @@
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
- *
- * Copyright (c) 2011 The FreeBSD Foundation
- * All rights reserved.
- *
- * This software was developed by Konstantin Belousov under sponsorship from
- * the FreeBSD Foundation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
+#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
-#include <dev/drm2/i915/intel_drv.h>
#include <dev/iicbus/iic.h>
#include <dev/iicbus/iiconf.h>
#include <dev/iicbus/iicbus.h>
#include "iicbus_if.h"
#include "iicbb_if.h"
-static void intel_teardown_gmbus_m(struct drm_device *dev, int m);
-
struct gmbus_port {
const char *name;
int reg;
@@ -87,17 +57,37 @@ static const struct gmbus_port gmbus_ports[] = {
#define I2C_RISEFALL_TIME 10
+/*
+ * FIXME Linux<->FreeBSD: dvo_ns2501.C wants the struct intel_gmbus
+ * below but it just has the device_t at hand. It still uses
+ * device_get_softc(), thus expects struct intel_gmbus to remain the
+ * first member.
+ */
struct intel_iic_softc {
- struct drm_device *drm_dev;
+ struct intel_gmbus *bus;
device_t iic_dev;
- bool force_bit_dev;
char name[32];
- uint32_t reg;
- uint32_t reg0;
};
+static inline struct intel_gmbus *
+to_intel_gmbus(device_t i2c)
+{
+ struct intel_iic_softc *sc;
+
+ sc = device_get_softc(i2c);
+ return sc->bus;
+}
+
+bool intel_gmbus_is_forced_bit(device_t adapter)
+{
+ struct intel_iic_softc *sc = device_get_softc(adapter);
+ struct intel_gmbus *bus = sc->bus;
+
+ return bus->force_bit;
+}
+
void
-intel_iic_reset(struct drm_device *dev)
+intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
@@ -110,9 +100,9 @@ intel_iicbus_reset(device_t idev, u_char speed, u_char addr, u_char *oldaddr)
struct drm_device *dev;
sc = device_get_softc(idev);
- dev = sc->drm_dev;
+ dev = sc->bus->dev_priv->dev;
- intel_iic_reset(dev);
+ intel_i2c_reset(dev);
return (0);
}
@@ -132,15 +122,15 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
I915_WRITE(DSPCLK_GATE_D, val);
}
-static u32 get_reserved(device_t idev)
+static u32 get_reserved(struct intel_gmbus *bus)
{
- struct intel_iic_softc *sc = device_get_softc(idev);
- struct drm_device *dev = sc->drm_dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
u32 reserved = 0;
+ /* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ_NOTRACE(sc->reg) &
+ reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -150,28 +140,31 @@ static u32 get_reserved(device_t idev)
static int get_clock(device_t adapter)
{
struct intel_iic_softc *sc = device_get_softc(adapter);
- struct drm_i915_private *dev_priv = sc->drm_dev->dev_private;
- u32 reserved = get_reserved(adapter);
- I915_WRITE_NOTRACE(sc->reg, reserved | GPIO_CLOCK_DIR_MASK);
- I915_WRITE_NOTRACE(sc->reg, reserved);
- return ((I915_READ_NOTRACE(sc->reg) & GPIO_CLOCK_VAL_IN) != 0);
+ struct intel_gmbus *bus = sc->bus;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(device_t adapter)
{
struct intel_iic_softc *sc = device_get_softc(adapter);
- struct drm_i915_private *dev_priv = sc->drm_dev->dev_private;
- u32 reserved = get_reserved(adapter);
- I915_WRITE_NOTRACE(sc->reg, reserved | GPIO_DATA_DIR_MASK);
- I915_WRITE_NOTRACE(sc->reg, reserved);
- return ((I915_READ_NOTRACE(sc->reg) & GPIO_DATA_VAL_IN) != 0);
+ struct intel_gmbus *bus = sc->bus;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(device_t adapter, int state_high)
{
struct intel_iic_softc *sc = device_get_softc(adapter);
- struct drm_i915_private *dev_priv = sc->drm_dev->dev_private;
- u32 reserved = get_reserved(adapter);
+ struct intel_gmbus *bus = sc->bus;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
u32 clock_bits;
if (state_high)
@@ -180,15 +173,16 @@ static void set_clock(device_t adapter, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE_NOTRACE(sc->reg, reserved | clock_bits);
- POSTING_READ(sc->reg);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
+ POSTING_READ(bus->gpio_reg);
}
static void set_data(device_t adapter, int state_high)
{
struct intel_iic_softc *sc = device_get_softc(adapter);
- struct drm_i915_private *dev_priv = sc->drm_dev->dev_private;
- u32 reserved = get_reserved(adapter);
+ struct intel_gmbus *bus = sc->bus;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
u32 data_bits;
if (state_high)
@@ -197,21 +191,22 @@ static void set_data(device_t adapter, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE_NOTRACE(sc->reg, reserved | data_bits);
- POSTING_READ(sc->reg);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
+ POSTING_READ(bus->gpio_reg);
}
static int
intel_gpio_pre_xfer(device_t adapter)
{
struct intel_iic_softc *sc = device_get_softc(adapter);
- struct drm_i915_private *dev_priv = sc->drm_dev->dev_private;
+ struct intel_gmbus *bus = sc->bus;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
- intel_iic_reset(sc->drm_dev);
+ intel_i2c_reset(dev_priv->dev);
intel_i2c_quirk_set(dev_priv, true);
IICBB_SETSDA(adapter, 1);
IICBB_SETSCL(adapter, 1);
- DELAY(I2C_RISEFALL_TIME);
+ udelay(I2C_RISEFALL_TIME);
return 0;
}
@@ -219,13 +214,23 @@ static void
intel_gpio_post_xfer(device_t adapter)
{
struct intel_iic_softc *sc = device_get_softc(adapter);
- struct drm_i915_private *dev_priv = sc->drm_dev->dev_private;
+ struct intel_gmbus *bus = sc->bus;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
IICBB_SETSDA(adapter, 1);
IICBB_SETSCL(adapter, 1);
intel_i2c_quirk_set(dev_priv, false);
}
+static void
+intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
+{
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ /* -1 to map pin pair to gmbus index */
+ bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
+}
+
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct iic_msg *msg,
u32 gmbus1_index)
@@ -245,10 +250,9 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct iic_msg *msg,
u32 val, loop = 0;
u32 gmbus2;
- ret = _intel_wait_for(sc->drm_dev,
- ((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_RDY)),
- 50, 1, "915gbr");
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_RDY),
+ 50);
if (ret)
return -ETIMEDOUT;
if (gmbus2 & GMBUS_SATOER)
@@ -295,10 +299,9 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct iic_msg *msg)
I915_WRITE(GMBUS3 + reg_offset, val);
- ret = _intel_wait_for(sc->drm_dev,
- ((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_RDY)),
- 50, 1, "915gbw");
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_RDY),
+ 50);
if (ret)
return -ETIMEDOUT;
if (gmbus2 & GMBUS_SATOER)
@@ -315,8 +318,8 @@ static bool
gmbus_is_index_read(struct iic_msg *msgs, int i, int num)
{
return (i + 1 < num &&
- !(msgs[i].flags & IIC_M_RD) && msgs[i].len <= 2 &&
- (msgs[i + 1].flags & IIC_M_RD));
+ !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ (msgs[i + 1].flags & I2C_M_RD));
}
static int
@@ -353,43 +356,42 @@ gmbus_xfer(device_t adapter,
uint32_t num)
{
struct intel_iic_softc *sc = device_get_softc(adapter);
- struct drm_i915_private *dev_priv = sc->drm_dev->dev_private;
- int error, i, ret, reg_offset, unit;
+ struct intel_gmbus *bus = sc->bus;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ int i, reg_offset;
+ int ret = 0;
- error = 0;
- unit = device_get_unit(adapter);
+ sx_xlock(&dev_priv->gmbus_mutex);
- sx_xlock(&dev_priv->gmbus_sx);
- if (sc->force_bit_dev) {
- error = -IICBUS_TRANSFER(dev_priv->bbbus[unit], msgs, num);
+ if (bus->force_bit) {
+ ret = -IICBUS_TRANSFER(bus->bbbus, msgs, num);
goto out;
}
reg_offset = dev_priv->gpio_mmio_base;
- I915_WRITE(GMBUS0 + reg_offset, sc->reg0);
+ I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
u32 gmbus2;
if (gmbus_is_index_read(msgs, i, num)) {
- error = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+ ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
i += 1; /* set i to the index of the read xfer */
- } else if (msgs[i].flags & IIC_M_RD) {
- error = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+ } else if (msgs[i].flags & I2C_M_RD) {
+ ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
- error = gmbus_xfer_write(dev_priv, &msgs[i]);
+ ret = gmbus_xfer_write(dev_priv, &msgs[i]);
}
- if (error == -ETIMEDOUT)
+ if (ret == -ETIMEDOUT)
goto timeout;
- if (error == -ENXIO)
+ if (ret == -ENXIO)
goto clear_err;
- ret = _intel_wait_for(sc->drm_dev,
- ((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
- (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE)),
- 50, 1, "915gbh");
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
+ 50);
if (ret)
goto timeout;
if (gmbus2 & GMBUS_SATOER)
@@ -406,12 +408,11 @@ gmbus_xfer(device_t adapter,
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
- if (_intel_wait_for(dev,
- (I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
- 10, 1, "915gbu")) {
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
- sc->name);
- error = -ETIMEDOUT;
+ device_get_desc(adapter));
+ ret = -ETIMEDOUT;
}
I915_WRITE(GMBUS0 + reg_offset, 0);
goto out;
@@ -421,11 +422,22 @@ clear_err:
* Wait for bus to IDLE before clearing NAK.
* If we clear the NAK while bus is still active, then it will stay
* active and the next transaction may fail.
+ *
+ * If no ACK is received during the address phase of a transaction, the
+ * adapter must report -ENXIO. It is not clear what to return if no ACK
+ * is received at other times. But we have to be careful to not return
+ * spurious -ENXIO because that will prevent i2c and drm edid functions
+ * from retrying. So return -ENXIO only when gmbus properly quiescents -
+ * timing out seems to happen when there _is_ a ddc chip present, but
+ * it's slow responding and only answers on the 2nd retry.
*/
- if (_intel_wait_for(dev,
- (I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
- 10, 1, "915gbu"))
- DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", sc->name);
+ ret = -ENXIO;
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
+ device_get_desc(adapter));
+ ret = -ETIMEDOUT;
+ }
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
@@ -436,31 +448,23 @@ clear_err:
I915_WRITE(GMBUS0 + reg_offset, 0);
DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
- sc->name, msgs[i].slave,
- (msgs[i].flags & IIC_M_RD) ? 'r' : 'w', msgs[i].len);
+ device_get_desc(adapter), msgs[i].slave >> 1,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
- /*
- * If no ACK is received during the address phase of a transaction,
- * the adapter must report -ENXIO.
- * It is not clear what to return if no ACK is received at other times.
- * So, we always return -ENXIO in all NAK cases, to ensure we send
- * it at least during the one case that is specified.
- */
- error = -ENXIO;
goto out;
timeout:
DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
- sc->name, sc->reg0 & 0xff);
+ device_get_desc(adapter), bus->reg0 & 0xff);
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- sc->force_bit_dev = true;
- error = -IICBUS_TRANSFER(dev_priv->bbbus[unit], msgs, num);
+ bus->force_bit = 1;
+ ret = -IICBUS_TRANSFER(bus->bbbus, msgs, num);
out:
- sx_xunlock(&dev_priv->gmbus_sx);
- return -error;
+ sx_xunlock(&dev_priv->gmbus_mutex);
+ return -ret;
}
static int
@@ -473,32 +477,23 @@ intel_gmbus_probe(device_t dev)
static int
intel_gmbus_attach(device_t idev)
{
- struct drm_i915_private *dev_priv;
struct intel_iic_softc *sc;
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
int pin, port;
sc = device_get_softc(idev);
- sc->drm_dev = device_get_softc(device_get_parent(idev));
- dev_priv = sc->drm_dev->dev_private;
pin = device_get_unit(idev);
- port = pin + 1;
+ port = pin + 1; /* +1 to map gmbus index to pin pair */
- snprintf(sc->name, sizeof(sc->name), "gmbus %s",
+ snprintf(sc->name, sizeof(sc->name), "i915 gmbus %s",
intel_gmbus_is_port_valid(port) ? gmbus_ports[pin].name :
"reserved");
device_set_desc(idev, sc->name);
- /* By default use a conservative clock rate */
- sc->reg0 = port | GMBUS_RATE_100KHZ;
-
- /* gmbus seems to be broken on i830 */
- if (IS_I830(sc->drm_dev))
- sc->force_bit_dev = true;
-#if 0
- if (IS_GEN2(sc->drm_dev)) {
- sc->force_bit_dev = true;
- }
-#endif
+ dev = device_get_softc(device_get_parent(idev));
+ dev_priv = dev->dev_private;
+ sc->bus = &dev_priv->gmbus[pin];
/* add bus interface device */
sc->iic_dev = device_add_child(idev, "iicbus", -1);
@@ -513,23 +508,31 @@ intel_gmbus_attach(device_t idev)
static int
intel_gmbus_detach(device_t idev)
{
- struct intel_iic_softc *sc;
- struct drm_i915_private *dev_priv;
- device_t child;
- int u;
- sc = device_get_softc(idev);
- u = device_get_unit(idev);
- dev_priv = sc->drm_dev->dev_private;
-
- child = sc->iic_dev;
bus_generic_detach(idev);
- if (child != NULL)
- device_delete_child(idev, child);
+ device_delete_children(idev);
return (0);
}
+static device_method_t intel_gmbus_methods[] = {
+ DEVMETHOD(device_probe, intel_gmbus_probe),
+ DEVMETHOD(device_attach, intel_gmbus_attach),
+ DEVMETHOD(device_detach, intel_gmbus_detach),
+ DEVMETHOD(iicbus_reset, intel_iicbus_reset),
+ DEVMETHOD(iicbus_transfer, gmbus_xfer),
+ DEVMETHOD_END
+};
+static driver_t intel_gmbus_driver = {
+ "intel_gmbus",
+ intel_gmbus_methods,
+ sizeof(struct intel_iic_softc)
+};
+static devclass_t intel_gmbus_devclass;
+DRIVER_MODULE_ORDERED(intel_gmbus, drmn, intel_gmbus_driver,
+ intel_gmbus_devclass, 0, 0, SI_ORDER_FIRST);
+DRIVER_MODULE(iicbus, intel_gmbus, iicbus_driver, iicbus_devclass, 0, 0);
+
static int
intel_iicbb_probe(device_t dev)
{
@@ -541,12 +544,11 @@ static int
intel_iicbb_attach(device_t idev)
{
struct intel_iic_softc *sc;
+ struct drm_device *dev;
struct drm_i915_private *dev_priv;
int pin, port;
sc = device_get_softc(idev);
- sc->drm_dev = device_get_softc(device_get_parent(idev));
- dev_priv = sc->drm_dev->dev_private;
pin = device_get_unit(idev);
port = pin + 1;
@@ -555,10 +557,9 @@ intel_iicbb_attach(device_t idev)
"reserved");
device_set_desc(idev, sc->name);
- if (!intel_gmbus_is_port_valid(port))
- pin = 1 ; /* GPIOA, VGA */
- sc->reg0 = pin | GMBUS_RATE_100KHZ;
- sc->reg = dev_priv->gpio_mmio_base + gmbus_ports[pin].reg;
+ dev = device_get_softc(device_get_parent(idev));
+ dev_priv = dev->dev_private;
+ sc->bus = &dev_priv->gmbus[pin];
/* add generic bit-banging code */
sc->iic_dev = device_add_child(idev, "iicbb", -1);
@@ -574,35 +575,13 @@ intel_iicbb_attach(device_t idev)
static int
intel_iicbb_detach(device_t idev)
{
- struct intel_iic_softc *sc;
- device_t child;
- sc = device_get_softc(idev);
- child = sc->iic_dev;
bus_generic_detach(idev);
- if (child)
- device_delete_child(idev, child);
+ device_delete_children(idev);
+
return (0);
}
-static device_method_t intel_gmbus_methods[] = {
- DEVMETHOD(device_probe, intel_gmbus_probe),
- DEVMETHOD(device_attach, intel_gmbus_attach),
- DEVMETHOD(device_detach, intel_gmbus_detach),
- DEVMETHOD(iicbus_reset, intel_iicbus_reset),
- DEVMETHOD(iicbus_transfer, gmbus_xfer),
- DEVMETHOD_END
-};
-static driver_t intel_gmbus_driver = {
- "intel_gmbus",
- intel_gmbus_methods,
- sizeof(struct intel_iic_softc)
-};
-static devclass_t intel_gmbus_devclass;
-DRIVER_MODULE_ORDERED(intel_gmbus, drmn, intel_gmbus_driver,
- intel_gmbus_devclass, 0, 0, SI_ORDER_FIRST);
-DRIVER_MODULE(iicbus, intel_gmbus, iicbus_driver, iicbus_devclass, 0, 0);
-
static device_method_t intel_iicbb_methods[] = {
DEVMETHOD(device_probe, intel_iicbb_probe),
DEVMETHOD(device_attach, intel_iicbb_attach),
@@ -631,6 +610,10 @@ DRIVER_MODULE_ORDERED(intel_iicbb, drmn, intel_iicbb_driver,
intel_iicbb_devclass, 0, 0, SI_ORDER_FIRST);
DRIVER_MODULE(iicbb, intel_iicbb, iicbb_driver, iicbb_devclass, 0, 0);
+/**
+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
int intel_setup_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -642,7 +625,7 @@ int intel_setup_gmbus(struct drm_device *dev)
else
dev_priv->gpio_mmio_base = 0;
- sx_init(&dev_priv->gmbus_sx, "gmbus");
+ sx_init(&dev_priv->gmbus_mutex, "gmbus");
/*
* The Giant there is recursed, most likely. Normally, the
@@ -650,29 +633,46 @@ int intel_setup_gmbus(struct drm_device *dev)
* driver.
*/
mtx_lock(&Giant);
- for (i = 0; i <= GMBUS_NUM_PORTS; i++) {
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ u32 port = i + 1; /* +1 to map gmbus index to pin pair */
+
+ bus->dev_priv = dev_priv;
+
+ /* By default use a conservative clock rate */
+ bus->reg0 = port | GMBUS_RATE_100KHZ;
+
+ /* gmbus seems to be broken on i830 */
+ if (IS_I830(dev))
+ bus->force_bit = 1;
+
+ intel_gpio_setup(bus, port);
+
/*
+ * bbbus_bridge
+ *
* Initialized bbbus_bridge before gmbus_bridge, since
* gmbus may decide to force quirk transfer in the
* attachment code.
*/
- dev_priv->bbbus_bridge[i] = device_add_child(dev->dev,
+ bus->bbbus_bridge = device_add_child(dev->dev,
"intel_iicbb", i);
- if (dev_priv->bbbus_bridge[i] == NULL) {
+ if (bus->bbbus_bridge == NULL) {
DRM_ERROR("bbbus bridge %d creation failed\n", i);
ret = -ENXIO;
goto err;
}
- device_quiet(dev_priv->bbbus_bridge[i]);
- ret = -device_probe_and_attach(dev_priv->bbbus_bridge[i]);
+ device_quiet(bus->bbbus_bridge);
+ ret = -device_probe_and_attach(bus->bbbus_bridge);
if (ret != 0) {
DRM_ERROR("bbbus bridge %d attach failed, %d\n", i,
ret);
goto err;
}
- iic_dev = device_find_child(dev_priv->bbbus_bridge[i], "iicbb",
- -1);
+ /* bbbus */
+ iic_dev = device_find_child(bus->bbbus_bridge,
+ "iicbb", -1);
if (iic_dev == NULL) {
DRM_ERROR("bbbus bridge doesn't have iicbb child\n");
goto err;
@@ -684,17 +684,18 @@ int intel_setup_gmbus(struct drm_device *dev)
goto err;
}
- dev_priv->bbbus[i] = iic_dev;
+ bus->bbbus = iic_dev;
- dev_priv->gmbus_bridge[i] = device_add_child(dev->dev,
+ /* gmbus_bridge */
+ bus->gmbus_bridge = device_add_child(dev->dev,
"intel_gmbus", i);
- if (dev_priv->gmbus_bridge[i] == NULL) {
+ if (bus->gmbus_bridge == NULL) {
DRM_ERROR("gmbus bridge %d creation failed\n", i);
ret = -ENXIO;
goto err;
}
- device_quiet(dev_priv->gmbus_bridge[i]);
- ret = -device_probe_and_attach(dev_priv->gmbus_bridge[i]);
+ device_quiet(bus->gmbus_bridge);
+ ret = -device_probe_and_attach(bus->gmbus_bridge);
if (ret != 0) {
DRM_ERROR("gmbus bridge %d attach failed, %d\n", i,
ret);
@@ -702,67 +703,84 @@ int intel_setup_gmbus(struct drm_device *dev)
goto err;
}
- iic_dev = device_find_child(dev_priv->gmbus_bridge[i],
+ /* gmbus */
+ iic_dev = device_find_child(bus->gmbus_bridge,
"iicbus", -1);
if (iic_dev == NULL) {
DRM_ERROR("gmbus bridge doesn't have iicbus child\n");
goto err;
}
- dev_priv->gmbus[i] = iic_dev;
- intel_iic_reset(dev);
+ bus->gmbus = iic_dev;
}
mtx_unlock(&Giant);
+ intel_i2c_reset(dev_priv->dev);
+
return 0;
err:
- intel_teardown_gmbus_m(dev, i);
+ while (--i) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ if (bus->gmbus_bridge != NULL)
+ device_delete_child(dev->dev, bus->gmbus_bridge);
+ if (bus->bbbus_bridge != NULL)
+ device_delete_child(dev->dev, bus->bbbus_bridge);
+ }
mtx_unlock(&Giant);
+ sx_destroy(&dev_priv->gmbus_mutex);
return ret;
}
device_t intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
unsigned port)
{
-
- if (!intel_gmbus_is_port_valid(port))
- DRM_ERROR("GMBUS get adapter %d: invalid port\n", port);
- return (intel_gmbus_is_port_valid(port) ? dev_priv->gmbus[port - 1] :
- NULL);
+ WARN_ON(!intel_gmbus_is_port_valid(port));
+ /* -1 to map pin pair to gmbus index */
+ return (intel_gmbus_is_port_valid(port)) ?
+ dev_priv->gmbus[port - 1].gmbus : NULL;
}
void intel_gmbus_set_speed(device_t adapter, int speed)
{
- struct intel_iic_softc *sc;
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
- sc = device_get_softc(device_get_parent(adapter));
-
- sc->reg0 = (sc->reg0 & ~(0x3 << 8)) | speed;
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
}
void intel_gmbus_force_bit(device_t adapter, bool force_bit)
{
- struct intel_iic_softc *sc;
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
- sc = device_get_softc(device_get_parent(adapter));
- sc->force_bit_dev = force_bit;
+ bus->force_bit += force_bit ? 1 : -1;
+ DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", device_get_desc(adapter),
+ bus->force_bit);
}
-static void
-intel_teardown_gmbus_m(struct drm_device *dev, int m)
+void intel_teardown_gmbus(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ int ret;
- dev_priv = dev->dev_private;
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
- sx_destroy(&dev_priv->gmbus_sx);
-}
+ mtx_lock(&Giant);
+ ret = device_delete_child(dev->dev, bus->gmbus_bridge);
+ mtx_unlock(&Giant);
-void intel_teardown_gmbus(struct drm_device *dev)
-{
+ KASSERT(ret == 0, ("unable to detach iic gmbus %s: %d",
+ device_get_desc(bus->gmbus_bridge), ret));
- mtx_lock(&Giant);
- intel_teardown_gmbus_m(dev, GMBUS_NUM_PORTS);
- mtx_unlock(&Giant);
+ mtx_lock(&Giant);
+ ret = device_delete_child(dev->dev, bus->bbbus_bridge);
+ mtx_unlock(&Giant);
+
+ KASSERT(ret == 0, ("unable to detach iic bbbus %s: %d",
+ device_get_desc(bus->bbbus_bridge), ret));
+ }
+
+ sx_destroy(&dev_priv->gmbus_mutex);
}
diff --git a/sys/dev/drm2/i915/intel_lvds.c b/sys/dev/drm2/i915/intel_lvds.c
index 9e6c667..e5db0ee 100644
--- a/sys/dev/drm2/i915/intel_lvds.c
+++ b/sys/dev/drm2/i915/intel_lvds.c
@@ -31,44 +31,75 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/drm_crtc.h>
#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
-#include <dev/drm2/i915/intel_drv.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds {
- struct intel_encoder base;
+struct intel_lvds_connector {
+ struct intel_connector base;
- struct edid *edid;
+#ifdef FREEBSD_WIP
+ struct notifier_block lid_notifier;
+#endif /* FREEBSD_WIP */
+};
+
+struct intel_lvds_encoder {
+ struct intel_encoder base;
- int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
bool pfit_dirty;
- struct drm_display_mode *fixed_mode;
+ struct intel_lvds_connector *attached_connector;
};
-static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
{
- return container_of(encoder, struct intel_lvds, base.base);
+ return container_of(encoder, struct intel_lvds_encoder, base.base);
}
-static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_lvds, base);
+ return container_of(connector, struct intel_lvds_connector, base.base);
+}
+
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lvds_reg, tmp;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ lvds_reg = PCH_LVDS;
+ } else {
+ lvds_reg = LVDS;
+ }
+
+ tmp = I915_READ(lvds_reg);
+
+ if (!(tmp & LVDS_PORT_EN))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
}
/**
* Sets the power state for the panel.
*/
-static void intel_lvds_enable(struct intel_lvds *intel_lvds)
+static void intel_enable_lvds(struct intel_encoder *encoder)
{
- struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -84,7 +115,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- if (intel_lvds->pfit_dirty) {
+ if (lvds_encoder->pfit_dirty) {
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
@@ -92,27 +123,26 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- intel_lvds->pfit_control,
- intel_lvds->pfit_pgm_ratios);
+ lvds_encoder->pfit_control,
+ lvds_encoder->pfit_pgm_ratios);
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
+ lvds_encoder->pfit_dirty = false;
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
POSTING_READ(lvds_reg);
- if (_intel_wait_for(dev,
- (I915_READ(stat_reg) & PP_ON) == 0, 1000,
- 1, "915lvds"))
- DRM_ERROR("timed out waiting for panel to power off\n");
+ if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power on\n");
- intel_panel_enable_backlight(dev);
+ intel_panel_enable_backlight(dev, intel_crtc->pipe);
}
-static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+static void intel_disable_lvds(struct intel_encoder *encoder)
{
- struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -129,37 +159,23 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
intel_panel_disable_backlight(dev);
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
- if (_intel_wait_for(dev,
- (I915_READ(stat_reg) & PP_ON) == 0, 1000,
- 1, "915lvo"))
+ if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
- if (intel_lvds->pfit_control) {
+ if (lvds_encoder->pfit_control) {
I915_WRITE(PFIT_CONTROL, 0);
- intel_lvds->pfit_dirty = true;
+ lvds_encoder->pfit_dirty = true;
}
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
}
-static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_enable(intel_lvds);
- else
- intel_lvds_disable(intel_lvds);
-
- /* XXX: We never power down the LVDS pairs. */
-}
-
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
- struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -235,9 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- struct drm_encoder *tmp_encoder;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+ struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@@ -247,14 +264,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
- /* Should never happen!! */
- list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
- if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
- DRM_ERROR("Can't enable LVDS and another "
- "encoder on the same pipe\n");
- return false;
- }
- }
+ if (intel_encoder_check_is_cloned(&lvds_encoder->base))
+ return false;
/*
* We have timings from the BIOS for the panel, put them in
@@ -262,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
- intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+ intel_pch_panel_fitting(dev,
+ intel_connector->panel.fitting_mode,
mode, adjusted_mode);
return true;
}
@@ -291,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
- switch (intel_lvds->fitting_mode) {
+ switch (intel_connector->panel.fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
@@ -389,11 +402,11 @@ out:
if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- if (pfit_control != intel_lvds->pfit_control ||
- pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
- intel_lvds->pfit_control = pfit_control;
- intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
- intel_lvds->pfit_dirty = true;
+ if (pfit_control != lvds_encoder->pfit_control ||
+ pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
+ lvds_encoder->pfit_control = pfit_control;
+ lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
+ lvds_encoder->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
@@ -406,29 +419,6 @@ out:
return true;
}
-static void intel_lvds_prepare(struct drm_encoder *encoder)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- /*
- * Prior to Ironlake, we must disable the pipe if we want to adjust
- * the panel fitter. However at all other times we can just reset
- * the registers regardless.
- */
- if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
- intel_lvds_disable(intel_lvds);
-}
-
-static void intel_lvds_commit(struct drm_encoder *encoder)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- /* Always do a full power on as we do not know what state
- * we were left in.
- */
- intel_lvds_enable(intel_lvds);
-}
-
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -465,14 +455,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
- if (intel_lvds->edid)
- return drm_add_edid_modes(connector, intel_lvds->edid);
+ /* use cached edid if we have one */
+ if (lvds_connector->base.edid)
+ return drm_add_edid_modes(connector, lvds_connector->base.edid);
- mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+ mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
if (mode == NULL)
return 0;
@@ -500,7 +491,7 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
{ } /* terminating entry */
};
-#ifdef NOTYET
+#ifdef FREEBSD_WIP
/*
* Lid events. Note the use of 'modeset_on_lid':
* - we set it on lid close, and reset it on open
@@ -513,10 +504,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, lid_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_connector *connector = dev_priv->int_lvds_connector;
+ struct intel_lvds_connector *lvds_connector =
+ container_of(nb, struct intel_lvds_connector, lid_notifier);
+ struct drm_connector *connector = &lvds_connector->base.base;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
@@ -525,9 +517,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* check and update the status of LVDS connector after receiving
* the LID nofication event.
*/
- if (connector)
- connector->status = connector->funcs->detect(connector,
- false);
+ connector->status = connector->funcs->detect(connector, false);
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
@@ -543,12 +533,12 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
dev_priv->modeset_on_lid = 0;
mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
+ intel_modeset_setup_hw_state(dev, true);
mutex_unlock(&dev->mode_config.mutex);
return NOTIFY_OK;
}
-#endif
+#endif /* FREEBSD_WIP */
/**
* intel_lvds_destroy - unregister and free LVDS structures
@@ -559,20 +549,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
-#if 0
- struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
+ struct intel_lvds_connector *lvds_connector =
+ to_lvds_connector(connector);
+
+#ifdef FREEBSD_WIP
+ if (lvds_connector->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+#endif /* FREEBSD_WIP */
+
+ free(lvds_connector->base.edid, DRM_MEM_KMS);
- intel_panel_destroy_backlight(dev);
+ intel_panel_fini(&lvds_connector->base.panel);
-#if 0
- if (dev_priv->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
-#endif
-#if 0
- drm_sysfs_connector_remove(connector);
-#endif
drm_connector_cleanup(connector);
free(connector, DRM_MEM_KMS);
}
@@ -581,29 +569,31 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
if (property == dev->mode_config.scaling_mode_property) {
- struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+ struct drm_crtc *crtc;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
- if (intel_lvds->fitting_mode == value) {
+ if (intel_connector->panel.fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
- intel_lvds->fitting_mode = value;
+ intel_connector->panel.fitting_mode = value;
+
+ crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
}
@@ -611,11 +601,9 @@ static int intel_lvds_set_property(struct drm_connector *connector,
}
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
- .dpms = intel_lvds_dpms,
.mode_fixup = intel_lvds_mode_fixup,
- .prepare = intel_lvds_prepare,
.mode_set = intel_lvds_mode_set,
- .commit = intel_lvds_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -625,7 +613,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -636,7 +624,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;
@@ -757,6 +745,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard HP t5740e Thin Client",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Hewlett-Packard t5745",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
@@ -779,6 +775,30 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Supermicro X7SPA-H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Fujitsu Esprimo Q900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+ },
+ },
{ } /* terminating entry */
};
@@ -906,12 +926,16 @@ static bool intel_lvds_supported(struct drm_device *dev)
bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds;
+ struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
+ struct intel_lvds_connector *lvds_connector;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edid *edid;
+ int edid_err = 0;
struct drm_crtc *crtc;
u32 lvds;
int pipe;
@@ -939,17 +963,25 @@ bool intel_lvds_init(struct drm_device *dev)
}
}
- intel_lvds = malloc(sizeof(struct intel_lvds), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
- intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
+ lvds_encoder = malloc(sizeof(struct intel_lvds_encoder), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!lvds_encoder)
+ return false;
+
+ lvds_connector = malloc(sizeof(struct intel_lvds_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!lvds_connector) {
+ free(lvds_encoder, DRM_MEM_KMS);
+ return false;
+ }
+
+ lvds_encoder->attached_connector = lvds_connector;
if (!HAS_PCH_SPLIT(dev)) {
- intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+ lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
}
- intel_encoder = &intel_lvds->base;
+ intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
+ intel_connector = &lvds_connector->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -957,12 +989,19 @@ bool intel_lvds_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
+ intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->disable = intel_disable_lvds;
+ intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+ intel_encoder->cloneable = false;
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ else if (IS_GEN4(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
@@ -974,14 +1013,10 @@ bool intel_lvds_init(struct drm_device *dev)
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
- /*
- * the initial panel fitting mode will be FULL_SCREEN.
- */
-
drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
- intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -996,19 +1031,24 @@ bool intel_lvds_init(struct drm_device *dev)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- intel_lvds->edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
- if (intel_lvds->edid) {
- if (drm_add_edid_modes(connector,
- intel_lvds->edid)) {
+ edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
- intel_lvds->edid);
+ edid);
} else {
- free(intel_lvds->edid, DRM_MEM_KMS);
- intel_lvds->edid = NULL;
+ free(edid, DRM_MEM_KMS);
+ edid = NULL;
+ edid_err = -EINVAL;
}
+ } else {
+ edid = NULL;
+ edid_err = -ENOENT;
}
- if (!intel_lvds->edid) {
+ lvds_connector->base.edid = edid;
+ lvds_connector->base.edid_err = edid_err;
+
+ if (edid_err) {
/* Didn't get an EDID, so
* Set wide sync ranges so we get all modes
* handed to valid_mode for checking
@@ -1021,22 +1061,26 @@ bool intel_lvds_init(struct drm_device *dev)
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- intel_lvds->fixed_mode =
- drm_mode_duplicate(dev, scan);
- intel_find_lvds_downclock(dev,
- intel_lvds->fixed_mode,
- connector);
- goto out;
+ DRM_DEBUG_KMS("using preferred mode from EDID: ");
+ drm_mode_debug_printmodeline(scan);
+
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ if (fixed_mode) {
+ intel_find_lvds_downclock(dev, fixed_mode,
+ connector);
+ goto out;
+ }
}
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
- intel_lvds->fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (intel_lvds->fixed_mode) {
- intel_lvds->fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("using mode from VBT: ");
+ drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (fixed_mode) {
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
@@ -1056,71 +1100,51 @@ bool intel_lvds_init(struct drm_device *dev)
crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
- if (intel_lvds->fixed_mode) {
- intel_lvds->fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (fixed_mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(fixed_mode);
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
/* If we still don't have a mode after all that, give up. */
- if (!intel_lvds->fixed_mode)
+ if (!fixed_mode)
goto failed;
out:
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
if (HAS_PCH_SPLIT(dev)) {
- u32 pwm;
-
- pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
-
- /* make sure PWM is enabled and locked to the LVDS pipe */
- pwm = I915_READ(BLC_PWM_CPU_CTL2);
- if (pipe == 0 && (pwm & PWM_PIPE_B))
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
- if (pipe)
- pwm |= PWM_PIPE_B;
- else
- pwm &= ~PWM_PIPE_B;
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
-
- pwm = I915_READ(BLC_PWM_PCH_CTL1);
- pwm |= PWM_PCH_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
- /*
- * Unlock registers and just
- * leave them unlocked
- */
I915_WRITE(PCH_PP_CONTROL,
I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
} else {
- /*
- * Unlock registers and just
- * leave them unlocked
- */
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
-#ifdef NOTYET
- dev_priv->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+#ifdef FREEBSD_WIP
+ lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
- dev_priv->lid_notifier.notifier_call = NULL;
+ lvds_connector->lid_notifier.notifier_call = NULL;
}
-#endif
- /* keep the LVDS connector */
- dev_priv->int_lvds_connector = connector;
-#if 0
- drm_sysfs_connector_add(connector);
-#endif
- intel_panel_setup_backlight(dev);
+#endif /* FREEBSD_WIP */
+
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
+
return true;
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- free(intel_lvds, DRM_MEM_KMS);
- free(intel_connector, DRM_MEM_KMS);
+ if (fixed_mode)
+ drm_mode_destroy(dev, fixed_mode);
+ free(lvds_encoder, DRM_MEM_KMS);
+ free(lvds_connector, DRM_MEM_KMS);
return false;
}
diff --git a/sys/dev/drm2/i915/intel_modes.c b/sys/dev/drm2/i915/intel_modes.c
index d061d08..5ff9591 100644
--- a/sys/dev/drm2/i915/intel_modes.c
+++ b/sys/dev/drm2/i915/intel_modes.c
@@ -27,39 +27,26 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
-#include <dev/drm2/i915/i915_drm.h>
-#include <dev/drm2/i915/i915_drv.h>
-#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/intel_drv.h>
+#include <dev/drm2/i915/i915_drv.h>
#include <dev/iicbus/iiconf.h>
/**
- * intel_ddc_probe
- *
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
*/
-bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid)
{
- struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
- u8 out_buf[] = { 0x0, 0x0};
- u8 buf[2];
- struct iic_msg msgs[] = {
- {
- .slave = DDC_ADDR << 1,
- .flags = IIC_M_WR,
- .len = 1,
- .buf = out_buf,
- },
- {
- .slave = DDC_ADDR << 1,
- .flags = IIC_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- return (iicbus_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
- msgs, 2) == 0/* XXXKIB 2*/);
+ int ret;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+
+ return ret;
}
/**
@@ -69,19 +56,18 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
*
* Fetch the EDID information from @connector using the DDC bus.
*/
-int
-intel_ddc_get_modes(struct drm_connector *connector, device_t adapter)
+int intel_ddc_get_modes(struct drm_connector *connector,
+ device_t adapter)
{
struct edid *edid;
- int ret = 0;
+ int ret;
edid = drm_get_edid(connector, adapter);
- if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
- free(edid, DRM_MEM_KMS);
- }
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ free(edid, DRM_MEM_KMS);
return ret;
}
@@ -129,9 +115,9 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
prop = dev_priv->broadcast_rgb_property;
if (prop == NULL) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Broadcast RGB",
- broadcast_rgb_names,
- ARRAY_SIZE(broadcast_rgb_names));
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
diff --git a/sys/dev/drm2/i915/intel_opregion.c b/sys/dev/drm2/i915/intel_opregion.c
index 397c559..7e6cb91 100644
--- a/sys/dev/drm2/i915/intel_opregion.c
+++ b/sys/dev/drm2/i915/intel_opregion.c
@@ -28,10 +28,13 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <dev/drm2/drmP.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
+
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
@@ -147,13 +150,15 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
-#if defined(CONFIG_ACPI)
+#ifdef CONFIG_ACPI
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
+ DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;
@@ -163,7 +168,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
max = intel_panel_get_max_backlight(dev);
intel_panel_set_backlight(dev, bclp * max / 255);
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+ iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
return 0;
}
@@ -200,71 +205,71 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
- asle_req = asle->aslc & ASLE_REQ_MSK;
+ asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
- DRM_DEBUG("non asle set request??\n");
+ DRM_DEBUG_DRIVER("non asle set request??\n");
return;
}
if (asle_req & ASLE_SET_ALS_ILLUM)
- asle_stat |= asle_set_als_illum(dev, asle->alsi);
+ asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT)
- asle_stat |= asle_set_pfit(dev, asle->pfit);
+ asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
if (asle_req & ASLE_SET_PWM_FREQ)
- asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+ asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
- asle->aslc = asle_stat;
+ iowrite32(asle_stat, &asle->aslc);
}
void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
- asle_req = asle->aslc & ASLE_REQ_MSK;
+ asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
- DRM_DEBUG("non asle set request??\n");
+ DRM_DEBUG_DRIVER("non asle set request??\n");
return;
}
if (asle_req & ASLE_SET_ALS_ILLUM) {
- DRM_DEBUG("Illum is not supported\n");
+ DRM_DEBUG_DRIVER("Illum is not supported\n");
asle_stat |= ASLE_ALS_ILLUM_FAILED;
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT) {
- DRM_DEBUG("Pfit is not supported\n");
+ DRM_DEBUG_DRIVER("Pfit is not supported\n");
asle_stat |= ASLE_PFIT_FAILED;
}
if (asle_req & ASLE_SET_PWM_FREQ) {
- DRM_DEBUG("PWM freq is not supported\n");
+ DRM_DEBUG_DRIVER("PWM freq is not supported\n");
asle_stat |= ASLE_PWM_FREQ_FAILED;
}
- asle->aslc = asle_stat;
+ iowrite32(asle_stat, &asle->aslc);
}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
@@ -274,15 +279,16 @@ void intel_opregion_gse_intr(struct drm_device *dev)
void intel_opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
if (asle) {
if (IS_MOBILE(dev))
intel_enable_asle(dev);
- asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
- ASLE_PFMB_EN;
- asle->ardy = 1;
+ iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+ ASLE_PFMB_EN,
+ &asle->tche);
+ iowrite32(1, &asle->ardy);
}
}
@@ -292,7 +298,7 @@ void intel_opregion_enable_asle(struct drm_device *dev)
static struct intel_opregion *system_opregion;
-#if 0
+#ifdef FREEBSD_WIP
static int intel_opregion_video_event(struct notifier_block *nb,
unsigned long val, void *data)
{
@@ -300,7 +306,8 @@ static int intel_opregion_video_event(struct notifier_block *nb,
either a docking event, lid switch or display switch request. In
Linux, these are handled by the dock, button and video drivers.
*/
- struct opregion_acpi *acpi;
+
+ struct opregion_acpi __iomem *acpi;
struct acpi_bus_event *event = data;
int ret = NOTIFY_OK;
@@ -312,10 +319,11 @@ static int intel_opregion_video_event(struct notifier_block *nb,
acpi = system_opregion->acpi;
- if (event->type == 0x80 && !(acpi->cevt & 0x1))
+ if (event->type == 0x80 &&
+ (ioread32(&acpi->cevt) & 1) == 0)
ret = NOTIFY_BAD;
- acpi->csts = 0;
+ iowrite32(0, &acpi->csts);
return ret;
}
@@ -323,7 +331,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
static struct notifier_block intel_opregion_notifier = {
.notifier_call = intel_opregion_video_event,
};
-#endif
+#endif /* FREEBSD_WIP */
/*
* Initialise the DIDL field in opregion. This passes a list of devices to
@@ -345,9 +353,10 @@ static void intel_didl_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
struct drm_connector *connector;
+ ACPI_HANDLE handle, acpi_cdev, acpi_video_bus = NULL;
u32 device_id;
- ACPI_HANDLE handle, acpi_video_bus, acpi_cdev;
ACPI_STATUS status;
+ u32 temp;
int i = 0;
handle = acpi_get_handle(dev->dev);
@@ -358,7 +367,6 @@ static void intel_didl_outputs(struct drm_device *dev)
acpi_video_bus = handle;
else {
acpi_cdev = NULL;
- acpi_video_bus = NULL;
while (AcpiGetNextObject(ACPI_TYPE_DEVICE, handle, acpi_cdev,
&acpi_cdev) != AE_NOT_FOUND) {
if (acpi_is_video_device(acpi_cdev)) {
@@ -366,14 +374,6 @@ static void intel_didl_outputs(struct drm_device *dev)
break;
}
}
-#if 0
- list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
- if (acpi_is_video_device(acpi_cdev)) {
- acpi_video_bus = acpi_cdev;
- break;
- }
- }
-#endif
}
if (!acpi_video_bus) {
@@ -388,37 +388,22 @@ static void intel_didl_outputs(struct drm_device *dev)
device_printf(dev->dev, "More than 8 outputs detected\n");
return;
}
- status = acpi_GetInteger(acpi_cdev, "_ADR", &device_id);
- if (ACPI_SUCCESS(status)) {
- if (!device_id)
- goto blind_set;
- opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
- i++;
- }
- }
-#if 0
- list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
- if (i >= 8) {
- dev_printk(KERN_ERR, &dev->pdev->dev,
- "More than 8 outputs detected\n");
- return;
- }
status =
- acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
- NULL, &device_id);
+ acpi_GetInteger(acpi_cdev, "_ADR",
+ &device_id);
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
- opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ iowrite32((u32)(device_id & 0x0f0f),
+ &opregion->acpi->didl[i]);
i++;
}
}
-#endif
end:
/* If fewer than 8 outputs, the list must be null terminated */
if (i < 8)
- opregion->acpi->didl[i] = 0;
+ iowrite32(0, &opregion->acpi->didl[i]);
return;
blind_set:
@@ -452,7 +437,9 @@ blind_set:
output_type = ACPI_LVDS_OUTPUT;
break;
}
- opregion->acpi->didl[i] |= (1<<31) | output_type | i;
+ temp = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(temp | (1<<31) | output_type | i,
+ &opregion->acpi->didl[i]);
i++;
}
goto end;
@@ -472,8 +459,8 @@ static void intel_setup_cadls(struct drm_device *dev)
* display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
* there are less than eight devices. */
do {
- disp_id = opregion->acpi->didl[i];
- opregion->acpi->cadl[i] = disp_id;
+ disp_id = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(disp_id, &opregion->acpi->cadl[i]);
} while (++i < 8 && disp_id != 0);
}
@@ -494,13 +481,13 @@ void intel_opregion_init(struct drm_device *dev)
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
* We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
+ iowrite32(0, &opregion->acpi->csts);
+ iowrite32(1, &opregion->acpi->drdy);
system_opregion = opregion;
-#if 0
+#ifdef FREEBSD_WIP
register_acpi_notifier(&intel_opregion_notifier);
-#endif
+#endif /* FREEBSD_WIP */
}
if (opregion->asle)
@@ -516,12 +503,12 @@ void intel_opregion_fini(struct drm_device *dev)
return;
if (opregion->acpi) {
- opregion->acpi->drdy = 0;
+ iowrite32(0, &opregion->acpi->drdy);
system_opregion = NULL;
-#if 0
+#ifdef FREEBSD_WIP
unregister_acpi_notifier(&intel_opregion_notifier);
-#endif
+#endif /* FREEBSD_WIP */
}
/* just clear all opregion memory pointers now */
@@ -532,45 +519,21 @@ void intel_opregion_fini(struct drm_device *dev)
opregion->asle = NULL;
opregion->vbt = NULL;
}
-#else
-void
-intel_opregion_init(struct drm_device *dev)
-{
-}
-
-void
-intel_opregion_fini(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv;
- struct intel_opregion *opregion;
-
- dev_priv = dev->dev_private;
- opregion = &dev_priv->opregion;
-
- if (opregion->header == NULL)
- return;
-
- pmap_unmapdev((vm_offset_t)opregion->header, OPREGION_SIZE);
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
- opregion->vbt = NULL;
-}
#endif
int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
- char *base;
+ void __iomem *base;
u32 asls, mboxes;
+ char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
- asls = pci_read_config(dev->dev, PCI_ASLS, 4);
- DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+ pci_read_config_dword(dev->dev, PCI_ASLS, &asls);
+ DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
- DRM_DEBUG("ACPI OpRegion not supported!\n");
+ DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
return -ENOTSUP;
}
@@ -578,31 +541,33 @@ int intel_opregion_setup(struct drm_device *dev)
if (!base)
return -ENOMEM;
- if (memcmp(base, OPREGION_SIGNATURE, 16)) {
- DRM_DEBUG("opregion signature mismatch\n");
+ memcpy_fromio(buf, base, sizeof(buf));
+
+ if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
+ DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
opregion->header = (struct opregion_header *)base;
- opregion->vbt = base + OPREGION_VBT_OFFSET;
+ opregion->vbt = (char *)base + OPREGION_VBT_OFFSET;
- opregion->lid_state = (u32 *)(base + ACPI_CLID);
+ opregion->lid_state = (u32 *)((char *)base + ACPI_CLID);
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
- DRM_DEBUG("Public ACPI methods supported\n");
- opregion->acpi = (struct opregion_acpi *)(base +
+ DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+ opregion->acpi = (struct opregion_acpi *)((char *)base +
OPREGION_ACPI_OFFSET);
}
if (mboxes & MBOX_SWSCI) {
- DRM_DEBUG("SWSCI supported\n");
- opregion->swsci = (struct opregion_swsci *)(base +
+ DRM_DEBUG_DRIVER("SWSCI supported\n");
+ opregion->swsci = (struct opregion_swsci *)((char *)base +
OPREGION_SWSCI_OFFSET);
}
if (mboxes & MBOX_ASLE) {
- DRM_DEBUG("ASLE supported\n");
- opregion->asle = (struct opregion_asle *)(base +
+ DRM_DEBUG_DRIVER("ASLE supported\n");
+ opregion->asle = (struct opregion_asle *)((char *)base +
OPREGION_ASLE_OFFSET);
}
diff --git a/sys/dev/drm2/i915/intel_overlay.c b/sys/dev/drm2/i915/intel_overlay.c
index 349a74f..f526633 100644
--- a/sys/dev/drm2/i915/intel_overlay.c
+++ b/sys/dev/drm2/i915/intel_overlay.c
@@ -25,12 +25,10 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
-
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/i915_reg.h>
@@ -191,46 +189,44 @@ struct intel_overlay {
void (*flip_tail)(struct intel_overlay *);
};
-static struct overlay_registers *
+static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
- struct overlay_registers *regs;
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers __iomem *regs;
- if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) {
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
- } else {
- regs = pmap_mapdev_attr(overlay->dev->agp->base +
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ regs = pmap_mapdev_attr(dev_priv->mm.gtt_base_addr +
overlay->reg_bo->gtt_offset, PAGE_SIZE,
PAT_WRITE_COMBINING);
- }
- return (regs);
+
+ return regs;
}
static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
}
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
- struct drm_i915_gem_request *request,
void (*tail)(struct intel_overlay *))
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- KASSERT(!overlay->last_flip_req, ("Overlay already has flip req"));
- ret = i915_add_request(ring, NULL, request);
- if (ret) {
- free(request, DRM_I915_GEM);
+ BUG_ON(overlay->last_flip_req);
+ ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
+ if (ret)
return ret;
- }
- overlay->last_flip_req = request->seqno;
+
overlay->flip_tail = tail;
- ret = i915_wait_request(ring, overlay->last_flip_req);
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
@@ -239,80 +235,22 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
return 0;
}
-/* Workaround for i830 bug where pipe a must be enable to change control regs */
-static int
-i830_activate_pipe_a(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
- struct drm_crtc_helper_funcs *crtc_funcs;
- struct drm_display_mode vesa_640x480 = {
- DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
- }, *mode;
-
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
- if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
- return 0;
-
- /* most i8xx have pipe a forced on, so don't trust dpms mode */
- if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
- return 0;
-
- crtc_funcs = crtc->base.helper_private;
- if (crtc_funcs->dpms == NULL)
- return 0;
-
- DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
-
- mode = drm_mode_duplicate(dev, &vesa_640x480);
-
- if (!drm_crtc_helper_set_mode(&crtc->base, mode,
- crtc->base.x, crtc->base.y,
- crtc->base.fb))
- return 0;
-
- crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
- return 1;
-}
-
-static void
-i830_deactivate_pipe_a(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
/* overlay needs to be disable in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
- struct drm_i915_gem_request *request;
- int pipe_a_quirk = 0;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- KASSERT(!overlay->active, ("Overlay is active"));
+ BUG_ON(overlay->active);
overlay->active = 1;
- if (IS_I830(dev)) {
- pipe_a_quirk = i830_activate_pipe_a(dev);
- if (pipe_a_quirk < 0)
- return pipe_a_quirk;
- }
-
- request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
ret = intel_ring_begin(ring, 4);
- if (ret) {
- free(request, DRM_I915_GEM);
- goto out;
- }
+ if (ret)
+ return ret;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
@@ -320,12 +258,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- ret = intel_overlay_do_wait_request(overlay, request, NULL);
-out:
- if (pipe_a_quirk)
- i830_deactivate_pipe_a(dev);
-
- return ret;
+ return intel_overlay_do_wait_request(overlay, NULL);
}
/* overlay needs to be enabled in OCMD reg */
@@ -334,15 +267,12 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
- struct drm_i915_gem_request *request;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
- KASSERT(overlay->active, ("Overlay not active"));
-
- request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ BUG_ON(!overlay->active);
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
@@ -353,22 +283,14 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
ret = intel_ring_begin(ring, 2);
- if (ret) {
- free(request, DRM_I915_GEM);
+ if (ret)
return ret;
- }
+
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
- ret = i915_add_request(ring, NULL, request);
- if (ret) {
- free(request, DRM_I915_GEM);
- return ret;
- }
-
- overlay->last_flip_req = request->seqno;
- return 0;
+ return i915_add_request(ring, NULL, &overlay->last_flip_req);
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -386,7 +308,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
struct drm_i915_gem_object *obj = overlay->vid_bo;
/* never have the overlay hw on without showing a frame */
- KASSERT(overlay->vid_bo != NULL, ("No vid_bo"));
+ BUG_ON(!overlay->vid_bo);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -402,14 +324,11 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
- struct drm_i915_gem_request *request;
int ret;
- KASSERT(overlay->active, ("Overlay is not active"));
-
- request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ BUG_ON(!overlay->active);
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
@@ -418,22 +337,28 @@ static int intel_overlay_off(struct intel_overlay *overlay)
flip_addr |= OFC_UPDATE;
ret = intel_ring_begin(ring, 6);
- if (ret) {
- free(request, DRM_I915_GEM);
+ if (ret)
return ret;
- }
+
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ if (IS_I830(dev)) {
+ /* Workaround: Don't disable the overlay fully, since otherwise
+ * it dies on the next OVERLAY_ON cmd. */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ } else {
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ }
intel_ring_advance(ring);
- return intel_overlay_do_wait_request(overlay, request,
- intel_overlay_off_tail);
+ return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
@@ -442,13 +367,13 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(ring, overlay->last_flip_req);
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
@@ -468,7 +393,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
/* Only wait if there is actually an old frame to release to
@@ -478,22 +403,16 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return 0;
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
- struct drm_i915_gem_request *request;
-
/* synchronous slowpath */
- request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
-
ret = intel_ring_begin(ring, 2);
- if (ret) {
- free(request, DRM_I915_GEM);
+ if (ret)
return ret;
- }
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- ret = intel_overlay_do_wait_request(overlay, request,
+ ret = intel_overlay_do_wait_request(overlay,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
@@ -619,14 +538,15 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0x3000, 0x0800, 0x3000
};
-static void update_polyphase_filter(struct overlay_registers *regs)
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
{
- memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
- memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+ memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+ sizeof(uv_static_hcoeffs));
}
static bool update_scaling_factors(struct intel_overlay *overlay,
- struct overlay_registers *regs,
+ struct overlay_registers __iomem *regs,
struct put_image_params *params)
{
/* fixed point with a 12 bit shift */
@@ -665,16 +585,19 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
overlay->old_xscale = xscale;
overlay->old_yscale = yscale;
- regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
- ((xscale >> FP_SHIFT) << 16) |
- ((xscale & FRACT_MASK) << 3));
+ iowrite32(((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3),
+ &regs->YRGBSCALE);
- regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
- ((xscale_UV >> FP_SHIFT) << 16) |
- ((xscale_UV & FRACT_MASK) << 3));
+ iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3),
+ &regs->UVSCALE);
- regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
- ((yscale_UV >> FP_SHIFT) << 0)));
+ iowrite32((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)),
+ &regs->UVSCALEV);
if (scale_changed)
update_polyphase_filter(regs);
@@ -683,30 +606,32 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
}
static void update_colorkey(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
u32 key = overlay->color_key;
switch (overlay->crtc->base.fb->bits_per_pixel) {
case 8:
- regs->DCLRKV = 0;
- regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ iowrite32(0, &regs->DCLRKV);
+ iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
case 16:
if (overlay->crtc->base.fb->depth == 15) {
- regs->DCLRKV = RGB15_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
} else {
- regs->DCLRKV = RGB16_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
}
break;
case 24:
case 32:
- regs->DCLRKV = key;
- regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ iowrite32(key, &regs->DCLRKV);
+ iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
}
}
@@ -756,24 +681,21 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
return cmd;
}
-static u32
-max_u32(u32 a, u32 b)
-{
-
- return (a > b ? a : b);
-}
-
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
{
int ret, tmp_width;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
bool scale_changed = false;
+#ifdef INVARIANTS
+ struct drm_device *dev = overlay->dev;
+#endif
u32 swidth, swidthsw, sheight, ostride;
- KASSERT(overlay != NULL, ("No overlay ?"));
- DRM_LOCK_ASSERT(overlay->dev);
+ DRM_LOCK_ASSERT(dev);
+ sx_assert(&dev->mode_config.mutex, SA_XLOCKED);
+ BUG_ON(!overlay);
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -781,7 +703,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
if (ret != 0)
- goto out_unpin;
+ return ret;
ret = i915_gem_object_put_fence(new_bo);
if (ret)
@@ -799,7 +721,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= overlay->crtc->pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
- regs->OCONFIG = oconfig;
+ iowrite32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
@@ -813,8 +735,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
- regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
- regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+ iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+ iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
if (params->format & I915_OVERLAY_YUV_PACKED)
tmp_width = packed_width_bytes(params->format, params->src_w);
@@ -824,7 +746,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
sheight = params->src_h;
- regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
+ iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -836,23 +758,23 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
params->src_w/uv_hscale);
- swidthsw |= max_u32(tmp_U, tmp_V) << 16;
+ swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
- regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
+ iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
+ iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
ostride |= params->stride_UV << 16;
}
- regs->SWIDTH = swidth;
- regs->SWIDTHSW = swidthsw;
- regs->SHEIGHT = sheight;
- regs->OSTRIDE = ostride;
+ iowrite32(swidth, &regs->SWIDTH);
+ iowrite32(swidthsw, &regs->SWIDTHSW);
+ iowrite32(sheight, &regs->SHEIGHT);
+ iowrite32(ostride, &regs->OSTRIDE);
scale_changed = update_scaling_factors(overlay, regs, params);
update_colorkey(overlay, regs);
- regs->OCMD = overlay_cmd_reg(params);
+ iowrite32(overlay_cmd_reg(params), &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
@@ -872,10 +794,14 @@ out_unpin:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
+#ifdef INVARIANTS
+ struct drm_device *dev = overlay->dev;
+#endif
int ret;
- DRM_LOCK_ASSERT(overlay->dev);
+ DRM_LOCK_ASSERT(dev);
+ sx_assert(&dev->mode_config.mutex, SA_XLOCKED);
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -889,7 +815,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
return ret;
regs = intel_overlay_map_regs(overlay);
- regs->OCMD = 0;
+ iowrite32(0, &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_off(overlay);
@@ -1138,7 +1064,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
return ret;
}
- params = malloc(sizeof(struct put_image_params), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ params = malloc(sizeof(struct put_image_params), DRM_I915_GEM, M_WAITOK);
+ if (!params)
+ return -ENOMEM;
drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -1254,10 +1182,11 @@ out_free:
}
static void update_reg_attrs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
- regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
- regs->OCLRC1 = overlay->saturation;
+ iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+ &regs->OCLRC0);
+ iowrite32(overlay->saturation, &regs->OCLRC1);
}
static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
@@ -1310,7 +1239,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_intel_overlay_attrs *attrs = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
/* No need to check for DRIVER_MODESET - we don't set it up then. */
@@ -1396,16 +1325,20 @@ void intel_setup_overlay(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
if (!HAS_OVERLAY(dev))
return;
overlay = malloc(sizeof(struct intel_overlay), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ if (!overlay)
+ return;
+
DRM_LOCK(dev);
- if (dev_priv->overlay != NULL)
+ if (WARN_ON(dev_priv->overlay))
goto out_free;
+
overlay->dev = dev;
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
@@ -1423,7 +1356,7 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
@@ -1447,7 +1380,7 @@ void intel_setup_overlay(struct drm_device *dev)
if (!regs)
goto out_unpin_bo;
- memset(regs, 0, sizeof(struct overlay_registers));
+ memset_io(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
update_reg_attrs(overlay, regs);
@@ -1479,12 +1412,15 @@ void intel_cleanup_overlay(struct drm_device *dev)
/* The bo's should be free'd by the generic code already.
* Furthermore modesetting teardown happens beforehand so the
* hardware should be off already */
- KASSERT(!dev_priv->overlay->active, ("Overlay still active"));
+ BUG_ON(dev_priv->overlay->active);
drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
free(dev_priv->overlay, DRM_I915_GEM);
}
+//#ifdef CONFIG_DEBUG_FS
+#define seq_printf(m, fmt, ...) sbuf_printf((m), (fmt), ##__VA_ARGS__)
+
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1492,6 +1428,11 @@ struct intel_overlay_error_state {
u32 isr;
};
+/*
+ * NOTE Linux<->FreeBSD: We use the normal intel_overlay_map_regs() and
+ * intel_overlay_unmap_regs() defined at the top of this file.
+ */
+
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_device *dev)
{
@@ -1510,15 +1451,15 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+ error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = (long) overlay->reg_bo->gtt_offset;
+ error->base = overlay->reg_bo->gtt_offset;
regs = intel_overlay_map_regs(overlay);
if (!regs)
goto err;
- memcpy(&error->regs, regs, sizeof(struct overlay_registers));
+ memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
intel_overlay_unmap_regs(overlay, regs);
return error;
@@ -1531,12 +1472,12 @@ err:
void
intel_overlay_print_error_state(struct sbuf *m, struct intel_overlay_error_state *error)
{
- sbuf_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
- error->dovsta, error->isr);
- sbuf_printf(m, " Register file at 0x%08lx:\n",
- error->base);
+ seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+ error->dovsta, error->isr);
+ seq_printf(m, " Register file at 0x%08lx:\n",
+ error->base);
-#define P(x) sbuf_printf(m, " " #x ": 0x%08x\n", error->regs.x)
+#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x)
P(OBUF_0Y);
P(OBUF_1Y);
P(OBUF_0U);
@@ -1580,3 +1521,4 @@ intel_overlay_print_error_state(struct sbuf *m, struct intel_overlay_error_state
P(UVSCALEV);
#undef P
}
+//#endif
diff --git a/sys/dev/drm2/i915/intel_panel.c b/sys/dev/drm2/i915/intel_panel.c
index 2d4210d..f822649 100644
--- a/sys/dev/drm2/i915/intel_panel.c
+++ b/sys/dev/drm2/i915/intel_panel.c
@@ -31,9 +31,9 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
-#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/intel_drv.h>
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
@@ -95,7 +95,7 @@ intel_pch_panel_fitting(struct drm_device *dev,
} else if (scaled_width < scaled_height) { /* letter */
height = scaled_width / mode->hdisplay;
if (height & 1)
- height++;
+ height++;
y = (adjusted_mode->vdisplay - height + 1) / 2;
x = 0;
width = adjusted_mode->hdisplay;
@@ -133,53 +133,45 @@ static int is_backlight_combination_mode(struct drm_device *dev)
return 0;
}
-static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
/* Restore the CTL value if it lost, e.g. GPU reset */
if (HAS_PCH_SPLIT(dev_priv->dev)) {
val = I915_READ(BLC_PWM_PCH_CTL2);
- if (dev_priv->saveBLC_PWM_CTL2 == 0) {
- dev_priv->saveBLC_PWM_CTL2 = val;
+ if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
- I915_WRITE(BLC_PWM_PCH_CTL2,
- dev_priv->saveBLC_PWM_CTL2);
- val = dev_priv->saveBLC_PWM_CTL2;
+ val = dev_priv->regfile.saveBLC_PWM_CTL2;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val);
}
} else {
val = I915_READ(BLC_PWM_CTL);
- if (dev_priv->saveBLC_PWM_CTL == 0) {
- dev_priv->saveBLC_PWM_CTL = val;
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL = val;
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 =
+ I915_READ(BLC_PWM_CTL2);
} else if (val == 0) {
- I915_WRITE(BLC_PWM_CTL,
- dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_CTL2,
- dev_priv->saveBLC_PWM_CTL2);
- val = dev_priv->saveBLC_PWM_CTL;
+ val = dev_priv->regfile.saveBLC_PWM_CTL;
+ I915_WRITE(BLC_PWM_CTL, val);
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->regfile.saveBLC_PWM_CTL2);
}
}
return val;
}
-u32 intel_panel_get_max_backlight(struct drm_device *dev)
+static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
- max = i915_read_blc_pwm_ctl(dev_priv);
- if (max == 0) {
- /* XXX add code here to query mode clock or hardware clock
- * and program max PWM appropriately.
- */
-#if 0
- printf("fixme: max PWM is zero.\n");
-#endif
- return 1;
- }
+ max = i915_read_blc_pwm_ctl(dev);
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
@@ -193,10 +185,34 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
max *= 0xff;
}
- DRM_DEBUG("max backlight PWM = %d\n", max);
return max;
}
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+ u32 max;
+
+ max = _intel_panel_get_max_backlight(dev);
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+ pr_warn_once("fixme: max PWM is zero\n");
+ return 1;
+ }
+
+ DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
+ return max;
+}
+
+static int i915_panel_invert_brightness;
+TUNABLE_INT("drm.i915.invert_brightness", &i915_panel_invert_brightness);
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -211,7 +227,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
return val;
}
-u32 intel_panel_get_backlight(struct drm_device *dev)
+static u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
@@ -226,7 +242,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
- lbpc = pci_read_config(dev->dev, PCI_LBPC, 1);
+ pci_read_config_byte(dev->dev, PCI_LBPC, &lbpc);
val *= lbpc;
}
}
@@ -260,11 +276,11 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
lbpc = level * 0xfe / max + 1;
level /= lbpc;
- pci_write_config(dev->dev, PCI_LBPC, lbpc, 4);
+ pci_write_config_byte(dev->dev, PCI_LBPC, lbpc);
}
tmp = I915_READ(BLC_PWM_CTL);
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
@@ -285,15 +301,69 @@ void intel_panel_disable_backlight(struct drm_device *dev)
dev_priv->backlight_enabled = false;
intel_panel_actually_set_backlight(dev, 0);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ uint32_t reg, tmp;
+
+ reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+ I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ }
+ }
}
-void intel_panel_enable_backlight(struct drm_device *dev)
+void intel_panel_enable_backlight(struct drm_device *dev,
+ enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ uint32_t reg, tmp;
+
+ reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+
+ tmp = I915_READ(reg);
+
+ /* Note that this can also get called through dpms changes. And
+ * we don't track the backlight dpms state, hence check whether
+ * we have to do anything first. */
+ if (tmp & BLM_PWM_ENABLE)
+ goto set_level;
+
+ if (dev_priv->num_pipe == 3)
+ tmp &= ~BLM_PIPE_SELECT_IVB;
+ else
+ tmp &= ~BLM_PIPE_SELECT;
+
+ tmp |= BLM_PIPE(pipe);
+ tmp &= ~BLM_PWM_ENABLE;
+
+ I915_WRITE(reg, tmp);
+ POSTING_READ(reg);
+ I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp |= BLM_PCH_PWM_ENABLE;
+ tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ }
+ }
+
+set_level:
+ /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
+ * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
+ * registers are set.
+ */
dev_priv->backlight_enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
@@ -309,31 +379,90 @@ static void intel_panel_init_backlight(struct drm_device *dev)
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
-#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
-
- if (i915_panel_ignore_lid)
- return i915_panel_ignore_lid > 0 ?
- connector_status_connected :
- connector_status_disconnected;
- /* opregion lid state on HP 2540p is wrong at boot up,
- * appears to be either the BIOS or Linux ACPI fault */
-#if 0
/* Assume that the BIOS does not lie through the OpRegion... */
- if (dev_priv->opregion.lid_state)
+ if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
-#endif
+ }
- return connector_status_unknown;
+ switch (i915_panel_ignore_lid) {
+ case -2:
+ return connector_status_connected;
+ case -1:
+ return connector_status_disconnected;
+ default:
+ return connector_status_unknown;
+ }
}
-int intel_panel_setup_backlight(struct drm_device *dev)
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+static int intel_panel_update_status(struct backlight_device *bd)
{
+ struct drm_device *dev = bl_get_data(bd);
+ intel_panel_set_backlight(dev, bd->props.brightness);
+ return 0;
+}
+
+static int intel_panel_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->backlight_level;
+}
+
+static const struct backlight_ops intel_panel_bl_ops = {
+ .update_status = intel_panel_update_status,
+ .get_brightness = intel_panel_get_brightness,
+};
+
+int intel_panel_setup_backlight(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct backlight_properties props;
+
intel_panel_init_backlight(dev);
+
+ if (WARN_ON(dev_priv->backlight))
+ return -ENODEV;
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = _intel_panel_get_max_backlight(dev);
+ if (props.max_brightness == 0) {
+ DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
+ return -ENODEV;
+ }
+ dev_priv->backlight =
+ backlight_device_register("intel_backlight",
+ &connector->kdev, dev,
+ &intel_panel_bl_ops, &props);
+
+ if (IS_ERR(dev_priv->backlight)) {
+ DRM_ERROR("Failed to register backlight: %ld\n",
+ PTR_ERR(dev_priv->backlight));
+ dev_priv->backlight = NULL;
+ return -ENODEV;
+ }
+ dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
+ return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev_priv->backlight) {
+ backlight_device_unregister(dev_priv->backlight);
+ dev_priv->backlight = NULL;
+ }
+}
+#else
+int intel_panel_setup_backlight(struct drm_connector *connector)
+{
+ intel_panel_init_backlight(connector->dev);
return 0;
}
@@ -341,3 +470,21 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
{
return;
}
+#endif
+
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode)
+{
+ panel->fixed_mode = fixed_mode;
+
+ return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+ struct intel_connector *intel_connector =
+ container_of(panel, struct intel_connector, panel);
+
+ if (panel->fixed_mode)
+ drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+}
diff --git a/sys/dev/drm2/i915/intel_pm.c b/sys/dev/drm2/i915/intel_pm.c
index 90f513d..ab9eee4 100644
--- a/sys/dev/drm2/i915/intel_pm.c
+++ b/sys/dev/drm2/i915/intel_pm.c
@@ -29,23 +29,11 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
-#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <sys/kdb.h>
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- * - i915_mch_dev
- * - dev_priv->max_delay
- * - dev_priv->min_delay
- * - dev_priv->fmax
- * - dev_priv->gpu_busy
- */
-static struct mtx mchdev_lock;
-MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF);
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
@@ -58,6 +46,14 @@ MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF);
* i915.i915_enable_fbc parameter
*/
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ */
+ return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -298,8 +294,6 @@ static void intel_fbc_work_fn(void *arg, int pending)
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
- u_int pending;
-
if (dev_priv->fbc_work == NULL)
return;
@@ -309,8 +303,8 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
* dev_priv->fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
- if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
- &pending) == 0)
+ if (taskqueue_cancel_timeout(dev_priv->wq, &dev_priv->fbc_work->work,
+ NULL) == 0)
/* tasklet was killed before being run, clean up */
free(dev_priv->fbc_work, DRM_MEM_KMS);
@@ -333,12 +327,16 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
intel_cancel_fbc_work(dev_priv);
- work = malloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ work = malloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (work == NULL) {
+ dev_priv->display.enable_fbc(crtc, interval);
+ return;
+ }
work->crtc = crtc;
work->fb = crtc->fb;
work->interval = interval;
- TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
+ TIMEOUT_TASK_INIT(dev_priv->wq, &work->work, 0, intel_fbc_work_fn,
work);
dev_priv->fbc_work = work;
@@ -356,7 +354,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*/
- taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
+ taskqueue_enqueue_timeout(dev_priv->wq, &work->work,
msecs_to_jiffies(50));
}
@@ -402,8 +400,6 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_i915_gem_object *obj;
int enable_fbc;
- DRM_DEBUG_KMS("\n");
-
if (!i915_powersave)
return;
@@ -420,7 +416,8 @@ void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled && tmp_crtc->fb) {
+ if (intel_crtc_active(tmp_crtc) &&
+ !to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -608,7 +605,7 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
break;
}
- dev_priv->r_t = dev_priv->mem_freq;
+ dev_priv->ips.r_t = dev_priv->mem_freq;
switch (csipll & 0x3ff) {
case 0x00c:
@@ -640,11 +637,11 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
}
if (dev_priv->fsb_freq == 3200) {
- dev_priv->c_m = 0;
+ dev_priv->ips.c_m = 0;
} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->c_m = 1;
+ dev_priv->ips.c_m = 1;
} else {
- dev_priv->c_m = 2;
+ dev_priv->ips.c_m = 2;
}
}
@@ -697,7 +694,7 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
if (fsb == 0 || mem == 0)
return NULL;
- for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
if (is_desktop == latency->is_desktop &&
is_ddr3 == latency->is_ddr3 &&
@@ -1005,7 +1002,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
struct drm_crtc *crtc, *enabled = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
enabled = crtc;
@@ -1099,7 +1096,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
+ if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
@@ -1228,7 +1225,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
int entries;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled)
+ if (!intel_crtc_active(crtc))
return false;
clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1299,6 +1296,7 @@ static void valleyview_update_wm(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
+ int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
vlv_update_drain_latency(dev);
@@ -1315,17 +1313,23 @@ static void valleyview_update_wm(struct drm_device *dev)
&planeb_wm, &cursorb_wm))
enabled |= 2;
- plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
- &plane_sr, &cursor_sr))
+ &plane_sr, &ignore_cursor_sr) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ 2*sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &ignore_plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
- else
+ } else {
I915_WRITE(FW_BLC_SELF_VLV,
I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ plane_sr = cursor_sr = 0;
+ }
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
@@ -1338,10 +1342,11 @@ static void valleyview_update_wm(struct drm_device *dev)
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+ (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void g4x_update_wm(struct drm_device *dev)
@@ -1364,17 +1369,18 @@ static void g4x_update_wm(struct drm_device *dev)
&planeb_wm, &cursorb_wm))
enabled |= 2;
- plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
- &plane_sr, &cursor_sr))
+ &plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- else
+ } else {
I915_WRITE(FW_BLC_SELF,
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ plane_sr = cursor_sr = 0;
+ }
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
@@ -1387,11 +1393,11 @@ static void g4x_update_wm(struct drm_device *dev)
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
@@ -1480,10 +1486,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
- if (crtc->enabled && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
planea_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
+ wm_info, fifo_size, cpp,
latency_ns);
enabled = crtc;
} else
@@ -1491,10 +1500,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
- if (crtc->enabled && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
planeb_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
+ wm_info, fifo_size, cpp,
latency_ns);
if (enabled == NULL)
enabled = crtc;
@@ -1584,8 +1596,7 @@ static void i830_update_wm(struct drm_device *dev)
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
+ 4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -1818,8 +1829,110 @@ static void sandybridge_update_wm(struct drm_device *dev)
enabled |= 2;
}
- if ((dev_priv->num_pipe == 3) &&
- g4x_compute_wm0(dev, 2,
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB support 3 levels of watermark.
+ *
+ * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
+ * and disabled in the descending order
+ *
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static void ivybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ if (g4x_compute_wm0(dev, 2,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
@@ -1882,12 +1995,17 @@ static void sandybridge_update_wm(struct drm_device *dev)
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
- /* WM3 */
+ /* WM3, note we have to correct the cursor latency */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
+ &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+ !ironlake_compute_srwm(dev, 3, enabled,
+ 2 * SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
return;
I915_WRITE(WM3_LP_ILK,
@@ -1936,7 +2054,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
+ if (!intel_crtc_active(crtc)) {
*sprite_wm = display->guard_size;
return false;
}
@@ -2153,7 +2271,7 @@ intel_alloc_context_page(struct drm_device *dev)
return NULL;
}
- ret = i915_gem_object_pin(ctx, 4096, true);
+ ret = i915_gem_object_pin(ctx, 4096, true, false);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -2175,11 +2293,23 @@ err_unref:
return NULL;
}
+/**
+ * Lock protecting IPS related data structures
+ */
+struct mtx mchdev_lock;
+MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF);
+
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
+ mtx_assert(&mchdev_lock, MA_OWNED);
+
rgvswctl = I915_READ16(MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
@@ -2197,12 +2327,14 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
return true;
}
-void ironlake_enable_drps(struct drm_device *dev)
+static void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
+ mtx_lock(&mchdev_lock);
+
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -2226,12 +2358,12 @@ void ironlake_enable_drps(struct drm_device *dev)
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
- dev_priv->fmax = fmax; /* IPS callback will increase this */
- dev_priv->fstart = fstart;
+ dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+ dev_priv->ips.fstart = fstart;
- dev_priv->max_delay = fstart;
- dev_priv->min_delay = fmin;
- dev_priv->cur_delay = fstart;
+ dev_priv->ips.max_delay = fstart;
+ dev_priv->ips.min_delay = fmin;
+ dev_priv->ips.cur_delay = fstart;
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
@@ -2248,23 +2380,29 @@ void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
- pause("915dsp", 1);
+ mdelay(1);
ironlake_set_drps(dev, fstart);
- dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
I915_READ(0x112e0);
- dev_priv->last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->last_count2 = I915_READ(0x112f4);
- nanotime(&dev_priv->last_time2);
+ dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->ips.last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->ips.last_time2);
+
+ mtx_unlock(&mchdev_lock);
}
-void ironlake_disable_drps(struct drm_device *dev)
+static void ironlake_disable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvswctl;
+
+ mtx_lock(&mchdev_lock);
+
+ rgvswctl = I915_READ16(MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -2274,27 +2412,76 @@ void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->fstart);
- pause("915dsp", 1);
+ ironlake_set_drps(dev, dev_priv->ips.fstart);
+ mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
- pause("915dsp", 1);
+ mdelay(1);
+
+ mtx_unlock(&mchdev_lock);
+}
+
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing a rmw cycle (which might result in us clearing
+ * all limits and the gpu stuck at whatever frequency it is at atm).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+{
+ u32 limits;
+
+ limits = 0;
+
+ if (*val >= dev_priv->rps.max_delay)
+ *val = dev_priv->rps.max_delay;
+ limits |= dev_priv->rps.max_delay << 24;
+
+ /* Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+ * frequency, if the down threshold expires in that window we will not
+ * receive a down interrupt. */
+ if (*val <= dev_priv->rps.min_delay) {
+ *val = dev_priv->rps.min_delay;
+ limits |= dev_priv->rps.min_delay << 16;
+ }
+ return limits;
}
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 swreq;
+ u32 limits = gen6_rps_limits(dev_priv, &val);
+
+ sx_assert(&dev_priv->rps.hw_lock, SA_XLOCKED);
+ WARN_ON(val > dev_priv->rps.max_delay);
+ WARN_ON(val < dev_priv->rps.min_delay);
+
+ if (val == dev_priv->rps.cur_delay)
+ return;
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
- swreq = (val & 0x3ff) << 25;
- I915_WRITE(GEN6_RPNSWREQ, swreq);
+ POSTING_READ(GEN6_RPNSWREQ);
+
+ dev_priv->rps.cur_delay = val;
}
-void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0);
@@ -2303,52 +2490,50 @@ void gen6_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- mtx_lock(&dev_priv->rps_lock);
- dev_priv->pm_iir = 0;
- mtx_unlock(&dev_priv->rps_lock);
+ mtx_lock(&dev_priv->rps.lock);
+ dev_priv->rps.pm_iir = 0;
+ mtx_unlock(&dev_priv->rps.lock);
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
int intel_enable_rc6(const struct drm_device *dev)
{
- /*
- * Respect the kernel parameter if it is set
- */
+ /* Respect the kernel parameter if it is set */
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
- /*
- * Disable RC6 on Ironlake
- */
+ /* Disable RC6 on Ironlake */
if (INTEL_INFO(dev)->gen == 5)
return 0;
- /* Sorry Haswell, no RC6 for you for now. */
- if (IS_HASWELL(dev))
- return 0;
+ if (IS_HASWELL(dev)) {
+ DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+ return INTEL_RC6_ENABLE;
+ }
- /*
- * Disable rc6 on Sandybridge
- */
+ /* snb/ivb have more than one rc6 state. */
if (INTEL_INFO(dev)->gen == 6) {
DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
return INTEL_RC6_ENABLE;
}
+
DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
-void gen6_enable_rps(struct drm_i915_private *dev_priv)
+static void gen6_enable_rps(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- u32 pcu_mbox, rc6_mask = 0;
+ u32 rp_state_cap;
+ u32 gt_perf_status;
+ u32 rc6vids, pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
- int cur_freq, min_freq, max_freq;
int rc6_mode;
- int i;
+ int i, ret;
+
+ sx_assert(&dev_priv->rps.hw_lock, SA_XLOCKED);
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
@@ -2357,7 +2542,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
- DRM_LOCK(dev_priv->dev);
/* Clear the DBG now so we don't confuse earlier errors */
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
@@ -2367,6 +2551,14 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
gen6_gt_force_wake_get(dev_priv);
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+ /* In units of 100MHz */
+ dev_priv->rps.max_delay = rp_state_cap & 0xff;
+ dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->rps.cur_delay = 0;
+
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2382,23 +2574,27 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+ /* Check if we are enabling RC6 */
rc6_mode = intel_enable_rc6(dev_priv->dev);
if (rc6_mode & INTEL_RC6_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
- if (rc6_mode & INTEL_RC6p_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+ /* We don't use those on Haswell */
+ if (!IS_HASWELL(dev)) {
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
- if (rc6_mode & INTEL_RC6pp_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ }
DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+ (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
@@ -2414,85 +2610,74 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- 18 << 24 |
- 6 << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
- I915_WRITE(GEN6_RP_UP_EI, 100000);
- I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+ dev_priv->rps.max_delay << 24 |
+ dev_priv->rps.min_delay << 16);
+
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
- I915_WRITE(GEN6_PCODE_DATA, 0);
- I915_WRITE(GEN6_PCODE_MAILBOX,
- GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
- min_freq = (rp_state_cap & 0xff0000) >> 16;
- max_freq = rp_state_cap & 0xff;
- cur_freq = (gt_perf_status & 0xff00) >> 8;
+ (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
- /* Check for overclock support */
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
- pcu_mbox = I915_READ(GEN6_PCODE_DATA);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- if (pcu_mbox & (1<<31)) { /* OC supported */
- max_freq = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+ if (!ret) {
+ pcu_mbox = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+ if (ret && pcu_mbox & (1<<31)) { /* OC supported */
+ dev_priv->rps.max_delay = pcu_mbox & 0xff;
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ }
+ } else {
+ DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
}
- /* In units of 100MHz */
- dev_priv->max_delay = max_freq;
- dev_priv->min_delay = min_freq;
- dev_priv->cur_delay = cur_freq;
+ gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
/* requires MSI enabled */
- I915_WRITE(GEN6_PMIER,
- GEN6_PM_MBOX_EVENT |
- GEN6_PM_THERMAL_EVENT |
- GEN6_PM_RP_DOWN_TIMEOUT |
- GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_UP_EI_EXPIRED |
- GEN6_PM_RP_DOWN_EI_EXPIRED);
- mtx_lock(&dev_priv->rps_lock);
- if (dev_priv->pm_iir != 0)
- printf("KMS: pm_iir %x\n", dev_priv->pm_iir);
+ I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
+ mtx_lock(&dev_priv->rps.lock);
+ WARN_ON(dev_priv->rps.pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
- mtx_unlock(&dev_priv->rps_lock);
+ mtx_unlock(&dev_priv->rps.lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ if (IS_GEN6(dev) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+
gen6_gt_force_wake_put(dev_priv);
- DRM_UNLOCK(dev_priv->dev);
}
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+static void gen6_update_ring_freq(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
- int gpu_freq, ia_freq, max_ia_freq;
+ int gpu_freq;
+ unsigned int ia_freq, max_ia_freq;
int scaling_factor = 180;
- uint64_t tsc_freq;
-#if 0
+ sx_assert(&dev_priv->rps.hw_lock, SA_XLOCKED);
+
+#ifdef FREEBSD_WIP
max_ia_freq = cpufreq_quick_get_max(0);
/*
* Default to measured freq if none found, PCU will ensure we don't go
@@ -2500,25 +2685,23 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
*/
if (!max_ia_freq)
max_ia_freq = tsc_khz;
-
- /* Convert from kHz to MHz */
- max_ia_freq /= 1000;
#else
+ uint64_t tsc_freq;
tsc_freq = atomic_load_acq_64(&tsc_freq);
- max_ia_freq = tsc_freq / 1000 / 1000;
-#endif
+ max_ia_freq = tsc_freq / 1000;
+#endif /* FREEBSD_WIP */
- DRM_LOCK(dev_priv->dev);
+ /* Convert from kHz to MHz */
+ max_ia_freq /= 1000;
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
gpu_freq--) {
- int diff = dev_priv->max_delay - gpu_freq;
- int d;
+ int diff = dev_priv->rps.max_delay - gpu_freq;
/*
* For GPU frequencies less than 750MHz, just use the lowest
@@ -2528,42 +2711,33 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
ia_freq = 800;
else
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- d = 100;
- ia_freq = (ia_freq + d / 2) / d;
-
- I915_WRITE(GEN6_PCODE_DATA,
- (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
- gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode write of freq table timed out\n");
- continue;
- }
- }
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
- DRM_UNLOCK(dev_priv->dev);
+ sandybridge_pcode_write(dev_priv,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq | gpu_freq);
+ }
}
-static void ironlake_teardown_rc6(struct drm_device *dev)
+void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->renderctx) {
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
- dev_priv->renderctx = NULL;
+ if (dev_priv->ips.renderctx) {
+ i915_gem_object_unpin(dev_priv->ips.renderctx);
+ drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+ dev_priv->ips.renderctx = NULL;
}
- if (dev_priv->pwrctx) {
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(&dev_priv->pwrctx->base);
- dev_priv->pwrctx = NULL;
+ if (dev_priv->ips.pwrctx) {
+ i915_gem_object_unpin(dev_priv->ips.pwrctx);
+ drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+ dev_priv->ips.pwrctx = NULL;
}
}
-void ironlake_disable_rc6(struct drm_device *dev)
+static void ironlake_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2579,22 +2753,20 @@ void ironlake_disable_rc6(struct drm_device *dev)
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
POSTING_READ(RSTDBYCTL);
}
-
- ironlake_teardown_rc6(dev);
}
static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->renderctx)
+ if (dev_priv->ips.renderctx == NULL)
+ dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.renderctx)
return -ENOMEM;
- if (dev_priv->pwrctx == NULL)
- dev_priv->pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->pwrctx) {
+ if (dev_priv->ips.pwrctx == NULL)
+ dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
}
@@ -2602,10 +2774,11 @@ static int ironlake_setup_rc6(struct drm_device *dev)
return 0;
}
-void ironlake_enable_rc6(struct drm_device *dev)
+static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ bool was_interruptible;
int ret;
/* rc6 disabled by default due to repeated reports of hanging during
@@ -2614,12 +2787,14 @@ void ironlake_enable_rc6(struct drm_device *dev)
if (!intel_enable_rc6(dev))
return;
- DRM_LOCK(dev);
+ DRM_LOCK_ASSERT(dev);
+
ret = ironlake_setup_rc6(dev);
- if (ret) {
- DRM_UNLOCK(dev);
+ if (ret)
return;
- }
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
/*
* GPU can automatically power down the render unit if given a page
@@ -2628,13 +2803,13 @@ void ironlake_enable_rc6(struct drm_device *dev)
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
- DRM_UNLOCK(dev);
+ dev_priv->mm.interruptible = was_interruptible;
return;
}
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+ intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -2649,17 +2824,16 @@ void ironlake_enable_rc6(struct drm_device *dev)
* does an implicit flush, combined with MI_FLUSH above, it should be
* safe to assume that renderctx is valid
*/
- ret = intel_wait_ring_idle(ring);
+ ret = intel_ring_idle(ring);
+ dev_priv->mm.interruptible = was_interruptible;
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
- DRM_UNLOCK(dev);
return;
}
- I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- DRM_UNLOCK(dev);
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -2691,22 +2865,24 @@ static const struct cparams {
{ 0, 800, 231, 23784 },
};
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
unsigned long now = jiffies_to_msecs(jiffies), diff1;
int i;
- diff1 = now - dev_priv->last_time1;
- /*
- * sysctl(8) reads the value of sysctl twice in rapid
- * succession. There is high chance that it happens in the
- * same timer tick. Use the cached value to not divide by
- * zero and give the hw a chance to gather more samples.
+ mtx_assert(&mchdev_lock, MA_OWNED);
+
+ diff1 = now - dev_priv->ips.last_time1;
+
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
*/
if (diff1 <= 10)
- return dev_priv->chipset_power;
+ return dev_priv->ips.chipset_power;
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
@@ -2715,33 +2891,50 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
total_count = count1 + count2 + count3;
/* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->last_count1) {
- diff = ~0UL - dev_priv->last_count1;
+ if (total_count < dev_priv->ips.last_count1) {
+ diff = ~0UL - dev_priv->ips.last_count1;
diff += total_count;
} else {
- diff = total_count - dev_priv->last_count1;
+ diff = total_count - dev_priv->ips.last_count1;
}
- for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->c_m &&
- cparams[i].t == dev_priv->r_t) {
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->ips.c_m &&
+ cparams[i].t == dev_priv->ips.r_t) {
m = cparams[i].m;
c = cparams[i].c;
break;
}
}
- diff = diff / diff1;
+ diff = div_u64(diff, diff1);
ret = ((m * diff) + c);
- ret = ret / 10;
+ ret = div_u64(ret, 10);
+
+ dev_priv->ips.last_count1 = total_count;
+ dev_priv->ips.last_time1 = now;
- dev_priv->last_count1 = total_count;
- dev_priv->last_time1 = now;
+ dev_priv->ips.chipset_power = ret;
- dev_priv->chipset_power = ret;
return ret;
}
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ mtx_lock(&mchdev_lock);
+
+ val = __i915_chipset_val(dev_priv);
+
+ mtx_unlock(&mchdev_lock);
+
+ return val;
+}
+
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
unsigned long m, x, b;
@@ -2898,19 +3091,18 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
return v_table[pxvid].vd;
}
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
struct timespec now, diff1;
u64 diff;
unsigned long diffms;
u32 count;
- if (dev_priv->info->gen != 5)
- return;
+ mtx_assert(&mchdev_lock, MA_OWNED);
nanotime(&now);
diff1 = now;
- timespecsub(&diff1, &dev_priv->last_time2);
+ timespecsub(&diff1, &dev_priv->ips.last_time2);
/* Don't divide by 0 */
diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
@@ -2919,28 +3111,42 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
count = I915_READ(GFXEC);
- if (count < dev_priv->last_count2) {
- diff = ~0UL - dev_priv->last_count2;
+ if (count < dev_priv->ips.last_count2) {
+ diff = ~0UL - dev_priv->ips.last_count2;
diff += count;
} else {
- diff = count - dev_priv->last_count2;
+ diff = count - dev_priv->ips.last_count2;
}
- dev_priv->last_count2 = count;
- dev_priv->last_time2 = now;
+ dev_priv->ips.last_count2 = count;
+ dev_priv->ips.last_time2 = now;
/* More magic constants... */
diff = diff * 1181;
- diff = diff / (diffms * 10);
- dev_priv->gfx_power = diff;
+ diff = div_u64(diff, diffms * 10);
+ dev_priv->ips.gfx_power = diff;
}
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->info->gen != 5)
+ return;
+
+ mtx_lock(&mchdev_lock);
+
+ __i915_update_gfx_val(dev_priv);
+
+ mtx_unlock(&mchdev_lock);
+}
+
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long t, corr, state1, corr2, state2;
u32 pxvid, ext_v;
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ mtx_assert(&mchdev_lock, MA_OWNED);
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
@@ -2960,14 +3166,30 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
corr = corr * ((150142 * state1) / 10000 - 78642);
corr /= 100000;
- corr2 = (corr * dev_priv->corr);
+ corr2 = (corr * dev_priv->ips.corr);
state2 = (corr2 * state1) / 10000;
state2 /= 100; /* convert to mW */
- i915_update_gfx_val(dev_priv);
+ __i915_update_gfx_val(dev_priv);
+
+ return dev_priv->ips.gfx_power + state2;
+}
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ mtx_lock(&mchdev_lock);
+
+ val = __i915_gfx_val(dev_priv);
+
+ mtx_unlock(&mchdev_lock);
- return dev_priv->gfx_power + state2;
+ return val;
}
/**
@@ -2986,8 +3208,8 @@ unsigned long i915_read_mch_val(void)
goto out_unlock;
dev_priv = i915_mch_dev;
- chipset_val = i915_chipset_val(dev_priv);
- graphics_val = i915_gfx_val(dev_priv);
+ chipset_val = __i915_chipset_val(dev_priv);
+ graphics_val = __i915_gfx_val(dev_priv);
ret = chipset_val + graphics_val;
@@ -2996,6 +3218,7 @@ out_unlock:
return ret;
}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
* i915_gpu_raise - raise GPU frequency limit
@@ -3014,14 +3237,15 @@ bool i915_gpu_raise(void)
}
dev_priv = i915_mch_dev;
- if (dev_priv->max_delay > dev_priv->fmax)
- dev_priv->max_delay--;
+ if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+ dev_priv->ips.max_delay--;
out_unlock:
mtx_unlock(&mchdev_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
* i915_gpu_lower - lower GPU frequency limit
@@ -3041,14 +3265,15 @@ bool i915_gpu_lower(void)
}
dev_priv = i915_mch_dev;
- if (dev_priv->max_delay < dev_priv->min_delay)
- dev_priv->max_delay++;
+ if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+ dev_priv->ips.max_delay++;
out_unlock:
mtx_unlock(&mchdev_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
* i915_gpu_busy - indicate GPU business to IPS
@@ -3058,20 +3283,24 @@ out_unlock:
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
+ struct intel_ring_buffer *ring;
bool ret = false;
+ int i;
mtx_lock(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
- ret = dev_priv->busy;
+ for_each_ring(ring, dev_priv, i)
+ ret |= !list_empty(&ring->request_list);
out_unlock:
mtx_unlock(&mchdev_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
* i915_gpu_turbo_disable - disable graphics turbo
@@ -3091,9 +3320,9 @@ bool i915_gpu_turbo_disable(void)
}
dev_priv = i915_mch_dev;
- dev_priv->max_delay = dev_priv->fstart;
+ dev_priv->ips.max_delay = dev_priv->ips.fstart;
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
ret = false;
out_unlock:
@@ -3101,17 +3330,41 @@ out_unlock:
return ret;
}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+#ifdef FREEBSD_WIP
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+#endif /* FREEBSD_WIP */
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
+ /* We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values. */
mtx_lock(&mchdev_lock);
i915_mch_dev = dev_priv;
- dev_priv->mchdev_lock = &mchdev_lock;
mtx_unlock(&mchdev_lock);
-#if 0
+#ifdef FREEBSD_WIP
ips_ping_for_i915_load();
-#endif
+#endif /* FREEBSD_WIP */
}
void intel_gpu_ips_teardown(void)
@@ -3120,8 +3373,7 @@ void intel_gpu_ips_teardown(void)
i915_mch_dev = NULL;
mtx_unlock(&mchdev_lock);
}
-
-void intel_init_emon(struct drm_device *dev)
+static void intel_init_emon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lcfuse;
@@ -3189,7 +3441,52 @@ void intel_init_emon(struct drm_device *dev)
lcfuse = I915_READ(LCFUSE02);
- dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+ dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
+void intel_disable_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_disable_drps(dev);
+ ironlake_disable_rc6(dev);
+ } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+ taskqueue_cancel_timeout(dev_priv->wq, &dev_priv->rps.delayed_resume_work, NULL);
+ sx_xlock(&dev_priv->rps.hw_lock);
+ gen6_disable_rps(dev);
+ sx_xunlock(&dev_priv->rps.hw_lock);
+ }
+}
+
+static void intel_gen6_powersave_work(void *arg, int pending)
+{
+ struct drm_i915_private *dev_priv = arg;
+ struct drm_device *dev = dev_priv->dev;
+
+ sx_xlock(&dev_priv->rps.hw_lock);
+ gen6_enable_rps(dev);
+ gen6_update_ring_freq(dev);
+ sx_xunlock(&dev_priv->rps.hw_lock);
+}
+
+void intel_enable_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ ironlake_enable_rc6(dev);
+ intel_init_emon(dev);
+ } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ /*
+ * PCU communication is slow and this doesn't need to be
+ * done at any specific time, so do this out of our fast path
+ * to make resume and init faster.
+ */
+ taskqueue_enqueue_timeout(dev_priv->wq, &dev_priv->rps.delayed_resume_work,
+ round_jiffies_up_relative(HZ));
+ }
}
static void ibx_init_clock_gating(struct drm_device *dev)
@@ -3207,14 +3504,12 @@ static void ibx_init_clock_gating(struct drm_device *dev)
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/* Required for FBC */
- dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
- DPFCRUNIT_CLOCK_GATE_DISABLE |
- DPFDUNIT_CLOCK_GATE_DISABLE;
- /* Required for CxSR */
- dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+ dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
@@ -3222,8 +3517,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
@@ -3234,9 +3527,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
- I915_WRITE(ILK_DSPCLK_GATE,
- (I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE));
+ dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
@@ -3258,25 +3549,29 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPFC_DIS1 |
- ILK_DPFC_DIS2 |
- ILK_CLK_FBC);
}
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
I915_WRITE(_3D_CHICKEN2,
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+ ibx_init_clock_gating(dev);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
+ uint32_t val;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -3286,23 +3581,43 @@ static void cpt_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
- /* Without this, mode sets may fail silently on FDI */
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
+ /* The below fixes the weird display corruption, a few pixels shifted
+ * downward, on (only) LVDS of some HP laptops with IVY.
+ */
+ for_each_pipe(pipe) {
+ val = TRANS_CHICKEN2_TIMING_OVERRIDE;
+ if (dev_priv->fdi_rx_polarity_inverted)
+ val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+ I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ }
+ /* WADP0ClockGatingDisable */
+ for_each_pipe(pipe) {
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+ }
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
+ /* WaDisableHiZPlanesWhenMSAAEnabled */
+ I915_WRITE(_3D_CHICKEN,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+ /* WaSetupGtModeTdRowDispatch */
+ if (IS_SNB_GT1(dev))
+ I915_WRITE(GEN6_GT_MODE,
+ _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
@@ -3324,13 +3639,17 @@ static void gen6_init_clock_gating(struct drm_device *dev)
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
+ *
+ * Also apply WaDisableVDSUnitClockGating and
+ * WaDisableRCPBUnitClockGating.
*/
I915_WRITE(GEN6_UCGCTL2,
+ GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
/* Bspec says we need to always set all mask bits. */
- I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
+ I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
/*
@@ -3348,10 +3667,14 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE |
- ILK_DPFD_CLK_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE_D,
+ I915_READ(ILK_DSPCLK_GATE_D) |
+ ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
@@ -3359,6 +3682,13 @@ static void gen6_init_clock_gating(struct drm_device *dev)
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
+
+ /* The default value should be 0x200 according to docs, but the two
+ * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
+ I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
+ I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
+
+ cpt_init_clock_gating(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3373,13 +3703,24 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
-static void ivybridge_init_clock_gating(struct drm_device *dev)
+static void lpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+ /*
+ * TODO: this bit should only be enabled when really needed, then
+ * disabled when not needed anymore in order to save power.
+ */
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+ I915_WRITE(SOUTH_DSPCLK_GATE_D,
+ I915_READ(SOUTH_DSPCLK_GATE_D) |
+ PCH_LP_PARTITION_LEVEL_DISABLE);
+}
+
+static void haswell_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
@@ -3390,12 +3731,79 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ /* XXX: This is a workaround for early silicon revisions and should be
+ * removed later.
+ */
+ I915_WRITE(WM_DBG,
+ I915_READ(WM_DBG) |
+ WM_DBG_DISALLOW_MULTIPLE_LP |
+ WM_DBG_DISALLOW_SPRITE |
+ WM_DBG_DISALLOW_MAXFIFO);
+
+ lpt_init_clock_gating(dev);
+}
+
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t snpcr;
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ /* WaDisablePSDDualDispatchEnable */
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+ else
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3404,7 +3812,35 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
+ GEN7_WA_L3_CHICKEN_MODE);
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ else
+ I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
@@ -3418,49 +3854,102 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
intel_flush_display_plane(dev_priv, pipe);
}
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
gen7_setup_fixed_func_scheduler(dev_priv);
/* WaDisable4x2SubspanOptimization */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= GEN6_MBC_SNPCR_MED;
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ cpt_init_clock_gating(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
- * This implements the WaDisableRCZUnitClockGating workaround.
- */
- I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+ /* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
- I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /* WaDisableDopClockGating */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ *
+ * Also apply WaDisableVDSUnitClockGating and
+ * WaDisableRCPBUnitClockGating.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
+ GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
@@ -3470,6 +3959,26 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /*
+ * On ValleyView, the GUnit needs to signal the GT
+ * when flip and other events complete. So enable
+ * all the GUnit->GT interrupts here
+ */
+ I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
+ PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
+ SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
+ PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
+ PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
+ SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
+ PLANEA_FLIPDONE_INT_EN);
+
+ /*
+ * WaDisableVLVClockGating_VBIIssue
+ * Disable clock gating on th GCFG unit to prevent a delay
+ * in the reporting of vblank events.
+ */
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3488,6 +3997,10 @@ static void g4x_init_clock_gating(struct drm_device *dev)
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
}
static void crestline_init_clock_gating(struct drm_device *dev)
@@ -3524,6 +4037,9 @@ static void gen3_init_clock_gating(struct drm_device *dev)
if (IS_PINEVIEW(dev))
I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+ /* IIR "flip pending" means done if this bit is set */
+ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
@@ -3545,50 +4061,12 @@ void intel_init_clock_gating(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->display.init_clock_gating(dev);
-
- if (dev_priv->display.init_pch_clock_gating)
- dev_priv->display.init_pch_clock_gating(dev);
-}
-
-static void gen6_sanitize_pm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 limits, delay, old;
-
- gen6_gt_force_wake_get(dev_priv);
-
- old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
- /* Make sure we continue to get interrupts
- * until we hit the minimum or maximum frequencies.
- */
- limits &= ~(0x3f << 16 | 0x3f << 24);
- delay = dev_priv->cur_delay;
- if (delay < dev_priv->max_delay)
- limits |= (dev_priv->max_delay & 0x3f) << 24;
- if (delay > dev_priv->min_delay)
- limits |= (dev_priv->min_delay & 0x3f) << 16;
-
- if (old != limits) {
- DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
- limits, old);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
- }
-
- gen6_gt_force_wake_put(dev_priv);
-}
-
-void intel_sanitize_pm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.sanitize_pm)
- dev_priv->display.sanitize_pm(dev);
}
/* Starting with Haswell, we have different power wells for
* different parts of the GPU. This attempts to enable them all.
*/
-static void intel_init_power_wells(struct drm_device *dev)
+void intel_init_power_wells(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long power_wells[] = {
@@ -3603,19 +4081,16 @@ static void intel_init_power_wells(struct drm_device *dev)
DRM_LOCK(dev);
- for (i = 0; i < DRM_ARRAY_SIZE(power_wells); i++) {
+ for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
int well = I915_READ(power_wells[i]);
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
- if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
+ if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
}
}
-printf("XXXKIB HACK: HSW RC OFF\n");
- I915_WRITE(GEN6_RC_STATE, 0);
- I915_WRITE(GEN6_RC_CONTROL, 0);
DRM_UNLOCK(dev);
}
@@ -3649,39 +4124,6 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
-
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured MT forcewake,
- * and if the device is in RC6, then force_wake_mt_get will not wake
- * the device and the ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT forcewake being
- * disabled.
- */
- DRM_LOCK(dev);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- DRM_UNLOCK(dev);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->display.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->display.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
-
- if (HAS_PCH_IBX(dev))
- dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
- else if (HAS_PCH_CPT(dev))
- dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
@@ -3701,11 +4143,10 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
@@ -3713,7 +4154,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else if (IS_HASWELL(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
@@ -3724,16 +4164,13 @@ void intel_init_pm(struct drm_device *dev)
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ dev_priv->display.init_clock_gating = haswell_init_clock_gating;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
valleyview_init_clock_gating;
- dev_priv->display.force_wake_get = vlv_force_wake_get;
- dev_priv->display.force_wake_put = vlv_force_wake_put;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
dev_priv->is_ddr3,
@@ -3779,10 +4216,264 @@ void intel_init_pm(struct drm_device *dev)
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ u32 gt_thread_status_mask;
- /* We attempt to init the necessary power wells early in the initialization
- * time, so the subsystems that expect power to be enabled can work.
+ if (IS_HASWELL(dev_priv->dev))
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+ else
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+ /* w/a for a sporadic read returning 0 by waiting for the GT
+ * thread to wake up.
*/
- intel_init_power_wells(dev);
+ if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_ACK;
+
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
+}
+
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_MT_ACK;
+
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
+
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+
+ mtx_lock(&dev_priv->gt_lock);
+ if (dev_priv->forcewake_count++ == 0)
+ dev_priv->gt.force_wake_get(dev_priv);
+ mtx_unlock(&dev_priv->gt_lock);
+}
+
+void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+ gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+ if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+ "MMIO read or write has been dropped %x\n", gtfifodbg))
+ I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ /* something from same cacheline, but !FORCEWAKE */
+ POSTING_READ(ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+
+ mtx_lock(&dev_priv->gt_lock);
+ if (--dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_put(dev_priv);
+ mtx_unlock(&dev_priv->gt_lock);
+}
+
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+ if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+ ++ret;
+ dev_priv->gt_fifo_count = fifo;
+ }
+ dev_priv->gt_fifo_count--;
+
+ return ret;
+}
+
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
+}
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+void intel_gt_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+}
+
+void intel_gt_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mtx_init(&dev_priv->gt_lock, "i915_gt_lock", NULL, MTX_DEF);
+
+ intel_gt_reset(dev);
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->gt.force_wake_get = vlv_force_wake_get;
+ dev_priv->gt.force_wake_put = vlv_force_wake_put;
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_GEN6(dev)) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
+ TIMEOUT_TASK_INIT(dev_priv->wq, &dev_priv->rps.delayed_resume_work, 0,
+ intel_gen6_powersave_work, dev_priv);
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+{
+ sx_assert(&dev_priv->rps.hw_lock, SA_XLOCKED);
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, *val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ *val = I915_READ(GEN6_PCODE_DATA);
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
+
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+{
+ sx_assert(&dev_priv->rps.hw_lock, SA_XLOCKED);
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
diff --git a/sys/dev/drm2/i915/intel_ringbuffer.c b/sys/dev/drm2/i915/intel_ringbuffer.c
index e0abb83..92a7927 100644
--- a/sys/dev/drm2/i915/intel_ringbuffer.c
+++ b/sys/dev/drm2/i915/intel_ringbuffer.c
@@ -31,11 +31,9 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
-#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
+#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/intel_drv.h>
-#include <dev/drm2/i915/intel_ringbuffer.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
@@ -49,23 +47,9 @@ struct pipe_control {
u32 gtt_offset;
};
-void
-i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno)
-{
- struct drm_i915_private *dev_priv;
-
- if (ring->trace_irq_seqno == 0) {
- dev_priv = ring->dev->dev_private;
- mtx_lock(&dev_priv->irq_lock);
- if (ring->irq_get(ring))
- ring->trace_irq_seqno = seqno;
- mtx_unlock(&dev_priv->irq_lock);
- }
-}
-
static inline int ring_space(struct intel_ring_buffer *ring)
{
- int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
return space;
@@ -238,30 +222,121 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- intel_emit_post_sync_nonzero_flush(ring);
+ ret = intel_emit_post_sync_nonzero_flush(ring);
+ if (ret)
+ return ret;
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /*
+ * Ensure that any following seqno writes only happen
+ * when the render cache is indeed flushed.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ }
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0); /* lower dword */
- intel_ring_emit(ring, 0); /* uppwer dword */
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen7_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+ * Workaround: 4th PIPE_CONTROL command (except the ones with only
+ * read-cache invalidate bits set) must have the CS_STALL bit set. We
+ * don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ /* Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set. */
+ gen7_render_ring_cs_stall_wa(ring);
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0);
intel_ring_advance(ring);
return 0;
@@ -285,17 +360,20 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
static int init_ring_common(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = ring->obj;
+ int ret = 0;
u32 head;
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_get(dev_priv);
+
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
- /* Initialize the ring. */
- I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
@@ -321,16 +399,19 @@ static int init_ring_common(struct intel_ring_buffer *ring)
}
}
+ /* Initialize the ring. This must happen _after_ we've cleared the ring
+ * registers with the above sequence (the readback of the HEAD registers
+ * also enforces ordering), otherwise the hw might lose the new ring
+ * register values. */
+ I915_WRITE_START(ring, obj->gtt_offset);
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (_intel_wait_for(ring->dev,
- (I915_READ_CTL(ring) & RING_VALID) != 0 &&
- I915_READ_START(ring) == obj->gtt_offset &&
- (I915_READ_HEAD(ring) & HEAD_ADDR) == 0,
- 50, 1, "915rii")) {
+ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+ I915_READ_START(ring) == obj->gtt_offset &&
+ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -338,7 +419,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_READ_HEAD(ring),
I915_READ_TAIL(ring),
I915_READ_START(ring));
- return -EIO;
+ ret = -EIO;
+ goto out;
}
if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
@@ -347,9 +429,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
ring->head = I915_READ_HEAD(ring);
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring_space(ring);
+ ring->last_retired_head = -1;
}
- return 0;
+out:
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_put(dev_priv);
+
+ return ret;
}
static int
@@ -375,7 +462,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true);
+ ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret)
goto err_unref;
@@ -426,13 +513,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
- if (INTEL_INFO(dev)->gen > 3) {
+ if (INTEL_INFO(dev)->gen > 3)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
- if (IS_GEN7(dev))
- I915_WRITE(GFX_MODE_GEN7,
- _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
- _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- }
+
+ /* We need to disable the AsyncFlip performance optimisations in order
+ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+ * programmed to '1' on all products.
+ */
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+ /* Required for the hardware to program scanline values for waiting */
+ if (INTEL_INFO(dev)->gen == 6)
+ I915_WRITE(GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
if (INTEL_INFO(dev)->gen >= 5) {
ret = init_pipe_control(ring);
@@ -460,28 +559,32 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+ if (HAS_L3_GPU_CACHE(dev))
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+
return ret;
}
static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
+ struct drm_device *dev = ring->dev;
+
if (!ring->private)
return;
+ if (HAS_BROKEN_CS_TLB(dev))
+ drm_gem_object_unreference(to_gem_object(ring->private));
+
cleanup_pipe_control(ring);
}
static void
update_mboxes(struct intel_ring_buffer *ring,
- u32 seqno,
u32 mmio_offset)
{
- intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_REGISTER |
- MI_SEMAPHORE_UPDATE);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
}
/**
@@ -494,8 +597,7 @@ update_mboxes(struct intel_ring_buffer *ring,
* This acts like a signal in the canonical semaphore.
*/
static int
-gen6_add_request(struct intel_ring_buffer *ring,
- u32 *seqno)
+gen6_add_request(struct intel_ring_buffer *ring)
{
u32 mbox1_reg;
u32 mbox2_reg;
@@ -508,13 +610,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
- *seqno = i915_gem_next_request_seqno(ring);
-
- update_mboxes(ring, *seqno, mbox1_reg);
- update_mboxes(ring, *seqno, mbox2_reg);
+ update_mboxes(ring, mbox1_reg);
+ update_mboxes(ring, mbox2_reg);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, *seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
@@ -544,10 +644,8 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
*/
seqno -= 1;
- if (signaller->semaphore_register[waiter->id] ==
- MI_SEMAPHORE_SYNC_INVALID)
- printf("gen6_ring_sync semaphore_register %d invalid\n",
- waiter->id);
+ WARN_ON(signaller->semaphore_register[waiter->id] ==
+ MI_SEMAPHORE_SYNC_INVALID);
ret = intel_ring_begin(waiter, 4);
if (ret)
@@ -563,13 +661,6 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
return 0;
}
-int render_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller, u32 seqno);
-int gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller, u32 seqno);
-int gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller, u32 seqno);
-
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
@@ -580,10 +671,8 @@ do { \
} while (0)
static int
-pc_render_add_request(struct intel_ring_buffer *ring,
- uint32_t *result)
+pc_render_add_request(struct intel_ring_buffer *ring)
{
- u32 seqno = i915_gem_next_request_seqno(ring);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
@@ -604,7 +693,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
@@ -617,48 +706,41 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
+
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
- *result = seqno;
return 0;
}
static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
- struct drm_device *dev = ring->dev;
-
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
- if (/* IS_GEN6(dev) || */IS_GEN7(dev))
+ if (!lazy_coherency)
intel_ring_get_active_head(ring);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static u32
-ring_get_seqno(struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
- if (ring->status_page.page_addr == NULL)
- return (-1);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring)
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
struct pipe_control *pc = ring->private;
- if (pc != NULL)
- return pc->cpu_page[0];
- else
- return (-1);
+ return pc->cpu_page[0];
}
static bool
@@ -670,12 +752,13 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return false;
- mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+ mtx_lock(&dev_priv->irq_lock);
if (ring->irq_refcount++ == 0) {
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
+ mtx_unlock(&dev_priv->irq_lock);
return true;
}
@@ -686,12 +769,13 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+ mtx_lock(&dev_priv->irq_lock);
if (--ring->irq_refcount == 0) {
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
+ mtx_unlock(&dev_priv->irq_lock);
}
static bool
@@ -703,12 +787,13 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return false;
- mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+ mtx_lock(&dev_priv->irq_lock);
if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
}
+ mtx_unlock(&dev_priv->irq_lock);
return true;
}
@@ -719,12 +804,13 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+ mtx_lock(&dev_priv->irq_lock);
if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
}
+ mtx_unlock(&dev_priv->irq_lock);
}
static bool
@@ -736,12 +822,13 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return false;
- mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+ mtx_lock(&dev_priv->irq_lock);
if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
}
+ mtx_unlock(&dev_priv->irq_lock);
return true;
}
@@ -752,18 +839,19 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+ mtx_lock(&dev_priv->irq_lock);
if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
}
+ mtx_unlock(&dev_priv->irq_lock);
}
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 mmio = 0;
/* The ring status page addresses are no longer next to the rest of
@@ -781,7 +869,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
mmio = BSD_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(dev)) {
+ } else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
mmio = RING_HWS_PGA(ring->mmio_base);
@@ -809,25 +897,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-i9xx_add_request(struct intel_ring_buffer *ring,
- u32 *result)
+i9xx_add_request(struct intel_ring_buffer *ring)
{
- u32 seqno;
int ret;
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- seqno = i915_gem_next_request_seqno(ring);
-
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
- *result = seqno;
return 0;
}
@@ -840,15 +923,23 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return false;
+ /* It looks like we need to prevent the gt from suspending while waiting
+ * for an notifiy irq, otherwise irqs seem to get lost on at least the
+ * blt/bsd rings on ivb. */
gen6_gt_force_wake_get(dev_priv);
- mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+ mtx_lock(&dev_priv->irq_lock);
if (ring->irq_refcount++ == 0) {
- I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
+ GEN6_RENDER_L3_PARITY_ERROR));
+ else
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
+ mtx_unlock(&dev_priv->irq_lock);
return true;
}
@@ -859,20 +950,25 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- mtx_assert(&dev_priv->irq_lock, MA_OWNED);
+ mtx_lock(&dev_priv->irq_lock);
if (--ring->irq_refcount == 0) {
- I915_WRITE_IMR(ring, ~0);
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+ else
+ I915_WRITE_IMR(ring, ~0);
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
+ mtx_unlock(&dev_priv->irq_lock);
gen6_gt_force_wake_put(dev_priv);
}
static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 length)
+ u32 offset, u32 length,
+ unsigned flags)
{
int ret;
@@ -883,35 +979,71 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
- MI_BATCH_NON_SECURE_I965);
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
return 0;
}
+/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
+ if (flags & I915_DISPATCH_PINNED) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ } else {
+ struct drm_i915_gem_object *obj = ring->private;
+ u32 cs_offset = obj->gtt_offset;
+
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ ret = intel_ring_begin(ring, 9+3);
+ if (ret)
+ return ret;
+ /* Blit the batch (which has now all relocs applied) to the stable batch
+ * scratch bo area (so that the CS never stumbles over its tlb
+ * invalidation bug) ... */
+ intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+ XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 4096);
+ intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_FLUSH);
+
+ /* ... and execute it. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, cs_offset + len - 8);
+ intel_ring_advance(ring);
+ }
return 0;
}
static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
@@ -920,7 +1052,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
return 0;
@@ -957,7 +1089,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true);
+ ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
@@ -965,6 +1097,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = (void *)kva_alloc(PAGE_SIZE);
if (ring->status_page.page_addr == NULL) {
+ ret = -ENOMEM;
goto err_unpin;
}
pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
@@ -988,22 +1121,55 @@ err:
return ret;
}
+static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 addr;
+
+ if (!dev_priv->status_page_dmah) {
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
+ }
+
+ addr = dev_priv->status_page_dmah->busaddr;
+ if (INTEL_INFO(ring->dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
+
static int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = 32 * PAGE_SIZE;
+ memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
+
+#ifdef __linux__
+ init_waitqueue_head(&ring->irq_queue);
+#endif
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
if (ret)
return ret;
+ } else {
+ BUG_ON(ring->id != RCS);
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
}
obj = i915_gem_alloc_object(dev, ring->size);
@@ -1015,13 +1181,18 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
- ring->virtual_start = pmap_mapdev_attr(
- dev->agp->base + obj->gtt_offset, ring->size,
- VM_MEMATTR_WRITE_COMBINING);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+
+ ring->virtual_start =
+ pmap_mapdev_attr(
+ dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, ring->size,
+ VM_MEMATTR_WRITE_COMBINING);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
ret = -EINVAL;
@@ -1064,7 +1235,11 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
- ret = intel_wait_ring_idle(ring);
+ ret = intel_ring_idle(ring);
+ if (ret)
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
I915_WRITE_CTL(ring, 0);
pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size);
@@ -1081,20 +1256,9 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- bool was_interruptible;
int ret;
- /* XXX As we have not yet audited all the paths to check that
- * they are ready for ERESTARTSYS from intel_ring_begin, do not
- * allow us to be interruptible by a signal.
- */
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- ret = i915_wait_request(ring, seqno);
-
- dev_priv->mm.interruptible = was_interruptible;
+ ret = i915_wait_seqno(ring, seqno);
if (!ret)
i915_gem_retire_requests_ring(ring);
@@ -1123,7 +1287,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
if (request->tail == -1)
continue;
- space = request->tail - (ring->tail + 8);
+ space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
if (space >= n) {
@@ -1146,23 +1310,23 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
if (ret)
return ret;
- if (ring->last_retired_head == -1)
+ if (WARN_ON(ring->last_retired_head == -1))
return -ENOSPC;
ring->head = ring->last_retired_head;
ring->last_retired_head = -1;
ring->space = ring_space(ring);
- if (ring->space < n)
+ if (WARN_ON(ring->space < n))
return -ENOSPC;
return 0;
}
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int end;
+ unsigned long end;
int ret;
ret = intel_ring_wait_request(ring, n);
@@ -1175,7 +1339,7 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
* to running on almost untested codepaths). But on resume
* timers don't work yet, so prevent a complete hang in that
* case by choosing an insanely large timeout. */
- end = ticks + hz * 60;
+ end = jiffies + 60 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
@@ -1191,23 +1355,25 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
}
- pause("915rng", 1);
- if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) {
+ DRM_MSLEEP(1);
+
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret) {
CTR1(KTR_DRM, "ring_wait_end %s wedged", ring->name);
- return -EAGAIN;
+ return ret;
}
- } while (!time_after(ticks, end));
+ } while (!time_after(jiffies, end));
CTR1(KTR_DRM, "ring_wait_end %s busy", ring->name);
return -EBUSY;
}
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
- uint32_t *virt;
+ uint32_t __iomem *virt;
int rem = ring->size - ring->tail;
if (ring->space < rem) {
- int ret = intel_wait_ring_buffer(ring, rem);
+ int ret = ring_wait_for_space(ring, rem);
if (ret)
return ret;
}
@@ -1215,7 +1381,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
virt = (uint32_t *)((char *)ring->virtual_start + ring->tail);
rem /= 4;
while (rem--)
- *virt++ = MI_NOOP;
+ iowrite32(MI_NOOP, virt++);
ring->tail = 0;
ring->space = ring_space(ring);
@@ -1223,25 +1389,63 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
return 0;
}
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ int ret;
+
+ /* We need to add any requests required to flush the objects and ring */
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring, NULL, NULL);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait upon the last request to be completed */
+ if (list_empty(&ring->request_list))
+ return 0;
+
+ seqno = list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request,
+ list)->seqno;
+
+ return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->outstanding_lazy_request)
+ return 0;
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
+
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;
- if (atomic_load_acq_int(&dev_priv->mm.wedged))
- return -EIO;
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+
+ /* Preallocate the olr before touching the ring */
+ ret = intel_ring_alloc_seqno(ring);
+ if (ret)
+ return ret;
- if (ring->tail + n > ring->effective_size) {
+ if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
- if (ret != 0)
+ if (unlikely(ret))
return ret;
}
- if (ring->space < n) {
- ret = intel_wait_ring_buffer(ring, n);
- if (ret != 0)
+ if (unlikely(ring->space < n)) {
+ ret = ring_wait_for_space(ring, n);
+ if (unlikely(ret))
return ret;
}
@@ -1265,22 +1469,32 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- /* Every tail move must follow the sequence below */
+ /* Every tail move must follow the sequence below */
+
+ /* Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
- I915_WRITE(GEN6_BSD_RNCID, 0x0);
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
- if (_intel_wait_for(ring->dev,
- (I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
- GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, 50,
- true, "915g6i") != 0)
- DRM_ERROR("timed out waiting for IDLE Indicator\n");
+ /* Clear the context id. Here be magic! */
+ I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
+ if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+
+ /* Now that the ring is fully powered up, update the tail */
I915_WRITE_TAIL(ring, value);
+ POSTING_READ(RING_TAIL(ring->mmio_base));
+
+ /* Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
static int gen6_ring_flush(struct intel_ring_buffer *ring,
@@ -1294,10 +1508,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.5 - video engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
if (invalidate & I915_GEM_GPU_DOMAINS)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+ MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1305,8 +1526,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
}
static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
@@ -1314,7 +1557,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -1335,10 +1580,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
if (invalidate & I915_GEM_DOMAIN_RENDER)
- cmd |= MI_INVALIDATE_TLB;
+ cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1348,7 +1600,7 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
ring->name = "render ring";
ring->id = RCS;
@@ -1356,7 +1608,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
- ring->flush = gen6_render_ring_flush;
+ ring->flush = gen7_render_ring_flush;
+ if (INTEL_INFO(dev)->gen == 6)
+ ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
@@ -1391,7 +1645,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 6)
+ if (IS_HASWELL(dev))
+ ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@@ -1402,10 +1658,25 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
+ /* Workaround batchbuffer to combat CS tlb bug. */
+ if (HAS_BROKEN_CS_TLB(dev)) {
+ struct drm_i915_gem_object *obj;
+ int ret;
- if (!I915_NEED_GFX_HWS(dev)) {
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate batch bo\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_object_pin(obj, 0, true, false);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to ping batch bo\n");
+ return ret;
+ }
+
+ ring->private = obj;
}
return intel_init_ring_buffer(dev, ring);
@@ -1414,7 +1685,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
ring->name = "render ring";
ring->id = RCS;
@@ -1455,11 +1727,10 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = size;
ring->effective_size = ring->size;
- if (IS_I830(ring->dev))
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
ring->virtual_start = pmap_mapdev_attr(start, size,
@@ -1470,13 +1741,19 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
return -ENOMEM;
}
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[VCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
ring->name = "bsd ring";
ring->id = VCS;
@@ -1518,14 +1795,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
ring->init = init_ring_common;
-
return intel_init_ring_buffer(dev, ring);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
ring->name = "blitter ring";
ring->id = BCS;
@@ -1549,3 +1825,37 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ if (!ring->gpu_caches_dirty)
+ return 0;
+
+ ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+ uint32_t flush_domains;
+ int ret;
+
+ flush_domains = 0;
+ if (ring->gpu_caches_dirty)
+ flush_domains = I915_GEM_GPU_DOMAINS;
+
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ if (ret)
+ return ret;
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
diff --git a/sys/dev/drm2/i915/intel_ringbuffer.h b/sys/dev/drm2/i915/intel_ringbuffer.h
index e5e52ae..fb6707f 100644
--- a/sys/dev/drm2/i915/intel_ringbuffer.h
+++ b/sys/dev/drm2/i915/intel_ringbuffer.h
@@ -1,10 +1,9 @@
-/*
- * $FreeBSD$
- */
-
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@@ -50,7 +49,7 @@ struct intel_ring_buffer {
} id;
#define I915_NUM_RINGS 3
u32 mmio_base;
- void *virtual_start;
+ void __iomem *virtual_start;
struct drm_device *dev;
struct drm_i915_gem_object *obj;
@@ -75,21 +74,28 @@ struct intel_ring_buffer {
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
- bool (*irq_get)(struct intel_ring_buffer *ring);
+ bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);
int (*init)(struct intel_ring_buffer *ring);
void (*write_tail)(struct intel_ring_buffer *ring,
u32 value);
- int (*flush)(struct intel_ring_buffer *ring,
+ int __must_check (*flush)(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_ring_buffer *ring,
- uint32_t *seqno);
- uint32_t (*get_seqno)(struct intel_ring_buffer *ring);
+ int (*add_request)(struct intel_ring_buffer *ring);
+ /* Some chipsets are not quite as coherent as advertised and need
+ * an expensive kick to force a true read of the up-to-date seqno.
+ * However, the up-to-date seqno is not always required and the last
+ * seen value is good enough. Note that the seqno will always be
+ * monotonic, even if not coherent.
+ */
+ u32 (*get_seqno)(struct intel_ring_buffer *ring,
+ bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
- u32 offset, u32 length);
+ u32 offset, u32 length,
+ unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_ring_buffer *ring);
@@ -118,18 +124,12 @@ struct intel_ring_buffer {
struct list_head request_list;
/**
- * List of objects currently pending a GPU write flush.
- *
- * All elements on this list will belong to either the
- * active_list or flushing_list, last_rendering_seqno can
- * be used to differentiate between the two elements.
- */
- struct list_head gpu_write_list;
-
- /**
* Do we have some not yet emitted requests outstanding?
*/
u32 outstanding_lazy_request;
+ bool gpu_caches_dirty;
+
+ wait_queue_head_t irq_queue;
/**
* Do an explicit TLB flush before MI_SET_CONTEXT
@@ -138,8 +138,6 @@ struct intel_ring_buffer {
struct i915_hw_context *default_context;
struct drm_i915_gem_object *last_context_obj;
- drm_local_map_t map;
-
void *private;
};
@@ -179,32 +177,43 @@ intel_read_status_page(struct intel_ring_buffer *ring,
int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
- __compiler_membar();
+ barrier();
return atomic_load_acq_32(ring->status_page.page_addr + reg);
}
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
-
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
-static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
-{
-
- return (intel_wait_ring_buffer(ring, ring->size - 8));
-}
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define I915_GEM_HWS_INDEX 0x20
+#define I915_GEM_HWS_SCRATCH_INDEX 0x30
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-int intel_ring_begin(struct intel_ring_buffer *ring, int n);
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{
- *(volatile uint32_t *)((char *)ring->virtual_start +
- ring->tail) = data;
+ iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
-
void intel_ring_advance(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-uint32_t intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -218,7 +227,19 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
return ring->tail;
}
-void i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno);
+static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+ BUG_ON(ring->outstanding_lazy_request == 0);
+ return ring->outstanding_lazy_request;
+}
+
+#ifdef __linux__
+static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+{
+ if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+ ring->trace_irq_seqno = seqno;
+}
+#endif
/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
diff --git a/sys/dev/drm2/i915/intel_sdvo.c b/sys/dev/drm2/i915/intel_sdvo.c
index 91056d7..78b60d5 100644
--- a/sys/dev/drm2/i915/intel_sdvo.c
+++ b/sys/dev/drm2/i915/intel_sdvo.c
@@ -25,18 +25,16 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
-
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/drm_crtc.h>
#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_sdvo_regs.h>
-#include <dev/drm2/i915/intel_drv.h>
#include <dev/iicbus/iic.h>
#include <dev/iicbus/iiconf.h>
#include "iicbus_if.h"
@@ -100,7 +98,7 @@ struct intel_sdvo {
/*
* Hotplug activation bits for this device
*/
- uint8_t hotplug_active[2];
+ uint16_t hotplug_active;
/**
* This is used to select the color range of RBG outputs in HDMI mode.
@@ -144,8 +142,10 @@ struct intel_sdvo {
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
- /* Input timings for adjusted_mode */
- struct intel_sdvo_dtd input_dtd;
+ /*
+ * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
+ */
+ uint8_t dtd_sdvo_flags;
};
struct intel_sdvo_connector {
@@ -276,14 +276,14 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
},
{
.slave = intel_sdvo->slave_addr << 1,
- .flags = IIC_M_RD,
+ .flags = I2C_M_RD,
.len = 1,
.buf = ch,
}
};
int ret;
- if ((ret = iicbus_transfer(intel_sdvo->i2c, msgs, 2)) == 0)
+ if ((ret = -iicbus_transfer(intel_sdvo->i2c, msgs, 2)) == 0)
return true;
DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
@@ -416,22 +416,21 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
{
int i;
- if ((drm_debug & DRM_DEBUGBITS_KMS) == 0)
- return;
- DRM_DEBUG_KMS("%s: W: %02X ", SDVO_NAME(intel_sdvo), cmd);
+ DRM_DEBUG_KMS("%s: W: %02X ",
+ SDVO_NAME(intel_sdvo), cmd);
for (i = 0; i < args_len; i++)
- printf("%02X ", ((const u8 *)args)[i]);
+ DRM_LOG_KMS("%02X ", ((const u8 *)args)[i]);
for (; i < 8; i++)
- printf(" ");
+ DRM_LOG_KMS(" ");
for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
- printf("(%s)", sdvo_cmd_names[i].name);
+ DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
break;
}
}
if (i == ARRAY_SIZE(sdvo_cmd_names))
- printf("(%02X)", cmd);
- printf("\n");
+ DRM_LOG_KMS("(%02X)", cmd);
+ DRM_LOG_KMS("\n");
}
static const char *cmd_status_names[] = {
@@ -444,13 +443,23 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static bool
-intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args,
- int args_len)
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
{
- u8 buf[args_len*2 + 2], status;
- struct iic_msg msgs[args_len + 3];
- int i, ret;
+ u8 *buf, status;
+ struct iic_msg *msgs;
+ int i, ret = true;
+
+ /* Would be simpler to allocate both in one go ? */
+ buf = (u8 *)malloc(args_len * 2 + 2, DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (!buf)
+ return false;
+
+ msgs = malloc(args_len + 3 * sizeof(*msgs), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
+ if (!msgs) {
+ free(buf, DRM_MEM_KMS);
+ return false;
+ }
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
@@ -477,31 +486,27 @@ intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args,
msgs[i+1].buf = &status;
msgs[i+2].slave = intel_sdvo->slave_addr << 1;
- msgs[i+2].flags = IIC_M_RD;
+ msgs[i+2].flags = I2C_M_RD;
msgs[i+2].len = 1;
msgs[i+2].buf = &status;
- ret = iicbus_transfer(intel_sdvo->i2c, msgs, i+3);
- if (ret != 0) {
+ ret = -iicbus_transfer(intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
- return (false);
- }
-#if 0
- if (ret != i+3) {
- /* failure in I2C transfer */
- DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
- return false;
+ ret = false;
+ goto out;
}
-#endif
- return true;
+out:
+ free(msgs, DRM_MEM_KMS);
+ free(buf, DRM_MEM_KMS);
+ return ret;
}
-static bool
-intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response,
- int response_len)
+static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+ void *response, int response_len)
{
- u8 retry = 5;
+ u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i;
@@ -514,23 +519,37 @@ intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response,
* command to be complete.
*
* Check 5 times in case the hardware failed to read the docs.
+ *
+ * Also beware that the first response by many devices is to
+ * reply PENDING and stall for time. TVs are notorious for
+ * requiring longer than specified to complete their replies.
+ * Originally (in the DDX long ago), the delay was only ever 15ms
+ * with an additional delay of 30ms applied for TVs added later after
+ * many experiments. To accommodate both sets of delays, we do a
+ * sequence of slow checks if the device is falling behind and fails
+ * to reply within 5*15µs.
*/
- if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status))
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
goto log_fail;
- while (status == SDVO_CMD_STATUS_PENDING && retry--) {
- DELAY(15);
+ while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+ if (retry < 10)
+ DRM_MSLEEP(15);
+ else
+ udelay(15);
+
if (!intel_sdvo_read_byte(intel_sdvo,
- SDVO_I2C_CMD_STATUS, &status))
+ SDVO_I2C_CMD_STATUS,
+ &status))
goto log_fail;
}
- if ((drm_debug & DRM_DEBUGBITS_KMS) != 0) {
- if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- printf("(%s)", cmd_status_names[status]);
- else
- printf("(??? %d)", status);
- }
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ else
+ DRM_LOG_KMS("(??? %d)", status);
if (status != SDVO_CMD_STATUS_SUCCESS)
goto log_fail;
@@ -541,17 +560,14 @@ intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response,
SDVO_I2C_RETURN_0 + i,
&((u8 *)response)[i]))
goto log_fail;
- if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)
- printf(" %02X", ((u8 *)response)[i]);
+ DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
}
- if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)
- printf("\n");
- return (true);
+ DRM_LOG_KMS("\n");
+ return true;
log_fail:
- if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)
- printf("... failed\n");
- return (false);
+ DRM_LOG_KMS("... failed\n");
+ return false;
}
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -608,7 +624,7 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i
{
struct intel_sdvo_get_trained_inputs_response response;
- CTASSERT(sizeof(response) == 1);
+ BUILD_BUG_ON(sizeof(response) != 1);
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
&response, sizeof(response)))
return false;
@@ -626,6 +642,14 @@ static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
&outputs, sizeof(outputs));
}
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 *outputs)
+{
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ACTIVE_OUTPUTS,
+ outputs, sizeof(*outputs));
+}
+
static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
int mode)
{
@@ -656,7 +680,7 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo
{
struct intel_sdvo_pixel_clock_range clocks;
- CTASSERT(sizeof(clocks) == 4);
+ BUILD_BUG_ON(sizeof(clocks) != 4);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
&clocks, sizeof(clocks)))
@@ -724,8 +748,8 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
- CTASSERT(sizeof(dtd->part1) == 8);
- CTASSERT(sizeof(dtd->part2) == 8);
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
&dtd->part1, sizeof(dtd->part1)) &&
intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
@@ -748,7 +772,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
width = mode->hdisplay;
height = mode->vdisplay;
- /* do some mode translations */
+ /* do some mode translations */
h_blank_len = mode->htotal - mode->hdisplay;
h_sync_len = mode->hsync_end - mode->hsync_start;
@@ -781,10 +805,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
((v_sync_len & 0x30) >> 4);
dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- dtd->part2.dtd_flags |= 0x2;
+ dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- dtd->part2.dtd_flags |= 0x4;
+ dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -818,9 +844,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->clock = dtd->part1.clock * 10;
mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
- if (dtd->part2.dtd_flags & 0x2)
+ if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
- if (dtd->part2.dtd_flags & 0x4)
+ if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
@@ -828,7 +856,7 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
{
struct intel_sdvo_encode encode;
- CTASSERT(sizeof(encode) == 2);
+ BUILD_BUG_ON(sizeof(encode) != 2);
return intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPP_ENCODE,
&encode, sizeof(encode));
@@ -876,6 +904,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
}
#endif
+static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned if_index, uint8_t tx_rate,
+ uint8_t *data, unsigned length)
+{
+ uint8_t set_buf_index[2] = { if_index, 0 };
+ uint8_t hbuf_size, tmp[8];
+ int i;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ &hbuf_size, 1))
+ return false;
+
+ /* Buffer size is 0 based, hooray! */
+ hbuf_size++;
+
+ DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
+
+ for (i = 0; i < hbuf_size; i += 8) {
+ memset(tmp, 0, 8);
+ if (i < length)
+ memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ tmp, 8))
+ return false;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+}
+
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
{
struct dip_infoframe avi_if = {
@@ -883,11 +950,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
- uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
- uint8_t set_buf_index[2] = { 1, 0 };
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
- uint64_t *data = (uint64_t *)sdvo_data;
- unsigned i;
intel_dip_infoframe_csum(&avi_if);
@@ -897,22 +960,9 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
sdvo_data[3] = avi_if.checksum;
memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
- if (!intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return false;
-
- for (i = 0; i < sizeof(sdvo_data); i += 8) {
- if (!intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_DATA,
- data, 8))
- return false;
- data++;
- }
-
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_TXRATE,
- &tx_rate, 1);
+ return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ SDVO_HBUF_TX_VSYNC,
+ sdvo_data, sizeof(sdvo_data));
}
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
@@ -924,7 +974,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
memset(&format, 0, sizeof(format));
memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
- CTASSERT(sizeof(format) == 6);
+ BUILD_BUG_ON(sizeof(format) != 6);
return intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_TV_FORMAT,
&format, sizeof(format));
@@ -947,11 +997,15 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
return true;
}
+/* Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that. */
static bool
-intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
+ struct intel_sdvo_dtd input_dtd;
+
/* Reset the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
return false;
@@ -963,10 +1017,11 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
return false;
if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
- &intel_sdvo->input_dtd))
+ &input_dtd))
return false;
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
return true;
}
@@ -987,17 +1042,17 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
return false;
- (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
- mode,
- adjusted_mode);
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
} else if (intel_sdvo->is_lvds) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo->sdvo_lvds_fixed_mode))
return false;
- (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
- mode,
- adjusted_mode);
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
}
/* Make the CRTC code factor in the SDVO pixel multiplier. The
@@ -1051,7 +1106,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo->sdvo_lvds_fixed_mode);
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
- (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ DRM_INFO("Setting output timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
@@ -1073,7 +1130,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+ input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
+ if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
+ DRM_INFO("Setting input timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
switch (pixel_multiplier) {
default:
@@ -1128,51 +1189,170 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
-static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_device *dev = encoder->dev;
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+ u16 active_outputs;
+
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+ if (active_outputs & intel_sdvo_connector->output_flag)
+ return true;
+ else
+ return false;
+}
+
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u16 active_outputs;
+ u32 tmp;
+
+ tmp = I915_READ(intel_sdvo->sdvo_reg);
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+ if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_disable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
u32 temp;
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_OFF);
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ /* HW workaround for IBX, we need to move the port to
+ * transcoder A before disabling it. */
+ if (HAS_PCH_IBX(encoder->base.dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(encoder->base.dev, pipe);
+ else
+ DRM_MSLEEP(50);
+ }
+ }
+
+ intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+}
+
+static void intel_enable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ u32 temp;
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0) {
+ /* HW workaround for IBX, we need to move the port
+ * to transcoder A before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ temp |= SDVO_PIPE_B_SELECT;
+ }
+
+ intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ }
+ for (i = 0; i < 2; i++)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+ * A lot of SDVO devices fail to notify of sync, but it's
+ * a given it the status is a success, we succeeded.
+ */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
+ }
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_ON);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_crtc *crtc;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_sdvo->base.base.crtc;
+ if (!crtc) {
+ intel_sdvo->base.connectors_active = false;
+ return;
+ }
+
if (mode != DRM_MODE_DPMS_ON) {
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
- if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
- }
- }
+ intel_sdvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
} else {
- bool input1, input2;
- int i;
- u8 status;
-
- temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
- /* Warn if the device reported failure to sync.
- * A lot of SDVO devices fail to notify of sync, but it's
- * a given it the status is a success, we succeeded.
- */
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(intel_sdvo));
- }
+ intel_sdvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
- return;
+
+ intel_modeset_check_state(connector->dev);
}
static int intel_sdvo_mode_valid(struct drm_connector *connector,
@@ -1202,7 +1382,7 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
- CTASSERT(sizeof(*caps) == 8);
+ BUILD_BUG_ON(sizeof(*caps) != 8);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_DEVICE_CAPS,
caps, sizeof(*caps)))
@@ -1237,18 +1417,21 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
-static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
+static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
struct drm_device *dev = intel_sdvo->base.base.dev;
- u8 response[2];
+ uint16_t hotplug;
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
- return false;
+ return 0;
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- &response, 2) && response[0];
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &hotplug, sizeof(hotplug)))
+ return 0;
+
+ return hotplug;
}
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
@@ -1256,7 +1439,7 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
- &intel_sdvo->hotplug_active, 2);
+ &intel_sdvo->hotplug_active, 2);
}
static bool
@@ -1364,15 +1547,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
- if (!intel_sdvo_write_cmd(intel_sdvo,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
- return connector_status_unknown;
-
- /* add 30ms delay when the output type might be TV */
- if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
- drm_msleep(30, "915svo");
-
- if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS,
+ &response, 2))
return connector_status_unknown;
DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@ -1536,7 +1713,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
return;
- CTASSERT(sizeof(tv_res) == 3);
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
if (!intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res)))
@@ -1566,7 +1743,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* arranged in priority order.
*/
intel_ddc_get_modes(connector, intel_sdvo->i2c);
- if (!list_empty(&connector->probed_modes))
+ if (list_empty(&connector->probed_modes) == false)
goto end;
/* Fetch modes from VBT */
@@ -1659,11 +1836,8 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
intel_sdvo_connector->tv_format);
intel_sdvo_destroy_enhance_property(connector);
-#if 0
- drm_sysfs_connector_remove(connector);
-#endif
drm_connector_cleanup(connector);
- free(connector, DRM_MEM_KMS);
+ free(intel_sdvo_connector, DRM_MEM_KMS);
}
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1678,6 +1852,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
edid = intel_sdvo_get_edid(connector);
if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
+ free(edid, DRM_MEM_KMS);
return has_audio;
}
@@ -1822,8 +1997,8 @@ set_value:
done:
if (intel_sdvo->base.base.crtc) {
struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -1831,15 +2006,13 @@ done:
}
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
- .dpms = intel_sdvo_dpms,
.mode_fixup = intel_sdvo_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_sdvo_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_sdvo_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
@@ -1941,17 +2114,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
else
mapping = &dev_priv->sdvo_mappings[1];
- pin = GMBUS_PORT_DPB;
- if (mapping->initialized)
+ if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
pin = mapping->i2c_pin;
+ else
+ pin = GMBUS_PORT_DPB;
- if (intel_gmbus_is_port_valid(pin)) {
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
- intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
- intel_gmbus_force_bit(sdvo->i2c, true);
- } else {
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
- }
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+ /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+ * our code totally fails once we start using gmbus. Hence fall back to
+ * bit banging for now. */
+ intel_gmbus_force_bit(sdvo->i2c, true);
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+ intel_gmbus_force_bit(sdvo->i2c, false);
}
static bool
@@ -2012,11 +2192,9 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
connector->base.base.interlace_allowed = 1;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
intel_connector_attach_encoder(&connector->base, &encoder->base);
-#if 0
- drm_sysfs_connector_add(&connector->base.base);
-#endif
}
static void
@@ -2038,8 +2216,9 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
- DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_sdvo_connector)
+ return false;
if (device == 0) {
intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
@@ -2051,17 +2230,18 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+ if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+ intel_sdvo_connector->output_flag) {
connector->polled = DRM_CONNECTOR_POLL_HPD;
- intel_sdvo->hotplug_active[0] |= 1 << device;
+ intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
/* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
- }
- else
+ } else {
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ }
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2069,8 +2249,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
@@ -2087,8 +2265,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
- DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
if (!intel_sdvo_connector)
return false;
@@ -2102,7 +2279,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
@@ -2127,8 +2303,9 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
- DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_sdvo_connector)
+ return false;
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
@@ -2144,9 +2321,6 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
-
intel_sdvo_connector_init(intel_sdvo_connector,
intel_sdvo);
return true;
@@ -2160,8 +2334,9 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector),
- DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ intel_sdvo_connector = malloc(sizeof(struct intel_sdvo_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_sdvo_connector)
+ return false;
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
@@ -2176,9 +2351,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT));
-
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
@@ -2251,6 +2423,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
return true;
}
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector, *tmp;
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (intel_attached_encoder(connector) == &intel_sdvo->base)
+ intel_sdvo_destroy(connector);
+ }
+}
+
static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
int type)
@@ -2262,7 +2446,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_set_target_output(intel_sdvo, type))
return false;
- CTASSERT(sizeof(format) == 6);
+ BUILD_BUG_ON(sizeof(format) != 6);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
&format, sizeof(format)))
@@ -2455,7 +2639,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
uint16_t response;
} enhancements;
- CTASSERT(sizeof(enhancements) == 2);
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
enhancements.response = 0;
intel_sdvo_get_value(intel_sdvo,
@@ -2503,14 +2687,10 @@ intel_sdvo_ddc_proxy_attach(device_t idev)
static int
intel_sdvo_ddc_proxy_detach(device_t idev)
{
- struct intel_sdvo_ddc_proxy_sc *sc;
- device_t port;
- sc = device_get_softc(idev);
- port = sc->port;
bus_generic_detach(idev);
- if (port != NULL)
- device_delete_child(idev, port);
+ device_delete_children(idev);
+
return (0);
}
@@ -2520,7 +2700,7 @@ intel_sdvo_ddc_proxy_reset(device_t idev, u_char speed, u_char addr,
{
struct intel_sdvo_ddc_proxy_sc *sc;
struct intel_sdvo *sdvo;
-
+
sc = device_get_softc(idev);
sdvo = sc->intel_sdvo;
@@ -2528,39 +2708,57 @@ intel_sdvo_ddc_proxy_reset(device_t idev, u_char speed, u_char addr,
oldaddr));
}
-static int
-intel_sdvo_ddc_proxy_transfer(device_t idev, struct iic_msg *msgs, uint32_t num)
+static int intel_sdvo_ddc_proxy_xfer(device_t adapter,
+ struct iic_msg *msgs,
+ uint32_t num)
{
struct intel_sdvo_ddc_proxy_sc *sc;
struct intel_sdvo *sdvo;
-
- sc = device_get_softc(idev);
+
+ sc = device_get_softc(adapter);
sdvo = sc->intel_sdvo;
if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
- return (EIO);
+ return EIO;
return (iicbus_transfer(sdvo->i2c, msgs, num));
}
+static device_method_t intel_sdvo_ddc_proxy_methods[] = {
+ DEVMETHOD(device_probe, intel_sdvo_ddc_proxy_probe),
+ DEVMETHOD(device_attach, intel_sdvo_ddc_proxy_attach),
+ DEVMETHOD(device_detach, intel_sdvo_ddc_proxy_detach),
+ DEVMETHOD(iicbus_reset, intel_sdvo_ddc_proxy_reset),
+ DEVMETHOD(iicbus_transfer, intel_sdvo_ddc_proxy_xfer),
+ DEVMETHOD_END
+};
+static driver_t intel_sdvo_ddc_proxy_driver = {
+ "intel_sdvo_ddc_proxy",
+ intel_sdvo_ddc_proxy_methods,
+ sizeof(struct intel_sdvo_ddc_proxy_sc)
+};
+static devclass_t intel_sdvo_devclass;
+DRIVER_MODULE_ORDERED(intel_sdvo_ddc_proxy, drmn, intel_sdvo_ddc_proxy_driver,
+ intel_sdvo_devclass, 0, 0, SI_ORDER_FIRST);
+
static bool
-intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, struct drm_device *dev,
- int sdvo_reg)
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+ struct drm_device *dev)
{
struct intel_sdvo_ddc_proxy_sc *sc;
int ret;
sdvo->ddc_iic_bus = device_add_child(dev->dev,
- "intel_sdvo_ddc_proxy", sdvo_reg);
+ "intel_sdvo_ddc_proxy", sdvo->sdvo_reg);
if (sdvo->ddc_iic_bus == NULL) {
- DRM_ERROR("cannot create ddc proxy bus %d\n", sdvo_reg);
+ DRM_ERROR("cannot create ddc proxy bus %d\n", sdvo->sdvo_reg);
return (false);
}
device_quiet(sdvo->ddc_iic_bus);
ret = device_probe_and_attach(sdvo->ddc_iic_bus);
if (ret != 0) {
DRM_ERROR("cannot attach proxy bus %d error %d\n",
- sdvo_reg, ret);
+ sdvo->sdvo_reg, ret);
device_delete_child(dev->dev, sdvo->ddc_iic_bus);
return (false);
}
@@ -2568,45 +2766,27 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, struct drm_device *dev,
sc->intel_sdvo = sdvo;
sdvo->ddc = sc->port;
- return (true);
-}
-
-static device_method_t intel_sdvo_ddc_proxy_methods[] = {
- DEVMETHOD(device_probe, intel_sdvo_ddc_proxy_probe),
- DEVMETHOD(device_attach, intel_sdvo_ddc_proxy_attach),
- DEVMETHOD(device_detach, intel_sdvo_ddc_proxy_detach),
- DEVMETHOD(iicbus_reset, intel_sdvo_ddc_proxy_reset),
- DEVMETHOD(iicbus_transfer, intel_sdvo_ddc_proxy_transfer),
- DEVMETHOD_END
-};
-static driver_t intel_sdvo_ddc_proxy_driver = {
- "intel_sdvo_ddc_proxy",
- intel_sdvo_ddc_proxy_methods,
- sizeof(struct intel_sdvo_ddc_proxy_sc)
-};
-static devclass_t intel_sdvo_devclass;
-DRIVER_MODULE_ORDERED(intel_sdvo_ddc_proxy, drmn, intel_sdvo_ddc_proxy_driver,
- intel_sdvo_devclass, 0, 0, SI_ORDER_FIRST);
+ return true;
+}
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
+ u32 hotplug_mask;
int i;
-
- intel_sdvo = malloc(sizeof(struct intel_sdvo), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
+ intel_sdvo = malloc(sizeof(struct intel_sdvo), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_sdvo)
+ return false;
intel_sdvo->sdvo_reg = sdvo_reg;
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
- if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev, sdvo_reg)) {
- free(intel_sdvo, DRM_MEM_KMS);
- return false;
- }
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+ goto err_i2c_bus;
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
@@ -2624,42 +2804,62 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
}
}
- if (intel_sdvo->is_sdvob)
- dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
- else
- dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+ hotplug_mask = 0;
+ if (IS_G4X(dev)) {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
+ } else if (IS_GEN4(dev)) {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
+ } else {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
+ }
drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+ intel_encoder->disable = intel_disable_sdvo;
+ intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+
/* In default case sdvo lvds is false */
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
- /* Set up hotplug command - note paranoia about contents of reply.
- * We assume that the hardware is in a sane state, and only touch
- * the bits we think we understand.
- */
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
- &intel_sdvo->hotplug_active, 2);
- intel_sdvo->hotplug_active[0] &= ~0x3;
-
- if (intel_sdvo_output_setup(intel_sdvo,
- intel_sdvo->caps.output_flags) != true) {
+ if (intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
SDVO_NAME(intel_sdvo));
- goto err;
- }
+ /* Output_setup can leave behind connectors! */
+ goto err_output;
+ }
+
+ /*
+ * Cloning SDVO with anything is often impossible, since the SDVO
+ * encoder can request a special input timing mode. And even if that's
+ * not the case we have evidence that cloning a plain unscaled mode with
+ * VGA doesn't really work. Furthermore the cloning flags are way too
+ * simplistic anyway to express such constraints, so just give up on
+ * cloning for SDVO encoders.
+ */
+ intel_sdvo->base.cloneable = false;
+
+ /* Only enable the hotplug irq if we need it, to work around noisy
+ * hotplug lines.
+ */
+ if (intel_sdvo->hotplug_active)
+ dev_priv->hotplug_supported_mask |= hotplug_mask;
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
- goto err;
+ goto err_output;
if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
&intel_sdvo->pixel_clock_min,
&intel_sdvo->pixel_clock_max))
- goto err;
+ goto err_output;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
@@ -2679,8 +2879,14 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
return true;
+err_output:
+ intel_sdvo_output_cleanup(intel_sdvo);
+
err:
drm_encoder_cleanup(&intel_encoder->base);
+ device_delete_child(dev->dev, intel_sdvo->ddc_iic_bus);
+err_i2c_bus:
+ intel_sdvo_unselect_i2c_bus(intel_sdvo);
free(intel_sdvo, DRM_MEM_KMS);
return false;
diff --git a/sys/dev/drm2/i915/intel_sprite.c b/sys/dev/drm2/i915/intel_sprite.c
index 35f2baa..a8b218a 100644
--- a/sys/dev/drm2/i915/intel_sprite.c
+++ b/sys/dev/drm2/i915/intel_sprite.c
@@ -34,11 +34,11 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
+#include <dev/drm2/drm_crtc.h>
+#include <dev/drm2/drm_fourcc.h>
+#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
-#include <dev/drm2/i915/intel_drv.h>
-#include <dev/drm2/drm_fourcc.h>
static void
ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
@@ -52,7 +52,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
- int pixel_size;
+ unsigned long sprsurf_offset, linear_offset;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
sprctl = I915_READ(SPRCTL(pipe));
@@ -60,37 +61,29 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
sprctl &= ~SPRITE_PIXFORMAT_MASK;
sprctl &= ~SPRITE_RGB_ORDER_RGBX;
sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+ sprctl &= ~SPRITE_TILED;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
- sprctl |= SPRITE_FORMAT_RGBX888;
- pixel_size = 4;
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
break;
case DRM_FORMAT_XRGB8888:
- sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
- pixel_size = 4;
+ sprctl |= SPRITE_FORMAT_RGBX888;
break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
- pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
- pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
- pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
- pixel_size = 2;
break;
default:
- DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- sprctl |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
- break;
+ BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -130,18 +123,27 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE) {
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ sprsurf_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= sprsurf_offset;
+
+ /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
+ * register */
+ if (IS_HASWELL(dev))
+ I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+ else if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
- } else {
- unsigned long offset;
+ else
+ I915_WRITE(SPRLINOFF(pipe), linear_offset);
- offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- I915_WRITE(SPRLINOFF(pipe), offset);
- }
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE(SPRSCALE(pipe), sprscale);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
@@ -155,7 +157,8 @@ ivb_disable_plane(struct drm_plane *plane)
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
- I915_WRITE(SPRSCALE(pipe), 0);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
@@ -228,8 +231,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe, pixel_size;
+ int pipe = intel_plane->pipe;
+ unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -237,37 +242,29 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
dvscntr &= ~DVS_PIXFORMAT_MASK;
dvscntr &= ~DVS_RGB_ORDER_XBGR;
dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+ dvscntr &= ~DVS_TILED;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
- pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
- pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
- pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
- pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
- pixel_size = 2;
break;
default:
- DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- dvscntr |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
- break;
+ BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -291,18 +288,22 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE) {
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ dvssurf_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= dvssurf_offset;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
- } else {
- unsigned long offset;
+ else
+ I915_WRITE(DVSLINOFF(pipe), linear_offset);
- offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- I915_WRITE(DVSLINOFF(pipe), offset);
- }
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
@@ -330,6 +331,12 @@ intel_enable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
+ if (!intel_crtc->primary_disabled)
+ return;
+
+ intel_crtc->primary_disabled = false;
+ intel_update_fbc(dev);
+
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
}
@@ -341,7 +348,13 @@ intel_disable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
+ if (intel_crtc->primary_disabled)
+ return;
+
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+
+ intel_crtc->primary_disabled = true;
+ intel_update_fbc(dev);
}
static int
@@ -412,6 +425,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj, *old_obj;
int pipe = intel_plane->pipe;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@@ -426,7 +441,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_h = src_h >> 16;
/* Pipe must be running... */
- if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+ if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
return -EINVAL;
if (crtc_x >= primary_w || crtc_y >= primary_h)
@@ -436,6 +451,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (intel_plane->pipe != intel_crtc->pipe)
return -EINVAL;
+ /* Sprite planes can be linear or x-tiled surfaces */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ case I915_TILING_X:
+ break;
+ default:
+ return -EINVAL;
+ }
+
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
@@ -463,6 +487,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
goto out;
/*
+ * We may not have a scaler, eg. HSW does not have it any more
+ */
+ if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
+ return -EINVAL;
+
+ /*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
*/
@@ -489,18 +519,14 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* Be sure to re-enable the primary before the sprite is no longer
* covering it fully.
*/
- if (!disable_primary && intel_plane->primary_disabled) {
+ if (!disable_primary)
intel_enable_primary(crtc);
- intel_plane->primary_disabled = false;
- }
intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
crtc_w, crtc_h, x, y, src_w, src_h);
- if (disable_primary) {
+ if (disable_primary)
intel_disable_primary(crtc);
- intel_plane->primary_disabled = true;
- }
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj) {
@@ -531,11 +557,8 @@ intel_disable_plane(struct drm_plane *plane)
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
- if (intel_plane->primary_disabled) {
+ if (plane->crtc)
intel_enable_primary(plane->crtc);
- intel_plane->primary_disabled = false;
- }
-
intel_plane->disable_plane(plane);
if (!intel_plane->obj)
@@ -575,7 +598,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
return -EINVAL;
sx_xlock(&dev->mode_config.mutex);
-
+
obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
ret = -EINVAL;
@@ -655,12 +678,14 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
- intel_plane = malloc(sizeof(struct intel_plane), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
+ intel_plane = malloc(sizeof(struct intel_plane), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_plane)
+ return -ENOMEM;
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
+ intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
@@ -669,14 +694,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
if (IS_GEN6(dev)) {
plane_formats = snb_plane_formats;
- num_plane_formats = DRM_ARRAY_SIZE(snb_plane_formats);
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
} else {
plane_formats = ilk_plane_formats;
- num_plane_formats = DRM_ARRAY_SIZE(ilk_plane_formats);
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
}
break;
case 7:
+ if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
+ intel_plane->can_scale = false;
+ else
+ intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
@@ -684,10 +713,11 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
intel_plane->get_colorkey = ivb_get_colorkey;
plane_formats = snb_plane_formats;
- num_plane_formats = DRM_ARRAY_SIZE(snb_plane_formats);
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
break;
default:
+ free(intel_plane, DRM_MEM_KMS);
return -ENODEV;
}
@@ -702,4 +732,3 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
return ret;
}
-
diff --git a/sys/dev/drm2/i915/intel_tv.c b/sys/dev/drm2/i915/intel_tv.c
index bc276d8..04b3ead 100644
--- a/sys/dev/drm2/i915/intel_tv.c
+++ b/sys/dev/drm2/i915/intel_tv.c
@@ -34,12 +34,11 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
-#include <dev/drm2/drm.h>
#include <dev/drm2/drm_crtc.h>
#include <dev/drm2/drm_edid.h>
+#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
-#include <dev/drm2/i915/intel_drv.h>
enum tv_margin {
TV_MARGIN_LEFT, TV_MARGIN_TOP,
@@ -677,6 +676,54 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
{
+ .name = "480p",
+ .clock = 107520,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 857,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 479,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "576p",
+ .clock = 107520,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 139,
+ .hblank_start = 859, .htotal = 863,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 48, .vi_end_f2 = 48,
+ .nbr_end = 575,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
.name = "720p@60Hz",
.clock = 148800,
.refresh = 60000,
@@ -791,22 +838,37 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
base);
}
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(TV_CTL);
+
+ if (!(tmp & TV_ENC_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
static void
-intel_tv_dpms(struct drm_encoder *encoder, int mode)
+intel_enable_tv(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
- break;
- }
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
+
+static void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
static const struct tv_mode *
@@ -814,7 +876,7 @@ intel_tv_mode_lookup(const char *tv_format)
{
int i;
- for (i = 0; i < DRM_ARRAY_SIZE(tv_modes); i++) {
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (!strcmp(tv_format, tv_mode->name))
@@ -846,24 +908,18 @@ intel_tv_mode_valid(struct drm_connector *connector,
static bool
-intel_tv_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
+intel_tv_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_mode_config *drm_config = &dev->mode_config;
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
- struct drm_encoder *other_encoder;
if (!tv_mode)
return false;
- /* FIXME: lock encoder list */
- list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
- if (other_encoder != encoder &&
- other_encoder->crtc == encoder->crtc)
- return false;
- }
+ if (intel_encoder_check_is_cloned(&intel_tv->base))
+ return false;
adjusted_mode->clock = tv_mode->clock;
return true;
@@ -1035,13 +1091,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
int dspcntr_reg = DSPCNTR(intel_crtc->plane);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = DSPADDR(intel_crtc->plane);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
/* Wait for vblank for the disable to take effect */
if (IS_GEN2(dev))
@@ -1070,8 +1124,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(pipeconf_reg, pipeconf);
I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
}
j = 0;
@@ -1196,6 +1249,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
I915_WRITE(TV_CTL, save_tv_ctl);
+ POSTING_READ(TV_CTL);
+
+ /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@@ -1251,17 +1309,13 @@ intel_tv_detect(struct drm_connector *connector, bool force)
int type;
mode = reported_modes[0];
- drm_mode_set_crtcinfo(&mode, 0);
if (force) {
struct intel_load_detect_pipe tmp;
- if (intel_get_load_detect_pipe(&intel_tv->base, connector,
- &mode, &tmp)) {
+ if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(&intel_tv->base,
- connector,
- &tmp);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
return connector_status_unknown;
} else
@@ -1360,7 +1414,7 @@ intel_tv_get_modes(struct drm_connector *connector)
tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
tmp *= mode_ptr->htotal;
- tmp = tmp / 1000000;
+ tmp = div_u64(tmp, 1000000);
mode_ptr->clock = (int) tmp;
mode_ptr->type = DRM_MODE_TYPE_DRIVER;
@@ -1375,9 +1429,6 @@ intel_tv_get_modes(struct drm_connector *connector)
static void
intel_tv_destroy(struct drm_connector *connector)
{
-#if 0
- drm_sysfs_connector_remove(connector);
-#endif
drm_connector_cleanup(connector);
free(connector, DRM_MEM_KMS);
}
@@ -1429,22 +1480,20 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
}
if (changed && crtc)
- drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
out:
return ret;
}
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
- .dpms = intel_tv_dpms,
.mode_fixup = intel_tv_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_tv_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@@ -1543,10 +1592,16 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
- intel_tv = malloc(sizeof(struct intel_tv), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
- intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
- M_WAITOK | M_ZERO);
+ intel_tv = malloc(sizeof(struct intel_tv), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_tv) {
+ return;
+ }
+
+ intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
+ if (!intel_connector) {
+ free(intel_tv, DRM_MEM_KMS);
+ return;
+ }
intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
@@ -1568,10 +1623,15 @@ intel_tv_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
+ intel_encoder->enable = intel_enable_tv;
+ intel_encoder->disable = intel_disable_tv;
+ intel_encoder->get_hw_state = intel_tv_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+ intel_encoder->cloneable = false;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
@@ -1610,7 +1670,4 @@ intel_tv_init(struct drm_device *dev)
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
-#if 0
- drm_sysfs_connector_add(connector);
-#endif
}
diff --git a/sys/modules/drm2/i915kms/Makefile b/sys/modules/drm2/i915kms/Makefile
index 8886be8..c42066a 100644
--- a/sys/modules/drm2/i915kms/Makefile
+++ b/sys/modules/drm2/i915kms/Makefile
@@ -3,23 +3,31 @@
.PATH: ${.CURDIR}/../../../dev/drm2/i915
KMOD = i915kms
SRCS = \
+ dvo_ch7017.c \
+ dvo_ch7xxx.c \
+ dvo_ivch.c \
+ dvo_ns2501.c \
+ dvo_sil164.c \
+ dvo_tfp410.c \
i915_debug.c \
i915_dma.c \
i915_drv.c \
i915_gem.c \
i915_gem_context.c \
- i915_gem_execbuffer.c \
i915_gem_evict.c \
+ i915_gem_execbuffer.c \
i915_gem_gtt.c \
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_irq.c \
i915_suspend.c \
+ intel_acpi.c \
intel_bios.c \
intel_crt.c \
intel_ddi.c \
intel_display.c \
intel_dp.c \
+ intel_dvo.c \
intel_fb.c \
intel_hdmi.c \
intel_iic.c \
OpenPOWER on IntegriCloud