Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 270
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 1070
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 359
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 311
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 645
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 87
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 96
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 60
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 43
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 1000
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 643
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 57
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 104
-rw-r--r--  drivers/gpu/drm/i915/i915_ums.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 463
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 719
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2851
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 985
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 212
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 381
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 279
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 120
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 338
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 136
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 2653
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 481
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen8.c | 792
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen9.c | 974
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 419
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 1406
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 605
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 495
51 files changed, 12437 insertions(+), 6900 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index c1dd485..e4083e4 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,7 +11,9 @@ i915-y := i915_drv.o \
i915_params.o \
i915_suspend.o \
i915_sysfs.o \
- intel_pm.o
+ intel_pm.o \
+ intel_runtime_pm.o
+
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
@@ -38,13 +40,18 @@ i915-y += i915_cmd_parser.o \
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
intel_renderstate_gen7.o \
- intel_renderstate_gen8.o
+ intel_renderstate_gen8.o \
+ intel_renderstate_gen9.o
# modesetting core code
-i915-y += intel_bios.o \
+i915-y += intel_audio.o \
+ intel_bios.o \
intel_display.o \
+ intel_fifo_underrun.o \
+ intel_frontbuffer.o \
intel_modes.o \
intel_overlay.o \
+ intel_psr.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 593b657..22c992a 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -73,7 +73,7 @@
* those commands required by the parser. This generally works because command
* opcode ranges have standard command length encodings. So for commands that
* the parser does not need to check, it can easily skip them. This is
- * implementated via a per-ring length decoding vfunc.
+ * implemented via a per-ring length decoding vfunc.
*
* Unfortunately, there are a number of commands that do not follow the standard
* length encoding for their opcode range, primarily amongst the MI_* commands.
@@ -138,6 +138,11 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
+ /*
+ * MI_BATCH_BUFFER_START requires some special handling. It's not
+ * really a 'skip' action but it doesn't seem like it's worth adding
+ * a new action. See i915_parse_cmds().
+ */
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
@@ -408,6 +413,8 @@ static const u32 gen7_render_regs[] = {
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
OACONTROL, /* Only allowed for LRI and SRM. See below. */
+ REG64(MI_PREDICATE_SRC0),
+ REG64(MI_PREDICATE_SRC1),
GEN7_3DPRIM_END_OFFSET,
GEN7_3DPRIM_START_VERTEX,
GEN7_3DPRIM_VERTEX_COUNT,
@@ -838,7 +845,7 @@ finish:
* @ring: the ring in question
*
* Only certain platforms require software batch buffer command parsing, and
- * only when enabled via module paramter.
+ * only when enabled via module parameter.
*
* Return: true if the ring requires software command parsing
*/
@@ -847,12 +854,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
if (!ring->needs_cmd_parser)
return false;
- /*
- * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
- * disabled. That will cause all of the parser's PPGTT checks to
- * fail. For now, disable parsing when PPGTT is off.
- */
- if (USES_PPGTT(ring->dev))
+ if (!USES_PPGTT(ring->dev))
return false;
return (i915.enable_cmd_parser == 1);
@@ -888,8 +890,10 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false;
+ }
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[2] != 0);
@@ -958,7 +962,8 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
*
- * Return: non-zero if the parser finds violations or otherwise fails
+ * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
+ * if the batch appears legal but should use hardware parsing
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
@@ -1005,6 +1010,16 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break;
}
+ /*
+ * If the batch buffer contains a chained batch, return an
+ * error that tells the caller to abort and dispatch the
+ * workload as a non-secure batch.
+ */
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = -EACCES;
+ break;
+ }
+
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
@@ -1059,6 +1074,8 @@ int i915_cmd_parser_get_version(void)
*
* 1. Initial version. Checks batches and reports violations, but leaves
* hardware parsing enabled (so does not allow new use cases).
+ * 2. Allow access to the MI_PREDICATE_SRC0 and
+ * MI_PREDICATE_SRC1 registers.
*/
- return 1;
+ return 2;
}
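
With the chained-batch check added above, i915_parse_cmds() can now return -EACCES for a batch that is legal but must not run with hardware parsing bypassed. A minimal sketch of the caller-side fallback implied by the in-diff comment; the surrounding flow and the dispatch_flags name are assumptions, not the exact execbuffer code:

	/*
	 * Hypothetical caller-side handling of the new -EACCES return from
	 * i915_parse_cmds(): a chained batch is not a privilege violation,
	 * so fall back to non-secure dispatch and let hardware parsing run.
	 */
	ret = i915_parse_cmds(ring, batch_obj, batch_start_offset, is_master);
	if (ret == -EACCES) {
		dispatch_flags &= ~I915_DISPATCH_SECURE;	/* illustrative flag */
		ret = 0;
	} else if (ret) {
		return ret;	/* real violation or parse failure */
	}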
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 063b448..779a275 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -116,7 +116,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
- return obj->has_global_gtt_mapping ? "g" : " ";
+ return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}
static void
@@ -516,7 +516,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
struct intel_crtc *crtc;
int ret;
@@ -529,7 +528,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
work = crtc->unpin_work;
if (work == NULL) {
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
@@ -575,7 +574,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
}
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
}
mutex_unlock(&dev->struct_mutex);
@@ -717,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe))) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
@@ -1241,11 +1240,12 @@ static int vlv_drpc_info(struct seq_file *m)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rpmodectl1, rcctl1;
+ u32 rpmodectl1, rcctl1, pw_status;
unsigned fw_rendercount = 0, fw_mediacount = 0;
intel_runtime_pm_get(dev_priv);
+ pw_status = I915_READ(VLV_GTLC_PW_STATUS);
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
@@ -1264,11 +1264,9 @@ static int vlv_drpc_info(struct seq_file *m)
yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_EI_MODE(1))));
seq_printf(m, "Render Power Well: %s\n",
- (I915_READ(VLV_GTLC_PW_STATUS) &
- VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+ (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Media Power Well: %s\n",
- (I915_READ(VLV_GTLC_PW_STATUS) &
- VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+ (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Render RC6 residency since boot: %u\n",
I915_READ(VLV_GT_RENDER_RC6));
@@ -1774,6 +1772,50 @@ static int i915_context_status(struct seq_file *m, void *unused)
return 0;
}
+static void i915_dump_lrc_obj(struct seq_file *m,
+ struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *ctx_obj)
+{
+ struct page *page;
+ uint32_t *reg_state;
+ int j;
+ unsigned long ggtt_offset = 0;
+
+ if (ctx_obj == NULL) {
+ seq_printf(m, "Context on %s with no gem object\n",
+ ring->name);
+ return;
+ }
+
+ seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+ intel_execlists_ctx_id(ctx_obj));
+
+ if (!i915_gem_obj_ggtt_bound(ctx_obj))
+ seq_puts(m, "\tNot bound in GGTT\n");
+ else
+ ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+
+ if (i915_gem_object_get_pages(ctx_obj)) {
+ seq_puts(m, "\tFailed to get pages for context object\n");
+ return;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, 1);
+ if (!WARN_ON(page == NULL)) {
+ reg_state = kmap_atomic(page);
+
+ for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
+ seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ ggtt_offset + 4096 + (j * 4),
+ reg_state[j], reg_state[j + 1],
+ reg_state[j + 2], reg_state[j + 3]);
+ }
+ kunmap_atomic(reg_state);
+ }
+
+ seq_putc(m, '\n');
+}
+
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1794,29 +1836,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
list_for_each_entry(ctx, &dev_priv->context_list, link) {
for_each_ring(ring, dev_priv, i) {
- struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
- if (ring->default_context == ctx)
- continue;
-
- if (ctx_obj) {
- struct page *page = i915_gem_object_get_page(ctx_obj, 1);
- uint32_t *reg_state = kmap_atomic(page);
- int j;
-
- seq_printf(m, "CONTEXT: %s %u\n", ring->name,
- intel_execlists_ctx_id(ctx_obj));
-
- for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
- seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
- reg_state[j], reg_state[j + 1],
- reg_state[j + 2], reg_state[j + 3]);
- }
- kunmap_atomic(reg_state);
-
- seq_putc(m, '\n');
- }
+ if (ring->default_context != ctx)
+ i915_dump_lrc_obj(m, ring,
+ ctx->engine[i].state);
}
}
@@ -1849,6 +1871,8 @@ static int i915_execlists(struct seq_file *m, void *data)
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
+
for_each_ring(ring, dev_priv, ring_id) {
struct intel_ctx_submit_request *head_req = NULL;
int count = 0;
@@ -1900,6 +1924,7 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_putc(m, '\n');
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1973,6 +1998,8 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
if (IS_GEN3(dev) || IS_GEN4(dev)) {
seq_printf(m, "DDC = 0x%08x\n",
I915_READ(DCC));
+ seq_printf(m, "DDC2 = 0x%08x\n",
+ I915_READ(DCC2));
seq_printf(m, "C0DRB3 = 0x%04x\n",
I915_READ16(C0DRB3));
seq_printf(m, "C1DRB3 = 0x%04x\n",
@@ -1986,7 +2013,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
I915_READ(MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
I915_READ(TILECTL));
- if (IS_GEN8(dev))
+ if (INTEL_INFO(dev)->gen >= 8)
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
I915_READ(GAMTARBMODE));
else
@@ -1995,6 +2022,10 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
I915_READ(DISP_ARB_CTL));
}
+
+ if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ seq_puts(m, "L-shaped memory detected\n");
+
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -2628,14 +2659,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
- seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
- pll->active, yesno(pll->on));
+ seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
+ pll->config.crtc_mask, pll->active, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
- seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
+ seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n",
+ pll->config.hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll);
}
drm_modeset_unlock_all(dev);
@@ -2656,18 +2688,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
- for (i = 0; i < dev_priv->num_wa_regs; ++i) {
- u32 addr, mask;
-
- addr = dev_priv->intel_wa_regs[i].addr;
- mask = dev_priv->intel_wa_regs[i].mask;
- dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
- if (dev_priv->intel_wa_regs[i].addr)
- seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- dev_priv->intel_wa_regs[i].addr,
- dev_priv->intel_wa_regs[i].value,
- dev_priv->intel_wa_regs[i].mask);
+ seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
+ for (i = 0; i < dev_priv->workarounds.count; ++i) {
+ u32 addr, mask, value, read;
+ bool ok;
+
+ addr = dev_priv->workarounds.reg[i].addr;
+ mask = dev_priv->workarounds.reg[i].mask;
+ value = dev_priv->workarounds.reg[i].value;
+ read = I915_READ(addr);
+ ok = (value & mask) == (read & mask);
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
+ addr, value, mask, read, ok ? "OK" : "FAIL");
}
intel_runtime_pm_put(dev_priv);
@@ -2676,6 +2708,42 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_ddb_allocation *ddb;
+ struct skl_ddb_entry *entry;
+ enum pipe pipe;
+ int plane;
+
+ drm_modeset_lock_all(dev);
+
+ ddb = &dev_priv->wm.skl_hw.ddb;
+
+ seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+ for_each_pipe(dev_priv, pipe) {
+ seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+ for_each_plane(pipe, plane) {
+ entry = &ddb->plane[pipe][plane];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
+ entry->start, entry->end,
+ skl_ddb_entry_size(entry));
+ }
+
+ entry = &ddb->cursor[pipe];
+ seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
+ entry->end, skl_ddb_entry_size(entry));
+ }
+
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -2969,6 +3037,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
break;
}
break;
+ default:
+ break;
}
}
drm_modeset_unlock_all(dev);
@@ -3256,6 +3326,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
+ pipe));
u32 val = 0; /* shut up gcc */
int ret;
@@ -3266,6 +3338,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
if (pipe_crc->source && source)
return -EINVAL;
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+ DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+ return -EIO;
+ }
+
if (IS_GEN2(dev))
ret = i8xx_pipe_crc_ctl_reg(&source, &val);
else if (INTEL_INFO(dev)->gen < 5)
@@ -3291,6 +3368,14 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
if (!pipe_crc->entries)
return -ENOMEM;
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ hsw_disable_ips(crtc);
+
spin_lock_irq(&pipe_crc->lock);
pipe_crc->head = 0;
pipe_crc->tail = 0;
@@ -3329,6 +3414,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
vlv_undo_pipe_scramble_reset(dev, pipe);
else if (IS_HASWELL(dev) && pipe == PIPE_A)
hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+
+ hsw_enable_ips(crtc);
}
return 0;
@@ -3506,7 +3593,7 @@ static const struct file_operations i915_display_crc_ctl_fops = {
.write = display_crc_ctl_write
};
-static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
struct drm_device *dev = m->private;
int num_levels = ilk_wm_max_level(dev) + 1;
@@ -3517,13 +3604,17 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
for (level = 0; level < num_levels; level++) {
unsigned int latency = wm[level];
- /* WM1+ latency values in 0.5us units */
- if (level > 0)
+ /*
+ * - WM1+ latency values in 0.5us units
+ * - latencies are in us on gen9
+ */
+ if (INTEL_INFO(dev)->gen >= 9)
+ latency *= 10;
+ else if (level > 0)
latency *= 5;
seq_printf(m, "WM%d %u (%u.%u usec)\n",
- level, wm[level],
- latency / 10, latency % 10);
+ level, wm[level], latency / 10, latency % 10);
}
drm_modeset_unlock_all(dev);
@@ -3532,8 +3623,15 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.pri_latency;
- wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+ wm_latency_show(m, latencies);
return 0;
}
@@ -3541,8 +3639,15 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.spr_latency;
- wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+ wm_latency_show(m, latencies);
return 0;
}
@@ -3550,8 +3655,15 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.cur_latency;
- wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+ wm_latency_show(m, latencies);
return 0;
}
@@ -3587,11 +3699,11 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, uint16_t wm[5])
+ size_t len, loff_t *offp, uint16_t wm[8])
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
- uint16_t new[5] = { 0 };
+ uint16_t new[8] = { 0 };
int num_levels = ilk_wm_max_level(dev) + 1;
int level;
int ret;
@@ -3605,7 +3717,9 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
tmp[len] = '\0';
- ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
if (ret != num_levels)
return -EINVAL;
@@ -3625,8 +3739,15 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint16_t *latencies;
- return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
}
static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3634,8 +3755,15 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint16_t *latencies;
- return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
}
static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3643,8 +3771,15 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.cur_latency;
- return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+ return wm_latency_write(file, ubuf, len, offp, latencies);
}
static const struct file_operations i915_pri_wm_latency_fops = {
@@ -4187,6 +4322,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
+ {"i915_ddb_info", i915_ddb_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
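
The watermark latency files above now cover gen9, where the hardware reports latencies in whole microseconds rather than the 0.5us units used for WM1+ on earlier platforms. Both get normalized to tenths of a microsecond for display; a standalone sketch of the conversion, distilled from wm_latency_show() (not driver code):

	/*
	 * Raw firmware values normalized to tenths of a microsecond for
	 * printing.  E.g. a raw value of 7 prints as "7.0 usec" on gen9 but
	 * "3.5 usec" for WM1+ on earlier gens.
	 */
	static unsigned int wm_latency_in_tenths(unsigned int raw, int level, int gen)
	{
		if (gen >= 9)
			return raw * 10;	/* gen9: 1 us units */
		if (level > 0)
			return raw * 5;		/* WM1+: 0.5 us units */
		return raw;			/* WM0: already 0.1 us units */
	}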
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 318ade9..ecee3bc 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,884 +50,6 @@
#include <linux/pm_runtime.h>
#include <linux/oom.h>
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
- intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
- intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
- __intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
- if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file); \
-} while (0)
-
-static inline u32
-intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
-{
- if (I915_NEED_GFX_HWS(dev_priv->dev))
- return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
- else
- return intel_read_status_page(LP_RING(dev_priv), reg);
-}
-
-#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_BREADCRUMB_INDEX 0x21
-
-void i915_update_dri1_breadcrumb(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
-
- /*
- * The dri breadcrumb update races against the drm master disappearing.
- * Instead of trying to fix this (this is by far not the only ums issue)
- * just don't do the update in kms mode.
- */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-}
-
-static void i915_write_hws_pga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 addr;
-
- addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(dev)->gen >= 4)
- addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
- I915_WRITE(HWS_PGA, addr);
-}
-
-/**
- * Frees the hardware status page, whether it's a physical address or a virtual
- * address set up by the X Server.
- */
-static void i915_free_hws(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
-
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
- }
-
- if (ring->status_page.gfx_addr) {
- ring->status_page.gfx_addr = 0;
- iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
- }
-
- /* Need to rewrite hardware status page */
- I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
-void i915_kernel_lost_context(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
- struct intel_ringbuffer *ringbuf = ring->buffer;
-
- /*
- * We should never lose context on the ring with modesetting
- * as we don't expose it to userspace
- */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
- ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
- if (ringbuf->space < 0)
- ringbuf->space += ringbuf->size;
-
- if (!dev->primary->master)
- return;
-
- master_priv = dev->primary->master->driver_priv;
- if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
-}
-
-static int i915_dma_cleanup(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
- for (i = 0; i < I915_NUM_RINGS; i++)
- intel_cleanup_ring_buffer(&dev_priv->ring[i]);
- mutex_unlock(&dev->struct_mutex);
-
- /* Clear the HWS virtual address at teardown */
- if (I915_NEED_GFX_HWS(dev))
- i915_free_hws(dev);
-
- return 0;
-}
-
-static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret;
-
- master_priv->sarea = drm_legacy_getsarea(dev);
- if (master_priv->sarea) {
- master_priv->sarea_priv = (drm_i915_sarea_t *)
- ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
- } else {
- DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
- }
-
- if (init->ring_size != 0) {
- if (LP_RING(dev_priv)->buffer->obj != NULL) {
- i915_dma_cleanup(dev);
- DRM_ERROR("Client tried to initialize ringbuffer in "
- "GEM mode\n");
- return -EINVAL;
- }
-
- ret = intel_render_ring_init_dri(dev,
- init->ring_start,
- init->ring_size);
- if (ret) {
- i915_dma_cleanup(dev);
- return ret;
- }
- }
-
- dev_priv->dri1.cpp = init->cpp;
- dev_priv->dri1.back_offset = init->back_offset;
- dev_priv->dri1.front_offset = init->front_offset;
- dev_priv->dri1.current_page = 0;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->pf_current_page = 0;
-
- /* Allow hardware batchbuffers unless told otherwise.
- */
- dev_priv->dri1.allow_batchbuffer = 1;
-
- return 0;
-}
-
-static int i915_dma_resume(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
-
- DRM_DEBUG_DRIVER("%s\n", __func__);
-
- if (ring->buffer->virtual_start == NULL) {
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- /* Program Hardware Status Page */
- if (!ring->status_page.page_addr) {
- DRM_ERROR("Can not find hardware status page\n");
- return -EINVAL;
- }
- DRM_DEBUG_DRIVER("hw status page @ %p\n",
- ring->status_page.page_addr);
- if (ring->status_page.gfx_addr != 0)
- intel_ring_setup_status_page(ring);
- else
- i915_write_hws_pga(dev);
-
- DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-
- return 0;
-}
-
-static int i915_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_init_t *init = data;
- int retcode = 0;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- switch (init->func) {
- case I915_INIT_DMA:
- retcode = i915_initialize(dev, init);
- break;
- case I915_CLEANUP_DMA:
- retcode = i915_dma_cleanup(dev);
- break;
- case I915_RESUME_DMA:
- retcode = i915_dma_resume(dev);
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-/* Implement basically the same security restrictions as hardware does
- * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
- *
- * Most of the calculations below involve calculating the size of a
- * particular instruction. It's important to get the size right as
- * that tells us where the next instruction to check is. Any illegal
- * instruction detected will be given a size of zero, which is a
- * signal to abort the rest of the buffer.
- */
-static int validate_cmd(int cmd)
-{
- switch (((cmd >> 29) & 0x7)) {
- case 0x0:
- switch ((cmd >> 23) & 0x3f) {
- case 0x0:
- return 1; /* MI_NOOP */
- case 0x4:
- return 1; /* MI_FLUSH */
- default:
- return 0; /* disallow everything else */
- }
- break;
- case 0x1:
- return 0; /* reserved */
- case 0x2:
- return (cmd & 0xff) + 2; /* 2d commands */
- case 0x3:
- if (((cmd >> 24) & 0x1f) <= 0x18)
- return 1;
-
- switch ((cmd >> 24) & 0x1f) {
- case 0x1c:
- return 1;
- case 0x1d:
- switch ((cmd >> 16) & 0xff) {
- case 0x3:
- return (cmd & 0x1f) + 2;
- case 0x4:
- return (cmd & 0xf) + 2;
- default:
- return (cmd & 0xffff) + 2;
- }
- case 0x1e:
- if (cmd & (1 << 23))
- return (cmd & 0xffff) + 1;
- else
- return 1;
- case 0x1f:
- if ((cmd & (1 << 23)) == 0) /* inline vertices */
- return (cmd & 0x1ffff) + 2;
- else if (cmd & (1 << 17)) /* indirect random */
- if ((cmd & 0xffff) == 0)
- return 0; /* unknown length, too hard */
- else
- return (((cmd & 0xffff) + 1) / 2) + 1;
- else
- return 2; /* indirect sequential */
- default:
- return 0;
- }
- default:
- return 0;
- }
-
- return 0;
-}
-
-static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i, ret;
-
- if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
- return -EINVAL;
-
- for (i = 0; i < dwords;) {
- int sz = validate_cmd(buffer[i]);
-
- if (sz == 0 || i + sz > dwords)
- return -EINVAL;
- i += sz;
- }
-
- ret = BEGIN_LP_RING((dwords+1)&~1);
- if (ret)
- return ret;
-
- for (i = 0; i < dwords; i++)
- OUT_RING(buffer[i]);
- if (dwords & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-
- return 0;
-}
-
-int
-i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *box,
- int DR1, int DR4)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
- box->y2 <= 0 || box->x2 <= 0) {
- DRM_ERROR("Bad box %d,%d..%d,%d\n",
- box->x1, box->y1, box->x2, box->y2);
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen >= 4) {
- ret = BEGIN_LP_RING(4);
- if (ret)
- return ret;
-
- OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
- OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
- OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
- OUT_RING(DR4);
- } else {
- ret = BEGIN_LP_RING(6);
- if (ret)
- return ret;
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(DR1);
- OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
- OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
- OUT_RING(DR4);
- OUT_RING(0);
- }
- ADVANCE_LP_RING();
-
- return 0;
-}
-
-/* XXX: Emitting the counter should really be moved to part of the IRQ
- * emit. For now, do it in both places:
- */
-
-static void i915_emit_breadcrumb(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
- dev_priv->dri1.counter++;
- if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
- dev_priv->dri1.counter = 0;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-}
-
-static int i915_dispatch_cmdbuffer(struct drm_device *dev,
- drm_i915_cmdbuffer_t *cmd,
- struct drm_clip_rect *cliprects,
- void *cmdbuf)
-{
- int nbox = cmd->num_cliprects;
- int i = 0, count, ret;
-
- if (cmd->sz & 0x3) {
- DRM_ERROR("alignment");
- return -EINVAL;
- }
-
- i915_kernel_lost_context(dev);
-
- count = nbox ? nbox : 1;
-
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- ret = i915_emit_box(dev, &cliprects[i],
- cmd->DR1, cmd->DR4);
- if (ret)
- return ret;
- }
-
- ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
- if (ret)
- return ret;
- }
-
- i915_emit_breadcrumb(dev);
- return 0;
-}
-
-static int i915_dispatch_batchbuffer(struct drm_device *dev,
- drm_i915_batchbuffer_t *batch,
- struct drm_clip_rect *cliprects)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int nbox = batch->num_cliprects;
- int i, count, ret;
-
- if ((batch->start | batch->used) & 0x7) {
- DRM_ERROR("alignment");
- return -EINVAL;
- }
-
- i915_kernel_lost_context(dev);
-
- count = nbox ? nbox : 1;
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- ret = i915_emit_box(dev, &cliprects[i],
- batch->DR1, batch->DR4);
- if (ret)
- return ret;
- }
-
- if (!IS_I830(dev) && !IS_845G(dev)) {
- ret = BEGIN_LP_RING(2);
- if (ret)
- return ret;
-
- if (INTEL_INFO(dev)->gen >= 4) {
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
- OUT_RING(batch->start);
- } else {
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
- }
- } else {
- ret = BEGIN_LP_RING(4);
- if (ret)
- return ret;
-
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
- OUT_RING(batch->start + batch->used - 4);
- OUT_RING(0);
- }
- ADVANCE_LP_RING();
- }
-
-
- if (IS_G4X(dev) || IS_GEN5(dev)) {
- if (BEGIN_LP_RING(2) == 0) {
- OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
- }
-
- i915_emit_breadcrumb(dev);
- return 0;
-}
-
-static int i915_dispatch_flip(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv =
- dev->primary->master->driver_priv;
- int ret;
-
- if (!master_priv->sarea_priv)
- return -EINVAL;
-
- DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
- __func__,
- dev_priv->dri1.current_page,
- master_priv->sarea_priv->pf_current_page);
-
- i915_kernel_lost_context(dev);
-
- ret = BEGIN_LP_RING(10);
- if (ret)
- return ret;
-
- OUT_RING(MI_FLUSH | MI_READ_FLUSH);
- OUT_RING(0);
-
- OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
- OUT_RING(0);
- if (dev_priv->dri1.current_page == 0) {
- OUT_RING(dev_priv->dri1.back_offset);
- dev_priv->dri1.current_page = 1;
- } else {
- OUT_RING(dev_priv->dri1.front_offset);
- dev_priv->dri1.current_page = 0;
- }
- OUT_RING(0);
-
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
- return 0;
-}
-
-static int i915_quiescent(struct drm_device *dev)
-{
- i915_kernel_lost_context(dev);
- return intel_ring_idle(LP_RING(dev->dev_private));
-}
-
-static int i915_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_quiescent(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-static int i915_batchbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- drm_i915_sarea_t *sarea_priv;
- drm_i915_batchbuffer_t *batch = data;
- int ret;
- struct drm_clip_rect *cliprects = NULL;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- master_priv = dev->primary->master->driver_priv;
- sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
-
- if (!dev_priv->dri1.allow_batchbuffer) {
- DRM_ERROR("Batchbuffer ioctl disabled\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
- batch->start, batch->used, batch->num_cliprects);
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (batch->num_cliprects < 0)
- return -EINVAL;
-
- if (batch->num_cliprects) {
- cliprects = kcalloc(batch->num_cliprects,
- sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL)
- return -ENOMEM;
-
- ret = copy_from_user(cliprects, batch->cliprects,
- batch->num_cliprects *
- sizeof(struct drm_clip_rect));
- if (ret != 0) {
- ret = -EFAULT;
- goto fail_free;
- }
- }
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
- mutex_unlock(&dev->struct_mutex);
-
- if (sarea_priv)
- sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-fail_free:
- kfree(cliprects);
-
- return ret;
-}
-
-static int i915_cmdbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- drm_i915_sarea_t *sarea_priv;
- drm_i915_cmdbuffer_t *cmdbuf = data;
- struct drm_clip_rect *cliprects = NULL;
- void *batch_data;
- int ret;
-
- DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
- cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- master_priv = dev->primary->master->driver_priv;
- sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (cmdbuf->num_cliprects < 0)
- return -EINVAL;
-
- batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
- if (batch_data == NULL)
- return -ENOMEM;
-
- ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
- if (ret != 0) {
- ret = -EFAULT;
- goto fail_batch_free;
- }
-
- if (cmdbuf->num_cliprects) {
- cliprects = kcalloc(cmdbuf->num_cliprects,
- sizeof(*cliprects), GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto fail_batch_free;
- }
-
- ret = copy_from_user(cliprects, cmdbuf->cliprects,
- cmdbuf->num_cliprects *
- sizeof(struct drm_clip_rect));
- if (ret != 0) {
- ret = -EFAULT;
- goto fail_clip_free;
- }
- }
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
- goto fail_clip_free;
- }
-
- if (sarea_priv)
- sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-fail_clip_free:
- kfree(cliprects);
-fail_batch_free:
- kfree(batch_data);
-
- return ret;
-}
-
-static int i915_emit_irq(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
- i915_kernel_lost_context(dev);
-
- DRM_DEBUG_DRIVER("\n");
-
- dev_priv->dri1.counter++;
- if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
- dev_priv->dri1.counter = 1;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
-
- return dev_priv->dri1.counter;
-}
-
-static int i915_wait_irq(struct drm_device *dev, int irq_nr)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret = 0;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
-
- DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
- READ_BREADCRUMB(dev_priv));
-
- if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
- }
-
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
- if (ring->irq_get(ring)) {
- DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- ring->irq_put(ring);
- } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
- ret = -EBUSY;
-
- if (ret == -EBUSY) {
- DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
- }
-
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-static int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_irq_emit_t *emit = data;
- int result;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- result = i915_emit_irq(dev);
- mutex_unlock(&dev->struct_mutex);
-
- if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-static int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_irq_wait_t *irqwait = data;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return i915_wait_irq(dev, irqwait->irq_seq);
-}
-
-static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
- return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-static int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* The delayed swap mechanism was fundamentally racy, and has been
- * removed. The model was that the client requested a delayed flip/swap
- * from the kernel, then waited for vblank before continuing to perform
- * rendering. The problem was that the kernel might wake the client
- * up before it dispatched the vblank swap (since the lock has to be
- * held while touching the ringbuffer), in which case the client would
- * clear and start the next frame before the swap occurred, and
- * flicker would occur in addition to likely missing the vblank.
- *
- * In the absence of this ioctl, userland falls back to a correct path
- * of waiting for a vblank, then dispatching the swap on its own.
- * Context switching to userland and back is plenty fast enough for
- * meeting the requirements of vblank swapping.
- */
- return -EINVAL;
-}
-
-static int i915_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- DRM_DEBUG_DRIVER("%s\n", __func__);
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_flip(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -936,21 +58,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
drm_i915_getparam_t *param = data;
int value;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
- value = dev->pdev->irq ? 1 : 0;
- break;
case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
- break;
case I915_PARAM_LAST_DISPATCH:
- value = READ_BREADCRUMB(dev_priv);
- break;
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
case I915_PARAM_CHIPSET_ID:
value = dev->pdev->device;
break;
@@ -1027,6 +140,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version();
break;
+ case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+ value = 1;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -1046,19 +162,13 @@ static int i915_setparam(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_setparam_t *param = data;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
- break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
- dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
- break;
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
+
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
param->value < 0)
@@ -1075,54 +185,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
return 0;
}
-static int i915_set_status_page(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_hws_addr_t *hws = data;
- struct intel_engine_cs *ring;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!I915_NEED_GFX_HWS(dev))
- return -EINVAL;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- WARN(1, "tried to set status page when mode setting active\n");
- return 0;
- }
-
- DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
-
- ring = LP_RING(dev_priv);
- ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
-
- dev_priv->dri1.gfx_hws_cpu_addr =
- ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
- if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
- i915_dma_cleanup(dev);
- ring->status_page.gfx_addr = 0;
- DRM_ERROR("can not ioremap virtual address for"
- " G33 hw status page\n");
- return -ENOMEM;
- }
-
- memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-
- DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
- ring->status_page.gfx_addr);
- DRM_DEBUG_DRIVER("load hws at %p\n",
- ring->status_page.page_addr);
- return 0;
-}
-
static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1275,12 +337,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
- i915_resume(dev);
+ i915_resume_legacy(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_err("switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend(dev, pmm);
+ i915_suspend_legacy(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1338,14 +400,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_power_domains_init_hw(dev_priv);
- /*
- * We enable some interrupt sources in our postinstall hooks, so mark
- * interrupts as enabled _before_ actually enabling them to avoid
- * special cases in our ordering checks.
- */
- dev_priv->pm._irqs_disabled = false;
-
- ret = drm_irq_install(dev, dev->pdev->irq);
+ ret = intel_irq_install(dev_priv);
if (ret)
goto cleanup_gem_stolen;
@@ -1370,7 +425,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
goto cleanup_gem;
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(dev);
+ intel_hpd_init(dev_priv);
/*
* Some ports require correctly set-up hpd registers for detection to
@@ -1405,30 +460,6 @@ out:
return ret;
}
-int i915_master_create(struct drm_device *dev, struct drm_master *master)
-{
- struct drm_i915_master_private *master_priv;
-
- master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
- if (!master_priv)
- return -ENOMEM;
-
- master->driver_priv = master_priv;
- return 0;
-}
-
-void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
-{
- struct drm_i915_master_private *master_priv = master->driver_priv;
-
- if (!master_priv)
- return;
-
- kfree(master_priv);
-
- master->driver_priv = NULL;
-}
-
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
@@ -1534,7 +565,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info = (struct intel_device_info *)&dev_priv->info;
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
@@ -1614,7 +645,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->backlight_lock);
+ mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
@@ -1742,7 +773,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_freewq;
}
- intel_irq_init(dev);
+ intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1784,9 +815,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
DRM_ERROR("failed to init modeset\n");
goto out_power_well;
}
- } else {
- /* Start out suspended in ums mode. */
- dev_priv->ums.mm_suspended = 1;
}
i915_setup_sysfs(dev);
@@ -1800,12 +828,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
- intel_init_runtime_pm(dev_priv);
+ intel_runtime_pm_enable(dev_priv);
return 0;
out_power_well:
- intel_power_domains_remove(dev_priv);
+ intel_power_domains_fini(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1848,16 +876,10 @@ int i915_driver_unload(struct drm_device *dev)
return ret;
}
- intel_fini_runtime_pm(dev_priv);
+ intel_power_domains_fini(dev_priv);
intel_gpu_ips_teardown();
- /* The i915.ko module is still not prepared to be loaded when
- * the power well is not enabled, so just enable it in case
- * we're going to unload/reload. */
- intel_display_set_init_power(dev_priv, true);
- intel_power_domains_remove(dev_priv);
-
i915_teardown_sysfs(dev);
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1868,8 +890,12 @@ int i915_driver_unload(struct drm_device *dev)
acpi_video_unregister();
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_fbdev_fini(dev);
+
+ drm_vblank_cleanup(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev);
/*
@@ -1905,13 +931,8 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
-
- if (!I915_NEED_GFX_HWS(dev))
- i915_free_hws(dev);
}
- drm_vblank_cleanup(dev);
-
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
@@ -1959,23 +980,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
*/
void i915_driver_lastclose(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* On gen6+ we refuse to init without kms enabled, but then the drm core
- * goes right around and calls lastclose. Check for this and don't clean
- * up anything. */
- if (!dev_priv)
- return;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_fbdev_restore_mode(dev);
- vga_switcheroo_process_delayed_switch();
- return;
- }
-
- i915_gem_lastclose(dev);
-
- i915_dma_cleanup(dev);
+ intel_fbdev_restore_mode(dev);
+ vga_switcheroo_process_delayed_switch();
}
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
@@ -1999,24 +1005,24 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
}
const struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -2025,8 +1031,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2318b4c..f990ab4c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,6 +356,19 @@ static const struct intel_device_info intel_cherryview_info = {
CURSOR_OFFSETS,
};
+static const struct intel_device_info intel_skylake_info = {
+ .is_preliminary = 1,
+ .is_skylake = 1,
+ .gen = 9, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -392,7 +405,8 @@ static const struct intel_device_info intel_cherryview_info = {
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
- INTEL_CHV_IDS(&intel_cherryview_info)
+ INTEL_CHV_IDS(&intel_cherryview_info), \
+ INTEL_SKL_IDS(&intel_skylake_info)
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -449,7 +463,7 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
- WARN_ON(IS_ULT(dev));
+ WARN_ON(IS_HSW_ULT(dev));
} else if (IS_BROADWELL(dev)) {
dev_priv->pch_type = PCH_LPT;
dev_priv->pch_id =
@@ -460,7 +474,15 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
- WARN_ON(!IS_ULT(dev));
+ WARN_ON(!IS_HSW_ULT(dev));
+ } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_SPT;
+ DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev));
+ } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_SPT;
+ DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev));
} else
continue;
@@ -529,10 +551,10 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
}
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
-static int intel_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume);
+static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+ bool rpm_resume);
-static int i915_drm_freeze(struct drm_device *dev)
+static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
@@ -562,6 +584,8 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
+ intel_suspend_gt_powersave(dev);
+
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw. Also, power gate the CRTC power wells.
@@ -573,16 +597,12 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_dp_mst_suspend(dev);
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- intel_runtime_pm_disable_interrupts(dev);
+ intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
intel_suspend_encoders(dev_priv);
- intel_suspend_gt_powersave(dev);
-
- intel_modeset_suspend_hw(dev);
+ intel_suspend_hw(dev);
}
i915_gem_suspend_gtt_mappings(dev);
@@ -608,7 +628,26 @@ static int i915_drm_freeze(struct drm_device *dev)
return 0;
}
-int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_suspend_late(struct drm_device *drm_dev)
+{
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ int ret;
+
+ ret = intel_suspend_complete(dev_priv);
+
+ if (ret) {
+ DRM_ERROR("Suspend complete failed: %d\n", ret);
+
+ return ret;
+ }
+
+ pci_disable_device(drm_dev->pdev);
+ pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+
+ return 0;
+}
+
+int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
{
int error;
@@ -618,48 +657,25 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return -ENODEV;
}
- if (state.event == PM_EVENT_PRETHAW)
- return 0;
-
+ if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
+ state.event != PM_EVENT_FREEZE))
+ return -EINVAL;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- error = i915_drm_freeze(dev);
+ error = i915_drm_suspend(dev);
if (error)
return error;
- if (state.event == PM_EVENT_SUSPEND) {
- /* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3hot);
- }
-
- return 0;
+ return i915_drm_suspend_late(dev);
}
-static int i915_drm_thaw_early(struct drm_device *dev)
+static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = intel_resume_prepare(dev_priv, false);
- if (ret)
- DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
- intel_uncore_early_sanitize(dev, true);
- intel_uncore_sanitize(dev);
- intel_power_domains_init_hw(dev_priv);
-
- return ret;
-}
-
-static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET) &&
- restore_gtt_mappings) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
@@ -680,30 +696,29 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
}
mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_restore_interrupts(dev);
+ /* We need working interrupts for modeset enabling ... */
+ intel_runtime_pm_enable_interrupts(dev_priv);
intel_modeset_init_hw(dev);
- {
- unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
- intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
+ intel_dp_mst_resume(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
* bother with the tiny race here where we might lose hotplug
* notifications.
* */
- intel_hpd_init(dev);
+ intel_hpd_init(dev_priv);
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
}
@@ -718,21 +733,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_opregion_notify_adapter(dev, PCI_D0);
- return 0;
-}
-
-static int i915_drm_thaw(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- i915_check_and_clear_faults(dev);
+ drm_kms_helper_poll_enable(dev);
- return __i915_drm_thaw(dev, true);
+ return 0;
}
-static int i915_resume_early(struct drm_device *dev)
+static int i915_drm_resume_early(struct drm_device *dev)
{
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
- return 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
/*
* We have a resume ordering issue with the snd-hda driver also
@@ -748,33 +757,34 @@ static int i915_resume_early(struct drm_device *dev)
pci_set_master(dev->pdev);
- return i915_drm_thaw_early(dev);
+ if (IS_VALLEYVIEW(dev_priv))
+ ret = vlv_resume_prepare(dev_priv, false);
+ if (ret)
+ DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
+
+ intel_uncore_early_sanitize(dev, true);
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hsw_disable_pc8(dev_priv);
+
+ intel_uncore_sanitize(dev);
+ intel_power_domains_init_hw(dev_priv);
+
+ return ret;
}
-int i915_resume(struct drm_device *dev)
+int i915_resume_legacy(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- /*
- * Platforms with opregion should have sane BIOS, older ones (gen3 and
- * earlier) need to restore the GTT mappings since the BIOS might clear
- * all our scratch PTEs.
- */
- ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ ret = i915_drm_resume_early(dev);
if (ret)
return ret;
- drm_kms_helper_poll_enable(dev);
- return 0;
-}
-
-static int i915_resume_legacy(struct drm_device *dev)
-{
- i915_resume_early(dev);
- i915_resume(dev);
-
- return 0;
+ return i915_drm_resume(dev);
}
/**
@@ -820,6 +830,9 @@ int i915_reset(struct drm_device *dev)
}
}
+ if (i915_stop_ring_allow_warn(dev_priv))
+ pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
if (ret) {
DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
@@ -840,10 +853,7 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- if (drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev_priv->ums.mm_suspended) {
- dev_priv->ums.mm_suspended = 0;
-
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
dev_priv->gpu_error.reload_in_reset = true;
@@ -923,15 +933,13 @@ static int i915_pm_suspend(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_freeze(drm_dev);
+ return i915_drm_suspend(drm_dev);
}
static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
- int ret;
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -945,16 +953,7 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- ret = intel_suspend_complete(dev_priv);
-
- if (ret)
- DRM_ERROR("Suspend complete failed: %d\n", ret);
- else {
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return ret;
+ return i915_drm_suspend_late(drm_dev);
}
static int i915_pm_resume_early(struct device *dev)
@@ -962,61 +961,21 @@ static int i915_pm_resume_early(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return i915_resume_early(drm_dev);
-}
-
-static int i915_pm_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
- return i915_resume(drm_dev);
-}
-
-static int i915_pm_freeze(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
- if (!drm_dev || !drm_dev->dev_private) {
- dev_err(dev, "DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
- return i915_drm_freeze(drm_dev);
-}
-
-static int i915_pm_freeze_late(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
-
- return intel_suspend_complete(dev_priv);
-}
-
-static int i915_pm_thaw_early(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
- return i915_drm_thaw_early(drm_dev);
+ return i915_drm_resume_early(drm_dev);
}
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return i915_drm_thaw(drm_dev);
-}
-
-static int i915_pm_poweroff(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
- return i915_drm_freeze(drm_dev);
+ return i915_drm_resume(drm_dev);
}
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
@@ -1026,25 +985,6 @@ static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
return 0;
}
-static int snb_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- struct drm_device *dev = dev_priv->dev;
-
- if (rpm_resume)
- intel_init_pch_refclk(dev);
-
- return 0;
-}
-
-static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- hsw_disable_pc8(dev_priv);
-
- return 0;
-}
-
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1449,18 +1389,13 @@ static int intel_runtime_suspend(struct device *device)
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
- /*
- * rps.work can't be rearmed here, since we get here only after making
- * sure the GPU is idle and the RPS freq is set to the minimum. See
- * intel_mark_idle().
- */
- cancel_work_sync(&dev_priv->rps.work);
- intel_runtime_pm_disable_interrupts(dev);
+ intel_suspend_gt_powersave(dev);
+ intel_runtime_pm_disable_interrupts(dev_priv);
ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
- intel_runtime_pm_restore_interrupts(dev);
+ intel_runtime_pm_enable_interrupts(dev_priv);
return ret;
}
@@ -1502,7 +1437,7 @@ static int intel_runtime_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ int ret = 0;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
@@ -1512,7 +1447,13 @@ static int intel_runtime_resume(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
- ret = intel_resume_prepare(dev_priv, true);
+ if (IS_GEN6(dev_priv))
+ intel_init_pch_refclk(dev);
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hsw_disable_pc8(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ ret = vlv_resume_prepare(dev_priv, true);
+
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
@@ -1520,8 +1461,8 @@ static int intel_runtime_resume(struct device *device)
i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev);
- intel_runtime_pm_restore_interrupts(dev);
- intel_reset_gt_powersave(dev);
+ intel_runtime_pm_enable_interrupts(dev_priv);
+ intel_enable_gt_powersave(dev);
if (ret)
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
@@ -1550,41 +1491,41 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
return ret;
}
-/*
- * This function implements common functionality of runtime and system
- * resume sequence. Variable rpm_resume used for implementing different
- * code paths.
- */
-static int intel_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- struct drm_device *dev = dev_priv->dev;
- int ret;
-
- if (IS_GEN6(dev))
- ret = snb_resume_prepare(dev_priv, rpm_resume);
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- ret = hsw_resume_prepare(dev_priv, rpm_resume);
- else if (IS_VALLEYVIEW(dev))
- ret = vlv_resume_prepare(dev_priv, rpm_resume);
- else
- ret = 0;
-
- return ret;
-}
-
static const struct dev_pm_ops i915_pm_ops = {
+ /*
+ * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+ * PMSG_RESUME]
+ */
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
- .freeze = i915_pm_freeze,
- .freeze_late = i915_pm_freeze_late,
- .thaw_early = i915_pm_thaw_early,
- .thaw = i915_pm_thaw,
- .poweroff = i915_pm_poweroff,
+
+ /*
+ * S4 event handlers
+ * @freeze, @freeze_late : called (1) before creating the
+ * hibernation image [PMSG_FREEZE] and
+ * (2) after rebooting, before restoring
+ * the image [PMSG_QUIESCE]
+ * @thaw, @thaw_early : called (1) after creating the hibernation
+ * image, before writing it [PMSG_THAW]
+ * and (2) after failing to create or
+ * restore the image [PMSG_RECOVER]
+ * @poweroff, @poweroff_late: called after writing the hibernation
+ * image, before rebooting [PMSG_HIBERNATE]
+ * @restore, @restore_early : called after rebooting and restoring the
+ * hibernation image [PMSG_RESTORE]
+ */
+ .freeze = i915_pm_suspend,
+ .freeze_late = i915_pm_suspend_late,
+ .thaw_early = i915_pm_resume_early,
+ .thaw = i915_pm_resume,
+ .poweroff = i915_pm_suspend,
+ .poweroff_late = i915_pm_suspend_late,
.restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
+
+ /* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend,
.runtime_resume = intel_runtime_resume,
};
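
The comment block above is the heart of this hunk: the S4 (hibernation) callbacks no longer get their own driver paths, every one of them is wired to the same suspend/resume pair used for S3. A minimal standalone sketch of that wiring, with hypothetical names (dev_suspend/dev_resume stand in for the real i915 handlers):

#include <stdio.h>

static int dev_suspend(void) { puts("save state, power down"); return 0; }
static int dev_resume(void)  { puts("restore state, power up"); return 0; }

struct pm_ops {
	int (*suspend)(void);	/* S3 entry       [PMSG_SUSPEND]   */
	int (*resume)(void);	/* S3 exit        [PMSG_RESUME]    */
	int (*freeze)(void);	/* before image   [PMSG_FREEZE]    */
	int (*thaw)(void);	/* after image    [PMSG_THAW]      */
	int (*poweroff)(void);	/* image written  [PMSG_HIBERNATE] */
	int (*restore)(void);	/* image restored [PMSG_RESTORE]   */
};

static const struct pm_ops ops = {
	.suspend  = dev_suspend,
	.resume   = dev_resume,
	.freeze   = dev_suspend,	/* hibernation reuses the S3 path */
	.thaw     = dev_resume,
	.poweroff = dev_suspend,
	.restore  = dev_resume,
};

int main(void)
{
	ops.freeze();	/* hibernate: quiesce before the image is created */
	ops.thaw();	/* image written (or creation failed): come back up */
	return 0;
}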
@@ -1626,12 +1567,10 @@ static struct drm_driver driver = {
.set_busid = drm_pci_set_busid,
/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
- .suspend = i915_suspend,
+ .suspend = i915_suspend_legacy,
.resume = i915_resume_legacy,
.device_is_agp = i915_driver_device_is_agp,
- .master_create = i915_master_create,
- .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
@@ -1645,7 +1584,7 @@ static struct drm_driver driver = {
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_map_offset = i915_gem_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 16a6f6d..63bcda5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,7 +55,10 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20140905"
+#define DRIVER_DATE "20141121"
+
+#undef WARN_ON
+#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
enum pipe {
INVALID_PIPE = -1,
@@ -76,6 +79,14 @@ enum transcoder {
};
#define transcoder_name(t) ((t) + 'A')
+/*
+ * This is the maximum (across all platforms) number of planes (primary +
+ * sprites) that can be active at the same time on one pipe.
+ *
+ * This value doesn't count the cursor plane.
+ */
+#define I915_MAX_PLANES 3
+
enum plane {
PLANE_A = 0,
PLANE_B,
@@ -202,10 +213,15 @@ enum intel_dpll_id {
/* real shared dpll ids must be >= 0 */
DPLL_ID_PCH_PLL_A = 0,
DPLL_ID_PCH_PLL_B = 1,
+ /* hsw/bdw */
DPLL_ID_WRPLL1 = 0,
DPLL_ID_WRPLL2 = 1,
+ /* skl */
+ DPLL_ID_SKL_DPLL1 = 0,
+ DPLL_ID_SKL_DPLL2 = 1,
+ DPLL_ID_SKL_DPLL3 = 2,
};
-#define I915_NUM_PLLS 2
+#define I915_NUM_PLLS 3
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@@ -216,16 +232,33 @@ struct intel_dpll_hw_state {
/* hsw, bdw */
uint32_t wrpll;
+
+ /* skl */
+ /*
+ * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
+ * lower part of ctrl1 and they get shifted into position when writing
+ * the register. This allows us to easily compare states when deciding
+ * whether a DPLL can be shared.
+ */
+ uint32_t ctrl1;
+ /* HDMI only, 0 when used for DP */
+ uint32_t cfgcr1, cfgcr2;
+};
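
A standalone sketch of the ctrl1 convention described in that comment. The 6-bit field width is stated above; the shift-by-(6 * id) placement is an assumption made here for illustration:

#include <assert.h>
#include <stdint.h>

/* Hypothetical: per-DPLL state kept in the low 6 bits, shifted into
 * its register field only when written, so stored states compare
 * directly regardless of which DPLL they end up on. */
static uint32_t ctrl1_field(uint32_t low6_state, int dpll_id)
{
	return (low6_state & 0x3f) << (6 * dpll_id);
}

int main(void)
{
	uint32_t state = 0x2a;	/* some 6-bit DPLL state */

	/* identical stored state, different register placement per DPLL */
	assert(ctrl1_field(state, 0) == 0x2a);
	assert(ctrl1_field(state, 2) == (0x2au << 12));
	return 0;
}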
+
+struct intel_shared_dpll_config {
+ unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+ struct intel_dpll_hw_state hw_state;
};
struct intel_shared_dpll {
- int refcount; /* count of number of CRTCs sharing this PLL */
+ struct intel_shared_dpll_config config;
+ struct intel_shared_dpll_config *new_config;
+
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
const char *name;
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
- struct intel_dpll_hw_state hw_state;
/* The mode_set hook is optional and should be used together with the
* intel_prepare_shared_dpll function. */
void (*mode_set)(struct drm_i915_private *dev_priv,
@@ -239,6 +272,11 @@ struct intel_shared_dpll {
struct intel_dpll_hw_state *hw_state);
};
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
/* Used by dp and fdi links */
struct intel_link_m_n {
uint32_t tu;
@@ -267,7 +305,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
#define DRIVER_PATCHLEVEL 0
#define WATCH_LISTS 0
-#define WATCH_GTT 0
struct opregion_header;
struct opregion_acpi;
@@ -290,12 +327,6 @@ struct intel_opregion {
struct intel_overlay;
struct intel_overlay_error_state;
-struct drm_local_map;
-
-struct drm_i915_master_private {
- struct drm_local_map *sarea;
- struct _drm_i915_sarea *sarea_priv;
-};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
@@ -426,6 +457,7 @@ struct drm_i915_error_state {
};
struct intel_connector;
+struct intel_encoder;
struct intel_crtc_config;
struct intel_plane_config;
struct intel_crtc;
@@ -452,7 +484,7 @@ struct drm_i915_display_funcs {
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
- struct drm_crtc *crtc,
+ struct intel_crtc *crtc,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
@@ -468,15 +500,14 @@ struct drm_i915_display_funcs {
struct intel_crtc_config *);
void (*get_plane_config)(struct intel_crtc *,
struct intel_plane_config *);
- int (*crtc_mode_set)(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *old_fb);
+ int (*crtc_compute_clock)(struct intel_crtc *crtc);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
- void (*write_eld)(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode);
+ void (*audio_codec_enable)(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode);
+ void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -494,7 +525,7 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
- int (*setup_backlight)(struct intel_connector *connector);
+ int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
uint32_t (*get_backlight)(struct intel_connector *connector);
void (*set_backlight)(struct intel_connector *connector,
uint32_t level);
@@ -533,6 +564,7 @@ struct intel_uncore {
unsigned fw_rendercount;
unsigned fw_mediacount;
+ unsigned fw_blittercount;
struct timer_list force_wake_timer;
};
@@ -551,6 +583,7 @@ struct intel_uncore {
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
+ func(is_skylake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
@@ -646,6 +679,7 @@ struct intel_context {
struct {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
+ int unpin_count;
} engine[I915_NUM_RINGS];
struct list_head link;
@@ -663,6 +697,18 @@ struct i915_fbc {
bool false_color;
+ /* Tracks whether the HW is actually enabled, not whether the feature is
+ * possible. */
+ bool enabled;
+
+ /* On gen8 some rings cannot perform the fbc clean operation, so for
+ * now we do it in software via mmio.
+ * This flag works in the opposite direction of ring->fbc_dirty: it
+ * tells the frontbuffer tracking code to perform the cache clean
+ * on the software side.
+ */
+ bool need_sw_cache_clean;
+
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
@@ -704,6 +750,7 @@ enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
+ PCH_SPT, /* Sunrisepoint PCH */
PCH_NOP,
};
@@ -717,6 +764,7 @@ enum intel_sbi_destination {
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
+#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
struct intel_fbdev;
struct intel_fbc_work;
@@ -768,7 +816,6 @@ struct i915_suspend_saved_registers {
u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
- u32 saveBLC_HIST_CTL_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
@@ -877,6 +924,7 @@ struct i915_suspend_saved_registers {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
+ u16 saveGCDGMBUS;
};
struct vlv_s0ix_state {
@@ -947,8 +995,12 @@ struct intel_rps_ei {
};
struct intel_gen6_power_mgmt {
- /* work and pm_iir are protected by dev_priv->irq_lock */
+ /*
+ * work, interrupts_enabled and pm_iir are protected by
+ * dev_priv->irq_lock
+ */
struct work_struct work;
+ bool interrupts_enabled;
u32 pm_iir;
/* Frequencies are stored in potentially platform dependent multiples.
@@ -1071,31 +1123,6 @@ struct i915_power_domains {
struct i915_power_well *power_wells;
};
-struct i915_dri1_state {
- unsigned allow_batchbuffer : 1;
- u32 __iomem *gfx_hws_cpu_addr;
-
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
-
- uint32_t counter;
-};
-
-struct i915_ums_state {
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int mm_suspended;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -1357,6 +1384,49 @@ struct ilk_wm_values {
enum intel_ddb_partitioning partitioning;
};
+struct skl_ddb_entry {
+ uint16_t start, end; /* in number of blocks, 'end' is exclusive */
+};
+
+static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+{
+ return entry->end - entry->start;
+}
+
+static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
+ const struct skl_ddb_entry *e2)
+{
+ if (e1->start == e2->start && e1->end == e2->end)
+ return true;
+
+ return false;
+}
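
A tiny standalone check of the two helpers above, restating the struct locally; the 'end is exclusive' convention is what makes end - start the size:

#include <assert.h>
#include <stdint.h>

struct skl_ddb_entry { uint16_t start, end; };	/* 'end' is exclusive */

static uint16_t ddb_entry_size(const struct skl_ddb_entry *e)
{
	return e->end - e->start;
}

int main(void)
{
	struct skl_ddb_entry a = { .start = 0,  .end = 64 };
	struct skl_ddb_entry b = { .start = 64, .end = 128 };

	assert(ddb_entry_size(&a) == 64);	/* blocks [0, 64) */
	assert(ddb_entry_size(&b) == 64);	/* blocks [64, 128) */
	assert(a.end == b.start);		/* adjacent, non-overlapping */
	return 0;
}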
+
+struct skl_ddb_allocation {
+ struct skl_ddb_entry pipe[I915_MAX_PIPES];
+ struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
+ struct skl_ddb_entry cursor[I915_MAX_PIPES];
+};
+
+struct skl_wm_values {
+ bool dirty[I915_MAX_PIPES];
+ struct skl_ddb_allocation ddb;
+ uint32_t wm_linetime[I915_MAX_PIPES];
+ uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
+ uint32_t cursor[I915_MAX_PIPES][8];
+ uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
+ uint32_t cursor_trans[I915_MAX_PIPES];
+};
+
+struct skl_wm_level {
+ bool plane_en[I915_MAX_PLANES];
+ bool cursor_en;
+ uint16_t plane_res_b[I915_MAX_PLANES];
+ uint8_t plane_res_l[I915_MAX_PLANES];
+ uint16_t cursor_res_b;
+ uint8_t cursor_res_l;
+};
+
/*
* This struct helps tracking the state needed for runtime PM, which puts the
* device in PCI D3 state. Notice that when this happens, nothing on the
@@ -1369,7 +1439,7 @@ struct ilk_wm_values {
*
* Our driver uses the autosuspend delay feature, which means we'll only really
* suspend if we stay with zero refcount for a certain amount of time. The
- * default value is currently very conservative (see intel_init_runtime_pm), but
+ * default value is currently very conservative (see intel_runtime_pm_enable), but
* it can be changed with the standard runtime PM files from sysfs.
*
* The irqs_disabled variable becomes true exactly after we disable the IRQs and
@@ -1382,7 +1452,7 @@ struct ilk_wm_values {
*/
struct i915_runtime_pm {
bool suspended;
- bool _irqs_disabled;
+ bool irqs_enabled;
};
enum intel_pipe_crc_source {
@@ -1426,6 +1496,20 @@ struct i915_frontbuffer_tracking {
unsigned flip_bits;
};
+struct i915_wa_reg {
+ u32 addr;
+ u32 value;
+ /* bitmask representing WA bits */
+ u32 mask;
+};
+
+#define I915_MAX_WA_REGS 16
+
+struct i915_workarounds {
+ struct i915_wa_reg reg[I915_MAX_WA_REGS];
+ u32 count;
+};
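
One plausible reading of the addr/value/mask triple, sketched standalone: 'mask' names the workaround bits, 'value' supplies their state, and everything outside the mask is preserved. How the real driver applies and verifies these via MMIO is not shown; the register address below is hypothetical:

#include <assert.h>
#include <stdint.h>

struct wa_reg { uint32_t addr, value, mask; };

/* Merge the WA bits into a register value, leaving other bits alone. */
static uint32_t apply_wa(uint32_t reg, const struct wa_reg *wa)
{
	return (reg & ~wa->mask) | (wa->value & wa->mask);
}

int main(void)
{
	struct wa_reg wa = { .addr = 0x7004, .value = 1u << 3, .mask = 1u << 3 };

	assert(apply_wa(0x0, &wa) == (1u << 3));	   /* WA bit set */
	assert(apply_wa(0xffffffffu, &wa) == 0xffffffffu); /* rest untouched */
	return 0;
}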
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1505,11 +1589,13 @@ struct drm_i915_private {
struct intel_opregion opregion;
struct intel_vbt_data vbt;
+ bool preserve_bios_swizzle;
+
/* overlay */
struct intel_overlay *overlay;
/* backlight registers and fields in struct intel_panel */
- spinlock_t backlight_lock;
+ struct mutex backlight_lock;
/* LVDS info */
bool no_aux_handshake;
@@ -1523,6 +1609,7 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int vlv_cdclk_freq;
+ unsigned int hpll_freq;
/**
* wq - Driver workqueue for GEM.
@@ -1568,19 +1655,7 @@ struct drm_i915_private {
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
- /*
- * workarounds are currently applied at different places and
- * changes are being done to consolidate them so exact count is
- * not clear at this point, use a max value for now.
- */
-#define I915_MAX_WA_REGS 16
- struct {
- u32 addr;
- u32 value;
- /* bitmask representing WA bits */
- u32 mask;
- } intel_wa_regs[I915_MAX_WA_REGS];
- u32 num_wa_regs;
+ struct i915_workarounds workarounds;
/* Reclocking support */
bool render_reclock_avail;
@@ -1644,9 +1719,25 @@ struct drm_i915_private {
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
+ /*
+ * Raw watermark memory latency values
+ * for SKL for all 8 levels
+ * in 1us units.
+ */
+ uint16_t skl_latency[8];
+
+ /*
+ * The skl_wm_values structure is a bit too big for stack
+ * allocation, so we keep the staging struct where we store
+ * intermediate results here instead.
+ */
+ struct skl_wm_values skl_results;
/* current hardware state */
- struct ilk_wm_values hw;
+ union {
+ struct ilk_wm_values hw;
+ struct skl_wm_values skl_hw;
+ };
} wm;
struct i915_runtime_pm pm;
@@ -1667,12 +1758,6 @@ struct drm_i915_private {
uint32_t bios_vgacntr;
- /* Old dri1 support infrastructure, beware the dragons ya fools entering
- * here! */
- struct i915_dri1_state dri1;
- /* Old ums support infrastructure, same warning applies. */
- struct i915_ums_state ums;
-
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -1830,8 +1915,6 @@ struct drm_i915_gem_object {
unsigned long gt_ro:1;
unsigned int cache_level:3;
- unsigned int has_aliasing_ppgtt_mapping:1;
- unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
@@ -1864,10 +1947,10 @@ struct drm_i915_gem_object {
unsigned long user_pin_count;
struct drm_file *pin_filp;
- /** for phy allocated objects */
- struct drm_dma_handle *phys_handle;
-
union {
+ /** for phy allocated objects */
+ struct drm_dma_handle *phys_handle;
+
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
@@ -2073,6 +2156,7 @@ struct drm_i915_cmd_table {
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2080,9 +2164,10 @@ struct drm_i915_cmd_table {
((INTEL_DEVID(dev) & 0xf) == 0x2 || \
(INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
+ (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
@@ -2103,6 +2188,7 @@ struct drm_i915_cmd_table {
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
+#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9)
#define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS)
@@ -2115,13 +2201,11 @@ struct drm_i915_cmd_table {
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
- to_i915(dev)->ellc_size)
+ __I915__(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) (i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
@@ -2154,13 +2238,15 @@ struct drm_i915_cmd_table {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
+#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2168,8 +2254,11 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
+#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -2189,8 +2278,8 @@ struct drm_i915_cmd_table {
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
-extern int i915_suspend(struct drm_device *dev, pm_message_t state);
-extern int i915_resume(struct drm_device *dev);
+extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_legacy(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -2227,8 +2316,6 @@ struct i915_params {
extern struct i915_params i915 __read_mostly;
/* i915_dma.c */
-void i915_update_dri1_breadcrumb(struct drm_device *dev);
-extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2242,9 +2329,6 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
-extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *box,
- int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2260,10 +2344,10 @@ __printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
const char *fmt, ...);
-void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
- int new_delay);
-extern void intel_irq_init(struct drm_device *dev);
-extern void intel_hpd_init(struct drm_device *dev);
+extern void intel_irq_init(struct drm_i915_private *dev_priv);
+extern void intel_hpd_init(struct drm_i915_private *dev_priv);
+int intel_irq_install(struct drm_i915_private *dev_priv);
+void intel_irq_uninstall(struct drm_i915_private *dev_priv);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
@@ -2283,10 +2367,19 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask);
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), 0)
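
The two macros are thin wrappers around the update function: enabling passes the bits as both the change mask and the enabled set, disabling passes them with an empty enabled set. A standalone sketch of that shape, assuming the usual IMR convention where a set bit masks (disables) the interrupt:

#include <assert.h>
#include <stdint.h>

static uint32_t sdeimr = 0xffffffffu;	/* all interrupts masked */

/* Only bits in interrupt_mask change; within those, a bit present in
 * enabled_irq_mask is cleared from the IMR (i.e. unmasked). */
static void display_interrupt_update(uint32_t interrupt_mask,
				     uint32_t enabled_irq_mask)
{
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);
}

#define enable_display_interrupt(bits)	display_interrupt_update((bits), (bits))
#define disable_display_interrupt(bits)	display_interrupt_update((bits), 0)

int main(void)
{
	enable_display_interrupt(1u << 0);
	assert((sdeimr & 1u) == 0);	/* bit clear in IMR == unmasked */
	disable_display_interrupt(1u << 0);
	assert((sdeimr & 1u) == 1);	/* masked again */
	return 0;
}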
/* i915_gem.c */
-int i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
@@ -2333,10 +2426,6 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -2379,7 +2468,6 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-void i915_gem_lastclose(struct drm_device *dev);
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
int *needs_clflush);
@@ -2413,8 +2501,9 @@ void i915_vma_move_to_active(struct i915_vma *vma,
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
/**
* Returns true if seq1 is later than seq2.
*/
@@ -2486,6 +2575,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+ unsigned reset_counter,
+ bool interruptible,
+ s64 *timeout,
+ struct drm_i915_file_private *file_priv);
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -2755,7 +2849,6 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
-struct intel_encoder;
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
@@ -2793,7 +2886,6 @@ static inline void intel_unregister_dsm_handler(void) { return; }
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -2804,7 +2896,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
+extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
@@ -2842,8 +2934,8 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
@@ -2873,7 +2965,9 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA (1 << 1)
-#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
+#define FORCEWAKE_BLITTER (1 << 2)
+#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
+ FORCEWAKE_BLITTER)
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
@@ -2939,6 +3033,11 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+ return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
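
The "+ 1" mirrors msecs_to_jiffies_timeout just above: integer conversion truncates, and the timer may fire up to one jiffy early, so one extra jiffy makes the requested duration a guaranteed lower bound. A standalone arithmetic sketch, assuming HZ=1000 (the HZ value and simplified conversion are assumptions here):

#include <assert.h>
#include <stdint.h>

#define HZ 1000
#define NSEC_PER_JIFFY (1000000000ULL / HZ)

static uint64_t nsecs_to_jiffies_timeout(uint64_t n)
{
	/* truncating divide, then one slack jiffy so we never wait short */
	return n / NSEC_PER_JIFFY + 1;
}

int main(void)
{
	assert(nsecs_to_jiffies_timeout(1500) == 1);	/* sub-jiffy waits still wait */
	assert(nsecs_to_jiffies_timeout(2000000) == 3);	/* 2 ms -> 3 jiffies */
	return 0;
}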
+
static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 28f91df..4a9faea6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -160,33 +160,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
}
int
-i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_init *args = data;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (args->gtt_start >= args->gtt_end ||
- (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
- return -EINVAL;
-
- /* GEM with user mode setting was never supported on ilk and later. */
- if (INTEL_INFO(dev)->gen >= 5)
- return -ENODEV;
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
- args->gtt_end);
- dev_priv->gtt.mappable_end = args->gtt_end;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -208,40 +181,137 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+static int
+i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
- drm_dma_handle_t *phys = obj->phys_handle;
+ struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+ char *vaddr = obj->phys_handle->vaddr;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int i;
- if (!phys)
- return;
+ if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+ return -EINVAL;
- if (obj->madv == I915_MADV_WILLNEED) {
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+ char *src;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ src = kmap_atomic(page);
+ memcpy(vaddr, src, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ kunmap_atomic(src);
+
+ page_cache_release(page);
+ vaddr += PAGE_SIZE;
+ }
+
+ i915_gem_chipset_flush(obj->base.dev);
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ sg = st->sgl;
+ sg->offset = 0;
+ sg->length = obj->base.size;
+
+ sg_dma_address(sg) = obj->phys_handle->busaddr;
+ sg_dma_len(sg) = obj->base.size;
+
+ obj->pages = st;
+ obj->has_dma_mapping = true;
+ return 0;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
+ WARN_ON(ret != -EIO);
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ if (obj->madv == I915_MADV_DONTNEED)
+ obj->dirty = 0;
+
+ if (obj->dirty) {
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
- char *vaddr = phys->vaddr;
+ char *vaddr = obj->phys_handle->vaddr;
int i;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
- struct page *page = shmem_read_mapping_page(mapping, i);
- if (!IS_ERR(page)) {
- char *dst = kmap_atomic(page);
- memcpy(dst, vaddr, PAGE_SIZE);
- drm_clflush_virt_range(dst, PAGE_SIZE);
- kunmap_atomic(dst);
-
- set_page_dirty(page);
+ struct page *page;
+ char *dst;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page))
+ continue;
+
+ dst = kmap_atomic(page);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ memcpy(dst, vaddr, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ set_page_dirty(page);
+ if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- page_cache_release(page);
- }
+ page_cache_release(page);
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(obj->base.dev);
+ obj->dirty = 0;
}
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
- drm_pci_free(obj->base.dev, phys);
- obj->phys_handle = NULL;
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+
+ obj->has_dma_mapping = false;
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+ drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+ .get_pages = i915_gem_object_get_pages_phys,
+ .put_pages = i915_gem_object_put_pages_phys,
+ .release = i915_gem_object_release_phys,
+};
+
+static int
+drop_pages(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma, *next;
+ int ret;
+
+ drm_gem_object_reference(&obj->base);
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ ret = i915_gem_object_put_pages(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ return ret;
}
int
@@ -249,9 +319,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
drm_dma_handle_t *phys;
- struct address_space *mapping;
- char *vaddr;
- int i;
+ int ret;
if (obj->phys_handle) {
if ((unsigned long)obj->phys_handle->vaddr & (align -1))
@@ -266,41 +334,19 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->base.filp == NULL)
return -EINVAL;
+ ret = drop_pages(obj);
+ if (ret)
+ return ret;
+
/* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
if (!phys)
return -ENOMEM;
- vaddr = phys->vaddr;
-#ifdef CONFIG_X86
- set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
-#endif
- mapping = file_inode(obj->base.filp)->i_mapping;
- for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
- struct page *page;
- char *src;
-
- page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page)) {
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
- drm_pci_free(obj->base.dev, phys);
- return PTR_ERR(page);
- }
-
- src = kmap_atomic(page);
- memcpy(vaddr, src, PAGE_SIZE);
- kunmap_atomic(src);
-
- mark_page_accessed(page);
- page_cache_release(page);
-
- vaddr += PAGE_SIZE;
- }
-
obj->phys_handle = phys;
- return 0;
+ obj->ops = &i915_gem_phys_ops;
+
+ return i915_gem_object_get_pages(obj);
}
static int
@@ -311,6 +357,14 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
+ int ret;
+
+ /* We manually control the domain here and pretend that it
+ * remains coherent i.e. in the GTT domain, like shmem_pwrite.
+ */
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -326,6 +380,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return -EFAULT;
}
+ drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(dev);
return 0;
}
@@ -346,6 +401,7 @@ static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
+ bool dumb,
uint32_t *handle_p)
{
struct drm_i915_gem_object *obj;
@@ -361,6 +417,7 @@ i915_gem_create(struct drm_file *file,
if (obj == NULL)
return -ENOMEM;
+ obj->base.dumb = dumb;
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
@@ -380,7 +437,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
- args->size, &args->handle);
+ args->size, true, &args->handle);
}
/**
@@ -393,7 +450,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_create *args = data;
return i915_gem_create(file, dev,
- args->size, &args->handle);
+ args->size, false, &args->handle);
}
static inline int
@@ -1046,11 +1103,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj->phys_handle) {
- ret = i915_gem_phys_pwrite(obj, args, file);
- goto out;
- }
-
if (obj->tiling_mode == I915_TILING_NONE &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
@@ -1060,8 +1112,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* textures). Fallback to the shmem path in that case. */
}
- if (ret == -EFAULT || ret == -ENOSPC)
- ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ if (ret == -EFAULT || ret == -ENOSPC) {
+ if (obj->phys_handle)
+ ret = i915_gem_phys_pwrite(obj, args, file);
+ else
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ }
out:
drm_gem_object_unreference(&obj->base);
@@ -1134,7 +1190,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
}
/**
- * __wait_seqno - wait until execution of seqno has finished
+ * __i915_wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: duh!
* @reset_counter: reset sequence associated with the given seqno
@@ -1151,7 +1207,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
* Returns 0 if the seqno was found within the alloted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
@@ -1171,7 +1227,8 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
- timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
+ timeout_expire = timeout ?
+ jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
@@ -1247,6 +1304,16 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
s64 tres = *timeout - (now - before);
*timeout = tres < 0 ? 0 : tres;
+
+ /*
+ * Apparently ktime isn't accurate enough and occasionally has a
+ * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+ * things up to make the test happy. We allow up to 1 jiffy.
+ *
+ * This is a regression from the timespec->ktime conversion.
+ */
+ if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
+ *timeout = 0;
}
return ret;
@@ -1262,6 +1329,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible = dev_priv->mm.interruptible;
+ unsigned reset_counter;
int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1275,14 +1343,13 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
if (ret)
return ret;
- return __wait_seqno(ring, seqno,
- atomic_read(&dev_priv->gpu_error.reset_counter),
- interruptible, NULL, NULL);
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
+ NULL, NULL);
}
static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
{
if (!obj->active)
return 0;
@@ -1319,7 +1386,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- return i915_gem_object_wait_rendering__tail(obj, ring);
+ return i915_gem_object_wait_rendering__tail(obj);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1354,12 +1421,13 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
+ ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
+ file_priv);
mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
- return i915_gem_object_wait_rendering__tail(obj, ring);
+ return i915_gem_object_wait_rendering__tail(obj);
}
/**
@@ -1466,6 +1534,16 @@ unlock:
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look at this function as an example of how to do GEM
+ * mmap support: please don't implement mmap support like this. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on; hiding
+ * the mmap call in a driver-private ioctl will break that. The i915 driver only
+ * does cpu mmaps this way because we didn't know better.
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
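
For contrast, the pattern the comment recommends, seen from the userspace side: fetch a fake offset with the mmap_gtt ioctl, then mmap() the DRM fd itself. A minimal sketch using the i915 uAPI types, with error handling trimmed:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>	/* drm_i915_gem_mmap_gtt, DRM_IOCTL_I915_GEM_MMAP_GTT */

static void *map_gtt(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	/* ask the driver for a fake offset into the DRM fd ... */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* ... and mmap the fd itself, where tools like valgrind can see it */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, (off_t)arg.offset);
}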
@@ -1762,10 +1840,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
drm_gem_free_mmap_offset(&obj->base);
}
-int
+static int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle,
+ uint32_t handle, bool dumb,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1782,6 +1860,13 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
+ /*
+ * We don't allow dumb mmaps on objects created using another
+ * interface.
+ */
+ WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
+ "Illegal dumb map of accelerated buffer.\n");
+
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
@@ -1806,6 +1891,15 @@ unlock:
return ret;
}
+int
+i915_gem_dumb_map_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ return i915_gem_mmap_gtt(file, dev, handle, true, offset);
+}
+
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
@@ -1827,7 +1921,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+ return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
}
static inline int
@@ -1945,7 +2039,14 @@ unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
long target, unsigned flags)
{
- const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
+ const struct {
+ struct list_head *list;
+ unsigned int bit;
+ } phases[] = {
+ { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+ { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+ { NULL, 0 },
+ }, *phase;
unsigned long count = 0;
/*
@@ -1967,48 +2068,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* dev->struct_mutex and so we won't ever be able to observe an
* object on the bound_list with a reference count equals 0.
*/
- if (flags & I915_SHRINK_UNBOUND) {
+ for (phase = phases; phase->list; phase++) {
struct list_head still_in_list;
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&dev_priv->mm.unbound_list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
-
- if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
- continue;
-
- drm_gem_object_reference(&obj->base);
-
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- drm_gem_object_unreference(&obj->base);
- }
- list_splice(&still_in_list, &dev_priv->mm.unbound_list);
- }
-
- if (flags & I915_SHRINK_BOUND) {
- struct list_head still_in_list;
+ if ((flags & phase->bit) == 0)
+ continue;
INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+ while (count < target && !list_empty(phase->list)) {
struct drm_i915_gem_object *obj;
struct i915_vma *vma, *v;
- obj = list_first_entry(&dev_priv->mm.bound_list,
+ obj = list_first_entry(phase->list,
typeof(*obj), global_list);
list_move_tail(&obj->global_list, &still_in_list);
- if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ if (flags & I915_SHRINK_PURGEABLE &&
+ !i915_gem_object_is_purgeable(obj))
continue;
drm_gem_object_reference(&obj->base);
- list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+ /* For the unbound phase, this should be a no-op! */
+ list_for_each_entry_safe(vma, v,
+ &obj->vma_list, vma_link)
if (i915_vma_unbind(vma))
break;
@@ -2017,7 +2100,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
drm_gem_object_unreference(&obj->base);
}
- list_splice(&still_in_list, &dev_priv->mm.bound_list);
+ list_splice(&still_in_list, phase->list);
}
return count;
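The refactor above replaces two copy-pasted list walks with a table of phases; a self-contained sketch of the pattern (all names illustrative, not the driver's own):

#include <stdio.h>

#define SHRINK_UNBOUND	(1 << 0)
#define SHRINK_BOUND	(1 << 1)

struct phase {
	const char *name;	/* stands in for the object list head */
	unsigned int bit;	/* flag that enables this phase */
};

static void shrink(unsigned int flags)
{
	static const struct phase phases[] = {
		{ "unbound", SHRINK_UNBOUND },
		{ "bound",   SHRINK_BOUND },
		{ NULL, 0 },	/* sentinel, as in the patch */
	};
	const struct phase *phase;

	for (phase = phases; phase->name; phase++) {
		if ((flags & phase->bit) == 0)
			continue;	/* phase not requested */
		printf("scanning %s list\n", phase->name);
	}
}

int main(void)
{
	shrink(SHRINK_UNBOUND | SHRINK_BOUND);
	return 0;
}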
@@ -2122,6 +2205,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ i915_gem_object_pin_pages(obj);
+
return 0;
err_pages:
@@ -2420,15 +2507,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
ring->outstanding_lazy_seqno = 0;
ring->preallocated_lazy_request = NULL;
- if (!dev_priv->ums.mm_suspended) {
- i915_queue_hangcheck(ring->dev);
+ i915_queue_hangcheck(ring->dev);
- cancel_delayed_work_sync(&dev_priv->mm.idle_work);
- queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
- intel_mark_busy(dev_priv->dev);
- }
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
+ intel_mark_busy(dev_priv->dev);
if (out_seqno)
*out_seqno = request->seqno;
@@ -2495,12 +2580,20 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
+ struct intel_context *ctx = request->ctx;
+
list_del(&request->list);
i915_gem_request_remove_from_client(request);
- if (request->ctx)
- i915_gem_context_unreference(request->ctx);
+ if (ctx) {
+ if (i915.enable_execlists) {
+ struct intel_engine_cs *ring = request->ring;
+ if (ctx != ring->default_context)
+ intel_lr_context_unpin(ring, ctx);
+ }
+ i915_gem_context_unreference(ctx);
+ }
kfree(request);
}
@@ -2555,6 +2648,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
}
/*
+ * Clear the execlists queue up before freeing the requests, as those
+ * are the ones that keep the context and ringbuffer backing objects
+ * pinned in place.
+ */
+ while (!list_empty(&ring->execlist_queue)) {
+ struct intel_ctx_submit_request *submit_req;
+
+ submit_req = list_first_entry(&ring->execlist_queue,
+ struct intel_ctx_submit_request,
+ execlist_link);
+ list_del(&submit_req->execlist_link);
+ intel_runtime_pm_put(dev_priv);
+ i915_gem_context_unreference(submit_req->ctx);
+ kfree(submit_req);
+ }
+
+ /*
* We must free the requests after all the corresponding objects have
* been moved off active lists. Which is the same order as the normal
* retire_requests function does. This is important if object hold
@@ -2571,18 +2681,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_free_request(request);
}
- while (!list_empty(&ring->execlist_queue)) {
- struct intel_ctx_submit_request *submit_req;
-
- submit_req = list_first_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
- execlist_link);
- list_del(&submit_req->execlist_link);
- intel_runtime_pm_put(dev_priv);
- i915_gem_context_unreference(submit_req->ctx);
- kfree(submit_req);
- }
-
/* These may not have been flushed before the reset, do so now */
kfree(ring->preallocated_lazy_request);
ring->preallocated_lazy_request = NULL;
@@ -2719,6 +2817,15 @@ i915_gem_retire_requests(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) {
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
+ if (i915.enable_execlists) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->execlist_lock, flags);
+ idle &= list_empty(&ring->execlist_queue);
+ spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+ intel_execlists_retire_requests(ring);
+ }
}
if (idle)
@@ -2811,6 +2918,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 seqno = 0;
int ret = 0;
+ if (args->flags != 0)
+ return -EINVAL;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -2846,8 +2956,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
- file->driver_priv);
+ return __i915_wait_seqno(ring, seqno, reset_counter, true,
+ &args->timeout_ns, file->driver_priv);
out:
drm_gem_object_unreference(&obj->base);
@@ -3166,6 +3276,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
obj->stride, obj->tiling_mode);
switch (INTEL_INFO(dev)->gen) {
+ case 9:
case 8:
case 7:
case 6:
@@ -3384,46 +3495,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
return true;
}
-static void i915_gem_verify_gtt(struct drm_device *dev)
-{
-#if WATCH_GTT
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
- int err = 0;
-
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
- if (obj->gtt_space == NULL) {
- printk(KERN_ERR "object found on GTT list with no space reserved\n");
- err++;
- continue;
- }
-
- if (obj->cache_level != obj->gtt_space->color) {
- printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
- i915_gem_obj_ggtt_offset(obj),
- i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
- obj->cache_level,
- obj->gtt_space->color);
- err++;
- continue;
- }
-
- if (!i915_gem_valid_gtt_space(dev,
- obj->gtt_space,
- obj->cache_level)) {
- printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
- i915_gem_obj_ggtt_offset(obj),
- i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
- obj->cache_level);
- err++;
- continue;
- }
- }
-
- WARN_ON(err);
-#endif
-}
-
/**
* Finds free space in the GTT aperture and binds the object there.
*/
@@ -3514,25 +3585,10 @@ search_free:
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
- if (i915_is_ggtt(vm)) {
- bool mappable, fenceable;
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
-
- mappable = (vma->node.start + obj->base.size <=
- dev_priv->gtt.mappable_end);
-
- obj->map_and_fenceable = mappable && fenceable;
- }
-
- WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-
trace_i915_vma_bind(vma, flags);
vma->bind_vma(vma, obj->cache_level,
- flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+ flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
- i915_gem_verify_gtt(dev);
return vma;
err_remove_node:
@@ -3560,7 +3616,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
- if (obj->stolen)
+ if (obj->stolen || obj->phys_handle)
return false;
/* If the GPU is snooping the contents of the CPU cache,
@@ -3739,7 +3795,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node))
vma->bind_vma(vma, cache_level,
- obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+ vma->bound & GLOBAL_BIND);
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3769,7 +3825,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
old_write_domain);
}
- i915_gem_verify_gtt(dev);
return 0;
}
@@ -4067,7 +4122,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -4101,6 +4156,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
+ unsigned bound;
int ret;
if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
@@ -4109,6 +4165,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
+ if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
+ return -EINVAL;
+
vma = i915_gem_obj_to_vma(obj, vm);
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
@@ -4130,15 +4189,39 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
}
+ bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
- if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+ if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ if ((bound ^ vma->bound) & GLOBAL_BIND) {
+ bool mappable, fenceable;
+ u32 fence_size, fence_alignment;
+
+ fence_size = i915_gem_get_gtt_size(obj->base.dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+ obj->base.size,
+ obj->tiling_mode,
+ true);
+
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
+
+ mappable = (vma->node.start + obj->base.size <=
+ dev_priv->gtt.mappable_end);
+
+ obj->map_and_fenceable = mappable && fenceable;
+ }
+
+ WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+
vma->pin_count++;
if (flags & PIN_MAPPABLE)
obj->pin_mappable |= true;
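The recomputation above only fires when the GGTT binding changes; a standalone sketch of the two predicates it combines (plain integers stand in for the driver structures):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool map_and_fenceable(uint64_t start, uint64_t node_size,
			      uint64_t obj_size, uint64_t fence_size,
			      uint64_t fence_align, uint64_t mappable_end)
{
	/* fenceable: node is exactly fence sized and fence aligned */
	bool fenceable = node_size == fence_size &&
			 (start & (fence_align - 1)) == 0;
	/* mappable: object ends inside the CPU visible aperture */
	bool mappable = start + obj_size <= mappable_end;

	return fenceable && mappable;
}

int main(void)
{
	/* 1 MB object at offset 0 in a 256 MB aperture: both hold */
	assert(map_and_fenceable(0, 1 << 20, 1 << 20, 1 << 20,
				 4096, 256 << 20));
	return 0;
}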
@@ -4193,7 +4276,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;
- if (INTEL_INFO(dev)->gen >= 6)
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
ret = i915_mutex_lock_interruptible(dev);
@@ -4249,6 +4332,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -4326,6 +4412,7 @@ int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -4353,6 +4440,15 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ if (obj->pages &&
+ obj->tiling_mode != I915_TILING_NONE &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ if (obj->madv == I915_MADV_WILLNEED)
+ i915_gem_object_unpin_pages(obj);
+ if (args->madv == I915_MADV_WILLNEED)
+ i915_gem_object_pin_pages(obj);
+ }
+
if (obj->madv != __I915_MADV_PURGED)
obj->madv = args->madv;
@@ -4495,8 +4591,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
}
}
- i915_gem_object_detach_phys(obj);
-
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
@@ -4504,6 +4598,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
WARN_ON(obj->frontbuffer_bits);
+ if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
+ obj->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_unpin_pages(obj);
+
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
@@ -4576,9 +4675,6 @@ i915_gem_suspend(struct drm_device *dev)
int ret = 0;
mutex_lock(&dev->struct_mutex);
- if (dev_priv->ums.mm_suspended)
- goto err;
-
ret = i915_gpu_idle(dev);
if (ret)
goto err;
@@ -4589,15 +4685,7 @@ i915_gem_suspend(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
- i915_kernel_lost_context(dev);
i915_gem_stop_ringbuffers(dev);
-
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- * And not confound ums.mm_suspended!
- */
- dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
- DRIVER_MODESET);
mutex_unlock(&dev->struct_mutex);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
@@ -4888,9 +4976,6 @@ int i915_gem_init(struct drm_device *dev)
}
mutex_unlock(&dev->struct_mutex);
- /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- dev_priv->dri1.allow_batchbuffer = 1;
return ret;
}
@@ -4905,74 +4990,6 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
dev_priv->gt.cleanup_ring(ring);
}
-int
-i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
-
- if (i915_reset_in_progress(&dev_priv->gpu_error)) {
- DRM_ERROR("Reenabling wedged hardware, good luck\n");
- atomic_set(&dev_priv->gpu_error.reset_counter, 0);
- }
-
- mutex_lock(&dev->struct_mutex);
- dev_priv->ums.mm_suspended = 0;
-
- ret = i915_gem_init_hw(dev);
- if (ret != 0) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
-
- ret = drm_irq_install(dev, dev->pdev->irq);
- if (ret)
- goto cleanup_ringbuffer;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-
-cleanup_ringbuffer:
- i915_gem_cleanup_ringbuffer(dev);
- dev_priv->ums.mm_suspended = 1;
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-int
-i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
-
- mutex_lock(&dev->struct_mutex);
- drm_irq_uninstall(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return i915_gem_suspend(dev);
-}
-
-void
-i915_gem_lastclose(struct drm_device *dev)
-{
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- ret = i915_gem_suspend(dev);
- if (ret)
- DRM_ERROR("failed to idle hardware: %d\n", ret);
-}
-
static void
init_ring_lists(struct intel_engine_cs *ring)
{
@@ -5119,6 +5136,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return ret;
}
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * @old: current GEM buffer for the frontbuffer slots
+ * @new: new GEM buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits)
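A minimal sketch of the bit handoff described above, with simplified standalone types rather than the driver's own:

#include <assert.h>

struct gem_obj { unsigned int frontbuffer_bits; };

/* clear the slots' bits on the old buffer, set them on the new one */
static void track_fb(struct gem_obj *old, struct gem_obj *new_obj,
		     unsigned int bits)
{
	if (old)
		old->frontbuffer_bits &= ~bits;
	if (new_obj)
		new_obj->frontbuffer_bits |= bits;
}

int main(void)
{
	struct gem_obj a = { 0x1 }, b = { 0 };

	track_fb(&a, &b, 0x1);
	assert(a.frontbuffer_bits == 0 && b.frontbuffer_bits == 0x1);
	return 0;
}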
@@ -5302,7 +5328,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
unsigned long timeout = msecs_to_jiffies(5000) + 1;
- unsigned long pinned, bound, unbound, freed;
+ unsigned long pinned, bound, unbound, freed_pages;
bool was_interruptible;
bool unlock;
@@ -5319,7 +5345,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- freed = i915_gem_shrink_all(dev_priv);
+ freed_pages = i915_gem_shrink_all(dev_priv);
dev_priv->mm.interruptible = was_interruptible;
@@ -5350,14 +5376,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
if (unlock)
mutex_unlock(&dev->struct_mutex);
- pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
- freed, pinned);
+ if (freed_pages || unbound || bound)
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed_pages << PAGE_SHIFT, pinned);
if (unbound || bound)
pr_err("%lu and %lu bytes still available in the "
"bound and unbound GPU page lists.\n",
bound, unbound);
- *(unsigned long *)ptr += freed;
+ *(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5221d8..d17ff43 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -88,6 +88,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_trace.h"
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
@@ -137,6 +138,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
+ trace_i915_context_free(ctx);
+
if (i915.enable_execlists)
intel_lr_context_free(ctx);
@@ -274,6 +277,8 @@ i915_gem_create_context(struct drm_device *dev,
ctx->ppgtt = ppgtt;
}
+ trace_i915_context_create(ctx);
+
return ctx;
err_unpin:
@@ -522,6 +527,7 @@ static int do_switch(struct intel_engine_cs *ring,
struct intel_context *from = ring->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
+ struct i915_vma *vma;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -548,6 +554,7 @@ static int do_switch(struct intel_engine_cs *ring,
from = ring->last_context;
if (to->ppgtt) {
+ trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
@@ -571,11 +578,10 @@ static int do_switch(struct intel_engine_cs *ring,
if (ret)
goto unpin_out;
- if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
- struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
- &dev_priv->gtt.base);
- vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
- }
+ vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
+ if (!(vma->bound & GLOBAL_BIND))
+ vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
+ GLOBAL_BIND);
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
@@ -629,7 +635,7 @@ done:
if (uninitialized) {
if (ring->init_context) {
- ret = ring->init_context(ring);
+ ret = ring->init_context(ring, to);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1a0611b..f06027b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,6 +121,9 @@ eb_lookup_vmas(struct eb_vmas *eb,
goto err;
}
+ WARN_ONCE(obj->base.dumb,
+ "GPU use of dumb buffer is illegal.\n");
+
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->obj_exec_link, &objects);
}
@@ -357,12 +360,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- !target_i915_obj->has_global_gtt_mapping)) {
- struct i915_vma *vma =
- list_first_entry(&target_i915_obj->vma_list,
- typeof(*vma), vma_link);
- vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
- }
+ !(target_vma->bound & GLOBAL_BIND)))
+ target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
+ GLOBAL_BIND);
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -531,7 +531,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
flags = 0;
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
- flags |= PIN_MAPPABLE;
+ flags |= PIN_GLOBAL | PIN_MAPPABLE;
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -1023,6 +1023,47 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
+static int
+i915_emit_box(struct intel_engine_cs *ring,
+ struct drm_clip_rect *box,
+ int DR1, int DR4)
+{
+ int ret;
+
+ if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+ box->y2 <= 0 || box->x2 <= 0) {
+ DRM_ERROR("Bad box %d,%d..%d,%d\n",
+ box->x1, box->y1, box->x2, box->y2);
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(ring->dev)->gen >= 4) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
+ intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+ intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+ intel_ring_emit(ring, DR4);
+ } else {
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
+ intel_ring_emit(ring, DR1);
+ intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+ intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+ intel_ring_emit(ring, DR4);
+ intel_ring_emit(ring, 0);
+ }
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
@@ -1151,7 +1192,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
- ret = i915_emit_box(dev, &cliprects[i],
+ ret = i915_emit_box(ring, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
@@ -1300,12 +1341,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
- if (dev_priv->ums.mm_suspended) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EBUSY;
- goto pre_mutex_err;
- }
-
ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
@@ -1368,17 +1403,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
batch_obj,
args->batch_start_offset,
file->is_master);
- if (ret)
- goto err;
-
- /*
- * XXX: Actually do this when enabling batch copy...
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
- * from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically don't
- * want that set when the command parser is enabled.
- */
+ if (ret) {
+ if (ret != -EACCES)
+ goto err;
+ } else {
+ /*
+ * XXX: Actually do this when enabling batch copy...
+ *
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+ * from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically don't
+ * want that set when the command parser is enabled.
+ */
+ }
}
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 728938f..171f6ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -35,13 +35,26 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
- if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+ bool has_aliasing_ppgtt;
+ bool has_full_ppgtt;
+
+ has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
+ has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+ if (IS_GEN8(dev))
+ has_full_ppgtt = false; /* XXX why? */
+
+ /*
+ * We don't allow disabling PPGTT for gen9+ as it's a requirement for
+ * execlists, the sole mechanism available to submit work.
+ */
+ if (INTEL_INFO(dev)->gen < 9 &&
+ (enable_ppgtt == 0 || !has_aliasing_ppgtt))
return 0;
if (enable_ppgtt == 1)
return 1;
- if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+ if (enable_ppgtt == 2 && has_full_ppgtt)
return 2;
#ifdef CONFIG_INTEL_IOMMU
@@ -59,7 +72,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return 0;
}
- return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
+ return has_aliasing_ppgtt ? 1 : 0;
}
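For readability, a standalone mirror of the selection logic above; the VT-d/IOMMU special cases in the elided middle of the function are left out of this sketch:

#include <assert.h>

/* 0 = disabled, 1 = aliasing ppgtt, 2 = full ppgtt */
static int ppgtt_mode(int gen, int enable_ppgtt)
{
	int has_aliasing = gen >= 6;
	int has_full = gen >= 7 && gen != 8;	/* full ppgtt off on gen8 */

	/* gen9+ must keep ppgtt: execlists is the only submission path */
	if (gen < 9 && (enable_ppgtt == 0 || !has_aliasing))
		return 0;
	if (enable_ppgtt == 1)
		return 1;
	if (enable_ppgtt == 2 && has_full)
		return 2;
	return has_aliasing ? 1 : 0;
}

int main(void)
{
	assert(ppgtt_mode(5, 2) == 0);	/* no ppgtt before gen6 */
	assert(ppgtt_mode(8, 2) == 1);	/* gen8 falls back to aliasing */
	assert(ppgtt_mode(9, 0) == 1);	/* gen9 ignores the disable */
	return 0;
}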
@@ -156,9 +169,6 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
- /* Mark the page as writeable. Other platforms don't have a
- * setting for read-only/writable, so this matches that behavior.
- */
if (!(flags & PTE_READ_ONLY))
pte |= BYT_PTE_WRITEABLE;
@@ -1092,7 +1102,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
- else if (IS_GEN8(dev))
+ else if (IS_GEN8(dev) || IS_GEN9(dev))
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
BUG();
@@ -1166,6 +1176,8 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
ppgtt->file_priv = fpriv;
+ trace_i915_ppgtt_create(&ppgtt->base);
+
return ppgtt;
}
@@ -1174,6 +1186,8 @@ void i915_ppgtt_release(struct kref *kref)
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
+ trace_i915_ppgtt_release(&ppgtt->base);
+
/* vmas should already be unbound */
WARN_ON(!list_empty(&ppgtt->base.active_list));
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
@@ -1258,7 +1272,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
fault_reg = I915_READ(RING_FAULT_REG(ring));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\\n"
+ "\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
"\tSource ID: %d\n"
"\tType: %d\n",
@@ -1328,7 +1342,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
*/
- obj->has_global_gtt_mapping = 0;
+ vma->bound &= ~GLOBAL_BIND;
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
}
@@ -1525,7 +1539,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
BUG_ON(!i915_is_ggtt(vma->vm));
intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
- vma->obj->has_global_gtt_mapping = 1;
+ vma->bound = GLOBAL_BIND;
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1544,7 +1558,7 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
BUG_ON(!i915_is_ggtt(vma->vm));
- vma->obj->has_global_gtt_mapping = 0;
+ vma->bound = 0;
intel_gtt_clear_range(first, size);
}
@@ -1572,24 +1586,24 @@ static void ggtt_bind_vma(struct i915_vma *vma,
* flags. At all other times, the GPU will use the aliasing PPGTT.
*/
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
- if (!obj->has_global_gtt_mapping ||
+ if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->node.start,
cache_level, flags);
- obj->has_global_gtt_mapping = 1;
+ vma->bound |= GLOBAL_BIND;
}
}
if (dev_priv->mm.aliasing_ppgtt &&
- (!obj->has_aliasing_ppgtt_mapping ||
+ (!(vma->bound & LOCAL_BIND) ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
vma->node.start,
cache_level, flags);
- vma->obj->has_aliasing_ppgtt_mapping = 1;
+ vma->bound |= LOCAL_BIND;
}
}
@@ -1599,21 +1613,21 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
- if (obj->has_global_gtt_mapping) {
+ if (vma->bound & GLOBAL_BIND) {
vma->vm->clear_range(vma->vm,
vma->node.start,
obj->base.size,
true);
- obj->has_global_gtt_mapping = 0;
+ vma->bound &= ~GLOBAL_BIND;
}
- if (obj->has_aliasing_ppgtt_mapping) {
+ if (vma->bound & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.clear_range(&appgtt->base,
vma->node.start,
obj->base.size,
true);
- obj->has_aliasing_ppgtt_mapping = 0;
+ vma->bound &= ~LOCAL_BIND;
}
}
@@ -1650,10 +1664,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
}
}
-int i915_gem_setup_global_gtt(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
+static int i915_gem_setup_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
/* Let GEM Manage all of the aperture.
*
@@ -1691,7 +1705,7 @@ int i915_gem_setup_global_gtt(struct drm_device *dev,
DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
return ret;
}
- obj->has_global_gtt_mapping = 1;
+ vma->bound |= GLOBAL_BIND;
}
dev_priv->gtt.base.start = start;
@@ -1764,7 +1778,6 @@ static int setup_scratch_page(struct drm_device *dev)
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
- get_page(page);
set_pages_uc(page, 1);
#ifdef CONFIG_INTEL_IOMMU
@@ -1789,7 +1802,6 @@ static void teardown_scratch_page(struct drm_device *dev)
set_pages_wb(page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(page);
__free_page(page);
}
@@ -1859,6 +1871,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
return (gmch_ctrl - 0x17 + 9) << 22;
}
+static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
+{
+ gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+ gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
+
+ if (gen9_gmch_ctl < 0xf0)
+ return gen9_gmch_ctl << 25; /* 32 MB units */
+ else
+ /* 4MB increments starting at 0xf0 (0xf0 itself = 4MB) */
+ return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+}
+
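A quick standalone check of the decode above — values below 0xf0 count 32 MB units, values from 0xf0 up count 4 MB increments:

#include <assert.h>
#include <stddef.h>

/* standalone copy of the decode, post shift/mask, for illustration */
static size_t gen9_stolen_size(unsigned int gms)
{
	if (gms < 0xf0)
		return (size_t)gms << 25;		/* 32 MB units */
	return (size_t)(gms - 0xf0 + 1) << 22;		/* 4 MB units */
}

int main(void)
{
	assert(gen9_stolen_size(0x01) == 32u << 20);	/* 32 MB */
	assert(gen9_stolen_size(0xf0) == 4u << 20);	/* 4 MB */
	assert(gen9_stolen_size(0xf1) == 8u << 20);	/* 8 MB */
	return 0;
}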
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
@@ -1934,9 +1958,17 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
* Only the snoop bit has meaning for CHV, the rest is
* ignored.
*
- * Note that the harware enforces snooping for all page
- * table accesses. The snoop bit is actually ignored for
- * PDEs.
+ * The hardware will never snoop for certain types of accesses:
+ * - CPU GTT (GMADR->GGTT->no snoop->memory)
+ * - PPGTT page tables
+ * - some other special cycles
+ *
+ * As with BDW, we also need to consider the following for GT accesses:
+ * "For GGTT, there is NO pat_sel[2:0] from the entry,
+ * so RTL will always use the value corresponding to
+ * pat_sel = 000".
+ * Which means we must set the snoop bit in PAT entry 0
+ * in order to keep the global status page working.
*/
pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
GEN8_PPAT(1, 0) |
@@ -1971,7 +2003,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (IS_CHERRYVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ *stolen = gen9_get_stolen_size(snb_gmch_ctl);
+ gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ } else if (IS_CHERRYVIEW(dev)) {
*stolen = chv_get_stolen_size(snb_gmch_ctl);
gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
@@ -2143,6 +2178,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
vma->obj = obj;
switch (INTEL_INFO(vm->dev)->gen) {
+ case 9:
case 8:
case 7:
case 6:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d5c14af..beaf4bc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -123,6 +123,12 @@ struct i915_vma {
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ /** Flags and address space this VMA is bound to */
+#define GLOBAL_BIND (1<<0)
+#define LOCAL_BIND (1<<1)
+#define PTE_READ_ONLY (1<<2)
+ unsigned int bound : 4;
+
/** This object's place on the active/inactive lists */
struct list_head mm_list;
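The binding state moves from per-object booleans to this per-VMA field; a simplified illustration of the check-before-bind pattern the rest of the series uses (standalone types, not the driver's):

#include <assert.h>

#define GLOBAL_BIND	(1 << 0)
#define LOCAL_BIND	(1 << 1)

struct vma { unsigned int bound : 4; };

/* bind into the GGTT only when requested and not already bound */
static int needs_global_bind(const struct vma *vma, unsigned int flags)
{
	return (flags & GLOBAL_BIND) && !(vma->bound & GLOBAL_BIND);
}

int main(void)
{
	struct vma vma = { .bound = 0 };

	assert(needs_global_bind(&vma, GLOBAL_BIND));
	vma.bound |= GLOBAL_BIND;
	assert(!needs_global_bind(&vma, GLOBAL_BIND));
	return 0;
}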
@@ -155,8 +161,6 @@ struct i915_vma {
* setting the valid PTE entries to a reserved scratch page. */
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
-#define GLOBAL_BIND (1<<0)
-#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
@@ -270,8 +274,6 @@ struct i915_hw_ppgtt {
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
-int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
- unsigned long mappable_end, unsigned long end);
void i915_global_gtt_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index a9a62d7..98dcd94 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -38,6 +38,8 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return &gen7_null_state;
case 8:
return &gen8_null_state;
+ case 9:
+ return &gen9_null_state;
}
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 85fda6b..a204584 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
r = devm_request_mem_region(dev->dev, base + 1,
dev_priv->gtt.stolen_size - 1,
"Graphics Stolen Memory");
- if (r == NULL) {
+ /*
+ * GEN3 firmware likes to smash pci bridges into the stolen
+ * range. Apparently this works.
+ */
+ if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)dev_priv->gtt.stolen_size);
base = 0;
@@ -533,7 +537,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
}
}
- obj->has_global_gtt_mapping = 1;
+ vma->bound |= GLOBAL_BIND;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 2b1eaa2..4727a4e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -102,22 +102,33 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
- uint32_t dimm_c0, dimm_c1;
- dimm_c0 = I915_READ(MAD_DIMM_C0);
- dimm_c1 = I915_READ(MAD_DIMM_C1);
- dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- /* Enable swizzling when the channels are populated with
- * identically sized dimms. We don't need to check the 3rd
- * channel because no cpu with gpu attached ships in that
- * configuration. Also, swizzling only makes sense for 2
- * channels anyway. */
- if (dimm_c0 == dimm_c1) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
+ if (dev_priv->preserve_bios_swizzle) {
+ if (I915_READ(DISP_ARB_CTL) &
+ DISP_TILE_SURFACE_SWIZZLING) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
} else {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated
+ * with identically sized dimms. We don't need to check
+ * the 3rd channel because no cpu with gpu attached
+ * ships in that configuration. Also, swizzling only
+ * makes sense for 2 channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
}
} else if (IS_GEN5(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
@@ -167,6 +178,15 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
break;
}
+
+ /* check for L-shaped memory aka modified enhanced addressing */
+ if (IS_GEN4(dev)) {
+ uint32_t ddc2 = I915_READ(DCC2);
+
+ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+ dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ }
+
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
@@ -369,6 +389,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
ret = i915_gem_object_ggtt_unbind(obj);
if (ret == 0) {
+ if (obj->pages &&
+ obj->madv == I915_MADV_WILLNEED &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ if (args->tiling_mode == I915_TILING_NONE)
+ i915_gem_object_unpin_pages(obj);
+ if (obj->tiling_mode == I915_TILING_NONE)
+ i915_gem_object_pin_pages(obj);
+ }
+
obj->fence_dirty =
obj->last_fenced_seqno ||
obj->fence_reg != I915_FENCE_REG_NONE;
@@ -434,6 +463,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+ args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2c87a79..cdaee6c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -242,11 +242,15 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
- struct drm_i915_error_ring *ring)
+ struct drm_i915_error_state *error,
+ int ring_idx)
{
+ struct drm_i915_error_ring *ring = &error->ring[ring_idx];
+
if (!ring->valid)
return;
+ err_printf(m, "%s command stream:\n", ring_str(ring_idx));
err_printf(m, " HEAD: 0x%08x\n", ring->head);
err_printf(m, " TAIL: 0x%08x\n", ring->tail);
err_printf(m, " CTL: 0x%08x\n", ring->ctl);
@@ -388,10 +392,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- err_printf(m, "%s command stream:\n", ring_str(i));
- i915_ring_error_state(m, dev, &error->ring[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++)
+ i915_ring_error_state(m, dev, error, i);
for (i = 0; i < error->vm_count; i++) {
err_printf(m, "vm[%d]\n", i);
@@ -565,6 +567,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
struct i915_address_space *vm)
{
struct drm_i915_error_object *dst;
+ struct i915_vma *vma = NULL;
int num_pages;
bool use_ggtt;
int i = 0;
@@ -585,16 +588,17 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
dst->gtt_offset = -1;
reloc_offset = dst->gtt_offset;
+ if (i915_is_ggtt(vm))
+ vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
- i915_is_ggtt(vm) &&
- src->has_global_gtt_mapping &&
- reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+ vma && (vma->bound & GLOBAL_BIND) &&
+ reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
use_ggtt = true;
- if (!src->has_global_gtt_mapping)
+ if (!(vma && vma->bound & GLOBAL_BIND))
goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src);
@@ -765,6 +769,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
/* Fences */
switch (INTEL_INFO(dev)->gen) {
+ case 9:
case 8:
case 7:
case 6:
@@ -804,9 +809,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
if (!error->semaphore_obj)
error->semaphore_obj =
- i915_error_object_create(dev_priv,
- dev_priv->semaphore_obj,
- &dev_priv->gtt.base);
+ i915_error_ggtt_object_create(dev_priv,
+ dev_priv->semaphore_obj);
for_each_ring(to, dev_priv, i) {
int idx;
@@ -923,6 +927,7 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
switch (INTEL_INFO(dev)->gen) {
+ case 9:
case 8:
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
@@ -1238,7 +1243,8 @@ static void i915_error_capture_msg(struct drm_device *dev,
ecode = i915_error_generate_code(dev_priv, error, &ring_id);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
- "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
+ "GPU HANG: ecode %d:%d:0x%08x",
+ INTEL_INFO(dev)->gen, ring_id, ecode);
if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len,
@@ -1326,13 +1332,12 @@ void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ spin_lock_irq(&dev_priv->gpu_error.lock);
error_priv->error = dev_priv->gpu_error.first_error;
if (error_priv->error)
kref_get(&error_priv->error->ref);
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ spin_unlock_irq(&dev_priv->gpu_error.lock);
}
@@ -1346,12 +1351,11 @@ void i915_destroy_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ spin_lock_irq(&dev_priv->gpu_error.lock);
error = dev_priv->gpu_error.first_error;
dev_priv->gpu_error.first_error = NULL;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ spin_unlock_irq(&dev_priv->gpu_error.lock);
if (error)
kref_put(&error->ref, i915_error_state_free);
@@ -1389,6 +1393,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
WARN_ONCE(1, "Unsupported platform\n");
case 7:
case 8:
+ case 9:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 2e0613e..176de63 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -189,7 +189,6 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_ALLOC] = compat_i915_alloc
};
-#ifdef CONFIG_COMPAT
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
@@ -218,4 +217,3 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return ret;
}
-#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f66392b..981834b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,6 +37,14 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/**
+ * DOC: interrupt handling
+ *
+ * These functions provide the basic support for enabling and disabling
+ * interrupt handling. There's a lot more functionality in i915_irq.c
+ * and related files, but that will be described in separate chapters.
+ */
+
static const u32 hpd_ibx[] = {
[HPD_CRT] = SDE_CRT_HOTPLUG,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
@@ -118,20 +126,22 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
- POSTING_READ(GEN8_##type##_IER(which)); \
+ I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
+ POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
- I915_WRITE(type##IMR, (imr_val)); \
I915_WRITE(type##IER, (ier_val)); \
- POSTING_READ(type##IER); \
+ I915_WRITE(type##IMR, (imr_val)); \
+ POSTING_READ(type##IMR); \
} while (0)
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+
/* For display hotplug interrupt */
-static void
+void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
@@ -146,7 +156,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
}
}
-static void
+void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
@@ -192,71 +202,28 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
ilk_update_gt_irq(dev_priv, mask, 0);
}
-/**
- * snb_update_pm_irq - update GEN6_PMIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
-{
- uint32_t new_val;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- new_val = dev_priv->pm_irq_mask;
- new_val &= ~interrupt_mask;
- new_val |= (~enabled_irq_mask & interrupt_mask);
-
- if (new_val != dev_priv->pm_irq_mask) {
- dev_priv->pm_irq_mask = new_val;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
- POSTING_READ(GEN6_PMIMR);
- }
-}
-
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
- snb_update_pm_irq(dev_priv, mask, mask);
+ return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
- snb_update_pm_irq(dev_priv, mask, 0);
+ return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}
-static bool ivb_can_enable_err_int(struct drm_device *dev)
+static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
- enum pipe pipe;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (crtc->cpu_fifo_underrun_disabled)
- return false;
- }
-
- return true;
+ return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
/**
- * bdw_update_pm_irq - update GT interrupt 2
+ * snb_update_pm_irq - update GEN6_PMIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
- *
- * Copied from the snb function, updated with relevant register offsets
*/
-static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
@@ -264,144 +231,87 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
new_val = dev_priv->pm_irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->pm_irq_mask) {
dev_priv->pm_irq_mask = new_val;
- I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
- POSTING_READ(GEN8_GT_IMR(2));
+ I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+ POSTING_READ(gen6_pm_imr(dev_priv));
}
}
-void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
- bdw_update_pm_irq(dev_priv, mask, mask);
-}
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ return;
-void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
-{
- bdw_update_pm_irq(dev_priv, mask, 0);
+ snb_update_pm_irq(dev_priv, mask, mask);
}
-static bool cpt_can_enable_serr_int(struct drm_device *dev)
+static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
- struct intel_crtc *crtc;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (crtc->pch_fifo_underrun_disabled)
- return false;
- }
-
- return true;
+ snb_update_pm_irq(dev_priv, mask, 0);
}
-void i9xx_check_fifo_underruns(struct drm_device *dev)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
- for_each_intel_crtc(dev, crtc) {
- u32 reg = PIPESTAT(crtc->pipe);
- u32 pipestat;
-
- if (crtc->cpu_fifo_underrun_disabled)
- continue;
-
- pipestat = I915_READ(reg) & 0xffff0000;
- if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
- continue;
-
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
-
- DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
- }
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ return;
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ __gen6_disable_pm_irq(dev_priv, mask);
}
-static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable, bool old)
+void gen6_reset_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & 0xffff0000;
-
- assert_spin_locked(&dev_priv->irq_lock);
+ uint32_t reg = gen6_pm_iir(dev_priv);
- if (enable) {
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
- } else {
- if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
- }
+ spin_lock_irq(&dev_priv->irq_lock);
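+ /* the IIR register appears to be double buffered; write twice to clear */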
+ I915_WRITE(reg, dev_priv->pm_rps_events);
+ I915_WRITE(reg, dev_priv->pm_rps_events);
+ POSTING_READ(reg);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
- DE_PIPEB_FIFO_UNDERRUN;
- if (enable)
- ironlake_enable_display_irq(dev_priv, bit);
- else
- ironlake_disable_display_irq(dev_priv, bit);
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON(dev_priv->rps.pm_iir);
+ WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+ dev_priv->rps.interrupts_enabled = true;
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable, bool old)
+void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (enable) {
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
- if (!ivb_can_enable_err_int(dev))
- return;
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->rps.interrupts_enabled = false;
+ spin_unlock_irq(&dev_priv->irq_lock);
- ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
- } else {
- ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ cancel_work_sync(&dev_priv->rps.work);
- if (old &&
- I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
- DRM_ERROR("uncleared fifo underrun on pipe %c\n",
- pipe_name(pipe));
- }
- }
-}
+ spin_lock_irq(&dev_priv->irq_lock);
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
+ ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
- assert_spin_locked(&dev_priv->irq_lock);
+ __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
+ ~dev_priv->pm_rps_events);
+ I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
+ I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
- if (enable)
- dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
- else
- dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ dev_priv->rps.pm_iir = 0;
+
+ spin_unlock_irq(&dev_priv->irq_lock);
}
/**
@@ -410,9 +320,9 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
{
uint32_t sdeimr = I915_READ(SDEIMR);
sdeimr &= ~interrupt_mask;
@@ -426,160 +336,6 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
I915_WRITE(SDEIMR, sdeimr);
POSTING_READ(SDEIMR);
}
-#define ibx_enable_display_interrupt(dev_priv, bits) \
- ibx_display_interrupt_update((dev_priv), (bits), (bits))
-#define ibx_disable_display_interrupt(dev_priv, bits) \
- ibx_display_interrupt_update((dev_priv), (bits), 0)
-
-static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
- SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
-
- if (enable)
- ibx_enable_display_interrupt(dev_priv, bit);
- else
- ibx_disable_display_interrupt(dev_priv, bit);
-}
-
-static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable, bool old)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (enable) {
- I915_WRITE(SERR_INT,
- SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
-
- if (!cpt_can_enable_serr_int(dev))
- return;
-
- ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
- } else {
- ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-
- if (old && I915_READ(SERR_INT) &
- SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
- transcoder_name(pch_transcoder));
- }
- }
-}
-
-/**
- * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
- * @dev: drm device
- * @pipe: pipe
- * @enable: true if we want to report FIFO underrun errors, false otherwise
- *
- * This function makes us disable or enable CPU fifo underruns for a specific
- * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
- * reporting for one pipe may also disable all the other CPU error interruts for
- * the other pipes, due to the fact that there's just one interrupt mask/enable
- * bit for all the pipes.
- *
- * Returns the previous state of underrun reporting.
- */
-static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- bool old;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- old = !intel_crtc->cpu_fifo_underrun_disabled;
- intel_crtc->cpu_fifo_underrun_disabled = !enable;
-
- if (HAS_GMCH_DISPLAY(dev))
- i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN5(dev) || IS_GEN6(dev))
- ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_GEN7(dev))
- ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN8(dev))
- broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
-
- return old;
-}
-
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
- bool ret;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return ret;
-}
-
-static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
- enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- return !intel_crtc->cpu_fifo_underrun_disabled;
-}
-
-/**
- * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
- * @dev: drm device
- * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
- * @enable: true if we want to report FIFO underrun errors, false otherwise
- *
- * This function makes us disable or enable PCH fifo underruns for a specific
- * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
- * underrun reporting for one transcoder may also disable all the other PCH
- * error interrupts for the other transcoders, due to the fact that there's just
- * one interrupt mask/enable bit for all the transcoders.
- *
- * Returns the previous state of underrun reporting.
- */
-bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
- bool old;
-
- /*
- * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
- * has only one pch transcoder A that all pipes can use. To avoid racy
- * pch transcoder -> pipe lookups from interrupt code simply store the
- * underrun statistics in crtc A. Since we never expose this anywhere
- * nor use it outside of the fifo underrun code here using the "wrong"
- * crtc on LPT won't cause issues.
- */
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
- old = !intel_crtc->pch_fifo_underrun_disabled;
- intel_crtc->pch_fifo_underrun_disabled = !enable;
-
- if (HAS_PCH_IBX(dev))
- ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
- else
- cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
-
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- return old;
-}
-
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -589,6 +345,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock);
+ WARN_ON(!intel_irqs_enabled(dev_priv));
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -615,6 +372,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock);
+ WARN_ON(!intel_irqs_enabled(dev_priv));
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -694,19 +452,18 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
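Many hunks below swap spin_lock_irqsave()/spin_lock_irqrestore() for a cheaper variant, picked by context: spin_lock_irq() where interrupts are known to be enabled (work items, postinstall hooks), and bare spin_lock() inside the hardirq handlers, where local interrupts are already off and saving flags is pure overhead. In sketch form:

	/* Process context: irqs known to be enabled, no need to save flags. */
	spin_lock_irq(&dev_priv->irq_lock);
	/* ... update interrupt state ... */
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Hardirq context: irqs already disabled, a plain lock suffices. */
	spin_lock(&dev_priv->irq_lock);
	/* ... */
	spin_unlock(&dev_priv->irq_lock);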
/**
@@ -1094,18 +851,17 @@ static void i915_digport_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, dig_port_work);
- unsigned long irqflags;
u32 long_port_mask, short_port_mask;
struct intel_digital_port *intel_dig_port;
int i, ret;
u32 old_bits = 0;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
long_port_mask = dev_priv->long_hpd_port_mask;
dev_priv->long_hpd_port_mask = 0;
short_port_mask = dev_priv->short_hpd_port_mask;
dev_priv->short_hpd_port_mask = 0;
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
for (i = 0; i < I915_MAX_PORTS; i++) {
bool valid = false;
@@ -1130,9 +886,9 @@ static void i915_digport_work_func(struct work_struct *work)
}
if (old_bits) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
dev_priv->hpd_event_bits |= old_bits;
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
schedule_work(&dev_priv->hotplug_work);
}
}
@@ -1151,7 +907,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
- unsigned long irqflags;
bool hpd_disabled = false;
bool changed = false;
u32 hpd_event_bits;
@@ -1159,7 +914,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
hpd_event_bits = dev_priv->hpd_event_bits;
dev_priv->hpd_event_bits = 0;
@@ -1193,7 +948,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
@@ -1260,11 +1015,7 @@ static void notify_ring(struct drm_device *dev,
trace_i915_gem_request_complete(ring);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_notify_mmio_flip(ring);
-
wake_up_all(&ring->irq_queue);
- i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
@@ -1400,14 +1151,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
int new_delay, adj;
spin_lock_irq(&dev_priv->irq_lock);
+	/* Speed up work cancellation while disabling rps interrupts. */
+ if (!dev_priv->rps.interrupts_enabled) {
+ spin_unlock_irq(&dev_priv->irq_lock);
+ return;
+ }
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- if (INTEL_INFO(dev_priv->dev)->gen >= 8)
- gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- else {
- /* Make sure not to corrupt PMIMR state used by ringbuffer */
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- }
+ /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
@@ -1488,7 +1240,6 @@ static void ivybridge_parity_work(struct work_struct *work)
u32 error_status, row, bank, subbank;
char *parity_event[6];
uint32_t misccpctl;
- unsigned long flags;
uint8_t slice = 0;
/* We must turn off DOP level clock gating to access the L3 registers.
@@ -1547,9 +1298,9 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irq(&dev_priv->irq_lock);
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irq(&dev_priv->irq_lock);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -1601,28 +1352,13 @@ static void snb_gt_irq_handler(struct drm_device *dev,
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
- GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
- i915_handle_error(dev, false, "GT error interrupt 0x%08x",
- gt_iir);
- }
+ GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+ DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(dev))
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
-static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- if ((pm_iir & dev_priv->pm_rps_events) == 0)
- return;
-
- spin_lock(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
- spin_unlock(&dev_priv->irq_lock);
-
- queue_work(dev_priv->wq, &dev_priv->rps.work);
-}
-
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
@@ -1684,7 +1420,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
I915_WRITE(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
- gen8_rps_irq_handler(dev_priv, tmp);
+ gen6_rps_irq_handler(dev_priv, tmp);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@@ -1898,7 +1634,7 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
if (!pipe_crc->entries) {
spin_unlock(&pipe_crc->lock);
- DRM_ERROR("spurious interrupt\n");
+ DRM_DEBUG_KMS("spurious interrupt\n");
return;
}
@@ -1984,24 +1720,30 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ /* TODO: RPS on GEN9+ is not supported yet. */
+ if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
+ "GEN9+: unexpected RPS IRQ\n"))
+ return;
+
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ if (dev_priv->rps.interrupts_enabled) {
+ dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
+ }
spin_unlock(&dev_priv->irq_lock);
-
- queue_work(dev_priv->wq, &dev_priv->rps.work);
}
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ return;
+
if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
- i915_handle_error(dev_priv->dev, false,
- "VEBOX CS error interrupt 0x%08x",
- pm_iir);
- }
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
}
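The rps.interrupts_enabled checks added here and in gen6_pm_rps_work() close a race with the disable path: that path clears the flag under irq_lock before cancelling the work item (only partly visible at the top of this section), so once it is cleared no new work is queued and in-flight work bails out early. The irq-side half, in outline:

	spin_lock(&dev_priv->irq_lock);
	gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	if (dev_priv->rps.interrupts_enabled) {
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	spin_unlock(&dev_priv->irq_lock);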
@@ -2031,9 +1773,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
* we need to be careful that we only handle what we want to
* handle.
*/
- mask = 0;
- if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
- mask |= PIPE_FIFO_UNDERRUN_STATUS;
+
+	/* fifo underruns are filtered in the underrun handler. */
+ mask = PIPE_FIFO_UNDERRUN_STATUS;
switch (pipe) {
case PIPE_A:
@@ -2078,9 +1820,8 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
@@ -2247,14 +1988,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
- false))
- DRM_ERROR("PCH transcoder A FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
- false))
- DRM_ERROR("PCH transcoder B FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
static void ivb_err_int_handler(struct drm_device *dev)
@@ -2267,12 +2004,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
DRM_ERROR("Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
- if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
- false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
- }
+ if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
if (IS_IVYBRIDGE(dev))
@@ -2294,19 +2027,13 @@ static void cpt_serr_int_handler(struct drm_device *dev)
DRM_ERROR("PCH poison interrupt\n");
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
- false))
- DRM_ERROR("PCH transcoder A FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
- false))
- DRM_ERROR("PCH transcoder B FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
- false))
- DRM_ERROR("PCH transcoder C FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
I915_WRITE(SERR_INT, serr_int);
}
@@ -2372,9 +2099,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
intel_check_page_flip(dev, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -2524,6 +2249,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
uint32_t tmp = 0;
enum pipe pipe;
+ u32 aux_mask = GEN8_AUX_CHANNEL_A;
+
+ if (IS_GEN9(dev))
+ aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
master_ctl = I915_READ(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
@@ -2556,7 +2286,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
- if (tmp & GEN8_AUX_CHANNEL_A)
+
+ if (tmp & aux_mask)
dp_aux_irq_handler(dev);
else
DRM_ERROR("Unexpected DE Port interrupt\n");
@@ -2566,7 +2297,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
for_each_pipe(dev_priv, pipe) {
- uint32_t pipe_iir;
+ uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
@@ -2575,11 +2306,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (pipe_iir) {
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+
if (pipe_iir & GEN8_PIPE_VBLANK &&
intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
- if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+ if (IS_GEN9(dev))
+ flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
+ else
+ flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+
+ if (flip_done) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip_plane(dev, pipe);
}
@@ -2587,18 +2324,20 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev, pipe);
- if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
- false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
- }
+ if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv,
+ pipe);
+
- if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+ if (IS_GEN9(dev))
+ fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ else
+ fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+ if (fault_errors)
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
pipe_name(pipe),
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
- }
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
@@ -2697,6 +2436,9 @@ static void i915_error_work_func(struct work_struct *work)
* simulated reset via debugs, so get an RPM reference.
*/
intel_runtime_pm_get(dev_priv);
+
+ intel_prepare_reset(dev);
+
/*
* All state reset _must_ be completed before we update the
* reset counter, for otherwise waiters might miss the reset
@@ -2705,7 +2447,7 @@ static void i915_error_work_func(struct work_struct *work)
*/
ret = i915_reset(dev);
- intel_display_handle_reset(dev);
+ intel_finish_reset(dev);
intel_runtime_pm_put(dev_priv);
@@ -3330,10 +3072,15 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
+
if (!i915.enable_hangcheck)
return;
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ /* Don't continually defer the hangcheck, but make sure it is active */
+ if (timer_pending(timer))
+ return;
+ mod_timer(timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
@@ -3396,10 +3143,22 @@ static void ironlake_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
+static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(dev_priv, pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ GEN5_IRQ_RESET(VLV_);
+}
+
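GEN5_IRQ_RESET() is the common mask/disable/ack sequence used throughout this file. Judging from the GEN8_IRQ_FINI macros removed further down, its shape is approximately (a sketch, not a verbatim quote of the header):

	#define GEN5_IRQ_RESET(type) do { \
		I915_WRITE(type##IMR, 0xffffffff); \
		POSTING_READ(type##IMR); \
		I915_WRITE(type##IER, 0); \
		I915_WRITE(type##IIR, 0xffffffff); \
		POSTING_READ(type##IIR); \
		I915_WRITE(type##IIR, 0xffffffff); \
		POSTING_READ(type##IIR); \
	} while (0)

IIR is double buffered, which is why it is acked twice here and in the doubled IIR writes elsewhere in this patch.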
static void valleyview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
/* VLV magic */
I915_WRITE(VLV_IMR, 0);
@@ -3407,22 +3166,11 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
- /* and GT */
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIIR, I915_READ(GTIIR));
-
gen5_gt_irq_reset(dev);
- I915_WRITE(DPINVGTT, 0xff);
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- POSTING_READ(VLV_IER);
+ vlv_display_irq_reset(dev_priv);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@@ -3444,8 +3192,8 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
for_each_pipe(dev_priv, pipe)
- if (intel_display_power_enabled(dev_priv,
- POWER_DOMAIN_PIPE(pipe)))
+ if (intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
GEN5_IRQ_RESET(GEN8_DE_PORT_);
@@ -3457,21 +3205,19 @@ static void gen8_irq_reset(struct drm_device *dev)
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
- unsigned long irqflags;
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
@@ -3480,20 +3226,9 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
- POSTING_READ(GEN8_PCU_IIR);
-
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
-
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- I915_WRITE(VLV_IIR, 0xffffffff);
- POSTING_READ(VLV_IIR);
+ vlv_display_irq_reset(dev_priv);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -3584,7 +3319,6 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
static int ironlake_irq_postinstall(struct drm_device *dev)
{
- unsigned long irqflags;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 display_mask, extra_mask;
@@ -3623,9 +3357,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
* spinlocking not required here for correctness since interrupt
* setup is guaranteed to run in single-threaded context. But we
* need it to make the assert_spin_locked happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
return 0;
@@ -3635,45 +3369,51 @@ static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
+ enum pipe pipe;
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
- I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
- I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+ for_each_pipe(dev_priv, pipe)
+ I915_WRITE(PIPESTAT(pipe), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
- PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(dev_priv, pipe)
+ i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ if (IS_CHERRYVIEW(dev_priv))
+ iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
dev_priv->irq_mask &= ~iir_mask;
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
- POSTING_READ(VLV_IER);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ POSTING_READ(VLV_IMR);
}
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
+ enum pipe pipe;
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ if (IS_CHERRYVIEW(dev_priv))
+ iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
dev_priv->irq_mask |= iir_mask;
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
POSTING_READ(VLV_IIR);
@@ -3681,14 +3421,15 @@ static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
- PIPE_GMBUS_INTERRUPT_STATUS);
- i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+ i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(dev_priv, pipe)
+ i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
- I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
- I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+
+ for_each_pipe(dev_priv, pipe)
+ I915_WRITE(PIPESTAT(pipe), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
}
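Note how the install/uninstall pair reorders the IER/IMR writes: on enable, stale IIR bits are acked and IER programmed before IMR unmasks anything; on disable, IMR masks first so no new interrupt fires while IER and IIR are torn down. In outline:

	/* Enable: ack stale bits, enable sources, unmask last. */
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);		/* double buffered IIR */
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);

	/* Disable: mask first, then disable sources and ack. */
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);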
@@ -3701,7 +3442,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = true;
- if (dev_priv->dev->irq_enabled)
+ if (intel_irqs_enabled(dev_priv))
valleyview_display_irqs_install(dev_priv);
}
@@ -3714,34 +3455,36 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = false;
- if (dev_priv->dev->irq_enabled)
+ if (intel_irqs_enabled(dev_priv))
valleyview_display_irqs_uninstall(dev_priv);
}
-static int valleyview_irq_postinstall(struct drm_device *dev)
+static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
-
dev_priv->irq_mask = ~0;
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
- POSTING_READ(VLV_IER);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ POSTING_READ(VLV_IMR);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_install(dev_priv);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IIR, 0xffffffff);
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ vlv_display_irq_postinstall(dev_priv);
gen5_gt_irq_postinstall(dev);
@@ -3783,24 +3526,35 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
- uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
- GEN8_PIPE_CDCLK_CRC_DONE |
- GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
- uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
- GEN8_PIPE_FIFO_UNDERRUN;
+ uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+ uint32_t de_pipe_enables;
int pipe;
+ u32 aux_en = GEN8_AUX_CHANNEL_A;
+
+ if (IS_GEN9(dev_priv)) {
+ de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
+ GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
+ } else
+ de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
+ GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+ de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+ GEN8_PIPE_FIFO_UNDERRUN;
+
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
for_each_pipe(dev_priv, pipe)
- if (intel_display_power_enabled(dev_priv,
+ if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
- GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
+ GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -3823,33 +3577,8 @@ static int gen8_irq_postinstall(struct drm_device *dev)
static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
- u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
- PIPE_CRC_DONE_INTERRUPT_STATUS;
- unsigned long irqflags;
- int pipe;
- /*
- * Leave vblank interrupts masked initially. enable/disable will
- * toggle them based on usage.
- */
- dev_priv->irq_mask = ~enable_mask;
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IER, enable_mask);
+ vlv_display_irq_postinstall(dev_priv);
gen8_gt_irq_postinstall(dev_priv);
@@ -3869,41 +3598,39 @@ static void gen8_irq_uninstall(struct drm_device *dev)
gen8_irq_reset(dev);
}
+static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
+{
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ valleyview_display_irqs_uninstall(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ vlv_display_irq_reset(dev_priv);
+
+ dev_priv->irq_mask = 0;
+}
+
static void valleyview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
- int pipe;
if (!dev_priv)
return;
I915_WRITE(VLV_MASTER_IER, 0);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
+ gen5_gt_irq_reset(dev);
I915_WRITE(HWSTAM, 0xffffffff);
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (dev_priv->display_irqs_enabled)
- valleyview_display_irqs_uninstall(dev_priv);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- dev_priv->irq_mask = 0;
-
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- POSTING_READ(VLV_IER);
+ vlv_display_irq_uninstall(dev_priv);
}
static void cherryview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
if (!dev_priv)
return;
@@ -3911,44 +3638,11 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
-#define GEN8_IRQ_FINI_NDX(type, which) \
-do { \
- I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
- I915_WRITE(GEN8_##type##_IER(which), 0); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
-} while (0)
-
-#define GEN8_IRQ_FINI(type) \
-do { \
- I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
- I915_WRITE(GEN8_##type##_IER, 0); \
- I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR); \
- I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
-} while (0)
-
- GEN8_IRQ_FINI_NDX(GT, 0);
- GEN8_IRQ_FINI_NDX(GT, 1);
- GEN8_IRQ_FINI_NDX(GT, 2);
- GEN8_IRQ_FINI_NDX(GT, 3);
-
- GEN8_IRQ_FINI(PCU);
-
-#undef GEN8_IRQ_FINI
-#undef GEN8_IRQ_FINI_NDX
-
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ gen8_gt_irq_reset(dev_priv);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
+ GEN5_IRQ_RESET(GEN8_PCU_);
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- I915_WRITE(VLV_IIR, 0xffffffff);
- POSTING_READ(VLV_IIR);
+ vlv_display_irq_uninstall(dev_priv);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
@@ -3976,7 +3670,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
static int i8xx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -3999,10 +3692,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
return 0;
}
@@ -4047,7 +3740,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
struct drm_i915_private *dev_priv = dev->dev_private;
u16 iir, new_iir;
u32 pipe_stats[2];
- unsigned long irqflags;
int pipe;
u16 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4063,11 +3755,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false,
- "Command parser error, iir 0x%08x",
- iir);
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
@@ -4079,13 +3769,11 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & 0x8000ffff)
I915_WRITE(reg, pipe_stats[pipe]);
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock(&dev_priv->irq_lock);
I915_WRITE16(IIR, iir & ~flip_mask);
new_iir = I915_READ16(IIR); /* Flush posted writes */
- i915_update_dri1_breadcrumb(dev);
-
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
@@ -4101,9 +3789,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv,
+ pipe);
}
iir = new_iir;
@@ -4149,7 +3837,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
- unsigned long irqflags;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4187,10 +3874,10 @@ static int i915_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
return 0;
}
@@ -4234,7 +3921,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
- unsigned long irqflags;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
@@ -4250,11 +3936,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false,
- "Command parser error, iir 0x%08x",
- iir);
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
@@ -4266,7 +3950,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
irq_received = true;
}
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock(&dev_priv->irq_lock);
if (!irq_received)
break;
@@ -4297,9 +3981,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv,
+ pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4324,8 +4008,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
iir = new_iir;
} while (iir & ~flip_mask);
- i915_update_dri1_breadcrumb(dev);
-
return ret;
}
@@ -4372,7 +4054,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
u32 error_mask;
- unsigned long irqflags;
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -4393,11 +4074,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
/*
* Enable some error detection, note the instruction error mask
@@ -4462,7 +4143,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
- unsigned long irqflags;
int ret = IRQ_NONE, pipe;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4479,11 +4159,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false,
- "Command parser error, iir 0x%08x",
- iir);
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
@@ -4497,7 +4175,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
irq_received = true;
}
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock(&dev_priv->irq_lock);
if (!irq_received)
break;
@@ -4527,9 +4205,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4556,8 +4233,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
iir = new_iir;
}
- i915_update_dri1_breadcrumb(dev);
-
return ret;
}
@@ -4584,19 +4259,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(IIR, I915_READ(IIR));
}
-static void intel_hpd_irq_reenable(struct work_struct *work)
+static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
hotplug_reenable_work.work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- unsigned long irqflags;
int i;
intel_runtime_pm_get(dev_priv);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
struct drm_connector *connector;
@@ -4620,14 +4294,21 @@ static void intel_hpd_irq_reenable(struct work_struct *work)
}
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(dev_priv);
}
-void intel_irq_init(struct drm_device *dev)
+/**
+ * intel_irq_init - initializes irq support
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes all the irq support including work items, timers
+ * and all the vtables. It does not set up the interrupt itself, though.
+ */
+void intel_irq_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
@@ -4636,7 +4317,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/* Let's track the enabled rps events */
- if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
@@ -4646,17 +4327,14 @@ void intel_irq_init(struct drm_device *dev)
i915_hangcheck_elapsed,
(unsigned long) dev);
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
- intel_hpd_irq_reenable);
+ intel_hpd_irq_reenable_work);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
- /* Haven't installed the IRQ handler yet */
- dev_priv->pm._irqs_disabled = true;
-
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
- } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
} else {
@@ -4669,7 +4347,7 @@ void intel_irq_init(struct drm_device *dev)
* Gen2 doesn't have a hardware frame counter and so depends on
 * vblank interrupts to produce sane vblank sequence numbers.
*/
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -4677,7 +4355,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
}
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
dev->driver->irq_handler = cherryview_irq_handler;
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
@@ -4685,7 +4363,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
@@ -4693,7 +4371,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_GEN8(dev)) {
+ } else if (INTEL_INFO(dev_priv)->gen >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
dev->driver->irq_postinstall = gen8_irq_postinstall;
@@ -4710,12 +4388,12 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else {
- if (INTEL_INFO(dev)->gen == 2) {
+ if (INTEL_INFO(dev_priv)->gen == 2) {
dev->driver->irq_preinstall = i8xx_irq_preinstall;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
- } else if (INTEL_INFO(dev)->gen == 3) {
+ } else if (INTEL_INFO(dev_priv)->gen == 3) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4733,12 +4411,23 @@ void intel_irq_init(struct drm_device *dev)
}
}
-void intel_hpd_init(struct drm_device *dev)
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_install(). From this point on hotplug and
+ * poll requests can run concurrently with other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
- unsigned long irqflags;
int i;
for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -4756,27 +4445,72 @@ void intel_hpd_init(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
-/* Disable interrupts so we can allow runtime PM. */
-void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
+/**
+ * intel_irq_install - enables the hardware interrupt
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hardware interrupt handling, but leaves the hotplug
+ * handling still disabled. It is called after intel_irq_init().
+ *
+ * In the driver load and resume code we need working interrupts in a few places
+ * but don't want to deal with the hassle of concurrent probe and hotplug
+ * workers. Hence the split into this two-stage approach.
+ */
+int intel_irq_install(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ /*
+ * We enable some interrupt sources in our postinstall hooks, so mark
+ * interrupts as enabled _before_ actually enabling them to avoid
+ * special cases in our ordering checks.
+ */
+ dev_priv->pm.irqs_enabled = true;
- dev->driver->irq_uninstall(dev);
- dev_priv->pm._irqs_disabled = true;
+ return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
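Together with intel_irq_init() and intel_hpd_init() above, driver load becomes a three-stage sequence. A hedged sketch of the intended call order (illustrative only, error handling trimmed):

	int ret;

	intel_irq_init(dev_priv);		/* work items, timers, vtables */

	ret = intel_irq_install(dev_priv);	/* hardware interrupt enabled */
	if (ret)
		goto err;

	/* ... probe outputs without racing hotplug workers ... */

	intel_hpd_init(dev_priv);		/* finally allow hotplug/poll */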
-/* Restore interrupts so we can recover from runtime PM. */
-void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
+/**
+ * intel_irq_uninstall - finalizes all irq handling
+ * @dev_priv: i915 device instance
+ *
+ * This stops interrupt and hotplug handling, and unregisters and frees all
+ * resources acquired in the init functions.
+ */
+void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_irq_uninstall(dev_priv->dev);
+ intel_hpd_cancel_work(dev_priv);
+ dev_priv->pm.irqs_enabled = false;
+}
- dev_priv->pm._irqs_disabled = false;
- dev->driver->irq_preinstall(dev);
- dev->driver->irq_postinstall(dev);
+/**
+ * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to disable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
+{
+ dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+ dev_priv->pm.irqs_enabled = false;
+}
+
+/**
+ * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to enable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
+{
+ dev_priv->pm.irqs_enabled = true;
+ dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
+ dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
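These two are expected to bracket the low-power period in both the runtime-PM and system suspend/resume paths; a minimal usage sketch under that assumption:

	/* Entering a low-power state: quiesce interrupt handling. */
	intel_runtime_pm_disable_interrupts(dev_priv);

	/* ... device powered down and back up ... */

	/* Resuming: re-run preinstall/postinstall to rebuild irq state. */
	intel_runtime_pm_enable_interrupts(dev_priv);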
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c01e5f3..eefdc23 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,14 +26,25 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PLANE(plane, a, b) _PIPE(plane, a, b)
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
-
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
-#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
-#define _MASKED_BIT_DISABLE(a) ((a) << 16)
+#define _MASKED_FIELD(mask, value) ({ \
+ if (__builtin_constant_p(mask)) \
+ BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
+ if (__builtin_constant_p(value)) \
+ BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
+ if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
+ BUILD_BUG_ON_MSG((value) & ~(mask), \
+ "Incorrect value for mask"); \
+ (mask) << 16 | (value); })
+#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
+#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
+
+
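Masked registers carry a write-enable mask in their upper 16 bits: a write only affects the low bits whose corresponding high bit is set, which is why _MASKED_FIELD() now BUILD_BUG_ON()s any constant value or mask straying above bit 15 (and why GEN6_WIZ_HASHING_MASK below drops its explicit << 16). For example, with an arbitrary bit 5 of some masked register reg:

	/* Set bit 5: high half says "bit 5 is written", low half gives its value. */
	I915_WRITE(reg, _MASKED_BIT_ENABLE(1 << 5));	/* (1 << 21) | (1 << 5) */

	/* Clear it again without touching any other bit of the register. */
	I915_WRITE(reg, _MASKED_BIT_DISABLE(1 << 5));	/* (1 << 21) | 0 */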
/* PCI config space */
@@ -74,15 +85,17 @@
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
+#define GCDGMBUS 0xcc
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
/* Graphics reset regs */
-#define I965_GDRST 0xc0 /* PCI config register */
+#define I915_GDRST 0xc0 /* PCI config register */
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
#define GRDOM_MASK (3<<2)
+#define GRDOM_RESET_STATUS (1<<1)
#define GRDOM_RESET_ENABLE (1<<0)
#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
@@ -248,6 +261,16 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+/* SKL ones */
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -314,6 +337,8 @@
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
+#define MI_PREDICATE_SRC0 (0x2400)
+#define MI_PREDICATE_SRC1 (0x2408)
#define MI_PREDICATE_RESULT_2 (0x2214)
#define LOWER_SLICE_ENABLED (1<<0)
@@ -564,6 +589,7 @@ enum punit_power_well {
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
+#define GPLLENABLE (1<<4)
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
@@ -672,7 +698,7 @@ enum punit_power_well {
* need to be accessed during AUX communication,
*
* Generally the common lane corresponds to the pipe and
- * the spline (PCS/TX) correponds to the port.
+ * the spline (PCS/TX) corresponds to the port.
*
* For dual channel PHY (VLV/CHV):
*
@@ -796,6 +822,8 @@ enum punit_power_well {
#define _VLV_PCS_DW0_CH1 0x8400
#define DPIO_PCS_TX_LANE2_RESET (1<<16)
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
+#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
#define _VLV_PCS01_DW0_CH0 0x200
@@ -836,12 +864,31 @@ enum punit_power_well {
#define _VLV_PCS_DW9_CH0 0x8224
#define _VLV_PCS_DW9_CH1 0x8424
+#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
+#define DPIO_PCS_TX2MARGIN_000 (0<<13)
+#define DPIO_PCS_TX2MARGIN_101 (1<<13)
+#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
+#define DPIO_PCS_TX1MARGIN_000 (0<<10)
+#define DPIO_PCS_TX1MARGIN_101 (1<<10)
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+#define _VLV_PCS01_DW9_CH0 0x224
+#define _VLV_PCS23_DW9_CH0 0x424
+#define _VLV_PCS01_DW9_CH1 0x2624
+#define _VLV_PCS23_DW9_CH1 0x2824
+#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
+#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
+
#define _CHV_PCS_DW10_CH0 0x8228
#define _CHV_PCS_DW10_CH1 0x8428
#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
+#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
+#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
+#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
+#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
+#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
+#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
#define _VLV_PCS01_DW10_CH0 0x0228
@@ -853,8 +900,18 @@ enum punit_power_well {
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
+#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
+#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
+#define _VLV_PCS01_DW11_CH0 0x022c
+#define _VLV_PCS23_DW11_CH0 0x042c
+#define _VLV_PCS01_DW11_CH1 0x262c
+#define _VLV_PCS23_DW11_CH1 0x282c
+#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
+#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
+
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
@@ -1237,7 +1294,7 @@ enum punit_power_well {
#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
-#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
+#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
@@ -1999,6 +2056,8 @@ enum punit_power_well {
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+#define DCC2 0x10204
+#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
/* Pineview MCH register contains DDR3 setting */
#define CSHRDDR3CTL 0x101a8
@@ -2282,7 +2341,6 @@ enum punit_power_well {
#define GEN6_GT_THREAD_STATUS_REG 0x13805c
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
-#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
@@ -2506,9 +2564,7 @@ enum punit_power_well {
#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
-#define EDP_PSR_DPCD_COMMAND 0x80060000
#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
-#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
@@ -3645,6 +3701,7 @@ enum punit_power_well {
#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
/*
* Computing GMCH M and N values for the Display Port link
@@ -4024,17 +4081,18 @@ enum punit_power_well {
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
/* drain latency register values*/
+#define DRAIN_LATENCY_PRECISION_16 16
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_PRECISION_64 (1<<31)
-#define DDL_CURSOR_PRECISION_32 (0<<31)
+#define DDL_CURSOR_PRECISION_HIGH (1<<31)
+#define DDL_CURSOR_PRECISION_LOW (0<<31)
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite)))
-#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite)))
+#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
+#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
-#define DDL_PLANE_PRECISION_64 (1<<7)
-#define DDL_PLANE_PRECISION_32 (0<<7)
+#define DDL_PLANE_PRECISION_HIGH (1<<7)
+#define DDL_PLANE_PRECISION_LOW (0<<7)
#define DDL_PLANE_SHIFT 0
#define DRAIN_LATENCY_MASK 0x7f
@@ -4071,6 +4129,41 @@ enum punit_power_well {
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
+/* Watermark register definitions for SKL */
+#define CUR_WM_A_0 0x70140
+#define CUR_WM_B_0 0x71140
+#define PLANE_WM_1_A_0 0x70240
+#define PLANE_WM_1_B_0 0x71240
+#define PLANE_WM_2_A_0 0x70340
+#define PLANE_WM_2_B_0 0x71340
+#define PLANE_WM_TRANS_1_A_0 0x70268
+#define PLANE_WM_TRANS_1_B_0 0x71268
+#define PLANE_WM_TRANS_2_A_0 0x70368
+#define PLANE_WM_TRANS_2_B_0 0x71368
+#define CUR_WM_TRANS_A_0 0x70168
+#define CUR_WM_TRANS_B_0 0x71168
+#define PLANE_WM_EN (1 << 31)
+#define PLANE_WM_LINES_SHIFT 14
+#define PLANE_WM_LINES_MASK 0x1f
+#define PLANE_WM_BLOCKS_MASK 0x3ff
+
+#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0)
+#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level)))
+#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0)
+
+#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0)
+#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0)
+#define _PLANE_WM_BASE(pipe, plane) \
+ _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
+#define PLANE_WM(pipe, plane, level) \
+ (_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
+#define _PLANE_WM_TRANS_1(pipe) \
+ _PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0)
+#define _PLANE_WM_TRANS_2(pipe) \
+ _PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0)
+#define PLANE_WM_TRANS(pipe, plane) \
+ _PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))
+
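The new SKL watermark encoding packs an enable bit, a line count and a block count into each register. A minimal sketch of dumping one level, assuming the usual I915_READ()/DRM_DEBUG_KMS() helpers from the rest of the driver:

static void skl_dump_plane_wm(struct drm_i915_private *dev_priv,
			      enum pipe pipe, int plane, int level)
{
	u32 val = I915_READ(PLANE_WM(pipe, plane, level));

	DRM_DEBUG_KMS("pipe %c plane %d level %d: %s, %u lines, %u blocks\n",
		      pipe_name(pipe), plane, level,
		      (val & PLANE_WM_EN) ? "enabled" : "disabled",
		      (val >> PLANE_WM_LINES_SHIFT) & PLANE_WM_LINES_MASK,
		      val & PLANE_WM_BLOCKS_MASK);
}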
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
#define WM0_PIPE_PLANE_MASK (0xffff<<16)
@@ -4177,6 +4270,7 @@ enum punit_power_well {
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURSOR_ROTATE_180 (1<<15)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
@@ -4240,9 +4334,11 @@ enum punit_power_well {
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_ROTATE_180 (1<<15)
+#define DISPPLANE_ALPHA_PREMULTIPLY (1<<16) /* CHV pipe B */
+#define DISPPLANE_ROTATE_180 (1<<15)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
+#define DISPPLANE_MIRROR (1<<8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
@@ -4263,6 +4359,24 @@ enum punit_power_well {
#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
+/* CHV pipe B blender and primary plane */
+#define _CHV_BLEND_A 0x60a00
+#define CHV_BLEND_LEGACY (0<<30)
+#define CHV_BLEND_ANDROID (1<<30)
+#define CHV_BLEND_MPO (2<<30)
+#define CHV_BLEND_MASK (3<<30)
+#define _CHV_CANVAS_A 0x60a04
+#define _PRIMPOS_A 0x60a08
+#define _PRIMSIZE_A 0x60a0c
+#define _PRIMCNSTALPHA_A 0x60a10
+#define PRIM_CONST_ALPHA_ENABLE (1<<31)
+
+#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A)
+#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A)
+#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A)
+#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A)
+#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A)
+
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
@@ -4464,6 +4578,7 @@ enum punit_power_well {
#define SP_FORMAT_RGBA1010102 (9<<26)
#define SP_FORMAT_RGBX8888 (0xe<<26)
#define SP_FORMAT_RGBA8888 (0xf<<26)
+#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */
#define SP_SOURCE_KEY (1<<22)
#define SP_YUV_BYTE_ORDER_MASK (3<<16)
#define SP_YUV_ORDER_YUYV (0<<16)
@@ -4472,6 +4587,7 @@ enum punit_power_well {
#define SP_YUV_ORDER_VYUY (3<<16)
#define SP_ROTATE_180 (1<<15)
#define SP_TILED (1<<10)
+#define SP_MIRROR (1<<8) /* CHV pipe B */
#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
@@ -4482,6 +4598,7 @@ enum punit_power_well {
#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
+#define SP_CONST_ALPHA_ENABLE (1<<31)
#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
@@ -4510,6 +4627,195 @@ enum punit_power_well {
#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
+/*
+ * CHV pipe B sprite CSC
+ *
+ * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff|
+ * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
+ * |cb|   |c6 c7 c8|   |cb + cb_ioff|   |cb_ooff|
+ */
+#define SPCSCYGOFF(sprite) (VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
+#define SPCSCCBOFF(sprite) (VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
+#define SPCSCCROFF(sprite) (VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
+#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
+#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */
+
+#define SPCSCC01(sprite) (VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
+#define SPCSCC23(sprite) (VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
+#define SPCSCC45(sprite) (VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
+#define SPCSCC67(sprite) (VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
+#define SPCSCC8(sprite) (VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
+#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
+#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */
+
+#define SPCSCYGICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
+#define SPCSCCBICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
+#define SPCSCCRICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
+#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
+#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */
+
+#define SPCSCYGOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
+#define SPCSCCBOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
+#define SPCSCCROCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
+#define SPCSC_OMAX(x) ((x) << 16) /* u10 */
+#define SPCSC_OMIN(x) ((x) << 0) /* u10 */
+
+/* Skylake plane registers */
+
+#define _PLANE_CTL_1_A 0x70180
+#define _PLANE_CTL_2_A 0x70280
+#define _PLANE_CTL_3_A 0x70380
+#define PLANE_CTL_ENABLE (1 << 31)
+#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
+#define PLANE_CTL_FORMAT_MASK (0xf << 24)
+#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
+#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
+#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
+#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
+#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
+#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
+#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
+#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
+#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
+#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
+#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
+#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
+#define PLANE_CTL_ORDER_BGRX (0 << 20)
+#define PLANE_CTL_ORDER_RGBX (1 << 20)
+#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
+#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
+#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
+#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
+#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
+#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
+#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
+#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
+#define PLANE_CTL_TILED_MASK (0x7 << 10)
+#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
+#define PLANE_CTL_TILED_X ( 1 << 10)
+#define PLANE_CTL_TILED_Y ( 4 << 10)
+#define PLANE_CTL_TILED_YF ( 5 << 10)
+#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
+#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
+#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
+#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
+#define PLANE_CTL_ROTATE_MASK 0x3
+#define PLANE_CTL_ROTATE_0 0x0
+#define PLANE_CTL_ROTATE_180 0x2
+#define _PLANE_STRIDE_1_A 0x70188
+#define _PLANE_STRIDE_2_A 0x70288
+#define _PLANE_STRIDE_3_A 0x70388
+#define _PLANE_POS_1_A 0x7018c
+#define _PLANE_POS_2_A 0x7028c
+#define _PLANE_POS_3_A 0x7038c
+#define _PLANE_SIZE_1_A 0x70190
+#define _PLANE_SIZE_2_A 0x70290
+#define _PLANE_SIZE_3_A 0x70390
+#define _PLANE_SURF_1_A 0x7019c
+#define _PLANE_SURF_2_A 0x7029c
+#define _PLANE_SURF_3_A 0x7039c
+#define _PLANE_OFFSET_1_A 0x701a4
+#define _PLANE_OFFSET_2_A 0x702a4
+#define _PLANE_OFFSET_3_A 0x703a4
+#define _PLANE_KEYVAL_1_A 0x70194
+#define _PLANE_KEYVAL_2_A 0x70294
+#define _PLANE_KEYMSK_1_A 0x70198
+#define _PLANE_KEYMSK_2_A 0x70298
+#define _PLANE_KEYMAX_1_A 0x701a0
+#define _PLANE_KEYMAX_2_A 0x702a0
+#define _PLANE_BUF_CFG_1_A 0x7027c
+#define _PLANE_BUF_CFG_2_A 0x7037c
+
+#define _PLANE_CTL_1_B 0x71180
+#define _PLANE_CTL_2_B 0x71280
+#define _PLANE_CTL_3_B 0x71380
+#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
+#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
+#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
+#define PLANE_CTL(pipe, plane) \
+ _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
+
+#define _PLANE_STRIDE_1_B 0x71188
+#define _PLANE_STRIDE_2_B 0x71288
+#define _PLANE_STRIDE_3_B 0x71388
+#define _PLANE_STRIDE_1(pipe) \
+ _PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
+#define _PLANE_STRIDE_2(pipe) \
+ _PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
+#define _PLANE_STRIDE_3(pipe) \
+ _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
+#define PLANE_STRIDE(pipe, plane) \
+ _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
+
+#define _PLANE_POS_1_B 0x7118c
+#define _PLANE_POS_2_B 0x7128c
+#define _PLANE_POS_3_B 0x7138c
+#define _PLANE_POS_1(pipe) _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
+#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
+#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
+#define PLANE_POS(pipe, plane) \
+ _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
+
+#define _PLANE_SIZE_1_B 0x71190
+#define _PLANE_SIZE_2_B 0x71290
+#define _PLANE_SIZE_3_B 0x71390
+#define _PLANE_SIZE_1(pipe) _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
+#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
+#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
+#define PLANE_SIZE(pipe, plane) \
+ _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
+
+#define _PLANE_SURF_1_B 0x7119c
+#define _PLANE_SURF_2_B 0x7129c
+#define _PLANE_SURF_3_B 0x7139c
+#define _PLANE_SURF_1(pipe) _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
+#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
+#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
+#define PLANE_SURF(pipe, plane) \
+ _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
+
+#define _PLANE_OFFSET_1_B 0x711a4
+#define _PLANE_OFFSET_2_B 0x712a4
+#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
+#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
+#define PLANE_OFFSET(pipe, plane) \
+ _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
+
+#define _PLANE_KEYVAL_1_B 0x71194
+#define _PLANE_KEYVAL_2_B 0x71294
+#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
+#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
+#define PLANE_KEYVAL(pipe, plane) \
+ _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
+
+#define _PLANE_KEYMSK_1_B 0x71198
+#define _PLANE_KEYMSK_2_B 0x71298
+#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
+#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
+#define PLANE_KEYMSK(pipe, plane) \
+ _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
+
+#define _PLANE_KEYMAX_1_B 0x711a0
+#define _PLANE_KEYMAX_2_B 0x712a0
+#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
+#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
+#define PLANE_KEYMAX(pipe, plane) \
+ _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
+
+#define _PLANE_BUF_CFG_1_B 0x7127c
+#define _PLANE_BUF_CFG_2_B 0x7137c
+#define _PLANE_BUF_CFG_1(pipe) \
+ _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
+#define _PLANE_BUF_CFG_2(pipe) \
+ _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
+#define PLANE_BUF_CFG(pipe, plane) \
+ _PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
+
+/* SKL new cursor registers */
+#define _CUR_BUF_CFG_A 0x7017c
+#define _CUR_BUF_CFG_B 0x7117c
+#define CUR_BUF_CFG(pipe) _PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
+
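Note the two-level indirection in the composed macros above: _PLANE() first picks the per-plane variant, _PIPE() then picks the A/B instance. An illustrative expansion (plane indices are zero based):

/* PLANE_CTL(PIPE_B, 1)
 *   -> _PLANE(1, _PLANE_CTL_1(PIPE_B), _PLANE_CTL_2(PIPE_B))
 *   -> _PIPE(PIPE_B, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
 *   -> 0x71280
 */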
/* VBIOS regs */
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
@@ -4625,6 +4931,18 @@ enum punit_power_well {
#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
+#define _PSA_CTL 0x68180
+#define _PSB_CTL 0x68980
+#define PS_ENABLE (1<<31)
+#define _PSA_WIN_SZ 0x68174
+#define _PSB_WIN_SZ 0x68974
+#define _PSA_WIN_POS 0x68170
+#define _PSB_WIN_POS 0x68970
+
+#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL)
+#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
+#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
+
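A minimal sketch of enabling the panel scaler through the registers above; the helper name and the field packing (horizontal component in the upper 16 bits) are assumptions here, not part of this patch:

static void skl_scaler_enable_sketch(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int x, int y,
				     int width, int height)
{
	/* assumed layout: horizontal value in the high half of each reg */
	I915_WRITE(PS_WIN_POS(pipe), (x << 16) | y);
	I915_WRITE(PS_WIN_SZ(pipe), (width << 16) | height);
	I915_WRITE(PS_CTL(pipe), PS_ENABLE);
}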
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
@@ -4746,16 +5064,32 @@ enum punit_power_well {
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
+#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
+#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
+#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
+#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
+#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
+#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
+#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
+#define GEN9_PIPE_PLANE_FLIP_DONE(p)	(1 << (3 + (p)))
#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN8_PIPE_CURSOR_FAULT | \
GEN8_PIPE_SPRITE_FAULT | \
GEN8_PIPE_PRIMARY_FAULT)
+#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN9_PIPE_CURSOR_FAULT | \
+ GEN9_PIPE_PLANE3_FAULT | \
+ GEN9_PIPE_PLANE2_FAULT | \
+ GEN9_PIPE_PLANE1_FAULT)
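As on GEN8, the grouped mask lets the interrupt handler test all plane faults in one go; a sketch of the check, assuming iir holds the DE pipe IIR value:

	if (iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS)
		DRM_ERROR("fault errors on pipe %c: 0x%08x\n",
			  pipe_name(pipe),
			  iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS);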
#define GEN8_DE_PORT_ISR 0x44440
#define GEN8_DE_PORT_IMR 0x44444
#define GEN8_DE_PORT_IIR 0x44448
#define GEN8_DE_PORT_IER 0x4444c
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
+#define GEN9_AUX_CHANNEL_D (1 << 27)
+#define GEN9_AUX_CHANNEL_C (1 << 26)
+#define GEN9_AUX_CHANNEL_B (1 << 25)
#define GEN8_AUX_CHANNEL_A (1 << 0)
#define GEN8_DE_MISC_ISR 0x44460
@@ -4839,6 +5173,8 @@ enum punit_power_well {
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
#define HDC_FORCE_NON_COHERENT (1<<4)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
+#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
@@ -5540,6 +5876,12 @@ enum punit_power_well {
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MEDIA_GEN9 0xa270
+#define FORCEWAKE_RENDER_GEN9 0xa278
+#define FORCEWAKE_BLITTER_GEN9 0xa188
+#define FORCEWAKE_ACK_MEDIA_GEN9 0x0D88
+#define FORCEWAKE_ACK_RENDER_GEN9 0x0D84
+#define FORCEWAKE_ACK_BLITTER_GEN9 0x130044
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
@@ -5711,9 +6053,17 @@ enum punit_power_well {
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define DISPLAY_IPS_CONTROL 0x19
+#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
+#define GEN6_PCODE_DATA1 0x13812C
+
+#define GEN9_PCODE_READ_MEM_LATENCY 0x6
+#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
+#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
+#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
+#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
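The shift names hint at the layout: each pcode reply carries four one-byte latency levels, so the same shifts decode levels 0-3 and 4-7 from two successive reads. A sketch, assuming val holds one GEN9_PCODE_READ_MEM_LATENCY reply and wm is a u8 array:

	wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
	wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
		GEN9_MEM_LATENCY_LEVEL_MASK;
	wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
		GEN9_MEM_LATENCY_LEVEL_MASK;
	wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
		GEN9_MEM_LATENCY_LEVEL_MASK;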
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
@@ -5751,6 +6101,9 @@ enum punit_power_well {
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+#define GEN9_HALF_SLICE_CHICKEN5 0xe188
+#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
+
#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
@@ -5766,57 +6119,58 @@ enum punit_power_well {
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+/* Audio */
#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
-#define INTEL_AUDIO_DEVCL 0x808629FB
-#define INTEL_AUDIO_DEVBLC 0x80862801
-#define INTEL_AUDIO_DEVCTG 0x80862802
+#define INTEL_AUDIO_DEVCL 0x808629FB
+#define INTEL_AUDIO_DEVBLC 0x80862801
+#define INTEL_AUDIO_DEVCTG 0x80862802
#define G4X_AUD_CNTL_ST 0x620B4
-#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
-#define G4X_ELDV_DEVCTG (1 << 14)
-#define G4X_ELD_ADDR (0xf << 5)
-#define G4X_ELD_ACK (1 << 4)
+#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
+#define G4X_ELDV_DEVCTG (1 << 14)
+#define G4X_ELD_ADDR_MASK (0xf << 5)
+#define G4X_ELD_ACK (1 << 4)
#define G4X_HDMIW_HDMIEDID 0x6210C
-#define IBX_HDMIW_HDMIEDID_A 0xE2050
-#define IBX_HDMIW_HDMIEDID_B 0xE2150
+#define _IBX_HDMIW_HDMIEDID_A 0xE2050
+#define _IBX_HDMIW_HDMIEDID_B 0xE2150
#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
- IBX_HDMIW_HDMIEDID_A, \
- IBX_HDMIW_HDMIEDID_B)
-#define IBX_AUD_CNTL_ST_A 0xE20B4
-#define IBX_AUD_CNTL_ST_B 0xE21B4
+ _IBX_HDMIW_HDMIEDID_A, \
+ _IBX_HDMIW_HDMIEDID_B)
+#define _IBX_AUD_CNTL_ST_A 0xE20B4
+#define _IBX_AUD_CNTL_ST_B 0xE21B4
#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
- IBX_AUD_CNTL_ST_A, \
- IBX_AUD_CNTL_ST_B)
-#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
-#define IBX_ELD_ADDRESS (0x1f << 5)
-#define IBX_ELD_ACK (1 << 4)
+ _IBX_AUD_CNTL_ST_A, \
+ _IBX_AUD_CNTL_ST_B)
+#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
+#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
#define IBX_AUD_CNTL_ST2 0xE20C0
-#define IBX_ELD_VALIDB (1 << 0)
-#define IBX_CP_READYB (1 << 1)
+#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
+#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
-#define CPT_HDMIW_HDMIEDID_A 0xE5050
-#define CPT_HDMIW_HDMIEDID_B 0xE5150
+#define _CPT_HDMIW_HDMIEDID_A 0xE5050
+#define _CPT_HDMIW_HDMIEDID_B 0xE5150
#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
- CPT_HDMIW_HDMIEDID_A, \
- CPT_HDMIW_HDMIEDID_B)
-#define CPT_AUD_CNTL_ST_A 0xE50B4
-#define CPT_AUD_CNTL_ST_B 0xE51B4
+ _CPT_HDMIW_HDMIEDID_A, \
+ _CPT_HDMIW_HDMIEDID_B)
+#define _CPT_AUD_CNTL_ST_A 0xE50B4
+#define _CPT_AUD_CNTL_ST_B 0xE51B4
#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
- CPT_AUD_CNTL_ST_A, \
- CPT_AUD_CNTL_ST_B)
+ _CPT_AUD_CNTL_ST_A, \
+ _CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
-#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
-#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
+#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
+#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
- VLV_HDMIW_HDMIEDID_A, \
- VLV_HDMIW_HDMIEDID_B)
-#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
-#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
+ _VLV_HDMIW_HDMIEDID_A, \
+ _VLV_HDMIW_HDMIEDID_B)
+#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
+#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
- VLV_AUD_CNTL_ST_A, \
- VLV_AUD_CNTL_ST_B)
+ _VLV_AUD_CNTL_ST_A, \
+ _VLV_AUD_CNTL_ST_B)
#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
/* These are the 4 32-bit write offset registers for each stream
@@ -5825,28 +6179,28 @@ enum punit_power_well {
*/
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
-#define IBX_AUD_CONFIG_A 0xe2000
-#define IBX_AUD_CONFIG_B 0xe2100
+#define _IBX_AUD_CONFIG_A 0xe2000
+#define _IBX_AUD_CONFIG_B 0xe2100
#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
- IBX_AUD_CONFIG_A, \
- IBX_AUD_CONFIG_B)
-#define CPT_AUD_CONFIG_A 0xe5000
-#define CPT_AUD_CONFIG_B 0xe5100
+ _IBX_AUD_CONFIG_A, \
+ _IBX_AUD_CONFIG_B)
+#define _CPT_AUD_CONFIG_A 0xe5000
+#define _CPT_AUD_CONFIG_B 0xe5100
#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
- CPT_AUD_CONFIG_A, \
- CPT_AUD_CONFIG_B)
-#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
-#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
+ _CPT_AUD_CONFIG_A, \
+ _CPT_AUD_CONFIG_B)
+#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
+#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
- VLV_AUD_CONFIG_A, \
- VLV_AUD_CONFIG_B)
+ _VLV_AUD_CONFIG_A, \
+ _VLV_AUD_CONFIG_B)
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
-#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
+#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
-#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
+#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
@@ -5862,52 +6216,44 @@ enum punit_power_well {
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
/* HSW Audio */
-#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
-#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
-#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
- HSW_AUD_CONFIG_A, \
- HSW_AUD_CONFIG_B)
-
-#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
-#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
-#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
- HSW_AUD_MISC_CTRL_A, \
- HSW_AUD_MISC_CTRL_B)
-
-#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
-#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
-#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
- HSW_AUD_DIP_ELD_CTRL_ST_A, \
- HSW_AUD_DIP_ELD_CTRL_ST_B)
+#define _HSW_AUD_CONFIG_A 0x65000
+#define _HSW_AUD_CONFIG_B 0x65100
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+ _HSW_AUD_CONFIG_A, \
+ _HSW_AUD_CONFIG_B)
+
+#define _HSW_AUD_MISC_CTRL_A 0x65010
+#define _HSW_AUD_MISC_CTRL_B 0x65110
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+ _HSW_AUD_MISC_CTRL_A, \
+ _HSW_AUD_MISC_CTRL_B)
+
+#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
+#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+ _HSW_AUD_DIP_ELD_CTRL_ST_A, \
+ _HSW_AUD_DIP_ELD_CTRL_ST_B)
/* Audio Digital Converter */
-#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
-#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */
-#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
- HSW_AUD_DIG_CNVT_1, \
- HSW_AUD_DIG_CNVT_2)
-#define DIP_PORT_SEL_MASK 0x3
-
-#define HSW_AUD_EDID_DATA_A 0x65050
-#define HSW_AUD_EDID_DATA_B 0x65150
-#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
- HSW_AUD_EDID_DATA_A, \
- HSW_AUD_EDID_DATA_B)
-
-#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
-#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
-#define AUDIO_INACTIVE_C (1<<11)
-#define AUDIO_INACTIVE_B (1<<7)
-#define AUDIO_INACTIVE_A (1<<3)
-#define AUDIO_OUTPUT_ENABLE_A (1<<2)
-#define AUDIO_OUTPUT_ENABLE_B (1<<6)
-#define AUDIO_OUTPUT_ENABLE_C (1<<10)
-#define AUDIO_ELD_VALID_A (1<<0)
-#define AUDIO_ELD_VALID_B (1<<4)
-#define AUDIO_ELD_VALID_C (1<<8)
-#define AUDIO_CP_READY_A (1<<1)
-#define AUDIO_CP_READY_B (1<<5)
-#define AUDIO_CP_READY_C (1<<9)
+#define _HSW_AUD_DIG_CNVT_1 0x65080
+#define _HSW_AUD_DIG_CNVT_2 0x65180
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+ _HSW_AUD_DIG_CNVT_1, \
+ _HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define _HSW_AUD_EDID_DATA_A 0x65050
+#define _HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+ _HSW_AUD_EDID_DATA_A, \
+ _HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG 0x6507c
+#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0
+#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
+#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
+#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
+#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
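The parameterized forms replace the old per-transcoder _A/_B/_C singletons; each transcoder owns one nibble of HSW_AUD_PIN_ELD_CP_VLD. For example, checking ELD validity on pipe/transcoder B:

	bool valid = I915_READ(HSW_AUD_PIN_ELD_CP_VLD) &
		     AUDIO_ELD_VALID(PIPE_B);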
/* HSW Power Wells */
#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
@@ -6125,6 +6471,83 @@ enum punit_power_well {
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+/*
+ * SKL Clocks
+ */
+
+/* CDCLK_CTL */
+#define CDCLK_CTL 0x46000
+#define CDCLK_FREQ_SEL_MASK (3<<26)
+#define CDCLK_FREQ_450_432 (0<<26)
+#define CDCLK_FREQ_540 (1<<26)
+#define CDCLK_FREQ_337_308 (2<<26)
+#define CDCLK_FREQ_675_617 (3<<26)
+#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+
+/* LCPLL_CTL */
+#define LCPLL1_CTL 0x46010
+#define LCPLL2_CTL 0x46014
+#define LCPLL_PLL_ENABLE (1<<31)
+
+/* DPLL control1 */
+#define DPLL_CTRL1 0x6C058
+#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
+#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
+#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
+#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
+#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
+#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
+#define DPLL_CRTL1_LINK_RATE_2700 0
+#define DPLL_CRTL1_LINK_RATE_1350 1
+#define DPLL_CRTL1_LINK_RATE_810 2
+#define DPLL_CRTL1_LINK_RATE_1620 3
+#define DPLL_CRTL1_LINK_RATE_1080 4
+#define DPLL_CRTL1_LINK_RATE_2160 5
+
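A sketch of claiming one of the shared DPLLs for DP using these fields; SKL_DPLL1 is the id also used by GET_CFG_CR*_REG() below, DPLL_CTRL1_OVERRIDE() makes the programmed per-id fields take effect, and the 1350 encoding is the one used for HBR-rate DP:

	u32 val = I915_READ(DPLL_CTRL1);

	val &= ~(DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL1) |
		 DPLL_CTRL1_SSC(SKL_DPLL1) |
		 DPLL_CTRL1_HDMI_MODE(SKL_DPLL1));
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL1) |
	       DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, SKL_DPLL1);
	I915_WRITE(DPLL_CTRL1, val);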
+/* DPLL control2 */
+#define DPLL_CTRL2 0x6C05C
+#define DPLL_CTRL2_DDI_CLK_OFF(port)		(1<<((port)+15))
+#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
+#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
+#define DPLL_CTRL2_DDI_CLK_SEL(clk, port)	((clk)<<((port)*3+1))
+#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
+
+/* DPLL Status */
+#define DPLL_STATUS 0x6C060
+#define DPLL_LOCK(id) (1<<((id)*8))
+
+/* DPLL cfg */
+#define DPLL1_CFGCR1 0x6C040
+#define DPLL2_CFGCR1 0x6C048
+#define DPLL3_CFGCR1 0x6C050
+#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
+#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
+#define DPLL_CFGCR1_DCO_FRACTION(x)	((x)<<9)
+#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
+
+#define DPLL1_CFGCR2 0x6C044
+#define DPLL2_CFGCR2 0x6C04C
+#define DPLL3_CFGCR2 0x6C054
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
+#define DPLL_CFGCR2_QDIV_RATIO(x)	((x)<<8)
+#define DPLL_CFGCR2_QDIV_MODE(x)	((x)<<7)
+#define DPLL_CFGCR2_KDIV_MASK (3<<5)
+#define DPLL_CFGCR2_KDIV(x)		((x)<<5)
+#define DPLL_CFGCR2_KDIV_5 (0<<5)
+#define DPLL_CFGCR2_KDIV_2 (1<<5)
+#define DPLL_CFGCR2_KDIV_3 (2<<5)
+#define DPLL_CFGCR2_KDIV_1 (3<<5)
+#define DPLL_CFGCR2_PDIV_MASK (7<<2)
+#define DPLL_CFGCR2_PDIV(x)		((x)<<2)
+#define DPLL_CFGCR2_PDIV_1 (0<<2)
+#define DPLL_CFGCR2_PDIV_2 (1<<2)
+#define DPLL_CFGCR2_PDIV_3 (2<<2)
+#define DPLL_CFGCR2_PDIV_7 (4<<2)
+#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
+
+#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8)
+#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8)
+
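CFGCR1 encodes the DCO frequency against the 24 MHz reference clock, with the fraction in 1/2^15 units; skl_calc_wrpll_link() in intel_ddi.c below implements the decode. A worked example with assumed field values:

/* DCO integer = 337 (0x151), DCO fraction = 0x4000 (i.e. 0.5):
 *   dco_freq = 337 * 24 * 1000 + (0x4000 * 24 * 1000) / 0x8000
 *            = 8088000 + 12000 = 8100000 kHz
 * and with p0 * p1 * p2 = 6:
 *   link clock = 8100000 / (6 * 5) = 270000 kHz
 */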
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 043123c..2636882 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -203,34 +203,19 @@ static void i915_save_display(struct drm_device *dev)
i915_save_display_reg(dev);
/* LVDS state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
- } else if (IS_VALLEYVIEW(dev)) {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-
- dev_priv->regfile.saveBLC_HIST_CTL =
- I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
- dev_priv->regfile.saveBLC_HIST_CTL_B =
- I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
- } else {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->regfile.saveLVDS = I915_READ(LVDS);
- }
-
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+ else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(LVDS);
+ /* Panel power sequencer */
if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
- } else {
+ } else if (!IS_VALLEYVIEW(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
@@ -259,29 +244,19 @@ static void i915_restore_display(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
mask = ~LVDS_PORT_EN;
+ /* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
-
+ /* Panel power sequencer */
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
- I915_WRITE(RSTDBYCTL,
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
- } else if (IS_VALLEYVIEW(dev)) {
- I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
- dev_priv->regfile.saveBLC_HIST_CTL);
- I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
- dev_priv->regfile.saveBLC_HIST_CTL);
- } else {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ } else if (!IS_VALLEYVIEW(dev)) {
I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -328,6 +303,10 @@ int i915_save_state(struct drm_device *dev)
}
}
+ if (IS_GEN4(dev))
+ pci_read_config_word(dev->pdev, GCDGMBUS,
+ &dev_priv->regfile.saveGCDGMBUS);
+
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -356,6 +335,10 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_restore_fences(dev);
+
+ if (IS_GEN4(dev))
+ pci_write_config_word(dev->pdev, GCDGMBUS,
+ dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -368,6 +351,8 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+ I915_WRITE(RSTDBYCTL,
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else {
I915_WRITE(IER, dev_priv->regfile.saveIER);
I915_WRITE(IMR, dev_priv->regfile.saveIMR);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 503847f..4a5af69 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -139,8 +139,6 @@ static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static struct attribute *rc6_attrs[] = {
&dev_attr_rc6_enable.attr,
&dev_attr_rc6_residency_ms.attr,
- &dev_attr_rc6p_residency_ms.attr,
- &dev_attr_rc6pp_residency_ms.attr,
NULL
};
@@ -148,6 +146,17 @@ static struct attribute_group rc6_attr_group = {
.name = power_group_name,
.attrs = rc6_attrs
};
+
+static struct attribute *rc6p_attrs[] = {
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group rc6p_attr_group = {
+ .name = power_group_name,
+ .attrs = rc6p_attrs
+};
#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -595,12 +604,18 @@ void i915_setup_sysfs(struct drm_device *dev)
int ret;
#ifdef CONFIG_PM
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (HAS_RC6(dev)) {
ret = sysfs_merge_group(&dev->primary->kdev->kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
+ if (HAS_RC6p(dev)) {
+ ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+ &rc6p_attr_group);
+ if (ret)
+ DRM_ERROR("RC6p residency sysfs setup failed\n");
+ }
#endif
if (HAS_L3_DPF(dev)) {
ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
@@ -640,5 +655,6 @@ void i915_teardown_sysfs(struct drm_device *dev)
device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
+ sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f5aa006..751d4ad 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -587,6 +587,110 @@ TRACE_EVENT(intel_gpu_freq_change,
TP_printk("new_freq=%u", __entry->freq)
);
+/**
+ * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
+ *
+ * With full ppgtt enabled, each process using drm will allocate at least one
+ * translation table. These traces make it possible to track the allocation
+ * and lifetime of the tables, which can be used during testing/debug to
+ * verify that we are not leaking ppgtts.
+ * These traces identify the ppgtt through the vm pointer, which is also printed
+ * by the i915_vma_bind and i915_vma_unbind tracepoints.
+ */
+DECLARE_EVENT_CLASS(i915_ppgtt,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->dev = vm->dev->primary->index;
+ ),
+
+ TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
+)
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm)
+);
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm)
+);
+
+/**
+ * DOC: i915_context_create and i915_context_free tracepoints
+ *
+ * These tracepoints are used to track creation and deletion of contexts.
+ * If full ppgtt is enabled, they also print the address of the vm assigned to
+ * the context.
+ */
+DECLARE_EVENT_CLASS(i915_context,
+ TP_PROTO(struct intel_context *ctx),
+ TP_ARGS(ctx),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(struct intel_context *, ctx)
+ __field(struct i915_address_space *, vm)
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+ __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+ ),
+
+ TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->ctx, __entry->vm)
+)
+
+DEFINE_EVENT(i915_context, i915_context_create,
+ TP_PROTO(struct intel_context *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(i915_context, i915_context_free,
+ TP_PROTO(struct intel_context *ctx),
+ TP_ARGS(ctx)
+);
+
+/**
+ * DOC: switch_mm tracepoint
+ *
+ * This tracepoint allows tracking of the mm switch, which is an important
+ * point in the lifetime of the vm in the legacy submission path. It is
+ * emitted only if full ppgtt is enabled.
+ */
+TRACE_EVENT(switch_mm,
+ TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
+
+ TP_ARGS(ring, to),
+
+ TP_STRUCT__entry(
+ __field(u32, ring)
+ __field(struct intel_context *, to)
+ __field(struct i915_address_space *, vm)
+ __field(u32, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring->id;
+ __entry->to = to;
+			__entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
+ __entry->dev = ring->dev->primary->index;
+ ),
+
+ TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->ring, __entry->to, __entry->vm)
+);
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 480da59..d10fe3e 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -270,6 +270,12 @@ void i915_save_display_reg(struct drm_device *dev)
}
/* FIXME: regfile.save TV & SDVO state */
+ /* Panel fitter */
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ }
+
/* Backlight */
if (INTEL_INFO(dev)->gen <= 4)
pci_read_config_byte(dev->pdev, PCI_LBPC,
@@ -284,6 +290,7 @@ void i915_save_display_reg(struct drm_device *dev)
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
}
return;
@@ -313,6 +320,13 @@ void i915_restore_display_reg(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ }
+
+ /* Panel fitter */
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+ I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
}
/* Display port ratios (must be done before clock is set) */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
new file mode 100644
index 0000000..2c7ed5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: High Definition Audio over HDMI and Display Port
+ *
+ * The graphics and audio drivers together support High Definition Audio over
+ * HDMI and Display Port. The audio programming sequences are divided into audio
+ * codec and controller enable and disable sequences. The graphics driver
+ * handles the audio codec sequences, while the audio driver handles the audio
+ * controller sequences.
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port. The enable sequences may only be performed after enabling the
+ * transcoder and port, and after link training has completed.
+ *
+ * The codec and controller sequences could be done either in parallel or in
+ * series, but generally the ELDV/PD change in the codec sequence indicates to
+ * the audio driver that the controller sequence should start. Indeed, most of
+ * the co-operation between the graphics and audio drivers is handled via
+ * audio-related registers. (The notable exception is power management, not
+ * covered here.)
+ */
+
+static const struct {
+ int clock;
+ u32 config;
+} hdmi_audio_clock[] = {
+ { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+ { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+ { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+ { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+ { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+ { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+ { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+ { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+ { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+ { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
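The DIV_ROUND_UP() entries are the NTSC-friendly 1000/1001 variants of the base clocks, rounded up to integer kHz. For the first row:

/* DIV_ROUND_UP(25200 * 1000, 1001) = ceil(25200000 / 1001) = 25175,
 * i.e. the 25.175 MHz pixel clock of 59.94 Hz VGA-style modes. */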
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+ if (mode->clock == hdmi_audio_clock[i].clock)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+ DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+ i = 1;
+ }
+
+ DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+ hdmi_audio_clock[i].clock,
+ hdmi_audio_clock[i].config);
+
+ return hdmi_audio_clock[i].config;
+}
+
+static bool intel_eld_uptodate(struct drm_connector *connector,
+ int reg_eldv, uint32_t bits_eldv,
+ int reg_elda, uint32_t bits_elda,
+ int reg_edid)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t tmp;
+ int i;
+
+ tmp = I915_READ(reg_eldv);
+ tmp &= bits_eldv;
+
+ if (!tmp)
+ return false;
+
+ tmp = I915_READ(reg_elda);
+ tmp &= ~bits_elda;
+ I915_WRITE(reg_elda, tmp);
+
+ for (i = 0; i < drm_eld_size(eld) / 4; i++)
+ if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ return false;
+
+ return true;
+}
+
+static void g4x_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ uint32_t eldv, tmp;
+
+ DRM_DEBUG_KMS("Disable audio codec\n");
+
+ tmp = I915_READ(G4X_AUD_VID_DID);
+ if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ /* Invalidate ELD */
+ tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp &= ~eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void g4x_audio_codec_enable(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t tmp;
+ int len, i;
+
+ DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+
+ tmp = I915_READ(G4X_AUD_VID_DID);
+ if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
+ tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
+ len = (tmp >> 9) & 0x1f; /* ELD buffer size */
+ I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+
+ len = min(drm_eld_size(eld) / 4, len);
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+ tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp |= eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void hsw_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t tmp;
+
+ DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
+
+ /* Disable timestamps */
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+ tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+ /* Invalidate ELD */
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp &= ~AUDIO_ELD_VALID(pipe);
+ tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+}
+
+static void hsw_audio_codec_enable(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ const uint8_t *eld = connector->eld;
+ uint32_t tmp;
+ int len, i;
+
+ DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
+ pipe_name(pipe), drm_eld_size(eld));
+
+ /* Enable audio presence detect, invalidate ELD */
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= AUDIO_OUTPUT_ENABLE(pipe);
+ tmp &= ~AUDIO_ELD_VALID(pipe);
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+ /*
+ * FIXME: We're supposed to wait for vblank here, but we have vblanks
+ * disabled during the mode set. The proper fix would be to push the
+ * rest of the setup into a vblank work item, queued here, but the
+ * infrastructure is not there yet.
+ */
+
+ /* Reset ELD write address */
+ tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(pipe));
+ tmp &= ~IBX_ELD_ADDRESS_MASK;
+ I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
+
+ /* Up to 84 bytes of hw ELD buffer */
+ len = min(drm_eld_size(eld), 84);
+ for (i = 0; i < len / 4; i++)
+ I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+
+ /* ELD valid */
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= AUDIO_ELD_VALID(pipe);
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+ /* Enable timestamps */
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ else
+ tmp |= audio_config_hdmi_pixel_clock(mode);
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+}
+
+static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
+ enum port port = intel_dig_port->port;
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t tmp, eldv;
+ int aud_config;
+ int aud_cntrl_st2;
+
+ DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
+ port_name(port), pipe_name(pipe));
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ aud_config = VLV_AUD_CFG(pipe);
+ aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+ } else {
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ /* Disable timestamps */
+ tmp = I915_READ(aud_config);
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+ tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ I915_WRITE(aud_config, tmp);
+
+ if (WARN_ON(!port)) {
+ eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
+ IBX_ELD_VALID(PORT_D);
+ } else {
+ eldv = IBX_ELD_VALID(port);
+ }
+
+ /* Invalidate ELD */
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, tmp);
+}
+
+static void ilk_audio_codec_enable(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
+ enum port port = intel_dig_port->port;
+ enum pipe pipe = intel_crtc->pipe;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t tmp;
+ int len, i;
+ int hdmiw_hdmiedid;
+ int aud_config;
+ int aud_cntl_st;
+ int aud_cntrl_st2;
+
+ DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
+ port_name(port), pipe_name(pipe), drm_eld_size(eld));
+
+ /*
+ * FIXME: We're supposed to wait for vblank here, but we have vblanks
+ * disabled during the mode set. The proper fix would be to push the
+ * rest of the setup into a vblank work item, queued here, but the
+ * infrastructure is not there yet.
+ */
+
+ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(connector->dev)) {
+ hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+ aud_config = VLV_AUD_CFG(pipe);
+ aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+ } else {
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ if (WARN_ON(!port)) {
+ eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
+ IBX_ELD_VALID(PORT_D);
+ } else {
+ eldv = IBX_ELD_VALID(port);
+ }
+
+ /* Invalidate ELD */
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Reset ELD write address */
+ tmp = I915_READ(aud_cntl_st);
+ tmp &= ~IBX_ELD_ADDRESS_MASK;
+ I915_WRITE(aud_cntl_st, tmp);
+
+ /* Up to 84 bytes of hw ELD buffer */
+ len = min(drm_eld_size(eld), 84);
+ for (i = 0; i < len / 4; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ /* ELD valid */
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp |= eldv;
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Enable timestamps */
+ tmp = I915_READ(aud_config);
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ else
+ tmp |= audio_config_hdmi_pixel_clock(mode);
+ I915_WRITE(aud_config, tmp);
+}
+
+/**
+ * intel_audio_codec_enable - Enable the audio codec for HD audio
+ * @intel_encoder: encoder on which to enable audio
+ *
+ * The enable sequences may only be performed after enabling the transcoder and
+ * port, and after link training has completed.
+ */
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ connector = drm_select_eld(encoder, mode);
+ if (!connector)
+ return;
+
+ DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ connector->name,
+ connector->encoder->base.id,
+ connector->encoder->name);
+
+ /* ELD Conn_Type */
+ connector->eld[5] &= ~(3 << 2);
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ connector->eld[5] |= (1 << 2);
+
+ connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+ if (dev_priv->display.audio_codec_enable)
+ dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
+}
+
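For context, a hypothetical call site; the encoder hook and the has_audio flag here are illustrative, not part of this patch, but per the rules above audio is enabled only once the port is up and link training has completed:

static void example_encoder_enable(struct intel_encoder *encoder)
{
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	/* ... transcoder and port enabled, link training done ... */

	if (crtc->config.has_audio)
		intel_audio_codec_enable(encoder);
}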
+/**
+ * intel_audio_codec_disable - Disable the audio codec for HD audio
+ * @encoder: encoder on which to disable audio
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port.
+ */
+void intel_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.audio_codec_disable)
+ dev_priv->display.audio_codec_disable(encoder);
+}
+
+/**
+ * intel_init_audio - Set up chip specific audio functions
+ * @dev: drm device
+ */
+void intel_init_audio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_G4X(dev)) {
+ dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+ } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
+ dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 905999b..7603765 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -46,7 +46,7 @@ struct bdb_header {
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
-};
+} __packed;
/* strictly speaking, this is a "skip" block, but it has interesting info */
struct vbios_data {
@@ -252,7 +252,7 @@ union child_device_config {
/* This one should also be safe to use anywhere, even without version
* checks. */
struct common_child_dev_config common;
-};
+} __packed;
struct bdb_general_definitions {
/* DDC GPIO */
@@ -888,12 +888,12 @@ struct mipi_pps_data {
u16 bl_disable_delay;
u16 panel_off_delay;
u16 panel_power_cycle_delay;
-};
+} __packed;
struct bdb_mipi_config {
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
-};
+} __packed;
/* Block 53 contains MIPI sequences as needed by the panel
* for enabling it. This block can be variable in size and
@@ -902,7 +902,7 @@ struct bdb_mipi_config {
struct bdb_mipi_sequence {
u8 version;
u8 data[0];
-};
+} __packed;
/* MIPI Sequence Block definitions */
enum mipi_seq {
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9212e65..a9af9a4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -72,7 +72,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(crt->adpa_reg);
@@ -775,7 +775,7 @@ static void intel_crt_reset(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
POSTING_READ(crt->adpa_reg);
- DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b63d4fa..e6b45cd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -95,8 +95,8 @@ static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
{ 0x00BEFFFF, 0x00140006 },
{ 0x80B2CFFF, 0x001B0002 },
{ 0x00FFFFFF, 0x000E000A },
- { 0x00D75FFF, 0x00180004 },
- { 0x80CB2FFF, 0x001B0002 },
+ { 0x00DB6FFF, 0x00160005 },
+ { 0x80C71FFF, 0x001A0002 },
{ 0x00F7DFFF, 0x00180004 },
{ 0x80D75FFF, 0x001B0002 },
};
@@ -127,6 +127,32 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
{ 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
};
+static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
+ { 0x00000018, 0x000000a0 },
+ { 0x00004014, 0x00000098 },
+ { 0x00006012, 0x00000088 },
+ { 0x00008010, 0x00000080 },
+ { 0x00000018, 0x00000098 },
+ { 0x00004014, 0x00000088 },
+ { 0x00006012, 0x00000080 },
+ { 0x00000018, 0x00000088 },
+ { 0x00004014, 0x00000080 },
+};
+
+static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
+ /* Idx NT mV T mV db */
+ { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
+ { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
+ { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
+ { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
+ { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
+ { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
+ { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
+ { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
+ { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
+ { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
+};
+
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@@ -169,7 +195,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
- if (IS_BROADWELL(dev)) {
+ if (IS_SKYLAKE(dev)) {
+ ddi_translations_fdi = NULL;
+ ddi_translations_dp = skl_ddi_translations_dp;
+ ddi_translations_edp = skl_ddi_translations_dp;
+ ddi_translations_hdmi = skl_ddi_translations_hdmi;
+ n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+ hdmi_800mV_0dB = 7;
+ } else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
@@ -208,7 +241,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations = ddi_translations_dp;
break;
case PORT_E:
- ddi_translations = ddi_translations_fdi;
+ if (ddi_translations_fdi)
+ ddi_translations = ddi_translations_fdi;
+ else
+ ddi_translations = ddi_translations_dp;
break;
default:
BUG();
@@ -423,6 +459,27 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
return ret;
}
+static struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *intel_encoder, *ret = NULL;
+ int num_encoders = 0;
+
+ for_each_intel_encoder(dev, intel_encoder) {
+ if (intel_encoder->new_crtc == crtc) {
+ ret = intel_encoder;
+ num_encoders++;
+ }
+ }
+
+ WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
+ pipe_name(crtc->pipe));
+
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
@@ -613,6 +670,111 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
+static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ uint32_t dpll)
+{
+ uint32_t cfgcr1_reg, cfgcr2_reg;
+ uint32_t cfgcr1_val, cfgcr2_val;
+ uint32_t p0, p1, p2, dco_freq;
+
+ cfgcr1_reg = GET_CFG_CR1_REG(dpll);
+ cfgcr2_reg = GET_CFG_CR2_REG(dpll);
+
+ cfgcr1_val = I915_READ(cfgcr1_reg);
+ cfgcr2_val = I915_READ(cfgcr2_reg);
+
+ p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
+ p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
+
+ if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR2_PDIV_1:
+ p0 = 1;
+ break;
+ case DPLL_CFGCR2_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR2_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR2_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR2_KDIV_5:
+ p2 = 5;
+ break;
+ case DPLL_CFGCR2_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR2_KDIV_3:
+ p2 = 3;
+ break;
+ case DPLL_CFGCR2_KDIV_1:
+ p2 = 1;
+ break;
+ }
+
+ dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
+
+ dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
+ 1000) / 0x8000;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
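To make the fixed-point decode concrete, here is a freestanding sketch of the same arithmetic with the register fields already extracted into plain integers; the helper name and the sample numbers are illustrative only, not part of the patch:

static unsigned int skl_decode_link_clock_sketch(unsigned int dco_integer,
						 unsigned int dco_fraction,
						 unsigned int p0,
						 unsigned int p1,
						 unsigned int p2)
{
	/* DCO frequency in kHz: (integer + fraction / 2^15) * 24 MHz */
	unsigned int dco_freq = dco_integer * 24 * 1000 +
				(dco_fraction * 24 * 1000) / 0x8000;

	/* the AFE clock runs at 5x the link clock, hence the extra / 5 */
	return dco_freq / (p0 * p1 * p2 * 5);
}

/* e.g. skl_decode_link_clock_sketch(371, 0x2000, 3, 2, 2)
 *      = 8910000 / 60 = 148500 kHz, i.e. a 148.5 MHz HDMI pixel clock */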
+
+static void skl_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ int link_clock = 0;
+ uint32_t dpll_ctl1, dpll;
+
+ dpll = pipe_config->ddi_pll_sel;
+
+ dpll_ctl1 = I915_READ(DPLL_CTRL1);
+
+ if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
+ link_clock = skl_calc_wrpll_link(dev_priv, dpll);
+ } else {
+ link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
+ link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
+
+ switch (link_clock) {
+ case DPLL_CRTL1_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CRTL1_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CRTL1_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ default:
+ WARN(1, "Unsupported link rate\n");
+ break;
+ }
+ link_clock *= 2;
+ }
+
+ pipe_config->port_clock = link_clock;
+
+ if (pipe_config->has_dp_encoder)
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
@@ -756,7 +918,7 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- intel_crtc->config.dpll_hw_state.wrpll = val;
+ intel_crtc->new_config->dpll_hw_state.wrpll = val;
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
@@ -765,12 +927,234 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
return false;
}
- intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+ intel_crtc->new_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
return true;
}
+struct skl_wrpll_params {
+ uint32_t dco_fraction;
+ uint32_t dco_integer;
+ uint32_t qdiv_ratio;
+ uint32_t qdiv_mode;
+ uint32_t kdiv;
+ uint32_t pdiv;
+ uint32_t central_freq;
+};
+
+static void
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ struct skl_wrpll_params *wrpll_params)
+{
+ uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ uint64_t dco_central_freq[3] = {8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL};
+ uint32_t min_dco_deviation = 400;
+ uint32_t min_dco_index = 3;
+ uint32_t P0[4] = {1, 2, 3, 7};
+ uint32_t P2[4] = {1, 2, 3, 5};
+ bool found = false;
+ uint32_t candidate_p = 0;
+ uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
+ uint32_t candidate_p2[3] = {0};
+ uint32_t dco_central_freq_deviation[3];
+ uint32_t i, P1, k, dco_count;
+ bool retry_with_odd = false;
+ uint64_t dco_freq;
+
+ /* Determine P0, P1 or P2 */
+ for (dco_count = 0; dco_count < 3; dco_count++) {
+ found = false;
+ candidate_p =
+ div64_u64(dco_central_freq[dco_count], afe_clock);
+ if (retry_with_odd == false)
+ candidate_p = (candidate_p % 2 == 0 ?
+ candidate_p : candidate_p + 1);
+
+ for (P1 = 1; P1 < candidate_p; P1++) {
+ for (i = 0; i < 4; i++) {
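+				/* P0 of 1 is only valid together with P1 of 1 */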
+ if (!(P0[i] != 1 || P1 == 1))
+ continue;
+
+ for (k = 0; k < 4; k++) {
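+					/* with P1 != 1, only P2 of 2 is allowed */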
+ if (P1 != 1 && P2[k] != 2)
+ continue;
+
+ if (candidate_p == P0[i] * P1 * P2[k]) {
+ /* Found possible P0, P1, P2 */
+ found = true;
+ candidate_p0[dco_count] = P0[i];
+ candidate_p1[dco_count] = P1;
+ candidate_p2[dco_count] = P2[k];
+ goto found;
+ }
+
+ }
+ }
+ }
+
+found:
+ if (found) {
+ dco_central_freq_deviation[dco_count] =
+ div64_u64(10000 *
+ abs_diff((candidate_p * afe_clock),
+ dco_central_freq[dco_count]),
+ dco_central_freq[dco_count]);
+
+ if (dco_central_freq_deviation[dco_count] <
+ min_dco_deviation) {
+ min_dco_deviation =
+ dco_central_freq_deviation[dco_count];
+ min_dco_index = dco_count;
+ }
+ }
+
+ if (min_dco_index > 2 && dco_count == 2) {
+ retry_with_odd = true;
+ dco_count = 0;
+ }
+ }
+
+ if (min_dco_index > 2) {
+ WARN(1, "No valid values found for the given pixel clock\n");
+ } else {
+ wrpll_params->central_freq = dco_central_freq[min_dco_index];
+
+ switch (dco_central_freq[min_dco_index]) {
+ case 9600000000ULL:
+ wrpll_params->central_freq = 0;
+ break;
+ case 9000000000ULL:
+ wrpll_params->central_freq = 1;
+ break;
+ case 8400000000ULL:
+ wrpll_params->central_freq = 3;
+ }
+
+ switch (candidate_p0[min_dco_index]) {
+ case 1:
+ wrpll_params->pdiv = 0;
+ break;
+ case 2:
+ wrpll_params->pdiv = 1;
+ break;
+ case 3:
+ wrpll_params->pdiv = 2;
+ break;
+ case 7:
+ wrpll_params->pdiv = 4;
+ break;
+ default:
+ WARN(1, "Incorrect PDiv\n");
+ }
+
+ switch (candidate_p2[min_dco_index]) {
+ case 5:
+ wrpll_params->kdiv = 0;
+ break;
+ case 2:
+ wrpll_params->kdiv = 1;
+ break;
+ case 3:
+ wrpll_params->kdiv = 2;
+ break;
+ case 1:
+ wrpll_params->kdiv = 3;
+ break;
+ default:
+ WARN(1, "Incorrect KDiv\n");
+ }
+
+ wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
+ wrpll_params->qdiv_mode =
+ (wrpll_params->qdiv_ratio == 1) ? 0 : 1;
+
+ dco_freq = candidate_p0[min_dco_index] *
+ candidate_p1[min_dco_index] *
+ candidate_p2[min_dco_index] * afe_clock;
+
+ /*
+ * Intermediate values are in Hz.
+ * Divide by MHz to match the bspec
+ */
+ wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
+ wrpll_params->dco_fraction =
+ div_u64(((div_u64(dco_freq, 24) -
+ wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+
+ }
+}
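Worked through for a 148.5 MHz pixel clock (the numbers are illustrative, chosen to satisfy the constraints of the search above; the 9.0 GHz DCO central frequency is the one that ends up with the smallest deviation):

	afe_clock    = 148500000 * 5          = 742500000 Hz
	candidate_p  = 9000000000 / 742500000 = 12 (already even)
	first split  : P0 = 3, P1 = 2, P2 = 2 (P1 != 1 forces P2 == 2)
	dco_freq     = 12 * 742500000         = 8910000000 Hz
	deviation    = 10000 * 90000000 / 9000000000 = 100 (< 400)
	dco_integer  = 8910000000 / 24000000  = 371
	dco_fraction = (371250000 - 371000000) * 0x8000 / 1000000 = 0x2000

which encodes as pdiv = 2, kdiv = 1, qdiv_ratio = 2, qdiv_mode = 1 and a central_freq field of 1, matching the decode example after skl_calc_wrpll_link() above.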
+
+static bool
+skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_encoder *intel_encoder,
+ int clock)
+{
+ struct intel_shared_dpll *pll;
+ uint32_t ctrl1, cfgcr1, cfgcr2;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+ struct skl_wrpll_params wrpll_params = { 0, };
+
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+ skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+
+ cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+ wrpll_params.dco_integer;
+
+ cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
+ break;
+ case DP_LINK_BW_2_7:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
+ break;
+ case DP_LINK_BW_5_4:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
+ break;
+ }
+
+ cfgcr1 = cfgcr2 = 0;
+ } else /* eDP */
+ return true;
+
+ intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
+ intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
+ intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
+
+ pll = intel_get_shared_dpll(intel_crtc);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ return false;
+ }
+
+ /* shared DPLL id 0 is DPLL 1 */
+ intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
+
+ return true;
+}
/*
* Tries to find a *shared* PLL for the CRTC and store it in
@@ -781,13 +1165,15 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
*/
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- int clock = intel_crtc->config.port_clock;
-
- intel_put_shared_dpll(intel_crtc);
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct intel_encoder *intel_encoder =
+ intel_ddi_get_crtc_new_encoder(intel_crtc);
+ int clock = intel_crtc->new_config->port_clock;
- return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ if (IS_SKYLAKE(dev))
+ return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ else
+ return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
}
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -962,7 +1348,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
uint32_t tmp;
power_domain = intel_display_port_power_domain(intel_encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
@@ -1008,7 +1394,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
int i;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(DDI_BUF_CTL(port));
@@ -1079,27 +1465,53 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
- if (crtc->config.has_audio) {
- DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
- pipe_name(crtc->pipe));
-
- /* write eld */
- DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
- intel_write_eld(encoder, &crtc->config.adjusted_mode);
- }
-
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
}
- WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+ if (IS_SKYLAKE(dev)) {
+ uint32_t dpll = crtc->config.ddi_pll_sel;
+ uint32_t val;
+
+ /*
+ * DPLL0 is used for eDP and is the only "private" DPLL (as
+ * opposed to shared) on SKL
+ */
+ if (type == INTEL_OUTPUT_EDP) {
+ WARN_ON(dpll != SKL_DPLL0);
+
+ val = I915_READ(DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
+ DPLL_CTRL1_SSC(dpll) |
+ DPLL_CRTL1_LINK_RATE_MASK(dpll));
+ val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
+
+ I915_WRITE(DPLL_CTRL1, val);
+ POSTING_READ(DPLL_CTRL1);
+ }
+
+ /* DDI -> PLL mapping */
+ val = I915_READ(DPLL_CTRL2);
+
+ val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+ val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
+
+ I915_WRITE(DPLL_CTRL2, val);
+
+ } else {
+ WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+ }
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1109,7 +1521,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
- if (port != PORT_A)
+ if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -1123,7 +1535,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1151,7 +1564,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
intel_edp_panel_off(intel_dp);
}
- I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ if (IS_SKYLAKE(dev))
+ I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
+ DPLL_CTRL2_DDI_CLK_OFF(port)));
+ else
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -1159,12 +1576,10 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
- uint32_t tmp;
if (type == INTEL_OUTPUT_HDMI) {
struct intel_digital_port *intel_dig_port =
@@ -1180,18 +1595,16 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (port == PORT_A)
+ if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
- intel_edp_psr_enable(intel_dp);
+ intel_psr_enable(intel_dp);
}
if (intel_crtc->config.has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_audio_codec_enable(intel_encoder);
}
}
@@ -1200,30 +1613,71 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t tmp;
- /* We can't touch HSW_AUD_PIN_ELD_CP_VLD uncionditionally because this
- * register is part of the power well on Haswell. */
if (intel_crtc->config.has_audio) {
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
- (pipe * 4));
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_audio_codec_disable(intel_encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_edp_psr_disable(intel_dp);
+ intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
}
}
+static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
+ uint32_t cdctl = I915_READ(CDCLK_CTL);
+ uint32_t linkrate;
+
+ if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
+ WARN(1, "LCPLL1 not enabled\n");
+ return 24000; /* 24MHz is the cd freq with NSSC ref */
+ }
+
+ if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
+ return 540000;
+
+ linkrate = (I915_READ(DPLL_CTRL1) &
+ DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+
+ if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
+ linkrate == DPLL_CRTL1_LINK_RATE_1080) {
+ /* vco 8640 */
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ return 432000;
+ case CDCLK_FREQ_337_308:
+ return 308570;
+ case CDCLK_FREQ_675_617:
+ return 617140;
+ default:
+ WARN(1, "Unknown cd freq selection\n");
+ }
+ } else {
+ /* vco 8100 */
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ return 450000;
+ case CDCLK_FREQ_337_308:
+ return 337500;
+ case CDCLK_FREQ_675_617:
+ return 675000;
+ default:
+ WARN(1, "Unknown cd freq selection\n");
+ }
+ }
+
+ /* error case, do as if DPLL0 isn't enabled */
+ return 24000;
+}
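For quick reference, the decode above collapses to the following table (values in kHz, read straight off the two switch statements; the 540 MHz selection is decoded before the VCO check and is VCO-independent):

	CDCLK_FREQ_SEL        VCO 8640    VCO 8100
	CDCLK_FREQ_450_432      432000      450000
	CDCLK_FREQ_337_308      308570      337500
	CDCLK_FREQ_540          540000      540000
	CDCLK_FREQ_675_617      617140      675000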
+
static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
uint32_t lcpll = I915_READ(LCPLL_CTL);
@@ -1255,7 +1709,7 @@ static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
- else if (IS_ULT(dev))
+ else if (IS_HSW_ULT(dev))
return 337500;
else
return 540000;
@@ -1265,6 +1719,9 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ if (IS_SKYLAKE(dev))
+ return skl_get_cdclk_freq(dev_priv);
+
if (IS_BROADWELL(dev))
return bdw_get_cdclk_freq(dev_priv);
@@ -1275,7 +1732,7 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+ I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
POSTING_READ(WRPLL_CTL(pll->id));
udelay(20);
}
@@ -1296,7 +1753,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(WRPLL_CTL(pll->id));
@@ -1326,26 +1783,156 @@ static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
}
}
+static const char * const skl_ddi_pll_names[] = {
+ "DPLL 1",
+ "DPLL 2",
+ "DPLL 3",
+};
+
+struct skl_dpll_regs {
+ u32 ctl, cfgcr1, cfgcr2;
+};
+
+/* this array is indexed by the *shared* pll id */
+static const struct skl_dpll_regs skl_dpll_regs[3] = {
+ {
+ /* DPLL 1 */
+ .ctl = LCPLL2_CTL,
+ .cfgcr1 = DPLL1_CFGCR1,
+ .cfgcr2 = DPLL1_CFGCR2,
+ },
+ {
+ /* DPLL 2 */
+ .ctl = WRPLL_CTL1,
+ .cfgcr1 = DPLL2_CFGCR1,
+ .cfgcr2 = DPLL2_CFGCR2,
+ },
+ {
+ /* DPLL 3 */
+ .ctl = WRPLL_CTL2,
+ .cfgcr1 = DPLL3_CFGCR1,
+ .cfgcr2 = DPLL3_CFGCR2,
+ },
+};
+
+static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+ unsigned int dpll;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
+ dpll = pll->id + 1;
+
+ val = I915_READ(DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
+ DPLL_CRTL1_LINK_RATE_MASK(dpll));
+ val |= pll->config.hw_state.ctrl1 << (dpll * 6);
+
+ I915_WRITE(DPLL_CTRL1, val);
+ POSTING_READ(DPLL_CTRL1);
+
+ I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
+ I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+ POSTING_READ(regs[pll->id].cfgcr1);
+ POSTING_READ(regs[pll->id].cfgcr2);
+
+ /* the enable bit is always bit 31 */
+ I915_WRITE(regs[pll->id].ctl,
+ I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
+
+ if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
+ DRM_ERROR("DPLL %d not locked\n", dpll);
+}
+
+static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ /* the enable bit is always bit 31 */
+ I915_WRITE(regs[pll->id].ctl,
+ I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
+ POSTING_READ(regs[pll->id].ctl);
+}
+
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+ unsigned int dpll;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
+ dpll = pll->id + 1;
+
+ val = I915_READ(regs[pll->id].ctl);
+ if (!(val & LCPLL_PLL_ENABLE))
+ return false;
+
+ val = I915_READ(DPLL_CTRL1);
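+	/*
+	 * Each DPLL owns a 6-bit slot in DPLL_CTRL1 (override, link rate,
+	 * SSC, HDMI mode), hence the dpll * 6 shift and the 0x3f mask.
+	 */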
+ hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
+
+ /* avoid reading back stale values if HDMI mode is not enabled */
+ if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
+ hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
+ hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
+ }
+
+ return true;
+}
+
+static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ dev_priv->num_shared_dpll = 3;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ dev_priv->shared_dplls[i].id = i;
+ dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
+ dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
+ dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
+ dev_priv->shared_dplls[i].get_hw_state =
+ skl_ddi_pll_get_hw_state;
+ }
+}
+
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
- hsw_shared_dplls_init(dev_priv);
-
- /* The LCPLL register should be turned on by the BIOS. For now let's
- * just check its state and print errors in case something is wrong.
- * Don't even try to turn it on.
- */
+ if (IS_SKYLAKE(dev))
+ skl_shared_dplls_init(dev_priv);
+ else
+ hsw_shared_dplls_init(dev_priv);
DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
intel_ddi_get_cdclk_freq(dev_priv));
- if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
+ if (IS_SKYLAKE(dev)) {
+ if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
+ DRM_ERROR("LCPLL1 is disabled\n");
+ } else {
+ /*
+ * The LCPLL register should be turned on by the BIOS. For now
+ * let's just check its state and print errors in case
+ * something is wrong. Don't even try to turn it on.
+ */
- if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+ }
}
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
@@ -1440,7 +2027,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
+ struct drm_device *dev = dev_priv->dev;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
@@ -1474,6 +2063,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
+ intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+
+ if (intel_hdmi->infoframe_enabled(&encoder->base))
+ pipe_config->has_infoframe = true;
+ break;
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
@@ -1486,9 +2080,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
- if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
+ if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
pipe_config->has_audio = true;
}
@@ -1512,7 +2106,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
- hsw_ddi_clock_get(encoder, pipe_config);
+ if (INTEL_INFO(dev)->gen <= 8)
+ hsw_ddi_clock_get(encoder, pipe_config);
+ else
+ skl_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9cb5c95..fb3e3d4 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -73,8 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static void intel_increase_pllclock(struct drm_device *dev,
- enum pipe pipe);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -96,8 +94,10 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
-static void vlv_prepare_pll(struct intel_crtc *crtc);
-static void chv_prepare_pll(struct intel_crtc *crtc);
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config);
+static void chv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config);
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
@@ -408,25 +408,43 @@ static void vlv_clock(int refclk, intel_clock_t *clock)
/**
* Returns whether any output on the specified pipe is of the specified type
*/
-static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
- for_each_encoder_on_crtc(dev, crtc, encoder)
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
if (encoder->type == type)
return true;
return false;
}
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+/**
+ * Returns whether any output on the specified pipe will have the specified
+ * type after a staged modeset is complete, i.e., the same as
+ * intel_pipe_has_type() but looking at encoder->new_crtc instead of
+ * encoder->crtc.
+ */
+static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(dev, encoder)
+ if (encoder->new_crtc == crtc && encoder->type == type)
+ return true;
+
+ return false;
+}
+
+static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
int refclk)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -444,20 +462,20 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
return limit;
}
-static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+ } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
@@ -465,9 +483,9 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
+static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
@@ -475,7 +493,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_PINEVIEW(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
@@ -484,14 +502,14 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
} else {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
+ else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dac;
@@ -578,15 +596,15 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
}
static bool
-i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -639,15 +657,15 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -698,11 +716,11 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int max_n;
bool found;
@@ -710,7 +728,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int err_most = (target >> 8) + (target >> 9);
found = false;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
@@ -755,11 +773,11 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
@@ -812,11 +830,11 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
uint64_t m2;
int found = false;
@@ -889,60 +907,6 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
return intel_crtc->config.cpu_transcoder;
}
-static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
-
- frame = I915_READ(frame_reg);
-
- if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
- WARN(1, "vblank wait on pipe %c timed out\n",
- pipe_name(pipe));
-}
-
-/**
- * intel_wait_for_vblank - wait for vblank on a given pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * Wait for vblank to occur on a given pipe. Needed for various bits of
- * mode setting code.
- */
-void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipestat_reg = PIPESTAT(pipe);
-
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
- g4x_wait_for_vblank(dev, pipe);
- return;
- }
-
- /* Clear existing vblank status. Note this will clear any other
- * sticky status fields as well.
- *
- * This races with i915_driver_irq_handler() with the result
- * that either function could miss a vblank event. Here it is not
- * fatal, as we will either wait upon the next vblank interrupt or
- * timeout. Generally speaking intel_wait_for_vblank() is only
- * called during modeset at which time the GPU should be idle and
- * should *not* be performing page flips and thus not waiting on
- * vblanks...
- * Currently, the result of us stealing a vblank from the irq
- * handler is that a single frame will be skipped during swapbuffers.
- */
- I915_WRITE(pipestat_reg,
- I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
-
- /* Wait for vblank interrupt bit to set */
- if (wait_for(I915_READ(pipestat_reg) &
- PIPE_VBLANK_INTERRUPT_STATUS,
- 50))
- DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
- pipe_name(pipe));
-}
-
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1189,8 +1153,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
state_string(state), state_string(cur_state));
}
-static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int pp_reg;
@@ -1263,7 +1227,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
state = true;
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
@@ -1332,7 +1296,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
int reg, sprite;
u32 val;
- if (IS_VALLEYVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ for_each_sprite(pipe, sprite) {
+ val = I915_READ(PLANE_CTL(pipe, sprite));
+ WARN(val & PLANE_CTL_ENABLE,
+ "plane %d assertion failure, should be off on pipe %c but is still active\n",
+ sprite, pipe_name(pipe));
+ }
+ } else if (IS_VALLEYVIEW(dev)) {
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
@@ -1533,12 +1504,13 @@ static void intel_init_dpio(struct drm_device *dev)
}
}
-static void vlv_enable_pll(struct intel_crtc *crtc)
+static void vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
- u32 dpll = crtc->config.dpll_hw_state.dpll;
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1556,7 +1528,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
- I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+ I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(crtc->pipe));
/* We do this three times for luck */
@@ -1571,7 +1543,8 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
udelay(150); /* wait for warmup */
}
-static void chv_enable_pll(struct intel_crtc *crtc)
+static void chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1596,14 +1569,14 @@ static void chv_enable_pll(struct intel_crtc *crtc)
udelay(1);
/* Enable PLL */
- I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
+ I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
/* not sure when this should be written */
- I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
+ I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(pipe));
mutex_unlock(&dev_priv->dpio_lock);
@@ -1616,7 +1589,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
for_each_intel_crtc(dev, crtc)
count += crtc->active &&
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
return count;
}
@@ -1695,7 +1668,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev) &&
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
intel_num_dvo_pipes(dev) == 1) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1806,7 +1779,7 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL))
return;
- WARN_ON(!pll->refcount);
+ WARN_ON(!pll->config.crtc_mask);
if (pll->active == 0) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
@@ -1833,7 +1806,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL))
return;
- if (WARN_ON(pll->refcount == 0))
+ if (WARN_ON(pll->config.crtc_mask == 0))
return;
DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
@@ -1865,7 +1838,7 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL))
return;
- if (WARN_ON(pll->refcount == 0))
+ if (WARN_ON(pll->config.crtc_mask == 0))
return;
DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -1933,7 +1906,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv->dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+ intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
@@ -2056,7 +2029,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -2221,11 +2194,13 @@ static int intel_align_height(struct drm_device *dev, int height, bool tiled)
}
int
-intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
+intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined)
{
+ struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 alignment;
int ret;
@@ -2233,7 +2208,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
switch (obj->tiling_mode) {
case I915_TILING_NONE:
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ alignment = 256 * 1024;
+ else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
else if (INTEL_INFO(dev)->gen >= 4)
alignment = 4 * 1024;
@@ -2241,8 +2218,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
alignment = 64 * 1024;
break;
case I915_TILING_X:
- /* pin() will align the object as required by fence */
- alignment = 0;
+ if (INTEL_INFO(dev)->gen >= 9)
+ alignment = 256 * 1024;
+ else {
+ /* pin() will align the object as required by fence */
+ alignment = 0;
+ }
break;
case I915_TILING_Y:
WARN(1, "Y tiled bo slipped through, driver bug!\n");
@@ -2402,6 +2383,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct intel_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
@@ -2433,6 +2415,9 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
continue;
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dev_priv->preserve_bios_swizzle = true;
+
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
@@ -2486,6 +2471,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
+ } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
+ I915_WRITE(PRIMSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(PRIMPOS(plane), 0);
+ I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
switch (fb->pixel_format) {
@@ -2672,6 +2663,92 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
POSTING_READ(reg);
}
+static void skylake_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int pipe = intel_crtc->pipe;
+ u32 plane_ctl, stride;
+
+ if (!intel_crtc->primary_enabled) {
+ I915_WRITE(PLANE_CTL(pipe, 0), 0);
+ I915_WRITE(PLANE_SURF(pipe, 0), 0);
+ POSTING_READ(PLANE_CTL(pipe, 0));
+ return;
+ }
+
+ plane_ctl = PLANE_CTL_ENABLE |
+ PLANE_CTL_PIPE_GAMMA_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ plane_ctl |= PLANE_CTL_ORDER_RGBX;
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ plane_ctl |= PLANE_CTL_ORDER_RGBX;
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+ break;
+ default:
+ BUG();
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ /*
+ * The stride is expressed either in 64-byte chunks for linear buffers
+ * or in number of tiles for tiled buffers.
+ */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ stride = fb->pitches[0] >> 6;
+ break;
+ case I915_TILING_X:
+ plane_ctl |= PLANE_CTL_TILED_X;
+ stride = fb->pitches[0] >> 9;
+ break;
+ default:
+ BUG();
+ }
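+
+	/*
+	 * Example: a 1920x1080 XRGB8888 fb has pitches[0] = 1920 * 4 = 7680
+	 * bytes, i.e. stride 7680 / 64 = 120 for linear scanout and
+	 * 7680 / 512 = 15 for X tiling (an X tile is 512 bytes wide).
+	 */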
+
+ plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+ if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
+ plane_ctl |= PLANE_CTL_ROTATE_180;
+
+ I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
+
+ DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
+ i915_gem_obj_ggtt_offset(obj),
+ x, y, fb->width, fb->height,
+ fb->pitches[0]);
+
+ I915_WRITE(PLANE_POS(pipe, 0), 0);
+ I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
+ I915_WRITE(PLANE_SIZE(pipe, 0),
+ (intel_crtc->config.pipe_src_h - 1) << 16 |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+ I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+
+ POSTING_READ(PLANE_SURF(pipe, 0));
+}
+
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2682,32 +2759,16 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
return 0;
}
-void intel_display_handle_reset(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- /*
- * Flips in the rings have been nuked by the reset,
- * so complete all pending flips so that user space
- * will get its events and not get stuck.
- *
- * Also update the base address of all primary
- * planes to the the last fb to make sure we're
- * showing the correct fb after a reset.
- *
- * Need to make two loops over the crtcs so that we
- * don't try to grab a crtc mutex before the
- * pending_flip_queue really got woken up.
- */
-
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum plane plane = intel_crtc->plane;
@@ -2715,6 +2776,12 @@ void intel_display_handle_reset(struct drm_device *dev)
intel_prepare_page_flip(dev, plane);
intel_finish_page_flip_plane(dev, plane);
}
+}
+
+static void intel_update_primary_planes(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2734,6 +2801,79 @@ void intel_display_handle_reset(struct drm_device *dev)
}
}
+void intel_prepare_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc;
+
+ /* no reset support for gen2 */
+ if (IS_GEN2(dev))
+ return;
+
+ /* reset doesn't touch the display */
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ return;
+
+ drm_modeset_lock_all(dev);
+
+ /*
+ * Disabling the crtcs gracefully seems nicer. Also the
+ * g33 docs say we should at least disable all the planes.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (crtc->active)
+ dev_priv->display.crtc_disable(&crtc->base);
+ }
+}
+
+void intel_finish_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ /*
+ * Flips in the rings will be nuked by the reset,
+ * so complete all pending flips so that user space
+ * will get its events and not get stuck.
+ */
+ intel_complete_page_flips(dev);
+
+ /* no reset support for gen2 */
+ if (IS_GEN2(dev))
+ return;
+
+ /* reset doesn't touch the display */
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+ /*
+ * Flips in the rings have been nuked by the reset,
+ * so update the base address of all primary
+ * planes to the last fb to make sure we're
+ * showing the correct fb after a reset.
+ */
+ intel_update_primary_planes(dev);
+ return;
+ }
+
+ /*
+ * The display has been reset as well,
+ * so need a full re-initialization.
+ */
+ intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_runtime_pm_enable_interrupts(dev_priv);
+
+ intel_modeset_init_hw(dev);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ intel_modeset_setup_hw_state(dev, true);
+
+ intel_hpd_init(dev_priv);
+
+ drm_modeset_unlock_all(dev);
+}
+
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
@@ -2762,20 +2902,58 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
bool pending;
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
return pending;
}
+static void intel_update_pipe_size(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_display_mode *adjusted_mode;
+
+ if (!i915.fastboot)
+ return;
+
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ *
+ * To fix this properly, we need to hoist the checks up into
+ * compute_mode_changes (or above), check the actual pfit state and
+ * whether the platform allows pfit disable with pipe active, and only
+ * then update the pipesrc and pfit state, even on the flip path.
+ */
+
+ adjusted_mode = &crtc->config.adjusted_mode;
+
+ I915_WRITE(PIPESRC(crtc->pipe),
+ ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+ (adjusted_mode->crtc_vdisplay - 1));
+ if (!crtc->config.pch_pfit.enabled &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ I915_WRITE(PF_CTL(crtc->pipe), 0);
+ I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
+ I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
+ }
+ crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+ crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
+}
+
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
@@ -2785,7 +2963,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
@@ -2808,9 +2985,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
if (ret == 0)
- i915_gem_track_fb(old_obj, obj,
+ i915_gem_track_fb(old_obj, intel_fb_obj(fb),
INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
@@ -2818,37 +2995,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- /*
- * Update pipe size and adjust fitter if needed: the reason for this is
- * that in compute_mode_changes we check the native mode (not the pfit
- * mode) to see if we can flip rather than do a full mode set. In the
- * fastboot case, we'll flip, but if we don't update the pipesrc and
- * pfit state, we'll end up with a big fb scanned out into the wrong
- * sized surface.
- *
- * To fix this properly, we need to hoist the checks up into
- * compute_mode_changes (or above), check the actual pfit state and
- * whether the platform allows pfit disable with pipe active, and only
- * then update the pipesrc and pfit state, even on the flip path.
- */
- if (i915.fastboot) {
- const struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
-
- I915_WRITE(PIPESRC(intel_crtc->pipe),
- ((adjusted_mode->crtc_hdisplay - 1) << 16) |
- (adjusted_mode->crtc_vdisplay - 1));
- if (!intel_crtc->config.pch_pfit.enabled &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
- I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
- I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
- I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
- }
- intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
- intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
- }
-
dev_priv->display.update_primary_plane(crtc, fb, x, y);
if (intel_crtc->active)
@@ -3472,14 +3618,13 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
!intel_crtc_has_pending_flip(crtc),
60*HZ) == 0)) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
if (intel_crtc->unpin_work) {
WARN_ONCE(1, "Removing stuck page flip\n");
page_flip_completed(intel_crtc);
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
}
if (crtc->primary->fb) {
@@ -3704,9 +3849,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
@@ -3766,12 +3909,13 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
if (pll == NULL)
return;
- if (pll->refcount == 0) {
- WARN(1, "bad %s refcount\n", pll->name);
+ if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
+ WARN(1, "bad %s crtc mask\n", pll->name);
return;
}
- if (--pll->refcount == 0) {
+ pll->config.crtc_mask &= ~(1 << crtc->pipe);
+ if (pll->config.crtc_mask == 0) {
WARN_ON(pll->on);
WARN_ON(pll->active);
}
@@ -3782,15 +3926,9 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+ struct intel_shared_dpll *pll;
enum intel_dpll_id i;
- if (pll) {
- DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
- crtc->base.base.id, pll->name);
- intel_put_shared_dpll(crtc);
- }
-
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
@@ -3799,7 +3937,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
- WARN_ON(pll->refcount);
+ WARN_ON(pll->new_config->crtc_mask);
goto found;
}
@@ -3808,15 +3946,16 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
- if (pll->refcount == 0)
+ if (pll->new_config->crtc_mask == 0)
continue;
- if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
- sizeof(pll->hw_state)) == 0) {
- DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
- crtc->base.base.id,
- pll->name, pll->refcount, pll->active);
-
+ if (memcmp(&crtc->new_config->dpll_hw_state,
+ &pll->new_config->hw_state,
+ sizeof(pll->new_config->hw_state)) == 0) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
+ crtc->base.base.id, pll->name,
+ pll->new_config->crtc_mask,
+ pll->active);
goto found;
}
}
@@ -3824,7 +3963,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
/* Ok no matching timings, maybe there's a free one? */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
- if (pll->refcount == 0) {
+ if (pll->new_config->crtc_mask == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
crtc->base.base.id, pll->name);
goto found;
@@ -3834,18 +3973,86 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
return NULL;
found:
- if (pll->refcount == 0)
- pll->hw_state = crtc->config.dpll_hw_state;
+ if (pll->new_config->crtc_mask == 0)
+ pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
- crtc->config.shared_dpll = i;
+ crtc->new_config->shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
- pll->refcount++;
+ pll->new_config->crtc_mask |= 1 << crtc->pipe;
return pll;
}
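With the refcount replaced by a crtc_mask, a PLL's users are identified by pipe rather than merely counted. A minimal sketch of what the mask buys, assuming the intel_shared_dpll layout used above (pll_is_shared() is an illustrative helper, not part of the patch; hweight32() is the kernel's population count from <linux/bitops.h>):

	static inline bool pll_is_shared(const struct intel_shared_dpll *pll)
	{
		/* more than one bit set => more than one pipe uses this PLL */
		return hweight32(pll->config.crtc_mask) > 1;
	}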
+/**
+ * intel_shared_dpll_start_config - start a new PLL staged config
+ * @dev_priv: i915 device instance
+ * @clear_pipes: mask of pipes that will have their PLLs freed
+ *
+ * Starts a new PLL staged config, copying the current config but
+ * releasing the references of pipes specified in clear_pipes.
+ */
+static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
+ unsigned clear_pipes)
+{
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ pll->new_config = kmemdup(&pll->config, sizeof(pll->config),
+ GFP_KERNEL);
+ if (!pll->new_config)
+ goto cleanup;
+
+ pll->new_config->crtc_mask &= ~clear_pipes;
+ }
+
+ return 0;
+
+cleanup:
+ while (--i >= 0) {
+ pll = &dev_priv->shared_dplls[i];
+ kfree(pll->new_config);
+ pll->new_config = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
+{
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ WARN_ON(pll->new_config == &pll->config);
+
+ pll->config = *pll->new_config;
+ kfree(pll->new_config);
+ pll->new_config = NULL;
+ }
+}
+
+static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
+{
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ WARN_ON(pll->new_config == &pll->config);
+
+ kfree(pll->new_config);
+ pll->new_config = NULL;
+ }
+}
+
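The three helpers above form a transactional update: start duplicates every PLL's config, the modeset code then mutates only the new_config copies (as intel_get_shared_dpll() now does), and commit/abort either swaps the copies in or throws them away. A sketch of the intended calling sequence, with the surrounding modeset variables simplified for illustration:

	ret = intel_shared_dpll_start_config(dev_priv, modeset_pipes);
	if (ret)
		return ret;

	/* ...assign PLLs into pll->new_config via intel_get_shared_dpll()... */

	if (assignment_failed) {
		intel_shared_dpll_abort_config(dev_priv);	/* drop the copies */
		return -EINVAL;
	}
	intel_shared_dpll_commit(dev_priv);			/* make them live */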
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3860,6 +4067,19 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
+static void skylake_pfit_enable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+ if (crtc->config.pch_pfit.enabled) {
+ I915_WRITE(PS_CTL(pipe), PS_ENABLE);
+ I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
+ I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ }
+}
+
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -3983,7 +4203,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
return;
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -4038,10 +4258,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- assert_vblank_disabled(crtc);
-
- drm_vblank_on(dev, pipe);
-
intel_enable_primary_hw_plane(crtc->primary, crtc);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -4087,10 +4303,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
* consider this a flip to a NULL plane.
*/
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
-
- drm_vblank_off(dev, pipe);
-
- assert_vblank_disabled(crtc);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4123,8 +4335,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -4160,6 +4372,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
intel_crtc_enable_planes(crtc);
}
@@ -4235,19 +4450,23 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
if (intel_crtc->config.has_pch_encoder) {
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+ true);
dev_priv->display.fdi_link_train(crtc);
}
intel_ddi_enable_pipe_clock(intel_crtc);
- ironlake_pfit_enable(intel_crtc);
+ if (IS_SKYLAKE(dev))
+ skylake_pfit_enable(intel_crtc);
+ else
+ ironlake_pfit_enable(intel_crtc);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -4272,12 +4491,30 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_opregion_notify_encoder(encoder, true);
}
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
intel_crtc_enable_planes(crtc);
}
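The enable paths now share one ordering contract, mirrored by the disable paths: vblanks are switched on only once the pipe is running and before any plane is enabled. Schematically (a sketch of the common sequence, not a new helper):

	/* enable path */
	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);
	intel_crtc_enable_planes(crtc);

	/* disable path, mirror image */
	intel_crtc_disable_planes(crtc);
	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);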
+static void skylake_pfit_disable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+ /* To avoid upsetting the power well, only disable the pfit if it's in
+ * use. The hw state code will make sure we get this right. */
+ if (crtc->config.pch_pfit.enabled) {
+ I915_WRITE(PS_CTL(pipe), 0);
+ I915_WRITE(PS_WIN_POS(pipe), 0);
+ I915_WRITE(PS_WIN_SZ(pipe), 0);
+ }
+}
+
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4307,11 +4544,14 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_disable_planes(crtc);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
if (intel_crtc->config.has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_disable_pipe(intel_crtc);
@@ -4368,13 +4608,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_crtc_disable_planes(crtc);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
}
if (intel_crtc->config.has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+ false);
intel_disable_pipe(intel_crtc);
if (intel_crtc->config.dp_encoder_is_mst)
@@ -4382,7 +4626,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
- ironlake_pfit_disable(intel_crtc);
+ if (IS_SKYLAKE(dev))
+ skylake_pfit_disable(intel_crtc);
+ else
+ ironlake_pfit_disable(intel_crtc);
intel_ddi_disable_pipe_clock(intel_crtc);
@@ -4508,20 +4755,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
return mask;
}
-void intel_display_set_init_power(struct drm_i915_private *dev_priv,
- bool enable)
-{
- if (dev_priv->power_domains.init_power_on == enable)
- return;
-
- if (enable)
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- else
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
- dev_priv->power_domains.init_power_on = enable;
-}
-
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4544,6 +4777,9 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
intel_display_power_get(dev_priv, domain);
}
+ if (dev_priv->display.modeset_global_resources)
+ dev_priv->display.modeset_global_resources(dev);
+
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
@@ -4575,7 +4811,7 @@ static void vlv_update_cdclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
dev_priv->vlv_cdclk_freq);
/*
@@ -4614,10 +4850,9 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
mutex_unlock(&dev_priv->rps.hw_lock);
if (cdclk == 400000) {
- u32 divider, vco;
+ u32 divider;
- vco = valleyview_get_vco(dev_priv);
- divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
+ divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
@@ -4696,8 +4931,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
- int vco = valleyview_get_vco(dev_priv);
- int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+ int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
/* FIXME: Punit isn't quite ready yet */
if (IS_CHERRYVIEW(dev_priv->dev))
@@ -4766,18 +5000,30 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
if (req_cdclk != dev_priv->vlv_cdclk_freq) {
+ /*
+ * FIXME: We can end up here with all power domains off, yet
+ * with a CDCLK frequency other than the minimum. To account
+ * for this take the PIPE-A power domain, which covers the HW
+ * blocks needed for the following programming. This can be
+ * removed once it's guaranteed that we get here either with
+ * the minimum CDCLK set, or the required power domains
+ * enabled.
+ */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
if (IS_CHERRYVIEW(dev))
cherryview_set_cdclk(dev, req_cdclk);
else
valleyview_set_cdclk(dev, req_cdclk);
- }
- modeset_update_crtc_power_domains(dev);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+ }
}
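In valleyview_calc_cdclk() above, the 320 MHz bin is only usable when it divides the doubled HPLL frequency evenly; otherwise the 333.33 MHz bin is chosen. Worked arithmetic, with hpll_freq values picked purely for illustration:

	/* hpll_freq = 2000000 kHz: (2000000 << 1) % 320000 = 160000 != 0 */
	freq_320 = 333333;	/* fall back to 333.33 MHz */

	/* hpll_freq = 1600000 kHz: (1600000 << 1) % 320000 == 0 */
	freq_320 = 320000;	/* true 320 MHz is reachable */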
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -4788,13 +5034,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
- is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+ is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_prepare_pll(intel_crtc);
+ chv_prepare_pll(intel_crtc, &intel_crtc->config);
else
- vlv_prepare_pll(intel_crtc);
+ vlv_prepare_pll(intel_crtc, &intel_crtc->config);
}
if (intel_crtc->config.has_dp_encoder)
@@ -4802,11 +5048,18 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_set_pipe_timings(intel_crtc);
+ if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+ I915_WRITE(CHV_CANVAS(pipe), 0);
+ }
+
i9xx_set_pipeconf(intel_crtc);
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
@@ -4814,9 +5067,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_enable_pll(intel_crtc);
+ chv_enable_pll(intel_crtc, &intel_crtc->config);
else
- vlv_enable_pll(intel_crtc);
+ vlv_enable_pll(intel_crtc, &intel_crtc->config);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4833,10 +5086,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
intel_crtc_enable_planes(crtc);
/* Underruns don't raise interrupts, so check manually. */
- i9xx_check_fifo_underruns(dev);
+ i9xx_check_fifo_underruns(dev_priv);
}
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -4851,6 +5107,7 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -4872,7 +5129,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
if (!IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -4890,6 +5147,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
intel_crtc_enable_planes(crtc);
/*
@@ -4900,10 +5160,10 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
* but leave the pipe running.
*/
if (IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/* Underruns don't raise interrupts, so check manually. */
- i9xx_check_fifo_underruns(dev);
+ i9xx_check_fifo_underruns(dev_priv);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -4939,7 +5199,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
* but leave the pipe running.
*/
if (IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
* Vblank time updates from the shadow to live plane control register
@@ -4953,9 +5213,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, false);
intel_crtc_disable_planes(crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->disable(encoder);
-
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
@@ -4964,6 +5221,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
intel_disable_pipe(intel_crtc);
i9xx_pfit_disable(intel_crtc);
@@ -4972,7 +5235,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
+ if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev))
@@ -4982,7 +5245,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
}
if (!IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_crtc->active = false;
intel_update_watermarks(crtc);
@@ -4996,36 +5259,6 @@ static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
-static void intel_crtc_update_sarea(struct drm_crtc *crtc,
- bool enabled)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_master_private *master_priv;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
-
- if (!dev->primary->master)
- return;
-
- master_priv = dev->primary->master->driver_priv;
- if (!master_priv->sarea_priv)
- return;
-
- switch (pipe) {
- case 0:
- master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
- master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
- break;
- case 1:
- master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
- master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
- break;
- default:
- DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
- break;
- }
-}
-
/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
@@ -5069,8 +5302,6 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
enable |= intel_encoder->connectors_active;
intel_crtc_control(crtc, enable);
-
- intel_crtc_update_sarea(crtc, enable);
}
static void intel_crtc_disable(struct drm_crtc *crtc)
@@ -5085,7 +5316,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
WARN_ON(!crtc->enabled);
dev_priv->display.crtc_disable(crtc);
- intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
if (crtc->primary->fb) {
@@ -5324,11 +5554,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
- struct drm_i915_private *dev_priv = dev->dev_private;
int clock_limit =
dev_priv->display.get_display_clock_speed(dev);
@@ -5355,7 +5585,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* - LVDS dual channel mode
* - Double wide pipe
*/
- if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
@@ -5377,13 +5607,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
- /*
- * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
- * old clock survives for now.
- */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
- pipe_config->shared_dpll = crtc->config.shared_dpll;
-
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
@@ -5393,7 +5616,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int vco = valleyview_get_vco(dev_priv);
u32 val;
int divider;
@@ -5401,6 +5623,9 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
if (IS_CHERRYVIEW(dev))
return 400000;
+ if (dev_priv->hpll_freq == 0)
+ dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
+
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
@@ -5411,7 +5636,7 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
(divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
"cdclk change in progress\n");
- return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
+ return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
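The readout now caches the VCO once in dev_priv->hpll_freq and derives cdclk from the CCK divider field. For example, assuming an illustrative hpll_freq of 200000 kHz and a divider field of 0:

	DIV_ROUND_CLOSEST(200000 << 1, 0 + 1);	/* = 400000 kHz, i.e. 400 MHz */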
static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -5543,15 +5768,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
if (IS_VALLEYVIEW(dev)) {
refclk = 100000;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -5581,24 +5806,24 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
u32 fp, fp2 = 0;
if (IS_PINEVIEW(dev)) {
- fp = pnv_dpll_compute_fp(&crtc->config.dpll);
+ fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
- fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
+ fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
if (reduced_clock)
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
- crtc->config.dpll_hw_state.fp0 = fp;
+ crtc->new_config->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915.powersave) {
- crtc->config.dpll_hw_state.fp1 = fp2;
+ crtc->new_config->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
- crtc->config.dpll_hw_state.fp1 = fp;
+ crtc->new_config->dpll_hw_state.fp1 = fp;
}
}
@@ -5687,7 +5912,8 @@ void intel_dp_set_m_n(struct intel_crtc *crtc)
&crtc->config.dp_m2_n2);
}
-static void vlv_update_pll(struct intel_crtc *crtc)
+static void vlv_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
u32 dpll, dpll_md;
@@ -5702,14 +5928,15 @@ static void vlv_update_pll(struct intel_crtc *crtc)
if (crtc->pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ pipe_config->dpll_hw_state.dpll = dpll;
- dpll_md = (crtc->config.pixel_multiplier - 1)
+ dpll_md = (pipe_config->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->config.dpll_hw_state.dpll_md = dpll_md;
+ pipe_config->dpll_hw_state.dpll_md = dpll_md;
}
-static void vlv_prepare_pll(struct intel_crtc *crtc)
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5720,11 +5947,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
mutex_lock(&dev_priv->dpio_lock);
- bestn = crtc->config.dpll.n;
- bestm1 = crtc->config.dpll.m1;
- bestm2 = crtc->config.dpll.m2;
- bestp1 = crtc->config.dpll.p1;
- bestp2 = crtc->config.dpll.p2;
+ bestn = pipe_config->dpll.n;
+ bestm1 = pipe_config->dpll.m1;
+ bestm2 = pipe_config->dpll.m2;
+ bestp1 = pipe_config->dpll.p1;
+ bestp2 = pipe_config->dpll.p2;
/* See eDP HDMI DPIO driver vbios notes doc */
@@ -5761,17 +5988,16 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
- if (crtc->config.port_clock == 162000 ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
+ if (pipe_config->port_clock == 162000 ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
+ if (crtc->config.has_dp_encoder) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -5791,8 +6017,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
@@ -5800,19 +6026,21 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
mutex_unlock(&dev_priv->dpio_lock);
}
-static void chv_update_pll(struct intel_crtc *crtc)
+static void chv_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
- crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE;
if (crtc->pipe != PIPE_A)
- crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
- crtc->config.dpll_hw_state.dpll_md =
- (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-static void chv_prepare_pll(struct intel_crtc *crtc)
+static void chv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5823,18 +6051,18 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
int refclk;
- bestn = crtc->config.dpll.n;
- bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
- bestm1 = crtc->config.dpll.m1;
- bestm2 = crtc->config.dpll.m2 >> 22;
- bestp1 = crtc->config.dpll.p1;
- bestp2 = crtc->config.dpll.p2;
+ bestn = pipe_config->dpll.n;
+ bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
+ bestm1 = pipe_config->dpll.m1;
+ bestm2 = pipe_config->dpll.m2 >> 22;
+ bestp1 = pipe_config->dpll.p1;
+ bestp2 = pipe_config->dpll.p2;
/*
* Enable Refclk and SSC
*/
I915_WRITE(dpll_reg,
- crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+ pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
mutex_lock(&dev_priv->dpio_lock);
@@ -5862,7 +6090,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
(2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
/* Loop filter */
- refclk = i9xx_get_refclk(&crtc->base, 0);
+ refclk = i9xx_get_refclk(crtc, 0);
loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
2 << DPIO_CHV_GAIN_CTRL_SHIFT;
if (refclk == 100000)
@@ -5882,6 +6110,53 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
mutex_unlock(&dev_priv->dpio_lock);
}
+/**
+ * vlv_force_pll_on - forcibly enable just the PLL
+ * @dev: DRM device
+ * @pipe: pipe PLL to enable
+ * @dpll: PLL configuration
+ *
+ * Enable the PLL for @pipe using the supplied @dpll config. To be used
+ * in cases where we need the PLL enabled even when @pipe is not going to
+ * be enabled.
+ */
+void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+ const struct dpll *dpll)
+{
+ struct intel_crtc *crtc =
+ to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc_config pipe_config = {
+ .pixel_multiplier = 1,
+ .dpll = *dpll,
+ };
+
+ if (IS_CHERRYVIEW(dev)) {
+ chv_update_pll(crtc, &pipe_config);
+ chv_prepare_pll(crtc, &pipe_config);
+ chv_enable_pll(crtc, &pipe_config);
+ } else {
+ vlv_update_pll(crtc, &pipe_config);
+ vlv_prepare_pll(crtc, &pipe_config);
+ vlv_enable_pll(crtc, &pipe_config);
+ }
+}
+
+/**
+ * vlv_force_pll_off - forcibly disable just the PLL
+ * @dev: DRM device
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe. Counterpart to vlv_force_pll_on(), for
+ * cases where the PLL was forced on without enabling @pipe itself.
+ */
+void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
+{
+ if (IS_CHERRYVIEW(dev))
+ chv_disable_pll(to_i915(dev), pipe);
+ else
+ vlv_disable_pll(to_i915(dev), pipe);
+}
+
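A hypothetical use of the pair, forcing pipe B's PLL up around work that needs it running without lighting the pipe itself (the divider values below are illustrative only, not validated settings):

	static const struct dpll config = {
		.n = 1, .m1 = 2, .m2 = 0x1d2cccc, .p1 = 2, .p2 = 14,
	};

	vlv_force_pll_on(dev, PIPE_B, &config);
	/* ...touch hardware that requires the PLL... */
	vlv_force_pll_off(dev, PIPE_B);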
static void i9xx_update_pll(struct intel_crtc *crtc,
intel_clock_t *reduced_clock,
int num_connectors)
@@ -5890,29 +6165,29 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
bool is_sdvo;
- struct dpll *clock = &crtc->config.dpll;
+ struct dpll *clock = &crtc->new_config->dpll;
i9xx_update_pll_dividers(crtc, reduced_clock);
- is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
- dpll |= (crtc->config.pixel_multiplier - 1)
+ dpll |= (crtc->new_config->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
+ if (crtc->new_config->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -5940,21 +6215,21 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- if (crtc->config.sdvo_tv_clock)
+ if (crtc->new_config->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ crtc->new_config->dpll_hw_state.dpll = dpll;
if (INTEL_INFO(dev)->gen >= 4) {
- u32 dpll_md = (crtc->config.pixel_multiplier - 1)
+ u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->config.dpll_hw_state.dpll_md = dpll_md;
+ crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
}
}
@@ -5965,13 +6240,13 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
- struct dpll *clock = &crtc->config.dpll;
+ struct dpll *clock = &crtc->new_config->dpll;
i9xx_update_pll_dividers(crtc, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
@@ -5982,17 +6257,17 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ crtc->new_config->dpll_hw_state.dpll = dpll;
}
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -6016,7 +6291,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
- if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
else
vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -6174,7 +6449,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
- intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+ intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -6188,13 +6463,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
POSTING_READ(PIPECONF(intel_crtc->pipe));
}
-static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
bool ok, has_reduced_clock = false;
@@ -6202,7 +6474,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct intel_encoder *encoder;
const intel_limit_t *limit;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_intel_encoder(dev, encoder) {
+ if (encoder->new_crtc != crtc)
+ continue;
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -6210,6 +6485,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_DSI:
is_dsi = true;
break;
+ default:
+ break;
}
num_connectors++;
@@ -6218,7 +6495,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (is_dsi)
return 0;
- if (!intel_crtc->config.clock_set) {
+ if (!crtc->new_config->clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
/*
@@ -6229,7 +6506,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
*/
limit = intel_limit(crtc, refclk);
ok = dev_priv->display.find_dpll(limit, crtc,
- intel_crtc->config.port_clock,
+ crtc->new_config->port_clock,
refclk, NULL, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -6250,23 +6527,23 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
- intel_crtc->config.dpll.n = clock.n;
- intel_crtc->config.dpll.m1 = clock.m1;
- intel_crtc->config.dpll.m2 = clock.m2;
- intel_crtc->config.dpll.p1 = clock.p1;
- intel_crtc->config.dpll.p2 = clock.p2;
+ crtc->new_config->dpll.n = clock.n;
+ crtc->new_config->dpll.m1 = clock.m1;
+ crtc->new_config->dpll.m2 = clock.m2;
+ crtc->new_config->dpll.p1 = clock.p1;
+ crtc->new_config->dpll.p2 = clock.p2;
}
if (IS_GEN2(dev)) {
- i8xx_update_pll(intel_crtc,
+ i8xx_update_pll(crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
- chv_update_pll(intel_crtc);
+ chv_update_pll(crtc, crtc->new_config);
} else if (IS_VALLEYVIEW(dev)) {
- vlv_update_pll(intel_crtc);
+ vlv_update_pll(crtc, crtc->new_config);
} else {
- i9xx_update_pll(intel_crtc,
+ i9xx_update_pll(crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
}
@@ -6432,8 +6709,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- if (!intel_display_power_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ if (!intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -6538,6 +6815,8 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
if (enc_to_dig_port(&encoder->base)->port == PORT_A)
has_cpu_edp = true;
break;
+ default:
+ break;
}
}
@@ -6842,6 +7121,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
case INTEL_OUTPUT_ANALOG:
has_vga = true;
break;
+ default:
+ break;
}
}
@@ -6870,11 +7151,16 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
int num_connectors = 0;
bool is_lvds = false;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_intel_encoder(dev, encoder) {
+ if (encoder->new_crtc != to_intel_crtc(crtc))
+ continue;
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
+ default:
+ break;
}
num_connectors++;
}
@@ -7019,7 +7305,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
val = 0;
switch (intel_crtc->config.pipe_bpp) {
@@ -7054,18 +7340,12 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk;
const intel_limit_t *limit;
bool ret, is_lvds = false;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- }
- }
+ is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
refclk = ironlake_get_refclk(crtc);
@@ -7074,9 +7354,9 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(crtc, refclk);
- ret = dev_priv->display.find_dpll(limit, crtc,
- to_intel_crtc(crtc)->config.port_clock,
+ limit = intel_limit(intel_crtc, refclk);
+ ret = dev_priv->display.find_dpll(limit, intel_crtc,
+ intel_crtc->new_config->port_clock,
refclk, NULL, clock);
if (!ret)
return false;
@@ -7089,7 +7369,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
* downclock feature.
*/
*has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->display.find_dpll(limit, intel_crtc,
dev_priv->lvds_downclock,
refclk, clock,
reduced_clock);
@@ -7126,7 +7406,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
int factor, num_connectors = 0;
bool is_lvds = false, is_sdvo = false;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ for_each_intel_encoder(dev, intel_encoder) {
+ if (intel_encoder->new_crtc != to_intel_crtc(crtc))
+ continue;
+
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -7135,6 +7418,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
break;
+ default:
+ break;
}
num_connectors++;
@@ -7147,10 +7432,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
- } else if (intel_crtc->config.sdvo_tv_clock)
+ } else if (intel_crtc->new_config->sdvo_tv_clock)
factor = 20;
- if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
+ if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
*fp |= FP_CB_TUNE;
if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
@@ -7163,20 +7448,20 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- dpll |= (intel_crtc->config.pixel_multiplier - 1)
+ dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->new_config->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (intel_crtc->config.dpll.p2) {
+ switch (intel_crtc->new_config->dpll.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -7199,78 +7484,64 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
return dpll | DPLL_VCO_ENABLE;
}
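The cb tune decision is an m-versus-n ratio test, visible above for the reduced clock: with LVDS on a 100 MHz SSC reference the factor is 25, so for example reduced_clock->m = 100 and reduced_clock->n = 5 gives 100 < 25 * 5, and FP_CB_TUNE lands in fp2.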
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
+static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int num_connectors = 0;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false;
bool is_lvds = false;
- struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- }
-
- num_connectors++;
- }
+ is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
- ok = ironlake_compute_clocks(crtc, &clock,
+ ok = ironlake_compute_clocks(&crtc->base, &clock,
&has_reduced_clock, &reduced_clock);
- if (!ok && !intel_crtc->config.clock_set) {
+ if (!ok && !crtc->new_config->clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
/* Compat-code for transition, will disappear. */
- if (!intel_crtc->config.clock_set) {
- intel_crtc->config.dpll.n = clock.n;
- intel_crtc->config.dpll.m1 = clock.m1;
- intel_crtc->config.dpll.m2 = clock.m2;
- intel_crtc->config.dpll.p1 = clock.p1;
- intel_crtc->config.dpll.p2 = clock.p2;
+ if (!crtc->new_config->clock_set) {
+ crtc->new_config->dpll.n = clock.n;
+ crtc->new_config->dpll.m1 = clock.m1;
+ crtc->new_config->dpll.m2 = clock.m2;
+ crtc->new_config->dpll.p1 = clock.p1;
+ crtc->new_config->dpll.p2 = clock.p2;
}
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (intel_crtc->config.has_pch_encoder) {
- fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
+ if (crtc->new_config->has_pch_encoder) {
+ fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
if (has_reduced_clock)
fp2 = i9xx_dpll_compute_fp(&reduced_clock);
- dpll = ironlake_compute_dpll(intel_crtc,
+ dpll = ironlake_compute_dpll(crtc,
&fp, &reduced_clock,
has_reduced_clock ? &fp2 : NULL);
- intel_crtc->config.dpll_hw_state.dpll = dpll;
- intel_crtc->config.dpll_hw_state.fp0 = fp;
+ crtc->new_config->dpll_hw_state.dpll = dpll;
+ crtc->new_config->dpll_hw_state.fp0 = fp;
if (has_reduced_clock)
- intel_crtc->config.dpll_hw_state.fp1 = fp2;
+ crtc->new_config->dpll_hw_state.fp1 = fp2;
else
- intel_crtc->config.dpll_hw_state.fp1 = fp;
+ crtc->new_config->dpll_hw_state.fp1 = fp;
- pll = intel_get_shared_dpll(intel_crtc);
+ pll = intel_get_shared_dpll(crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
return -EINVAL;
}
- } else
- intel_put_shared_dpll(intel_crtc);
+ }
if (is_lvds && has_reduced_clock && i915.powersave)
- intel_crtc->lowfreq_avail = true;
+ crtc->lowfreq_avail = true;
else
- intel_crtc->lowfreq_avail = false;
+ crtc->lowfreq_avail = false;
return 0;
}
@@ -7351,6 +7622,22 @@ static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
&pipe_config->fdi_m_n, NULL);
}
+static void skylake_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(PS_CTL(crtc->pipe));
+
+ if (tmp & PS_ENABLE) {
+ pipe_config->pch_pfit.enabled = true;
+ pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
+ pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
+ }
+}
+
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -7442,8 +7729,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- if (!intel_display_power_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ if (!intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7636,7 +7923,6 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
- unsigned long irqflags;
val = I915_READ(LCPLL_CTL);
@@ -7656,10 +7942,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* to call special forcewake code that doesn't touch runtime PM and
* doesn't enable the forcewake delayed work.
*/
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irq(&dev_priv->uncore.lock);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irq(&dev_priv->uncore.lock);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7690,10 +7976,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
/* See the big comment above. */
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irq(&dev_priv->uncore.lock);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irq(&dev_priv->uncore.lock);
}
/*
@@ -7755,28 +8041,36 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
intel_prepare_ddi(dev);
}
-static void snb_modeset_global_resources(struct drm_device *dev)
+static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
{
- modeset_update_crtc_power_domains(dev);
-}
+ if (!intel_ddi_pll_select(crtc))
+ return -EINVAL;
-static void haswell_modeset_global_resources(struct drm_device *dev)
-{
- modeset_update_crtc_power_domains(dev);
+ crtc->lowfreq_avail = false;
+
+ return 0;
}
-static int haswell_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
+static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
+ enum port port,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- if (!intel_ddi_pll_select(intel_crtc))
- return -EINVAL;
+ u32 temp;
- intel_crtc->lowfreq_avail = false;
+ temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
+ pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
- return 0;
+ switch (pipe_config->ddi_pll_sel) {
+ case SKL_DPLL1:
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+ break;
+ case SKL_DPLL2:
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+ break;
+ case SKL_DPLL3:
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+ break;
+ }
}
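Each DDI port owns a small selector field in DPLL_CTRL2, so the PLL choice for port p sits at bit p * 3 + 1. Worked example for port B (p = 1), with the shift spelled out for illustration:

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(PORT_B);
	sel  = temp >> (PORT_B * 3 + 1);	/* selector taken from bits 5:4 */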
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
@@ -7808,7 +8102,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- haswell_get_ddi_pll(dev_priv, port, pipe_config);
+ if (IS_SKYLAKE(dev))
+ skylake_get_ddi_pll(dev_priv, port, pipe_config);
+ else
+ haswell_get_ddi_pll(dev_priv, port, pipe_config);
if (pipe_config->shared_dpll >= 0) {
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
@@ -7822,7 +8119,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
- if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ if (INTEL_INFO(dev)->gen < 9 &&
+ (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@ -7841,7 +8139,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
@@ -7870,7 +8168,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
@@ -7883,8 +8181,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- if (intel_display_power_enabled(dev_priv, pfit_domain))
- ironlake_get_pfit_config(crtc, pipe_config);
+ if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
+ if (IS_SKYLAKE(dev))
+ skylake_get_pfit_config(crtc, pipe_config);
+ else
+ ironlake_get_pfit_config(crtc, pipe_config);
+ }
if (IS_HASWELL(dev))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
@@ -7900,314 +8202,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
return true;
}
-static struct {
- int clock;
- u32 config;
-} hdmi_audio_clock[] = {
- { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
- { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
- { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
- { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
- { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
- { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
- { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
- { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
- { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
- { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
-};
-
-/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
-static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
- if (mode->clock == hdmi_audio_clock[i].clock)
- break;
- }
-
- if (i == ARRAY_SIZE(hdmi_audio_clock)) {
- DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
- i = 1;
- }
-
- DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
- hdmi_audio_clock[i].clock,
- hdmi_audio_clock[i].config);
-
- return hdmi_audio_clock[i].config;
-}
-
-static bool intel_eld_uptodate(struct drm_connector *connector,
- int reg_eldv, uint32_t bits_eldv,
- int reg_elda, uint32_t bits_elda,
- int reg_edid)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t i;
-
- i = I915_READ(reg_eldv);
- i &= bits_eldv;
-
- if (!eld[0])
- return !i;
-
- if (!i)
- return false;
-
- i = I915_READ(reg_elda);
- i &= ~bits_elda;
- I915_WRITE(reg_elda, i);
-
- for (i = 0; i < eld[2]; i++)
- if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
- return false;
-
- return true;
-}
-
-static void g4x_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t len;
- uint32_t i;
-
- i = I915_READ(G4X_AUD_VID_DID);
-
- if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
- eldv = G4X_ELDV_DEVCL_DEVBLC;
- else
- eldv = G4X_ELDV_DEVCTG;
-
- if (intel_eld_uptodate(connector,
- G4X_AUD_CNTL_ST, eldv,
- G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
- G4X_HDMIW_HDMIEDID))
- return;
-
- i = I915_READ(G4X_AUD_CNTL_ST);
- i &= ~(eldv | G4X_ELD_ADDR);
- len = (i >> 9) & 0x1f; /* ELD buffer size */
- I915_WRITE(G4X_AUD_CNTL_ST, i);
-
- if (!eld[0])
- return;
-
- len = min_t(uint8_t, eld[2], len);
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
-
- i = I915_READ(G4X_AUD_CNTL_ST);
- i |= eldv;
- I915_WRITE(G4X_AUD_CNTL_ST, i);
-}
-
-static void haswell_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t i;
- int len;
- int pipe = to_intel_crtc(crtc)->pipe;
- int tmp;
-
- int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
- int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
- int aud_config = HSW_AUD_CFG(pipe);
- int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
-
- /* Audio output enable */
- DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
- tmp = I915_READ(aud_cntrl_st2);
- tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
- I915_WRITE(aud_cntrl_st2, tmp);
- POSTING_READ(aud_cntrl_st2);
-
- assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
-
- /* Set ELD valid state */
- tmp = I915_READ(aud_cntrl_st2);
- DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
- tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
- I915_WRITE(aud_cntrl_st2, tmp);
- tmp = I915_READ(aud_cntrl_st2);
- DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
-
- /* Enable HDMI mode */
- tmp = I915_READ(aud_config);
- DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
- /* clear N_programing_enable and N_value_index */
- tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
- I915_WRITE(aud_config, tmp);
-
- DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
- eldv = AUDIO_ELD_VALID_A << (pipe * 4);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
- eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else {
- I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
- }
-
- if (intel_eld_uptodate(connector,
- aud_cntrl_st2, eldv,
- aud_cntl_st, IBX_ELD_ADDRESS,
- hdmiw_hdmiedid))
- return;
-
- i = I915_READ(aud_cntrl_st2);
- i &= ~eldv;
- I915_WRITE(aud_cntrl_st2, i);
-
- if (!eld[0])
- return;
-
- i = I915_READ(aud_cntl_st);
- i &= ~IBX_ELD_ADDRESS;
- I915_WRITE(aud_cntl_st, i);
- i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
- DRM_DEBUG_DRIVER("port num:%d\n", i);
-
- len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
- i = I915_READ(aud_cntrl_st2);
- i |= eldv;
- I915_WRITE(aud_cntrl_st2, i);
-
-}
-
-static void ironlake_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t i;
- int len;
- int hdmiw_hdmiedid;
- int aud_config;
- int aud_cntl_st;
- int aud_cntrl_st2;
- int pipe = to_intel_crtc(crtc)->pipe;
-
- if (HAS_PCH_IBX(connector->dev)) {
- hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
- aud_config = IBX_AUD_CFG(pipe);
- aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(connector->dev)) {
- hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
- aud_config = VLV_AUD_CFG(pipe);
- aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else {
- hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
- aud_config = CPT_AUD_CFG(pipe);
- aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- }
-
- DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
- if (IS_VALLEYVIEW(connector->dev)) {
- struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
-
- intel_encoder = intel_attached_encoder(connector);
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- i = intel_dig_port->port;
- } else {
- i = I915_READ(aud_cntl_st);
- i = (i >> 29) & DIP_PORT_SEL_MASK;
- /* DIP_Port_Select, 0x1 = PortB */
- }
-
- if (!i) {
- DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
- /* operate blindly on all ports */
- eldv = IBX_ELD_VALIDB;
- eldv |= IBX_ELD_VALIDB << 4;
- eldv |= IBX_ELD_VALIDB << 8;
- } else {
- DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
- eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
- }
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
- eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else {
- I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
- }
-
- if (intel_eld_uptodate(connector,
- aud_cntrl_st2, eldv,
- aud_cntl_st, IBX_ELD_ADDRESS,
- hdmiw_hdmiedid))
- return;
-
- i = I915_READ(aud_cntrl_st2);
- i &= ~eldv;
- I915_WRITE(aud_cntrl_st2, i);
-
- if (!eld[0])
- return;
-
- i = I915_READ(aud_cntl_st);
- i &= ~IBX_ELD_ADDRESS;
- I915_WRITE(aud_cntl_st, i);
-
- len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
- i = I915_READ(aud_cntrl_st2);
- i |= eldv;
- I915_WRITE(aud_cntrl_st2, i);
-}
-
-void intel_write_eld(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_crtc *crtc = encoder->crtc;
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- connector = drm_select_eld(encoder, mode);
- if (!connector)
- return;
-
- DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id,
- connector->name,
- connector->encoder->base.id,
- connector->encoder->name);
-
- connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
-
- if (dev_priv->display.write_eld)
- dev_priv->display.write_eld(connector, crtc, mode);
-}
-
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
@@ -8253,8 +8247,10 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_cntl = 0;
}
- if (intel_crtc->cursor_base != base)
+ if (intel_crtc->cursor_base != base) {
I915_WRITE(_CURABASE, base);
+ intel_crtc->cursor_base = base;
+ }
if (intel_crtc->cursor_size != size) {
I915_WRITE(CURSIZE, size);
@@ -8294,9 +8290,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- cntl |= CURSOR_PIPE_CSC_ENABLE;
+
+ if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
+ cntl |= CURSOR_ROTATE_180;
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
@@ -8307,6 +8307,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
/* and commit changes on next vblank */
I915_WRITE(CURBASE(pipe), base);
POSTING_READ(CURBASE(pipe));
+
+ intel_crtc->cursor_base = base;
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -8353,11 +8355,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
I915_WRITE(CURPOS(pipe), pos);
+ /* ILK+ do this automagically */
+ if (HAS_GMCH_DISPLAY(dev) &&
+ to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
+ base += (intel_crtc->cursor_height *
+ intel_crtc->cursor_width - 1) * 4;
+ }
+
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
i9xx_update_cursor(crtc, base);
- intel_crtc->cursor_base = base;
}
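The rotation handling added to intel_crtc_update_cursor() above relies on GMCH-era cursor hardware scanning the buffer backwards from the programmed address when rotated 180 degrees (ILK+ handles this in hardware). A minimal userspace sketch of that base-address adjustment, assuming a 4-byte-per-pixel ARGB cursor and a hypothetical GTT offset:

/* Illustrative sketch only, not driver code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t rotated_cursor_base(uint32_t base, int width, int height)
{
	/* Point at the last pixel; the hardware walks the buffer in reverse. */
	return base + (height * width - 1) * 4;
}

int main(void)
{
	/* A 64x64 cursor pinned at a hypothetical offset of 0x100000. */
	printf("0x%" PRIx32 "\n", rotated_cursor_base(0x100000, 64, 64));
	return 0;
}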
static bool cursor_size_ok(struct drm_device *dev,
@@ -8397,22 +8405,15 @@ static bool cursor_size_ok(struct drm_device *dev,
return true;
}
-/*
- * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
- *
- * Note that the object's reference will be consumed if the update fails. If
- * the update succeeds, the reference of the old object (if any) will be
- * consumed.
- */
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- unsigned old_width, stride;
+ unsigned old_width;
uint32_t addr;
int ret;
@@ -8424,30 +8425,11 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
goto finish;
}
- /* Check for which cursor types we support */
- if (!cursor_size_ok(dev, width, height)) {
- DRM_DEBUG("Cursor dimension not supported\n");
- return -EINVAL;
- }
-
- stride = roundup_pow_of_two(width) * 4;
- if (obj->base.size < stride * height) {
- DRM_DEBUG_KMS("buffer is too small\n");
- ret = -ENOMEM;
- goto fail;
- }
-
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!INTEL_INFO(dev)->cursor_needs_physical) {
unsigned alignment;
- if (obj->tiling_mode) {
- DRM_DEBUG_KMS("cursor cannot be tiled\n");
- ret = -EINVAL;
- goto fail_locked;
- }
-
/*
* Global gtt pte registers are special registers which actually
* forward writes to a chunk of system memory. Which means that
@@ -8514,17 +8496,15 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
if (old_width != width)
intel_update_watermarks(crtc);
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
- }
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+ }
return 0;
fail_unpin:
i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
-fail:
- drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
@@ -8559,7 +8539,7 @@ __intel_framebuffer_create(struct drm_device *dev,
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_unreference(&obj->base);
return ERR_PTR(-ENOMEM);
}
@@ -8569,7 +8549,7 @@ __intel_framebuffer_create(struct drm_device *dev,
return &intel_fb->base;
err:
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_unreference(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
@@ -8702,6 +8682,9 @@ retry:
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
+ ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
+ if (ret)
+ goto fail_unlock;
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
@@ -8739,6 +8722,9 @@ retry:
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
+ ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
+ if (ret)
+ goto fail_unlock;
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
@@ -9021,35 +9007,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void intel_increase_pllclock(struct drm_device *dev,
- enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_reg = DPLL(pipe);
- int dpll;
-
- if (!HAS_GMCH_DISPLAY(dev))
- return;
-
- if (!dev_priv->lvds_downclock_avail)
- return;
-
- dpll = I915_READ(dpll_reg);
- if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
- DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
- assert_panel_unlocked(dev_priv, pipe);
-
- dpll &= ~DISPLAY_RATE_SELECT_FPA1;
- I915_WRITE(dpll_reg, dpll);
- intel_wait_for_vblank(dev, pipe);
-
- dpll = I915_READ(dpll_reg);
- if (dpll & DISPLAY_RATE_SELECT_FPA1)
- DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
- }
-}
-
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -9125,199 +9082,16 @@ out:
intel_runtime_pm_put(dev_priv);
}
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
- unsigned frontbuffer_bits,
- struct intel_engine_cs *ring)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
-
- if (!i915.powersave)
- return;
-
- for_each_pipe(dev_priv, pipe) {
- if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
- continue;
-
- intel_increase_pllclock(dev, pipe);
- if (ring && intel_fbc_enabled(dev))
- ring->fbc_dirty = true;
- }
-}
-
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (!obj->frontbuffer_bits)
- return;
-
- if (ring) {
- mutex_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.busy_bits
- |= obj->frontbuffer_bits;
- dev_priv->fb_tracking.flip_bits
- &= ~obj->frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
- }
-
- intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
- intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called every time rendering on the given planes has
- * completed and frontbuffer caching can be started again. Flushes will get
- * delayed if they're blocked by some outstanding asynchronous rendering.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* Delay flushing when rings are still busy. */
- mutex_lock(&dev_priv->fb_tracking.lock);
- frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
-
- intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
-
- intel_edp_psr_flush(dev, frontbuffer_bits);
-
- /*
- * FIXME: Unconditional fbc flushing here is a rather gross hack and
- * needs to be reworked into a proper frontbuffer tracking scheme like
- * psr employs.
- */
- if (IS_BROADWELL(dev))
- gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
-}
-
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned frontbuffer_bits;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (!obj->frontbuffer_bits)
- return;
-
- frontbuffer_bits = obj->frontbuffer_bits;
-
- if (retire) {
- mutex_lock(&dev_priv->fb_tracking.lock);
- /* Filter out new bits since rendering started. */
- frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
- }
-
- intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. The actual
- * frontbuffer flushing will be delayed until completion is signalled with
- * intel_frontbuffer_flip_complete. If an invalidate happens in between, this
- * flush will be cancelled.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.flip_bits
- |= frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
-}
-
-/**
- * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after the flip has been latched and will complete
- * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev_priv->fb_tracking.lock);
- /* Mask any cancelled flips. */
- frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
- dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
-
- intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
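The frontbuffer tracking being removed here keeps two bitmasks: invalidating marks planes busy (and cancels any pending flip flush), while retiring filters the busy bits and releases them for flushing. A compact sketch of that bookkeeping, with illustrative names:

/* Sketch of the busy/flip bit accounting, not driver code. */
#include <stdio.h>

struct fb_tracking { unsigned int busy_bits, flip_bits; };

static void invalidate(struct fb_tracking *t, unsigned int bits)
{
	t->busy_bits |= bits;	/* async rendering in flight */
	t->flip_bits &= ~bits;	/* cancels a pending flip flush */
}

static unsigned int retire(struct fb_tracking *t, unsigned int bits)
{
	bits &= t->busy_bits;	/* filter bits that got busy again */
	t->busy_bits &= ~bits;
	return bits;		/* these may be flushed now */
}

int main(void)
{
	struct fb_tracking t = { 0, 0 };

	invalidate(&t, 0x1);
	printf("flushable: 0x%x\n", retire(&t, 0x1));
	return 0;
}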
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct intel_unpin_work *work;
- unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
work = intel_crtc->unpin_work;
intel_crtc->unpin_work = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
if (work) {
cancel_work_sync(&work->work);
@@ -9363,6 +9137,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
if (intel_crtc == NULL)
return;
+ /*
+ * This is called both by irq handlers and the reset code (to complete
+ * lost pageflips) so it needs the full irqsave spinlocks.
+ */
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
@@ -9448,7 +9226,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
- /* NB: An MMIO update of the plane base pointer will also
+
+ /*
+ * This is called both by irq handlers and the reset code (to complete
+ * lost pageflips) so it needs the full irqsave spinlocks.
+ *
+ * NB: An MMIO update of the plane base pointer will also
* generate a page-flip completion irq, i.e. every modeset
* is also accompanied by a spurious intel_prepare_page_flip().
*/
@@ -9738,115 +9521,128 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
+ bool atomic_update;
+ u32 start_vbl_count;
u32 dspcntr;
u32 reg;
intel_mark_page_flip_active(intel_crtc);
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
reg = DSPCNTR(intel_crtc->plane);
dspcntr = I915_READ(reg);
- if (INTEL_INFO(dev)->gen >= 4) {
- if (obj->tiling_mode != I915_TILING_NONE)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
- }
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+
I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSURF(intel_crtc->plane),
intel_crtc->unpin_work->gtt_offset);
POSTING_READ(DSPSURF(intel_crtc->plane));
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
}
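The intel_pipe_update_start()/intel_pipe_update_end() bracket added above implements vblank evasion: the register writes only count as atomic if they start and finish within the same frame. A toy model of that contract, assuming the start call records the current vblank count and the end call checks it (names are illustrative):

/* Sketch only; the real helpers also sleep until the safe window. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int vbl_count;	/* bumped at each vblank */

static bool pipe_update_start(unsigned int *start)
{
	*start = vbl_count;
	return true;		/* inside the pre-vblank window */
}

static void pipe_update_end(unsigned int start)
{
	if (vbl_count != start)
		printf("update straddled a vblank!\n");
}

int main(void)
{
	unsigned int start;

	if (pipe_update_start(&start)) {
		/* program DSPCNTR/DSPSURF here */
		pipe_update_end(start);
	}
	return 0;
}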
-static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+static void intel_mmio_flip_work_func(struct work_struct *work)
{
+ struct intel_crtc *intel_crtc =
+ container_of(work, struct intel_crtc, mmio_flip.work);
struct intel_engine_cs *ring;
- int ret;
+ uint32_t seqno;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ seqno = intel_crtc->mmio_flip.seqno;
+ ring = intel_crtc->mmio_flip.ring;
- if (!obj->last_write_seqno)
- return 0;
+ if (seqno)
+ WARN_ON(__i915_wait_seqno(ring, seqno,
+ intel_crtc->reset_counter,
+ false, NULL, NULL) != 0);
- ring = obj->ring;
-
- if (i915_seqno_passed(ring->get_seqno(ring, true),
- obj->last_write_seqno))
- return 0;
-
- ret = i915_gem_check_olr(ring, obj->last_write_seqno);
- if (ret)
- return ret;
-
- if (WARN_ON(!ring->irq_get(ring)))
- return 0;
-
- return 1;
+ intel_do_mmio_flip(intel_crtc);
}
-void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+static int intel_queue_mmio_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
- struct intel_crtc *intel_crtc;
- unsigned long irq_flags;
- u32 seqno;
-
- seqno = ring->get_seqno(ring, false);
-
- spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
- for_each_intel_crtc(ring->dev, intel_crtc) {
- struct intel_mmio_flip *mmio_flip;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- mmio_flip = &intel_crtc->mmio_flip;
- if (mmio_flip->seqno == 0)
- continue;
+ intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+ intel_crtc->mmio_flip.ring = obj->ring;
- if (ring->id != mmio_flip->ring_id)
- continue;
+ schedule_work(&intel_crtc->mmio_flip.work);
- if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
- intel_do_mmio_flip(intel_crtc);
- mmio_flip->seqno = 0;
- ring->irq_put(ring);
- }
- }
- spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+ return 0;
}
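The new MMIO flip path records the seqno to wait for and punts the actual register write to a work item, so the blocking wait happens outside the caller. A rough userspace sketch of that hand-off, with pthreads and a polled atomic standing in for the kernel workqueue and the seqno interrupt (all names here are made up):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct mmio_flip {
	unsigned int wait_seqno;	/* last write to wait for */
	unsigned int new_base;		/* surface address to program */
};

static atomic_uint completed_seqno;	/* advanced as rendering retires */

static void *flip_work(void *arg)
{
	struct mmio_flip *flip = arg;

	/* The kernel worker sleeps on the ring's seqno interrupt;
	 * a polling loop stands in for that here. */
	while (atomic_load(&completed_seqno) < flip->wait_seqno)
		usleep(1000);
	printf("flipping to 0x%x\n", flip->new_base);
	return NULL;
}

int main(void)
{
	struct mmio_flip flip = { .wait_seqno = 3, .new_base = 0x2000 };
	pthread_t worker;

	pthread_create(&worker, NULL, flip_work, &flip);
	atomic_store(&completed_seqno, 3);	/* rendering finished */
	pthread_join(worker, NULL);
	return 0;
}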
-static int intel_queue_mmio_flip(struct drm_device *dev,
+static int intel_gen9_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long irq_flags;
+ uint32_t plane = 0, stride;
int ret;
- if (WARN_ON(intel_crtc->mmio_flip.seqno))
- return -EBUSY;
+ switch (intel_crtc->pipe) {
+ case PIPE_A:
+ plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
+ break;
+ case PIPE_B:
+ plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
+ break;
+ case PIPE_C:
+ plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
+ break;
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ return -ENODEV;
+ }
- ret = intel_postpone_flip(obj);
- if (ret < 0)
- return ret;
- if (ret == 0) {
- intel_do_mmio_flip(intel_crtc);
- return 0;
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ stride = fb->pitches[0] >> 6;
+ break;
+ case I915_TILING_X:
+ stride = fb->pitches[0] >> 9;
+ break;
+ default:
+ WARN_ONCE(1, "unknown tiling in flip command\n");
+ return -ENODEV;
}
- spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
- intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
- intel_crtc->mmio_flip.ring_id = obj->ring->id;
- spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+ ret = intel_ring_begin(ring, 10);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
+ MI_SRM_LRM_GLOBAL_GTT);
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ intel_ring_emit(ring, 0);
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
+ intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+
+ intel_mark_page_flip_active(intel_crtc);
+ __intel_ring_advance(ring);
- /*
- * Double check to catch cases where irq fired before
- * mmio flip data was ready
- */
- intel_notify_mmio_flip(obj->ring);
return 0;
}
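The stride shifts in intel_gen9_queue_flip() above encode the framebuffer pitch in the units the flip command expects: 64-byte units for linear buffers (>> 6) and 512-byte units for X-tiled ones (>> 9). A small sketch of that conversion, with made-up function and enum names:

#include <stdint.h>
#include <stdio.h>

enum tiling { TILING_NONE, TILING_X };

static int flip_stride(uint32_t pitch_bytes, enum tiling t, uint32_t *out)
{
	switch (t) {
	case TILING_NONE:
		*out = pitch_bytes >> 6;	/* 64-byte units */
		return 0;
	case TILING_X:
		*out = pitch_bytes >> 9;	/* 512-byte units */
		return 0;
	default:
		return -1;			/* unknown tiling */
	}
}

int main(void)
{
	uint32_t s;

	/* e.g. a 1920-wide XRGB fb: pitch 1920 * 4 = 7680 bytes */
	if (!flip_stride(7680, TILING_X, &s))
		printf("%u\n", s);		/* prints 15 */
	return 0;
}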
@@ -9905,18 +9701,19 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
+
+ WARN_ON(!in_irq());
if (crtc == NULL)
return;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock(&dev->event_lock);
if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
page_flip_completed(intel_crtc);
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock(&dev->event_lock);
}
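Several hunks here drop spin_lock_irqsave() in favour of plain spin_lock_irq() or spin_lock(), based on what is known about the caller's interrupt state: paths that always run with interrupts enabled may disable and re-enable them unconditionally, while paths reachable from both irq and process context (like the pageflip completion above) must save and restore whatever state they found. A toy model of the distinction, with an integer standing in for the CPU interrupt flag:

#include <assert.h>
#include <stdio.h>

static int irqs_enabled = 1;	/* stand-in for the CPU interrupt flag */

static void lock_irq(void)
{
	assert(irqs_enabled);	/* caller must know irqs are on */
	irqs_enabled = 0;
}

static void unlock_irq(void)
{
	irqs_enabled = 1;
}

static void lock_irqsave(int *flags)
{
	*flags = irqs_enabled;	/* remember what we found */
	irqs_enabled = 0;
}

static void unlock_irqrestore(int flags)
{
	irqs_enabled = flags;	/* put it back, whatever it was */
}

static void reachable_from_irq_context(void)
{
	int flags;

	lock_irqsave(&flags);
	unlock_irqrestore(flags);
}

int main(void)
{
	/* Process context: interrupts known enabled, plain _irq is fine. */
	lock_irq();
	unlock_irq();

	/* Simulate being inside an interrupt handler. */
	irqs_enabled = 0;
	reachable_from_irq_context();
	assert(!irqs_enabled);	/* the saved state was preserved */

	printf("ok\n");
	return 0;
}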
static int intel_crtc_page_flip(struct drm_crtc *crtc,
@@ -9932,7 +9729,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
- unsigned long flags;
int ret;
/*
@@ -9973,7 +9769,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto free_work;
/* We borrow the event spin lock for protecting unpin_work */
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
if (intel_crtc->unpin_work) {
/* Before declaring the flip queue wedged, check if
* the hardware completed the operation behind our backs.
@@ -9983,7 +9779,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
page_flip_completed(intel_crtc);
} else {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
kfree(work);
@@ -9991,7 +9787,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
}
}
intel_crtc->unpin_work = work;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
@@ -10029,7 +9825,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ring = &dev_priv->ring[RCS];
}
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
if (ret)
goto cleanup_pending;
@@ -10078,9 +9874,9 @@ cleanup_pending:
mutex_unlock(&dev->struct_mutex);
cleanup:
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
intel_crtc->unpin_work = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
free_work:
@@ -10091,9 +9887,9 @@ out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event) {
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
}
}
return ret;
@@ -10289,6 +10085,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dp_m2_n2.link_n,
pipe_config->dp_m2_n2.tu);
+ DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
+ pipe_config->has_audio,
+ pipe_config->has_infoframe);
+
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
@@ -10350,6 +10150,48 @@ static bool check_encoder_cloning(struct intel_crtc *crtc)
return true;
}
+static bool check_digital_port_conflicts(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ unsigned int used_ports = 0;
+
+ /*
+ * Walk the connector list instead of the encoder
+ * list to detect the problem on ddi platforms
+ * where there's just one encoder per digital port.
+ */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, base.head) {
+ struct intel_encoder *encoder = connector->new_encoder;
+
+ if (!encoder)
+ continue;
+
+ WARN_ON(!encoder->new_crtc);
+
+ switch (encoder->type) {
+ unsigned int port_mask;
+ case INTEL_OUTPUT_UNKNOWN:
+ if (WARN_ON(!HAS_DDI(dev)))
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_EDP:
+ port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
+
+ /* the same port mustn't appear more than once */
+ if (used_ports & port_mask)
+ return false;
+
+ used_ports |= port_mask;
+ default:
+ break;
+ }
+ }
+
+ return true;
+}
+
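check_digital_port_conflicts() boils down to bitmask bookkeeping: each digital port maps to one bit, and seeing the same bit twice means two connectors are trying to drive one port. A self-contained sketch of that check (port numbering here is hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool ports_conflict_free(const int *ports, int n)
{
	unsigned int used = 0;

	for (int i = 0; i < n; i++) {
		unsigned int mask = 1u << ports[i];

		if (used & mask)
			return false;	/* same port claimed twice */
		used |= mask;
	}
	return true;
}

int main(void)
{
	int ok[] = { 1, 2, 3 }, bad[] = { 1, 2, 1 };

	printf("%d %d\n", ports_conflict_free(ok, 3),
	       ports_conflict_free(bad, 3));	/* prints "1 0" */
	return 0;
}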
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -10366,6 +10208,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
return ERR_PTR(-EINVAL);
}
+ if (!check_digital_port_conflicts(dev)) {
+ DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ return ERR_PTR(-EINVAL);
+ }
+
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
if (!pipe_config)
return ERR_PTR(-ENOMEM);
@@ -10571,10 +10418,13 @@ static bool intel_crtc_in_use(struct drm_crtc *crtc)
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc;
struct drm_connector *connector;
+ intel_shared_dpll_commit(dev_priv);
+
for_each_intel_encoder(dev, intel_encoder) {
if (!intel_encoder->base.crtc)
continue;
@@ -10754,6 +10604,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
IS_VALLEYVIEW(dev))
PIPE_CONF_CHECK_I(limited_color_range);
+ PIPE_CONF_CHECK_I(has_infoframe);
PIPE_CONF_CHECK_I(has_audio);
@@ -10810,6 +10661,9 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10827,6 +10681,56 @@ intel_pipe_config_compare(struct drm_device *dev,
return true;
}
+static void check_wm_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_ddb_allocation hw_ddb, *sw_ddb;
+ struct intel_crtc *intel_crtc;
+ int plane;
+
+ if (INTEL_INFO(dev)->gen < 9)
+ return;
+
+ skl_ddb_get_hw_state(dev_priv, &hw_ddb);
+ sw_ddb = &dev_priv->wm.skl_hw.ddb;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ struct skl_ddb_entry *hw_entry, *sw_entry;
+ const enum pipe pipe = intel_crtc->pipe;
+
+ if (!intel_crtc->active)
+ continue;
+
+ /* planes */
+ for_each_plane(pipe, plane) {
+ hw_entry = &hw_ddb.plane[pipe][plane];
+ sw_entry = &sw_ddb->plane[pipe][plane];
+
+ if (skl_ddb_entry_equal(hw_entry, sw_entry))
+ continue;
+
+ DRM_ERROR("mismatch in DDB state pipe %c plane %d "
+ "(expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_entry->start, sw_entry->end,
+ hw_entry->start, hw_entry->end);
+ }
+
+ /* cursor */
+ hw_entry = &hw_ddb.cursor[pipe];
+ sw_entry = &sw_ddb->cursor[pipe];
+
+ if (skl_ddb_entry_equal(hw_entry, sw_entry))
+ continue;
+
+ DRM_ERROR("mismatch in DDB state pipe %c cursor "
+ "(expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_entry->start, sw_entry->end,
+ hw_entry->start, hw_entry->end);
+ }
+}
+
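check_wm_state() follows the driver's usual cross-checking pattern: read the DDB allocation back from hardware and compare it entry by entry with the software copy, complaining about any mismatch. A minimal sketch of the comparison, with illustrative structure names:

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry { unsigned short start, end; };

static bool ddb_entry_equal(const struct ddb_entry *a,
			    const struct ddb_entry *b)
{
	return a->start == b->start && a->end == b->end;
}

int main(void)
{
	struct ddb_entry hw = { 0, 159 }, sw = { 0, 160 };

	if (!ddb_entry_equal(&hw, &sw))
		printf("mismatch: expected (%u,%u), found (%u,%u)\n",
		       sw.start, sw.end, hw.start, hw.end);
	return 0;
}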
static void
check_connector_state(struct drm_device *dev)
{
@@ -10993,9 +10897,9 @@ check_shared_dpll_state(struct drm_device *dev)
active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
- WARN(pll->active > pll->refcount,
+ WARN(pll->active > hweight32(pll->config.crtc_mask),
"more active pll users than references: %i vs %i\n",
- pll->active, pll->refcount);
+ pll->active, hweight32(pll->config.crtc_mask));
WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
WARN(pll->on && !pll->active,
@@ -11013,11 +10917,11 @@ check_shared_dpll_state(struct drm_device *dev)
WARN(pll->active != active_crtcs,
"pll active crtcs mismatch (expected %i, found %i)\n",
pll->active, active_crtcs);
- WARN(pll->refcount != enabled_crtcs,
+ WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
"pll enabled crtcs mismatch (expected %i, found %i)\n",
- pll->refcount, enabled_crtcs);
+ hweight32(pll->config.crtc_mask), enabled_crtcs);
- WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
+ WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
}
@@ -11026,6 +10930,7 @@ check_shared_dpll_state(struct drm_device *dev)
void
intel_modeset_check_state(struct drm_device *dev)
{
+ check_wm_state(dev);
check_connector_state(dev);
check_encoder_state(dev);
check_crtc_state(dev);
@@ -11076,50 +10981,67 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev) &&
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
crtc->scanline_offset = 1;
}
+static struct intel_crtc_config *
+intel_modeset_compute_config(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb,
+ unsigned *modeset_pipes,
+ unsigned *prepare_pipes,
+ unsigned *disable_pipes)
+{
+ struct intel_crtc_config *pipe_config = NULL;
+
+ intel_modeset_affected_pipes(crtc, modeset_pipes,
+ prepare_pipes, disable_pipes);
+
+ if ((*modeset_pipes) == 0)
+ goto out;
+
+ /*
+ * Note this needs changes when we start tracking multiple modes
+ * and crtcs. At that point we'll need to compute the whole config
+ * (i.e. one pipe_config for each crtc) rather than just the one
+ * for this crtc.
+ */
+ pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
+ if (IS_ERR(pipe_config))
+ goto out;
+
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ "[modeset]");
+
+out:
+ return pipe_config;
+}
+
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+ int x, int y, struct drm_framebuffer *fb,
+ struct intel_crtc_config *pipe_config,
+ unsigned modeset_pipes,
+ unsigned prepare_pipes,
+ unsigned disable_pipes)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
- struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
- unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
if (!saved_mode)
return -ENOMEM;
- intel_modeset_affected_pipes(crtc, &modeset_pipes,
- &prepare_pipes, &disable_pipes);
-
*saved_mode = crtc->mode;
- /* Hack: Because we don't (yet) support global modeset on multiple
- * crtcs, we don't keep track of the new mode for more than one crtc.
- * Hence simply check whether any bit is set in modeset_pipes in all the
- * pieces of code that are not yet converted to deal with multiple crtcs
- * changing their mode at the same time. */
- if (modeset_pipes) {
- pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
- if (IS_ERR(pipe_config)) {
- ret = PTR_ERR(pipe_config);
- pipe_config = NULL;
-
- goto out;
- }
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- "[modeset]");
+ if (modeset_pipes)
to_intel_crtc(crtc)->new_config = pipe_config;
- }
/*
* See if the config requires any additional preparation, e.g.
@@ -11135,6 +11057,22 @@ static int __intel_set_mode(struct drm_crtc *crtc,
prepare_pipes &= ~disable_pipes;
}
+ if (dev_priv->display.crtc_compute_clock) {
+ unsigned clear_pipes = modeset_pipes | disable_pipes;
+
+ ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
+ if (ret)
+ goto done;
+
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ ret = dev_priv->display.crtc_compute_clock(intel_crtc);
+ if (ret) {
+ intel_shared_dpll_abort_config(dev_priv);
+ goto done;
+ }
+ }
+ }
+
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
@@ -11145,6 +11083,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* crtc->mode is already used by the ->mode_set callbacks, hence we need
* to set it here already despite that we pass it down the callchain.
+ *
+ * Note we'll need to fix this up when we start tracking multiple
+ * pipes; here we assume a single modeset_pipe and only track the
+ * single crtc and mode.
*/
if (modeset_pipes) {
crtc->mode = *mode;
@@ -11166,8 +11108,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
- if (dev_priv->display.modeset_global_resources)
- dev_priv->display.modeset_global_resources(dev);
+ modeset_update_crtc_power_domains(dev);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
@@ -11178,9 +11119,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev,
- obj,
- NULL);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
mutex_unlock(&dev->struct_mutex);
@@ -11195,11 +11134,6 @@ static int __intel_set_mode(struct drm_crtc *crtc,
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
-
- ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
- x, y, fb);
- if (ret)
- goto done;
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -11214,19 +11148,23 @@ done:
if (ret && crtc->enabled)
crtc->mode = *saved_mode;
-out:
kfree(pipe_config);
kfree(saved_mode);
return ret;
}
-static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+static int intel_set_mode_pipes(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb,
+ struct intel_crtc_config *pipe_config,
+ unsigned modeset_pipes,
+ unsigned prepare_pipes,
+ unsigned disable_pipes)
{
int ret;
- ret = __intel_set_mode(crtc, mode, x, y, fb);
+ ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
+ prepare_pipes, disable_pipes);
if (ret == 0)
intel_modeset_check_state(crtc->dev);
@@ -11234,6 +11172,26 @@ static int intel_set_mode(struct drm_crtc *crtc,
return ret;
}
+static int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct intel_crtc_config *pipe_config;
+ unsigned modeset_pipes, prepare_pipes, disable_pipes;
+
+ pipe_config = intel_modeset_compute_config(crtc, mode, fb,
+ &modeset_pipes,
+ &prepare_pipes,
+ &disable_pipes);
+
+ if (IS_ERR(pipe_config))
+ return PTR_ERR(pipe_config);
+
+ return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
+ modeset_pipes, prepare_pipes,
+ disable_pipes);
+}
+
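The refactor above splits mode setting into a compute step that may fail without side effects and a commit step that applies the precomputed configuration. A compact sketch of that two-phase shape (names are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct config { int valid; };

static struct config *compute_config(int mode)
{
	struct config *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	if (mode <= 0) {	/* reject before anything is touched */
		free(c);
		return NULL;
	}
	c->valid = 1;
	return c;
}

static int commit_config(struct config *c)
{
	printf("committing valid=%d\n", c->valid);
	free(c);		/* the commit step consumes the config */
	return 0;
}

int main(void)
{
	struct config *c = compute_config(1);

	return c ? commit_config(c) : 1;
}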
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
@@ -11562,6 +11520,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
struct drm_device *dev;
struct drm_mode_set save_set;
struct intel_set_config *config;
+ struct intel_crtc_config *pipe_config;
+ unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret;
BUG_ON(!set);
@@ -11607,9 +11567,38 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
if (ret)
goto fail;
+ pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
+ set->fb,
+ &modeset_pipes,
+ &prepare_pipes,
+ &disable_pipes);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto fail;
+ } else if (pipe_config) {
+ if (pipe_config->has_audio !=
+ to_intel_crtc(set->crtc)->config.has_audio)
+ config->mode_changed = true;
+
+ /*
+ * Note we have an issue here with infoframes: current code
+ * only updates them on the full mode set path per hw
+ * requirements. So here we should be checking for any
+ * required changes and forcing a mode set.
+ */
+ }
+
+ /* set_mode will free it in the mode_changed case */
+ if (!config->mode_changed)
+ kfree(pipe_config);
+
+ intel_update_pipe_size(to_intel_crtc(set->crtc));
+
if (config->mode_changed) {
- ret = intel_set_mode(set->crtc, set->mode,
- set->x, set->y, set->fb);
+ ret = intel_set_mode_pipes(set->crtc, set->mode,
+ set->x, set->y, set->fb, pipe_config,
+ modeset_pipes, prepare_pipes,
+ disable_pipes);
} else if (config->fb_changed) {
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
@@ -11679,7 +11668,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(PCH_DPLL(pll->id));
@@ -11693,8 +11682,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
- I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+ I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
@@ -11703,7 +11692,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
- I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+ I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
/* Wait for the clocks to stabilize. */
POSTING_READ(PCH_DPLL(pll->id));
@@ -11714,7 +11703,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+ I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -11813,161 +11802,195 @@ disable_unpin:
}
static int
-intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+intel_check_primary_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect *dest = &state->dst;
+ struct drm_rect *src = &state->src;
+ const struct drm_rect *clip = &state->clip;
+
+ return drm_plane_helper_check_update(plane, crtc, fb,
+ src, dest, clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &state->visible);
+}
+
+static int
+intel_prepare_primary_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
- struct drm_rect dest = {
- /* integer pixels */
- .x1 = crtc_x,
- .y1 = crtc_y,
- .x2 = crtc_x + crtc_w,
- .y2 = crtc_y + crtc_h,
- };
- struct drm_rect src = {
- /* 16.16 fixed point */
- .x1 = src_x,
- .y1 = src_y,
- .x2 = src_x + src_w,
- .y2 = src_y + src_h,
- };
- const struct drm_rect clip = {
- /* integer pixels */
- .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
- .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
- };
- const struct {
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y, src_w, src_h;
- } orig = {
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- };
- struct intel_plane *intel_plane = to_intel_plane(plane);
- bool visible;
int ret;
- ret = drm_plane_helper_check_update(plane, crtc, fb,
- &src, &dest, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true, &visible);
+ intel_crtc_wait_for_pending_flips(crtc);
- if (ret)
- return ret;
+ if (intel_crtc_has_pending_flip(crtc)) {
+ DRM_ERROR("pipe is still busy with an old pageflip\n");
+ return -EBUSY;
+ }
- /*
- * If the CRTC isn't enabled, we're just pinning the framebuffer,
- * updating the fb pointer, and returning without touching the
- * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
- * turn on the display with all planes setup as desired.
- */
- if (!crtc->enabled) {
+ if (old_obj != obj) {
mutex_lock(&dev->struct_mutex);
-
- /*
- * If we already called setplane while the crtc was disabled,
- * we may have an fb pinned; unpin it.
- */
- if (plane->fb)
- intel_unpin_fb_obj(old_obj);
-
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-
- /* Pin and return without programming hardware */
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ if (ret != 0) {
+ DRM_DEBUG_KMS("pin & fence failed\n");
+ return ret;
+ }
}
- intel_crtc_wait_for_pending_flips(crtc);
+ return 0;
+}
- /*
- * If clipping results in a non-visible primary plane, we'll disable
- * the primary plane. Note that this is a bit different than what
- * happens if userspace explicitly disables the plane by passing fb=0
- * because plane->fb still gets set and pinned.
- */
- if (!visible) {
- mutex_lock(&dev->struct_mutex);
+static void
+intel_commit_primary_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *old_fb = plane->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_rect *src = &state->src;
+ crtc->primary->fb = fb;
+ crtc->x = src->x1 >> 16;
+ crtc->y = src->y1 >> 16;
+
+ intel_plane->crtc_x = state->orig_dst.x1;
+ intel_plane->crtc_y = state->orig_dst.y1;
+ intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+ intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+ intel_plane->src_x = state->orig_src.x1;
+ intel_plane->src_y = state->orig_src.y1;
+ intel_plane->src_w = drm_rect_width(&state->orig_src);
+ intel_plane->src_h = drm_rect_height(&state->orig_src);
+ intel_plane->obj = obj;
+
+ if (intel_crtc->active) {
/*
- * Try to pin the new fb first so that we can bail out if we
- * fail.
+ * FBC does not work on some platforms for rotated
+ * planes, so disable it when rotation is not 0 and
+ * update it when rotation is set back to 0.
+ *
+ * FIXME: This is redundant with the fbc update done in
+ * the primary plane enable function except that that
+ * one is done too late. We eventually need to unify
+ * this.
*/
- if (plane->fb != fb) {
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (intel_crtc->primary_enabled &&
+ INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ dev_priv->fbc.plane == intel_crtc->plane &&
+ intel_plane->rotation != BIT(DRM_ROTATE_0)) {
+ intel_disable_fbc(dev);
}
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-
- if (intel_crtc->primary_enabled)
- intel_disable_primary_hw_plane(plane, crtc);
+ if (state->visible) {
+ bool was_enabled = intel_crtc->primary_enabled;
+ /* FIXME: kill this fastboot hack */
+ intel_update_pipe_size(intel_crtc);
- if (plane->fb != fb)
- if (plane->fb)
- intel_unpin_fb_obj(old_obj);
+ intel_crtc->primary_enabled = true;
- mutex_unlock(&dev->struct_mutex);
+ dev_priv->display.update_primary_plane(crtc, plane->fb,
+ crtc->x, crtc->y);
- } else {
- if (intel_crtc && intel_crtc->active &&
- intel_crtc->primary_enabled) {
/*
- * FBC does not work on some platforms for rotated
- * planes, so disable it when rotation is not 0 and
- * update it when rotation is set back to 0.
- *
- * FIXME: This is redundant with the fbc update done in
- * the primary plane enable function except that that
- * one is done too late. We eventually need to unify
- * this.
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
*/
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.plane == intel_crtc->plane &&
- intel_plane->rotation != BIT(DRM_ROTATE_0)) {
- intel_disable_fbc(dev);
- }
+ if (IS_BROADWELL(dev) && !was_enabled)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ } else {
+ /*
+ * If clipping results in a non-visible primary plane,
+ * we'll disable the primary plane. Note that this is
+ * a bit different than what happens if userspace
+ * explicitly disables the plane by passing fb=0
+ * because plane->fb still gets set and pinned.
+ */
+ intel_disable_primary_hw_plane(plane, crtc);
}
- ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
- if (ret)
- return ret;
- if (!intel_crtc->primary_enabled)
- intel_enable_primary_hw_plane(plane, crtc);
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
}
- intel_plane->crtc_x = orig.crtc_x;
- intel_plane->crtc_y = orig.crtc_y;
- intel_plane->crtc_w = orig.crtc_w;
- intel_plane->crtc_h = orig.crtc_h;
- intel_plane->src_x = orig.src_x;
- intel_plane->src_y = orig.src_y;
- intel_plane->src_w = orig.src_w;
- intel_plane->src_h = orig.src_h;
- intel_plane->obj = obj;
+ if (old_fb && old_fb != fb) {
+ if (intel_crtc->active)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(old_obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_plane_state state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int ret;
+
+ state.crtc = crtc;
+ state.fb = fb;
+
+ /* sample coordinates in 16.16 fixed point */
+ state.src.x1 = src_x;
+ state.src.x2 = src_x + src_w;
+ state.src.y1 = src_y;
+ state.src.y2 = src_y + src_h;
+
+ /* integer pixels */
+ state.dst.x1 = crtc_x;
+ state.dst.x2 = crtc_x + crtc_w;
+ state.dst.y1 = crtc_y;
+ state.dst.y2 = crtc_y + crtc_h;
+
+ state.clip.x1 = 0;
+ state.clip.y1 = 0;
+ state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+ state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+ state.orig_src = state.src;
+ state.orig_dst = state.dst;
+
+ ret = intel_check_primary_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ ret = intel_prepare_primary_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ intel_commit_primary_plane(plane, &state);
return 0;
}
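The primary-plane rework above follows the check/prepare/commit split that atomic modesetting formalizes: check validates the request without side effects, prepare acquires resources and may still fail, and commit must not fail. A skeletal sketch of the flow, with made-up function names:

#include <stdio.h>

struct plane_state { int visible; };

static int check(struct plane_state *s)
{
	s->visible = 1;		/* clip, validate sizes, etc. */
	return 0;
}

static int prepare(struct plane_state *s)
{
	(void)s;		/* pin the framebuffer; may fail */
	return 0;
}

static void commit(struct plane_state *s)
{
	printf("visible=%d\n", s->visible);	/* cannot fail */
}

static int update_plane(struct plane_state *s)
{
	int ret;

	ret = check(s);		/* no side effects on failure */
	if (ret)
		return ret;
	ret = prepare(s);	/* acquire resources */
	if (ret)
		return ret;
	commit(s);		/* point of no return */
	return 0;
}

int main(void)
{
	struct plane_state s = { 0 };

	return update_plane(&s);
}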
@@ -12046,51 +12069,92 @@ intel_cursor_plane_disable(struct drm_plane *plane)
}
static int
-intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+intel_check_cursor_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct drm_rect dest = {
- /* integer pixels */
- .x1 = crtc_x,
- .y1 = crtc_y,
- .x2 = crtc_x + crtc_w,
- .y2 = crtc_y + crtc_h,
- };
- struct drm_rect src = {
- /* 16.16 fixed point */
- .x1 = src_x,
- .y1 = src_y,
- .x2 = src_x + src_w,
- .y2 = src_y + src_h,
- };
- const struct drm_rect clip = {
- /* integer pixels */
- .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
- .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
- };
- bool visible;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect *dest = &state->dst;
+ struct drm_rect *src = &state->src;
+ const struct drm_rect *clip = &state->clip;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ int crtc_w, crtc_h;
+ unsigned stride;
int ret;
ret = drm_plane_helper_check_update(plane, crtc, fb,
- &src, &dest, &clip,
+ src, dest, clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
- true, true, &visible);
+ true, true, &state->visible);
if (ret)
return ret;
- crtc->cursor_x = crtc_x;
- crtc->cursor_y = crtc_y;
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!obj)
+ return 0;
+
+ /* Check for which cursor types we support */
+ crtc_w = drm_rect_width(&state->orig_dst);
+ crtc_h = drm_rect_height(&state->orig_dst);
+ if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
+ DRM_DEBUG("Cursor dimension not supported\n");
+ return -EINVAL;
+ }
+
+ stride = roundup_pow_of_two(crtc_w) * 4;
+ if (obj->base.size < stride * crtc_h) {
+ DRM_DEBUG_KMS("buffer is too small\n");
+ return -ENOMEM;
+ }
+
+ if (fb == crtc->cursor->fb)
+ return 0;
+
+ /* we only need to pin inside GTT if cursor is non-phy */
+ mutex_lock(&dev->struct_mutex);
+ if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
+ DRM_DEBUG_KMS("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static int
+intel_commit_cursor_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ int crtc_w, crtc_h;
+
+ crtc->cursor_x = state->orig_dst.x1;
+ crtc->cursor_y = state->orig_dst.y1;
+
+ intel_plane->crtc_x = state->orig_dst.x1;
+ intel_plane->crtc_y = state->orig_dst.y1;
+ intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+ intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+ intel_plane->src_x = state->orig_src.x1;
+ intel_plane->src_y = state->orig_src.y1;
+ intel_plane->src_w = drm_rect_width(&state->orig_src);
+ intel_plane->src_h = drm_rect_height(&state->orig_src);
+ intel_plane->obj = obj;
+
if (fb != crtc->cursor->fb) {
+ crtc_w = drm_rect_width(&state->orig_dst);
+ crtc_h = drm_rect_height(&state->orig_dst);
return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
} else {
- intel_crtc_update_cursor(crtc, visible);
+ intel_crtc_update_cursor(crtc, state->visible);
intel_frontbuffer_flip(crtc->dev,
INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
@@ -12098,10 +12162,53 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
return 0;
}
}
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane_state state;
+ int ret;
+
+ state.crtc = crtc;
+ state.fb = fb;
+
+ /* sample coordinates in 16.16 fixed point */
+ state.src.x1 = src_x;
+ state.src.x2 = src_x + src_w;
+ state.src.y1 = src_y;
+ state.src.y2 = src_y + src_h;
+
+ /* integer pixels */
+ state.dst.x1 = crtc_x;
+ state.dst.x2 = crtc_x + crtc_w;
+ state.dst.y1 = crtc_y;
+ state.dst.y2 = crtc_y + crtc_h;
+
+ state.clip.x1 = 0;
+ state.clip.y1 = 0;
+ state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+ state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+ state.orig_src = state.src;
+ state.orig_dst = state.dst;
+
+ ret = intel_check_cursor_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ return intel_commit_cursor_plane(plane, &state);
+}
+
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.update_plane = intel_cursor_plane_update,
.disable_plane = intel_cursor_plane_disable,
.destroy = intel_plane_destroy,
+ .set_property = intel_plane_set_property,
};
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -12117,12 +12224,26 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
+ cursor->rotation = BIT(DRM_ROTATE_0);
drm_universal_plane_init(dev, &cursor->base, 0,
&intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (!dev->mode_config.rotation_property)
+ dev->mode_config.rotation_property =
+ drm_mode_create_rotation_property(dev,
+ BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_180));
+ if (dev->mode_config.rotation_property)
+ drm_object_attach_property(&cursor->base.base,
+ dev->mode_config.rotation_property,
+ cursor->rotation);
+ }
+
return &cursor->base;
}
@@ -12178,6 +12299,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+ INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);
+
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -12198,7 +12321,7 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- if (!encoder)
+ if (!encoder || WARN_ON(!encoder->crtc))
return INVALID_PIPE;
return to_intel_crtc(encoder->crtc)->pipe;
@@ -12286,7 +12409,10 @@ static bool intel_crt_present(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_ULT(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ return false;
+
+ if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
return false;
if (IS_CHERRYVIEW(dev))
@@ -12430,7 +12556,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- intel_edp_psr_init(dev);
+ intel_psr_init(dev);
for_each_intel_encoder(dev, encoder) {
encoder->base.possible_crtcs = encoder->crtc_mask;
@@ -12634,16 +12760,22 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
- dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock =
+ haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
- dev_priv->display.update_primary_plane =
- ironlake_update_primary_plane;
+ if (INTEL_INFO(dev)->gen >= 9)
+ dev_priv->display.update_primary_plane =
+ skylake_update_primary_plane;
+ else
+ dev_priv->display.update_primary_plane =
+ ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
- dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock =
+ ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
@@ -12652,7 +12784,7 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
- dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
@@ -12661,7 +12793,7 @@ static void intel_init_display(struct drm_device *dev)
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
- dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
@@ -12698,31 +12830,20 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
- if (IS_G4X(dev)) {
- dev_priv->display.write_eld = g4x_write_eld;
- } else if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
- dev_priv->display.modeset_global_resources =
- snb_modeset_global_resources;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
dev_priv->display.modeset_global_resources =
ivb_modeset_global_resources;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- dev_priv->display.write_eld = haswell_write_eld;
- dev_priv->display.modeset_global_resources =
- haswell_modeset_global_resources;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.modeset_global_resources =
valleyview_modeset_global_resources;
- dev_priv->display.write_eld = ironlake_write_eld;
}
/* Default just returns -ENODEV to indicate unsupported */
@@ -12749,6 +12870,9 @@ static void intel_init_display(struct drm_device *dev)
case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
+ case 9:
+ dev_priv->display.queue_flip = intel_gen9_queue_flip;
+ break;
}
intel_panel_init_backlight_funcs(dev);
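
The hunk above replaces the per-platform crtc_mode_set hook with the narrower crtc_compute_clock, and adds gen9-specific entries for the primary-plane and page-flip paths. As a rough, userspace-compilable model of this function-table dispatch (all names below are illustrative, not the driver's):

    #include <stdio.h>

    struct display_funcs {
        void (*crtc_compute_clock)(int pipe);
        void (*update_primary_plane)(int pipe);
    };

    static void ilk_compute_clock(int pipe) { printf("ilk clock, pipe %d\n", pipe); }
    static void ilk_update_plane(int pipe)  { printf("ilk plane, pipe %d\n", pipe); }
    static void skl_update_plane(int pipe)  { printf("skl plane, pipe %d\n", pipe); }

    /* Fill the table once at init time based on the hardware generation;
     * the hot paths then just call through it. */
    static void init_display(struct display_funcs *f, int gen)
    {
        f->crtc_compute_clock = ilk_compute_clock;
        f->update_primary_plane = gen >= 9 ? skl_update_plane
                                           : ilk_update_plane;
    }

    int main(void)
    {
        struct display_funcs f;

        init_display(&f, 9);
        f.crtc_compute_clock(0);
        f.update_primary_plane(0);
        return 0;
    }
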
@@ -12953,11 +13077,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
intel_enable_gt_powersave(dev);
}
-void intel_modeset_suspend_hw(struct drm_device *dev)
-{
- intel_suspend_hw(dev);
-}
-
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -12983,6 +13102,7 @@ void intel_modeset_init(struct drm_device *dev)
return;
intel_init_display(dev);
+ intel_init_audio(dev);
if (IS_GEN2(dev)) {
dev->mode_config.max_width = 2048;
@@ -13293,7 +13413,7 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
i915_redisable_vga_power_on(dev);
@@ -13337,18 +13457,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
+ pll->on = pll->get_hw_state(dev_priv, pll,
+ &pll->config.hw_state);
pll->active = 0;
+ pll->config.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
- if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
+ if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
pll->active++;
+ pll->config.crtc_mask |= 1 << crtc->pipe;
+ }
}
- pll->refcount = pll->active;
- DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
- pll->name, pll->refcount, pll->on);
+ DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
+ pll->name, pll->config.crtc_mask, pll->on);
- if (pll->refcount)
+ if (pll->config.crtc_mask)
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
@@ -13438,7 +13561,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
pll->on = false;
}
- if (HAS_PCH_SPLIT(dev))
+ if (IS_GEN9(dev))
+ skl_wm_get_hw_state(dev);
+ else if (HAS_PCH_SPLIT(dev))
ilk_wm_get_hw_state(dev);
if (force_restore) {
@@ -13452,8 +13577,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_crtc *crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
- __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
- crtc->primary->fb);
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+ crtc->primary->fb);
}
} else {
intel_modeset_update_staged_output_state(dev);
@@ -13464,6 +13589,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
@@ -13471,6 +13597,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
+ /*
+ * There may be no VBT; and if the BIOS enabled SSC we can
+ * just keep using it to avoid unnecessary flicker. Whereas if the
+ * BIOS isn't using it, don't assume it will work even if the VBT
+ * indicates as much.
+ */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE);
+
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
@@ -13486,7 +13622,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
if (obj == NULL)
continue;
- if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
+ if (intel_pin_and_fence_fb_obj(c->primary,
+ c->primary->fb,
+ NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
@@ -13494,6 +13632,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
}
}
mutex_unlock(&dev->struct_mutex);
+
+ intel_backlight_register(dev);
}
void intel_connector_unregister(struct intel_connector *intel_connector)
@@ -13509,14 +13649,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
+ intel_disable_gt_powersave(dev);
+
+ intel_backlight_unregister(dev);
+
/*
* Interrupts and polling as the first thing to avoid creating havoc.
- * Too much stuff here (turning of rps, connectors, ...) would
+ * Too much stuff here (turning off connectors, ...) would
* experience fancy races otherwise.
*/
- drm_irq_uninstall(dev);
- intel_hpd_cancel_work(dev_priv);
- dev_priv->pm._irqs_disabled = true;
+ intel_irq_uninstall(dev_priv);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -13530,8 +13672,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
- intel_disable_gt_powersave(dev);
-
ironlake_teardown_rc6(dev);
mutex_unlock(&dev->struct_mutex);
@@ -13671,8 +13811,8 @@ intel_display_capture_error_state(struct drm_device *dev)
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =
- intel_display_power_enabled_unlocked(dev_priv,
- POWER_DOMAIN_PIPE(i));
+ __intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
@@ -13707,7 +13847,7 @@ intel_display_capture_error_state(struct drm_device *dev)
enum transcoder cpu_transcoder = transcoders[i];
error->transcoder[i].power_domain_on =
- intel_display_power_enabled_unlocked(dev_priv,
+ __intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
@@ -13791,9 +13931,8 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
for_each_intel_crtc(dev, crtc) {
struct intel_unpin_work *work;
- unsigned long irqflags;
- spin_lock_irqsave(&dev->event_lock, irqflags);
+ spin_lock_irq(&dev->event_lock);
work = crtc->unpin_work;
@@ -13803,6 +13942,6 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
work->event = NULL;
}
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ spin_unlock_irq(&dev->event_lock);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4bcd917..5cecc20 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -113,6 +113,9 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
+static void vlv_steal_power_sequencer(struct drm_device *dev,
+ enum pipe pipe);
int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
@@ -224,8 +227,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static uint32_t
-pack_aux(uint8_t *src, int src_bytes)
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
int i;
uint32_t v = 0;
@@ -237,8 +239,7 @@ pack_aux(uint8_t *src, int src_bytes)
return v;
}
-static void
-unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
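
intel_dp_pack_aux() and intel_dp_unpack_aux() move bytes between memory and the 32-bit AUX data registers, most-significant byte first; the rename just exports them for use outside this file. A minimal standalone sketch of the packing, assuming the same MSB-first layout as the functions above:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack up to 4 bytes into a register word, MSB first. */
    static uint32_t pack_aux(const uint8_t *src, int src_bytes)
    {
        uint32_t v = 0;
        int i;

        if (src_bytes > 4)
            src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
            v |= (uint32_t)src[i] << ((3 - i) * 8);
        return v;
    }

    static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
    {
        int i;

        if (dst_bytes > 4)
            dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
            dst[i] = src >> ((3 - i) * 8);
    }

    int main(void)
    {
        const uint8_t msg[3] = { 0x10, 0x20, 0x30 };
        uint8_t out[3];
        uint32_t v = pack_aux(msg, 3);    /* 0x10203000 */

        unpack_aux(v, out, 3);
        printf("0x%08x %02x %02x %02x\n", v, out[0], out[1], out[2]);
        return 0;
    }
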
@@ -283,12 +284,10 @@ intel_hrawclk(struct drm_device *dev)
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *out);
+ struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *out);
+ struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
{
@@ -322,6 +321,66 @@ static void pps_unlock(struct intel_dp *intel_dp)
intel_display_power_put(dev_priv, power_domain);
}
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_dp->pps_pipe;
+ bool pll_enabled;
+ uint32_t DP;
+
+ if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power seqeuncer kick due to port %c being active\n",
+ pipe_name(pipe), port_name(intel_dig_port->port)))
+ return;
+
+ DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
+ pipe_name(pipe), port_name(intel_dig_port->port));
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ DP |= DP_PORT_WIDTH(1);
+ DP |= DP_LINK_TRAIN_PAT_1;
+
+ if (IS_CHERRYVIEW(dev))
+ DP |= DP_PIPE_SELECT_CHV(pipe);
+ else if (pipe == PIPE_B)
+ DP |= DP_PIPEB_SELECT;
+
+ pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+ /*
+ * The DPLL for the pipe must be enabled for this to work.
+ * So enable it temporarily if it's not already enabled.
+ */
+ if (!pll_enabled)
+ vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+ &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+
+ /*
+ * Similar magic to that in intel_dp_enable_port().
+ * We _must_ do this port enable + disable trick
+ * to make this power sequencer lock onto the port.
+ * Otherwise even the VDD force bit won't work.
+ */
+ I915_WRITE(intel_dp->output_reg, DP);
+ POSTING_READ(intel_dp->output_reg);
+
+ I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+
+ if (!pll_enabled)
+ vlv_force_pll_off(dev, pipe);
+}
+
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
@@ -330,10 +389,13 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
- struct edp_power_seq power_seq;
+ enum pipe pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
+ /* We should never land here with regular DP ports */
+ WARN_ON(!is_edp(intel_dp));
+
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
@@ -359,18 +421,26 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
* are two power sequencers and up to two eDP ports.
*/
if (WARN_ON(pipes == 0))
- return PIPE_A;
+ pipe = PIPE_A;
+ else
+ pipe = ffs(pipes) - 1;
- intel_dp->pps_pipe = ffs(pipes) - 1;
+ vlv_steal_power_sequencer(dev, pipe);
+ intel_dp->pps_pipe = pipe;
DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
pipe_name(intel_dp->pps_pipe),
port_name(intel_dig_port->port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+
+ /*
+ * Even vdd force doesn't work until we've made
+ * the power sequencer lock onto the port.
+ */
+ vlv_power_sequencer_kick(intel_dp);
return intel_dp->pps_pipe;
}
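
vlv_power_sequencer_pipe() now picks the lowest free power sequencer out of a bitmask of candidate pipes with ffs() and explicitly steals it from any other port before claiming it. The bitmask trick in isolation, as a small sketch:

    #include <strings.h>    /* ffs() */
    #include <stdio.h>

    enum pipe { PIPE_A, PIPE_B };

    /* pipes: bitmask of power sequencers not yet claimed by an encoder */
    static enum pipe pick_pps_pipe(unsigned int pipes)
    {
        if (pipes == 0)         /* both claimed: WARN and fall back */
            return PIPE_A;
        return ffs(pipes) - 1;  /* lowest set bit -> lowest free pipe */
    }

    int main(void)
    {
        printf("%d\n", pick_pps_pipe((1 << PIPE_A) | (1 << PIPE_B))); /* 0 */
        printf("%d\n", pick_pps_pipe(1 << PIPE_B));                   /* 1 */
        return 0;
    }
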
@@ -425,7 +495,6 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_power_seq power_seq;
enum port port = intel_dig_port->port;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -453,9 +522,8 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
port_name(port), pipe_name(intel_dp->pps_pipe));
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -550,6 +618,10 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (IS_VALLEYVIEW(dev) &&
+ intel_dp->pps_pipe == INVALID_PIPE)
+ return false;
+
return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
@@ -560,6 +632,10 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (IS_VALLEYVIEW(dev) &&
+ intel_dp->pps_pipe == INVALID_PIPE)
+ return false;
+
return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
@@ -661,6 +737,16 @@ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 100;
}
+static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ /*
+ * SKL doesn't need us to program the AUX clock divider (the hardware
+ * derives the clock from CDCLK automatically). We still implement the
+ * get_aux_clock_divider vfunc to plug into the existing code.
+ */
+ return index ? 0 : 1;
+}
+
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
bool has_aux_irq,
int send_bytes,
@@ -691,9 +777,24 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
+static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t unused)
+{
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_TIME_OUT_1600us |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+}
+
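
With skl_get_aux_clock_divider() returning 1 at index 0 and 0 afterwards, Skylake slots into the same retry loop as older platforms: intel_dp_aux_ch() keeps asking for dividers by increasing index until the hook returns 0. A hedged sketch of that iteration pattern (the transfer step is a stub, not the real register sequence):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* index 0 is the only divider SKL reports; 0 terminates the loop */
    static uint32_t skl_get_aux_clock_divider(int index)
    {
        return index ? 0 : 1;
    }

    static bool try_aux_transfer(uint32_t divider)
    {
        printf("trying AUX transfer with divider %u\n", divider);
        return true;    /* stand-in for the real register dance */
    }

    int main(void)
    {
        uint32_t divider;
        int clock = 0;

        while ((divider = skl_get_aux_clock_divider(clock++)) != 0) {
            if (try_aux_transfer(divider))
                break;
        }
        return 0;
    }
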
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
- uint8_t *send, int send_bytes,
+ const uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -760,7 +861,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
I915_WRITE(ch_data + i,
- pack_aux(send + i, send_bytes - i));
+ intel_dp_pack_aux(send + i,
+ send_bytes - i));
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl, send_ctl);
@@ -814,8 +916,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- unpack_aux(I915_READ(ch_data + i),
- recv + i, recv_bytes - i);
+ intel_dp_unpack_aux(I915_READ(ch_data + i),
+ recv + i, recv_bytes - i);
ret = recv_bytes;
out:
@@ -925,7 +1027,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
BUG();
}
- if (!HAS_DDI(dev))
+ /*
+ * The AUX_CTL register is usually DP_CTL + 0x10.
+ *
+ * On Haswell and Broadwell though:
+ * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
+ * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
+ *
+ * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
+ */
+ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
intel_dp->aux.name = name;
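
Put differently, everywhere except Haswell/Broadwell the AUX control register can be derived from the port's DP control register. A hypothetical one-liner model of that choice (ddi_aux_ctl is a stand-in, not a real define):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: where AUX_CTL lives for a DP port. The
     * hypothetical ddi_aux_ctl stands in for the per-port register
     * the driver uses on HSW/BDW instead. */
    static uint32_t aux_ch_ctl_reg(uint32_t output_reg, bool is_hsw_or_bdw,
                                   uint32_t ddi_aux_ctl)
    {
        return is_hsw_or_bdw ? ddi_aux_ctl : output_reg + 0x10;
    }

    int main(void)
    {
        printf("0x%x\n", aux_ch_ctl_reg(0x1000, false, 0));
        return 0;
    }
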
@@ -963,6 +1074,33 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
+skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
+{
+ u32 ctrl1;
+
+ pipe_config->ddi_pll_sel = SKL_DPLL0;
+ pipe_config->dpll_hw_state.cfgcr1 = 0;
+ pipe_config->dpll_hw_state.cfgcr2 = 0;
+
+ ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
+ SKL_DPLL0);
+ break;
+ case DP_LINK_BW_2_7:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
+ SKL_DPLL0);
+ break;
+ case DP_LINK_BW_5_4:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
+ SKL_DPLL0);
+ break;
+ }
+ pipe_config->dpll_hw_state.ctrl1 = ctrl1;
+}
+
+static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
{
switch (link_bw) {
@@ -1139,7 +1277,9 @@ found:
&pipe_config->dp_m2_n2);
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_SKYLAKE(dev) && is_edp(intel_dp))
+ skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -1212,12 +1352,8 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
- if (crtc->config.has_audio) {
- DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
- pipe_name(crtc->pipe));
+ if (crtc->config.has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- intel_write_eld(&encoder->base, adjusted_mode);
- }
/* Split out the IBX/CPU vs CPT settings */
@@ -1367,6 +1503,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return false;
+ cancel_delayed_work(&intel_dp->panel_vdd_work);
intel_dp->want_panel_vdd = true;
if (edp_have_panel_vdd(intel_dp))
@@ -1375,7 +1512,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
- DRM_DEBUG_KMS("Turning eDP VDD on\n");
+ DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
+ port_name(intel_dig_port->port));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -1394,7 +1532,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP was not running\n");
+ DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
+ port_name(intel_dig_port->port));
msleep(intel_dp->panel_power_up_delay);
}
@@ -1419,7 +1558,8 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = edp_panel_vdd_on(intel_dp);
pps_unlock(intel_dp);
- WARN(!vdd, "eDP VDD already requested on\n");
+ WARN(!vdd, "eDP port %c VDD already requested on\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
@@ -1440,7 +1580,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if (!edp_have_panel_vdd(intel_dp))
return;
- DRM_DEBUG_KMS("Turning eDP VDD off\n");
+ DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
+ port_name(intel_dig_port->port));
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -1501,7 +1642,8 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!is_edp(intel_dp))
return;
- WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
+ WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
+ port_name(dp_to_dig_port(intel_dp)->port));
intel_dp->want_panel_vdd = false;
@@ -1511,40 +1653,25 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
edp_panel_vdd_schedule_off(intel_dp);
}
-/*
- * Must be paired with intel_edp_panel_vdd_on().
- * Nested calls to these functions are not allowed since
- * we drop the lock. Caller must use some higher level
- * locking to prevent nested calls from other threads.
- */
-static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
-{
- if (!is_edp(intel_dp))
- return;
-
- pps_lock(intel_dp);
- edp_panel_vdd_off(intel_dp, sync);
- pps_unlock(intel_dp);
-}
-
-void intel_edp_panel_on(struct intel_dp *intel_dp)
+static void edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_ctrl_reg;
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
if (!is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP power on\n");
+ DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
- pps_lock(intel_dp);
-
- if (edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP power already on\n");
- goto out;
- }
+ if (WARN(edp_have_panel_power(intel_dp),
+ "eDP port %c panel power already on\n",
+ port_name(dp_to_dig_port(intel_dp)->port)))
+ return;
wait_panel_power_cycle(intel_dp);
@@ -1572,12 +1699,20 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
}
+}
+
+void intel_edp_panel_on(struct intel_dp *intel_dp)
+{
+ if (!is_edp(intel_dp))
+ return;
- out:
+ pps_lock(intel_dp);
+ edp_panel_on(intel_dp);
pps_unlock(intel_dp);
}
-void intel_edp_panel_off(struct intel_dp *intel_dp)
+
+static void edp_panel_off(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -1587,14 +1722,16 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
u32 pp;
u32 pp_ctrl_reg;
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
if (!is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP power off\n");
-
- pps_lock(intel_dp);
+ DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
- WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+ WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -1615,7 +1752,15 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
/* We got a reference when we enabled the VDD. */
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
+}
+
+void intel_edp_panel_off(struct intel_dp *intel_dp)
+{
+ if (!is_edp(intel_dp))
+ return;
+ pps_lock(intel_dp);
+ edp_panel_off(intel_dp);
pps_unlock(intel_dp);
}
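
edp_panel_on()/edp_panel_off() are now lockdep-asserted helpers that expect pps_mutex to be held, with thin intel_edp_panel_on()/intel_edp_panel_off() wrappers taking the lock. The split lets callers that already hold the lock, such as intel_enable_dp() further down, run several panel operations under one critical section. The shape of the pattern, modeled with pthreads:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t pps_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Locked variant: caller must hold pps_mutex
     * (the kernel asserts this with lockdep_assert_held()). */
    static void edp_panel_on_locked(void)
    {
        printf("panel on\n");
    }

    /* Public wrapper for callers that don't hold the lock. */
    static void edp_panel_on(void)
    {
        pthread_mutex_lock(&pps_mutex);
        edp_panel_on_locked();
        pthread_mutex_unlock(&pps_mutex);
    }

    int main(void)
    {
        edp_panel_on();

        /* a caller doing several PPS steps takes the lock once */
        pthread_mutex_lock(&pps_mutex);
        edp_panel_on_locked();
        /* ... vdd on/off, port setup ... */
        pthread_mutex_unlock(&pps_mutex);
        return 0;
    }
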
@@ -1819,7 +1964,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(intel_dp->output_reg);
@@ -1951,368 +2096,14 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static bool is_edp_psr(struct intel_dp *intel_dp)
-{
- return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
-}
-
-static bool intel_edp_is_psr_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_PSR(dev))
- return false;
-
- return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
-}
-
-static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
- struct edp_vsc_psr *vsc_psr)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
- u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
- uint32_t *data = (uint32_t *) vsc_psr;
- unsigned int i;
-
- /* As per BSPec (Pipe Video Data Island Packet), we need to disable
- the video DIP being updated before program video DIP data buffer
- registers for DIP being updated. */
- I915_WRITE(ctl_reg, 0);
- POSTING_READ(ctl_reg);
-
- for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
- if (i < sizeof(struct edp_vsc_psr))
- I915_WRITE(data_reg + i, *data++);
- else
- I915_WRITE(data_reg + i, 0);
- }
-
- I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
- POSTING_READ(ctl_reg);
-}
-
-static void intel_edp_psr_setup(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_vsc_psr psr_vsc;
-
- /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
- intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
-
- /* Avoid continuous PSR exit by masking memup and hpd */
- I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-}
-
-static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t aux_clock_divider;
- int precharge = 0x3;
- int msg_size = 5; /* Header(4) + Message(1) */
- bool only_standby = false;
-
- aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
-
- if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
- only_standby = true;
-
- /* Enable PSR in sink */
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
- else
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-
- /* Setup AUX registers */
- I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
- I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
- I915_WRITE(EDP_PSR_AUX_CTL(dev),
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
-}
-
-static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t max_sleep_time = 0x1f;
- uint32_t idle_frames = 1;
- uint32_t val = 0x0;
- const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- bool only_standby = false;
-
- if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
- only_standby = true;
-
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
- val |= EDP_PSR_LINK_STANDBY;
- val |= EDP_PSR_TP2_TP3_TIME_0us;
- val |= EDP_PSR_TP1_TIME_0us;
- val |= EDP_PSR_SKIP_AUX_EXIT;
- val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
- } else
- val |= EDP_PSR_LINK_DISABLE;
-
- I915_WRITE(EDP_PSR_CTL(dev), val |
- (IS_BROADWELL(dev) ? 0 : link_entry_time) |
- max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
- idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
- EDP_PSR_ENABLE);
-}
-
-static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- lockdep_assert_held(&dev_priv->psr.lock);
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- dev_priv->psr.source_ok = false;
-
- if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
- DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
- return false;
- }
-
- if (!i915.enable_psr) {
- DRM_DEBUG_KMS("PSR disable by flag\n");
- return false;
- }
-
- /* Below limitations aren't valid for Broadwell */
- if (IS_BROADWELL(dev))
- goto out;
-
- if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
- S3D_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
- return false;
- }
-
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
- return false;
- }
-
- out:
- dev_priv->psr.source_ok = true;
- return true;
-}
-
-static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
- WARN_ON(dev_priv->psr.active);
- lockdep_assert_held(&dev_priv->psr.lock);
-
- /* Enable PSR on the panel */
- intel_edp_psr_enable_sink(intel_dp);
-
- /* Enable PSR on the host */
- intel_edp_psr_enable_source(intel_dp);
-
- dev_priv->psr.active = true;
-}
-
-void intel_edp_psr_enable(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_PSR(dev)) {
- DRM_DEBUG_KMS("PSR not supported on this platform\n");
- return;
- }
-
- if (!is_edp_psr(intel_dp)) {
- DRM_DEBUG_KMS("PSR not supported by this panel\n");
- return;
- }
-
- mutex_lock(&dev_priv->psr.lock);
- if (dev_priv->psr.enabled) {
- DRM_DEBUG_KMS("PSR already in use\n");
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- dev_priv->psr.busy_frontbuffer_bits = 0;
-
- /* Setup PSR once */
- intel_edp_psr_setup(intel_dp);
-
- if (intel_edp_psr_match_conditions(intel_dp))
- dev_priv->psr.enabled = intel_dp;
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_disable(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- if (dev_priv->psr.active) {
- I915_WRITE(EDP_PSR_CTL(dev),
- I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
-
- /* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
-
- dev_priv->psr.active = false;
- } else {
- WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
- }
-
- dev_priv->psr.enabled = NULL;
- mutex_unlock(&dev_priv->psr.lock);
-
- cancel_delayed_work_sync(&dev_priv->psr.work);
-}
-
-static void intel_edp_psr_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
-
- mutex_lock(&dev_priv->psr.lock);
- intel_dp = dev_priv->psr.enabled;
-
- if (!intel_dp)
- goto unlock;
-
- /*
- * The delayed work can race with an invalidate hence we need to
- * recheck. Since psr_flush first clears this and then reschedules we
- * won't ever miss a flush when bailing out here.
- */
- if (dev_priv->psr.busy_frontbuffer_bits)
- goto unlock;
-
- intel_edp_psr_do_enable(intel_dp);
-unlock:
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-static void intel_edp_psr_do_exit(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->psr.active) {
- u32 val = I915_READ(EDP_PSR_CTL(dev));
-
- WARN_ON(!(val & EDP_PSR_ENABLE));
-
- I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
-
- dev_priv->psr.active = false;
- }
-
-}
-
-void intel_edp_psr_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- enum pipe pipe;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- intel_edp_psr_do_exit(dev);
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
- dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- enum pipe pipe;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
- dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
-
- /*
- * On Haswell sprite plane updates don't result in a psr invalidating
- * signal in the hardware. Which means we need to manually fake this in
- * software for all flushes, not just when we've seen a preceding
- * invalidation through frontbuffer rendering.
- */
- if (IS_HASWELL(dev) &&
- (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
- intel_edp_psr_do_exit(dev);
-
- if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(100));
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
- mutex_init(&dev_priv->psr.lock);
-}
-
static void intel_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->config.has_audio)
+ intel_audio_codec_disable(encoder);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
@@ -2467,14 +2258,23 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_dp->DP |= DP_PORT_EN;
-
/* enable with pattern 1 (as per spec) */
_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
DP_TRAINING_PATTERN_1);
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
+
+ /*
+ * Magic for VLV/CHV. We _must_ first set up the register
+ * without actually enabling the port, and then do another
+ * write to enable the port. Otherwise link training will
+ * fail when the power sequencer is freshly used for this port.
+ */
+ intel_dp->DP |= DP_PORT_EN;
+
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
}
static void intel_enable_dp(struct intel_encoder *encoder)
@@ -2482,19 +2282,38 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
+ pps_lock(intel_dp);
+
+ if (IS_VALLEYVIEW(dev))
+ vlv_init_panel_power_sequencer(intel_dp);
+
intel_dp_enable_port(intel_dp);
- intel_edp_panel_vdd_on(intel_dp);
- intel_edp_panel_on(intel_dp);
- intel_edp_panel_vdd_off(intel_dp, true);
+
+ edp_panel_vdd_on(intel_dp);
+ edp_panel_on(intel_dp);
+ edp_panel_vdd_off(intel_dp, true);
+
+ pps_unlock(intel_dp);
+
+ if (IS_VALLEYVIEW(dev))
+ vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
+
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
+
+ if (crtc->config.has_audio) {
+ DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+ pipe_name(crtc->pipe));
+ intel_audio_codec_enable(encoder);
+ }
}
static void g4x_enable_dp(struct intel_encoder *encoder)
@@ -2526,6 +2345,32 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
}
}
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
+ enum pipe pipe = intel_dp->pps_pipe;
+ int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+
+ edp_panel_vdd_off_sync(intel_dp);
+
+ /*
+ * VLV seems to get confused when multiple power sequencers
+ * have the same port selected (even if only one has power/vdd
+ * enabled). The failure manifests as vlv_wait_port_ready() failing.
+ * CHV, on the other hand, doesn't seem to mind having the same port
+ * selected in multiple power sequencers, but let's always clear the
+ * port select when logically disconnecting a power sequencer
+ * from a port.
+ */
+ DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
+ pipe_name(pipe), port_name(intel_dig_port->port));
+ I915_WRITE(pp_on_reg, 0);
+ POSTING_READ(pp_on_reg);
+
+ intel_dp->pps_pipe = INVALID_PIPE;
+}
+
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe)
{
@@ -2534,6 +2379,9 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
struct intel_dp *intel_dp;
@@ -2551,10 +2399,12 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(port));
- /* make sure vdd is off before we steal it */
- edp_panel_vdd_off_sync(intel_dp);
+ WARN(encoder->connectors_active,
+ "stealing pipe %c power sequencer from active eDP port %c\n",
+ pipe_name(pipe), port_name(port));
- intel_dp->pps_pipe = INVALID_PIPE;
+ /* make sure vdd is off before we steal it */
+ vlv_detach_power_sequencer(intel_dp);
}
}
@@ -2565,10 +2415,12 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct edp_power_seq power_seq;
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (!is_edp(intel_dp))
+ return;
+
if (intel_dp->pps_pipe == crtc->pipe)
return;
@@ -2578,7 +2430,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
* we still have control of it.
*/
if (intel_dp->pps_pipe != INVALID_PIPE)
- edp_panel_vdd_off_sync(intel_dp);
+ vlv_detach_power_sequencer(intel_dp);
/*
* We may be stealing the power
@@ -2593,9 +2445,8 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
@@ -2624,15 +2475,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
- if (is_edp(intel_dp)) {
- pps_lock(intel_dp);
- vlv_init_panel_power_sequencer(intel_dp);
- pps_unlock(intel_dp);
- }
-
intel_enable_dp(encoder);
-
- vlv_wait_port_ready(dev_priv, dport);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2680,6 +2523,15 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
mutex_lock(&dev_priv->dpio_lock);
+ /* allow hardware to manage TX FIFO reset source */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -2715,15 +2567,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
- if (is_edp(intel_dp)) {
- pps_lock(intel_dp);
- vlv_init_panel_power_sequencer(intel_dp);
- pps_unlock(intel_dp);
- }
-
intel_enable_dp(encoder);
-
- vlv_wait_port_ready(dev_priv, dport);
}
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2843,7 +2687,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_VALLEYVIEW(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ else if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2859,7 +2705,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ default:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
+ }
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3095,12 +2952,26 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
/* Program swing deemph */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
@@ -3341,7 +3212,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
@@ -3605,7 +3476,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
- intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@@ -3763,8 +3633,6 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
- intel_edp_panel_vdd_on(intel_dp);
-
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
@@ -3772,8 +3640,6 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
-
- intel_edp_panel_vdd_off(intel_dp, false);
}
static bool
@@ -3787,7 +3653,6 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
- intel_edp_panel_vdd_on(intel_dp);
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
if (buf[0] & DP_MST_CAP) {
DRM_DEBUG_KMS("Sink is MST capable\n");
@@ -3797,7 +3662,6 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
intel_dp->is_mst = false;
}
}
- intel_edp_panel_vdd_off(intel_dp, false);
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
return intel_dp->is_mst;
@@ -3809,26 +3673,48 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
- u8 buf[1];
+ u8 buf;
+ int test_crc_count;
+ int attempts = 6;
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
- if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
+ if (!(buf & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+ return -EIO;
+
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- DP_TEST_SINK_START) < 0)
+ buf | DP_TEST_SINK_START) < 0)
+ return -EIO;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
+ test_crc_count = buf & DP_TEST_COUNT_MASK;
- /* Wait 2 vblanks to be sure we will have the correct CRC value */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ do {
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_TEST_SINK_MISC, &buf) < 0)
+ return -EIO;
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
+
+ if (attempts == 0) {
+ DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
+ return -ETIMEDOUT;
+ }
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
return -EIO;
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+ return -EIO;
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+ buf & ~DP_TEST_SINK_START) < 0)
+ return -EIO;
+
return 0;
}
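
The rewritten intel_dp_sink_crc() no longer blindly waits two vblanks: it records the TEST_CRC count from DP_TEST_SINK_MISC, then polls once per vblank and gives up after six attempts if the sink never produces a fresh CRC. The bounded-poll skeleton, with the DPCD reads stubbed out:

    #include <stdio.h>
    #include <errno.h>

    #define DP_TEST_COUNT_MASK 0x7

    static int read_test_sink_misc(void)
    {
        static int count;
        return ++count;         /* stand-in for a DPCD read */
    }

    static void wait_for_vblank(void) { /* stub */ }

    static int wait_for_new_crc(void)
    {
        int attempts = 6;
        int baseline = read_test_sink_misc() & DP_TEST_COUNT_MASK;

        do {
            int count = read_test_sink_misc() & DP_TEST_COUNT_MASK;
            if (count != baseline)
                return 0;       /* sink computed a fresh CRC */
            wait_for_vblank();
        } while (--attempts);

        return -ETIMEDOUT;
    }

    int main(void)
    {
        printf("%d\n", wait_for_new_crc());
        return 0;
    }
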
@@ -4456,9 +4342,52 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
pps_unlock(intel_dp);
}
+static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+ power_domain = intel_display_port_power_domain(&intel_dig_port->base);
+ intel_display_power_get(dev_priv, power_domain);
+
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
- intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
+ struct intel_dp *intel_dp;
+
+ if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
+ return;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ pps_lock(intel_dp);
+
+ /*
+ * Read out the current power sequencer assignment,
+ * in case the BIOS did something with it.
+ */
+ if (IS_VALLEYVIEW(encoder->dev))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ intel_edp_panel_vdd_sanitize(intel_dp);
+
+ pps_unlock(intel_dp);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -4645,16 +4574,20 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *out)
+ struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_power_seq cur, vbt, spec, final;
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps_delays;
u32 pp_on, pp_off, pp_div, pp;
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
lockdep_assert_held(&dev_priv->pps_mutex);
+ /* already initialized? */
+ if (final->t11_t12 != 0)
+ return;
+
if (HAS_PCH_SPLIT(dev)) {
pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
@@ -4716,7 +4649,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
/* Use the max of the register settings and vbt. If both are
* unset, fall back to the spec limits. */
-#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
+#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
spec.field : \
max(cur.field, vbt.field))
assign_final(t1_t3);
@@ -4726,7 +4659,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
assign_final(t11_t12);
#undef assign_final
-#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
+#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
intel_dp->panel_power_up_delay = get_delay(t1_t3);
intel_dp->backlight_on_delay = get_delay(t8);
intel_dp->backlight_off_delay = get_delay(t9);
@@ -4740,21 +4673,18 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-
- if (out)
- *out = final;
}
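
The reworked intel_dp_init_panel_power_sequencer() caches the merged delays in intel_dp->pps_delays and returns early once t11_t12 is non-zero, so repeated calls are cheap. The merge rule itself is unchanged: take the larger of the current-register and VBT values, falling back to the eDP spec limits only when both are zero. In plain C:

    #include <stdio.h>

    struct edp_power_seq { int t1_t3, t8, t9, t10, t11_t12; };

    static int max_int(int a, int b) { return a > b ? a : b; }

    /* max(cur, vbt), falling back to spec when both are unset (0) */
    static int merge_delay(int cur, int vbt, int spec)
    {
        int m = max_int(cur, vbt);
        return m ? m : spec;
    }

    int main(void)
    {
        struct edp_power_seq cur = { .t1_t3 = 0 },
                             vbt = { .t1_t3 = 0 },
                             spec = { .t1_t3 = 2100 }, final;

        final.t1_t3 = merge_delay(cur.t1_t3, vbt.t1_t3, spec.t1_t3);
        printf("t1_t3 = %d\n", final.t1_t3);    /* 2100: spec fallback */
        return 0;
    }
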
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *seq)
+ struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
int pp_on_reg, pp_off_reg, pp_div_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
+ const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4837,7 +4767,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
* hard to tell without seeing the user of this code.
* Check locking and ordering once that lands.
*/
- if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
+ if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
}
@@ -4940,40 +4870,8 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
return downclock_mode;
}
-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
-{
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp *intel_dp;
- enum intel_display_power_domain power_domain;
-
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- return;
-
- intel_dp = enc_to_intel_dp(&intel_encoder->base);
-
- pps_lock(intel_dp);
-
- if (!edp_have_panel_vdd(intel_dp))
- goto out;
- /*
- * The VDD bit needs a power domain reference, so if the bit is
- * already enabled when we boot or resume, grab this reference and
- * schedule a vdd off, so we don't hold on to the reference
- * indefinitely.
- */
- DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
- power_domain = intel_display_port_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
-
- edp_panel_vdd_schedule_off(intel_dp);
- out:
- pps_unlock(intel_dp);
-}
-
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
- struct intel_connector *intel_connector,
- struct edp_power_seq *power_seq)
+ struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -4985,18 +4883,19 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
+ enum pipe pipe = INVALID_PIPE;
intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
if (!is_edp(intel_dp))
return true;
- intel_edp_panel_vdd_sanitize(intel_encoder);
+ pps_lock(intel_dp);
+ intel_edp_panel_vdd_sanitize(intel_dp);
+ pps_unlock(intel_dp);
/* Cache DPCD and EDID for edp. */
- intel_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
- intel_edp_panel_vdd_off(intel_dp, false);
if (has_dpcd) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -5011,7 +4910,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
/* We now know it's not a ghost, init power sequence regs. */
pps_lock(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
pps_unlock(intel_dp);
mutex_lock(&dev->mode_config.mutex);
@@ -5053,11 +4952,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (IS_VALLEYVIEW(dev)) {
intel_dp->edp_notifier.notifier_call = edp_notify_handler;
register_reboot_notifier(&intel_dp->edp_notifier);
+
+ /*
+ * Figure out the current pipe for the initial backlight setup.
+ * If the current pipe isn't valid, try the PPS pipe, and if that
+ * fails just assume pipe A.
+ */
+ if (IS_CHERRYVIEW(dev))
+ pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
+ else
+ pipe = PORT_TO_PIPE(intel_dp->DP);
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = intel_dp->pps_pipe;
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = PIPE_A;
+
+ DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
+ pipe_name(pipe));
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight_power = intel_edp_backlight_power;
- intel_panel_setup_backlight(connector);
+ intel_panel_setup_backlight(connector, pipe);
return true;
}
@@ -5072,13 +4990,14 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
- struct edp_power_seq power_seq = { 0 };
int type;
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
- if (IS_VALLEYVIEW(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+ else if (IS_VALLEYVIEW(dev))
intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -5087,7 +5006,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
- intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+ if (INTEL_INFO(dev)->gen >= 9)
+ intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+ else
+ intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
@@ -5106,6 +5028,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
if (type == DRM_MODE_CONNECTOR_eDP)
intel_encoder->type = INTEL_OUTPUT_EDP;
+ /* eDP only on port B and/or C on vlv/chv */
+ if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
+ return false;
+
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
port_name(port));
@@ -5148,13 +5075,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
if (is_edp(intel_dp)) {
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev)) {
+ intel_dp_init_panel_power_timestamps(intel_dp);
+ if (IS_VALLEYVIEW(dev))
vlv_initial_power_sequencer_setup(intel_dp);
- } else {
- intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_init_panel_power_sequencer(dev, intel_dp,
- &power_seq);
- }
+ else
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
pps_unlock(intel_dp);
}
@@ -5168,7 +5093,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
}
- if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
+ if (!intel_edp_init_connector(intel_dp, intel_connector)) {
drm_dp_aux_unregister(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index d9a7a78..7f8c6a6 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -278,20 +278,12 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_mst_port_dp_detect(struct drm_connector *connector)
+intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
-}
-
-static enum drm_connector_status
-intel_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- enum drm_connector_status status;
- status = intel_mst_port_dp_detect(connector);
- return status;
+ return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
}
static int
@@ -393,7 +385,7 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
#endif
}
-static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
+static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -422,6 +414,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_dp_add_properties(intel_dp, connector);
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
+
drm_mode_connector_set_path_property(connector, pathprop);
drm_reinit_primary_mode_group(dev);
mutex_lock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ba71522..25fdbb1 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_rect.h>
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
@@ -93,18 +94,20 @@
/* these are outputs from the chip - integrated only
external chips are via DVO or SDVO output */
-#define INTEL_OUTPUT_UNUSED 0
-#define INTEL_OUTPUT_ANALOG 1
-#define INTEL_OUTPUT_DVO 2
-#define INTEL_OUTPUT_SDVO 3
-#define INTEL_OUTPUT_LVDS 4
-#define INTEL_OUTPUT_TVOUT 5
-#define INTEL_OUTPUT_HDMI 6
-#define INTEL_OUTPUT_DISPLAYPORT 7
-#define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_DSI 9
-#define INTEL_OUTPUT_UNKNOWN 10
-#define INTEL_OUTPUT_DP_MST 11
+enum intel_output_type {
+ INTEL_OUTPUT_UNUSED = 0,
+ INTEL_OUTPUT_ANALOG = 1,
+ INTEL_OUTPUT_DVO = 2,
+ INTEL_OUTPUT_SDVO = 3,
+ INTEL_OUTPUT_LVDS = 4,
+ INTEL_OUTPUT_TVOUT = 5,
+ INTEL_OUTPUT_HDMI = 6,
+ INTEL_OUTPUT_DISPLAYPORT = 7,
+ INTEL_OUTPUT_EDP = 8,
+ INTEL_OUTPUT_DSI = 9,
+ INTEL_OUTPUT_UNKNOWN = 10,
+ INTEL_OUTPUT_DP_MST = 11,
+};
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -135,7 +138,7 @@ struct intel_encoder {
*/
struct intel_crtc *new_crtc;
- int type;
+ enum intel_output_type type;
unsigned int cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
@@ -240,6 +243,17 @@ typedef struct dpll {
int p;
} intel_clock_t;
+struct intel_plane_state {
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_rect src;
+ struct drm_rect dst;
+ struct drm_rect clip;
+ struct drm_rect orig_src;
+ struct drm_rect orig_dst;
+ bool visible;
+};
+
struct intel_plane_config {
bool tiled;
int size;
@@ -278,6 +292,9 @@ struct intel_crtc_config {
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
+	/* Are we sending infoframes on the attached port? */
+ bool has_infoframe;
+
/* CPU Transcoder for the pipe. Currently this can only differ from the
* pipe on Haswell (where we have a special eDP transcoder). */
enum transcoder cpu_transcoder;
@@ -326,7 +343,10 @@ struct intel_crtc_config {
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
- /* PORT_CLK_SEL for DDI ports. */
+ /*
+ * - PORT_CLK_SEL for DDI ports on HSW/BDW.
+ * - enum skl_dpll on SKL
+ */
uint32_t ddi_pll_sel;
/* Actual register state of the dpll, for shared dpll cross-checking. */
@@ -387,7 +407,14 @@ struct intel_pipe_wm {
struct intel_mmio_flip {
u32 seqno;
- u32 ring_id;
+ struct intel_engine_cs *ring;
+ struct work_struct work;
+};
+
+struct skl_pipe_wm {
+ struct skl_wm_level wm[8];
+ struct skl_wm_level trans_wm;
+ uint32_t linetime;
};
struct intel_crtc {
@@ -437,6 +464,8 @@ struct intel_crtc {
struct {
/* watermarks currently being used */
struct intel_pipe_wm active;
+ /* SKL wm values currently in use */
+ struct skl_pipe_wm skl_active;
} wm;
int scanline_offset;
@@ -529,6 +558,7 @@ struct intel_hdmi {
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode);
+ bool (*infoframe_enabled)(struct drm_encoder *encoder);
};
struct intel_dp_mst_encoder;
@@ -578,6 +608,7 @@ struct intel_dp {
* this port. Only relevant on VLV/CHV.
*/
enum pipe pps_pipe;
+ struct edp_power_seq pps_delays;
bool use_tps3;
bool can_mst; /* this port supports mst */
@@ -734,32 +765,47 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+/*
+ * Returns the number of planes for this pipe, i.e. the number of sprites + 1
+ * (the primary plane). The cursor plane is not counted.
+ */
+static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
+{
+ return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
+}
-/* i915_irq.c */
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+/* intel_fifo_underrun.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
-bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable);
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum transcoder pch_transcoder);
+void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
+
+/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
-void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+void gen6_reset_rps_interrupts(struct drm_device *dev);
+void gen6_enable_rps_interrupts(struct drm_device *dev);
+void gen6_disable_rps_interrupts(struct drm_device *dev);
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
/*
* We only use drm_irq_uninstall() at unload and VT switch, so
* this is the only thing we need to check.
*/
- return !dev_priv->pm._irqs_disabled;
+ return dev_priv->pm.irqs_enabled;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void i9xx_check_fifo_underruns(struct drm_device *dev);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
/* intel_crt.c */
@@ -792,11 +838,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
-/* intel_display.c */
-const char *intel_output_name(int output);
-bool intel_has_pending_fb_unpin(struct drm_device *dev);
-int intel_pch_rawclk(struct drm_device *dev);
-void intel_mark_busy(struct drm_device *dev);
+/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
@@ -806,7 +848,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
/**
- * intel_frontbuffer_flip - prepare frontbuffer flip
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
@@ -824,6 +866,18 @@ void intel_frontbuffer_flip(struct drm_device *dev,
}
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+
+
+/* intel_audio.c */
+void intel_init_audio(struct drm_device *dev);
+void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_disable(struct intel_encoder *encoder);
+
+/* intel_display.c */
+const char *intel_output_name(int output);
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
@@ -844,7 +898,12 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
-void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
+static inline void
+intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ drm_wait_one_vblank(dev, pipe);
+}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport);
@@ -854,8 +913,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
-int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
+int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
@@ -877,7 +936,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
void intel_put_shared_dpll(struct intel_crtc *crtc);
+void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+ const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+
/* modesetting asserts */
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -889,13 +954,12 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-void intel_write_eld(struct drm_encoder *encoder,
- struct drm_display_mode *mode);
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
-void intel_display_handle_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_device *dev);
+void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -908,7 +972,6 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
-void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
@@ -936,25 +999,18 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_edp_psr_enable(struct intel_dp *intel_dp);
-void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-void intel_edp_psr_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits);
-void intel_edp_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits);
-void intel_edp_psr_init(struct drm_device *dev);
-
-int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_bw(struct intel_dp *intel_dp);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
+void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
+
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1044,7 +1100,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector);
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1054,6 +1110,41 @@ extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
+void intel_backlight_register(struct drm_device *dev);
+void intel_backlight_unregister(struct drm_device *dev);
+
+
+/* intel_psr.c */
+bool intel_psr_is_enabled(struct drm_device *dev);
+void intel_psr_enable(struct intel_dp *intel_dp);
+void intel_psr_disable(struct intel_dp *intel_dp);
+void intel_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_psr_init(struct drm_device *dev);
+
+/* intel_runtime_pm.c */
+int intel_power_domains_init(struct drm_i915_private *);
+void intel_power_domains_fini(struct drm_i915_private *);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+
+void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
@@ -1072,17 +1163,6 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
-int intel_power_domains_init(struct drm_i915_private *);
-void intel_power_domains_remove(struct drm_i915_private *);
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
@@ -1093,14 +1173,10 @@ void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
-void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
-void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
+void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */);
/* intel_sdvo.c */
@@ -1120,7 +1196,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-
+bool intel_pipe_update_start(struct intel_crtc *crtc,
+ uint32_t *start_vbl_count);
+void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 5bd9e09..0b18407 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -344,7 +344,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
/* XXX: this only works for one DSI output */
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 9b584f3..850cf7d 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -119,25 +119,25 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
goto out;
}
- /* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
- if (ret) {
- DRM_ERROR("failed to pin obj: %d\n", ret);
- goto out_unref;
- }
-
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
- goto out_unpin;
+ goto out_unref;
+ }
+
+ /* Flush everything out, we'll be doing GTT only from now on */
+ ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin obj: %d\n", ret);
+ goto out_fb;
}
ifbdev->fb = to_intel_framebuffer(fb);
return 0;
-out_unpin:
- i915_gem_object_ggtt_unpin(obj);
+out_fb:
+ drm_framebuffer_remove(fb);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
@@ -324,6 +324,7 @@ intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
struct drm_device *dev = fb_helper->dev;
@@ -332,6 +333,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
+ uint64_t conn_configured = 0, mask;
+ int pass = 0;
save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL);
@@ -339,7 +342,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
return false;
memcpy(save_enabled, enabled, dev->mode_config.num_connector);
-
+ mask = (1 << fb_helper->connector_count) - 1;
+retry:
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -349,12 +353,19 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
+ if (conn_configured & (1 << i))
+ continue;
+
+ if (pass == 0 && !connector->has_tile)
+ continue;
+
if (connector->status == connector_status_connected)
num_connectors_detected++;
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
+ conn_configured |= (1 << i);
continue;
}
@@ -373,6 +384,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
+ conn_configured |= (1 << i);
continue;
}
@@ -400,8 +412,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
/* try for preferred next */
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
- connector->name);
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
+ connector->name, connector->has_tile);
modes[i] = drm_has_preferred_mode(fb_conn, width,
height);
}
@@ -444,6 +456,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
fallback = false;
+ conn_configured |= (1 << i);
+ }
+
+ if ((conn_configured & mask) != mask) {
+ pass++;
+ goto retry;
}
/*
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
new file mode 100644
index 0000000..77af512
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: fifo underrun handling
+ *
+ * The i915 driver checks for display fifo underruns using the interrupt signals
+ * provided by the hardware. This is enabled by default and fairly useful for
+ * debugging display issues, especially watermark settings.
+ *
+ * If an underrun is detected it is logged into dmesg. To avoid flooding the
+ * logs and occupying the CPU, underrun interrupts are disabled after the first
+ * occurrence until the next modeset on a given pipe.
+ *
+ * Note that underrun detection on gmch platforms is a bit uglier since there
+ * is no dedicated interrupt (even though the signalling bit lives in the
+ * PIPESTAT pipe interrupt register). Also, on some other platforms underrun
+ * interrupts are shared, which means that if we detect an underrun we need to
+ * disable underrun reporting on all pipes.
+ *
+ * The code also supports underrun detection on the PCH transcoder.
+ */
+
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ for_each_pipe(dev_priv, pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ return false;
+ }
+
+ return true;
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ for_each_pipe(dev_priv, pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (crtc->pch_fifo_underrun_disabled)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * i9xx_check_fifo_underruns - check for fifo underruns
+ * @dev_priv: i915 device instance
+ *
+ * This function checks for fifo underruns on GMCH platforms. This needs to be
+ * done manually on modeset to make sure that we catch all underruns since they
+ * do not generate an interrupt by themselves on these platforms.
+ */
+void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ for_each_intel_crtc(dev_priv->dev, crtc) {
+ u32 reg = PIPESTAT(crtc->pipe);
+ u32 pipestat;
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ continue;
+
+ pipestat = I915_READ(reg) & 0xffff0000;
+ if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ continue;
+
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+
+ DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+ }
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & 0xffff0000;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (enable) {
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+ } else {
+ if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
+}
+
+static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
+ DE_PIPEB_FIFO_UNDERRUN;
+
+ if (enable)
+ ironlake_enable_display_irq(dev_priv, bit);
+ else
+ ironlake_disable_display_irq(dev_priv, bit);
+}
+
+static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (enable) {
+ I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
+ if (!ivb_can_enable_err_int(dev))
+ return;
+
+ ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ } else {
+ ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+ if (old &&
+ I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ DRM_ERROR("uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
+ }
+ }
+}
+
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (enable)
+ dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+ else
+ dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+
+ if (enable)
+ ibx_enable_display_interrupt(dev_priv, bit);
+ else
+ ibx_disable_display_interrupt(dev_priv, bit);
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (enable) {
+ I915_WRITE(SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
+ if (!cpt_can_enable_serr_int(dev))
+ return;
+
+ ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ } else {
+ ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+ if (old && I915_READ(SERR_INT) &
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+ DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+ transcoder_name(pch_transcoder));
+ }
+ }
+}
+
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool old;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ old = !intel_crtc->cpu_fifo_underrun_disabled;
+ intel_crtc->cpu_fifo_underrun_disabled = !enable;
+
+ if (HAS_GMCH_DISPLAY(dev))
+ i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
+ ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+ else if (IS_GEN7(dev))
+ ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ else if (IS_GEN8(dev) || IS_GEN9(dev))
+ broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+
+ return old;
+}
+
+/**
+ * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ * @enable: whether underruns should be reported or not
+ *
+ * This function sets the fifo underrun state for @pipe. It is used in the
+ * modeset code to avoid false positives since on many platforms underruns are
+ * expected when disabling or enabling the pipe.
+ *
+ * Notice that on some platforms disabling underrun reports for one pipe
+ * disables them for all pipes due to shared interrupts. Actual reporting is
+ * still per-pipe though.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool enable)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
+ enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return ret;
+}
+
+static bool
+__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return !intel_crtc->cpu_fifo_underrun_disabled;
+}
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: whether underruns should be reported or not
+ *
+ * This function makes us disable or enable PCH fifo underruns for a specific
+ * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+ * underrun reporting for one transcoder may also disable all the other PCH
+ * error interrupts for the other transcoders, because there is just one
+ * interrupt mask/enable bit for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum transcoder pch_transcoder,
+ bool enable)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long flags;
+ bool old;
+
+ /*
+ * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+ * has only one pch transcoder A that all pipes can use. To avoid racy
+ * pch transcoder -> pipe lookups from interrupt code, simply store the
+ * underrun statistics in crtc A. Since we never expose this anywhere
+ * nor use it outside of the fifo underrun code here, using the "wrong"
+ * crtc on LPT won't cause issues.
+ */
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ old = !intel_crtc->pch_fifo_underrun_disabled;
+ intel_crtc->pch_fifo_underrun_disabled = !enable;
+
+ if (HAS_PCH_IBX(dev_priv->dev))
+ ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+ enable);
+ else
+ cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+ enable, old);
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ return old;
+}
+
+/**
+ * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe that triggered the underrun
+ *
+ * This handles a CPU fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled, and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ /* GMCH can't disable fifo underruns, filter them. */
+ if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
+ !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
+ return;
+
+ if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
+ DRM_ERROR("CPU pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+}
+
+/**
+ * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ *
+ * This handles a PCH fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled, and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum transcoder pch_transcoder)
+{
+ if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
+ false))
+ DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+ transcoder_name(pch_transcoder));
+}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
new file mode 100644
index 0000000..79f6d72
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+/**
+ * DOC: frontbuffer tracking
+ *
+ * Many features require us to track changes to the currently active
+ * frontbuffer, especially rendering targeted at the frontbuffer.
+ *
+ * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
+ * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
+ * then called when the contents of the frontbuffer are invalidated, when
+ * frontbuffer rendering has stopped again to flush out all the changes and when
+ * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
+ * into the relevant places and filter for the frontbuffer slots that they are
+ * interested in.
+ *
+ * At a high level there are two types of powersaving features. The first type
+ * works like a special cache (FBC and PSR) and is interested in when it should
+ * stop caching and when to restart caching. This is done by placing callbacks
+ * into the invalidate and the flush functions: at invalidate time the caching
+ * must be stopped and at flush time it can be restarted. Such features may also
+ * need to know when the frontbuffer changes (e.g. when the hw doesn't initiate
+ * an invalidate and flush on its own), which can be achieved by placing
+ * callbacks into the flip functions.
+ *
+ * The other type of display power saving feature only cares about busyness
+ * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
+ * busyness. There is no direct way to detect idleness. Instead a delayed work
+ * with an idle timeout should be started from the flush and flip functions and
+ * cancelled as soon as busyness is detected.
+ *
+ * Note that there's also an older frontbuffer activity tracking scheme which
+ * just tracks general activity. This is done by the various mark_busy and
+ * mark_idle functions. For display power management features, using these
+ * functions is deprecated and should be avoided.
+ */
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_reg = DPLL(pipe);
+ int dpll;
+
+ if (!HAS_GMCH_DISPLAY(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ dpll = I915_READ(dpll_reg);
+ if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+ DRM_DEBUG_DRIVER("upclocking LVDS\n");
+
+ assert_panel_unlocked(dev_priv, pipe);
+
+ dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ intel_wait_for_vblank(dev, pipe);
+
+ dpll = I915_READ(dpll_reg);
+ if (dpll & DISPLAY_RATE_SELECT_FPA1)
+ DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+ }
+}
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used to, e.g., keep the update rate at the nominal refresh rate with DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+ unsigned frontbuffer_bits,
+ struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+
+ if (!i915.powersave)
+ return;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
+ continue;
+
+ intel_increase_pllclock(dev, pipe);
+ if (ring && intel_fbc_enabled(dev))
+ ring->fbc_dirty = true;
+ }
+}
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ if (ring) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.busy_bits
+ |= obj->frontbuffer_bits;
+ dev_priv->fb_tracking.flip_bits
+ &= ~obj->frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+ intel_psr_invalidate(dev, obj->frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Delay flushing when rings are still busy.*/
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+ intel_psr_flush(dev, frontbuffer_bits);
+
+ /*
+ * FIXME: Unconditional fbc flushing here is a rather gross hack and
+ * needs to be reworked into a proper frontbuffer tracking scheme like
+ * psr employs.
+ */
+ if (dev_priv->fbc.need_sw_cache_clean) {
+ dev_priv->fbc.need_sw_cache_clean = false;
+ bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
+ }
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned frontbuffer_bits;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ frontbuffer_bits = obj->frontbuffer_bits;
+
+ if (retire) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. The
+ * actual frontbuffer flushing will be delayed until completion is signalled
+ * with intel_frontbuffer_flip_complete. If an invalidate happens in between,
+ * this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+ /* Remove stale busy bits due to the old buffer. */
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Mask any cancelled flips. */
+ frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+ dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 29ec153..3abc200 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -166,6 +166,19 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
+static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ u32 val = I915_READ(VIDEO_DIP_CTL);
+
+ if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
+ return val & VIDEO_DIP_ENABLE;
+
+ return false;
+}
+
static void ibx_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -204,6 +217,17 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
+static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ return val & VIDEO_DIP_ENABLE;
+}
+
static void cpt_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -245,6 +269,17 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
+static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ return val & VIDEO_DIP_ENABLE;
+}
+
static void vlv_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -283,6 +318,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
+static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ return val & VIDEO_DIP_ENABLE;
+}
+
static void hsw_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -320,6 +366,18 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(ctl_reg);
}
+static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 val = I915_READ(ctl_reg);
+
+ return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW);
+}
+
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@@ -661,14 +719,6 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (crtc->config.has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (crtc->config.has_audio) {
- WARN_ON(!crtc->config.has_hdmi_sink);
- DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(crtc->pipe));
- hdmi_val |= SDVO_AUDIO_ENABLE;
- intel_write_eld(&encoder->base, adjusted_mode);
- }
-
if (HAS_PCH_CPT(dev))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else if (IS_CHERRYVIEW(dev))
@@ -690,7 +740,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(intel_hdmi->hdmi_reg);
@@ -732,6 +782,9 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
+ if (intel_hdmi->infoframe_enabled(&encoder->base))
+ pipe_config->has_infoframe = true;
+
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
@@ -791,6 +844,13 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
+
+ if (intel_crtc->config.has_audio) {
+ WARN_ON(!intel_crtc->config.has_hdmi_sink);
+ DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ intel_audio_codec_enable(encoder);
+ }
}
static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -802,9 +862,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+ if (crtc->config.has_audio)
+ intel_audio_codec_disable(encoder);
+
temp = I915_READ(intel_hdmi->hdmi_reg);
/* HW workaround for IBX, we need to move the port to transcoder A
@@ -922,6 +986,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
+ if (pipe_config->has_hdmi_sink)
+ pipe_config->has_infoframe = true;
+
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (pipe_config->has_hdmi_sink &&
@@ -1394,10 +1461,13 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
@@ -1405,6 +1475,15 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_lock(&dev_priv->dpio_lock);
+ /* allow hardware to manage TX FIFO reset source */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -1441,12 +1520,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
for (i = 0; i < 4; i++) {
@@ -1499,6 +1592,10 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
+ intel_hdmi->set_infoframes(&encoder->base,
+ intel_crtc->config.has_hdmi_sink,
+ adjusted_mode);
+
intel_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, dport);
@@ -1593,18 +1690,23 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
+ intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
} else if (IS_G4X(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
+ intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
+ intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
+ intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
} else {
intel_hdmi->write_infoframe = cpt_write_infoframe;
intel_hdmi->set_infoframes = cpt_set_infoframes;
+ intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bafd38b..e588376 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -136,11 +136,10 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_ALIGN 4096
-
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
#define RING_EXECLIST0_VALID (1 << 0x4)
@@ -204,6 +203,9 @@ enum {
};
#define GEN8_CTX_ID_SHIFT 32
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+ struct intel_context *ctx);
+
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
* @dev: DRM device.
@@ -219,6 +221,9 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
{
WARN_ON(i915.enable_ppgtt == -1);
+ if (INTEL_INFO(dev)->gen >= 9)
+ return 1;
+
if (enable_execlists == 0)
return 0;
@@ -275,7 +280,8 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj0,
struct drm_i915_gem_object *ctx_obj1)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t temp = 0;
uint32_t desc[4];
unsigned long flags;
@@ -300,13 +306,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
* Instead, we do the runtime_pm_get/put when creating/destroying requests.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->uncore.fw_rendercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_RENDER);
if (dev_priv->uncore.fw_mediacount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_MEDIA);
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (dev_priv->uncore.fw_blittercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
} else {
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
@@ -325,13 +336,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
/* Release Force Wakeup (see the big comment above). */
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (--dev_priv->uncore.fw_rendercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_RENDER);
if (--dev_priv->uncore.fw_mediacount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_MEDIA);
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (--dev_priv->uncore.fw_blittercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
} else {
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
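
For illustration, a minimal standalone sketch of the reference-counted forcewake pattern in the two hunks above; the struct and helper names are stand-ins rather than the driver's uncore API. Only the 0->1 transition of a domain's count asserts the wake, and only the 1->0 transition releases it, so nested users are cheap:

#include <stdio.h>

/* Illustrative stand-in for the per-domain wake counts. */
struct fw_domains {
        unsigned render, media, blitter;
};

static void fw_get(unsigned *count, const char *name)
{
        if ((*count)++ == 0)                    /* 0 -> 1: assert */
                printf("assert forcewake: %s\n", name);
}

static void fw_put(unsigned *count, const char *name)
{
        if (--(*count) == 0)                    /* 1 -> 0: release */
                printf("release forcewake: %s\n", name);
}

int main(void)
{
        struct fw_domains fw = { 0, 0, 0 };

        fw_get(&fw.render, "render");   /* asserts the wake */
        fw_get(&fw.render, "render");   /* nested: count only */
        fw_put(&fw.render, "render");   /* still held */
        fw_put(&fw.render, "render");   /* releases the wake */
        return 0;
}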
@@ -341,7 +357,9 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
}
-static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
+static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
+ struct drm_i915_gem_object *ring_obj,
+ u32 tail)
{
struct page *page;
uint32_t *reg_state;
@@ -350,43 +368,45 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tai
reg_state = kmap_atomic(page);
reg_state[CTX_RING_TAIL+1] = tail;
+ reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
kunmap_atomic(reg_state);
return 0;
}
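
The context image is one page of (register, value) dword pairs, so updating it amounts to two array stores at fixed slots: the new ring tail, and (now that pinning is deferred) the ring buffer's GGTT start. A hedged sketch with placeholder offsets; CTX_RING_TAIL and CTX_RING_BUFFER_START below are invented for the example, not the real reg-state layout:

#include <stdint.h>
#include <stdio.h>

#define CTX_RING_TAIL           0x06    /* placeholder slot */
#define CTX_RING_BUFFER_START   0x08    /* placeholder slot */

static void update_context(uint32_t *reg_state, uint32_t tail,
                           uint64_t ring_ggtt)
{
        /* slot N is the register offset, slot N+1 its value */
        reg_state[CTX_RING_TAIL + 1] = tail;
        reg_state[CTX_RING_BUFFER_START + 1] = (uint32_t)ring_ggtt;
}

int main(void)
{
        uint32_t reg_state[1024] = { 0 };

        update_context(reg_state, 0x120, 0x100000);
        printf("tail=%#x start=%#x\n",
               reg_state[CTX_RING_TAIL + 1],
               reg_state[CTX_RING_BUFFER_START + 1]);
        return 0;
}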
-static int execlists_submit_context(struct intel_engine_cs *ring,
- struct intel_context *to0, u32 tail0,
- struct intel_context *to1, u32 tail1)
+static void execlists_submit_contexts(struct intel_engine_cs *ring,
+ struct intel_context *to0, u32 tail0,
+ struct intel_context *to1, u32 tail1)
{
- struct drm_i915_gem_object *ctx_obj0;
+ struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
struct drm_i915_gem_object *ctx_obj1 = NULL;
+ struct intel_ringbuffer *ringbuf1 = NULL;
- ctx_obj0 = to0->engine[ring->id].state;
BUG_ON(!ctx_obj0);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
+ WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
- execlists_ctx_write_tail(ctx_obj0, tail0);
+ execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
if (to1) {
+ ringbuf1 = to1->engine[ring->id].ringbuf;
ctx_obj1 = to1->engine[ring->id].state;
BUG_ON(!ctx_obj1);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
+ WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
- execlists_ctx_write_tail(ctx_obj1, tail1);
+ execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
}
execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
-
- return 0;
}
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
assert_spin_locked(&ring->execlist_lock);
@@ -403,7 +423,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_del(&req0->execlist_link);
- queue_work(dev_priv->wq, &req0->work);
+ list_add_tail(&req0->execlist_link,
+ &ring->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
@@ -413,9 +434,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
WARN_ON(req1 && req1->elsp_submitted);
- WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
- req1 ? req1->ctx : NULL,
- req1 ? req1->tail : 0));
+ execlists_submit_contexts(ring, req0->ctx, req0->tail,
+ req1 ? req1->ctx : NULL,
+ req1 ? req1->tail : 0);
req0->elsp_submitted++;
if (req1)
@@ -425,7 +446,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_ctx_submit_request *head_req;
assert_spin_locked(&ring->execlist_lock);
@@ -443,7 +463,8 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
if (--head_req->elsp_submitted <= 0) {
list_del(&head_req->execlist_link);
- queue_work(dev_priv->wq, &head_req->work);
+ list_add_tail(&head_req->execlist_link,
+ &ring->execlist_retired_req_list);
return true;
}
}
@@ -512,22 +533,6 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
((u32)ring->next_context_status_buffer & 0x07) << 8);
}
-static void execlists_free_request_task(struct work_struct *work)
-{
- struct intel_ctx_submit_request *req =
- container_of(work, struct intel_ctx_submit_request, work);
- struct drm_device *dev = req->ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_runtime_pm_put(dev_priv);
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_context_unreference(req->ctx);
- mutex_unlock(&dev->struct_mutex);
-
- kfree(req);
-}
-
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
u32 tail)
@@ -542,9 +547,12 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
return -ENOMEM;
req->ctx = to;
i915_gem_context_reference(req->ctx);
+
+ if (to != ring->default_context)
+ intel_lr_context_pin(ring, to);
+
req->ring = ring;
req->tail = tail;
- INIT_WORK(&req->work, execlists_free_request_task);
intel_runtime_pm_get(dev_priv);
@@ -563,9 +571,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
if (to == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
- "More than 2 already-submitted reqs queued\n");
+ "More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
- queue_work(dev_priv->wq, &tail_req->work);
+ list_add_tail(&tail_req->execlist_link,
+ &ring->execlist_retired_req_list);
}
}
@@ -733,6 +742,36 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return 0;
}
+void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+{
+ struct intel_ctx_submit_request *req, *tmp;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ unsigned long flags;
+ struct list_head retired_list;
+
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ if (list_empty(&ring->execlist_retired_req_list))
+ return;
+
+ INIT_LIST_HEAD(&retired_list);
+ spin_lock_irqsave(&ring->execlist_lock, flags);
+ list_replace_init(&ring->execlist_retired_req_list, &retired_list);
+ spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+ list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
+ struct intel_context *ctx = req->ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ ctx->engine[ring->id].state;
+
+ if (ctx_obj && (ctx != ring->default_context))
+ intel_lr_context_unpin(ring, ctx);
+ intel_runtime_pm_put(dev_priv);
+ i915_gem_context_unreference(req->ctx);
+ list_del(&req->execlist_link);
+ kfree(req);
+ }
+}
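
The function above uses a standard drain idiom: steal the whole shared list in O(1) while holding the lock, then do the slow unpin/unreference/free work with the lock dropped. A userspace sketch of the same shape with plain pthreads and a hand-rolled singly linked list (nothing here is i915 code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
        int id;
        struct req *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *retired;     /* shared, lock-protected */

static void retire_all(void)
{
        struct req *list, *tmp;

        /* Steal the whole list in O(1) under the lock... */
        pthread_mutex_lock(&lock);
        list = retired;
        retired = NULL;
        pthread_mutex_unlock(&lock);

        /* ...then do the potentially slow cleanup unlocked. */
        while (list) {
                tmp = list->next;
                printf("retiring request %d\n", list->id);
                free(list);
                list = tmp;
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct req *r = malloc(sizeof(*r));

                r->id = i;
                r->next = retired;
                retired = r;
        }
        retire_all();
        return 0;
}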
+
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -793,9 +832,55 @@ void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
execlists_context_queue(ring, ctx, ringbuf->tail);
}
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ int ret = 0;
+
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ if (ctx->engine[ring->id].unpin_count++ == 0) {
+ ret = i915_gem_obj_ggtt_pin(ctx_obj,
+ GEN8_LR_CONTEXT_ALIGN, 0);
+ if (ret)
+ goto reset_unpin_count;
+
+ ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+ if (ret)
+ goto unpin_ctx_obj;
+ }
+
+ return ret;
+
+unpin_ctx_obj:
+ i915_gem_object_ggtt_unpin(ctx_obj);
+reset_unpin_count:
+ ctx->engine[ring->id].unpin_count = 0;
+
+ return ret;
+}
+
+void intel_lr_context_unpin(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+
+ if (ctx_obj) {
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ if (--ctx->engine[ring->id].unpin_count == 0) {
+ intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ }
+ }
+}
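
Pin/unpin is a bare acquire/release count: the first pin pays for the GGTT pin and ringbuffer map, the last unpin undoes them, and a failed first pin resets the count so a later attempt can retry. A self-contained sketch of that shape, with the expensive operations reduced to stubs:

#include <stdio.h>

static int pin_count;

static int backing_pin(void)    { printf("pin backing objects\n"); return 0; }
static void backing_unpin(void) { printf("unpin backing objects\n"); }

static int ctx_pin(void)
{
        int ret = 0;

        if (pin_count++ == 0) {
                ret = backing_pin();    /* only the first user pays */
                if (ret)
                        pin_count = 0;  /* reset on failure */
        }
        return ret;
}

static void ctx_unpin(void)
{
        if (--pin_count == 0)
                backing_unpin();        /* last user releases */
}

int main(void)
{
        ctx_pin();      /* pins */
        ctx_pin();      /* nested, no-op */
        ctx_unpin();    /* still pinned */
        ctx_unpin();    /* unpins */
        return 0;
}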
+
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
+ int ret;
+
if (ring->outstanding_lazy_seqno)
return 0;
@@ -806,6 +891,14 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
if (request == NULL)
return -ENOMEM;
+ if (ctx != ring->default_context) {
+ ret = intel_lr_context_pin(ring, ctx);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+ }
+
/* Hold a reference to the context this request belongs to
* (we will need it when the time comes to emit/retire the
* request).
@@ -991,6 +1084,44 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
return 0;
}
+static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ int ret, i;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_workarounds *w = &dev_priv->workarounds;
+
+ if (WARN_ON(w->count == 0))
+ return 0;
+
+ ring->gpu_caches_dirty = true;
+ ret = logical_ring_flush_all_caches(ringbuf);
+ if (ret)
+ return ret;
+
+ ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
+ if (ret)
+ return ret;
+
+ intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+ for (i = 0; i < w->count; i++) {
+ intel_logical_ring_emit(ringbuf, w->reg[i].addr);
+ intel_logical_ring_emit(ringbuf, w->reg[i].value);
+ }
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+
+ intel_logical_ring_advance(ringbuf);
+
+ ring->gpu_caches_dirty = true;
+ ret = logical_ring_flush_all_caches(ringbuf);
+ if (ret)
+ return ret;
+
+ return 0;
+}
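
The batch emitted above is a single MI_LOAD_REGISTER_IMM header, w->count (register, value) pairs, and one trailing NOOP, which is exactly the w->count * 2 + 2 dwords reserved. A sketch building the same dword stream into an ordinary array; the MI encodings below follow the generic MI_INSTR layout but are illustrative, not copied from i915_reg.h:

#include <stdint.h>
#include <stdio.h>

struct wa_reg { uint32_t addr, value; };

/* Schematic MI encodings: opcode in bits 28:23, dword length below. */
#define MI_LRI(n)       ((0x22u << 23) | (2 * (n) - 1))
#define MI_NOOP         0x00000000u

static int emit_workarounds(uint32_t *cs, const struct wa_reg *w, int count)
{
        int n = 0;

        cs[n++] = MI_LRI(count);                /* one header for all writes */
        for (int i = 0; i < count; i++) {
                cs[n++] = w[i].addr;            /* register offset */
                cs[n++] = w[i].value;           /* immediate value */
        }
        cs[n++] = MI_NOOP;                      /* pad to the reserved size */
        return n;                               /* == count * 2 + 2 */
}

int main(void)
{
        const struct wa_reg w[] = { { 0x7004, 0x1 }, { 0x7300, 0x80 } };
        uint32_t cs[16];
        int n = emit_workarounds(cs, w, 2);

        printf("emitted %d dwords\n", n);       /* prints 6 */
        return 0;
}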
+
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -1034,7 +1165,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- return ret;
+ return init_workarounds_ring(ring);
}
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
@@ -1063,7 +1194,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1214,11 +1345,13 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
*/
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv;
if (!intel_ring_initialized(ring))
return;
+ dev_priv = ring->dev->dev_private;
+
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
ring->preallocated_lazy_request = NULL;
@@ -1248,6 +1381,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
init_waitqueue_head(&ring->irq_queue);
INIT_LIST_HEAD(&ring->execlist_queue);
+ INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
ring->next_context_status_buffer = 0;
@@ -1282,6 +1416,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
ring->init = gen8_init_render_ring;
+ ring->init_context = intel_logical_ring_workarounds_emit;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
@@ -1495,7 +1630,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *ring_obj = ringbuf->obj;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct page *page;
uint32_t *reg_state;
@@ -1541,7 +1675,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
reg_state[CTX_RING_TAIL+1] = 0;
reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
- reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+ /* Ring buffer start address is not known until the buffer is pinned.
+ * It is written to the context image in execlists_update_context()
+ */
reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
reg_state[CTX_RING_BUFFER_CONTROL+1] =
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
@@ -1617,12 +1753,18 @@ void intel_lr_context_free(struct intel_context *ctx)
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
if (ctx_obj) {
+ struct intel_ringbuffer *ringbuf =
+ ctx->engine[i].ringbuf;
+ struct intel_engine_cs *ring = ringbuf->ring;
+
+ if (ctx == ring->default_context) {
+ intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ }
intel_destroy_ringbuffer_obj(ringbuf);
kfree(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
}
}
@@ -1632,11 +1774,14 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
int ret = 0;
- WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
+ WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
switch (ring->id) {
case RCS:
- ret = GEN8_LR_CONTEXT_RENDER_SIZE;
+ if (INTEL_INFO(ring->dev)->gen >= 9)
+ ret = GEN9_LR_CONTEXT_RENDER_SIZE;
+ else
+ ret = GEN8_LR_CONTEXT_RENDER_SIZE;
break;
case VCS:
case BCS:
@@ -1649,6 +1794,23 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
return ret;
}
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *default_ctx_obj)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ /* The status page is offset 0 from the default context object
+ * in LRC mode. */
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
+ ring->status_page.page_addr =
+ kmap(sg_page(default_ctx_obj->pages->sgl));
+ ring->status_page.obj = default_ctx_obj;
+
+ I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+ (u32)ring->status_page.gfx_addr);
+ POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+}
+
/**
* intel_lr_context_deferred_create() - create the LRC specific bits of a context
* @ctx: LR context to create.
@@ -1660,11 +1822,12 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
* the creation is a deferred call: it's better to make sure first that we need to use
* a given ring with the context.
*
- * Return: non-zero on eror.
+ * Return: non-zero on error.
*/
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
+ const bool is_global_default_ctx = (ctx == ring->default_context);
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
@@ -1684,21 +1847,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
return ret;
}
- ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
- drm_gem_object_unreference(&ctx_obj->base);
- return ret;
+ if (is_global_default_ctx) {
+ ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
+ ret);
+ drm_gem_object_unreference(&ctx_obj->base);
+ return ret;
+ }
}
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
ring->name);
- i915_gem_object_ggtt_unpin(ctx_obj);
- drm_gem_object_unreference(&ctx_obj->base);
ret = -ENOMEM;
- return ret;
+ goto error_unpin_ctx;
}
ringbuf->ring = ring;
@@ -1711,46 +1875,51 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
ringbuf->space = ringbuf->size;
ringbuf->last_retired_head = -1;
- /* TODO: For now we put this in the mappable region so that we can reuse
- * the existing ringbuffer code which ioremaps it. When we start
- * creating many contexts, this will no longer work and we must switch
- * to a kmapish interface.
- */
- ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
+ if (ringbuf->obj == NULL) {
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_DEBUG_DRIVER(
+ "Failed to allocate ringbuffer obj %s: %d\n",
ring->name, ret);
- goto error;
+ goto error_free_rbuf;
+ }
+
+ if (is_global_default_ctx) {
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR(
+ "Failed to pin and map ringbuffer %s: %d\n",
+ ring->name, ret);
+ goto error_destroy_rbuf;
+ }
+ }
}
ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
- intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
- if (ctx == ring->default_context) {
- /* The status page is offset 0 from the default context object
- * in LRC mode. */
- ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(ctx_obj);
- ring->status_page.page_addr =
- kmap(sg_page(ctx_obj->pages->sgl));
- if (ring->status_page.page_addr == NULL)
- return -ENOMEM;
- ring->status_page.obj = ctx_obj;
- }
+ if (ctx == ring->default_context)
+ lrc_setup_hardware_status_page(ring, ctx_obj);
if (ring->id == RCS && !ctx->rcs_initialized) {
+ if (ring->init_context) {
+ ret = ring->init_context(ring, ctx);
+ if (ret)
+ DRM_ERROR("ring init context: %d\n", ret);
+ }
+
ret = intel_lr_context_render_state_init(ring, ctx);
if (ret) {
DRM_ERROR("Init render state failed: %d\n", ret);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
- intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
ctx->rcs_initialized = true;
@@ -1759,8 +1928,15 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
return 0;
error:
+ if (is_global_default_ctx)
+ intel_unpin_ringbuffer_obj(ringbuf);
+error_destroy_rbuf:
+ intel_destroy_ringbuffer_obj(ringbuf);
+error_free_rbuf:
kfree(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
+error_unpin_ctx:
+ if (is_global_default_ctx)
+ i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 33c3b4b..14b216b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -24,6 +24,8 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
+#define GEN8_LR_CONTEXT_ALIGN 4096
+
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
@@ -67,6 +69,8 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
+void intel_lr_context_unpin(struct intel_engine_cs *ring,
+ struct intel_context *ctx);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -104,11 +108,11 @@ struct intel_ctx_submit_request {
u32 tail;
struct list_head execlist_link;
- struct work_struct work;
int elsp_submitted;
};
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+void intel_execlists_retire_requests(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c0bbf21..14654d6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -76,7 +76,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(lvds_encoder->reg);
@@ -1116,7 +1116,7 @@ out:
drm_connector_register(connector);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_panel_setup_backlight(connector);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 41b3be2..4d63839 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -521,6 +521,9 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return 0;
+
return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
@@ -536,15 +539,17 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val;
- unsigned long flags;
+ struct intel_panel *panel = &connector->panel;
+ u32 val = 0;
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
- val = dev_priv->display.get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
+ if (panel->backlight.enabled) {
+ val = dev_priv->display.get_backlight(connector);
+ val = intel_panel_compute_brightness(connector, val);
+ }
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
@@ -603,6 +608,9 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
@@ -626,14 +634,12 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
- unsigned long flags;
- if (!panel->backlight.present || pipe == INVALID_PIPE)
+ if (!panel->backlight.present)
return;
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@@ -643,7 +649,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
/* set backlight brightness to level in range [0..max], assuming hw min is
@@ -657,12 +663,17 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
- unsigned long flags;
+ /*
+ * INVALID_PIPE may occur during driver init because
+ * connection_mutex isn't held across the entire backlight
+ * setup + modeset readout, and the BIOS can issue the
+ * requests at any time.
+ */
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@@ -678,7 +689,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
static void pch_disable_backlight(struct intel_connector *connector)
@@ -720,6 +731,9 @@ static void vlv_disable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
intel_panel_actually_set_backlight(connector, 0);
tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@@ -731,10 +745,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = intel_get_pipe_from_connector(connector);
- unsigned long flags;
- if (!panel->backlight.present || pipe == INVALID_PIPE)
+ if (!panel->backlight.present)
return;
/*
@@ -748,14 +760,14 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
return;
}
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
dev_priv->display.disable_backlight(connector);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
static void bdw_enable_backlight(struct intel_connector *connector)
@@ -779,8 +791,9 @@ static void bdw_enable_backlight(struct intel_connector *connector)
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
- /* BDW always uses the pch pwm controls. */
- pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+ /* After LPT, override is the default. */
+ if (HAS_PCH_LPT(dev_priv))
+ pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
POSTING_READ(BLC_PWM_PCH_CTL1);
@@ -909,6 +922,9 @@ static void vlv_enable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("backlight already enabled\n");
@@ -936,14 +952,13 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
- unsigned long flags;
- if (!panel->backlight.present || pipe == INVALID_PIPE)
+ if (!panel->backlight.present)
return;
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@@ -961,7 +976,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@ -1030,6 +1045,9 @@ static int intel_backlight_device_register(struct intel_connector *connector)
if (WARN_ON(panel->backlight.device))
return -ENODEV;
+ if (!panel->backlight.present)
+ return 0;
+
WARN_ON(panel->backlight.max == 0);
memset(&props, 0, sizeof(props));
@@ -1065,6 +1083,10 @@ static int intel_backlight_device_register(struct intel_connector *connector)
panel->backlight.device = NULL;
return -ENODEV;
}
+
+ DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
+ connector->base.name);
+
return 0;
}
@@ -1119,7 +1141,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
return scale(min, 0, 255, 0, panel->backlight.max);
}
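
get_backlight_min_vbt() maps the VBT minimum from the fixed 0..255 scale onto the panel's 0..max PWM range. The remap is plain linear interpolation; a hedged reimplementation of such a scale helper (generic arithmetic, not the driver's helper):

#include <stdint.h>
#include <stdio.h>

/* Linearly remap v from [smin, smax] onto [dmin, dmax]. */
static uint32_t scale_range(uint32_t v, uint32_t smin, uint32_t smax,
                            uint32_t dmin, uint32_t dmax)
{
        /* 64-bit intermediate so large PWM ranges cannot overflow */
        return dmin + (uint32_t)((uint64_t)(v - smin) * (dmax - dmin) /
                                 (smax - smin));
}

int main(void)
{
        /* VBT min 26/255 on a PWM whose max duty cycle is 7812 */
        printf("%u\n", (unsigned)scale_range(26, 0, 255, 0, 7812)); /* 796 */
        return 0;
}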
-static int bdw_setup_backlight(struct intel_connector *connector)
+static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1145,7 +1167,7 @@ static int bdw_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int pch_setup_backlight(struct intel_connector *connector)
+static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1172,7 +1194,7 @@ static int pch_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int i9xx_setup_backlight(struct intel_connector *connector)
+static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1204,7 +1226,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int i965_setup_backlight(struct intel_connector *connector)
+static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1234,37 +1256,40 @@ static int i965_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int vlv_setup_backlight(struct intel_connector *connector)
+static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
- enum pipe pipe;
+ enum pipe p;
u32 ctl, ctl2, val;
- for_each_pipe(dev_priv, pipe) {
- u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+ for_each_pipe(dev_priv, p) {
+ u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
/* Skip if the modulation freq is already set */
if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
continue;
cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+ I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
cur_val);
}
- ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return -ENODEV;
+
+ ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+ ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
return -ENODEV;
panel->backlight.min = get_backlight_min_vbt(connector);
- val = _vlv_get_backlight(dev, PIPE_A);
+ val = _vlv_get_backlight(dev, pipe);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
@@ -1273,13 +1298,12 @@ static int vlv_setup_backlight(struct intel_connector *connector)
return 0;
}
-int intel_panel_setup_backlight(struct drm_connector *connector)
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel;
- unsigned long flags;
int ret;
if (!dev_priv->vbt.backlight.present) {
@@ -1292,9 +1316,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
}
/* set level and max in panel struct */
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
- ret = dev_priv->display.setup_backlight(intel_connector);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
+ ret = dev_priv->display.setup_backlight(intel_connector, pipe);
+ mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
@@ -1302,15 +1326,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
return ret;
}
- intel_backlight_device_register(intel_connector);
-
panel->backlight.present = true;
- DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
- "sysfs interface %sregistered\n",
+ DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
+ connector->name,
panel->backlight.enabled ? "enabled" : "disabled",
- panel->backlight.level, panel->backlight.max,
- panel->backlight.device ? "" : "not ");
+ panel->backlight.level, panel->backlight.max);
return 0;
}
@@ -1321,7 +1342,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
struct intel_panel *panel = &intel_connector->panel;
panel->backlight.present = false;
- intel_backlight_device_unregister(intel_connector);
}
/* Set up chip specific backlight functions */
@@ -1329,7 +1349,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
dev_priv->display.setup_backlight = bdw_setup_backlight;
dev_priv->display.enable_backlight = bdw_enable_backlight;
dev_priv->display.disable_backlight = pch_disable_backlight;
@@ -1384,3 +1404,19 @@ void intel_panel_fini(struct intel_panel *panel)
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
}
+
+void intel_backlight_register(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+ intel_backlight_device_register(connector);
+}
+
+void intel_backlight_unregister(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+ intel_backlight_device_unregister(connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ad2fd60..1f4b56e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,9 +30,6 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
-#include <linux/vgaarb.h>
-#include <drm/i915_powerwell.h>
-#include <linux/pm_runtime.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
@@ -66,11 +63,37 @@
* i915.i915_enable_fbc parameter
*/
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * WaDisableSDEUnitClockGating:skl
+ * This seems to be a pre-production w/a.
+ */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * WaDisableDgMirrorFixInHalfSliceChicken5:skl
+ * This is a pre-production w/a.
+ */
+ I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
+ I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
+ ~GEN9_DG_MIRROR_FIX_ENABLE);
+
+ /* Wa4x4STCOptimizationDisable:skl */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+}
+
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
+ dev_priv->fbc.enabled = false;
+
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
@@ -99,6 +122,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
int i;
u32 fbc_ctl;
+ dev_priv->fbc.enabled = true;
+
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
@@ -153,6 +178,8 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = true;
+
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
@@ -173,6 +200,8 @@ static void g4x_disable_fbc(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = false;
+
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
@@ -224,6 +253,8 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = true;
+
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
@@ -264,6 +295,8 @@ static void ironlake_disable_fbc(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = false;
+
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
@@ -290,6 +323,8 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = true;
+
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
@@ -339,19 +374,19 @@ bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!dev_priv->display.fbc_enabled)
- return false;
-
- return dev_priv->display.fbc_enabled(dev);
+ return dev_priv->fbc.enabled;
}
-void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_GEN8(dev))
return;
+ if (!intel_fbc_enabled(dev))
+ return;
+
I915_WRITE(MSG_FBC_REND_STATE, value);
}
@@ -1310,6 +1345,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
int *prec_mult,
int *drain_latency)
{
+ struct drm_device *dev = crtc->dev;
int entries;
int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
@@ -1320,8 +1356,12 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
return false;
entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
- DRAIN_LATENCY_PRECISION_32;
+ if (IS_CHERRYVIEW(dev))
+ *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
+ DRAIN_LATENCY_PRECISION_16;
+ else
+ *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
+ DRAIN_LATENCY_PRECISION_32;
*drain_latency = (64 * (*prec_mult) * 4) / entries;
if (*drain_latency > DRAIN_LATENCY_MASK)
@@ -1340,15 +1380,18 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pixel_size;
int drain_latency;
enum pipe pipe = intel_crtc->pipe;
int plane_prec, prec_mult, plane_dl;
+ const int high_precision = IS_CHERRYVIEW(dev) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
- plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
- DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
+ plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
+ DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
(DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
if (!intel_crtc_active(crtc)) {
@@ -1359,9 +1402,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
/* Primary plane Drain Latency */
pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
- DDL_PLANE_PRECISION_64 :
- DDL_PLANE_PRECISION_32;
+ plane_prec = (prec_mult == high_precision) ?
+ DDL_PLANE_PRECISION_HIGH :
+ DDL_PLANE_PRECISION_LOW;
plane_dl |= plane_prec | drain_latency;
}
@@ -1373,9 +1416,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
/* Program cursor DL only if it is enabled */
if (intel_crtc->cursor_base &&
vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
- DDL_CURSOR_PRECISION_64 :
- DDL_CURSOR_PRECISION_32;
+ plane_prec = (prec_mult == high_precision) ?
+ DDL_CURSOR_PRECISION_HIGH :
+ DDL_CURSOR_PRECISION_LOW;
plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
}
@@ -1543,15 +1586,17 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
int plane_prec;
int sprite_dl;
int prec_mult;
+ const int high_precision = IS_CHERRYVIEW(dev) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
- sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
+ sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
(DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
&drain_latency)) {
- plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
- DDL_SPRITE_PRECISION_64(sprite) :
- DDL_SPRITE_PRECISION_32(sprite);
+ plane_prec = (prec_mult == high_precision) ?
+ DDL_SPRITE_PRECISION_HIGH(sprite) :
+ DDL_SPRITE_PRECISION_LOW(sprite);
sprite_dl |= plane_prec |
(drain_latency << DDL_SPRITE_SHIFT(sprite));
}
@@ -1915,6 +1960,14 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
+struct skl_pipe_wm_parameters {
+ bool active;
+ uint32_t pipe_htotal;
+ uint32_t pixel_rate; /* in kHz */
+ struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
+ struct intel_plane_wm_parameters cursor;
+};
+
struct ilk_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
@@ -2226,11 +2279,82 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
PIPE_WM_LINETIME_TIME(linetime);
}
-static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_GEN9(dev)) {
+ uint32_t val;
+ int ret, i;
+ int level, max_level = ilk_wm_max_level(dev);
+
+ /* read the first set of memory latencies[0:3] */
+ val = 0; /* data0 to be programmed to 0 for first set */
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_read(dev_priv,
+ GEN9_PCODE_READ_MEM_LATENCY,
+ &val);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (ret) {
+ DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+
+ /* read the second set of memory latencies[4:7] */
+ val = 1; /* data0 to be programmed to 1 for second set */
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_read(dev_priv,
+ GEN9_PCODE_READ_MEM_LATENCY,
+ &val);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+
+ /*
+ * punit doesn't take into account the read latency so we need
+ * to add 2us to the various latency levels we retrieve from
+ * the punit.
+ * - W0 is a bit special in that it's the only level that
+ * can't be disabled if we want to have display working, so
+ * we always add 2us there.
+ * - For levels >=1, punit returns 0us latency when they are
+ * disabled, so we respect that and don't add 2us then
+ *
+ * Additionally, if a level n (n > 1) has a 0us latency, all
+ * levels m (m >= n) need to be disabled. We make sure to
+ * sanitize the values out of the punit to satisfy this
+ * requirement.
+ */
+ wm[0] += 2;
+ for (level = 1; level <= max_level; level++)
+ if (wm[level] != 0)
+ wm[level] += 2;
+ else {
+ for (i = level + 1; i <= max_level; i++)
+ wm[i] = 0;
+
+ break;
+ }
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
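
Each GEN9_PCODE_READ_MEM_LATENCY mailbox read returns four 8-bit latencies packed one per byte, so the two reads above cover levels 0..7; afterwards the values get the +2us read-latency fudge and every level past the first disabled one is forced off. A standalone sketch of that decode-and-sanitize step, with invented sample values:

#include <stdint.h>
#include <stdio.h>

#define LEVEL_MASK      0xff
#define LEVEL_SHIFT(i)  ((i) * 8)       /* one level per byte */

static void decode_latencies(uint16_t wm[8], uint32_t set0, uint32_t set1)
{
        for (int i = 0; i < 4; i++) {
                wm[i]     = (set0 >> LEVEL_SHIFT(i)) & LEVEL_MASK;
                wm[i + 4] = (set1 >> LEVEL_SHIFT(i)) & LEVEL_MASK;
        }

        wm[0] += 2;                     /* WM0 can never be disabled */
        for (int level = 1; level < 8; level++) {
                if (wm[level] == 0) {   /* disabled: disable the rest too */
                        for (int i = level + 1; i < 8; i++)
                                wm[i] = 0;
                        break;
                }
                wm[level] += 2;         /* account for punit read latency */
        }
}

int main(void)
{
        uint16_t wm[8];

        decode_latencies(wm, 0x20100c02, 0x00000045); /* made-up values */
        for (int i = 0; i < 8; i++)
                printf("WM%d: %uus\n", i, (unsigned)wm[i]);
        return 0;
}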
@@ -2278,7 +2402,9 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ return 7;
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 4;
else if (INTEL_INFO(dev)->gen >= 6)
return 3;
@@ -2288,7 +2414,7 @@ int ilk_wm_max_level(const struct drm_device *dev)
static void intel_print_wm_latency(struct drm_device *dev,
const char *name,
- const uint16_t wm[5])
+ const uint16_t wm[8])
{
int level, max_level = ilk_wm_max_level(dev);
@@ -2301,8 +2427,13 @@ static void intel_print_wm_latency(struct drm_device *dev,
continue;
}
- /* WM1+ latency values in 0.5us units */
- if (level > 0)
+ /*
+ * - latencies are in us on gen9.
+ * - before then, WM1+ latency values are in 0.5us units
+ */
+ if (IS_GEN9(dev))
+ latency *= 10;
+ else if (level > 0)
latency *= 5;
DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
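
The print normalizes every latency to tenths of a microsecond: gen9 values are whole microseconds (so x10), pre-gen9 WM1+ values are 0.5us units (so x5), and pre-gen9 WM0 is taken as already being in tenths, matching the code above. As a worked conversion:

#include <stdio.h>

/* Normalize a raw latency value to 0.1us units for printing. */
static unsigned latency_tenths(unsigned raw, int level, int is_gen9)
{
        if (is_gen9)
                return raw * 10;        /* gen9: 1us units */
        if (level > 0)
                return raw * 5;         /* pre-gen9 WM1+: 0.5us units */
        return raw;                     /* pre-gen9 WM0: already tenths */
}

int main(void)
{
        unsigned t = latency_tenths(4, 2, 0);   /* 4 * 0.5us = 2.0us */

        printf("WM2 latency %u.%u usec\n", t / 10, t % 10);
        return 0;
}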
@@ -2370,6 +2501,14 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
snb_wm_latency_quirk(dev);
}
+static void skl_setup_wm_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
+ intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
+}
+
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
struct ilk_pipe_wm_parameters *p)
{
@@ -2860,6 +2999,769 @@ static bool ilk_disable_lp_wm(struct drm_device *dev)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
+/*
+ * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
+ * different active planes.
+ */
+
+#define SKL_DDB_SIZE 896 /* in blocks */
+
+static void
+skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+ struct drm_crtc *for_crtc,
+ const struct intel_wm_config *config,
+ const struct skl_pipe_wm_parameters *params,
+ struct skl_ddb_entry *alloc /* out */)
+{
+ struct drm_crtc *crtc;
+ unsigned int pipe_size, ddb_size;
+ int nth_active_pipe;
+
+ if (!params->active) {
+ alloc->start = 0;
+ alloc->end = 0;
+ return;
+ }
+
+ ddb_size = SKL_DDB_SIZE;
+
+ ddb_size -= 4; /* 4 blocks for bypass path allocation */
+
+ nth_active_pipe = 0;
+ for_each_crtc(dev, crtc) {
+ if (!intel_crtc_active(crtc))
+ continue;
+
+ if (crtc == for_crtc)
+ break;
+
+ nth_active_pipe++;
+ }
+
+ pipe_size = ddb_size / config->num_pipes_active;
+ alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
+ alloc->end = alloc->start + pipe_size;
+}
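
Numerically: 896 DDB blocks minus the 4-block bypass reservation leaves 892, split evenly by active-pipe index, so two active pipes get [0, 446) and [446, 892). The same slicing, detached from the crtc walk:

#include <stdio.h>

#define SKL_DDB_SIZE    896     /* in blocks */

struct ddb_entry { unsigned start, end; };

static void pipe_alloc(struct ddb_entry *e, int nth_active, int num_active)
{
        unsigned ddb_size = SKL_DDB_SIZE - 4;   /* bypass path reservation */
        unsigned pipe_size = ddb_size / num_active;

        e->start = nth_active * ddb_size / num_active;
        e->end = e->start + pipe_size;
}

int main(void)
{
        struct ddb_entry e;

        for (int i = 0; i < 2; i++) {
                pipe_alloc(&e, i, 2);   /* two active pipes: 446 blocks each */
                printf("pipe %d: [%u, %u)\n", i, e.start, e.end);
        }
        return 0;
}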
+
+static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
+{
+ if (config->num_pipes_active == 1)
+ return 32;
+
+ return 8;
+}
+
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+{
+ entry->start = reg & 0x3ff;
+ entry->end = (reg >> 16) & 0x3ff;
+ if (entry->end)
+ entry->end += 1;
+}
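
The hardware register packs start in bits 9:0 and an inclusive end in bits 25:16, with an all-zero register meaning no allocation; the helper above and skl_ddb_entry_write() further down are inverses. A round-trip sketch of that encoding:

#include <stdint.h>
#include <stdio.h>

struct ddb_entry { uint16_t start, end; };

static uint32_t entry_pack(const struct ddb_entry *e)
{
        return e->end ? ((uint32_t)(e->end - 1) << 16) | e->start : 0;
}

static void entry_unpack(struct ddb_entry *e, uint32_t reg)
{
        e->start = reg & 0x3ff;
        e->end = (reg >> 16) & 0x3ff;
        if (e->end)
                e->end += 1;    /* hw stores an inclusive end */
}

int main(void)
{
        struct ddb_entry in = { .start = 446, .end = 892 }, out;
        uint32_t reg = entry_pack(&in);

        entry_unpack(&out, reg);
        printf("reg=%#x -> [%u, %u)\n", (unsigned)reg,
               (unsigned)out.start, (unsigned)out.end);
        return 0;
}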
+
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe pipe;
+ int plane;
+ u32 val;
+
+ for_each_pipe(dev_priv, pipe) {
+ for_each_plane(pipe, plane) {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane));
+ skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
+ val);
+ }
+
+ val = I915_READ(CUR_BUF_CFG(pipe));
+ skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
+ }
+}
+
+static unsigned int
+skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
+{
+ return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
+}
+
+/*
+ * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
+ * a 8192x4096@32bpp framebuffer:
+ * 3 * 4096 * 8192 * 4 < 2^32
+ */
+static unsigned int
+skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
+ const struct skl_pipe_wm_parameters *params)
+{
+ unsigned int total_data_rate = 0;
+ int plane;
+
+ for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
+ const struct intel_plane_wm_parameters *p;
+
+ p = &params->plane[plane];
+ if (!p->enabled)
+ continue;
+
+ total_data_rate += skl_plane_relative_data_rate(p);
+ }
+
+ return total_data_rate;
+}
+
+static void
+skl_allocate_pipe_ddb(struct drm_crtc *crtc,
+ const struct intel_wm_config *config,
+ const struct skl_pipe_wm_parameters *params,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
+ uint16_t alloc_size, start, cursor_blocks;
+ unsigned int total_data_rate;
+ int plane;
+
+ skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
+ alloc_size = skl_ddb_entry_size(alloc);
+ if (alloc_size == 0) {
+ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
+ return;
+ }
+
+ cursor_blocks = skl_cursor_allocation(config);
+ ddb->cursor[pipe].start = alloc->end - cursor_blocks;
+ ddb->cursor[pipe].end = alloc->end;
+
+ alloc_size -= cursor_blocks;
+ alloc->end -= cursor_blocks;
+
+ /*
+ * Each active plane gets a portion of the remaining space, in
+ * proportion to the amount of data they need to fetch from memory.
+ *
+ * FIXME: we may not allocate every single block here.
+ */
+ total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
+
+ start = alloc->start;
+ for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
+ const struct intel_plane_wm_parameters *p;
+ unsigned int data_rate;
+ uint16_t plane_blocks;
+
+ p = &params->plane[plane];
+ if (!p->enabled)
+ continue;
+
+ data_rate = skl_plane_relative_data_rate(p);
+
+ /*
+ * promote the expression to 64 bits to avoid overflowing; the
+ * result is always < alloc_size since data_rate / total_data_rate < 1
+ */
+ plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
+ total_data_rate);
+
+ ddb->plane[pipe][plane].start = start;
+ ddb->plane[pipe][plane].end = start + plane_blocks;
+
+ start += plane_blocks;
+ }
+}
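
Each enabled plane then gets alloc_size * data_rate / total_data_rate blocks, carved out sequentially from the start of the pipe's allocation, with the product promoted to 64 bits as the comment explains. A compact numeric sketch of the split (the plane sizes are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* relative data rates: width * height * bytes_per_pixel */
        uint32_t rate[3] = {
                1920u * 1080 * 4,       /* primary, 32bpp */
                1920u * 1080 * 2,       /* sprite, 16bpp */
                256u * 256 * 4,         /* small overlay */
        };
        uint32_t alloc_size = 414;      /* blocks left after the cursor */
        uint64_t total = 0;
        uint32_t start = 0;

        for (int i = 0; i < 3; i++)
                total += rate[i];

        for (int i = 0; i < 3; i++) {
                /* 64-bit product; result < alloc_size as rate/total < 1 */
                uint32_t blocks =
                        (uint32_t)((uint64_t)alloc_size * rate[i] / total);

                printf("plane %d: [%u, %u)\n", i,
                       (unsigned)start, (unsigned)(start + blocks));
                start += blocks;        /* rounding may leave blocks unused */
        }
        return 0;
}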
+
+static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
+{
+ /* TODO: Take into account the scalers once we support them */
+ return config->adjusted_mode.crtc_clock;
+}
+
+/*
+ * The max latency should be 257 (the max the punit can encode is 255 and we add 2us
+ * for the read latency) and bytes_per_pixel should always be <= 8, so that
+ * should allow pixel_rate up to ~2 GHz which seems sufficient since max
+ * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
+ */
+static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+ uint32_t latency)
+{
+ uint32_t wm_intermediate_val, ret;
+
+ if (latency == 0)
+ return UINT_MAX;
+
+ wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
+ ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
+
+ return ret;
+}
+
+static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+ uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+ uint32_t latency)
+{
+ uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
+
+ if (latency == 0)
+ return UINT_MAX;
+
+ plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+ wm_intermediate_val = latency * pixel_rate;
+ ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
+ plane_bytes_per_line;
+
+ return ret;
+}
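
Method 1 is the raw fill during the latency window (latency in us times pixel rate in kHz times bytes per pixel, divided by 1000), while method 2 first rounds the latency up to whole scanlines of the plane; skl_compute_plane_wm() below picks the smaller of the two whenever at least one full line fits in the plane's DDB share. A worked evaluation with 1080p-ish numbers:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* pixel_rate in kHz, latency in us -> bytes fetched during the latency */
static uint32_t method1(uint32_t pixel_rate, uint8_t bpp, uint32_t latency)
{
        return DIV_ROUND_UP(latency * pixel_rate * bpp, 1000);
}

/* same window, but rounded up to whole scanlines of the plane */
static uint32_t method2(uint32_t pixel_rate, uint32_t htotal,
                        uint32_t width, uint8_t bpp, uint32_t latency)
{
        uint32_t bytes_per_line = width * bpp;

        return DIV_ROUND_UP(latency * pixel_rate, htotal * 1000) *
               bytes_per_line;
}

int main(void)
{
        /* 148500 kHz pixel clock, htotal 2200, 1920 wide, 4us latency */
        uint32_t m1 = method1(148500, 4, 4);
        uint32_t m2 = method2(148500, 2200, 1920, 4, 4);

        printf("method1 = %u bytes, method2 = %u bytes\n",
               (unsigned)m1, (unsigned)m2);     /* 2376 vs 7680 */
        return 0;
}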
+
+static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
+ const struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ enum pipe pipe = intel_crtc->pipe;
+
+ if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
+ sizeof(new_ddb->plane[pipe])))
+ return true;
+
+ if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
+ sizeof(new_ddb->cursor[pipe])))
+ return true;
+
+ return false;
+}
+
+static void skl_compute_wm_global_parameters(struct drm_device *dev,
+ struct intel_wm_config *config)
+{
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ config->num_pipes_active += intel_crtc_active(crtc);
+
+ /* FIXME: I don't think we need those two global parameters on SKL */
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ config->sprites_enabled |= intel_plane->wm.enabled;
+ config->sprites_scaled |= intel_plane->wm.scaled;
+ }
+}
+
+static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
+ struct skl_pipe_wm_parameters *p)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_plane *plane;
+ int i = 1; /* Index for sprite planes start */
+
+ p->active = intel_crtc_active(crtc);
+ if (p->active) {
+ p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+ p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
+
+ /*
+ * For now, assume primary and cursor planes are always enabled.
+ */
+ p->plane[0].enabled = true;
+ p->plane[0].bytes_per_pixel =
+ crtc->primary->fb->bits_per_pixel / 8;
+ p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
+ p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
+
+ p->cursor.enabled = true;
+ p->cursor.bytes_per_pixel = 4;
+ p->cursor.horiz_pixels = intel_crtc->cursor_width ?
+ intel_crtc->cursor_width : 64;
+ }
+
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ if (intel_plane->pipe == pipe)
+ p->plane[i++] = intel_plane->wm;
+ }
+}
+
+static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
+ struct intel_plane_wm_parameters *p_params,
+ uint16_t ddb_allocation,
+ uint32_t mem_value,
+ uint16_t *out_blocks, /* out */
+ uint8_t *out_lines /* out */)
+{
+ uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
+ uint32_t result_bytes;
+
+ if (mem_value == 0 || !p->active || !p_params->enabled)
+ return false;
+
+ method1 = skl_wm_method1(p->pixel_rate,
+ p_params->bytes_per_pixel,
+ mem_value);
+ method2 = skl_wm_method2(p->pixel_rate,
+ p->pipe_htotal,
+ p_params->horiz_pixels,
+ p_params->bytes_per_pixel,
+ mem_value);
+
+ plane_bytes_per_line = p_params->horiz_pixels *
+ p_params->bytes_per_pixel;
+
+ /* For now xtile and linear */
+ if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
+ result_bytes = min(method1, method2);
+ else
+ result_bytes = method1;
+
+ res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
+ res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
+
+ if (res_blocks > ddb_allocation || res_lines > 31)
+ return false;
+
+ *out_blocks = res_blocks;
+ *out_lines = res_lines;
+
+ return true;
+}
+
+static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb,
+ struct skl_pipe_wm_parameters *p,
+ enum pipe pipe,
+ int level,
+ int num_planes,
+ struct skl_wm_level *result)
+{
+ uint16_t latency = dev_priv->wm.skl_latency[level];
+ uint16_t ddb_blocks;
+ int i;
+
+ for (i = 0; i < num_planes; i++) {
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
+
+ result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
+ ddb_blocks,
+ latency,
+ &result->plane_res_b[i],
+ &result->plane_res_l[i]);
+ }
+
+ ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
+ result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
+ latency, &result->cursor_res_b,
+ &result->cursor_res_l);
+}
+
+static uint32_t
+skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
+{
+ if (!intel_crtc_active(crtc))
+ return 0;
+
+ return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+}
+
+static void skl_compute_transition_wm(struct drm_crtc *crtc,
+ struct skl_pipe_wm_parameters *params,
+ struct skl_wm_level *trans_wm /* out */)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int i;
+
+ if (!params->active)
+ return;
+
+ /* Until we know more, just disable transition WMs */
+ for (i = 0; i < intel_num_planes(intel_crtc); i++)
+ trans_wm->plane_en[i] = false;
+ trans_wm->cursor_en = false;
+}
+
+static void skl_compute_pipe_wm(struct drm_crtc *crtc,
+ struct skl_ddb_allocation *ddb,
+ struct skl_pipe_wm_parameters *params,
+ struct skl_pipe_wm *pipe_wm)
+{
+ struct drm_device *dev = crtc->dev;
+ const struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int level, max_level = ilk_wm_max_level(dev);
+
+ for (level = 0; level <= max_level; level++) {
+ skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
+ level, intel_num_planes(intel_crtc),
+ &pipe_wm->wm[level]);
+ }
+ pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
+
+ skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
+}
+
+static void skl_compute_wm_results(struct drm_device *dev,
+ struct skl_pipe_wm_parameters *p,
+ struct skl_pipe_wm *p_wm,
+ struct skl_wm_values *r,
+ struct intel_crtc *intel_crtc)
+{
+ int level, max_level = ilk_wm_max_level(dev);
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t temp;
+ int i;
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = 0;
+
+ temp |= p_wm->wm[level].plane_res_l[i] <<
+ PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->wm[level].plane_res_b[i];
+ if (p_wm->wm[level].plane_en[i])
+ temp |= PLANE_WM_EN;
+
+ r->plane[pipe][i][level] = temp;
+ }
+
+ temp = 0;
+
+ temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->wm[level].cursor_res_b;
+
+ if (p_wm->wm[level].cursor_en)
+ temp |= PLANE_WM_EN;
+
+ r->cursor[pipe][level] = temp;
+ }
+
+ /* transition WMs */
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = 0;
+ temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->trans_wm.plane_res_b[i];
+ if (p_wm->trans_wm.plane_en[i])
+ temp |= PLANE_WM_EN;
+
+ r->plane_trans[pipe][i] = temp;
+ }
+
+ temp = 0;
+ temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->trans_wm.cursor_res_b;
+ if (p_wm->trans_wm.cursor_en)
+ temp |= PLANE_WM_EN;
+
+ r->cursor_trans[pipe] = temp;
+
+ r->wm_linetime[pipe] = p_wm->linetime;
+}
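
Aside (not part of the patch): the packing above puts the block count in the low bits, the line count at a higher shift, and an enable flag on top. The concrete field positions below (enable at bit 31, lines at bit 14, 10-bit blocks, 5-bit lines) are my reading of the PLANE_WM_* defines and should be treated as an assumption; note that a 5-bit lines field is what the `res_lines > 31` rejection earlier protects.

	#include <stdint.h>

	/* Assumed to mirror the PLANE_WM_* defines; illustrative only. */
	#define WM_EN		(1u << 31)
	#define WM_LINES_SHIFT	14
	#define WM_LINES_MASK	0x1f
	#define WM_BLOCKS_MASK	0x3ff

	static uint32_t pack_wm(uint16_t blocks, uint8_t lines, int enabled)
	{
		uint32_t v = ((uint32_t)lines << WM_LINES_SHIFT) | blocks;

		return enabled ? v | WM_EN : v;
	}

	/* skl_pipe_wm_active_state() later in the patch does the inverse: */
	static void unpack_wm(uint32_t v, uint16_t *blocks, uint8_t *lines,
			      int *enabled)
	{
		*blocks = v & WM_BLOCKS_MASK;
		*lines = (v >> WM_LINES_SHIFT) & WM_LINES_MASK;
		*enabled = (v & WM_EN) != 0;
	}
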
+
+static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
+ const struct skl_ddb_entry *entry)
+{
+ if (entry->end)
+ I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+ else
+ I915_WRITE(reg, 0);
+}
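
Aside (not part of the patch): skl_ddb_entry_write() encodes a half-open [start, end) block range with the inclusive end in the high half, and an all-zero write meaning no allocation. A minimal sketch of that encoding:

	#include <stdint.h>

	/* [start, end) in DDB blocks; end == 0 means "nothing allocated". */
	static uint32_t ddb_entry_to_reg(uint16_t start, uint16_t end)
	{
		return end ? ((uint32_t)(end - 1) << 16) | start : 0;
	}

	/* e.g. [0, 160) -> 0x009f0000, [160, 320) -> 0x013f00a0 */
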
+
+static void skl_write_wm_values(struct drm_i915_private *dev_priv,
+ const struct skl_wm_values *new)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ int i, level, max_level = ilk_wm_max_level(dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (!new->dirty[pipe])
+ continue;
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(crtc); i++)
+ I915_WRITE(PLANE_WM(pipe, i, level),
+ new->plane[pipe][i][level]);
+ I915_WRITE(CUR_WM(pipe, level),
+ new->cursor[pipe][level]);
+ }
+ for (i = 0; i < intel_num_planes(crtc); i++)
+ I915_WRITE(PLANE_WM_TRANS(pipe, i),
+ new->plane_trans[pipe][i]);
+ I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
+
+ for (i = 0; i < intel_num_planes(crtc); i++)
+ skl_ddb_entry_write(dev_priv,
+ PLANE_BUF_CFG(pipe, i),
+ &new->ddb.plane[pipe][i]);
+
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+ &new->ddb.cursor[pipe]);
+ }
+}
+
+/*
+ * When setting up a new DDB allocation arrangement, we need to correctly
+ * sequence the times at which the new allocations for the pipes are taken into
+ * account or we'll have pipes fetching from space previously allocated to
+ * another pipe.
+ *
+ * Roughly the sequence looks like:
+ * 1. re-allocate the pipe(s) with the allocation being reduced and not
+ * overlapping with a previous light-up pipe (another way to put it is:
+ * pipes with their new allocation strictly included in their old ones).
+ * 2. re-allocate the other pipes that get their allocation reduced
+ * 3. allocate the pipes having their allocation increased
+ *
+ * Steps 1. and 2. are here to take care of the following case:
+ * - Initially DDB looks like this:
+ * | B | C |
+ * - enable pipe A.
+ * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
+ * allocation
+ * | A | B | C |
+ *
+ * We need to sequence the re-allocation: C, B, A (and not B, C, A).
+ */
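
Aside (not part of the patch): a hypothetical classifier that makes the three passes concrete; skl_ddb_allocation_included() below is the real pass-1 test, the rest is illustrative.

	#include <stdint.h>

	struct ddb_entry { uint16_t start, end; };	/* stand-in for skl_ddb_entry */

	static uint16_t entry_size(const struct ddb_entry *e)
	{
		return e->end - e->start;
	}

	static int flush_pass(const struct ddb_entry *old,
			      const struct ddb_entry *new)
	{
		uint16_t old_size = entry_size(old), new_size = entry_size(new);

		/* Pass 1: allocation changed and sits strictly inside the old one. */
		if (old_size != new_size &&
		    new->start >= old->start && new->end <= old->end)
			return 1;
		/* Pass 2: shrinks, but slides into space another pipe still owns. */
		if (new_size < old_size)
			return 2;
		/* Pass 3: grows; the next vblank simply picks up the extra space. */
		return 3;
	}

	/* In the |B|C| -> |A|B|C| example above: C is pass 1, B pass 2, A pass 3. */
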
+
+static void
+skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int plane;
+
+ DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
+
+ for_each_plane(pipe, plane) {
+ I915_WRITE(PLANE_SURF(pipe, plane),
+ I915_READ(PLANE_SURF(pipe, plane)));
+ }
+ I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+}
+
+static bool
+skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe)
+{
+ uint16_t old_size, new_size;
+
+ old_size = skl_ddb_entry_size(&old->pipe[pipe]);
+ new_size = skl_ddb_entry_size(&new->pipe[pipe]);
+
+ return old_size != new_size &&
+ new->pipe[pipe].start >= old->pipe[pipe].start &&
+ new->pipe[pipe].end <= old->pipe[pipe].end;
+}
+
+static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
+ struct skl_wm_values *new_values)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct skl_ddb_allocation *cur_ddb, *new_ddb;
+ bool reallocated[I915_MAX_PIPES] = {false, false, false};
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ new_ddb = &new_values->ddb;
+ cur_ddb = &dev_priv->wm.skl_hw.ddb;
+
+ /*
+ * First pass: flush the pipes with the new allocation contained
+ * within the old space.
+ *
+ * We'll wait for the vblank on those pipes to ensure we can safely
+ * re-allocate the freed space without those pipes still fetching from it.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (!crtc->active)
+ continue;
+
+ pipe = crtc->pipe;
+
+ if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
+ continue;
+
+ skl_wm_flush_pipe(dev_priv, pipe, 1);
+ intel_wait_for_vblank(dev, pipe);
+
+ reallocated[pipe] = true;
+ }
+
+ /*
+ * Second pass: flush the pipes that are having their allocation
+ * reduced, but overlapping with a previous allocation.
+ *
+ * Here as well we need to wait for the vblank to make sure the freed
+ * space is not used anymore.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (!crtc->active)
+ continue;
+
+ pipe = crtc->pipe;
+
+ if (reallocated[pipe])
+ continue;
+
+ if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
+ skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
+ skl_wm_flush_pipe(dev_priv, pipe, 2);
+ intel_wait_for_vblank(dev, pipe);
+ }
+
+ reallocated[pipe] = true;
+ }
+
+ /*
+ * Third pass: flush the pipes that got more space allocated.
+ *
+ * We don't need to actively wait for the update here, next vblank
+ * will just get more DDB space with the correct WM values.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (!crtc->active)
+ continue;
+
+ pipe = crtc->pipe;
+
+ /*
+ * At this point, only the pipes that got more space than before
+ * are left to re-allocate.
+ */
+ if (reallocated[pipe])
+ continue;
+
+ skl_wm_flush_pipe(dev_priv, pipe, 3);
+ }
+}
+
+static bool skl_update_pipe_wm(struct drm_crtc *crtc,
+ struct skl_pipe_wm_parameters *params,
+ struct intel_wm_config *config,
+ struct skl_ddb_allocation *ddb, /* out */
+ struct skl_pipe_wm *pipe_wm /* out */)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ skl_compute_wm_pipe_parameters(crtc, params);
+ skl_allocate_pipe_ddb(crtc, config, params, ddb);
+ skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
+
+ if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
+ return false;
+
+ intel_crtc->wm.skl_active = *pipe_wm;
+ return true;
+}
+
+static void skl_update_other_pipe_wm(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct intel_wm_config *config,
+ struct skl_wm_values *r)
+{
+ struct intel_crtc *intel_crtc;
+ struct intel_crtc *this_crtc = to_intel_crtc(crtc);
+
+ /*
+ * If the WM update hasn't changed the allocation for this_crtc (the
+ * crtc we are currently computing the new WM values for), other
+ * enabled crtcs will keep the same allocation and we don't need to
+ * recompute anything for them.
+ */
+ if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
+ return;
+
+ /*
+ * Otherwise, because of this_crtc being freshly enabled/disabled, the
+ * other active pipes need new DDB allocation and WM values.
+ */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ struct skl_pipe_wm_parameters params = {};
+ struct skl_pipe_wm pipe_wm = {};
+ bool wm_changed;
+
+ if (this_crtc->pipe == intel_crtc->pipe)
+ continue;
+
+ if (!intel_crtc->active)
+ continue;
+
+ wm_changed = skl_update_pipe_wm(&intel_crtc->base,
+ &params, config,
+ &r->ddb, &pipe_wm);
+
+ /*
+ * If we end up re-computing the other pipe WM values, it's
+ * because it was really needed, so we expect the WM values to
+ * be different.
+ */
+ WARN_ON(!wm_changed);
+
+ skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
+ r->dirty[intel_crtc->pipe] = true;
+ }
+}
+
+static void skl_update_wm(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_pipe_wm_parameters params = {};
+ struct skl_wm_values *results = &dev_priv->wm.skl_results;
+ struct skl_pipe_wm pipe_wm = {};
+ struct intel_wm_config config = {};
+
+ memset(results, 0, sizeof(*results));
+
+ skl_compute_wm_global_parameters(dev, &config);
+
+ if (!skl_update_pipe_wm(crtc, &params, &config,
+ &results->ddb, &pipe_wm))
+ return;
+
+ skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
+ results->dirty[intel_crtc->pipe] = true;
+
+ skl_update_other_pipe_wm(dev, crtc, &config, results);
+ skl_write_wm_values(dev_priv, results);
+ skl_flush_wm_values(dev_priv, results);
+
+ /* store the new configuration */
+ dev_priv->wm.skl_hw = *results;
+}
+
+static void
+skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enabled, bool scaled)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ intel_plane->wm.enabled = enabled;
+ intel_plane->wm.scaled = scaled;
+ intel_plane->wm.horiz_pixels = sprite_width;
+ intel_plane->wm.vert_pixels = sprite_height;
+ intel_plane->wm.bytes_per_pixel = pixel_size;
+
+ skl_update_wm(crtc);
+}
+
static void ilk_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2934,6 +3836,113 @@ ilk_update_sprite_wm(struct drm_plane *plane,
ilk_update_wm(crtc);
}
+static void skl_pipe_wm_active_state(uint32_t val,
+ struct skl_pipe_wm *active,
+ bool is_transwm,
+ bool is_cursor,
+ int i,
+ int level)
+{
+ bool is_enabled = (val & PLANE_WM_EN) != 0;
+
+ if (!is_transwm) {
+ if (!is_cursor) {
+ active->wm[level].plane_en[i] = is_enabled;
+ active->wm[level].plane_res_b[i] =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->wm[level].plane_res_l[i] =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ } else {
+ active->wm[level].cursor_en = is_enabled;
+ active->wm[level].cursor_res_b =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->wm[level].cursor_res_l =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ }
+ } else {
+ if (!is_cursor) {
+ active->trans_wm.plane_en[i] = is_enabled;
+ active->trans_wm.plane_res_b[i] =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->trans_wm.plane_res_l[i] =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ } else {
+ active->trans_wm.cursor_en = is_enabled;
+ active->trans_wm.cursor_res_b =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->trans_wm.cursor_res_l =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ }
+ }
+}
+
+static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
+ enum pipe pipe = intel_crtc->pipe;
+ int level, i, max_level;
+ uint32_t temp;
+
+ max_level = ilk_wm_max_level(dev);
+
+ hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(intel_crtc); i++)
+ hw->plane[pipe][i][level] =
+ I915_READ(PLANE_WM(pipe, i, level));
+ hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
+ }
+
+ for (i = 0; i < intel_num_planes(intel_crtc); i++)
+ hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
+ hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
+
+ if (!intel_crtc_active(crtc))
+ return;
+
+ hw->dirty[pipe] = true;
+
+ active->linetime = hw->wm_linetime[pipe];
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = hw->plane[pipe][i][level];
+ skl_pipe_wm_active_state(temp, active, false,
+ false, i, level);
+ }
+ temp = hw->cursor[pipe][level];
+ skl_pipe_wm_active_state(temp, active, false, true, i, level);
+ }
+
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = hw->plane_trans[pipe][i];
+ skl_pipe_wm_active_state(temp, active, true, false, i, 0);
+ }
+
+ temp = hw->cursor_trans[pipe];
+ skl_pipe_wm_active_state(temp, active, true, true, i, 0);
+}
+
+void skl_wm_get_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
+ struct drm_crtc *crtc;
+
+ skl_ddb_get_hw_state(dev_priv, ddb);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ skl_pipe_wm_get_hw_state(crtc);
+}
+
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3442,7 +4451,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
dev_priv->rps.min_freq_softlimit);
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
- & GENFREQSTATUS) == 0, 5))
+ & GENFREQSTATUS) == 0, 100))
DRM_ERROR("timed out waiting for Punit\n");
vlv_force_gfx_clock(dev_priv, false);
@@ -3495,14 +4504,8 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
"Odd GPU freq value\n"))
val &= ~1;
- if (val != dev_priv->rps.cur_freq) {
- DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
- dev_priv->rps.cur_freq,
- vlv_gpu_freq(dev_priv, val), val);
-
+ if (val != dev_priv->rps.cur_freq)
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
- }
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
@@ -3510,43 +4513,11 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
-static void gen8_disable_rps_interrupts(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
- I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
- ~dev_priv->pm_rps_events);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
- * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
- * gen8_enable_rps will clean up. */
-
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-}
-
-static void gen6_disable_rps_interrupts(struct drm_device *dev)
+static void gen9_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
- ~dev_priv->pm_rps_events);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
-
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
+ I915_WRITE(GEN6_RC_CONTROL, 0);
}
static void gen6_disable_rps(struct drm_device *dev)
@@ -3555,11 +4526,6 @@ static void gen6_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
-
- if (IS_BROADWELL(dev))
- gen8_disable_rps_interrupts(dev);
- else
- gen6_disable_rps_interrupts(dev);
}
static void cherryview_disable_rps(struct drm_device *dev)
@@ -3567,8 +4533,6 @@ static void cherryview_disable_rps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
-
- gen8_disable_rps_interrupts(dev);
}
static void valleyview_disable_rps(struct drm_device *dev)
@@ -3582,8 +4546,6 @@ static void valleyview_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
-
- gen6_disable_rps_interrupts(dev);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
@@ -3594,10 +4556,15 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
else
mode = 0;
}
- DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+ if (HAS_RC6p(dev))
+ DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+
+ else
+ DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
@@ -3614,7 +4581,7 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
if (enable_rc6 >= 0) {
int mask;
- if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+ if (HAS_RC6p(dev))
mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
INTEL_RC6pp_ENABLE;
else
@@ -3642,54 +4609,92 @@ int intel_enable_rc6(const struct drm_device *dev)
return i915.enable_rc6;
}
-static void gen8_enable_rps_interrupts(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON(dev_priv->rps.pm_iir);
- gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void gen6_enable_rps_interrupts(struct drm_device *dev)
+static void gen6_init_rps_frequencies(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t rp_state_cap;
+ u32 ddcc_status = 0;
+ int ret;
- spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON(dev_priv->rps.pm_iir);
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
-{
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
- /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
- dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ /* static values from HW: RP0 > RP1 > RPn (min_freq) */
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
- /* XXX: only BYT has a special efficient freq */
- dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+ dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ ret = sandybridge_pcode_read(dev_priv,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ &ddcc_status);
+ if (ret == 0)
+ dev_priv->rps.efficient_freq =
+ (ddcc_status >> 8) & 0xff;
+ }
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
- if (dev_priv->rps.min_freq_softlimit == 0)
- dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+ if (dev_priv->rps.min_freq_softlimit == 0) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ dev_priv->rps.min_freq_softlimit =
+ /* max(RPe, 450 MHz) */
+ max(dev_priv->rps.efficient_freq, (u8) 9);
+ else
+ dev_priv->rps.min_freq_softlimit =
+ dev_priv->rps.min_freq;
+ }
+}
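
Aside (not part of the patch): the RP codes handled above are in 50 MHz units (the driver's GT_FREQUENCY_MULTIPLIER), which is where the `(u8) 9` floor comes from: 9 * 50 = 450 MHz. A sketch assuming the plain HSW/BDW scaling (VLV/CHV convert via vlv_gpu_freq() instead):

	#include <stdint.h>

	#define GT_FREQUENCY_MULTIPLIER 50	/* MHz per RP code unit */

	static uint32_t rp_code_to_mhz(uint8_t code)
	{
		return (uint32_t)code * GT_FREQUENCY_MULTIPLIER;
	}

	/* max(RPe, 450 MHz): an RPe of 7 (350 MHz) gets floored to code 9. */
	static uint8_t min_freq_softlimit(uint8_t efficient_freq)
	{
		return efficient_freq > 9 ? efficient_freq : 9;
	}
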
+
+static void gen9_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ uint32_t rc6_mask = 0;
+ int unused;
+
+ /* 1a: Software RC state - RC0 */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+ /* 1b: Get forcewake during program sequence. Although the driver
+ * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 2a: Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ /* 2b: Program RC6 thresholds.*/
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_ring(ring, dev_priv, unused)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+ /* 3a: Enable RC6 */
+ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+ "on" : "off");
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
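
Aside (not part of the patch): per the `* 1280ns` comments, these RC6 registers count in 1.28 us ticks. Converting the values programmed in the sequence above:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t rc6_ticks_to_us(uint32_t ticks)
	{
		return (uint64_t)ticks * 1280 / 1000;
	}

	int main(void)
	{
		/* 25 ticks -> 32 us idle hysteresis */
		printf("hysteresis: %llu us\n",
		       (unsigned long long)rc6_ticks_to_us(25));
		/* 37500 ticks -> 48000 us (48 ms) RC6 promotion threshold */
		printf("threshold:  %llu us\n",
		       (unsigned long long)rc6_ticks_to_us(37500));
		return 0;
	}
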
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- uint32_t rc6_mask = 0, rp_state_cap;
+ uint32_t rc6_mask = 0;
int unused;
/* 1a: Software RC state - RC0 */
@@ -3702,8 +4707,8 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- parse_rp_state_cap(dev_priv, rp_state_cap);
+ /* Initialize rps frequencies */
+ gen6_init_rps_frequencies(dev);
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -3761,9 +4766,8 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later */
- gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
-
- gen8_enable_rps_interrupts(dev);
+ dev_priv->rps.power = HIGH_POWER; /* force a reset */
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -3772,7 +4776,6 @@ static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- u32 rp_state_cap;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
@@ -3796,9 +4799,8 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-
- parse_rp_state_cap(dev_priv, rp_state_cap);
+ /* Initialize rps frequencies */
+ gen6_init_rps_frequencies(dev);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3861,8 +4863,6 @@ static void gen6_enable_rps(struct drm_device *dev)
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
- gen6_enable_rps_interrupts(dev);
-
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
if (IS_GEN6(dev) && ret) {
@@ -3915,9 +4915,9 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
+ for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
gpu_freq--) {
- int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
+ int diff = dev_priv->rps.max_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
if (INTEL_INFO(dev)->gen >= 8) {
@@ -4072,12 +5072,15 @@ static void cherryview_setup_pctx(struct drm_device *dev)
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
(gtt->stolen_size - pctx_size));
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
}
+
+ DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_device *dev)
@@ -4103,6 +5106,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
goto out;
}
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+
/*
* From the Gunit register HAS:
* The Gfx driver is expected to program this register and ensure
@@ -4121,6 +5126,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
I915_WRITE(VLV_PCBR, pctx_paddr);
out:
+ DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
}
@@ -4157,7 +5163,7 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
dev_priv->mem_freq = 1333;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@@ -4199,7 +5205,10 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
mutex_lock(&dev_priv->rps.hw_lock);
- val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
+ mutex_unlock(&dev_priv->dpio_lock);
+
switch ((val >> 2) & 0x7) {
case 0:
case 1:
@@ -4223,7 +5232,7 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->mem_freq = 1600;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@@ -4309,8 +5318,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* For now we assume BIOS is allocating and populating the PCBR */
pcbr = I915_READ(VLV_PCBR);
- DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
-
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
@@ -4340,7 +5347,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -4354,8 +5364,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen8_enable_rps_interrupts(dev);
-
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4420,7 +5428,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -4434,8 +5445,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen6_enable_rps_interrupts(dev);
-
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -5194,12 +6203,17 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* Interrupts should be disabled already to avoid re-arming. */
- WARN_ON(intel_irqs_enabled(dev_priv));
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
- cancel_work_sync(&dev_priv->rps.work);
+ /*
+ * TODO: disable RPS interrupts on GEN9+ too once RPS support
+ * is added for it.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_disable_rps_interrupts(dev);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
@@ -5209,9 +6223,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* Interrupts should be disabled already to avoid re-arming. */
- WARN_ON(intel_irqs_enabled(dev_priv));
-
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
@@ -5219,12 +6230,15 @@ void intel_disable_gt_powersave(struct drm_device *dev)
intel_suspend_gt_powersave(dev);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_CHERRYVIEW(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ gen9_disable_rps(dev);
+ else if (IS_CHERRYVIEW(dev))
cherryview_disable_rps(dev);
else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
+
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -5239,10 +6253,19 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
+ /*
+ * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
+ * added for it.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_reset_rps_interrupts(dev);
+
if (IS_CHERRYVIEW(dev)) {
cherryview_enable_rps(dev);
} else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
+ } else if (INTEL_INFO(dev)->gen >= 9) {
+ gen9_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
__gen6_update_ring_freq(dev);
@@ -5251,6 +6274,10 @@ static void intel_gen6_powersave_work(struct work_struct *work)
__gen6_update_ring_freq(dev);
}
dev_priv->rps.enabled = true;
+
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_enable_rps_interrupts(dev);
+
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
@@ -5481,7 +6508,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN6_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
ilk_init_lp_watermarks(dev);
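
Aside (not part of the patch): the GT_MODE change in this hunk matters because these are masked registers, where bits 31:16 select which of bits 15:0 a write actually updates. The old `GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4` expression left the enable half zero, so the write was ignored. A sketch of the semantics (my paraphrase of the _MASKED_FIELD macro, treat the exact form as an assumption):

	#include <stdint.h>

	/* Masked register write: the high half enables updates of the low half. */
	static uint32_t masked_field(uint16_t mask, uint16_t value)
	{
		return ((uint32_t)mask << 16) | value;
	}

	/*
	 * masked_field(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4) arms the
	 * hashing-mode bits for update while leaving every other field alone.
	 */
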
@@ -5609,16 +6636,6 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- /* FIXME(BDW): Check all the w/a, some might only apply to
- * pre-production hw. */
-
-
- I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
-
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
-
-
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -5689,7 +6706,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -5786,7 +6803,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
@@ -5899,18 +6916,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
/* WaDisableSDEUnitClockGating:chv */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
- /* WaDisableGunitClockGating:chv (pre-production hw) */
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
- GINT_DIS);
-
- /* WaDisableFfDopClockGating:chv (pre-production hw) */
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
-
- /* WaDisableDopClockGating:chv (pre-production hw) */
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
- GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -6036,1161 +7041,35 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
-#define for_each_power_well(i, power_well, domain_mask, power_domains) \
- for (i = 0; \
- i < (power_domains)->power_well_count && \
- ((power_well) = &(power_domains)->power_wells[i]); \
- i++) \
- if ((power_well)->domains & (domain_mask))
-
-#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
- for (i = (power_domains)->power_well_count - 1; \
- i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
- i--) \
- if ((power_well)->domains & (domain_mask))
-
-/**
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return I915_READ(HSW_PWR_WELL_DRIVER) ==
- (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-}
-
-bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- bool is_enabled;
- int i;
-
- if (dev_priv->pm.suspended)
- return false;
-
- power_domains = &dev_priv->power_domains;
-
- is_enabled = true;
-
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- if (power_well->always_on)
- continue;
-
- if (!power_well->hw_enabled) {
- is_enabled = false;
- break;
- }
- }
-
- return is_enabled;
-}
-
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- bool ret;
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
- ret = intel_display_power_enabled_unlocked(dev_priv, domain);
- mutex_unlock(&power_domains->lock);
-
- return ret;
-}
-
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- /*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
- */
- vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-
- if (IS_BROADWELL(dev))
- gen8_irq_power_well_post_enable(dev_priv);
-}
-
-static void hsw_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- bool is_enabled, enable_requested;
- uint32_t tmp;
-
- tmp = I915_READ(HSW_PWR_WELL_DRIVER);
- is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
- enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
-
- if (enable) {
- if (!enable_requested)
- I915_WRITE(HSW_PWR_WELL_DRIVER,
- HSW_PWR_WELL_ENABLE_REQUEST);
-
- if (!is_enabled) {
- DRM_DEBUG_KMS("Enabling power well\n");
- if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
- HSW_PWR_WELL_STATE_ENABLED), 20))
- DRM_ERROR("Timeout enabling power well\n");
- }
-
- hsw_power_well_post_enable(dev_priv);
- } else {
- if (enable_requested) {
- I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
- POSTING_READ(HSW_PWR_WELL_DRIVER);
- DRM_DEBUG_KMS("Requesting to disable the power well\n");
- }
- }
-}
-
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
-
- /*
- * We're taking over the BIOS, so clear any requests made by it since
- * the driver is in charge now.
- */
- if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
- I915_WRITE(HSW_PWR_WELL_BIOS, 0);
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, true);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, false);
-}
-
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static void intel_init_fbc(struct drm_i915_private *dev_priv)
{
-}
-
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return true;
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- enum punit_power_well power_well_id = power_well->data;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(power_well_id);
- state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
- PUNIT_PWRGT_PWR_GATE(power_well_id);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
- ctrl &= ~mask;
- ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
-
- if (wait_for(COND, 100))
- DRM_ERROR("timout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
-#undef COND
-
-out:
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
-}
-
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-}
-
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- int power_well_id = power_well->data;
- bool enabled = false;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(power_well_id);
- ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
- state != PUNIT_PWRGT_PWR_GATE(power_well_id));
- if (state == ctrl)
- enabled = true;
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- WARN_ON(ctrl != state);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return enabled;
-}
-
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
- vlv_set_power_well(dev_priv, power_well, true);
-
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /*
- * During driver initialization/resume we can avoid restoring the
- * part of the HW/SW state that will be inited anyway explicitly.
- */
- if (dev_priv->power_domains.initializing)
+ if (!HAS_FBC(dev_priv)) {
+ dev_priv->fbc.enabled = false;
return;
-
- intel_hpd_init(dev_priv->dev);
-
- i915_redisable_vga_power_on(dev_priv->dev);
-}
-
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- vlv_set_power_well(dev_priv, power_well, false);
-
- vlv_power_sequencer_reset(dev_priv);
-}
-
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
- vlv_set_power_well(dev_priv, power_well, true);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe;
-
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
- for_each_pipe(dev_priv, pipe)
- assert_pll_disabled(dev_priv, pipe);
-
- /* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
-
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
-
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
- phy = DPIO_PHY0;
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV);
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
- } else {
- phy = DPIO_PHY1;
- I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
}
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
- /* Poll for phypwrgood signal */
- if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
- DRM_ERROR("Display PHY %d is not power up\n", phy);
-
- I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
- PHY_COM_LANE_RESET_DEASSERT(phy));
-}
-
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
-
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
-
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
- phy = DPIO_PHY0;
- assert_pll_disabled(dev_priv, PIPE_A);
- assert_pll_disabled(dev_priv, PIPE_B);
+ if (INTEL_INFO(dev_priv)->gen >= 7) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = gen7_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (INTEL_INFO(dev_priv)->gen >= 5) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev_priv)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
} else {
- phy = DPIO_PHY1;
- assert_pll_disabled(dev_priv, PIPE_C);
- }
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
- I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
- ~PHY_COM_LANE_RESET_DEASSERT(phy));
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe = power_well->data;
- bool enabled;
- u32 state, ctrl;
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
- enabled = state == DP_SSS_PWR_ON(pipe);
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
- WARN_ON(ctrl << 16 != state);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return enabled;
-}
-
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- bool enable)
-{
- enum pipe pipe = power_well->data;
- u32 state;
- u32 ctrl;
-
- state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
- ctrl &= ~DP_SSC_MASK(pipe);
- ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
-
- if (wait_for(COND, 100))
- DRM_ERROR("timout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
-
-#undef COND
-
-out:
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
-}
-
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PIPE_A &&
- power_well->data != PIPE_B &&
- power_well->data != PIPE_C);
-
- chv_set_pipe_power_well(dev_priv, power_well, true);
-}
-
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PIPE_A &&
- power_well->data != PIPE_B &&
- power_well->data != PIPE_C);
-
- chv_set_pipe_power_well(dev_priv, power_well, false);
-}
-
-static void check_power_well_state(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
- if (power_well->always_on || !i915.disable_power_well) {
- if (!enabled)
- goto mismatch;
-
- return;
- }
-
- if (enabled != (power_well->count > 0))
- goto mismatch;
-
- return;
-
-mismatch:
- WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
- power_well->name, power_well->always_on, enabled,
- power_well->count, i915.disable_power_well);
-}
-
-void intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- int i;
-
- intel_runtime_pm_get(dev_priv);
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
-
- for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++) {
- DRM_DEBUG_KMS("enabling %s\n", power_well->name);
- power_well->ops->enable(dev_priv, power_well);
- power_well->hw_enabled = true;
- }
-
- check_power_well_state(dev_priv, power_well);
+ /* This value was pulled out of someone's hat */
+ I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
- power_domains->domain_use_count[domain]++;
-
- mutex_unlock(&power_domains->lock);
-}
-
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- int i;
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
-
- WARN_ON(!power_domains->domain_use_count[domain]);
- power_domains->domain_use_count[domain]--;
-
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- WARN_ON(!power_well->count);
-
- if (!--power_well->count && i915.disable_power_well) {
- DRM_DEBUG_KMS("disabling %s\n", power_well->name);
- power_well->hw_enabled = false;
- power_well->ops->disable(dev_priv, power_well);
- }
-
- check_power_well_state(dev_priv, power_well);
- }
-
- mutex_unlock(&power_domains->lock);
-
- intel_runtime_pm_put(dev_priv);
-}
-
-static struct i915_power_domains *hsw_pwr;
-
-/* Display audio driver power well request */
-int i915_request_power_well(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- return 0;
-}
-EXPORT_SYMBOL_GPL(i915_request_power_well);
-
-/* Display audio driver power well release */
-int i915_release_power_well(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
- return 0;
-}
-EXPORT_SYMBOL_GPL(i915_release_power_well);
-
-/*
- * Private interface for the audio driver to get CDCLK in kHz.
- *
- * Caller must request power well using i915_request_power_well() prior to
- * making the call.
- */
-int i915_get_cdclk_freq(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
-
- return intel_ddi_get_cdclk_freq(dev_priv);
-}
-EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
-
-
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
-#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
- BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | \
- BIT(POWER_DOMAIN_PLLS) | \
- BIT(POWER_DOMAIN_INIT))
-#define HSW_DISPLAY_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
- HSW_ALWAYS_ON_POWER_DOMAINS | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-#define BDW_DISPLAY_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
-#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
-
-#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_A_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_B_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_C_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_C) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
- .sync_hw = i9xx_always_on_power_well_noop,
- .enable = i9xx_always_on_power_well_noop,
- .disable = i9xx_always_on_power_well_noop,
- .is_enabled = i9xx_always_on_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = chv_pipe_power_well_sync_hw,
- .enable = chv_pipe_power_well_enable,
- .disable = chv_pipe_power_well_disable,
- .is_enabled = chv_pipe_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = chv_dpio_cmn_power_well_enable,
- .disable = chv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static struct i915_power_well i9xx_always_on_power_well[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- },
-};
-
-static const struct i915_power_well_ops hsw_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = hsw_power_well_enable,
- .disable = hsw_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static struct i915_power_well hsw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
- {
- .name = "display",
- .domains = HSW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- },
-};
-
-static struct i915_power_well bdw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
- {
- .name = "display",
- .domains = BDW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- },
-};
-
-static const struct i915_power_well_ops vlv_display_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = vlv_display_power_well_enable,
- .disable = vlv_display_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = vlv_dpio_cmn_power_well_enable,
- .disable = vlv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = vlv_power_well_enable,
- .disable = vlv_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static struct i915_power_well vlv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
- {
- .name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
- .ops = &vlv_display_power_well_ops,
- },
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
- },
- {
- .name = "dpio-common",
- .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &vlv_dpio_cmn_power_well_ops,
- },
-};
-
-static struct i915_power_well chv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
-#if 0
- {
- .name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
- .ops = &vlv_display_power_well_ops,
- },
- {
- .name = "pipe-a",
- .domains = CHV_PIPE_A_POWER_DOMAINS,
- .data = PIPE_A,
- .ops = &chv_pipe_power_well_ops,
- },
- {
- .name = "pipe-b",
- .domains = CHV_PIPE_B_POWER_DOMAINS,
- .data = PIPE_B,
- .ops = &chv_pipe_power_well_ops,
- },
- {
- .name = "pipe-c",
- .domains = CHV_PIPE_C_POWER_DOMAINS,
- .data = PIPE_C,
- .ops = &chv_pipe_power_well_ops,
- },
-#endif
- {
- .name = "dpio-common-bc",
- /*
- * XXX: cmnreset for one PHY seems to disturb the other.
- * As a workaround keep both powered on at the same
- * time for now.
- */
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &chv_dpio_cmn_power_well_ops,
- },
- {
- .name = "dpio-common-d",
- /*
- * XXX: cmnreset for one PHY seems to disturb the other.
- * As a workaround keep both powered on at the same
- * time for now.
- */
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_D,
- .ops = &chv_dpio_cmn_power_well_ops,
- },
-#if 0
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
- },
- {
- .name = "dpio-tx-d-01",
- .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
- CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
- },
- {
- .name = "dpio-tx-d-23",
- .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
- CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
- },
-#endif
-};
-
-static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
- int i;
-
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
- if (power_well->data == power_well_id)
- return power_well;
- }
-
- return NULL;
-}
-
-#define set_power_wells(power_domains, __power_wells) ({ \
- (power_domains)->power_wells = (__power_wells); \
- (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
-})
-
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
- mutex_init(&power_domains->lock);
-
- /*
- * The enabling order will be from lower to higher indexed wells,
- * the disabling order is reversed.
- */
- if (IS_HASWELL(dev_priv->dev)) {
- set_power_wells(power_domains, hsw_power_wells);
- hsw_pwr = power_domains;
- } else if (IS_BROADWELL(dev_priv->dev)) {
- set_power_wells(power_domains, bdw_power_wells);
- hsw_pwr = power_domains;
- } else if (IS_CHERRYVIEW(dev_priv->dev)) {
- set_power_wells(power_domains, chv_power_wells);
- } else if (IS_VALLEYVIEW(dev_priv->dev)) {
- set_power_wells(power_domains, vlv_power_wells);
- } else {
- set_power_wells(power_domains, i9xx_always_on_power_well);
- }
-
- return 0;
-}
-
-void intel_power_domains_remove(struct drm_i915_private *dev_priv)
-{
- hsw_pwr = NULL;
-}
-
-static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
- int i;
-
- mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
- power_well->ops->sync_hw(dev_priv, power_well);
- power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
- power_well);
- }
- mutex_unlock(&power_domains->lock);
-}
-
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
- struct i915_power_well *disp2d =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
-
- /* nothing to do if common lane is already off */
- if (!cmn->ops->is_enabled(dev_priv, cmn))
- return;
-
- /* If the display might be already active skip this */
- if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
- I915_READ(DPIO_CTL) & DPIO_CMNRST)
- return;
-
- DRM_DEBUG_KMS("toggling display PHY side reset\n");
-
- /* cmnlane needs DPLL registers */
- disp2d->ops->enable(dev_priv, disp2d);
-
- /*
- * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
- * Need to assert and de-assert PHY SB reset by gating the
- * common lane power, then un-gating it.
- * Simply ungating isn't enough to reset the PHY enough to get
- * ports and lanes running.
- */
- cmn->ops->disable(dev_priv, cmn);
-}
-
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
- power_domains->initializing = true;
-
- if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
- mutex_lock(&power_domains->lock);
- vlv_cmnlane_wa(dev_priv);
- mutex_unlock(&power_domains->lock);
- }
-
- /* For now, we need the power well to be always enabled. */
- intel_display_set_init_power(dev_priv, true);
- intel_power_domains_resume(dev_priv);
- power_domains->initializing = false;
-}
-
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
-{
- intel_runtime_pm_get(dev_priv);
-}
-
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
-{
- intel_runtime_pm_put(dev_priv);
-}
-
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- pm_runtime_get_sync(device);
- WARN(dev_priv->pm.suspended, "Device still suspended.\n");
-}
-
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
- pm_runtime_get_noresume(device);
-}
-
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- pm_runtime_mark_last_busy(device);
- pm_runtime_put_autosuspend(device);
-}
-
-void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- pm_runtime_set_active(device);
-
- /*
- * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
- * requirement.
- */
- if (!intel_enable_rc6(dev)) {
- DRM_INFO("RC6 disabled, disabling runtime PM support\n");
- return;
- }
-
- pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
- pm_runtime_mark_last_busy(device);
- pm_runtime_use_autosuspend(device);
-
- pm_runtime_put_autosuspend(device);
-}
-
-void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- if (!intel_enable_rc6(dev))
- return;
-
- /* Make sure we're not suspended first. */
- pm_runtime_get_sync(device);
- pm_runtime_disable(device);
+ dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}
/* Set up chip specific power management-related functions */
@@ -7198,28 +7077,7 @@ void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_FBC(dev)) {
- if (INTEL_INFO(dev)->gen >= 7) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = gen7_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (INTEL_INFO(dev)->gen >= 5) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = ironlake_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (IS_GM45(dev)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
-
- /* This value was pulled out of someone's hat */
- I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
- }
- }
+ intel_init_fbc(dev_priv);
/* For cxsr */
if (IS_PINEVIEW(dev))
@@ -7228,7 +7086,13 @@ void intel_init_pm(struct drm_device *dev)
i915_ironlake_get_mem_freq(dev);
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ skl_setup_wm_latency(dev);
+
+ dev_priv->display.init_clock_gating = gen9_init_clock_gating;
+ dev_priv->display.update_wm = skl_update_wm;
+ dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
+ } else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
@@ -7309,7 +7173,7 @@ void intel_init_pm(struct drm_device *dev)
}
}
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -7319,6 +7183,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
}
I915_WRITE(GEN6_PCODE_DATA, *val);
+ I915_WRITE(GEN6_PCODE_DATA1, 0);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
@@ -7333,7 +7198,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
return 0;
}
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -7356,99 +7221,66 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
-static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
- int div;
-
- /* 4 x czclk */
- switch (dev_priv->mem_freq) {
- case 800:
- div = 10;
- break;
- case 1066:
- div = 12;
- break;
- case 1333:
- div = 16;
- break;
+ switch (czclk_freq) {
+ case 200:
+ return 10;
+ case 267:
+ return 12;
+ case 320:
+ case 333:
+ return 16;
+ case 400:
+ return 20;
default:
return -1;
}
+}
+
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
+
+ div = vlv_gpu_freq_div(czclk_freq);
+ if (div < 0)
+ return div;
- return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
+ return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mul;
+ int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
- /* 4 x czclk */
- switch (dev_priv->mem_freq) {
- case 800:
- mul = 10;
- break;
- case 1066:
- mul = 12;
- break;
- case 1333:
- mul = 16;
- break;
- default:
- return -1;
- }
+ mul = vlv_gpu_freq_div(czclk_freq);
+ if (mul < 0)
+ return mul;
- return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
+ return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int div, freq;
+ int div, czclk_freq = dev_priv->rps.cz_freq;
- switch (dev_priv->rps.cz_freq) {
- case 200:
- div = 5;
- break;
- case 267:
- div = 6;
- break;
- case 320:
- case 333:
- case 400:
- div = 8;
- break;
- default:
- return -1;
- }
-
- freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+ div = vlv_gpu_freq_div(czclk_freq) / 2;
+ if (div < 0)
+ return div;
- return freq;
+ return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mul, opcode;
+ int mul, czclk_freq = dev_priv->rps.cz_freq;
- switch (dev_priv->rps.cz_freq) {
- case 200:
- mul = 5;
- break;
- case 267:
- mul = 6;
- break;
- case 320:
- case 333:
- case 400:
- mul = 8;
- break;
- default:
- return -1;
- }
+ mul = vlv_gpu_freq_div(czclk_freq) / 2;
+ if (mul < 0)
+ return mul;
/* CHV needs even values */
- opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
-
- return opcode;
+ return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
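As a sanity check of the consolidated divider table above, here is a minimal standalone sketch of the Valleyview round-trip, assuming mem_freq = 1066 (so czclk_freq = 267 and vlv_gpu_freq_div() returns 12); the DIV_ROUND_CLOSEST() definition mirrors the kernel macro for non-negative operands:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        int czclk_freq = DIV_ROUND_CLOSEST(1066, 4);    /* 267 MHz */
        int div = 12;           /* what vlv_gpu_freq_div(267) returns */
        int opcode = 0xc0;      /* an example RPS opcode */
        int mhz, back;

        /* opcode -> MHz, as in byt_gpu_freq() */
        mhz = DIV_ROUND_CLOSEST(czclk_freq * (opcode + 6 - 0xbd), div);

        /* MHz -> opcode, as in byt_freq_opcode() */
        back = DIV_ROUND_CLOSEST(div * mhz, czclk_freq) + 0xbd - 6;

        /* prints: opcode 0xc0 -> 200 MHz -> opcode 0xc0 */
        printf("opcode 0x%x -> %d MHz -> opcode 0x%x\n", opcode, mhz, back);
        return 0;
}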
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
@@ -7485,5 +7317,4 @@ void intel_pm_setup(struct drm_device *dev)
intel_gen6_powersave_work);
dev_priv->pm.suspended = false;
- dev_priv->pm._irqs_disabled = false;
}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
new file mode 100644
index 0000000..716b8a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Panel Self Refresh (PSR/SRD)
+ *
+ * Since Haswell, the display controller supports Panel Self-Refresh on
+ * display panels which have a remote frame buffer (RFB) implemented
+ * according to the PSR spec in eDP 1.3. The PSR feature allows the
+ * display to go to lower standby states when the system is idle but the
+ * display is on, as it completely eliminates display refresh requests to
+ * DDR memory as long as the frame buffer for that display is unchanged.
+ *
+ * Panel Self Refresh must be supported by both Hardware (source) and
+ * Panel (sink).
+ *
+ * PSR saves power by caching the framebuffer in the panel RFB, which allows us
+ * to power down the link and memory controller. For DSI panels the same idea
+ * is called "manual mode".
+ *
+ * The implementation uses the hardware-based PSR support which automatically
+ * enters/exits self-refresh mode. The hardware takes care of sending the
+ * required DP aux message and could even retrain the link (that part isn't
+ * enabled yet though). The hardware also keeps track of any frontbuffer
+ * changes to know when to exit self-refresh mode again. Unfortunately that
+ * part doesn't work too well, hence why the i915 PSR support uses the
+ * software frontbuffer tracking to make sure it doesn't miss a screen
+ * update. For this integration intel_psr_invalidate() and intel_psr_flush()
+ * get called by the frontbuffer tracking code. Note that because of locking
+ * issues the self-refresh re-enable code is done from a work queue, which
+ * must be correctly synchronized/cancelled when shutting down the pipe.
+ */
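For illustration, a minimal sketch (not part of this patch) of how a rendering path would drive the two hooks named above from within the driver; the wrapper function and its call site are hypothetical, while intel_psr_invalidate() and intel_psr_flush() are the entry points defined later in this file:

static void example_frontbuffer_write(struct drm_device *dev,
                                      unsigned frontbuffer_bits)
{
        /* Rendering is about to dirty the frontbuffer: force a PSR exit. */
        intel_psr_invalidate(dev, frontbuffer_bits);

        /* ... the actual frontbuffer writes happen here ... */

        /* The writes have landed in memory: PSR may be re-armed. */
        intel_psr_flush(dev, frontbuffer_bits);
}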
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+ return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+bool intel_psr_is_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev))
+ return false;
+
+ return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+}
+
+static void intel_psr_write_vsc(struct intel_dp *intel_dp,
+ struct edp_vsc_psr *vsc_psr)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+ u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+ uint32_t *data = (uint32_t *) vsc_psr;
+ unsigned int i;
+
+	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
+	   the video DIP being updated before programming the video DIP data
+	   buffer registers for the DIP being updated. */
+ I915_WRITE(ctl_reg, 0);
+ POSTING_READ(ctl_reg);
+
+ for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+ if (i < sizeof(struct edp_vsc_psr))
+ I915_WRITE(data_reg + i, *data++);
+ else
+ I915_WRITE(data_reg + i, 0);
+ }
+
+ I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+ POSTING_READ(ctl_reg);
+}
+
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
+{
+ struct edp_vsc_psr psr_vsc;
+
+ /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+ intel_psr_write_vsc(intel_dp, &psr_vsc);
+}
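For context, and per the eDP 1.3 SDP definitions rather than anything spelled out in this patch: HB1 = 0x7 identifies the secondary data packet as a VSC SDP, HB2 = 0x2 is the VSC revision that adds PSR support, and HB3 = 0x8 is the number of valid payload bytes.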
+
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t aux_clock_divider;
+ int precharge = 0x3;
+ bool only_standby = false;
+ static const uint8_t aux_msg[] = {
+ [0] = DP_AUX_NATIVE_WRITE << 4,
+ [1] = DP_SET_POWER >> 8,
+ [2] = DP_SET_POWER & 0xff,
+ [3] = 1 - 1,
+ [4] = DP_SET_POWER_D0,
+ };
+ int i;
+
+ BUILD_BUG_ON(sizeof(aux_msg) > 20);
+
+ aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
+ /* Enable PSR in sink */
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+
+ /* Setup AUX registers */
+ for (i = 0; i < sizeof(aux_msg); i += 4)
+ I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
+ intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+
+ I915_WRITE(EDP_PSR_AUX_CTL(dev),
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
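To make the AUX register layout above concrete, a hedged standalone sketch of the two data dwords the loop writes; the MSB-first packing is an assumption modelled on the driver's intel_dp_pack_aux() convention, which this patch does not show:

#include <stdint.h>
#include <stdio.h>

/* MSB-first packing, assumed to match intel_dp_pack_aux() */
static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
        uint32_t v = 0;
        int i;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= (uint32_t)src[i] << ((3 - i) * 8);
        return v;
}

int main(void)
{
        /* Native AUX write, address DP_SET_POWER (0x600), one byte: D0 */
        const uint8_t msg[] = { 0x80, 0x06, 0x00, 0x00, 0x01 };
        unsigned int i;

        /* prints EDP_PSR_AUX_DATA1 = 0x80060000, DATA2 = 0x01000000 */
        for (i = 0; i < sizeof(msg); i += 4)
                printf("EDP_PSR_AUX_DATA%u = 0x%08x\n", i / 4 + 1,
                       pack_aux(&msg[i], (int)(sizeof(msg) - i)));
        return 0;
}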
+
+static void intel_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t max_sleep_time = 0x1f;
+ uint32_t idle_frames = 1;
+ uint32_t val = 0x0;
+ const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+ bool only_standby = false;
+
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
+ val |= EDP_PSR_LINK_STANDBY;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_SKIP_AUX_EXIT;
+ val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
+ } else
+ val |= EDP_PSR_LINK_DISABLE;
+
+ I915_WRITE(EDP_PSR_CTL(dev), val |
+ (IS_BROADWELL(dev) ? 0 : link_entry_time) |
+ max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+ idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+ EDP_PSR_ENABLE);
+}
+
+static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ lockdep_assert_held(&dev_priv->psr.lock);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ dev_priv->psr.source_ok = false;
+
+ if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
+ DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+ return false;
+ }
+
+ if (!i915.enable_psr) {
+ DRM_DEBUG_KMS("PSR disable by flag\n");
+ return false;
+ }
+
+ /* Below limitations aren't valid for Broadwell */
+ if (IS_BROADWELL(dev))
+ goto out;
+
+ if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+ S3D_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ return false;
+ }
+
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+ return false;
+ }
+
+ out:
+ dev_priv->psr.source_ok = true;
+ return true;
+}
+
+static void intel_psr_do_enable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ WARN_ON(dev_priv->psr.active);
+ lockdep_assert_held(&dev_priv->psr.lock);
+
+ /* Enable/Re-enable PSR on the host */
+ intel_psr_enable_source(intel_dp);
+
+ dev_priv->psr.active = true;
+}
+
+/**
+ * intel_psr_enable - Enable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function can only be called after the pipe is fully trained and enabled.
+ */
+void intel_psr_enable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ return;
+ }
+
+ if (!is_edp_psr(intel_dp)) {
+ DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR already in use\n");
+ goto unlock;
+ }
+
+ if (!intel_psr_match_conditions(intel_dp))
+ goto unlock;
+
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+
+ intel_psr_setup_vsc(intel_dp);
+
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+
+ /* Enable PSR on the panel */
+ intel_psr_enable_sink(intel_dp);
+
+ dev_priv->psr.enabled = intel_dp;
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_disable - Disable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called before disabling pipe.
+ */
+void intel_psr_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ if (dev_priv->psr.active) {
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ }
+
+ dev_priv->psr.enabled = NULL;
+ mutex_unlock(&dev_priv->psr.lock);
+
+ cancel_delayed_work_sync(&dev_priv->psr.work);
+}
+
+static void intel_psr_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+ struct intel_dp *intel_dp = dev_priv->psr.enabled;
+
+	/* We have to make sure PSR is ready for re-enable,
+	 * otherwise it stays disabled until the next full enable/disable
+	 * cycle. PSR might take some time to get fully disabled
+	 * and be ready for re-enable.
+	 */
+ if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->psr.lock);
+ intel_dp = dev_priv->psr.enabled;
+
+ if (!intel_dp)
+ goto unlock;
+
+ /*
+	 * The delayed work can race with an invalidate, hence we need to
+ * recheck. Since psr_flush first clears this and then reschedules we
+ * won't ever miss a flush when bailing out here.
+ */
+ if (dev_priv->psr.busy_frontbuffer_bits)
+ goto unlock;
+
+ intel_psr_do_enable(intel_dp);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_psr_exit(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->psr.active) {
+ u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+
+ I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+ dev_priv->psr.active = false;
+ }
+
+}
+
+/**
+ * intel_psr_invalidate - Invalidate PSR
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Since the hardware frontbuffer tracking has gaps, we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
+ * disabled if the frontbuffer mask contains a buffer relevant to PSR.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ intel_psr_exit(dev);
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+ dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_flush - Flush PSR
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Since the hardware frontbuffer tracking has gaps, we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering has completed and flushed out to memory. PSR
+ * can be enabled again if no other frontbuffer relevant to PSR is dirty.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /*
+	 * On Haswell, sprite plane updates don't result in a PSR invalidating
+	 * signal in the hardware, which means we need to manually fake this in
+ * software for all flushes, not just when we've seen a preceding
+ * invalidation through frontbuffer rendering.
+ */
+ if (IS_HASWELL(dev) &&
+ (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+ intel_psr_exit(dev);
+
+ if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->psr.work,
+ msecs_to_jiffies(100));
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_init - Init basic PSR work and mutex.
+ * @dev: DRM device
+ *
+ * This function is called only once at driver load to initialize basic
+ * PSR state (the delayed work and the mutex).
+ */
+void intel_psr_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
+ mutex_init(&dev_priv->psr.lock);
+}
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index 6c792d3..5bd6985 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -29,6 +29,7 @@
extern const struct intel_renderstate_rodata gen6_null_state;
extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 75ef1b5d..78011d7 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -1,16 +1,134 @@
#include "intel_renderstate.h"
static const u32 gen8_null_state_relocs[] = {
- 0x00000048,
- 0x00000050,
- 0x00000060,
- 0x000003ec,
+ 0x00000798,
+ 0x000007a4,
+ 0x000007ac,
+ 0x000007bc,
-1,
};
static const u32 gen8_null_state_batch[] = {
+ 0x7a000004,
+ 0x01000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x69040000,
- 0x61020001,
+ 0x78140000,
+ 0x04000000,
+ 0x7820000a,
+ 0x00000000,
+ 0x00000000,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x02001808,
+ 0x781f0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000800,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780c0000,
+ 0x00000000,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78300000,
+ 0x08010040,
+ 0x78310000,
+ 0x1e000000,
+ 0x78320000,
+ 0x1e000000,
+ 0x78330000,
+ 0x1e000000,
+ 0x79190002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791a0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791b0002,
+ 0x00000000,
0x00000000,
0x00000000,
0x79120000,
@@ -23,48 +141,435 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x79160000,
0x00000000,
- 0x6101000e,
- 0x00000001,
+ 0x78150009,
0x00000000,
- 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x784a0000,
+ 0x00000000,
+ 0x784b0000,
+ 0x00000004,
+ 0x79170101,
+ 0x00000000,
+ 0x00000080,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x60000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6101000e,
0x00000001, /* reloc */
0x00000000,
+ 0x00000000,
0x00000001, /* reloc */
0x00000000,
+ 0x00000001, /* reloc */
0x00000000,
+ 0x00000001,
0x00000000,
0x00000001, /* reloc */
0x00000000,
- 0xfffff001,
0x00001001,
- 0xfffff001,
0x00001001,
- 0x78230000,
- 0x000006e0,
- 0x78210000,
- 0x00000700,
- 0x78300000,
- 0x08010040,
- 0x78330000,
- 0x08000000,
- 0x78310000,
- 0x08000000,
- 0x78320000,
- 0x08000000,
- 0x78240000,
- 0x00000641,
- 0x780e0000,
- 0x00000601,
+ 0x00000001,
+ 0x00001001,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0xc0000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79080001,
+ 0x00000000,
+ 0x00000000,
+ 0x790a0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x79110000,
+ 0x00000000,
0x780d0000,
0x00000000,
- 0x78180000,
- 0x00000001,
- 0x78520003,
+ 0x79060000,
0x00000000,
+ 0x7907001f,
0x00000000,
0x00000000,
0x00000000,
- 0x78190009,
0x00000000,
0x00000000,
0x00000000,
@@ -75,7 +580,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x781b0007,
0x00000000,
0x00000000,
0x00000000,
@@ -84,26 +588,22 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x78270000,
0x00000000,
- 0x782c0000,
0x00000000,
- 0x781c0002,
0x00000000,
0x00000000,
0x00000000,
- 0x78160009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
+ 0x7902000f,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
- 0x78110008,
0x00000000,
0x00000000,
0x00000000,
@@ -113,12 +613,10 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x78290000,
0x00000000,
- 0x782e0000,
0x00000000,
- 0x781a0009,
0x00000000,
+ 0x790c000f,
0x00000000,
0x00000000,
0x00000000,
@@ -128,7 +626,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x781d0007,
0x00000000,
0x00000000,
0x00000000,
@@ -136,153 +633,153 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x780a0003,
0x00000000,
- 0x78280000,
0x00000000,
- 0x782d0000,
0x00000000,
- 0x78260000,
0x00000000,
- 0x782b0000,
+ 0x78080083,
+ 0x00004000,
0x00000000,
- 0x78150009,
0x00000000,
0x00000000,
+ 0x04004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x08004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x0c004000,
0x00000000,
0x00000000,
- 0x78100007,
0x00000000,
+ 0x10004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x14004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x18004000,
0x00000000,
- 0x781e0003,
0x00000000,
0x00000000,
+ 0x1c004000,
0x00000000,
0x00000000,
- 0x78120002,
0x00000000,
+ 0x20004000,
0x00000000,
0x00000000,
- 0x781f0002,
- 0x30400820,
0x00000000,
+ 0x24004000,
0x00000000,
- 0x78510009,
0x00000000,
0x00000000,
+ 0x28004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x2c004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x30004000,
0x00000000,
0x00000000,
- 0x78500003,
- 0x00210000,
0x00000000,
+ 0x34004000,
0x00000000,
0x00000000,
- 0x78130002,
0x00000000,
+ 0x38004000,
0x00000000,
0x00000000,
- 0x782a0000,
- 0x00000480,
- 0x782f0000,
- 0x00000540,
- 0x78140000,
- 0x00000800,
- 0x78170009,
0x00000000,
+ 0x3c004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x40004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x44004000,
0x00000000,
0x00000000,
0x00000000,
- 0x7820000a,
- 0x00000580,
+ 0x48004000,
0x00000000,
- 0x08080000,
0x00000000,
0x00000000,
- 0x1f000002,
- 0x00060000,
+ 0x4c004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x50004000,
0x00000000,
- 0x784d0000,
- 0x40000000,
- 0x784f0000,
- 0x80000100,
- 0x780f0000,
- 0x00000740,
- 0x78050006,
0x00000000,
0x00000000,
+ 0x54004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x58004000,
0x00000000,
0x00000000,
- 0x78070003,
0x00000000,
+ 0x5c004000,
0x00000000,
0x00000000,
0x00000000,
- 0x78060003,
+ 0x60004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x64004000,
0x00000000,
- 0x78040001,
0x00000000,
- 0x00000001,
- 0x79000002,
- 0xffffffff,
+ 0x00000000,
+ 0x68004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6c004000,
+ 0x00000000,
0x00000000,
0x00000000,
- 0x78080003,
- 0x00006000,
- 0x000005e0, /* reloc */
+ 0x70004000,
0x00000000,
0x00000000,
- 0x78090005,
+ 0x00000000,
+ 0x74004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090043,
0x02000000,
0x22220000,
- 0x02f60000,
- 0x11230000,
- 0x02850004,
- 0x11230000,
- 0x784b0000,
- 0x0000000f,
- 0x78490001,
0x00000000,
0x00000000,
- 0x7b000005,
0x00000000,
- 0x00000003,
0x00000000,
- 0x00000001,
0x00000000,
0x00000000,
- 0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
@@ -297,8 +794,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x000004c0, /* state start */
- 0x00000500,
0x00000000,
0x00000000,
0x00000000,
@@ -345,46 +840,65 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x680b0001,
+ 0x78260000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000000,
+ 0x780e0000,
+ 0x00000dc1,
+ 0x78240000,
+ 0x00000e01,
+ 0x784f0000,
+ 0x80000100,
+ 0x784d0000,
+ 0x40000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x782d0000,
0x00000000,
+ 0x782e0000,
0x00000000,
+ 0x782f0000,
0x00000000,
- 0x00000092,
+ 0x780f0000,
0x00000000,
+ 0x78230000,
+ 0x00000e60,
+ 0x78210000,
+ 0x00000e80,
+ 0x7b000005,
+ 0x00000004,
+ 0x00000001,
0x00000000,
+ 0x00000001,
0x00000000,
0x00000000,
+ 0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
+ 0x00000000, /* state start */
+ 0x00000000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
- 0x0060005a,
- 0x21403ae8,
- 0x3a0000c0,
- 0x008d0040,
- 0x0060005a,
- 0x21603ae8,
- 0x3a0000c0,
- 0x008d0080,
- 0x0060005a,
- 0x21803ae8,
- 0x3a0000d0,
- 0x008d0040,
- 0x0060005a,
- 0x21a03ae8,
- 0x3a0000d0,
- 0x008d0080,
- 0x02800031,
- 0x2e0022e8,
- 0x0e000140,
- 0x08840001,
- 0x05800031,
- 0x200022e0,
- 0x0e000e00,
- 0x90031000,
0x00000000,
0x00000000,
0x00000000,
@@ -410,38 +924,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
0x00000000,
0x00000000,
0x00000000,
@@ -449,8 +931,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0xf99a130c,
- 0x799a130c,
0x00000000,
0x00000000,
0x00000000,
@@ -466,9 +946,7 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x3f800000,
0x00000000,
- 0x3f800000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
new file mode 100644
index 0000000..8750753
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
@@ -0,0 +1,974 @@
+#include "intel_renderstate.h"
+
+static const u32 gen9_null_state_relocs[] = {
+ 0x000007a8,
+ 0x000007b4,
+ 0x000007bc,
+ 0x000007cc,
+ -1,
+};
+
+static const u32 gen9_null_state_batch[] = {
+ 0x7a000004,
+ 0x01000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x69040300,
+ 0x78140000,
+ 0x04000000,
+ 0x7820000a,
+ 0x00000000,
+ 0x00000000,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x02001808,
+ 0x781f0004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000800,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780c0000,
+ 0x00000000,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78300000,
+ 0x08010040,
+ 0x78310000,
+ 0x1e000000,
+ 0x78320000,
+ 0x1e000000,
+ 0x78330000,
+ 0x1e000000,
+ 0x79190002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791a0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791b0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79120000,
+ 0x00000000,
+ 0x79130000,
+ 0x00000000,
+ 0x79140000,
+ 0x00000000,
+ 0x79150000,
+ 0x00000000,
+ 0x79160000,
+ 0x00000000,
+ 0x78150009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x784a0000,
+ 0x00000000,
+ 0x784b0000,
+ 0x00000004,
+ 0x79170101,
+ 0x00000000,
+ 0x00000080,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x60000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x61010011,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00001001,
+ 0x00001001,
+ 0x00000001,
+ 0x00001001,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0xc0000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79080001,
+ 0x00000000,
+ 0x00000000,
+ 0x790a0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x79110000,
+ 0x00000000,
+ 0x780d0000,
+ 0x00000000,
+ 0x79060000,
+ 0x00000000,
+ 0x7907001f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7902000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x790c000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780a0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78080083,
+ 0x00004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x04004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x08004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x10004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x14004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x18004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x1c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x20004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x24004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x28004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x2c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x30004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x34004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x38004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x3c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x40004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x44004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x48004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x4c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x50004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x54004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x58004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x5c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x60004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x64004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x68004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x70004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x74004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090043,
+ 0x02000000,
+ 0x22220000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78550003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x680b0001,
+ 0x780e0000,
+ 0x00000e01,
+ 0x78240000,
+ 0x00000e41,
+ 0x784f0000,
+ 0x80000100,
+ 0x784d0000,
+ 0x40000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x782d0000,
+ 0x00000000,
+ 0x782e0000,
+ 0x00000000,
+ 0x782f0000,
+ 0x00000000,
+ 0x780f0000,
+ 0x00000000,
+ 0x78230000,
+ 0x00000ea0,
+ 0x78210000,
+ 0x00000ec0,
+ 0x78260000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000000,
+ 0x7b000005,
+ 0x00000004,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state start */
+ 0x00000000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(9);
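The arrays above are wrapped by the RO_RENDERSTATE() macro from intel_renderstate.h into the rodata descriptor that i915_gem_render_state.c copies into a batch when a context is created. A sketch of what the gen9 expansion looks like; the field names are an assumption based on the existing gen6-8 null-state tables:

/* Approximate expansion of RO_RENDERSTATE(9); the struct layout is an
 * assumption based on the other gen null-state files. */
const struct intel_renderstate_rodata gen9_null_state = {
	.reloc       = gen9_null_state_relocs,
	.batch       = gen9_null_state_batch,
	.batch_items = sizeof(gen9_null_state_batch) / 4,
};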
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0a80e41..9f445e9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -589,14 +589,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
goto out;
}
- if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
- i915_kernel_lost_context(ring->dev);
- else {
- ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = intel_ring_space(ringbuf);
- ringbuf->last_retired_head = -1;
- }
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->space = intel_ring_space(ringbuf);
+ ringbuf->last_retired_head = -1;
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -665,76 +661,112 @@ err:
return ret;
}
-static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
- u32 addr, u32 value)
+static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
{
+ int ret, i;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS))
- return;
+ if (WARN_ON(w->count == 0))
+ return 0;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, addr);
- intel_ring_emit(ring, value);
+ ring->gpu_caches_dirty = true;
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
- dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
- dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
- /* value is updated with the status of remaining bits of this
- * register when it is read from debugfs file
- */
- dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
- dev_priv->num_wa_regs++;
+ ret = intel_ring_begin(ring, (w->count * 2 + 2));
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+ for (i = 0; i < w->count; i++) {
+ intel_ring_emit(ring, w->reg[i].addr);
+ intel_ring_emit(ring, w->reg[i].value);
+ }
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
- return;
+ ring->gpu_caches_dirty = true;
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
+
+ return 0;
}
+static int wa_add(struct drm_i915_private *dev_priv,
+ const u32 addr, const u32 mask, const u32 val)
+{
+ const u32 idx = dev_priv->workarounds.count;
+
+ if (WARN_ON(idx >= I915_MAX_WA_REGS))
+ return -ENOSPC;
+
+ dev_priv->workarounds.reg[idx].addr = addr;
+ dev_priv->workarounds.reg[idx].value = val;
+ dev_priv->workarounds.reg[idx].mask = mask;
+
+ dev_priv->workarounds.count++;
+
+ return 0;
+}
+
+#define WA_REG(addr, mask, val) { \
+ const int r = wa_add(dev_priv, (addr), (mask), (val)); \
+ if (r) \
+ return r; \
+ }
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+ WA_REG(addr, mask, _MASKED_FIELD(mask, value))
+
+#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
+#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
+
+#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
+
static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
- int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * workarounds applied in this fn are part of register state context,
- * they need to be re-initialized followed by gpu reset, suspend/resume,
- * module reload.
- */
- dev_priv->num_wa_regs = 0;
- memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
-
- /*
- * update the number of dwords required based on the
- * actual number of workarounds applied
- */
- ret = intel_ring_begin(ring, 18);
- if (ret)
- return ret;
-
/* WaDisablePartialInstShootdown:bdw */
- /* WaDisableThreadStallDopClockGating:bdw */
- /* FIXME: Unclear whether we really need this on production bdw. */
- intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE
- | STALL_DOP_GATING_DISABLE));
+ /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+ STALL_DOP_GATING_DISABLE);
- /* WaDisableDopClockGating:bdw May not be needed for production */
- intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ /* WaDisableDopClockGating:bdw */
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
- intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
- _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
 * workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
- intel_ring_emit_wa(ring, HDC_CHICKEN0,
- _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+ /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT |
+ (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* Wa4x4STCOptimizationDisable:bdw */
- intel_ring_emit_wa(ring, CACHE_MODE_1,
- _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+ WA_SET_BIT_MASKED(CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
@@ -744,52 +776,51 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
- intel_ring_emit_wa(ring, GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
-
- intel_ring_advance(ring);
-
- DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
- dev_priv->num_wa_regs);
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
return 0;
}
static int chv_init_workarounds(struct intel_engine_cs *ring)
{
- int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * workarounds applied in this fn are part of register state context,
- * they need to be re-initialized followed by gpu reset, suspend/resume,
- * module reload.
+ /* WaDisablePartialInstShootdown:chv */
+ /* WaDisableThreadStallDopClockGating:chv */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+ STALL_DOP_GATING_DISABLE);
+
+ /* Use Force Non-Coherent whenever executing a 3D context. This is a
+ * workaround for a possible hang in the unlikely event a TLB
+ * invalidation occurs during a PSD flush.
*/
- dev_priv->num_wa_regs = 0;
- memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
+ /* WaForceEnableNonCoherent:chv */
+ /* WaHdcDisableFetchWhenMasked:chv */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT |
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED);
- ret = intel_ring_begin(ring, 12);
- if (ret)
- return ret;
+ return 0;
+}
- /* WaDisablePartialInstShootdown:chv */
- intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+int init_workarounds_ring(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* WaDisableThreadStallDopClockGating:chv */
- intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ WARN_ON(ring->id != RCS);
- /* WaDisableDopClockGating:chv (pre-production hw) */
- intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ dev_priv->workarounds.count = 0;
- /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
- intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
- _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+ if (IS_BROADWELL(dev))
+ return bdw_init_workarounds(ring);
- intel_ring_advance(ring);
+ if (IS_CHERRYVIEW(dev))
+ return chv_init_workarounds(ring);
return 0;
}
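The refactor above splits workaround handling into a record phase and a replay phase: init_workarounds_ring() only fills the dev_priv->workarounds table via the WA_* macros, while intel_ring_workarounds_emit(), installed as ring->init_context, replays the whole table after every context creation so the values survive GPU reset and suspend/resume. For an N-entry table the emitted stream looks like the sketch below, which is also why intel_ring_begin() reserves w->count * 2 + 2 dwords:

/*
 * Command stream produced by intel_ring_workarounds_emit() for an
 * N-entry workaround table:
 *
 *	MI_LOAD_REGISTER_IMM(N)
 *	reg[0].addr,   reg[0].value
 *	...
 *	reg[N-1].addr, reg[N-1].value
 *	MI_NOOP
 *
 * One dword for the LRI header, 2*N for the payload and a trailing
 * NOOP: N*2 + 2 dwords in total.
 */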
@@ -812,7 +843,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
*/
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
@@ -849,7 +880,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
if (HAS_L3_DPF(dev))
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
- return ret;
+ return init_workarounds_ring(ring);
}
static void render_ring_cleanup(struct intel_engine_cs *ring)
@@ -1186,7 +1217,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1217,7 +1248,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (!intel_irqs_enabled(dev_priv))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1254,7 +1285,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (!intel_irqs_enabled(dev_priv))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1388,8 +1419,8 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
- return false;
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
@@ -1431,7 +1462,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1451,9 +1482,6 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
- return;
-
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
@@ -1469,7 +1497,7 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1694,13 +1722,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
- if (!ringbuf->obj)
- return;
-
iounmap(ringbuf->virtual_start);
+ ringbuf->virtual_start = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
+}
+
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = ringbuf->obj;
+ int ret;
+
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret) {
+ i915_gem_object_ggtt_unpin(obj);
+ return ret;
+ }
+
+ ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+ i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+ if (ringbuf->virtual_start == NULL) {
+ i915_gem_object_ggtt_unpin(obj);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
drm_gem_object_unreference(&ringbuf->obj->base);
ringbuf->obj = NULL;
}
@@ -1708,12 +1765,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- int ret;
-
- if (ringbuf->obj)
- return 0;
obj = NULL;
if (!HAS_LLC(dev))
@@ -1726,30 +1778,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
- if (ret)
- goto err_unref;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto err_unpin;
-
- ringbuf->virtual_start =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
- ringbuf->size);
- if (ringbuf->virtual_start == NULL) {
- ret = -EINVAL;
- goto err_unpin;
- }
-
ringbuf->obj = obj;
- return 0;
-err_unpin:
- i915_gem_object_ggtt_unpin(obj);
-err_unref:
- drm_gem_object_unreference(&obj->base);
- return ret;
+ return 0;
}
static int intel_init_ring_buffer(struct drm_device *dev,
@@ -1786,10 +1817,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
- goto error;
+ if (ringbuf->obj == NULL) {
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+ ring->name, ret);
+ goto error;
+ }
+
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+ ring->name, ret);
+ intel_destroy_ringbuffer_obj(ringbuf);
+ goto error;
+ }
}
/* Workaround an erratum on the i830 which causes a hang if
@@ -1818,15 +1860,19 @@ error:
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct drm_i915_private *dev_priv;
+ struct intel_ringbuffer *ringbuf;
if (!intel_ring_initialized(ring))
return;
+ dev_priv = to_i915(ring->dev);
+ ringbuf = ring->buffer;
+
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+ intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
@@ -1912,13 +1958,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
break;
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
- dev->primary->master) {
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- }
-
msleep(1);
if (dev_priv->mm.interruptible && signal_pending(current)) {
@@ -2229,6 +2268,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
@@ -2259,8 +2299,12 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
}
intel_ring_advance(ring);
- if (IS_GEN7(dev) && !invalidate && flush)
- return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+ if (!invalidate && flush) {
+ if (IS_GEN7(dev))
+ return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+ else if (IS_BROADWELL(dev))
+ dev_priv->fbc.need_sw_cache_clean = true;
+ }
return 0;
}
@@ -2293,10 +2337,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
dev_priv->semaphore_obj = obj;
}
}
- if (IS_CHERRYVIEW(dev))
- ring->init_context = chv_init_workarounds;
- else
- ring->init_context = bdw_init_workarounds;
+
+ ring->init_context = intel_ring_workarounds_emit;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
@@ -2406,91 +2448,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
- struct intel_ringbuffer *ringbuf = ring->buffer;
- int ret;
-
- if (ringbuf == NULL) {
- ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
- if (!ringbuf)
- return -ENOMEM;
- ring->buffer = ringbuf;
- }
-
- ring->name = "render ring";
- ring->id = RCS;
- ring->mmio_base = RENDER_RING_BASE;
-
- if (INTEL_INFO(dev)->gen >= 6) {
- /* non-kms not supported on gen6+ */
- ret = -ENODEV;
- goto err_ringbuf;
- }
-
- /* Note: gem is not supported on gen5/ilk without kms (the corresponding
- * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
- * the special gen5 functions. */
- ring->add_request = i9xx_add_request;
- if (INTEL_INFO(dev)->gen < 4)
- ring->flush = gen2_render_ring_flush;
- else
- ring->flush = gen4_render_ring_flush;
- ring->get_seqno = ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
- if (IS_GEN2(dev)) {
- ring->irq_get = i8xx_ring_get_irq;
- ring->irq_put = i8xx_ring_put_irq;
- } else {
- ring->irq_get = i9xx_ring_get_irq;
- ring->irq_put = i9xx_ring_put_irq;
- }
- ring->irq_enable_mask = I915_USER_INTERRUPT;
- ring->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 4)
- ring->dispatch_execbuffer = i965_dispatch_execbuffer;
- else if (IS_I830(dev) || IS_845G(dev))
- ring->dispatch_execbuffer = i830_dispatch_execbuffer;
- else
- ring->dispatch_execbuffer = i915_dispatch_execbuffer;
- ring->init = init_render_ring;
- ring->cleanup = render_ring_cleanup;
-
- ring->dev = dev;
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
-
- ringbuf->size = size;
- ringbuf->effective_size = ringbuf->size;
- if (IS_I830(ring->dev) || IS_845G(ring->dev))
- ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
- ringbuf->virtual_start = ioremap_wc(start, size);
- if (ringbuf->virtual_start == NULL) {
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- ret = -ENOMEM;
- goto err_ringbuf;
- }
-
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = init_phys_status_page(ring);
- if (ret)
- goto err_vstart;
- }
-
- return 0;
-
-err_vstart:
- iounmap(ringbuf->virtual_start);
-err_ringbuf:
- kfree(ringbuf);
- ring->buffer = NULL;
- return ret;
-}
-
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 96479c8..fe426cf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -148,7 +148,8 @@ struct intel_engine_cs {
int (*init)(struct intel_engine_cs *ring);
- int (*init_context)(struct intel_engine_cs *ring);
+ int (*init_context)(struct intel_engine_cs *ring,
+ struct intel_context *ctx);
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
@@ -235,6 +236,7 @@ struct intel_engine_cs {
/* Execlists */
spinlock_t execlist_lock;
struct list_head execlist_queue;
+ struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct intel_ringbuffer *ringbuf);
@@ -381,6 +383,9 @@ intel_write_status_page(struct intel_engine_cs *ring,
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
@@ -424,6 +429,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
+int init_workarounds_ring(struct intel_engine_cs *ring);
+
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
return ringbuf->tail;
@@ -441,7 +448,4 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
ring->trace_irq_seqno = seqno;
}
-/* DRI warts */
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
-
#endif /* _INTEL_RINGBUFFER_H_ */
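The new entry points above split ringbuffer allocation from pinning and mapping, which is what allows intel_init_ring_buffer() to skip re-allocation on re-init and lets execlists manage pinning separately. A minimal usage sketch mirroring what intel_init_ring_buffer() and intel_cleanup_ring_buffer() now do; the example_* names are illustrative only:

/* Assumes a valid dev and a struct intel_ringbuffer with ->size set. */
static int example_ringbuf_setup(struct drm_device *dev,
				 struct intel_ringbuffer *ringbuf)
{
	int ret;

	ret = intel_alloc_ringbuffer_obj(dev, ringbuf); /* GEM object only */
	if (ret)
		return ret;

	/* GGTT pin plus WC mapping, now a separate, repeatable step */
	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		intel_destroy_ringbuffer_obj(ringbuf);
		return ret;
	}

	return 0;
}

static void example_ringbuf_teardown(struct intel_ringbuffer *ringbuf)
{
	intel_unpin_ringbuffer_obj(ringbuf);	/* iounmap + GGTT unpin */
	intel_destroy_ringbuffer_obj(ringbuf);	/* drop the GEM reference */
}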
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
new file mode 100644
index 0000000..f5a78d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -0,0 +1,1406 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/i915_powerwell.h>
+
+/**
+ * DOC: runtime pm
+ *
+ * The i915 driver supports dynamic enabling and disabling of entire hardware
+ * blocks at runtime. This is especially important on the display side where
+ * software is supposed to control many power gates manually on recent hardware,
+ * since on the GT side a lot of the power management is done by the hardware.
+ * But even there some manual control at the device level is required.
+ *
+ * Since i915 supports a diverse set of platforms with a unified codebase and
+ * hardware engineers just love to shuffle functionality around between power
+ * domains, there's a sizeable amount of indirection required. This file provides
+ * generic functions to the driver for grabbing and releasing references for
+ * abstract power domains. It then maps those to the actual power wells
+ * present for a given platform.
+ */
+
+static struct i915_power_domains *hsw_pwr;
+
+#define for_each_power_well(i, power_well, domain_mask, power_domains) \
+ for (i = 0; \
+ i < (power_domains)->power_well_count && \
+ ((power_well) = &(power_domains)->power_wells[i]); \
+ i++) \
+ if ((power_well)->domains & (domain_mask))
+
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+ for (i = (power_domains)->power_well_count - 1; \
+ i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+ i--) \
+ if ((power_well)->domains & (domain_mask))
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return I915_READ(HSW_PWR_WELL_DRIVER) ==
+ (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+/**
+ * __intel_display_power_is_enabled - unlocked check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This is the unlocked version of intel_display_power_is_enabled() and should
+ * only be used from error capture and recovery code where deadlocks are
+ * possible.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ bool is_enabled;
+ int i;
+
+ if (dev_priv->pm.suspended)
+ return false;
+
+ power_domains = &dev_priv->power_domains;
+
+ is_enabled = true;
+
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ if (power_well->always_on)
+ continue;
+
+ if (!power_well->hw_enabled) {
+ is_enabled = false;
+ break;
+ }
+ }
+
+ return is_enabled;
+}
+
+/**
+ * intel_display_power_is_enabled - check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This function can be used to check the hw power domain state. It is mostly
+ * used in hardware state readout functions. Everywhere else code should rely
+ * upon explicit power domain reference counting to ensure that the hardware
+ * block is powered up before accessing it.
+ *
+ * Callers must hold the relevant modesetting locks to ensure that concurrent
+ * threads can't disable the power well while the caller tries to read a few
+ * registers.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ bool ret;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ ret = __intel_display_power_is_enabled(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return ret;
+}
+
+/**
+ * intel_display_set_init_power - set the initial power domain state
+ * @dev_priv: i915 device instance
+ * @enable: whether to enable or disable the initial power domain state
+ *
+ * For simplicity our driver load/unload and system suspend/resume code assumes
+ * that all power domains are always enabled. This function controls the state
+ * of this little hack. While the initial power domain state is enabled, runtime
+ * pm is effectively disabled.
+ */
+void intel_display_set_init_power(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ if (dev_priv->power_domains.init_power_on == enable)
+ return;
+
+ if (enable)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ else
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ dev_priv->power_domains.init_power_on = enable;
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+ if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
+ gen8_irq_power_well_post_enable(dev_priv);
+}
+
+static void hsw_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ bool is_enabled, enable_requested;
+ uint32_t tmp;
+
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+ enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
+
+ if (enable) {
+ if (!enable_requested)
+ I915_WRITE(HSW_PWR_WELL_DRIVER,
+ HSW_PWR_WELL_ENABLE_REQUEST);
+
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling power well\n");
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ HSW_PWR_WELL_STATE_ENABLED), 20))
+ DRM_ERROR("Timeout enabling power well\n");
+ hsw_power_well_post_enable(dev_priv);
+ }
+
+ } else {
+ if (enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
+ DRM_DEBUG_KMS("Requesting to disable the power well\n");
+ }
+ }
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+ /*
+ * We're taking over the BIOS, so clear any requests made by it since
+ * the driver is in charge now.
+ */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, true);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, false);
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ enum punit_power_well power_well_id = power_well->data;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
+ PUNIT_PWRGT_PWR_GATE(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+ DRM_ERROR("timout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int power_well_id = power_well->data;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
+ state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ WARN_ON(ctrl != state);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return enabled;
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be initialized explicitly anyway.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+
+ i915_redisable_vga_power_on(dev_priv->dev);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+
+ vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ for_each_pipe(dev_priv, pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV);
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ } else {
+ phy = DPIO_PHY1;
+ I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ }
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /* Poll for phypwrgood signal */
+ if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+ DRM_ERROR("Display PHY %d is not power up\n", phy);
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
+ PHY_COM_LANE_RESET_DEASSERT(phy));
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+ if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ assert_pll_disabled(dev_priv, PIPE_A);
+ assert_pll_disabled(dev_priv, PIPE_B);
+ } else {
+ phy = DPIO_PHY1;
+ assert_pll_disabled(dev_priv, PIPE_C);
+ }
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
+ ~PHY_COM_LANE_RESET_DEASSERT(phy));
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe = power_well->data;
+ bool enabled;
+ u32 state, ctrl;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+ enabled = state == DP_SSS_PWR_ON(pipe);
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+ WARN_ON(ctrl << 16 != state);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool enable)
+{
+ enum pipe pipe = power_well->data;
+ u32 state;
+ u32 ctrl;
+
+ state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ ctrl &= ~DP_SSC_MASK(pipe);
+ ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+
+ if (wait_for(COND, 100))
+ DRM_ERROR("timout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+
+#undef COND
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PIPE_A &&
+ power_well->data != PIPE_B &&
+ power_well->data != PIPE_C);
+
+ chv_set_pipe_power_well(dev_priv, power_well, true);
+
+ if (power_well->data == PIPE_A) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be initialized explicitly anyway.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+
+ i915_redisable_vga_power_on(dev_priv->dev);
+ }
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PIPE_A &&
+ power_well->data != PIPE_B &&
+ power_well->data != PIPE_C);
+
+ if (power_well->data == PIPE_A) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ }
+
+ chv_set_pipe_power_well(dev_priv, power_well, false);
+
+ if (power_well->data == PIPE_A)
+ vlv_power_sequencer_reset(dev_priv);
+}
+
+static void check_power_well_state(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
+
+ if (power_well->always_on || !i915.disable_power_well) {
+ if (!enabled)
+ goto mismatch;
+
+ return;
+ }
+
+ if (enabled != (power_well->count > 0))
+ goto mismatch;
+
+ return;
+
+mismatch:
+ WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
+ power_well->name, power_well->always_on, enabled,
+ power_well->count, i915.disable_power_well);
+}
+
+/**
+ * intel_display_power_get - grab a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ intel_runtime_pm_get(dev_priv);
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+
+ for_each_power_well(i, power_well, BIT(domain), power_domains) {
+ if (!power_well->count++) {
+ DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+ power_well->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
+
+ power_domains->domain_use_count[domain]++;
+
+ mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+
+ WARN_ON(!power_domains->domain_use_count[domain]);
+ power_domains->domain_use_count[domain]--;
+
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ WARN_ON(!power_well->count);
+
+ if (!--power_well->count && i915.disable_power_well) {
+ DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->hw_enabled = false;
+ power_well->ops->disable(dev_priv, power_well);
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ intel_runtime_pm_put(dev_priv);
+}
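Typical calling pattern for the pair above: hold the reference across the entire hardware access and keep get/put strictly balanced. A sketch using POWER_DOMAIN_PIPE_B; the register parameter is hypothetical:

static u32 example_read_pipe_b_reg(struct drm_i915_private *dev_priv,
				   u32 reg /* hypothetical offset */)
{
	u32 val;

	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_B);
	val = I915_READ(reg);	/* power well guaranteed up here */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_B);

	return val;
}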
+
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_INIT))
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
+ HSW_ALWAYS_ON_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
+#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_always_on_power_well_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_pipe_power_well_ops = {
+ .sync_hw = chv_pipe_power_well_sync_hw,
+ .enable = chv_pipe_power_well_enable,
+ .disable = chv_pipe_power_well_disable,
+ .is_enabled = chv_pipe_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = chv_dpio_cmn_power_well_enable,
+ .disable = chv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well i9xx_always_on_power_well[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_ops hsw_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = HSW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = BDW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well vlv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DISP2D,
+ .ops = &vlv_display_power_well_ops,
+ },
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ },
+ {
+ .name = "dpio-common",
+ .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &vlv_dpio_cmn_power_well_ops,
+ },
+};
+
+static struct i915_power_well chv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+#if 0
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DISP2D,
+ .ops = &vlv_display_power_well_ops,
+ },
+#endif
+ {
+ .name = "pipe-a",
+ /*
+ * FIXME: pipe A power well seems to be the new disp2d well.
+ * At least all registers seem to be housed there. Figure
+ * out if this is a temporary situation in pre-production
+ * hardware or a permanent state of affairs.
+ */
+ .domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
+ .data = PIPE_A,
+ .ops = &chv_pipe_power_well_ops,
+ },
+#if 0
+ {
+ .name = "pipe-b",
+ .domains = CHV_PIPE_B_POWER_DOMAINS,
+ .data = PIPE_B,
+ .ops = &chv_pipe_power_well_ops,
+ },
+ {
+ .name = "pipe-c",
+ .domains = CHV_PIPE_C_POWER_DOMAINS,
+ .data = PIPE_C,
+ .ops = &chv_pipe_power_well_ops,
+ },
+#endif
+ {
+ .name = "dpio-common-bc",
+ /*
+ * XXX: cmnreset for one PHY seems to disturb the other.
+ * As a workaround keep both powered on at the same
+ * time for now.
+ */
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+ {
+ .name = "dpio-common-d",
+ /*
+ * XXX: cmnreset for one PHY seems to disturb the other.
+ * As a workaround keep both powered on at the same
+ * time for now.
+ */
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_D,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+#if 0
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ },
+ {
+ .name = "dpio-tx-d-01",
+ .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+ CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
+ },
+ {
+ .name = "dpio-tx-d-23",
+ .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+ CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
+ },
+#endif
+};
+
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ if (power_well->data == power_well_id)
+ return power_well;
+ }
+
+ return NULL;
+}
+
+#define set_power_wells(power_domains, __power_wells) ({ \
+ (power_domains)->power_wells = (__power_wells); \
+ (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
+})
+
+/**
+ * intel_power_domains_init - initializes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Initializes the power domain structures for @dev_priv depending upon the
+ * supported platform.
+ */
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_init(&power_domains->lock);
+
+ /*
+ * The enabling order will be from lower to higher indexed wells,
+ * the disabling order is reversed.
+ */
+ if (IS_HASWELL(dev_priv->dev)) {
+ set_power_wells(power_domains, hsw_power_wells);
+ hsw_pwr = power_domains;
+ } else if (IS_BROADWELL(dev_priv->dev)) {
+ set_power_wells(power_domains, bdw_power_wells);
+ hsw_pwr = power_domains;
+ } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ set_power_wells(power_domains, chv_power_wells);
+ } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+ set_power_wells(power_domains, vlv_power_wells);
+ } else {
+ set_power_wells(power_domains, i9xx_always_on_power_well);
+ }
+
+ return 0;
+}
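For orientation, the expected call order over the driver's lifetime, as a rough sketch (the real sequence lives in the load/unload paths and interleaves other setup):

/*
 *	driver load:	intel_power_domains_init(dev_priv);
 *			...
 *			intel_power_domains_init_hw(dev_priv);
 *	driver unload:	intel_power_domains_fini(dev_priv);
 */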
+
+static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ if (!intel_enable_rc6(dev))
+ return;
+
+ /* Make sure we're not suspended first. */
+ pm_runtime_get_sync(device);
+ pm_runtime_disable(device);
+}
+
+/**
+ * intel_power_domains_fini - finalizes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Finalizes the power domain structures for @dev_priv depending upon the
+ * supported platform. This function also disables runtime pm and ensures that
+ * the device stays powered up so that the driver can be reloaded.
+ */
+void intel_power_domains_fini(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_disable(dev_priv);
+
+ /* The i915.ko module is still not prepared to be loaded when
+ * the power well is not enabled, so just enable it in case
+ * we're going to unload/reload. */
+ intel_display_set_init_power(dev_priv, true);
+
+ hsw_pwr = NULL;
+}
+
+static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ power_well->ops->sync_hw(dev_priv, power_well);
+ power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+ power_well);
+ }
+ mutex_unlock(&power_domains->lock);
+}
+
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ struct i915_power_well *disp2d =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+	/* If the display might already be active, skip this */
+ if (cmn->ops->is_enabled(dev_priv, cmn) &&
+ disp2d->ops->is_enabled(dev_priv, disp2d) &&
+ I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+ /* cmnlane needs DPLL registers */
+ disp2d->ops->enable(dev_priv, disp2d);
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+	 * Simply ungating isn't enough to fully reset the PHY and get the
+	 * ports and lanes running.
+ */
+ cmn->ops->disable(dev_priv, cmn);
+}
+
+/**
+ * intel_power_domains_init_hw - initialize hardware power domain state
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes the hardware power domain state and enables all
+ * power domains using intel_display_set_init_power().
+ */
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ power_domains->initializing = true;
+
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ mutex_lock(&power_domains->lock);
+ vlv_cmnlane_wa(dev_priv);
+ mutex_unlock(&power_domains->lock);
+ }
+
+ /* For now, we need the power well to be always enabled. */
+ intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_resume(dev_priv);
+ power_domains->initializing = false;
+}
+
+/**
+ * intel_aux_display_runtime_get - grab an auxiliary power domain reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a power domain reference for the auxiliary power domain
+ * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
+ * parents are powered up. Therefore users should only grab a reference to the
+ * innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_aux_display_runtime_put() to release the reference again.
+ */
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_get(dev_priv);
+}
+
+/**
+ * intel_aux_display_runtime_put - release an auxiliary power domain reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the auxiliary power domain reference obtained by
+ * intel_aux_display_runtime_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_put(dev_priv);
+}
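
A typical use of this pair brackets a GMBUS or DP AUX transaction; a
hypothetical sketch:

	intel_aux_display_runtime_get(dev_priv);
	/* ... perform GMBUS / DP AUX register accesses ... */
	intel_aux_display_runtime_put(dev_priv);
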
+
+/**
+ * intel_runtime_pm_get - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on) and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_get_sync(device);
+ WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+/**
+ * intel_runtime_pm_get_noresume - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on).
+ *
+ * It will _not_ power up the device but instead only check that it's powered
+ * on. Therefore it is only valid to call this function from contexts where
+ * the device is known to be powered up and where trying to power it up would
+ * result in hilarity and deadlocks. That pretty much means only the system
+ * suspend/resume code where this is used to grab runtime pm references for
+ * delayed setup down in work items.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
+ pm_runtime_get_noresume(device);
+}
+
+/**
+ * intel_runtime_pm_put - release a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the device-level runtime pm reference obtained by
+ * intel_runtime_pm_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_put_autosuspend(device);
+}
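
The device-level reference follows the same symmetric pattern; a sketch of the
common usage from GEM code:

	intel_runtime_pm_get(dev_priv);		/* wakes the device if asleep */
	/* ... access the GT or the GTT ... */
	intel_runtime_pm_put(dev_priv);		/* re-arms the autosuspend timer */
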
+
+/**
+ * intel_runtime_pm_enable - enable runtime pm
+ * @dev_priv: i915 device instance
+ *
+ * This function enables runtime pm at the end of the driver load sequence.
+ *
+ * Note that this function does not currently enable runtime pm for the
+ * subordinate display power domains. That is only done on the first modeset
+ * using intel_display_set_init_power().
+ */
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_set_active(device);
+
+ /*
+	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
+ * requirement.
+ */
+ if (!intel_enable_rc6(dev)) {
+ DRM_INFO("RC6 disabled, disabling runtime PM support\n");
+ return;
+ }
+
+ pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_use_autosuspend(device);
+
+ pm_runtime_put_autosuspend(device);
+}
+
+/* Display audio driver power well request */
+int i915_request_power_well(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i915_request_power_well);
+
+/* Display audio driver power well release */
+int i915_release_power_well(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i915_release_power_well);
+
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
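
From the audio driver's side, the three exports above would be used along
these lines (a sketch, not the actual snd-hda code):

	int cdclk;

	if (i915_request_power_well())		/* -ENODEV if i915 is absent */
		return -ENODEV;

	cdclk = i915_get_cdclk_freq();		/* valid while the well is held */
	/* ... program the display audio hardware based on cdclk (kHz) ... */

	i915_release_power_well();
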
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9350edd..6d7a277 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1991,57 +1991,10 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
return !list_empty(&connector->probed_modes);
}
-static void
-intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
-{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- struct drm_device *dev = connector->dev;
-
- if (intel_sdvo_connector->left)
- drm_property_destroy(dev, intel_sdvo_connector->left);
- if (intel_sdvo_connector->right)
- drm_property_destroy(dev, intel_sdvo_connector->right);
- if (intel_sdvo_connector->top)
- drm_property_destroy(dev, intel_sdvo_connector->top);
- if (intel_sdvo_connector->bottom)
- drm_property_destroy(dev, intel_sdvo_connector->bottom);
- if (intel_sdvo_connector->hpos)
- drm_property_destroy(dev, intel_sdvo_connector->hpos);
- if (intel_sdvo_connector->vpos)
- drm_property_destroy(dev, intel_sdvo_connector->vpos);
- if (intel_sdvo_connector->saturation)
- drm_property_destroy(dev, intel_sdvo_connector->saturation);
- if (intel_sdvo_connector->contrast)
- drm_property_destroy(dev, intel_sdvo_connector->contrast);
- if (intel_sdvo_connector->hue)
- drm_property_destroy(dev, intel_sdvo_connector->hue);
- if (intel_sdvo_connector->sharpness)
- drm_property_destroy(dev, intel_sdvo_connector->sharpness);
- if (intel_sdvo_connector->flicker_filter)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
- if (intel_sdvo_connector->flicker_filter_2d)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
- if (intel_sdvo_connector->flicker_filter_adaptive)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
- if (intel_sdvo_connector->tv_luma_filter)
- drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
- if (intel_sdvo_connector->tv_chroma_filter)
- drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
- if (intel_sdvo_connector->dot_crawl)
- drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
- if (intel_sdvo_connector->brightness)
- drm_property_destroy(dev, intel_sdvo_connector->brightness);
-}
-
static void intel_sdvo_destroy(struct drm_connector *connector)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (intel_sdvo_connector->tv_format)
- drm_property_destroy(connector->dev,
- intel_sdvo_connector->tv_format);
-
- intel_sdvo_destroy_enhance_property(connector);
drm_connector_cleanup(connector);
kfree(intel_sdvo_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 07a74ef..7d9c340 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -37,6 +37,20 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static bool
+format_is_yuv(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YVYU:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
{
/* paranoia */
@@ -46,7 +60,23 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
}
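
As a worked example of the conversion above: a 1080p mode with crtc_clock =
148500 (kHz) and crtc_htotal = 2200 turns 100 us into
DIV_ROUND_UP(100 * 148500, 1000 * 2200) = 7 scanlines.
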
-static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+/**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @crtc: the crtc of which the registers are going to be updated
+ * @start_vbl_count: vblank counter return pointer used for error checking
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically with respect to vblank. If the next vblank happens within
+ * the next 100 us, this function waits until the vblank passes.
+ *
+ * After a successful call to this function, interrupts will be disabled
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays. The value written to @start_vbl_count should be
+ * supplied to intel_pipe_update_end() for error checking.
+ *
+ * Return: true if the call was successful
+ */
+bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
@@ -56,8 +86,6 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
DEFINE_WAIT(wait);
- WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
-
vblank_start = mode->crtc_vblank_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
@@ -112,7 +140,16 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
return true;
}
-static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
+/**
+ * intel_pipe_update_end() - end update of a set of display registers
+ * @crtc: the crtc of which the registers were updated
+ * @start_vbl_count: start vblank counter (used for error checking)
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies the update was actually completed
+ * before a vblank using the value of @start_vbl_count.
+ */
+void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe;
@@ -139,6 +176,226 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
}
static void
+skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane + 1;
+ u32 plane_ctl, stride;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+
+ /* Mask out pixel format bits in case we change it */
+ plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
+ plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
+ plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
+ plane_ctl &= ~PLANE_CTL_TILED_MASK;
+ plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
+ plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
+
+ /* Trickle feed has to be enabled */
+ plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ break;
+ /*
+	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+ * to be already pre-multiplied. We need to add a knob (or a different
+ * DRM_FORMAT) for user-space to configure that.
+ */
+ case DRM_FORMAT_ABGR8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+ PLANE_CTL_ORDER_RGBX |
+ PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+ PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ break;
+ case DRM_FORMAT_YUYV:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+ break;
+ default:
+ BUG();
+ }
+
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ stride = fb->pitches[0] >> 6;
+ break;
+ case I915_TILING_X:
+ plane_ctl |= PLANE_CTL_TILED_X;
+ stride = fb->pitches[0] >> 9;
+ break;
+ default:
+ BUG();
+ }
+ if (intel_plane->rotation == BIT(DRM_ROTATE_180))
+ plane_ctl |= PLANE_CTL_ROTATE_180;
+
+ plane_ctl |= PLANE_CTL_ENABLE;
+ plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+ intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
+ pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
+ I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+ I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+ I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+ I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+ I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
+ POSTING_READ(PLANE_SURF(pipe, plane));
+}
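
A worked example of the stride encoding above: a linear 1920x1080 XRGB8888
framebuffer has fb->pitches[0] = 1920 * 4 = 7680 bytes, so the register is
written with 7680 >> 6 = 120 (64-byte units); the same buffer X-tiled would
instead use 7680 >> 9 = 15 (512-byte tile widths).
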
+
+static void
+skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane + 1;
+
+ I915_WRITE(PLANE_CTL(pipe, plane),
+ I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
+
+ /* Activate double buffered register update */
+ I915_WRITE(PLANE_CTL(pipe, plane), 0);
+ POSTING_READ(PLANE_CTL(pipe, plane));
+
+ intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
+}
+
+static int
+skl_update_colorkey(struct drm_plane *drm_plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane;
+ u32 plane_ctl;
+
+ I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+ I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+ I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+
+ plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+ plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+ I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+
+ POSTING_READ(PLANE_CTL(pipe, plane));
+
+ return 0;
+}
+
+static void
+skl_get_colorkey(struct drm_plane *drm_plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane;
+ u32 plane_ctl;
+
+ key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
+ key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
+ key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
+
+ plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+
+ switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
+ case PLANE_CTL_KEY_ENABLE_DESTINATION:
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ break;
+ case PLANE_CTL_KEY_ENABLE_SOURCE:
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ break;
+ default:
+ key->flags = I915_SET_COLORKEY_NONE;
+ }
+}
+
+static void
+chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
+{
+ struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+ int plane = intel_plane->plane;
+
+	/* RGB data seems to always bypass the CSC */
+ if (!format_is_yuv(format))
+ return;
+
+ /*
+ * BT.601 limited range YCbCr -> full range RGB
+ *
+ * |r| | 6537 4769 0| |cr |
+ * |g| = |-3330 4769 -1605| x |y-64|
+ * |b| | 0 4769 8263| |cb |
+ *
+ * Cb and Cr apparently come in as signed already, so no
+ * need for any offset. For Y we need to remove the offset.
+ */
+ I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
+ I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+ I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
+ I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
+ I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
+ I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
+ I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));
+
+ I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+ I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+ I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+
+ I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+}
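
The coefficients above are consistent with BT.601 limited-range expansion in
what looks like 4.12 fixed point: 4769/4096 ~= 1.164 (Y scale), 6537/4096 ~=
1.596 (Cr->R), 8263/4096 ~= 2.017 (Cb->B), 3330/4096 ~= 0.813 and 1605/4096 ~=
0.392 (Cr/Cb->G). As a quick check, peak white is Y = 940 in 10-bit limited
range (hence the SPCSC_IMIN(64)/SPCSC_IMAX(940) clamps), and
(940 - 64) * 4769 / 4096 ~= 1020, just under the OMAX(1023) output clamp.
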
+
+static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
@@ -249,6 +506,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
+ chv_update_csc(intel_plane, fb->pixel_format);
+
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@@ -257,6 +517,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
else
I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+ I915_WRITE(SPCONSTALPHA(pipe, plane), 0);
+
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
@@ -821,20 +1083,6 @@ ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
key->flags = I915_SET_COLORKEY_NONE;
}
-static bool
-format_is_yuv(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_YVYU:
- return true;
- default:
- return false;
- }
-}
-
static bool colorkey_enabled(struct intel_plane *intel_plane)
{
struct drm_intel_sprite_colorkey key;
@@ -845,57 +1093,23 @@ static bool colorkey_enabled(struct intel_plane *intel_plane)
}
static int
-intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+intel_check_sprite_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
- enum pipe pipe = intel_crtc->pipe;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct drm_i915_gem_object *old_obj = intel_plane->obj;
- int ret;
- bool primary_enabled;
- bool visible;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y, src_w, src_h;
+ struct drm_rect *src = &state->src;
+ struct drm_rect *dst = &state->dst;
+ struct drm_rect *orig_src = &state->orig_src;
+ const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- struct drm_rect src = {
- /* sample coordinates in 16.16 fixed point */
- .x1 = src_x,
- .x2 = src_x + src_w,
- .y1 = src_y,
- .y2 = src_y + src_h,
- };
- struct drm_rect dst = {
- /* integer pixels */
- .x1 = crtc_x,
- .x2 = crtc_x + crtc_w,
- .y1 = crtc_y,
- .y2 = crtc_y + crtc_h,
- };
- const struct drm_rect clip = {
- .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
- .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
- };
- const struct {
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y, src_w, src_h;
- } orig = {
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- };
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
@@ -927,55 +1141,55 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
- drm_rect_rotate(&src, fb->width << 16, fb->height << 16,
+ drm_rect_rotate(src, fb->width << 16, fb->height << 16,
intel_plane->rotation);
- hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
+ hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(hscale < 0);
- vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(vscale < 0);
- visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
+ state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
- crtc_x = dst.x1;
- crtc_y = dst.y1;
- crtc_w = drm_rect_width(&dst);
- crtc_h = drm_rect_height(&dst);
+ crtc_x = dst->x1;
+ crtc_y = dst->y1;
+ crtc_w = drm_rect_width(dst);
+ crtc_h = drm_rect_height(dst);
- if (visible) {
+ if (state->visible) {
/* check again in case clipping clamped the results */
- hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
+ hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
if (hscale < 0) {
DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
- drm_rect_debug_print(&src, true);
- drm_rect_debug_print(&dst, false);
+ drm_rect_debug_print(src, true);
+ drm_rect_debug_print(dst, false);
return hscale;
}
- vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (vscale < 0) {
DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
- drm_rect_debug_print(&src, true);
- drm_rect_debug_print(&dst, false);
+ drm_rect_debug_print(src, true);
+ drm_rect_debug_print(dst, false);
return vscale;
}
/* Make the source viewport size an exact multiple of the scaling factors. */
- drm_rect_adjust_size(&src,
- drm_rect_width(&dst) * hscale - drm_rect_width(&src),
- drm_rect_height(&dst) * vscale - drm_rect_height(&src));
+ drm_rect_adjust_size(src,
+ drm_rect_width(dst) * hscale - drm_rect_width(src),
+ drm_rect_height(dst) * vscale - drm_rect_height(src));
- drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16,
+ drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
intel_plane->rotation);
/* sanity check to make sure the src viewport wasn't enlarged */
- WARN_ON(src.x1 < (int) src_x ||
- src.y1 < (int) src_y ||
- src.x2 > (int) (src_x + src_w) ||
- src.y2 > (int) (src_y + src_h));
+ WARN_ON(src->x1 < (int) orig_src->x1 ||
+ src->y1 < (int) orig_src->y1 ||
+ src->x2 > (int) orig_src->x2 ||
+ src->y2 > (int) orig_src->y2);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -983,10 +1197,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
- src_x = src.x1 >> 16;
- src_w = drm_rect_width(&src) >> 16;
- src_y = src.y1 >> 16;
- src_h = drm_rect_height(&src) >> 16;
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_y = src->y1 >> 16;
+ src_h = drm_rect_height(src) >> 16;
if (format_is_yuv(fb->pixel_format)) {
src_x &= ~1;
@@ -1000,12 +1214,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
crtc_w &= ~1;
if (crtc_w == 0)
- visible = false;
+ state->visible = false;
}
}
/* Check size restrictions when scaling */
- if (visible && (src_w != crtc_w || src_h != crtc_h)) {
+ if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
WARN_ON(!intel_plane->can_scale);
@@ -1013,12 +1227,13 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME interlacing min height is 6 */
if (crtc_w < 3 || crtc_h < 3)
- visible = false;
+ state->visible = false;
if (src_w < 3 || src_h < 3)
- visible = false;
+ state->visible = false;
- width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
+ width_bytes = ((src_x * pixel_size) & 63) +
+ src_w * pixel_size;
if (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096) {
@@ -1027,42 +1242,90 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
}
- dst.x1 = crtc_x;
- dst.x2 = crtc_x + crtc_w;
- dst.y1 = crtc_y;
- dst.y2 = crtc_y + crtc_h;
+ if (state->visible) {
+ src->x1 = src_x;
+ src->x2 = src_x + src_w;
+ src->y1 = src_y;
+ src->y2 = src_y + src_h;
+ }
- /*
- * If the sprite is completely covering the primary plane,
- * we can disable the primary and save power.
- */
- primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
- WARN_ON(!primary_enabled && !visible && intel_crtc->active);
+ dst->x1 = crtc_x;
+ dst->x2 = crtc_x + crtc_w;
+ dst->y1 = crtc_y;
+ dst->y2 = crtc_y + crtc_h;
- mutex_lock(&dev->struct_mutex);
+ return 0;
+}
- /* Note that this will apply the VT-d workaround for scanouts,
- * which is more restrictive than required for sprites. (The
- * primary plane requires 256KiB alignment with 64 PTE padding,
- * the sprite planes only require 128KiB alignment and 32 PTE padding.
- */
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+static int
+intel_prepare_sprite_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_crtc *crtc = state->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_plane->obj;
+ int ret;
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_SPRITE(pipe));
- mutex_unlock(&dev->struct_mutex);
+ if (old_obj != obj) {
+ mutex_lock(&dev->struct_mutex);
- if (ret)
- return ret;
+ /* Note that this will apply the VT-d workaround for scanouts,
+ * which is more restrictive than required for sprites. (The
+ * primary plane requires 256KiB alignment with 64 PTE padding,
+ * the sprite planes only require 128KiB alignment and 32 PTE
+ * padding.
+ */
+ ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
+ mutex_unlock(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
- intel_plane->crtc_x = orig.crtc_x;
- intel_plane->crtc_y = orig.crtc_y;
- intel_plane->crtc_w = orig.crtc_w;
- intel_plane->crtc_h = orig.crtc_h;
- intel_plane->src_x = orig.src_x;
- intel_plane->src_y = orig.src_y;
- intel_plane->src_w = orig.src_w;
- intel_plane->src_h = orig.src_h;
+static void
+intel_commit_sprite_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_crtc *crtc = state->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_plane->obj;
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y, src_w, src_h;
+ struct drm_rect *dst = &state->dst;
+ const struct drm_rect *clip = &state->clip;
+ bool primary_enabled;
+
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
+ WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
+
+ intel_plane->crtc_x = state->orig_dst.x1;
+ intel_plane->crtc_y = state->orig_dst.y1;
+ intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+ intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+ intel_plane->src_x = state->orig_src.x1;
+ intel_plane->src_y = state->orig_src.y1;
+ intel_plane->src_w = drm_rect_width(&state->orig_src);
+ intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
if (intel_crtc->active) {
@@ -1076,12 +1339,22 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (primary_was_enabled && !primary_enabled)
intel_pre_disable_primary(crtc);
- if (visible)
+ if (state->visible) {
+ crtc_x = state->dst.x1;
+ crtc_y = state->dst.y1;
+ crtc_w = drm_rect_width(&state->dst);
+ crtc_h = drm_rect_height(&state->dst);
+ src_x = state->src.x1;
+ src_y = state->src.y1;
+ src_w = drm_rect_width(&state->src);
+ src_h = drm_rect_height(&state->src);
intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
- else
+ } else {
intel_plane->disable_plane(plane, crtc);
+ }
+
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
@@ -1090,21 +1363,65 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
/* Unpin old obj after new one is active to avoid ugliness */
- if (old_obj) {
+ if (old_obj && old_obj != obj) {
/*
* It's fairly common to simply update the position of
* an existing object. In that case, we don't need to
* wait for vblank to avoid ugliness, we only need to
* do the pin & ref bookkeeping.
*/
- if (old_obj != obj && intel_crtc->active)
+ if (intel_crtc->active)
intel_wait_for_vblank(dev, intel_crtc->pipe);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_plane_state state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int ret;
+ state.crtc = crtc;
+ state.fb = fb;
+
+ /* sample coordinates in 16.16 fixed point */
+ state.src.x1 = src_x;
+ state.src.x2 = src_x + src_w;
+ state.src.y1 = src_y;
+ state.src.y2 = src_y + src_h;
+
+ /* integer pixels */
+ state.dst.x1 = crtc_x;
+ state.dst.x2 = crtc_x + crtc_w;
+ state.dst.y1 = crtc_y;
+ state.dst.y2 = crtc_y + crtc_h;
+
+ state.clip.x1 = 0;
+ state.clip.y1 = 0;
+ state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+ state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+ state.orig_src = state.src;
+ state.orig_dst = state.dst;
+
+ ret = intel_check_sprite_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ ret = intel_prepare_sprite_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ intel_commit_sprite_plane(plane, &state);
return 0;
}
@@ -1305,6 +1622,18 @@ static uint32_t vlv_plane_formats[] = {
DRM_FORMAT_VYUY,
};
+static uint32_t skl_plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
@@ -1368,7 +1697,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
}
break;
-
+ case 9:
+ /*
+ * FIXME: Skylake planes can be scaled (with some restrictions),
+ * but this is for another time.
+ */
+ intel_plane->can_scale = false;
+ intel_plane->max_downscale = 1;
+ intel_plane->update_plane = skl_update_plane;
+ intel_plane->disable_plane = skl_disable_plane;
+ intel_plane->update_colorkey = skl_update_colorkey;
+ intel_plane->get_colorkey = skl_get_colorkey;
+
+ plane_formats = skl_plane_formats;
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ break;
default:
kfree(intel_plane);
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c14341c..6f5f59b 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1182,18 +1182,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
int type;
/* Disable TV interrupts around load detect or we'll recurse */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_disable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1266,11 +1265,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
return type;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 918b761..46de8d7 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -43,23 +43,17 @@
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
- WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
- "Device suspended\n");
+ WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+ "Device suspended\n");
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
- u32 gt_thread_status_mask;
-
- if (IS_HASWELL(dev_priv->dev))
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
- else
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
-
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
- if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+ if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
+ GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
@@ -120,8 +114,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
/* WaRsForcewakeWaitTC0:ivb,hsw */
- if (INTEL_INFO(dev_priv->dev)->gen < 8)
- __gen6_gt_wait_for_thread_c0(dev_priv);
+ __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -229,10 +222,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for media to ack.\n");
}
-
- /* WaRsForcewakeWaitTC0:vlv */
- if (!IS_CHERRYVIEW(dev_priv->dev))
- __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -299,6 +288,154 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+ _MASKED_BIT_DISABLE(0xffff));
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+ _MASKED_BIT_DISABLE(0xffff));
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+ _MASKED_BIT_DISABLE(0xffff));
+}
+
+static void
+__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_RENDER_GEN9) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_RENDER_GEN9) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Render to ack.\n");
+ }
+
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_GEN9) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_GEN9) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Media to ack.\n");
+ }
+
+ /* Check for Blitter Engine */
+ if (FORCEWAKE_BLITTER & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_BLITTER_GEN9) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_BLITTER_GEN9) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
+ }
+}
+
+static void
+__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+ /* Check for Blitter Engine */
+ if (FORCEWAKE_BLITTER & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+}
+
+static void
+gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (dev_priv->uncore.fw_rendercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_RENDER);
+ }
+
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (dev_priv->uncore.fw_mediacount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_MEDIA);
+ }
+
+ if (FORCEWAKE_BLITTER & fw_engine) {
+ if (dev_priv->uncore.fw_blittercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (FORCEWAKE_RENDER & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_rendercount == 0);
+ if (--dev_priv->uncore.fw_rendercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_RENDER);
+ }
+
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_mediacount == 0);
+ if (--dev_priv->uncore.fw_mediacount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_MEDIA);
+ }
+
+ if (FORCEWAKE_BLITTER & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_blittercount == 0);
+ if (--dev_priv->uncore.fw_blittercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
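
Callers reach these via the gen6_gt_force_wake_get()/put() entry points
patched below; a sketch of the reference-counted usage:

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_RENDER);
	/* ... a burst of render-range register accesses ... */
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_RENDER);

	/* nesting is fine: only the 0->1 and 1->0 transitions touch hw */
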
+
static void gen6_force_wake_timer(unsigned long arg)
{
struct drm_i915_private *dev_priv = (void *)arg;
@@ -337,6 +474,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
__gen7_gt_force_wake_mt_reset(dev_priv);
+ if (IS_GEN9(dev))
+ __gen9_gt_force_wake_mt_reset(dev_priv);
+
if (restore) { /* If reset with a user forcewake, try to restore */
unsigned fw = 0;
@@ -346,6 +486,15 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (dev_priv->uncore.fw_mediacount)
fw |= FORCEWAKE_MEDIA;
+ } else if (IS_GEN9(dev)) {
+ if (dev_priv->uncore.fw_rendercount)
+ fw |= FORCEWAKE_RENDER;
+
+ if (dev_priv->uncore.fw_mediacount)
+ fw |= FORCEWAKE_MEDIA;
+
+ if (dev_priv->uncore.fw_blittercount)
+ fw |= FORCEWAKE_BLITTER;
} else {
if (dev_priv->uncore.forcewake_count)
fw = FORCEWAKE_ALL;
@@ -363,7 +512,8 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+static void __intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -389,6 +539,12 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
intel_uncore_forcewake_reset(dev, restore_forcewake);
}
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+{
+ __intel_uncore_early_sanitize(dev, restore_forcewake);
+ i915_check_and_clear_faults(dev);
+}
+
void intel_uncore_sanitize(struct drm_device *dev)
{
/* BIOS often leaves RC6 enabled, but disable it for hw init */
@@ -410,6 +566,10 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
intel_runtime_pm_get(dev_priv);
+ /* Redirect to Gen9 specific routine */
+ if (IS_GEN9(dev_priv->dev))
+ return gen9_force_wake_get(dev_priv, fw_engine);
+
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev))
return vlv_force_wake_get(dev_priv, fw_engine);
@@ -431,6 +591,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
if (!dev_priv->uncore.funcs.force_wake_put)
return;
+ /* Redirect to Gen9 specific routine */
+ if (IS_GEN9(dev_priv->dev)) {
+ gen9_force_wake_put(dev_priv, fw_engine);
+ goto out;
+ }
+
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev)) {
vlv_force_wake_put(dev_priv, fw_engine);
@@ -504,6 +670,38 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x14000, 0x14400) || \
REG_RANGE((reg), 0x22000, 0x24000))
+#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
+ REG_RANGE((reg), 0xB00, 0x2000)
+
+#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x2700) || \
+ REG_RANGE((reg), 0x3000, 0x4000) || \
+ REG_RANGE((reg), 0x5200, 0x8000) || \
+ REG_RANGE((reg), 0x8140, 0x8160) || \
+ REG_RANGE((reg), 0x8300, 0x8500) || \
+ REG_RANGE((reg), 0x8C00, 0x8D00) || \
+ REG_RANGE((reg), 0xB000, 0xB480) || \
+ REG_RANGE((reg), 0xE000, 0xE900) || \
+ REG_RANGE((reg), 0x24400, 0x24800))
+
+#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x8130, 0x8140) || \
+ REG_RANGE((reg), 0x8800, 0x8A00) || \
+ REG_RANGE((reg), 0xD000, 0xD800) || \
+ REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x1A000, 0x1EA00) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
+ REG_RANGE((reg), 0x9400, 0x9800)
+
+#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
+	((reg) < 0x40000 && \
+ !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
+ !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
+ !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
+ !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
+
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -634,6 +832,45 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_FOOTER; \
}
+#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
+ ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
+
+#define __gen9_read(x) \
+static u##x \
+gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } else { \
+ unsigned fwengine = 0; \
+ if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } else { \
+ if (dev_priv->uncore.fw_blittercount == 0) \
+ fwengine = FORCEWAKE_BLITTER; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ } \
+ REG_READ_FOOTER; \
+}
+
+__gen9_read(8)
+__gen9_read(16)
+__gen9_read(32)
+__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
@@ -655,6 +892,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)
+#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
@@ -792,6 +1030,69 @@ chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
REG_WRITE_FOOTER; \
}
+static const u32 gen9_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE),
+ RING_TAIL(GEN6_BSD_RING_BASE),
+ RING_TAIL(VEBOX_RING_BASE),
+ RING_TAIL(BLT_RING_BASE),
+ FORCEWAKE_BLITTER_GEN9,
+ FORCEWAKE_RENDER_GEN9,
+ FORCEWAKE_MEDIA_GEN9,
+ GEN6_RPNSWREQ,
+ GEN6_RC_VIDEO_FREQ,
+ /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
+ if (reg == gen9_shadowed_regs[i])
+ return true;
+
+ return false;
+}
+
+#define __gen9_write(x) \
+static void \
+gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
+ bool trace) { \
+ REG_WRITE_HEADER; \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
+ is_gen9_shadowed(dev_priv, reg)) { \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ } else { \
+ unsigned fwengine = 0; \
+ if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } else { \
+ if (dev_priv->uncore.fw_blittercount == 0) \
+ fwengine = FORCEWAKE_BLITTER; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ fwengine); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ fwengine); \
+ } \
+ REG_WRITE_FOOTER; \
+}
+
+__gen9_write(8)
+__gen9_write(16)
+__gen9_write(32)
+__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
@@ -817,6 +1118,7 @@ __gen4_write(16)
__gen4_write(32)
__gen4_write(64)
+#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
@@ -826,6 +1128,22 @@ __gen4_write(64)
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
+#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
+do { \
+ dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
+ dev_priv->uncore.funcs.mmio_writew = x##_write16; \
+ dev_priv->uncore.funcs.mmio_writel = x##_write32; \
+ dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
+} while (0)
+
+#define ASSIGN_READ_MMIO_VFUNCS(x) \
+do { \
+ dev_priv->uncore.funcs.mmio_readb = x##_read8; \
+ dev_priv->uncore.funcs.mmio_readw = x##_read16; \
+ dev_priv->uncore.funcs.mmio_readl = x##_read32; \
+ dev_priv->uncore.funcs.mmio_readq = x##_read64; \
+} while (0)
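
For reference, ASSIGN_READ_MMIO_VFUNCS(gen9) expands to exactly the four
assignments it replaces:

	dev_priv->uncore.funcs.mmio_readb = gen9_read8;
	dev_priv->uncore.funcs.mmio_readw = gen9_read16;
	dev_priv->uncore.funcs.mmio_readl = gen9_read32;
	dev_priv->uncore.funcs.mmio_readq = gen9_read64;
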
+
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -833,9 +1151,12 @@ void intel_uncore_init(struct drm_device *dev)
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
- intel_uncore_early_sanitize(dev, false);
+ __intel_uncore_early_sanitize(dev, false);
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_GEN9(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
+ } else if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
@@ -881,77 +1202,52 @@ void intel_uncore_init(struct drm_device *dev)
switch (INTEL_INFO(dev)->gen) {
default:
+ WARN_ON(1);
+ return;
+ case 9:
+ ASSIGN_WRITE_MMIO_VFUNCS(gen9);
+ ASSIGN_READ_MMIO_VFUNCS(gen9);
+ break;
+ case 8:
if (IS_CHERRYVIEW(dev)) {
- dev_priv->uncore.funcs.mmio_writeb = chv_write8;
- dev_priv->uncore.funcs.mmio_writew = chv_write16;
- dev_priv->uncore.funcs.mmio_writel = chv_write32;
- dev_priv->uncore.funcs.mmio_writeq = chv_write64;
- dev_priv->uncore.funcs.mmio_readb = chv_read8;
- dev_priv->uncore.funcs.mmio_readw = chv_read16;
- dev_priv->uncore.funcs.mmio_readl = chv_read32;
- dev_priv->uncore.funcs.mmio_readq = chv_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(chv);
+ ASSIGN_READ_MMIO_VFUNCS(chv);
} else {
- dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
- dev_priv->uncore.funcs.mmio_writew = gen8_write16;
- dev_priv->uncore.funcs.mmio_writel = gen8_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen8);
+ ASSIGN_READ_MMIO_VFUNCS(gen6);
}
break;
case 7:
case 6:
if (IS_HASWELL(dev)) {
- dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
- dev_priv->uncore.funcs.mmio_writew = hsw_write16;
- dev_priv->uncore.funcs.mmio_writel = hsw_write32;
- dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
+ ASSIGN_WRITE_MMIO_VFUNCS(hsw);
} else {
- dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
- dev_priv->uncore.funcs.mmio_writew = gen6_write16;
- dev_priv->uncore.funcs.mmio_writel = gen6_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen6);
}
if (IS_VALLEYVIEW(dev)) {
- dev_priv->uncore.funcs.mmio_readb = vlv_read8;
- dev_priv->uncore.funcs.mmio_readw = vlv_read16;
- dev_priv->uncore.funcs.mmio_readl = vlv_read32;
- dev_priv->uncore.funcs.mmio_readq = vlv_read64;
+ ASSIGN_READ_MMIO_VFUNCS(vlv);
} else {
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ ASSIGN_READ_MMIO_VFUNCS(gen6);
}
break;
case 5:
- dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
- dev_priv->uncore.funcs.mmio_writew = gen5_write16;
- dev_priv->uncore.funcs.mmio_writel = gen5_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
- dev_priv->uncore.funcs.mmio_readb = gen5_read8;
- dev_priv->uncore.funcs.mmio_readw = gen5_read16;
- dev_priv->uncore.funcs.mmio_readl = gen5_read32;
- dev_priv->uncore.funcs.mmio_readq = gen5_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen5);
+ ASSIGN_READ_MMIO_VFUNCS(gen5);
break;
case 4:
case 3:
case 2:
- dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
- dev_priv->uncore.funcs.mmio_writew = gen4_write16;
- dev_priv->uncore.funcs.mmio_writel = gen4_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
- dev_priv->uncore.funcs.mmio_readb = gen4_read8;
- dev_priv->uncore.funcs.mmio_readw = gen4_read16;
- dev_priv->uncore.funcs.mmio_readl = gen4_read32;
- dev_priv->uncore.funcs.mmio_readq = gen4_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen4);
+ ASSIGN_READ_MMIO_VFUNCS(gen4);
break;
}
+
+ i915_check_and_clear_faults(dev);
}
+#undef ASSIGN_WRITE_MMIO_VFUNCS
+#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_device *dev)
{
@@ -968,7 +1264,7 @@ static const struct register_whitelist {
/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
uint32_t gen_bitmask;
} whitelist[] = {
- { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
@@ -1053,41 +1349,34 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
return 0;
}
-static int i965_reset_complete(struct drm_device *dev)
+static int i915_reset_complete(struct drm_device *dev)
{
u8 gdrst;
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- return (gdrst & GRDOM_RESET_ENABLE) == 0;
+ pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_STATUS) == 0;
}
-static int i965_do_reset(struct drm_device *dev)
+static int i915_do_reset(struct drm_device *dev)
{
- int ret;
-
- /* FIXME: i965g/gm need a display save/restore for gpu reset. */
- return -ENODEV;
+ /* assert reset for at least 20 usec */
+ pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ udelay(20);
+ pci_write_config_byte(dev->pdev, I915_GDRST, 0);
- /*
- * Set the domains we want to reset (GRDOM/bits 2 and 3) as
- * well as the reset bit (GR/bit 0). Setting the GR bit
- * triggers the reset; when done, the hardware will clear it.
- */
- pci_write_config_byte(dev->pdev, I965_GDRST,
- GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
- if (ret)
- return ret;
-
- pci_write_config_byte(dev->pdev, I965_GDRST,
- GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-
- ret = wait_for(i965_reset_complete(dev), 500);
- if (ret)
- return ret;
+ return wait_for(i915_reset_complete(dev), 500);
+}
- pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+static int g4x_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
- return 0;
+static int g33_do_reset(struct drm_device *dev)
+{
+ pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ return wait_for(g4x_reset_complete(dev), 500);
}
static int g4x_do_reset(struct drm_device *dev)
@@ -1095,9 +1384,9 @@ static int g4x_do_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- pci_write_config_byte(dev->pdev, I965_GDRST,
+ pci_write_config_byte(dev->pdev, I915_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(dev), 500);
if (ret)
return ret;
@@ -1105,9 +1394,9 @@ static int g4x_do_reset(struct drm_device *dev)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I965_GDRST,
+ pci_write_config_byte(dev->pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(dev), 500);
if (ret)
return ret;
@@ -1115,7 +1404,7 @@ static int g4x_do_reset(struct drm_device *dev)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+ pci_write_config_byte(dev->pdev, I915_GDRST, 0);
return 0;
}
@@ -1173,8 +1462,10 @@ int intel_gpu_reset(struct drm_device *dev)
return ironlake_do_reset(dev);
else if (IS_G4X(dev))
return g4x_do_reset(dev);
- else if (IS_GEN4(dev))
- return i965_do_reset(dev);
+ else if (IS_G33(dev))
+ return g33_do_reset(dev);
+ else if (INTEL_INFO(dev)->gen >= 3)
+ return i915_do_reset(dev);
else
return -ENODEV;
}